diff --git a/.github/workflows/proto-version-check.yml b/.github/workflows/proto-version-check.yml
new file mode 100644
index 000000000..ea300419d
--- /dev/null
+++ b/.github/workflows/proto-version-check.yml
@@ -0,0 +1,62 @@
+name: Proto Version Check
+
+on:
+ pull_request:
+ paths:
+ - "**/*.pb.go"
+
+jobs:
+ check-proto-versions:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Check for proto tool version changes
+ uses: actions/github-script@v7
+ with:
+ script: |
+ const files = await github.paginate(github.rest.pulls.listFiles, {
+ owner: context.repo.owner,
+ repo: context.repo.repo,
+ pull_number: context.issue.number,
+ per_page: 100,
+ });
+
+ const pbFiles = files.filter(f => f.filename.endsWith('.pb.go'));
+ const missingPatch = pbFiles.filter(f => !f.patch).map(f => f.filename);
+ if (missingPatch.length > 0) {
+ core.setFailed(
+ `Cannot inspect patch data for:\n` +
+ missingPatch.map(f => `- ${f}`).join('\n') +
+ `\nThis can happen with very large PRs. Verify proto versions manually.`
+ );
+ return;
+ }
+ const versionPattern = /^[+-]\s*\/\/\s+protoc(?:-gen-go)?\s+v[\d.]+/;
+ const violations = [];
+
+ for (const file of pbFiles) {
+ const changed = file.patch
+ .split('\n')
+ .filter(line => versionPattern.test(line));
+ if (changed.length > 0) {
+ violations.push({
+ file: file.filename,
+ lines: changed,
+ });
+ }
+ }
+
+ if (violations.length > 0) {
+ const details = violations.map(v =>
+ `${v.file}:\n${v.lines.map(l => ' ' + l).join('\n')}`
+ ).join('\n\n');
+
+ core.setFailed(
+ `Proto version strings changed in generated files.\n` +
+ `This usually means the wrong protoc or protoc-gen-go version was used.\n` +
+ `Regenerate with the matching tool versions.\n\n` +
+ details
+ );
+ return;
+ }
+
+ console.log('No proto version string changes detected');
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 83444b541..5ada1033d 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -9,7 +9,7 @@ on:
pull_request:
env:
- SIGN_PIPE_VER: "v0.1.1"
+ SIGN_PIPE_VER: "v0.1.2"
GORELEASER_VER: "v2.14.3"
PRODUCT_NAME: "NetBird"
COPYRIGHT: "NetBird GmbH"
diff --git a/client/cmd/debug.go b/client/cmd/debug.go
index 0e2717756..e3d3afe5f 100644
--- a/client/cmd/debug.go
+++ b/client/cmd/debug.go
@@ -199,9 +199,11 @@ func runForDuration(cmd *cobra.Command, args []string) error {
cmd.Println("Log level set to trace.")
}
+ needsRestoreUp := false
if _, err := client.Down(cmd.Context(), &proto.DownRequest{}); err != nil {
cmd.PrintErrf("Failed to bring service down: %v\n", status.Convert(err).Message())
} else {
+ needsRestoreUp = !stateWasDown
cmd.Println("netbird down")
}
@@ -217,6 +219,7 @@ func runForDuration(cmd *cobra.Command, args []string) error {
if _, err := client.Up(cmd.Context(), &proto.UpRequest{}); err != nil {
cmd.PrintErrf("Failed to bring service up: %v\n", status.Convert(err).Message())
} else {
+ needsRestoreUp = false
cmd.Println("netbird up")
}
@@ -264,6 +267,14 @@ func runForDuration(cmd *cobra.Command, args []string) error {
return fmt.Errorf("failed to bundle debug: %v", status.Convert(err).Message())
}
+ if needsRestoreUp {
+ if _, err := client.Up(cmd.Context(), &proto.UpRequest{}); err != nil {
+ cmd.PrintErrf("Failed to restore service up state: %v\n", status.Convert(err).Message())
+ } else {
+ cmd.Println("netbird up (restored)")
+ }
+ }
+
if stateWasDown {
if _, err := client.Down(cmd.Context(), &proto.DownRequest{}); err != nil {
cmd.PrintErrf("Failed to restore service down state: %v\n", status.Convert(err).Message())
diff --git a/client/cmd/expose.go b/client/cmd/expose.go
index f4727703e..c48a6adac 100644
--- a/client/cmd/expose.go
+++ b/client/cmd/expose.go
@@ -14,6 +14,7 @@ import (
log "github.com/sirupsen/logrus"
"github.com/spf13/cobra"
+ "google.golang.org/grpc/status"
"github.com/netbirdio/netbird/client/internal/expose"
"github.com/netbirdio/netbird/client/proto"
@@ -201,7 +202,7 @@ func exposeFn(cmd *cobra.Command, args []string) error {
stream, err := client.ExposeService(ctx, req)
if err != nil {
- return fmt.Errorf("expose service: %w", err)
+ return fmt.Errorf("expose service: %v", status.Convert(err).Message())
}
if err := handleExposeReady(cmd, stream, port); err != nil {
@@ -236,7 +237,7 @@ func toExposeProtocol(exposeProtocol string) (proto.ExposeProtocol, error) {
func handleExposeReady(cmd *cobra.Command, stream proto.DaemonService_ExposeServiceClient, port uint64) error {
event, err := stream.Recv()
if err != nil {
- return fmt.Errorf("receive expose event: %w", err)
+ return fmt.Errorf("receive expose event: %v", status.Convert(err).Message())
}
ready, ok := event.Event.(*proto.ExposeServiceEvent_Ready)
diff --git a/client/firewall/create_linux.go b/client/firewall/create_linux.go
index 12dcaee8a..d916ebad4 100644
--- a/client/firewall/create_linux.go
+++ b/client/firewall/create_linux.go
@@ -6,6 +6,7 @@ import (
"errors"
"fmt"
"os"
+ "strconv"
"github.com/coreos/go-iptables/iptables"
"github.com/google/nftables"
@@ -35,20 +36,34 @@ const SKIP_NFTABLES_ENV = "NB_SKIP_NFTABLES_CHECK"
type FWType int
func NewFirewall(iface IFaceMapper, stateManager *statemanager.Manager, flowLogger nftypes.FlowLogger, disableServerRoutes bool, mtu uint16) (firewall.Manager, error) {
- // on the linux system we try to user nftables or iptables
- // in any case, because we need to allow netbird interface traffic
- // so we use AllowNetbird traffic from these firewall managers
- // for the userspace packet filtering firewall
+ // If the interface runs in userspace mode and the userspace firewall was explicitly requested, skip the native firewall entirely.
+ if iface.IsUserspaceBind() && forceUserspaceFirewall() {
+ log.Info("forcing userspace firewall")
+ return createUserspaceFirewall(iface, nil, disableServerRoutes, flowLogger, mtu)
+ }
+
+ // Try the native firewall for both kernel and userspace modes; its manager interface behaves identically to netfilter.
fm, err := createNativeFirewall(iface, stateManager, disableServerRoutes, mtu)
+ // Kernel mode cannot fall back to anything else, so return the error as-is.
if !iface.IsUserspaceBind() {
return fm, err
}
+ // Fall back to the userspace packet filter if native is unavailable
if err != nil {
log.Warnf("failed to create native firewall: %v. Proceeding with userspace", err)
+ return createUserspaceFirewall(iface, nil, disableServerRoutes, flowLogger, mtu)
}
- return createUserspaceFirewall(iface, fm, disableServerRoutes, flowLogger, mtu)
+
+ // Native firewall handles packet filtering, but the userspace WireGuard bind
+ // needs a device filter for DNS interception hooks. Install a minimal
+ // hooks-only filter that passes all traffic through to the kernel firewall.
+ if err := iface.SetFilter(&uspfilter.HooksFilter{}); err != nil {
+ log.Warnf("failed to set hooks filter, DNS via memory hooks will not work: %v", err)
+ }
+
+ return fm, nil
}
func createNativeFirewall(iface IFaceMapper, stateManager *statemanager.Manager, routes bool, mtu uint16) (firewall.Manager, error) {
@@ -160,3 +175,17 @@ func isIptablesClientAvailable(client *iptables.IPTables) bool {
_, err := client.ListChains("filter")
return err == nil
}
+
+func forceUserspaceFirewall() bool {
+ val := os.Getenv(EnvForceUserspaceFirewall)
+ if val == "" {
+ return false
+ }
+
+ force, err := strconv.ParseBool(val)
+ if err != nil {
+ log.Warnf("failed to parse %s: %v", EnvForceUserspaceFirewall, err)
+ return false
+ }
+ return force
+}
diff --git a/client/firewall/iface.go b/client/firewall/iface.go
index b83c5f912..491f03269 100644
--- a/client/firewall/iface.go
+++ b/client/firewall/iface.go
@@ -7,6 +7,12 @@ import (
"github.com/netbirdio/netbird/client/iface/wgaddr"
)
+// EnvForceUserspaceFirewall forces the use of the userspace packet filter even when
+// native iptables/nftables is available. This only applies when the WireGuard interface
+// runs in userspace mode. When set, peer ACLs are handled by USPFilter instead of
+// kernel netfilter rules.
+const EnvForceUserspaceFirewall = "NB_FORCE_USERSPACE_FIREWALL"
+
// IFaceMapper defines subset methods of interface required for manager
type IFaceMapper interface {
Name() string
diff --git a/client/firewall/iptables/manager_linux.go b/client/firewall/iptables/manager_linux.go
index 04c338375..a1d4467d5 100644
--- a/client/firewall/iptables/manager_linux.go
+++ b/client/firewall/iptables/manager_linux.go
@@ -33,7 +33,6 @@ type Manager struct {
type iFaceMapper interface {
Name() string
Address() wgaddr.Address
- IsUserspaceBind() bool
}
// Create iptables firewall manager
@@ -64,10 +63,9 @@ func Create(wgIface iFaceMapper, mtu uint16) (*Manager, error) {
func (m *Manager) Init(stateManager *statemanager.Manager) error {
state := &ShutdownState{
InterfaceState: &InterfaceState{
- NameStr: m.wgIface.Name(),
- WGAddress: m.wgIface.Address(),
- UserspaceBind: m.wgIface.IsUserspaceBind(),
- MTU: m.router.mtu,
+ NameStr: m.wgIface.Name(),
+ WGAddress: m.wgIface.Address(),
+ MTU: m.router.mtu,
},
}
stateManager.RegisterState(state)
@@ -203,12 +201,10 @@ func (m *Manager) Close(stateManager *statemanager.Manager) error {
return nberrors.FormatErrorOrNil(merr)
}
-// AllowNetbird allows netbird interface traffic
+// AllowNetbird allows netbird interface traffic.
+// This is called when USPFilter wraps the native firewall, adding blanket accept
+// rules so that packet filtering is handled in userspace instead of by netfilter.
func (m *Manager) AllowNetbird() error {
- if !m.wgIface.IsUserspaceBind() {
- return nil
- }
-
_, err := m.AddPeerFiltering(
nil,
net.IP{0, 0, 0, 0},
@@ -286,6 +282,22 @@ func (m *Manager) RemoveInboundDNAT(localAddr netip.Addr, protocol firewall.Prot
return m.router.RemoveInboundDNAT(localAddr, protocol, sourcePort, targetPort)
}
+// AddOutputDNAT adds an OUTPUT chain DNAT rule for locally-generated traffic.
+func (m *Manager) AddOutputDNAT(localAddr netip.Addr, protocol firewall.Protocol, sourcePort, targetPort uint16) error {
+ m.mutex.Lock()
+ defer m.mutex.Unlock()
+
+ return m.router.AddOutputDNAT(localAddr, protocol, sourcePort, targetPort)
+}
+
+// RemoveOutputDNAT removes an OUTPUT chain DNAT rule.
+func (m *Manager) RemoveOutputDNAT(localAddr netip.Addr, protocol firewall.Protocol, sourcePort, targetPort uint16) error {
+ m.mutex.Lock()
+ defer m.mutex.Unlock()
+
+ return m.router.RemoveOutputDNAT(localAddr, protocol, sourcePort, targetPort)
+}
+
const (
chainNameRaw = "NETBIRD-RAW"
chainOUTPUT = "OUTPUT"
diff --git a/client/firewall/iptables/manager_linux_test.go b/client/firewall/iptables/manager_linux_test.go
index ee47a27c0..cc4bda0e0 100644
--- a/client/firewall/iptables/manager_linux_test.go
+++ b/client/firewall/iptables/manager_linux_test.go
@@ -47,8 +47,6 @@ func (i *iFaceMock) Address() wgaddr.Address {
panic("AddressFunc is not set")
}
-func (i *iFaceMock) IsUserspaceBind() bool { return false }
-
func TestIptablesManager(t *testing.T) {
ipv4Client, err := iptables.NewWithProtocol(iptables.ProtocolIPv4)
require.NoError(t, err)
diff --git a/client/firewall/iptables/router_linux.go b/client/firewall/iptables/router_linux.go
index 1fe4c149f..a7c4f67dd 100644
--- a/client/firewall/iptables/router_linux.go
+++ b/client/firewall/iptables/router_linux.go
@@ -36,6 +36,7 @@ const (
chainRTFWDOUT = "NETBIRD-RT-FWD-OUT"
chainRTPRE = "NETBIRD-RT-PRE"
chainRTRDR = "NETBIRD-RT-RDR"
+ chainNATOutput = "NETBIRD-NAT-OUTPUT"
chainRTMSSCLAMP = "NETBIRD-RT-MSSCLAMP"
routingFinalForwardJump = "ACCEPT"
routingFinalNatJump = "MASQUERADE"
@@ -43,6 +44,7 @@ const (
jumpManglePre = "jump-mangle-pre"
jumpNatPre = "jump-nat-pre"
jumpNatPost = "jump-nat-post"
+ jumpNatOutput = "jump-nat-output"
jumpMSSClamp = "jump-mss-clamp"
markManglePre = "mark-mangle-pre"
markManglePost = "mark-mangle-post"
@@ -387,6 +389,14 @@ func (r *router) cleanUpDefaultForwardRules() error {
}
log.Debug("flushing routing related tables")
+
+ // Remove jump rules from built-in chains before deleting custom chains,
+ // otherwise the chain deletion fails with "device or resource busy".
+ jumpRule := []string{"-j", chainNATOutput}
+ if err := r.iptablesClient.Delete(tableNat, "OUTPUT", jumpRule...); err != nil {
+ log.Debugf("clean OUTPUT jump rule: %v", err)
+ }
+
for _, chainInfo := range []struct {
chain string
table string
@@ -396,6 +406,7 @@ func (r *router) cleanUpDefaultForwardRules() error {
{chainRTPRE, tableMangle},
{chainRTNAT, tableNat},
{chainRTRDR, tableNat},
+ {chainNATOutput, tableNat},
{chainRTMSSCLAMP, tableMangle},
} {
ok, err := r.iptablesClient.ChainExists(chainInfo.table, chainInfo.chain)
@@ -970,6 +981,81 @@ func (r *router) RemoveInboundDNAT(localAddr netip.Addr, protocol firewall.Proto
return nil
}
+// ensureNATOutputChain lazily creates the OUTPUT NAT chain and jump rule on first use.
+func (r *router) ensureNATOutputChain() error {
+ if _, exists := r.rules[jumpNatOutput]; exists {
+ return nil
+ }
+
+ chainExists, err := r.iptablesClient.ChainExists(tableNat, chainNATOutput)
+ if err != nil {
+ return fmt.Errorf("check chain %s: %w", chainNATOutput, err)
+ }
+ if !chainExists {
+ if err := r.iptablesClient.NewChain(tableNat, chainNATOutput); err != nil {
+ return fmt.Errorf("create chain %s: %w", chainNATOutput, err)
+ }
+ }
+
+ jumpRule := []string{"-j", chainNATOutput}
+ if err := r.iptablesClient.Insert(tableNat, "OUTPUT", 1, jumpRule...); err != nil {
+ if !chainExists {
+ if delErr := r.iptablesClient.ClearAndDeleteChain(tableNat, chainNATOutput); delErr != nil {
+ log.Warnf("failed to rollback chain %s: %v", chainNATOutput, delErr)
+ }
+ }
+ return fmt.Errorf("add OUTPUT jump rule: %w", err)
+ }
+ r.rules[jumpNatOutput] = jumpRule
+
+ r.updateState()
+ return nil
+}
+
+// AddOutputDNAT adds an OUTPUT chain DNAT rule for locally-generated traffic.
+func (r *router) AddOutputDNAT(localAddr netip.Addr, protocol firewall.Protocol, sourcePort, targetPort uint16) error {
+ ruleID := fmt.Sprintf("output-dnat-%s-%s-%d-%d", localAddr.String(), protocol, sourcePort, targetPort)
+
+ if _, exists := r.rules[ruleID]; exists {
+ return nil
+ }
+
+ if err := r.ensureNATOutputChain(); err != nil {
+ return err
+ }
+
+ dnatRule := []string{
+ "-p", strings.ToLower(string(protocol)),
+ "--dport", strconv.Itoa(int(sourcePort)),
+ "-d", localAddr.String(),
+ "-j", "DNAT",
+ "--to-destination", ":" + strconv.Itoa(int(targetPort)),
+ }
+
+ if err := r.iptablesClient.Append(tableNat, chainNATOutput, dnatRule...); err != nil {
+ return fmt.Errorf("add output DNAT rule: %w", err)
+ }
+ r.rules[ruleID] = dnatRule
+
+ r.updateState()
+ return nil
+}
+
+// RemoveOutputDNAT removes an OUTPUT chain DNAT rule.
+func (r *router) RemoveOutputDNAT(localAddr netip.Addr, protocol firewall.Protocol, sourcePort, targetPort uint16) error {
+ ruleID := fmt.Sprintf("output-dnat-%s-%s-%d-%d", localAddr.String(), protocol, sourcePort, targetPort)
+
+ if dnatRule, exists := r.rules[ruleID]; exists {
+ if err := r.iptablesClient.Delete(tableNat, chainNATOutput, dnatRule...); err != nil {
+ return fmt.Errorf("delete output DNAT rule: %w", err)
+ }
+ delete(r.rules, ruleID)
+ }
+
+ r.updateState()
+ return nil
+}
+
func applyPort(flag string, port *firewall.Port) []string {
if port == nil {
return nil
diff --git a/client/firewall/iptables/state_linux.go b/client/firewall/iptables/state_linux.go
index c88774c1f..121c755e9 100644
--- a/client/firewall/iptables/state_linux.go
+++ b/client/firewall/iptables/state_linux.go
@@ -9,10 +9,9 @@ import (
)
type InterfaceState struct {
- NameStr string `json:"name"`
- WGAddress wgaddr.Address `json:"wg_address"`
- UserspaceBind bool `json:"userspace_bind"`
- MTU uint16 `json:"mtu"`
+ NameStr string `json:"name"`
+ WGAddress wgaddr.Address `json:"wg_address"`
+ MTU uint16 `json:"mtu"`
}
func (i *InterfaceState) Name() string {
@@ -23,10 +22,6 @@ func (i *InterfaceState) Address() wgaddr.Address {
return i.WGAddress
}
-func (i *InterfaceState) IsUserspaceBind() bool {
- return i.UserspaceBind
-}
-
type ShutdownState struct {
sync.Mutex
diff --git a/client/firewall/manager/firewall.go b/client/firewall/manager/firewall.go
index 3511a5463..d65d717b3 100644
--- a/client/firewall/manager/firewall.go
+++ b/client/firewall/manager/firewall.go
@@ -169,6 +169,14 @@ type Manager interface {
// RemoveInboundDNAT removes inbound DNAT rule
RemoveInboundDNAT(localAddr netip.Addr, protocol Protocol, sourcePort, targetPort uint16) error
+ // AddOutputDNAT adds an OUTPUT chain DNAT rule for locally-generated traffic.
+ // localAddr must be IPv4; the underlying iptables/nftables backends are IPv4-only.
+ AddOutputDNAT(localAddr netip.Addr, protocol Protocol, sourcePort, targetPort uint16) error
+
+ // RemoveOutputDNAT removes an OUTPUT chain DNAT rule.
+ // localAddr must be IPv4; the underlying iptables/nftables backends are IPv4-only.
+ RemoveOutputDNAT(localAddr netip.Addr, protocol Protocol, sourcePort, targetPort uint16) error
+
// SetupEBPFProxyNoTrack creates static notrack rules for eBPF proxy loopback traffic.
// This prevents conntrack from interfering with WireGuard proxy communication.
SetupEBPFProxyNoTrack(proxyPort, wgPort uint16) error
diff --git a/client/firewall/nftables/manager_linux.go b/client/firewall/nftables/manager_linux.go
index f57b28abc..0b5b61e04 100644
--- a/client/firewall/nftables/manager_linux.go
+++ b/client/firewall/nftables/manager_linux.go
@@ -40,7 +40,6 @@ func getTableName() string {
type iFaceMapper interface {
Name() string
Address() wgaddr.Address
- IsUserspaceBind() bool
}
// Manager of iptables firewall
@@ -106,10 +105,9 @@ func (m *Manager) Init(stateManager *statemanager.Manager) error {
// cleanup using Close() without needing to store specific rules.
if err := stateManager.UpdateState(&ShutdownState{
InterfaceState: &InterfaceState{
- NameStr: m.wgIface.Name(),
- WGAddress: m.wgIface.Address(),
- UserspaceBind: m.wgIface.IsUserspaceBind(),
- MTU: m.router.mtu,
+ NameStr: m.wgIface.Name(),
+ WGAddress: m.wgIface.Address(),
+ MTU: m.router.mtu,
},
}); err != nil {
log.Errorf("failed to update state: %v", err)
@@ -205,12 +203,10 @@ func (m *Manager) RemoveNatRule(pair firewall.RouterPair) error {
return m.router.RemoveNatRule(pair)
}
-// AllowNetbird allows netbird interface traffic
+// AllowNetbird allows netbird interface traffic.
+// This is called when USPFilter wraps the native firewall, adding blanket accept
+// rules so that packet filtering is handled in userspace instead of by netfilter.
func (m *Manager) AllowNetbird() error {
- if !m.wgIface.IsUserspaceBind() {
- return nil
- }
-
m.mutex.Lock()
defer m.mutex.Unlock()
@@ -346,6 +342,22 @@ func (m *Manager) RemoveInboundDNAT(localAddr netip.Addr, protocol firewall.Prot
return m.router.RemoveInboundDNAT(localAddr, protocol, sourcePort, targetPort)
}
+// AddOutputDNAT adds an OUTPUT chain DNAT rule for locally-generated traffic.
+func (m *Manager) AddOutputDNAT(localAddr netip.Addr, protocol firewall.Protocol, sourcePort, targetPort uint16) error {
+ m.mutex.Lock()
+ defer m.mutex.Unlock()
+
+ return m.router.AddOutputDNAT(localAddr, protocol, sourcePort, targetPort)
+}
+
+// RemoveOutputDNAT removes an OUTPUT chain DNAT rule.
+func (m *Manager) RemoveOutputDNAT(localAddr netip.Addr, protocol firewall.Protocol, sourcePort, targetPort uint16) error {
+ m.mutex.Lock()
+ defer m.mutex.Unlock()
+
+ return m.router.RemoveOutputDNAT(localAddr, protocol, sourcePort, targetPort)
+}
+
const (
chainNameRawOutput = "netbird-raw-out"
chainNameRawPrerouting = "netbird-raw-pre"
diff --git a/client/firewall/nftables/manager_linux_test.go b/client/firewall/nftables/manager_linux_test.go
index 75b1e2b6c..d48e4ba88 100644
--- a/client/firewall/nftables/manager_linux_test.go
+++ b/client/firewall/nftables/manager_linux_test.go
@@ -52,8 +52,6 @@ func (i *iFaceMock) Address() wgaddr.Address {
panic("AddressFunc is not set")
}
-func (i *iFaceMock) IsUserspaceBind() bool { return false }
-
func TestNftablesManager(t *testing.T) {
// just check on the local interface
diff --git a/client/firewall/nftables/router_linux.go b/client/firewall/nftables/router_linux.go
index fde654c20..904daf7cb 100644
--- a/client/firewall/nftables/router_linux.go
+++ b/client/firewall/nftables/router_linux.go
@@ -36,6 +36,7 @@ const (
chainNameRoutingFw = "netbird-rt-fwd"
chainNameRoutingNat = "netbird-rt-postrouting"
chainNameRoutingRdr = "netbird-rt-redirect"
+ chainNameNATOutput = "netbird-nat-output"
chainNameForward = "FORWARD"
chainNameMangleForward = "netbird-mangle-forward"
@@ -1853,6 +1854,130 @@ func (r *router) RemoveInboundDNAT(localAddr netip.Addr, protocol firewall.Proto
return nil
}
+// ensureNATOutputChain lazily creates the OUTPUT NAT chain on first use.
+func (r *router) ensureNATOutputChain() error {
+ if _, exists := r.chains[chainNameNATOutput]; exists {
+ return nil
+ }
+
+ r.chains[chainNameNATOutput] = r.conn.AddChain(&nftables.Chain{
+ Name: chainNameNATOutput,
+ Table: r.workTable,
+ Hooknum: nftables.ChainHookOutput,
+ Priority: nftables.ChainPriorityNATDest,
+ Type: nftables.ChainTypeNAT,
+ })
+
+ if err := r.conn.Flush(); err != nil {
+ delete(r.chains, chainNameNATOutput)
+ return fmt.Errorf("create NAT output chain: %w", err)
+ }
+ return nil
+}
+
+// AddOutputDNAT adds an OUTPUT chain DNAT rule for locally-generated traffic.
+func (r *router) AddOutputDNAT(localAddr netip.Addr, protocol firewall.Protocol, sourcePort, targetPort uint16) error {
+ ruleID := fmt.Sprintf("output-dnat-%s-%s-%d-%d", localAddr.String(), protocol, sourcePort, targetPort)
+
+ if _, exists := r.rules[ruleID]; exists {
+ return nil
+ }
+
+ if err := r.ensureNATOutputChain(); err != nil {
+ return err
+ }
+
+ protoNum, err := protoToInt(protocol)
+ if err != nil {
+ return fmt.Errorf("convert protocol to number: %w", err)
+ }
+
+ exprs := []expr.Any{
+ &expr.Meta{Key: expr.MetaKeyL4PROTO, Register: 1},
+ &expr.Cmp{
+ Op: expr.CmpOpEq,
+ Register: 1,
+ Data: []byte{protoNum},
+ },
+ &expr.Payload{
+ DestRegister: 2,
+ Base: expr.PayloadBaseTransportHeader,
+ Offset: 2,
+ Len: 2,
+ },
+ &expr.Cmp{
+ Op: expr.CmpOpEq,
+ Register: 2,
+ Data: binaryutil.BigEndian.PutUint16(sourcePort),
+ },
+ }
+
+ exprs = append(exprs, applyPrefix(netip.PrefixFrom(localAddr, 32), false)...)
+
+ exprs = append(exprs,
+ &expr.Immediate{
+ Register: 1,
+ Data: localAddr.AsSlice(),
+ },
+ &expr.Immediate{
+ Register: 2,
+ Data: binaryutil.BigEndian.PutUint16(targetPort),
+ },
+ &expr.NAT{
+ Type: expr.NATTypeDestNAT,
+ Family: uint32(nftables.TableFamilyIPv4),
+ RegAddrMin: 1,
+ RegProtoMin: 2,
+ },
+ )
+
+ dnatRule := &nftables.Rule{
+ Table: r.workTable,
+ Chain: r.chains[chainNameNATOutput],
+ Exprs: exprs,
+ UserData: []byte(ruleID),
+ }
+ r.conn.AddRule(dnatRule)
+
+ if err := r.conn.Flush(); err != nil {
+ return fmt.Errorf("add output DNAT rule: %w", err)
+ }
+
+ r.rules[ruleID] = dnatRule
+
+ return nil
+}
+
+// RemoveOutputDNAT removes an OUTPUT chain DNAT rule.
+func (r *router) RemoveOutputDNAT(localAddr netip.Addr, protocol firewall.Protocol, sourcePort, targetPort uint16) error {
+ if err := r.refreshRulesMap(); err != nil {
+ return fmt.Errorf(refreshRulesMapError, err)
+ }
+
+ ruleID := fmt.Sprintf("output-dnat-%s-%s-%d-%d", localAddr.String(), protocol, sourcePort, targetPort)
+
+ rule, exists := r.rules[ruleID]
+ if !exists {
+ return nil
+ }
+
+ if rule.Handle == 0 {
+ log.Warnf("output DNAT rule %s has no handle, removing stale entry", ruleID)
+ delete(r.rules, ruleID)
+ return nil
+ }
+
+ if err := r.conn.DelRule(rule); err != nil {
+ return fmt.Errorf("delete output DNAT rule %s: %w", ruleID, err)
+ }
+ if err := r.conn.Flush(); err != nil {
+ return fmt.Errorf("flush delete output DNAT rule: %w", err)
+ }
+ delete(r.rules, ruleID)
+
+ return nil
+}
+
// applyNetwork generates nftables expressions for networks (CIDR) or sets
func (r *router) applyNetwork(
network firewall.Network,
diff --git a/client/firewall/nftables/state_linux.go b/client/firewall/nftables/state_linux.go
index 48b7b3741..462ad2556 100644
--- a/client/firewall/nftables/state_linux.go
+++ b/client/firewall/nftables/state_linux.go
@@ -8,10 +8,9 @@ import (
)
type InterfaceState struct {
- NameStr string `json:"name"`
- WGAddress wgaddr.Address `json:"wg_address"`
- UserspaceBind bool `json:"userspace_bind"`
- MTU uint16 `json:"mtu"`
+ NameStr string `json:"name"`
+ WGAddress wgaddr.Address `json:"wg_address"`
+ MTU uint16 `json:"mtu"`
}
func (i *InterfaceState) Name() string {
@@ -22,10 +21,6 @@ func (i *InterfaceState) Address() wgaddr.Address {
return i.WGAddress
}
-func (i *InterfaceState) IsUserspaceBind() bool {
- return i.UserspaceBind
-}
-
type ShutdownState struct {
InterfaceState *InterfaceState `json:"interface_state,omitempty"`
}
diff --git a/client/firewall/uspfilter/common/hooks.go b/client/firewall/uspfilter/common/hooks.go
new file mode 100644
index 000000000..dadd800dd
--- /dev/null
+++ b/client/firewall/uspfilter/common/hooks.go
@@ -0,0 +1,37 @@
+package common
+
+import (
+ "net/netip"
+ "sync/atomic"
+)
+
+// PacketHook stores a registered hook for a specific IP:port.
+type PacketHook struct {
+ IP netip.Addr
+ Port uint16
+ Fn func([]byte) bool
+}
+
+// HookMatches checks if a packet's destination matches the hook and invokes it.
+func HookMatches(h *PacketHook, dstIP netip.Addr, dport uint16, packetData []byte) bool {
+ if h == nil {
+ return false
+ }
+ if h.IP == dstIP && h.Port == dport {
+ return h.Fn(packetData)
+ }
+ return false
+}
+
+// SetHook atomically stores a hook, handling nil removal.
+func SetHook(ptr *atomic.Pointer[PacketHook], ip netip.Addr, dPort uint16, hook func([]byte) bool) {
+ if hook == nil {
+ ptr.Store(nil)
+ return
+ }
+ ptr.Store(&PacketHook{
+ IP: ip,
+ Port: dPort,
+ Fn: hook,
+ })
+}
diff --git a/client/firewall/uspfilter/filter.go b/client/firewall/uspfilter/filter.go
index df2e274eb..24b3d0167 100644
--- a/client/firewall/uspfilter/filter.go
+++ b/client/firewall/uspfilter/filter.go
@@ -140,6 +140,10 @@ type Manager struct {
mtu uint16
mssClampValue uint16
mssClampEnabled bool
+
+ // Only one hook per protocol is supported. Outbound direction only.
+ udpHookOut atomic.Pointer[common.PacketHook]
+ tcpHookOut atomic.Pointer[common.PacketHook]
}
// decoder for packages
@@ -594,6 +598,8 @@ func (m *Manager) resetState() {
maps.Clear(m.incomingRules)
maps.Clear(m.routeRulesMap)
m.routeRules = m.routeRules[:0]
+ m.udpHookOut.Store(nil)
+ m.tcpHookOut.Store(nil)
if m.udpTracker != nil {
m.udpTracker.Close()
@@ -713,6 +719,9 @@ func (m *Manager) filterOutbound(packetData []byte, size int) bool {
return true
}
case layers.LayerTypeTCP:
+ if m.tcpHooksDrop(uint16(d.tcp.DstPort), dstIP, packetData) {
+ return true
+ }
// Clamp MSS on all TCP SYN packets, including those from local IPs.
// SNATed routed traffic may appear as local IP but still requires clamping.
if m.mssClampEnabled {
@@ -895,39 +904,12 @@ func (m *Manager) trackInbound(d *decoder, srcIP, dstIP netip.Addr, ruleID []byt
d.dnatOrigPort = 0
}
-// udpHooksDrop checks if any UDP hooks should drop the packet
func (m *Manager) udpHooksDrop(dport uint16, dstIP netip.Addr, packetData []byte) bool {
- m.mutex.RLock()
- defer m.mutex.RUnlock()
+ return common.HookMatches(m.udpHookOut.Load(), dstIP, dport, packetData)
+}
- // Check specific destination IP first
- if rules, exists := m.outgoingRules[dstIP]; exists {
- for _, rule := range rules {
- if rule.udpHook != nil && portsMatch(rule.dPort, dport) {
- return rule.udpHook(packetData)
- }
- }
- }
-
- // Check IPv4 unspecified address
- if rules, exists := m.outgoingRules[netip.IPv4Unspecified()]; exists {
- for _, rule := range rules {
- if rule.udpHook != nil && portsMatch(rule.dPort, dport) {
- return rule.udpHook(packetData)
- }
- }
- }
-
- // Check IPv6 unspecified address
- if rules, exists := m.outgoingRules[netip.IPv6Unspecified()]; exists {
- for _, rule := range rules {
- if rule.udpHook != nil && portsMatch(rule.dPort, dport) {
- return rule.udpHook(packetData)
- }
- }
- }
-
- return false
+func (m *Manager) tcpHooksDrop(dport uint16, dstIP netip.Addr, packetData []byte) bool {
+ return common.HookMatches(m.tcpHookOut.Load(), dstIP, dport, packetData)
}
// filterInbound implements filtering logic for incoming packets.
@@ -1278,12 +1260,6 @@ func validateRule(ip netip.Addr, packetData []byte, rules map[string]PeerRule, d
return rule.mgmtId, rule.drop, true
}
case layers.LayerTypeUDP:
- // if rule has UDP hook (and if we are here we match this rule)
- // we ignore rule.drop and call this hook
- if rule.udpHook != nil {
- return rule.mgmtId, rule.udpHook(packetData), true
- }
-
if portsMatch(rule.sPort, uint16(d.udp.SrcPort)) && portsMatch(rule.dPort, uint16(d.udp.DstPort)) {
return rule.mgmtId, rule.drop, true
}
@@ -1342,65 +1318,14 @@ func (m *Manager) ruleMatches(rule *RouteRule, srcAddr, dstAddr netip.Addr, prot
return sourceMatched
}
-// AddUDPPacketHook calls hook when UDP packet from given direction matched
-//
-// Hook function returns flag which indicates should be the matched package dropped or not
-func (m *Manager) AddUDPPacketHook(in bool, ip netip.Addr, dPort uint16, hook func(packet []byte) bool) string {
- r := PeerRule{
- id: uuid.New().String(),
- ip: ip,
- protoLayer: layers.LayerTypeUDP,
- dPort: &firewall.Port{Values: []uint16{dPort}},
- ipLayer: layers.LayerTypeIPv6,
- udpHook: hook,
- }
-
- if ip.Is4() {
- r.ipLayer = layers.LayerTypeIPv4
- }
-
- m.mutex.Lock()
- if in {
- // Incoming UDP hooks are stored in allow rules map
- if _, ok := m.incomingRules[r.ip]; !ok {
- m.incomingRules[r.ip] = make(map[string]PeerRule)
- }
- m.incomingRules[r.ip][r.id] = r
- } else {
- if _, ok := m.outgoingRules[r.ip]; !ok {
- m.outgoingRules[r.ip] = make(map[string]PeerRule)
- }
- m.outgoingRules[r.ip][r.id] = r
- }
- m.mutex.Unlock()
-
- return r.id
+// SetUDPPacketHook sets the outbound UDP packet hook. Pass nil hook to remove.
+func (m *Manager) SetUDPPacketHook(ip netip.Addr, dPort uint16, hook func(packet []byte) bool) {
+ common.SetHook(&m.udpHookOut, ip, dPort, hook)
}
-// RemovePacketHook removes packet hook by given ID
-func (m *Manager) RemovePacketHook(hookID string) error {
- m.mutex.Lock()
- defer m.mutex.Unlock()
-
- // Check incoming hooks (stored in allow rules)
- for _, arr := range m.incomingRules {
- for _, r := range arr {
- if r.id == hookID {
- delete(arr, r.id)
- return nil
- }
- }
- }
- // Check outgoing hooks
- for _, arr := range m.outgoingRules {
- for _, r := range arr {
- if r.id == hookID {
- delete(arr, r.id)
- return nil
- }
- }
- }
- return fmt.Errorf("hook with given id not found")
+// SetTCPPacketHook sets the outbound TCP packet hook. Pass nil hook to remove.
+func (m *Manager) SetTCPPacketHook(ip netip.Addr, dPort uint16, hook func(packet []byte) bool) {
+ common.SetHook(&m.tcpHookOut, ip, dPort, hook)
}
// SetLogLevel sets the log level for the firewall manager
diff --git a/client/firewall/uspfilter/filter_test.go b/client/firewall/uspfilter/filter_test.go
index 55a8e723c..39e8efa2c 100644
--- a/client/firewall/uspfilter/filter_test.go
+++ b/client/firewall/uspfilter/filter_test.go
@@ -12,6 +12,7 @@ import (
"github.com/google/gopacket"
"github.com/google/gopacket/layers"
"github.com/sirupsen/logrus"
+ "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
wgdevice "golang.zx2c4.com/wireguard/device"
@@ -186,81 +187,52 @@ func TestManagerDeleteRule(t *testing.T) {
}
}
-func TestAddUDPPacketHook(t *testing.T) {
- tests := []struct {
- name string
- in bool
- expDir fw.RuleDirection
- ip netip.Addr
- dPort uint16
- hook func([]byte) bool
- expectedID string
- }{
- {
- name: "Test Outgoing UDP Packet Hook",
- in: false,
- expDir: fw.RuleDirectionOUT,
- ip: netip.MustParseAddr("10.168.0.1"),
- dPort: 8000,
- hook: func([]byte) bool { return true },
- },
- {
- name: "Test Incoming UDP Packet Hook",
- in: true,
- expDir: fw.RuleDirectionIN,
- ip: netip.MustParseAddr("::1"),
- dPort: 9000,
- hook: func([]byte) bool { return false },
- },
- }
+func TestSetUDPPacketHook(t *testing.T) {
+ manager, err := Create(&IFaceMock{
+ SetFilterFunc: func(device.PacketFilter) error { return nil },
+ }, false, flowLogger, nbiface.DefaultMTU)
+ require.NoError(t, err)
+ t.Cleanup(func() { require.NoError(t, manager.Close(nil)) })
- for _, tt := range tests {
- t.Run(tt.name, func(t *testing.T) {
- manager, err := Create(&IFaceMock{
- SetFilterFunc: func(device.PacketFilter) error { return nil },
- }, false, flowLogger, nbiface.DefaultMTU)
- require.NoError(t, err)
+ var called bool
+ manager.SetUDPPacketHook(netip.MustParseAddr("10.168.0.1"), 8000, func([]byte) bool {
+ called = true
+ return true
+ })
- manager.AddUDPPacketHook(tt.in, tt.ip, tt.dPort, tt.hook)
+ h := manager.udpHookOut.Load()
+ require.NotNil(t, h)
+ assert.Equal(t, netip.MustParseAddr("10.168.0.1"), h.IP)
+ assert.Equal(t, uint16(8000), h.Port)
+ assert.True(t, h.Fn(nil))
+ assert.True(t, called)
- var addedRule PeerRule
- if tt.in {
- // Incoming UDP hooks are stored in allow rules map
- if len(manager.incomingRules[tt.ip]) != 1 {
- t.Errorf("expected 1 incoming rule, got %d", len(manager.incomingRules[tt.ip]))
- return
- }
- for _, rule := range manager.incomingRules[tt.ip] {
- addedRule = rule
- }
- } else {
- if len(manager.outgoingRules[tt.ip]) != 1 {
- t.Errorf("expected 1 outgoing rule, got %d", len(manager.outgoingRules[tt.ip]))
- return
- }
- for _, rule := range manager.outgoingRules[tt.ip] {
- addedRule = rule
- }
- }
+ manager.SetUDPPacketHook(netip.MustParseAddr("10.168.0.1"), 8000, nil)
+ assert.Nil(t, manager.udpHookOut.Load())
+}
- if tt.ip.Compare(addedRule.ip) != 0 {
- t.Errorf("expected ip %s, got %s", tt.ip, addedRule.ip)
- return
- }
- if tt.dPort != addedRule.dPort.Values[0] {
- t.Errorf("expected dPort %d, got %d", tt.dPort, addedRule.dPort.Values[0])
- return
- }
- if layers.LayerTypeUDP != addedRule.protoLayer {
- t.Errorf("expected protoLayer %s, got %s", layers.LayerTypeUDP, addedRule.protoLayer)
- return
- }
- if addedRule.udpHook == nil {
- t.Errorf("expected udpHook to be set")
- return
- }
- })
- }
+func TestSetTCPPacketHook(t *testing.T) {
+ manager, err := Create(&IFaceMock{
+ SetFilterFunc: func(device.PacketFilter) error { return nil },
+ }, false, flowLogger, nbiface.DefaultMTU)
+ require.NoError(t, err)
+ t.Cleanup(func() { require.NoError(t, manager.Close(nil)) })
+
+ var called bool
+ manager.SetTCPPacketHook(netip.MustParseAddr("10.168.0.1"), 53, func([]byte) bool {
+ called = true
+ return true
+ })
+
+ h := manager.tcpHookOut.Load()
+ require.NotNil(t, h)
+ assert.Equal(t, netip.MustParseAddr("10.168.0.1"), h.IP)
+ assert.Equal(t, uint16(53), h.Port)
+ assert.True(t, h.Fn(nil))
+ assert.True(t, called)
+
+ manager.SetTCPPacketHook(netip.MustParseAddr("10.168.0.1"), 53, nil)
+ assert.Nil(t, manager.tcpHookOut.Load())
}
// TestPeerRuleLifecycleDenyRules verifies that deny rules are correctly added
@@ -530,39 +502,12 @@ func TestRemovePacketHook(t *testing.T) {
require.NoError(t, manager.Close(nil))
}()
- // Add a UDP packet hook
- hookFunc := func(data []byte) bool { return true }
- hookID := manager.AddUDPPacketHook(false, netip.MustParseAddr("192.168.0.1"), 8080, hookFunc)
+ manager.SetUDPPacketHook(netip.MustParseAddr("192.168.0.1"), 8080, func([]byte) bool { return true })
- // Assert the hook is added by finding it in the manager's outgoing rules
- found := false
- for _, arr := range manager.outgoingRules {
- for _, rule := range arr {
- if rule.id == hookID {
- found = true
- break
- }
- }
- }
+ require.NotNil(t, manager.udpHookOut.Load(), "hook should be registered")
- if !found {
- t.Fatalf("The hook was not added properly.")
- }
-
- // Now remove the packet hook
- err = manager.RemovePacketHook(hookID)
- if err != nil {
- t.Fatalf("Failed to remove hook: %s", err)
- }
-
- // Assert the hook is removed by checking it in the manager's outgoing rules
- for _, arr := range manager.outgoingRules {
- for _, rule := range arr {
- if rule.id == hookID {
- t.Fatalf("The hook was not removed properly.")
- }
- }
- }
+ manager.SetUDPPacketHook(netip.MustParseAddr("192.168.0.1"), 8080, nil)
+ assert.Nil(t, manager.udpHookOut.Load(), "hook should be removed")
}
func TestProcessOutgoingHooks(t *testing.T) {
@@ -592,8 +537,7 @@ func TestProcessOutgoingHooks(t *testing.T) {
}
hookCalled := false
- hookID := manager.AddUDPPacketHook(
- false,
+ manager.SetUDPPacketHook(
netip.MustParseAddr("100.10.0.100"),
53,
func([]byte) bool {
@@ -601,7 +545,6 @@ func TestProcessOutgoingHooks(t *testing.T) {
return true
},
)
- require.NotEmpty(t, hookID)
// Create test UDP packet
ipv4 := &layers.IPv4{
diff --git a/client/firewall/uspfilter/hooks_filter.go b/client/firewall/uspfilter/hooks_filter.go
new file mode 100644
index 000000000..8d3cc0f5c
--- /dev/null
+++ b/client/firewall/uspfilter/hooks_filter.go
@@ -0,0 +1,90 @@
+package uspfilter
+
+import (
+ "encoding/binary"
+ "net/netip"
+ "sync/atomic"
+
+ "github.com/netbirdio/netbird/client/firewall/uspfilter/common"
+ "github.com/netbirdio/netbird/client/iface/device"
+)
+
+const (
+ ipv4HeaderMinLen = 20
+ ipv4ProtoOffset = 9
+ ipv4FlagsOffset = 6
+ ipv4DstOffset = 16
+ ipProtoUDP = 17
+ ipProtoTCP = 6
+ ipv4FragOffMask = 0x1fff
+ // dstPortOffset is the byte offset of the destination port field; both UDP and TCP headers place it at bytes 2-3, after the 2-byte source port.
+ dstPortOffset = 2
+)
+
+// HooksFilter is a minimal packet filter that only handles outbound DNS hooks.
+// It is installed on the WireGuard interface when the userspace bind is active
+// but a full firewall filter (Manager) is not needed because a native kernel
+// firewall (nftables/iptables) handles packet filtering.
+type HooksFilter struct {
+ udpHook atomic.Pointer[common.PacketHook]
+ tcpHook atomic.Pointer[common.PacketHook]
+}
+
+var _ device.PacketFilter = (*HooksFilter)(nil)
+
+// FilterOutbound checks outbound packets for DNS hook matches.
+// Only IPv4 packets matching the registered hook IP:port are intercepted.
+// IPv6 and non-IP packets pass through unconditionally.
+func (f *HooksFilter) FilterOutbound(packetData []byte, _ int) bool {
+ if len(packetData) < ipv4HeaderMinLen {
+ return false
+ }
+
+ // Only process IPv4 packets, let everything else pass through.
+ if packetData[0]>>4 != 4 {
+ return false
+ }
+
+ ihl := int(packetData[0]&0x0f) * 4
+ if ihl < ipv4HeaderMinLen || len(packetData) < ihl+4 {
+ return false
+ }
+
+ // Skip non-first fragments: they don't carry L4 headers.
+ flagsAndOffset := binary.BigEndian.Uint16(packetData[ipv4FlagsOffset : ipv4FlagsOffset+2])
+ if flagsAndOffset&ipv4FragOffMask != 0 {
+ return false
+ }
+
+ dstIP, ok := netip.AddrFromSlice(packetData[ipv4DstOffset : ipv4DstOffset+4])
+ if !ok {
+ return false
+ }
+
+ proto := packetData[ipv4ProtoOffset]
+ dstPort := binary.BigEndian.Uint16(packetData[ihl+dstPortOffset : ihl+dstPortOffset+2])
+
+ switch proto {
+ case ipProtoUDP:
+ return common.HookMatches(f.udpHook.Load(), dstIP, dstPort, packetData)
+ case ipProtoTCP:
+ return common.HookMatches(f.tcpHook.Load(), dstIP, dstPort, packetData)
+ default:
+ return false
+ }
+}
+
+// FilterInbound allows all inbound packets through (returning false means "do not drop"); the native kernel firewall handles inbound filtering.
+func (f *HooksFilter) FilterInbound([]byte, int) bool {
+ return false
+}
+
+// SetUDPPacketHook registers the UDP packet hook.
+func (f *HooksFilter) SetUDPPacketHook(ip netip.Addr, dPort uint16, hook func([]byte) bool) {
+ common.SetHook(&f.udpHook, ip, dPort, hook)
+}
+
+// SetTCPPacketHook registers the TCP packet hook.
+func (f *HooksFilter) SetTCPPacketHook(ip netip.Addr, dPort uint16, hook func([]byte) bool) {
+ common.SetHook(&f.tcpHook, ip, dPort, hook)
+}
diff --git a/client/firewall/uspfilter/localip.go b/client/firewall/uspfilter/localip.go
index ffc807f46..f63fe3e45 100644
--- a/client/firewall/uspfilter/localip.go
+++ b/client/firewall/uspfilter/localip.go
@@ -144,6 +144,8 @@ func (m *localIPManager) UpdateLocalIPs(iface common.IFaceMapper) (err error) {
if err != nil {
log.Warnf("failed to get interfaces: %v", err)
} else {
+ // TODO: filter out down interfaces (net.FlagUp). Also handle the reverse
+ // case where an interface comes up between refreshes.
for _, intf := range interfaces {
m.processInterface(intf, &newIPv4Bitmap, ipv4Set, &ipv4Addresses)
}
diff --git a/client/firewall/uspfilter/nat.go b/client/firewall/uspfilter/nat.go
index 597f892cf..8ed32eb5e 100644
--- a/client/firewall/uspfilter/nat.go
+++ b/client/firewall/uspfilter/nat.go
@@ -421,6 +421,7 @@ func (m *Manager) addPortRedirection(targetIP netip.Addr, protocol gopacket.Laye
}
// AddInboundDNAT adds an inbound DNAT rule redirecting traffic from NetBird peers to local services.
+// TODO: also delegate to nativeFirewall when available for kernel WG mode
func (m *Manager) AddInboundDNAT(localAddr netip.Addr, protocol firewall.Protocol, sourcePort, targetPort uint16) error {
var layerType gopacket.LayerType
switch protocol {
@@ -466,6 +467,22 @@ func (m *Manager) RemoveInboundDNAT(localAddr netip.Addr, protocol firewall.Prot
return m.removePortRedirection(localAddr, layerType, sourcePort, targetPort)
}
+// AddOutputDNAT delegates to the native firewall if available.
+func (m *Manager) AddOutputDNAT(localAddr netip.Addr, protocol firewall.Protocol, sourcePort, targetPort uint16) error {
+ if m.nativeFirewall == nil {
+ return fmt.Errorf("output DNAT not supported without native firewall")
+ }
+ return m.nativeFirewall.AddOutputDNAT(localAddr, protocol, sourcePort, targetPort)
+}
+
+// RemoveOutputDNAT delegates to the native firewall if available.
+func (m *Manager) RemoveOutputDNAT(localAddr netip.Addr, protocol firewall.Protocol, sourcePort, targetPort uint16) error {
+ if m.nativeFirewall == nil {
+ return nil
+ }
+ return m.nativeFirewall.RemoveOutputDNAT(localAddr, protocol, sourcePort, targetPort)
+}
+
// translateInboundPortDNAT applies port-specific DNAT translation to inbound packets.
func (m *Manager) translateInboundPortDNAT(packetData []byte, d *decoder, srcIP, dstIP netip.Addr) bool {
if !m.portDNATEnabled.Load() {
diff --git a/client/firewall/uspfilter/rule.go b/client/firewall/uspfilter/rule.go
index dbe3a7858..08d68a78e 100644
--- a/client/firewall/uspfilter/rule.go
+++ b/client/firewall/uspfilter/rule.go
@@ -18,9 +18,7 @@ type PeerRule struct {
protoLayer gopacket.LayerType
sPort *firewall.Port
dPort *firewall.Port
- drop bool
-
- udpHook func([]byte) bool
+ drop bool
}
// ID returns the rule id
diff --git a/client/firewall/uspfilter/tracer_test.go b/client/firewall/uspfilter/tracer_test.go
index d9f9f1aa8..657f96fc0 100644
--- a/client/firewall/uspfilter/tracer_test.go
+++ b/client/firewall/uspfilter/tracer_test.go
@@ -399,21 +399,17 @@ func TestTracePacket(t *testing.T) {
{
name: "UDPTraffic_WithHook",
setup: func(m *Manager) {
- hookFunc := func([]byte) bool {
- return true
- }
- m.AddUDPPacketHook(true, netip.MustParseAddr("1.1.1.1"), 53, hookFunc)
+ m.SetUDPPacketHook(netip.MustParseAddr("100.10.255.254"), 53, func([]byte) bool {
+ return true // drop (intercepted by hook)
+ })
},
packetBuilder: func() *PacketBuilder {
- return createPacketBuilder("1.1.1.1", "100.10.0.100", "udp", 12345, 53, fw.RuleDirectionIN)
+ return createPacketBuilder("100.10.0.100", "100.10.255.254", "udp", 12345, 53, fw.RuleDirectionOUT)
},
expectedStages: []PacketStage{
StageReceived,
- StageInboundPortDNAT,
- StageInbound1to1NAT,
- StageConntrack,
- StageRouting,
- StagePeerACL,
+ StageOutbound1to1NAT,
+ StageOutboundPortReverse,
StageCompleted,
},
expectedAllow: false,
diff --git a/client/iface/device/device_filter.go b/client/iface/device/device_filter.go
index 708f38d26..4357d1916 100644
--- a/client/iface/device/device_filter.go
+++ b/client/iface/device/device_filter.go
@@ -15,14 +15,17 @@ type PacketFilter interface {
// FilterInbound filter incoming packets from external sources to host
FilterInbound(packetData []byte, size int) bool
- // AddUDPPacketHook calls hook when UDP packet from given direction matched
- //
- // Hook function returns flag which indicates should be the matched package dropped or not.
- // Hook function receives raw network packet data as argument.
- AddUDPPacketHook(in bool, ip netip.Addr, dPort uint16, hook func(packet []byte) bool) string
+ // SetUDPPacketHook registers a hook for outbound UDP packets matching the given IP and port.
+ // Hook function returns true if the packet should be dropped.
+ // Only one UDP hook is supported; calling again replaces the previous hook.
+ // Pass nil hook to remove.
+ SetUDPPacketHook(ip netip.Addr, dPort uint16, hook func(packet []byte) bool)
- // RemovePacketHook removes hook by ID
- RemovePacketHook(hookID string) error
+ // SetTCPPacketHook registers a hook for outbound TCP packets matching the given IP and port.
+ // Hook function returns true if the packet should be dropped.
+ // Only one TCP hook is supported; calling again replaces the previous hook.
+ // Pass nil hook to remove.
+ SetTCPPacketHook(ip netip.Addr, dPort uint16, hook func(packet []byte) bool)
}
// FilteredDevice to override Read or Write of packets
diff --git a/client/iface/mocks/filter.go b/client/iface/mocks/filter.go
index 566068aa5..5ae98039c 100644
--- a/client/iface/mocks/filter.go
+++ b/client/iface/mocks/filter.go
@@ -34,18 +34,28 @@ func (m *MockPacketFilter) EXPECT() *MockPacketFilterMockRecorder {
return m.recorder
}
-// AddUDPPacketHook mocks base method.
-func (m *MockPacketFilter) AddUDPPacketHook(arg0 bool, arg1 netip.Addr, arg2 uint16, arg3 func([]byte) bool) string {
+// SetUDPPacketHook mocks base method.
+func (m *MockPacketFilter) SetUDPPacketHook(arg0 netip.Addr, arg1 uint16, arg2 func([]byte) bool) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "AddUDPPacketHook", arg0, arg1, arg2, arg3)
- ret0, _ := ret[0].(string)
- return ret0
+ m.ctrl.Call(m, "SetUDPPacketHook", arg0, arg1, arg2)
}
-// AddUDPPacketHook indicates an expected call of AddUDPPacketHook.
-func (mr *MockPacketFilterMockRecorder) AddUDPPacketHook(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
+// SetUDPPacketHook indicates an expected call of SetUDPPacketHook.
+func (mr *MockPacketFilterMockRecorder) SetUDPPacketHook(arg0, arg1, arg2 interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddUDPPacketHook", reflect.TypeOf((*MockPacketFilter)(nil).AddUDPPacketHook), arg0, arg1, arg2, arg3)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetUDPPacketHook", reflect.TypeOf((*MockPacketFilter)(nil).SetUDPPacketHook), arg0, arg1, arg2)
+}
+
+// SetTCPPacketHook mocks base method.
+func (m *MockPacketFilter) SetTCPPacketHook(arg0 netip.Addr, arg1 uint16, arg2 func([]byte) bool) {
+ m.ctrl.T.Helper()
+ m.ctrl.Call(m, "SetTCPPacketHook", arg0, arg1, arg2)
+}
+
+// SetTCPPacketHook indicates an expected call of SetTCPPacketHook.
+func (mr *MockPacketFilterMockRecorder) SetTCPPacketHook(arg0, arg1, arg2 interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetTCPPacketHook", reflect.TypeOf((*MockPacketFilter)(nil).SetTCPPacketHook), arg0, arg1, arg2)
}
// FilterInbound mocks base method.
@@ -75,17 +85,3 @@ func (mr *MockPacketFilterMockRecorder) FilterOutbound(arg0 interface{}, arg1 an
mr.mock.ctrl.T.Helper()
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FilterOutbound", reflect.TypeOf((*MockPacketFilter)(nil).FilterOutbound), arg0, arg1)
}
-
-// RemovePacketHook mocks base method.
-func (m *MockPacketFilter) RemovePacketHook(arg0 string) error {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "RemovePacketHook", arg0)
- ret0, _ := ret[0].(error)
- return ret0
-}
-
-// RemovePacketHook indicates an expected call of RemovePacketHook.
-func (mr *MockPacketFilterMockRecorder) RemovePacketHook(arg0 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "RemovePacketHook", reflect.TypeOf((*MockPacketFilter)(nil).RemovePacketHook), arg0)
-}
diff --git a/client/iface/mocks/iface/mocks/filter.go b/client/iface/mocks/iface/mocks/filter.go
deleted file mode 100644
index 291ab9ab5..000000000
--- a/client/iface/mocks/iface/mocks/filter.go
+++ /dev/null
@@ -1,87 +0,0 @@
-// Code generated by MockGen. DO NOT EDIT.
-// Source: github.com/netbirdio/netbird/client/iface (interfaces: PacketFilter)
-
-// Package mocks is a generated GoMock package.
-package mocks
-
-import (
- net "net"
- reflect "reflect"
-
- gomock "github.com/golang/mock/gomock"
-)
-
-// MockPacketFilter is a mock of PacketFilter interface.
-type MockPacketFilter struct {
- ctrl *gomock.Controller
- recorder *MockPacketFilterMockRecorder
-}
-
-// MockPacketFilterMockRecorder is the mock recorder for MockPacketFilter.
-type MockPacketFilterMockRecorder struct {
- mock *MockPacketFilter
-}
-
-// NewMockPacketFilter creates a new mock instance.
-func NewMockPacketFilter(ctrl *gomock.Controller) *MockPacketFilter {
- mock := &MockPacketFilter{ctrl: ctrl}
- mock.recorder = &MockPacketFilterMockRecorder{mock}
- return mock
-}
-
-// EXPECT returns an object that allows the caller to indicate expected use.
-func (m *MockPacketFilter) EXPECT() *MockPacketFilterMockRecorder {
- return m.recorder
-}
-
-// AddUDPPacketHook mocks base method.
-func (m *MockPacketFilter) AddUDPPacketHook(arg0 bool, arg1 net.IP, arg2 uint16, arg3 func(*net.UDPAddr, []byte) bool) {
- m.ctrl.T.Helper()
- m.ctrl.Call(m, "AddUDPPacketHook", arg0, arg1, arg2, arg3)
-}
-
-// AddUDPPacketHook indicates an expected call of AddUDPPacketHook.
-func (mr *MockPacketFilterMockRecorder) AddUDPPacketHook(arg0, arg1, arg2, arg3 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "AddUDPPacketHook", reflect.TypeOf((*MockPacketFilter)(nil).AddUDPPacketHook), arg0, arg1, arg2, arg3)
-}
-
-// FilterInbound mocks base method.
-func (m *MockPacketFilter) FilterInbound(arg0 []byte) bool {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "FilterInbound", arg0)
- ret0, _ := ret[0].(bool)
- return ret0
-}
-
-// FilterInbound indicates an expected call of FilterInbound.
-func (mr *MockPacketFilterMockRecorder) FilterInbound(arg0 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FilterInbound", reflect.TypeOf((*MockPacketFilter)(nil).FilterInbound), arg0)
-}
-
-// FilterOutbound mocks base method.
-func (m *MockPacketFilter) FilterOutbound(arg0 []byte) bool {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "FilterOutbound", arg0)
- ret0, _ := ret[0].(bool)
- return ret0
-}
-
-// FilterOutbound indicates an expected call of FilterOutbound.
-func (mr *MockPacketFilterMockRecorder) FilterOutbound(arg0 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "FilterOutbound", reflect.TypeOf((*MockPacketFilter)(nil).FilterOutbound), arg0)
-}
-
-// SetNetwork mocks base method.
-func (m *MockPacketFilter) SetNetwork(arg0 *net.IPNet) {
- m.ctrl.T.Helper()
- m.ctrl.Call(m, "SetNetwork", arg0)
-}
-
-// SetNetwork indicates an expected call of SetNetwork.
-func (mr *MockPacketFilterMockRecorder) SetNetwork(arg0 interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "SetNetwork", reflect.TypeOf((*MockPacketFilter)(nil).SetNetwork), arg0)
-}
diff --git a/client/internal/acl/manager_test.go b/client/internal/acl/manager_test.go
index bd7adfaef..408ed992f 100644
--- a/client/internal/acl/manager_test.go
+++ b/client/internal/acl/manager_test.go
@@ -19,6 +19,9 @@ import (
var flowLogger = netflow.NewManager(nil, []byte{}, nil).GetLogger()
func TestDefaultManager(t *testing.T) {
+ t.Setenv("NB_WG_KERNEL_DISABLED", "true")
+ t.Setenv(firewall.EnvForceUserspaceFirewall, "true")
+
networkMap := &mgmProto.NetworkMap{
FirewallRules: []*mgmProto.FirewallRule{
{
@@ -135,6 +138,7 @@ func TestDefaultManager(t *testing.T) {
func TestDefaultManagerStateless(t *testing.T) {
// stateless currently only in userspace, so we have to disable kernel
t.Setenv("NB_WG_KERNEL_DISABLED", "true")
+ t.Setenv(firewall.EnvForceUserspaceFirewall, "true")
t.Setenv("NB_DISABLE_CONNTRACK", "true")
networkMap := &mgmProto.NetworkMap{
@@ -194,6 +198,7 @@ func TestDefaultManagerStateless(t *testing.T) {
// This tests the full ACL manager -> uspfilter integration.
func TestDenyRulesNotAccumulatedOnRepeatedApply(t *testing.T) {
t.Setenv("NB_WG_KERNEL_DISABLED", "true")
+ t.Setenv(firewall.EnvForceUserspaceFirewall, "true")
networkMap := &mgmProto.NetworkMap{
FirewallRules: []*mgmProto.FirewallRule{
@@ -258,6 +263,7 @@ func TestDenyRulesNotAccumulatedOnRepeatedApply(t *testing.T) {
// up when they're removed from the network map in a subsequent update.
func TestDenyRulesCleanedUpOnRemoval(t *testing.T) {
t.Setenv("NB_WG_KERNEL_DISABLED", "true")
+ t.Setenv(firewall.EnvForceUserspaceFirewall, "true")
ctrl := gomock.NewController(t)
defer ctrl.Finish()
@@ -339,6 +345,7 @@ func TestDenyRulesCleanedUpOnRemoval(t *testing.T) {
// one added without leaking.
func TestRuleUpdateChangingAction(t *testing.T) {
t.Setenv("NB_WG_KERNEL_DISABLED", "true")
+ t.Setenv(firewall.EnvForceUserspaceFirewall, "true")
ctrl := gomock.NewController(t)
defer ctrl.Finish()
diff --git a/client/internal/auth/auth.go b/client/internal/auth/auth.go
index bc768748e..bdfd07430 100644
--- a/client/internal/auth/auth.go
+++ b/client/internal/auth/auth.go
@@ -155,7 +155,7 @@ func (a *Auth) IsLoginRequired(ctx context.Context) (bool, error) {
var needsLogin bool
err = a.withRetry(ctx, func(client *mgm.GrpcClient) error {
- _, _, err := a.doMgmLogin(client, ctx, pubSSHKey)
+ err := a.doMgmLogin(client, ctx, pubSSHKey)
if isLoginNeeded(err) {
needsLogin = true
return nil
@@ -179,8 +179,8 @@ func (a *Auth) Login(ctx context.Context, setupKey string, jwtToken string) (err
var isAuthError bool
err = a.withRetry(ctx, func(client *mgm.GrpcClient) error {
- serverKey, _, err := a.doMgmLogin(client, ctx, pubSSHKey)
- if serverKey != nil && isRegistrationNeeded(err) {
+ err := a.doMgmLogin(client, ctx, pubSSHKey)
+ if isRegistrationNeeded(err) {
log.Debugf("peer registration required")
_, err = a.registerPeer(client, ctx, setupKey, jwtToken, pubSSHKey)
if err != nil {
@@ -201,13 +201,7 @@ func (a *Auth) Login(ctx context.Context, setupKey string, jwtToken string) (err
// getPKCEFlow retrieves PKCE authorization flow configuration and creates a flow instance
func (a *Auth) getPKCEFlow(client *mgm.GrpcClient) (*PKCEAuthorizationFlow, error) {
- serverKey, err := client.GetServerPublicKey()
- if err != nil {
- log.Errorf("failed while getting Management Service public key: %v", err)
- return nil, err
- }
-
- protoFlow, err := client.GetPKCEAuthorizationFlow(*serverKey)
+ protoFlow, err := client.GetPKCEAuthorizationFlow()
if err != nil {
if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {
log.Warnf("server couldn't find pkce flow, contact admin: %v", err)
@@ -246,13 +240,7 @@ func (a *Auth) getPKCEFlow(client *mgm.GrpcClient) (*PKCEAuthorizationFlow, erro
// getDeviceFlow retrieves device authorization flow configuration and creates a flow instance
func (a *Auth) getDeviceFlow(client *mgm.GrpcClient) (*DeviceAuthorizationFlow, error) {
- serverKey, err := client.GetServerPublicKey()
- if err != nil {
- log.Errorf("failed while getting Management Service public key: %v", err)
- return nil, err
- }
-
- protoFlow, err := client.GetDeviceAuthorizationFlow(*serverKey)
+ protoFlow, err := client.GetDeviceAuthorizationFlow()
if err != nil {
if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {
log.Warnf("server couldn't find device flow, contact admin: %v", err)
@@ -292,28 +280,16 @@ func (a *Auth) getDeviceFlow(client *mgm.GrpcClient) (*DeviceAuthorizationFlow,
}
// doMgmLogin performs the actual login operation with the management service
-func (a *Auth) doMgmLogin(client *mgm.GrpcClient, ctx context.Context, pubSSHKey []byte) (*wgtypes.Key, *mgmProto.LoginResponse, error) {
- serverKey, err := client.GetServerPublicKey()
- if err != nil {
- log.Errorf("failed while getting Management Service public key: %v", err)
- return nil, nil, err
- }
-
+func (a *Auth) doMgmLogin(client *mgm.GrpcClient, ctx context.Context, pubSSHKey []byte) error {
sysInfo := system.GetInfo(ctx)
a.setSystemInfoFlags(sysInfo)
- loginResp, err := client.Login(*serverKey, sysInfo, pubSSHKey, a.config.DNSLabels)
- return serverKey, loginResp, err
+ _, err := client.Login(sysInfo, pubSSHKey, a.config.DNSLabels)
+ return err
}
// registerPeer checks whether setupKey was provided via cmd line and if not then it prompts user to enter a key.
// Otherwise tries to register with the provided setupKey via command line.
func (a *Auth) registerPeer(client *mgm.GrpcClient, ctx context.Context, setupKey string, jwtToken string, pubSSHKey []byte) (*mgmProto.LoginResponse, error) {
- serverPublicKey, err := client.GetServerPublicKey()
- if err != nil {
- log.Errorf("failed while getting Management Service public key: %v", err)
- return nil, err
- }
-
validSetupKey, err := uuid.Parse(setupKey)
if err != nil && jwtToken == "" {
return nil, status.Errorf(codes.InvalidArgument, "invalid setup-key or no sso information provided, err: %v", err)
@@ -322,7 +298,7 @@ func (a *Auth) registerPeer(client *mgm.GrpcClient, ctx context.Context, setupKe
log.Debugf("sending peer registration request to Management Service")
info := system.GetInfo(ctx)
a.setSystemInfoFlags(info)
- loginResp, err := client.Register(*serverPublicKey, validSetupKey.String(), jwtToken, info, pubSSHKey, a.config.DNSLabels)
+ loginResp, err := client.Register(validSetupKey.String(), jwtToken, info, pubSSHKey, a.config.DNSLabels)
if err != nil {
log.Errorf("failed registering peer %v", err)
return nil, err
diff --git a/client/internal/connect.go b/client/internal/connect.go
index 1e8f87c08..bc2bd84d9 100644
--- a/client/internal/connect.go
+++ b/client/internal/connect.go
@@ -111,6 +111,7 @@ func (c *ConnectClient) RunOniOS(
fileDescriptor int32,
networkChangeListener listener.NetworkChangeListener,
dnsManager dns.IosDnsManager,
+ dnsAddresses []netip.AddrPort,
stateFilePath string,
) error {
// Set GC percent to 5% to reduce memory usage as iOS only allows 50MB of memory for the extension.
@@ -120,6 +121,7 @@ func (c *ConnectClient) RunOniOS(
FileDescriptor: fileDescriptor,
NetworkChangeListener: networkChangeListener,
DnsManager: dnsManager,
+ HostDNSAddresses: dnsAddresses,
StateFilePath: stateFilePath,
}
return c.run(mobileDependency, nil, "")
@@ -617,12 +619,6 @@ func connectToSignal(ctx context.Context, wtConfig *mgmProto.NetbirdConfig, ourP
// loginToManagement creates Management ServiceDependencies client, establishes a connection, logs-in and gets a global Netbird config (signal, turn, stun hosts, etc)
func loginToManagement(ctx context.Context, client mgm.Client, pubSSHKey []byte, config *profilemanager.Config) (*mgmProto.LoginResponse, error) {
-
- serverPublicKey, err := client.GetServerPublicKey()
- if err != nil {
- return nil, gstatus.Errorf(codes.FailedPrecondition, "failed while getting Management Service public key: %s", err)
- }
-
sysInfo := system.GetInfo(ctx)
sysInfo.SetFlags(
config.RosenpassEnabled,
@@ -641,12 +637,7 @@ func loginToManagement(ctx context.Context, client mgm.Client, pubSSHKey []byte,
config.EnableSSHRemotePortForwarding,
config.DisableSSHAuth,
)
- loginResp, err := client.Login(*serverPublicKey, sysInfo, pubSSHKey, config.DNSLabels)
- if err != nil {
- return nil, err
- }
-
- return loginResp, nil
+ return client.Login(sysInfo, pubSSHKey, config.DNSLabels)
}
func statusRecorderToMgmConnStateNotifier(statusRecorder *peer.Status) mgm.ConnStateNotifier {
diff --git a/client/internal/debug/debug.go b/client/internal/debug/debug.go
index c9ebf25e5..6a8eae324 100644
--- a/client/internal/debug/debug.go
+++ b/client/internal/debug/debug.go
@@ -25,6 +25,7 @@ import (
"google.golang.org/protobuf/encoding/protojson"
"github.com/netbirdio/netbird/client/anonymize"
+ "github.com/netbirdio/netbird/client/configs"
"github.com/netbirdio/netbird/client/internal/peer"
"github.com/netbirdio/netbird/client/internal/profilemanager"
"github.com/netbirdio/netbird/client/internal/updater/installer"
@@ -52,6 +53,7 @@ resolved_domains.txt: Anonymized resolved domain IP addresses from the status re
config.txt: Anonymized configuration information of the NetBird client.
network_map.json: Anonymized sync response containing peer configurations, routes, DNS settings, and firewall rules.
state.json: Anonymized client state dump containing netbird states for the active profile.
+service_params.json: Sanitized service install parameters (service.json). Sensitive environment variable values are masked. Only present when service.json exists.
metrics.txt: Buffered client metrics in InfluxDB line protocol format. Only present when metrics collection is enabled. Peer identifiers are anonymized.
mutex.prof: Mutex profiling information.
goroutine.prof: Goroutine profiling information.
@@ -359,6 +361,10 @@ func (g *BundleGenerator) createArchive() error {
log.Errorf("failed to add corrupted state files to debug bundle: %v", err)
}
+ if err := g.addServiceParams(); err != nil {
+ log.Errorf("failed to add service params to debug bundle: %v", err)
+ }
+
if err := g.addMetrics(); err != nil {
log.Errorf("failed to add metrics to debug bundle: %v", err)
}
@@ -488,6 +494,90 @@ func (g *BundleGenerator) addConfig() error {
return nil
}
+const (
+ serviceParamsFile = "service.json"
+ serviceParamsBundle = "service_params.json"
+ maskedValue = "***"
+ envVarPrefix = "NB_"
+ jsonKeyManagementURL = "management_url"
+ jsonKeyServiceEnv = "service_env_vars"
+)
+
+var sensitiveEnvSubstrings = []string{"key", "token", "secret", "password", "credential"}
+
+// addServiceParams reads the service.json file and adds a sanitized version to the bundle.
+// Non-NB_ env vars and vars with sensitive names are masked. Other NB_ values are anonymized.
+func (g *BundleGenerator) addServiceParams() error {
+ path := filepath.Join(configs.StateDir, serviceParamsFile)
+
+ data, err := os.ReadFile(path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil
+ }
+ return fmt.Errorf("read service params: %w", err)
+ }
+
+ var params map[string]any
+ if err := json.Unmarshal(data, ¶ms); err != nil {
+ return fmt.Errorf("parse service params: %w", err)
+ }
+
+ if g.anonymize {
+ if mgmtURL, ok := params[jsonKeyManagementURL].(string); ok && mgmtURL != "" {
+ params[jsonKeyManagementURL] = g.anonymizer.AnonymizeURI(mgmtURL)
+ }
+ }
+
+ g.sanitizeServiceEnvVars(params)
+
+ sanitizedData, err := json.MarshalIndent(params, "", " ")
+ if err != nil {
+ return fmt.Errorf("marshal sanitized service params: %w", err)
+ }
+
+ if err := g.addFileToZip(bytes.NewReader(sanitizedData), serviceParamsBundle); err != nil {
+ return fmt.Errorf("add service params to zip: %w", err)
+ }
+
+ return nil
+}
+
+// sanitizeServiceEnvVars masks or anonymizes env var values in service params.
+// Non-NB_ vars and vars with sensitive names (key, token, etc.) are fully masked.
+// Other NB_ var values are passed through the anonymizer when anonymization is enabled.
+func (g *BundleGenerator) sanitizeServiceEnvVars(params map[string]any) {
+ envVars, ok := params[jsonKeyServiceEnv].(map[string]any)
+ if !ok {
+ return
+ }
+
+ sanitized := make(map[string]any, len(envVars))
+ for k, v := range envVars {
+ val, _ := v.(string)
+ switch {
+ case !strings.HasPrefix(k, envVarPrefix) || isSensitiveEnvVar(k):
+ sanitized[k] = maskedValue
+ case g.anonymize:
+ sanitized[k] = g.anonymizer.AnonymizeString(val)
+ default:
+ sanitized[k] = val
+ }
+ }
+ params[jsonKeyServiceEnv] = sanitized
+}
+
+// isSensitiveEnvVar returns true for env var names that may contain secrets.
+func isSensitiveEnvVar(key string) bool {
+ lower := strings.ToLower(key)
+ for _, s := range sensitiveEnvSubstrings {
+ if strings.Contains(lower, s) {
+ return true
+ }
+ }
+ return false
+}
+
func (g *BundleGenerator) addCommonConfigFields(configContent *strings.Builder) {
configContent.WriteString("NetBird Client Configuration:\n\n")
diff --git a/client/internal/debug/debug_test.go b/client/internal/debug/debug_test.go
index 59837c328..6b5bb911c 100644
--- a/client/internal/debug/debug_test.go
+++ b/client/internal/debug/debug_test.go
@@ -1,8 +1,12 @@
package debug
import (
+ "archive/zip"
+ "bytes"
"encoding/json"
"net"
+ "os"
+ "path/filepath"
"strings"
"testing"
@@ -10,6 +14,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/netbirdio/netbird/client/anonymize"
+ "github.com/netbirdio/netbird/client/configs"
mgmProto "github.com/netbirdio/netbird/shared/management/proto"
)
@@ -420,6 +425,226 @@ func TestAnonymizeNetworkMap(t *testing.T) {
}
}
+func TestIsSensitiveEnvVar(t *testing.T) {
+ tests := []struct {
+ key string
+ sensitive bool
+ }{
+ {"NB_SETUP_KEY", true},
+ {"NB_API_TOKEN", true},
+ {"NB_CLIENT_SECRET", true},
+ {"NB_PASSWORD", true},
+ {"NB_CREDENTIAL", true},
+ {"NB_LOG_LEVEL", false},
+ {"NB_MANAGEMENT_URL", false},
+ {"NB_HOSTNAME", false},
+ {"HOME", false},
+ {"PATH", false},
+ }
+ for _, tt := range tests {
+ t.Run(tt.key, func(t *testing.T) {
+ assert.Equal(t, tt.sensitive, isSensitiveEnvVar(tt.key))
+ })
+ }
+}
+
+func TestSanitizeServiceEnvVars(t *testing.T) {
+ tests := []struct {
+ name string
+ anonymize bool
+ input map[string]any
+ check func(t *testing.T, params map[string]any)
+ }{
+ {
+ name: "no env vars key",
+ anonymize: false,
+ input: map[string]any{"management_url": "https://mgmt.example.com"},
+ check: func(t *testing.T, params map[string]any) {
+ t.Helper()
+ assert.Equal(t, "https://mgmt.example.com", params["management_url"], "non-env fields should be untouched")
+ _, ok := params[jsonKeyServiceEnv]
+ assert.False(t, ok, "service_env_vars should not be added")
+ },
+ },
+ {
+ name: "non-NB vars are masked",
+ anonymize: false,
+ input: map[string]any{
+ jsonKeyServiceEnv: map[string]any{
+ "HOME": "/root",
+ "PATH": "/usr/bin",
+ "NB_LOG_LEVEL": "debug",
+ },
+ },
+ check: func(t *testing.T, params map[string]any) {
+ t.Helper()
+ env := params[jsonKeyServiceEnv].(map[string]any)
+ assert.Equal(t, maskedValue, env["HOME"], "non-NB_ var should be masked")
+ assert.Equal(t, maskedValue, env["PATH"], "non-NB_ var should be masked")
+ assert.Equal(t, "debug", env["NB_LOG_LEVEL"], "safe NB_ var should pass through")
+ },
+ },
+ {
+ name: "sensitive NB vars are masked",
+ anonymize: false,
+ input: map[string]any{
+ jsonKeyServiceEnv: map[string]any{
+ "NB_SETUP_KEY": "abc123",
+ "NB_API_TOKEN": "tok_xyz",
+ "NB_LOG_LEVEL": "info",
+ },
+ },
+ check: func(t *testing.T, params map[string]any) {
+ t.Helper()
+ env := params[jsonKeyServiceEnv].(map[string]any)
+ assert.Equal(t, maskedValue, env["NB_SETUP_KEY"], "sensitive NB_ var should be masked")
+ assert.Equal(t, maskedValue, env["NB_API_TOKEN"], "sensitive NB_ var should be masked")
+ assert.Equal(t, "info", env["NB_LOG_LEVEL"], "safe NB_ var should pass through")
+ },
+ },
+ {
+ name: "safe NB vars anonymized when anonymize is true",
+ anonymize: true,
+ input: map[string]any{
+ jsonKeyServiceEnv: map[string]any{
+ "NB_MANAGEMENT_URL": "https://mgmt.example.com:443",
+ "NB_LOG_LEVEL": "debug",
+ "NB_SETUP_KEY": "secret",
+ "SOME_OTHER": "val",
+ },
+ },
+ check: func(t *testing.T, params map[string]any) {
+ t.Helper()
+ env := params[jsonKeyServiceEnv].(map[string]any)
+ // Safe NB_ values should be anonymized (not the original, not masked)
+ mgmtVal := env["NB_MANAGEMENT_URL"].(string)
+ assert.NotEqual(t, "https://mgmt.example.com:443", mgmtVal, "should be anonymized")
+ assert.NotEqual(t, maskedValue, mgmtVal, "should not be masked")
+
+ logVal := env["NB_LOG_LEVEL"].(string)
+ assert.NotEqual(t, maskedValue, logVal, "safe NB_ var should not be masked")
+
+ // Sensitive and non-NB_ still masked
+ assert.Equal(t, maskedValue, env["NB_SETUP_KEY"])
+ assert.Equal(t, maskedValue, env["SOME_OTHER"])
+ },
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ anonymizer := anonymize.NewAnonymizer(anonymize.DefaultAddresses())
+ g := &BundleGenerator{
+ anonymize: tt.anonymize,
+ anonymizer: anonymizer,
+ }
+ g.sanitizeServiceEnvVars(tt.input)
+ tt.check(t, tt.input)
+ })
+ }
+}
+
+func TestAddServiceParams(t *testing.T) {
+ t.Run("missing service.json returns nil", func(t *testing.T) {
+ g := &BundleGenerator{
+ anonymizer: anonymize.NewAnonymizer(anonymize.DefaultAddresses()),
+ }
+
+ origStateDir := configs.StateDir
+ configs.StateDir = t.TempDir()
+ t.Cleanup(func() { configs.StateDir = origStateDir })
+
+ err := g.addServiceParams()
+ assert.NoError(t, err)
+ })
+
+ t.Run("management_url anonymized when anonymize is true", func(t *testing.T) {
+ dir := t.TempDir()
+ origStateDir := configs.StateDir
+ configs.StateDir = dir
+ t.Cleanup(func() { configs.StateDir = origStateDir })
+
+ input := map[string]any{
+ jsonKeyManagementURL: "https://api.example.com:443",
+ jsonKeyServiceEnv: map[string]any{
+ "NB_LOG_LEVEL": "trace",
+ },
+ }
+ data, err := json.Marshal(input)
+ require.NoError(t, err)
+ require.NoError(t, os.WriteFile(filepath.Join(dir, serviceParamsFile), data, 0600))
+
+ var buf bytes.Buffer
+ zw := zip.NewWriter(&buf)
+
+ g := &BundleGenerator{
+ anonymize: true,
+ anonymizer: anonymize.NewAnonymizer(anonymize.DefaultAddresses()),
+ archive: zw,
+ }
+
+ require.NoError(t, g.addServiceParams())
+ require.NoError(t, zw.Close())
+
+ zr, err := zip.NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
+ require.NoError(t, err)
+ require.Len(t, zr.File, 1)
+ assert.Equal(t, serviceParamsBundle, zr.File[0].Name)
+
+ rc, err := zr.File[0].Open()
+ require.NoError(t, err)
+ defer rc.Close()
+
+ var result map[string]any
+ require.NoError(t, json.NewDecoder(rc).Decode(&result))
+
+ mgmt := result[jsonKeyManagementURL].(string)
+ assert.NotEqual(t, "https://api.example.com:443", mgmt, "management_url should be anonymized")
+ assert.NotEmpty(t, mgmt)
+
+ env := result[jsonKeyServiceEnv].(map[string]any)
+ assert.NotEqual(t, maskedValue, env["NB_LOG_LEVEL"], "safe NB_ var should not be masked")
+ })
+
+ t.Run("management_url preserved when anonymize is false", func(t *testing.T) {
+ dir := t.TempDir()
+ origStateDir := configs.StateDir
+ configs.StateDir = dir
+ t.Cleanup(func() { configs.StateDir = origStateDir })
+
+ input := map[string]any{
+ jsonKeyManagementURL: "https://api.example.com:443",
+ }
+ data, err := json.Marshal(input)
+ require.NoError(t, err)
+ require.NoError(t, os.WriteFile(filepath.Join(dir, serviceParamsFile), data, 0600))
+
+ var buf bytes.Buffer
+ zw := zip.NewWriter(&buf)
+
+ g := &BundleGenerator{
+ anonymize: false,
+ anonymizer: anonymize.NewAnonymizer(anonymize.DefaultAddresses()),
+ archive: zw,
+ }
+
+ require.NoError(t, g.addServiceParams())
+ require.NoError(t, zw.Close())
+
+ zr, err := zip.NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
+ require.NoError(t, err)
+
+ rc, err := zr.File[0].Open()
+ require.NoError(t, err)
+ defer rc.Close()
+
+ var result map[string]any
+ require.NoError(t, json.NewDecoder(rc).Decode(&result))
+
+ assert.Equal(t, "https://api.example.com:443", result[jsonKeyManagementURL], "management_url should be preserved")
+ })
+}
+
// Helper function to check if IP is in CGNAT range
func isInCGNATRange(ip net.IP) bool {
cgnat := net.IPNet{
diff --git a/client/internal/dns/handler_chain.go b/client/internal/dns/handler_chain.go
index 06a2056b1..6fbdedc59 100644
--- a/client/internal/dns/handler_chain.go
+++ b/client/internal/dns/handler_chain.go
@@ -73,6 +73,9 @@ func (w *ResponseWriterChain) WriteMsg(m *dns.Msg) error {
return nil
}
w.response = m
+ if m.MsgHdr.Truncated {
+ w.SetMeta("truncated", "true")
+ }
return w.ResponseWriter.WriteMsg(m)
}
@@ -195,10 +198,14 @@ func (c *HandlerChain) ServeDNS(w dns.ResponseWriter, r *dns.Msg) {
startTime := time.Now()
requestID := resutil.GenerateRequestID()
- logger := log.WithFields(log.Fields{
+ fields := log.Fields{
"request_id": requestID,
"dns_id": fmt.Sprintf("%04x", r.Id),
- })
+ }
+ if addr := w.RemoteAddr(); addr != nil {
+ fields["client"] = addr.String()
+ }
+ logger := log.WithFields(fields)
question := r.Question[0]
qname := strings.ToLower(question.Name)
@@ -261,9 +268,9 @@ func (c *HandlerChain) logResponse(logger *log.Entry, cw *ResponseWriterChain, q
meta += " " + k + "=" + v
}
- logger.Tracef("response: domain=%s rcode=%s answers=%s%s took=%s",
+ logger.Tracef("response: domain=%s rcode=%s answers=%s size=%dB%s took=%s",
qname, dns.RcodeToString[cw.response.Rcode], resutil.FormatAnswers(cw.response.Answer),
- meta, time.Since(startTime))
+ cw.response.Len(), meta, time.Since(startTime))
}
func (c *HandlerChain) isHandlerMatch(qname string, entry HandlerEntry) bool {
diff --git a/client/internal/dns/local/local_test.go b/client/internal/dns/local/local_test.go
index 73f70035f..2c6b7dbc3 100644
--- a/client/internal/dns/local/local_test.go
+++ b/client/internal/dns/local/local_test.go
@@ -1263,9 +1263,9 @@ func TestLocalResolver_AuthoritativeFlag(t *testing.T) {
})
}
-// TestLocalResolver_Stop tests cleanup on Stop
+// TestLocalResolver_Stop tests cleanup on GracefullyStop
func TestLocalResolver_Stop(t *testing.T) {
- t.Run("Stop clears all state", func(t *testing.T) {
+ t.Run("GracefullyStop clears all state", func(t *testing.T) {
resolver := NewResolver()
resolver.Update([]nbdns.CustomZone{{
Domain: "example.com.",
@@ -1285,7 +1285,7 @@ func TestLocalResolver_Stop(t *testing.T) {
assert.False(t, resolver.isInManagedZone("host.example.com."))
})
- t.Run("Stop is safe to call multiple times", func(t *testing.T) {
+ t.Run("GracefullyStop is safe to call multiple times", func(t *testing.T) {
resolver := NewResolver()
resolver.Update([]nbdns.CustomZone{{
Domain: "example.com.",
@@ -1299,7 +1299,7 @@ func TestLocalResolver_Stop(t *testing.T) {
resolver.Stop()
})
- t.Run("Stop cancels in-flight external resolution", func(t *testing.T) {
+ t.Run("GracefullyStop cancels in-flight external resolution", func(t *testing.T) {
resolver := NewResolver()
lookupStarted := make(chan struct{})
diff --git a/client/internal/dns/mock_server.go b/client/internal/dns/mock_server.go
index 1df57d1db..548b1f54f 100644
--- a/client/internal/dns/mock_server.go
+++ b/client/internal/dns/mock_server.go
@@ -90,6 +90,11 @@ func (m *MockServer) SetRouteChecker(func(netip.Addr) bool) {
// Mock implementation - no-op
}
+// SetFirewall mock implementation of SetFirewall from Server interface
+func (m *MockServer) SetFirewall(Firewall) {
+ // Mock implementation - no-op
+}
+
// BeginBatch mock implementation of BeginBatch from Server interface
func (m *MockServer) BeginBatch() {
// Mock implementation - no-op
diff --git a/client/internal/dns/response_writer.go b/client/internal/dns/response_writer.go
index edc65a5d9..287cf28b0 100644
--- a/client/internal/dns/response_writer.go
+++ b/client/internal/dns/response_writer.go
@@ -104,3 +104,23 @@ func (r *responseWriter) TsigTimersOnly(bool) {
// After a call to Hijack(), the DNS package will not do anything with the connection.
func (r *responseWriter) Hijack() {
}
+
+// remoteAddrFromPacket extracts the source IP:port from a decoded packet for logging.
+func remoteAddrFromPacket(packet gopacket.Packet) *net.UDPAddr {
+ var srcIP net.IP
+ if ipv4 := packet.Layer(layers.LayerTypeIPv4); ipv4 != nil {
+ srcIP = ipv4.(*layers.IPv4).SrcIP
+ } else if ipv6 := packet.Layer(layers.LayerTypeIPv6); ipv6 != nil {
+ srcIP = ipv6.(*layers.IPv6).SrcIP
+ }
+
+ var srcPort int
+ if udp := packet.Layer(layers.LayerTypeUDP); udp != nil {
+ srcPort = int(udp.(*layers.UDP).SrcPort)
+ }
+
+ if srcIP == nil {
+ return nil
+ }
+ return &net.UDPAddr{IP: srcIP, Port: srcPort}
+}
diff --git a/client/internal/dns/server.go b/client/internal/dns/server.go
index 3c47f4ee6..f7865047b 100644
--- a/client/internal/dns/server.go
+++ b/client/internal/dns/server.go
@@ -58,6 +58,7 @@ type Server interface {
UpdateServerConfig(domains dnsconfig.ServerDomains) error
PopulateManagementDomain(mgmtURL *url.URL) error
SetRouteChecker(func(netip.Addr) bool)
+ SetFirewall(Firewall)
}
type nsGroupsByDomain struct {
@@ -151,7 +152,7 @@ func NewDefaultServer(ctx context.Context, config DefaultServerConfig) (*Default
if config.WgInterface.IsUserspaceBind() {
dnsService = NewServiceViaMemory(config.WgInterface)
} else {
- dnsService = newServiceViaListener(config.WgInterface, addrPort)
+ dnsService = newServiceViaListener(config.WgInterface, addrPort, nil)
}
server := newDefaultServer(ctx, config.WgInterface, dnsService, config.StatusRecorder, config.StateManager, config.DisableSys)
@@ -186,11 +187,16 @@ func NewDefaultServerIos(
ctx context.Context,
wgInterface WGIface,
iosDnsManager IosDnsManager,
+ hostsDnsList []netip.AddrPort,
statusRecorder *peer.Status,
disableSys bool,
) *DefaultServer {
+ log.Debugf("iOS host dns address list is: %v", hostsDnsList)
ds := newDefaultServer(ctx, wgInterface, NewServiceViaMemory(wgInterface), statusRecorder, nil, disableSys)
ds.iosDnsManager = iosDnsManager
+ ds.hostsDNSHolder.set(hostsDnsList)
+ ds.permanent = true
+ ds.addHostRootZone()
return ds
}
@@ -374,6 +380,17 @@ func (s *DefaultServer) DnsIP() netip.Addr {
return s.service.RuntimeIP()
}
+// SetFirewall sets the firewall used for DNS port DNAT rules.
+// This must be called before Initialize when using the listener-based service,
+// because the firewall is typically not available at construction time.
+func (s *DefaultServer) SetFirewall(fw Firewall) {
+ if svc, ok := s.service.(*serviceViaListener); ok {
+ svc.listenerFlagLock.Lock()
+ svc.firewall = fw
+ svc.listenerFlagLock.Unlock()
+ }
+}
+
// Stop stops the server
func (s *DefaultServer) Stop() {
s.probeMu.Lock()
@@ -395,8 +412,12 @@ func (s *DefaultServer) Stop() {
maps.Clear(s.extraDomains)
}
-func (s *DefaultServer) disableDNS() error {
- defer s.service.Stop()
+func (s *DefaultServer) disableDNS() (retErr error) {
+ defer func() {
+ if err := s.service.Stop(); err != nil {
+ retErr = errors.Join(retErr, fmt.Errorf("stop DNS service: %w", err))
+ }
+ }()
if s.isUsingNoopHostManager() {
return nil
diff --git a/client/internal/dns/server_test.go b/client/internal/dns/server_test.go
index d3b0c250d..f77f6e898 100644
--- a/client/internal/dns/server_test.go
+++ b/client/internal/dns/server_test.go
@@ -476,8 +476,8 @@ func TestDNSFakeResolverHandleUpdates(t *testing.T) {
packetfilter := pfmock.NewMockPacketFilter(ctrl)
packetfilter.EXPECT().FilterOutbound(gomock.Any(), gomock.Any()).AnyTimes()
- packetfilter.EXPECT().AddUDPPacketHook(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any())
- packetfilter.EXPECT().RemovePacketHook(gomock.Any())
+ packetfilter.EXPECT().SetUDPPacketHook(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
+ packetfilter.EXPECT().SetTCPPacketHook(gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes()
if err := wgIface.SetFilter(packetfilter); err != nil {
t.Errorf("set packet filter: %v", err)
@@ -1071,7 +1071,7 @@ func (m *mockHandler) ID() types.HandlerID { return types.Hand
type mockService struct{}
func (m *mockService) Listen() error { return nil }
-func (m *mockService) Stop() {}
+func (m *mockService) Stop() error { return nil }
func (m *mockService) RuntimeIP() netip.Addr { return netip.MustParseAddr("127.0.0.1") }
func (m *mockService) RuntimePort() int { return 53 }
func (m *mockService) RegisterMux(string, dns.Handler) {}
diff --git a/client/internal/dns/service.go b/client/internal/dns/service.go
index 6a76c53e3..1c6ce7849 100644
--- a/client/internal/dns/service.go
+++ b/client/internal/dns/service.go
@@ -4,15 +4,25 @@ import (
"net/netip"
"github.com/miekg/dns"
+
+ firewall "github.com/netbirdio/netbird/client/firewall/manager"
)
const (
DefaultPort = 53
)
+// Firewall provides DNAT capabilities for DNS port redirection.
+// This is used when the DNS server cannot bind port 53 directly
+// and needs firewall rules to redirect traffic.
+type Firewall interface {
+ AddOutputDNAT(localAddr netip.Addr, protocol firewall.Protocol, sourcePort, targetPort uint16) error
+ RemoveOutputDNAT(localAddr netip.Addr, protocol firewall.Protocol, sourcePort, targetPort uint16) error
+}
+
type service interface {
Listen() error
- Stop()
+ Stop() error
RegisterMux(domain string, handler dns.Handler)
DeregisterMux(key string)
RuntimePort() int
diff --git a/client/internal/dns/service_listener.go b/client/internal/dns/service_listener.go
index f7ddfd40f..4e09f1b7f 100644
--- a/client/internal/dns/service_listener.go
+++ b/client/internal/dns/service_listener.go
@@ -10,9 +10,13 @@ import (
"sync"
"time"
+ "github.com/hashicorp/go-multierror"
"github.com/miekg/dns"
log "github.com/sirupsen/logrus"
+ nberrors "github.com/netbirdio/netbird/client/errors"
+
+ firewall "github.com/netbirdio/netbird/client/firewall/manager"
"github.com/netbirdio/netbird/client/internal/ebpf"
ebpfMgr "github.com/netbirdio/netbird/client/internal/ebpf/manager"
)
@@ -31,25 +35,33 @@ type serviceViaListener struct {
dnsMux *dns.ServeMux
customAddr *netip.AddrPort
server *dns.Server
+ tcpServer *dns.Server
listenIP netip.Addr
listenPort uint16
listenerIsRunning bool
listenerFlagLock sync.Mutex
ebpfService ebpfMgr.Manager
+ firewall Firewall
+ tcpDNATConfigured bool
}
-func newServiceViaListener(wgIface WGIface, customAddr *netip.AddrPort) *serviceViaListener {
+func newServiceViaListener(wgIface WGIface, customAddr *netip.AddrPort, fw Firewall) *serviceViaListener {
mux := dns.NewServeMux()
s := &serviceViaListener{
wgInterface: wgIface,
dnsMux: mux,
customAddr: customAddr,
+ firewall: fw,
server: &dns.Server{
Net: "udp",
Handler: mux,
UDPSize: 65535,
},
+ tcpServer: &dns.Server{
+ Net: "tcp",
+ Handler: mux,
+ },
}
return s
@@ -70,43 +82,86 @@ func (s *serviceViaListener) Listen() error {
return fmt.Errorf("eval listen address: %w", err)
}
s.listenIP = s.listenIP.Unmap()
- s.server.Addr = net.JoinHostPort(s.listenIP.String(), strconv.Itoa(int(s.listenPort)))
- log.Debugf("starting dns on %s", s.server.Addr)
- go func() {
- s.setListenerStatus(true)
- defer s.setListenerStatus(false)
+ addr := net.JoinHostPort(s.listenIP.String(), strconv.Itoa(int(s.listenPort)))
+ s.server.Addr = addr
+ s.tcpServer.Addr = addr
- err := s.server.ListenAndServe()
- if err != nil {
- log.Errorf("dns server running with %d port returned an error: %v. Will not retry", s.listenPort, err)
+ log.Debugf("starting dns on %s (UDP + TCP)", addr)
+ s.listenerIsRunning = true
+
+ go func() {
+ if err := s.server.ListenAndServe(); err != nil {
+ log.Errorf("failed to run DNS UDP server on port %d: %v", s.listenPort, err)
+ }
+
+ s.listenerFlagLock.Lock()
+ unexpected := s.listenerIsRunning
+ s.listenerIsRunning = false
+ s.listenerFlagLock.Unlock()
+
+ if unexpected {
+ if err := s.tcpServer.Shutdown(); err != nil {
+ log.Debugf("failed to shutdown DNS TCP server: %v", err)
+ }
}
}()
+ go func() {
+ if err := s.tcpServer.ListenAndServe(); err != nil {
+ log.Errorf("failed to run DNS TCP server on port %d: %v", s.listenPort, err)
+ }
+ }()
+
+ // When eBPF redirects UDP port 53 to our listen port, TCP still needs
+ // a DNAT rule because eBPF only handles UDP.
+ if s.ebpfService != nil && s.firewall != nil && s.listenPort != DefaultPort {
+ if err := s.firewall.AddOutputDNAT(s.listenIP, firewall.ProtocolTCP, DefaultPort, s.listenPort); err != nil {
+ log.Warnf("failed to add DNS TCP DNAT rule, TCP DNS on port 53 will not work: %v", err)
+ } else {
+ s.tcpDNATConfigured = true
+ log.Infof("added DNS TCP DNAT rule: %s:%d -> %s:%d", s.listenIP, DefaultPort, s.listenIP, s.listenPort)
+ }
+ }
+
return nil
}
-func (s *serviceViaListener) Stop() {
+func (s *serviceViaListener) Stop() error {
s.listenerFlagLock.Lock()
defer s.listenerFlagLock.Unlock()
if !s.listenerIsRunning {
- return
+ return nil
}
+ s.listenerIsRunning = false
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer cancel()
- err := s.server.ShutdownContext(ctx)
- if err != nil {
- log.Errorf("stopping dns server listener returned an error: %v", err)
+ var merr *multierror.Error
+
+ if err := s.server.ShutdownContext(ctx); err != nil {
+ merr = multierror.Append(merr, fmt.Errorf("stop DNS UDP server: %w", err))
+ }
+
+ if err := s.tcpServer.ShutdownContext(ctx); err != nil {
+ merr = multierror.Append(merr, fmt.Errorf("stop DNS TCP server: %w", err))
+ }
+
+ if s.tcpDNATConfigured && s.firewall != nil {
+ if err := s.firewall.RemoveOutputDNAT(s.listenIP, firewall.ProtocolTCP, DefaultPort, s.listenPort); err != nil {
+ merr = multierror.Append(merr, fmt.Errorf("remove DNS TCP DNAT rule: %w", err))
+ }
+ s.tcpDNATConfigured = false
}
if s.ebpfService != nil {
- err = s.ebpfService.FreeDNSFwd()
- if err != nil {
- log.Errorf("stopping traffic forwarder returned an error: %v", err)
+ if err := s.ebpfService.FreeDNSFwd(); err != nil {
+ merr = multierror.Append(merr, fmt.Errorf("stop traffic forwarder: %w", err))
}
}
+
+ return nberrors.FormatErrorOrNil(merr)
}
func (s *serviceViaListener) RegisterMux(pattern string, handler dns.Handler) {
@@ -133,12 +188,6 @@ func (s *serviceViaListener) RuntimeIP() netip.Addr {
return s.listenIP
}
-func (s *serviceViaListener) setListenerStatus(running bool) {
- s.listenerFlagLock.Lock()
- defer s.listenerFlagLock.Unlock()
-
- s.listenerIsRunning = running
-}
// evalListenAddress figure out the listen address for the DNS server
// first check the 53 port availability on WG interface or lo, if not success
@@ -187,18 +236,28 @@ func (s *serviceViaListener) testFreePort(port int) (netip.Addr, bool) {
}
func (s *serviceViaListener) tryToBind(ip netip.Addr, port int) bool {
- addrString := net.JoinHostPort(ip.String(), strconv.Itoa(port))
- udpAddr := net.UDPAddrFromAddrPort(netip.MustParseAddrPort(addrString))
- probeListener, err := net.ListenUDP("udp", udpAddr)
+ addrPort := netip.AddrPortFrom(ip, uint16(port))
+
+ udpAddr := net.UDPAddrFromAddrPort(addrPort)
+ udpLn, err := net.ListenUDP("udp", udpAddr)
if err != nil {
- log.Warnf("binding dns on %s is not available, error: %s", addrString, err)
+ log.Warnf("binding dns UDP on %s is not available: %s", addrPort, err)
return false
}
-
- err = probeListener.Close()
- if err != nil {
- log.Errorf("got an error closing the probe listener, error: %s", err)
+ if err := udpLn.Close(); err != nil {
+ log.Debugf("close UDP probe listener: %s", err)
}
+
+ tcpAddr := net.TCPAddrFromAddrPort(addrPort)
+ tcpLn, err := net.ListenTCP("tcp", tcpAddr)
+ if err != nil {
+ log.Warnf("binding dns TCP on %s is not available: %s", addrPort, err)
+ return false
+ }
+ if err := tcpLn.Close(); err != nil {
+ log.Debugf("close TCP probe listener: %s", err)
+ }
+
return true
}
diff --git a/client/internal/dns/service_listener_test.go b/client/internal/dns/service_listener_test.go
new file mode 100644
index 000000000..90ef71d19
--- /dev/null
+++ b/client/internal/dns/service_listener_test.go
@@ -0,0 +1,86 @@
+package dns
+
+import (
+ "fmt"
+ "net"
+ "net/netip"
+ "testing"
+ "time"
+
+ "github.com/miekg/dns"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+func TestServiceViaListener_TCPAndUDP(t *testing.T) {
+ handler := dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) {
+ m := new(dns.Msg)
+ m.SetReply(r)
+ m.Answer = append(m.Answer, &dns.A{
+ Hdr: dns.RR_Header{Name: r.Question[0].Name, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 60},
+ A: net.ParseIP("192.0.2.1"),
+ })
+ if err := w.WriteMsg(m); err != nil {
+ t.Logf("write msg: %v", err)
+ }
+ })
+
+ // Create a service using a custom address to avoid needing root
+ svc := newServiceViaListener(nil, nil, nil)
+ svc.dnsMux.Handle(".", handler)
+
+ // Bind both transports up front to avoid TOCTOU races.
+ udpAddr := net.UDPAddrFromAddrPort(netip.AddrPortFrom(customIP, 0))
+ udpConn, err := net.ListenUDP("udp", udpAddr)
+ if err != nil {
+ t.Skip("cannot bind to 127.0.0.153, skipping")
+ }
+ port := uint16(udpConn.LocalAddr().(*net.UDPAddr).Port)
+
+ tcpAddr := net.TCPAddrFromAddrPort(netip.AddrPortFrom(customIP, port))
+ tcpLn, err := net.ListenTCP("tcp", tcpAddr)
+ if err != nil {
+ udpConn.Close()
+ t.Skip("cannot bind TCP on same port, skipping")
+ }
+
+ addr := fmt.Sprintf("%s:%d", customIP, port)
+ svc.server.PacketConn = udpConn
+ svc.tcpServer.Listener = tcpLn
+ svc.listenIP = customIP
+ svc.listenPort = port
+
+ go func() {
+ if err := svc.server.ActivateAndServe(); err != nil {
+ t.Logf("udp server: %v", err)
+ }
+ }()
+ go func() {
+ if err := svc.tcpServer.ActivateAndServe(); err != nil {
+ t.Logf("tcp server: %v", err)
+ }
+ }()
+ svc.listenerIsRunning = true
+
+ defer func() {
+ require.NoError(t, svc.Stop())
+ }()
+
+ q := new(dns.Msg).SetQuestion("example.com.", dns.TypeA)
+
+ // Test UDP query
+ udpClient := &dns.Client{Net: "udp", Timeout: 2 * time.Second}
+ udpResp, _, err := udpClient.Exchange(q, addr)
+ require.NoError(t, err, "UDP query should succeed")
+ require.NotNil(t, udpResp)
+ require.NotEmpty(t, udpResp.Answer)
+ assert.Contains(t, udpResp.Answer[0].String(), "192.0.2.1", "UDP response should contain expected IP")
+
+ // Test TCP query
+ tcpClient := &dns.Client{Net: "tcp", Timeout: 2 * time.Second}
+ tcpResp, _, err := tcpClient.Exchange(q, addr)
+ require.NoError(t, err, "TCP query should succeed")
+ require.NotNil(t, tcpResp)
+ require.NotEmpty(t, tcpResp.Answer)
+ assert.Contains(t, tcpResp.Answer[0].String(), "192.0.2.1", "TCP response should contain expected IP")
+}
diff --git a/client/internal/dns/service_memory.go b/client/internal/dns/service_memory.go
index 6ef0ab526..e8c036076 100644
--- a/client/internal/dns/service_memory.go
+++ b/client/internal/dns/service_memory.go
@@ -1,6 +1,7 @@
package dns
import (
+ "errors"
"fmt"
"net/netip"
"sync"
@@ -10,6 +11,7 @@ import (
"github.com/miekg/dns"
log "github.com/sirupsen/logrus"
+ "github.com/netbirdio/netbird/client/iface"
nbnet "github.com/netbirdio/netbird/client/net"
)
@@ -18,7 +20,8 @@ type ServiceViaMemory struct {
dnsMux *dns.ServeMux
runtimeIP netip.Addr
runtimePort int
- udpFilterHookID string
+ tcpDNS *tcpDNSServer
+ tcpHookSet bool
listenerIsRunning bool
listenerFlagLock sync.Mutex
}
@@ -28,14 +31,13 @@ func NewServiceViaMemory(wgIface WGIface) *ServiceViaMemory {
if err != nil {
log.Errorf("get last ip from network: %v", err)
}
- s := &ServiceViaMemory{
+
+ return &ServiceViaMemory{
wgInterface: wgIface,
dnsMux: dns.NewServeMux(),
-
runtimeIP: lastIP,
runtimePort: DefaultPort,
}
- return s
}
func (s *ServiceViaMemory) Listen() error {
@@ -46,10 +48,8 @@ func (s *ServiceViaMemory) Listen() error {
return nil
}
- var err error
- s.udpFilterHookID, err = s.filterDNSTraffic()
- if err != nil {
- return fmt.Errorf("filter dns traffice: %w", err)
+ if err := s.filterDNSTraffic(); err != nil {
+ return fmt.Errorf("filter dns traffic: %w", err)
}
s.listenerIsRunning = true
@@ -57,19 +57,29 @@ func (s *ServiceViaMemory) Listen() error {
return nil
}
-func (s *ServiceViaMemory) Stop() {
+func (s *ServiceViaMemory) Stop() error {
s.listenerFlagLock.Lock()
defer s.listenerFlagLock.Unlock()
if !s.listenerIsRunning {
- return
+ return nil
}
- if err := s.wgInterface.GetFilter().RemovePacketHook(s.udpFilterHookID); err != nil {
- log.Errorf("unable to remove DNS packet hook: %s", err)
+ filter := s.wgInterface.GetFilter()
+ if filter != nil {
+ filter.SetUDPPacketHook(s.runtimeIP, uint16(s.runtimePort), nil)
+ if s.tcpHookSet {
+ filter.SetTCPPacketHook(s.runtimeIP, uint16(s.runtimePort), nil)
+ }
+ }
+
+ if s.tcpDNS != nil {
+ s.tcpDNS.Stop()
}
s.listenerIsRunning = false
+
+ return nil
}
func (s *ServiceViaMemory) RegisterMux(pattern string, handler dns.Handler) {
@@ -88,10 +98,18 @@ func (s *ServiceViaMemory) RuntimeIP() netip.Addr {
return s.runtimeIP
}
-func (s *ServiceViaMemory) filterDNSTraffic() (string, error) {
+func (s *ServiceViaMemory) filterDNSTraffic() error {
filter := s.wgInterface.GetFilter()
if filter == nil {
- return "", fmt.Errorf("can't set DNS filter, filter not initialized")
+ return errors.New("DNS filter not initialized")
+ }
+
+ // Create TCP DNS server lazily here since the device may not exist at construction time.
+ if s.tcpDNS == nil {
+ if dev := s.wgInterface.GetDevice(); dev != nil {
+ // MTU only affects TCP segment sizing; DNS messages are small so this has no practical impact.
+ s.tcpDNS = newTCPDNSServer(s.dnsMux, dev.Device, s.runtimeIP, uint16(s.runtimePort), iface.DefaultMTU)
+ }
}
firstLayerDecoder := layers.LayerTypeIPv4
@@ -100,12 +118,16 @@ func (s *ServiceViaMemory) filterDNSTraffic() (string, error) {
}
hook := func(packetData []byte) bool {
- // Decode the packet
packet := gopacket.NewPacket(packetData, firstLayerDecoder, gopacket.Default)
- // Get the UDP layer
udpLayer := packet.Layer(layers.LayerTypeUDP)
- udp := udpLayer.(*layers.UDP)
+ if udpLayer == nil {
+ return true
+ }
+ udp, ok := udpLayer.(*layers.UDP)
+ if !ok {
+ return true
+ }
msg := new(dns.Msg)
if err := msg.Unpack(udp.Payload); err != nil {
@@ -113,13 +135,30 @@ func (s *ServiceViaMemory) filterDNSTraffic() (string, error) {
return true
}
- writer := responseWriter{
- packet: packet,
- device: s.wgInterface.GetDevice().Device,
+ dev := s.wgInterface.GetDevice()
+ if dev == nil {
+ return true
}
- go s.dnsMux.ServeDNS(&writer, msg)
+
+ writer := &responseWriter{
+ remote: remoteAddrFromPacket(packet),
+ packet: packet,
+ device: dev.Device,
+ }
+ go s.dnsMux.ServeDNS(writer, msg)
return true
}
- return filter.AddUDPPacketHook(false, s.runtimeIP, uint16(s.runtimePort), hook), nil
+ filter.SetUDPPacketHook(s.runtimeIP, uint16(s.runtimePort), hook)
+
+ if s.tcpDNS != nil {
+ tcpHook := func(packetData []byte) bool {
+ s.tcpDNS.InjectPacket(packetData)
+ return true
+ }
+ filter.SetTCPPacketHook(s.runtimeIP, uint16(s.runtimePort), tcpHook)
+ s.tcpHookSet = true
+ }
+
+ return nil
}
diff --git a/client/internal/dns/tcpstack.go b/client/internal/dns/tcpstack.go
new file mode 100644
index 000000000..88e72e767
--- /dev/null
+++ b/client/internal/dns/tcpstack.go
@@ -0,0 +1,444 @@
+package dns
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "net"
+ "net/netip"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/miekg/dns"
+ log "github.com/sirupsen/logrus"
+ "golang.zx2c4.com/wireguard/tun"
+ "gvisor.dev/gvisor/pkg/buffer"
+ "gvisor.dev/gvisor/pkg/tcpip"
+ "gvisor.dev/gvisor/pkg/tcpip/adapters/gonet"
+ "gvisor.dev/gvisor/pkg/tcpip/header"
+ "gvisor.dev/gvisor/pkg/tcpip/network/ipv4"
+ "gvisor.dev/gvisor/pkg/tcpip/stack"
+ "gvisor.dev/gvisor/pkg/tcpip/transport/tcp"
+ "gvisor.dev/gvisor/pkg/waiter"
+)
+
+const (
+ dnsTCPReceiveWindow = 8192
+ dnsTCPMaxInFlight = 16
+ dnsTCPIdleTimeout = 30 * time.Second
+ dnsTCPReadTimeout = 5 * time.Second
+)
+
+// tcpDNSServer is an on-demand TCP DNS server backed by a minimal gvisor stack.
+// It is started lazily when a truncated DNS response is detected and shuts down
+// after a period of inactivity to conserve resources.
+type tcpDNSServer struct {
+ mu sync.Mutex
+ s *stack.Stack
+ ep *dnsEndpoint
+ mux *dns.ServeMux
+ tunDev tun.Device
+ ip netip.Addr
+ port uint16
+ mtu uint16
+
+ running bool
+ closed bool
+ timerID uint64
+ timer *time.Timer
+}
+
+func newTCPDNSServer(mux *dns.ServeMux, tunDev tun.Device, ip netip.Addr, port uint16, mtu uint16) *tcpDNSServer {
+ return &tcpDNSServer{
+ mux: mux,
+ tunDev: tunDev,
+ ip: ip,
+ port: port,
+ mtu: mtu,
+ }
+}
+
+// InjectPacket ensures the stack is running and delivers a raw IP packet into
+// the gvisor stack for TCP processing. Combining both operations under a single
+// lock prevents a race where the idle timer could stop the stack between
+// start and delivery.
+func (t *tcpDNSServer) InjectPacket(payload []byte) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+
+ if t.closed {
+ return
+ }
+
+ if !t.running {
+ if err := t.startLocked(); err != nil {
+ log.Errorf("failed to start TCP DNS stack: %v", err)
+ return
+ }
+ t.running = true
+ log.Debugf("TCP DNS stack started on %s:%d (triggered by %s)", t.ip, t.port, srcAddrFromPacket(payload))
+ }
+ t.resetTimerLocked()
+
+ ep := t.ep
+ if ep == nil || ep.dispatcher == nil {
+ return
+ }
+
+ pkt := stack.NewPacketBuffer(stack.PacketBufferOptions{
+ Payload: buffer.MakeWithData(payload),
+ })
+ // NOTE(review): in current gvisor, DeliverNetworkPacket takes its own reference; verify whether the caller must DecRef this buffer afterwards to avoid a refcount leak.
+ ep.dispatcher.DeliverNetworkPacket(ipv4.ProtocolNumber, pkt)
+}
+
+// Stop tears down the gvisor stack and releases resources permanently.
+// After Stop, InjectPacket becomes a no-op.
+func (t *tcpDNSServer) Stop() {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+
+ t.stopLocked()
+ t.closed = true
+}
+
+func (t *tcpDNSServer) startLocked() error {
+ // TODO: add ipv6.NewProtocol when IPv6 overlay support lands.
+ s := stack.New(stack.Options{
+ NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol},
+ TransportProtocols: []stack.TransportProtocolFactory{tcp.NewProtocol},
+ HandleLocal: false,
+ })
+
+ nicID := tcpip.NICID(1)
+ ep := &dnsEndpoint{
+ tunDev: t.tunDev,
+ }
+ ep.mtu.Store(uint32(t.mtu))
+
+ if err := s.CreateNIC(nicID, ep); err != nil {
+ s.Close()
+ s.Wait()
+ return fmt.Errorf("create NIC: %v", err)
+ }
+
+ protoAddr := tcpip.ProtocolAddress{
+ Protocol: ipv4.ProtocolNumber,
+ AddressWithPrefix: tcpip.AddressWithPrefix{
+ Address: tcpip.AddrFromSlice(t.ip.AsSlice()),
+ PrefixLen: 32,
+ },
+ }
+ if err := s.AddProtocolAddress(nicID, protoAddr, stack.AddressProperties{}); err != nil {
+ s.Close()
+ s.Wait()
+ return fmt.Errorf("add protocol address: %s", err)
+ }
+
+ if err := s.SetPromiscuousMode(nicID, true); err != nil {
+ s.Close()
+ s.Wait()
+ return fmt.Errorf("set promiscuous mode: %s", err)
+ }
+ if err := s.SetSpoofing(nicID, true); err != nil {
+ s.Close()
+ s.Wait()
+ return fmt.Errorf("set spoofing: %s", err)
+ }
+
+ defaultSubnet, err := tcpip.NewSubnet(
+ tcpip.AddrFrom4([4]byte{0, 0, 0, 0}),
+ tcpip.MaskFromBytes([]byte{0, 0, 0, 0}),
+ )
+ if err != nil {
+ s.Close()
+ s.Wait()
+ return fmt.Errorf("create default subnet: %w", err)
+ }
+
+ s.SetRouteTable([]tcpip.Route{
+ {Destination: defaultSubnet, NIC: nicID},
+ })
+
+ tcpFwd := tcp.NewForwarder(s, dnsTCPReceiveWindow, dnsTCPMaxInFlight, func(r *tcp.ForwarderRequest) {
+ t.handleTCPDNS(r)
+ })
+ s.SetTransportProtocolHandler(tcp.ProtocolNumber, tcpFwd.HandlePacket)
+
+ t.s = s
+ t.ep = ep
+ return nil
+}
+
+func (t *tcpDNSServer) stopLocked() {
+ if !t.running {
+ return
+ }
+
+ if t.timer != nil {
+ t.timer.Stop()
+ t.timer = nil
+ }
+
+ if t.s != nil {
+ t.s.Close()
+ t.s.Wait()
+ t.s = nil
+ }
+ t.ep = nil
+ t.running = false
+
+ log.Debugf("TCP DNS stack stopped")
+}
+
+func (t *tcpDNSServer) resetTimerLocked() {
+ if t.timer != nil {
+ t.timer.Stop()
+ }
+ t.timerID++
+ id := t.timerID
+ t.timer = time.AfterFunc(dnsTCPIdleTimeout, func() {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+
+ // Only stop if this timer is still the active one.
+ // A racing InjectPacket may have replaced it.
+ if t.timerID != id {
+ return
+ }
+ t.stopLocked()
+ })
+}
+
+func (t *tcpDNSServer) handleTCPDNS(r *tcp.ForwarderRequest) {
+ id := r.ID()
+
+ wq := waiter.Queue{}
+ ep, epErr := r.CreateEndpoint(&wq)
+ if epErr != nil {
+ log.Debugf("TCP DNS: failed to create endpoint: %v", epErr)
+ r.Complete(true)
+ return
+ }
+ r.Complete(false)
+
+ conn := gonet.NewTCPConn(&wq, ep)
+ defer func() {
+ if err := conn.Close(); err != nil {
+ log.Tracef("TCP DNS: close conn: %v", err)
+ }
+ }()
+
+ // Reset idle timer on activity
+ t.mu.Lock()
+ t.resetTimerLocked()
+ t.mu.Unlock()
+
+ localAddr := &net.TCPAddr{
+ IP: id.LocalAddress.AsSlice(),
+ Port: int(id.LocalPort),
+ }
+ remoteAddr := &net.TCPAddr{
+ IP: id.RemoteAddress.AsSlice(),
+ Port: int(id.RemotePort),
+ }
+
+ for {
+ if err := conn.SetReadDeadline(time.Now().Add(dnsTCPReadTimeout)); err != nil {
+ log.Debugf("TCP DNS: set deadline for %s: %v", remoteAddr, err)
+ break
+ }
+
+ msg, err := readTCPDNSMessage(conn)
+ if err != nil {
+ if !errors.Is(err, io.EOF) && !errors.Is(err, io.ErrUnexpectedEOF) {
+ log.Debugf("TCP DNS: read from %s: %v", remoteAddr, err)
+ }
+ break
+ }
+
+ writer := &tcpResponseWriter{
+ conn: conn,
+ localAddr: localAddr,
+ remoteAddr: remoteAddr,
+ }
+ t.mux.ServeDNS(writer, msg)
+ }
+}
+
+// dnsEndpoint implements stack.LinkEndpoint for writing packets back via the tun device.
+type dnsEndpoint struct {
+ dispatcher stack.NetworkDispatcher
+ tunDev tun.Device
+ mtu atomic.Uint32
+}
+
+func (e *dnsEndpoint) Attach(dispatcher stack.NetworkDispatcher) { e.dispatcher = dispatcher }
+func (e *dnsEndpoint) IsAttached() bool { return e.dispatcher != nil }
+func (e *dnsEndpoint) MTU() uint32 { return e.mtu.Load() }
+func (e *dnsEndpoint) Capabilities() stack.LinkEndpointCapabilities { return stack.CapabilityNone }
+func (e *dnsEndpoint) MaxHeaderLength() uint16 { return 0 }
+func (e *dnsEndpoint) LinkAddress() tcpip.LinkAddress { return "" }
+func (e *dnsEndpoint) Wait() { /* no async work */ }
+func (e *dnsEndpoint) ARPHardwareType() header.ARPHardwareType { return header.ARPHardwareNone }
+func (e *dnsEndpoint) AddHeader(*stack.PacketBuffer) { /* IP-level endpoint, no link header */ }
+func (e *dnsEndpoint) ParseHeader(*stack.PacketBuffer) bool { return true }
+func (e *dnsEndpoint) Close() { /* lifecycle managed by tcpDNSServer */ }
+func (e *dnsEndpoint) SetLinkAddress(tcpip.LinkAddress) { /* no link address for tun */ }
+func (e *dnsEndpoint) SetMTU(mtu uint32) { e.mtu.Store(mtu) }
+func (e *dnsEndpoint) SetOnCloseAction(func()) { /* not needed */ }
+
+const tunPacketOffset = 40
+
+func (e *dnsEndpoint) WritePackets(pkts stack.PacketBufferList) (int, tcpip.Error) {
+ var written int
+ for _, pkt := range pkts.AsSlice() {
+ data := stack.PayloadSince(pkt.NetworkHeader())
+ if data == nil {
+ continue
+ }
+
+ raw := data.AsSlice()
+ buf := make([]byte, tunPacketOffset, tunPacketOffset+len(raw))
+ buf = append(buf, raw...)
+ data.Release()
+
+ if _, err := e.tunDev.Write([][]byte{buf}, tunPacketOffset); err != nil {
+ log.Tracef("TCP DNS endpoint: failed to write packet: %v", err)
+ continue
+ }
+ written++
+ }
+ return written, nil
+}
+
+// tcpResponseWriter implements dns.ResponseWriter for TCP DNS connections.
+type tcpResponseWriter struct {
+ conn *gonet.TCPConn
+ localAddr net.Addr
+ remoteAddr net.Addr
+}
+
+func (w *tcpResponseWriter) LocalAddr() net.Addr {
+ return w.localAddr
+}
+
+func (w *tcpResponseWriter) RemoteAddr() net.Addr {
+ return w.remoteAddr
+}
+
+func (w *tcpResponseWriter) WriteMsg(msg *dns.Msg) error {
+ data, err := msg.Pack()
+ if err != nil {
+ return fmt.Errorf("pack: %w", err)
+ }
+
+ // DNS TCP: 2-byte length prefix + message
+ buf := make([]byte, 2+len(data))
+ buf[0] = byte(len(data) >> 8)
+ buf[1] = byte(len(data))
+ copy(buf[2:], data)
+
+ if _, err = w.conn.Write(buf); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (w *tcpResponseWriter) Write(data []byte) (int, error) {
+ buf := make([]byte, 2+len(data))
+ buf[0] = byte(len(data) >> 8)
+ buf[1] = byte(len(data))
+ copy(buf[2:], data)
+ if _, err := w.conn.Write(buf); err != nil {
+ return 0, err
+ }
+ return len(data), nil
+}
+
+func (w *tcpResponseWriter) Close() error {
+ return w.conn.Close()
+}
+
+func (w *tcpResponseWriter) TsigStatus() error { return nil }
+func (w *tcpResponseWriter) TsigTimersOnly(bool) { /* TSIG not supported */ }
+func (w *tcpResponseWriter) Hijack() { /* not supported */ }
+
+// readTCPDNSMessage reads a single DNS message from a TCP connection (length-prefixed).
+func readTCPDNSMessage(conn *gonet.TCPConn) (*dns.Msg, error) {
+ // DNS over TCP uses a 2-byte length prefix
+ lenBuf := make([]byte, 2)
+ if _, err := io.ReadFull(conn, lenBuf); err != nil {
+ return nil, fmt.Errorf("read length: %w", err)
+ }
+
+ msgLen := int(lenBuf[0])<<8 | int(lenBuf[1])
+ if msgLen == 0 || msgLen > 65535 {
+ return nil, fmt.Errorf("invalid message length: %d", msgLen)
+ }
+
+ msgBuf := make([]byte, msgLen)
+ if _, err := io.ReadFull(conn, msgBuf); err != nil {
+ return nil, fmt.Errorf("read message: %w", err)
+ }
+
+ msg := new(dns.Msg)
+ if err := msg.Unpack(msgBuf); err != nil {
+ return nil, fmt.Errorf("unpack: %w", err)
+ }
+ return msg, nil
+}
+
+// srcAddrFromPacket extracts the source IP:port from a raw IP+TCP packet for logging.
+// Supports both IPv4 and IPv6.
+func srcAddrFromPacket(pkt []byte) netip.AddrPort {
+ if len(pkt) == 0 {
+ return netip.AddrPort{}
+ }
+
+ srcIP, transportOffset := srcIPFromPacket(pkt)
+ if !srcIP.IsValid() || len(pkt) < transportOffset+2 {
+ return netip.AddrPort{}
+ }
+
+ srcPort := uint16(pkt[transportOffset])<<8 | uint16(pkt[transportOffset+1])
+ return netip.AddrPortFrom(srcIP.Unmap(), srcPort)
+}
+
+func srcIPFromPacket(pkt []byte) (netip.Addr, int) {
+ switch header.IPVersion(pkt) {
+ case 4:
+ return srcIPv4(pkt)
+ case 6:
+ return srcIPv6(pkt)
+ default:
+ return netip.Addr{}, 0
+ }
+}
+
+func srcIPv4(pkt []byte) (netip.Addr, int) {
+ if len(pkt) < header.IPv4MinimumSize {
+ return netip.Addr{}, 0
+ }
+ hdr := header.IPv4(pkt)
+ src := hdr.SourceAddress()
+ ip, ok := netip.AddrFromSlice(src.AsSlice())
+ if !ok {
+ return netip.Addr{}, 0
+ }
+ return ip, int(hdr.HeaderLength())
+}
+
+func srcIPv6(pkt []byte) (netip.Addr, int) {
+ if len(pkt) < header.IPv6MinimumSize {
+ return netip.Addr{}, 0
+ }
+ hdr := header.IPv6(pkt)
+ src := hdr.SourceAddress()
+ ip, ok := netip.AddrFromSlice(src.AsSlice())
+ if !ok {
+ return netip.Addr{}, 0
+ }
+ return ip, header.IPv6MinimumSize
+}
diff --git a/client/internal/dns/upstream.go b/client/internal/dns/upstream.go
index 5b8135132..746b73ca7 100644
--- a/client/internal/dns/upstream.go
+++ b/client/internal/dns/upstream.go
@@ -41,10 +41,61 @@ const (
reactivatePeriod = 30 * time.Second
probeTimeout = 2 * time.Second
+
+ // maximum IPv4 header size (60 bytes, with options) + UDP header size (8 bytes),
+ // used to derive the maximum DNS UDP payload from the tunnel MTU.
+ ipUDPHeaderSize = 60 + 8
)
const testRecord = "com."
+const (
+ protoUDP = "udp"
+ protoTCP = "tcp"
+)
+
+type dnsProtocolKey struct{}
+
+// contextWithDNSProtocol stores the inbound DNS protocol ("udp" or "tcp") in context.
+func contextWithDNSProtocol(ctx context.Context, network string) context.Context {
+ return context.WithValue(ctx, dnsProtocolKey{}, network)
+}
+
+// dnsProtocolFromContext retrieves the inbound DNS protocol from context.
+func dnsProtocolFromContext(ctx context.Context) string {
+ if ctx == nil {
+ return ""
+ }
+ if v, ok := ctx.Value(dnsProtocolKey{}).(string); ok {
+ return v
+ }
+ return ""
+}
+
+type upstreamProtocolKey struct{}
+
+// upstreamProtocolResult holds the protocol used for the upstream exchange.
+// Stored as a pointer in context so the exchange function can set it.
+type upstreamProtocolResult struct {
+ protocol string
+}
+
+// contextWithupstreamProtocolResult stores a mutable result holder in the context.
+func contextWithupstreamProtocolResult(ctx context.Context) (context.Context, *upstreamProtocolResult) {
+ r := &upstreamProtocolResult{}
+ return context.WithValue(ctx, upstreamProtocolKey{}, r), r
+}
+
+// setUpstreamProtocol sets the upstream protocol on the result holder in context, if present.
+func setUpstreamProtocol(ctx context.Context, protocol string) {
+ if ctx == nil {
+ return
+ }
+ if r, ok := ctx.Value(upstreamProtocolKey{}).(*upstreamProtocolResult); ok && r != nil {
+ r.protocol = protocol
+ }
+}
+
type upstreamClient interface {
exchange(ctx context.Context, upstream string, r *dns.Msg) (*dns.Msg, time.Duration, error)
}
@@ -138,7 +189,16 @@ func (u *upstreamResolverBase) ServeDNS(w dns.ResponseWriter, r *dns.Msg) {
return
}
- ok, failures := u.tryUpstreamServers(w, r, logger)
+ // Propagate inbound protocol so upstream exchange can use TCP directly
+ // when the request came in over TCP.
+ ctx := u.ctx
+ if addr := w.RemoteAddr(); addr != nil {
+ network := addr.Network()
+ ctx = contextWithDNSProtocol(ctx, network)
+ resutil.SetMeta(w, "protocol", network)
+ }
+
+ ok, failures := u.tryUpstreamServers(ctx, w, r, logger)
if len(failures) > 0 {
u.logUpstreamFailures(r.Question[0].Name, failures, ok, logger)
}
@@ -153,7 +213,7 @@ func (u *upstreamResolverBase) prepareRequest(r *dns.Msg) {
}
}
-func (u *upstreamResolverBase) tryUpstreamServers(w dns.ResponseWriter, r *dns.Msg, logger *log.Entry) (bool, []upstreamFailure) {
+func (u *upstreamResolverBase) tryUpstreamServers(ctx context.Context, w dns.ResponseWriter, r *dns.Msg, logger *log.Entry) (bool, []upstreamFailure) {
timeout := u.upstreamTimeout
if len(u.upstreamServers) > 1 {
maxTotal := 5 * time.Second
@@ -168,7 +228,7 @@ func (u *upstreamResolverBase) tryUpstreamServers(w dns.ResponseWriter, r *dns.M
var failures []upstreamFailure
for _, upstream := range u.upstreamServers {
- if failure := u.queryUpstream(w, r, upstream, timeout, logger); failure != nil {
+ if failure := u.queryUpstream(ctx, w, r, upstream, timeout, logger); failure != nil {
failures = append(failures, *failure)
} else {
return true, failures
@@ -178,15 +238,17 @@ func (u *upstreamResolverBase) tryUpstreamServers(w dns.ResponseWriter, r *dns.M
}
// queryUpstream queries a single upstream server. Returns nil on success, or failure info to try next upstream.
-func (u *upstreamResolverBase) queryUpstream(w dns.ResponseWriter, r *dns.Msg, upstream netip.AddrPort, timeout time.Duration, logger *log.Entry) *upstreamFailure {
+func (u *upstreamResolverBase) queryUpstream(parentCtx context.Context, w dns.ResponseWriter, r *dns.Msg, upstream netip.AddrPort, timeout time.Duration, logger *log.Entry) *upstreamFailure {
var rm *dns.Msg
var t time.Duration
var err error
var startTime time.Time
+ var upstreamProto *upstreamProtocolResult
func() {
- ctx, cancel := context.WithTimeout(u.ctx, timeout)
+ ctx, cancel := context.WithTimeout(parentCtx, timeout)
defer cancel()
+ ctx, upstreamProto = contextWithupstreamProtocolResult(ctx)
startTime = time.Now()
rm, t, err = u.upstreamClient.exchange(ctx, upstream.String(), r)
}()
@@ -203,7 +265,7 @@ func (u *upstreamResolverBase) queryUpstream(w dns.ResponseWriter, r *dns.Msg, u
return &upstreamFailure{upstream: upstream, reason: dns.RcodeToString[rm.Rcode]}
}
- u.writeSuccessResponse(w, rm, upstream, r.Question[0].Name, t, logger)
+ u.writeSuccessResponse(w, rm, upstream, r.Question[0].Name, t, upstreamProto, logger)
return nil
}
@@ -220,10 +282,13 @@ func (u *upstreamResolverBase) handleUpstreamError(err error, upstream netip.Add
return &upstreamFailure{upstream: upstream, reason: reason}
}
-func (u *upstreamResolverBase) writeSuccessResponse(w dns.ResponseWriter, rm *dns.Msg, upstream netip.AddrPort, domain string, t time.Duration, logger *log.Entry) bool {
+func (u *upstreamResolverBase) writeSuccessResponse(w dns.ResponseWriter, rm *dns.Msg, upstream netip.AddrPort, domain string, t time.Duration, upstreamProto *upstreamProtocolResult, logger *log.Entry) bool {
u.successCount.Add(1)
resutil.SetMeta(w, "upstream", upstream.String())
+ if upstreamProto != nil && upstreamProto.protocol != "" {
+ resutil.SetMeta(w, "upstream_protocol", upstreamProto.protocol)
+ }
// Clear Zero bit from external responses to prevent upstream servers from
// manipulating our internal fallthrough signaling mechanism
@@ -428,13 +493,42 @@ func (u *upstreamResolverBase) testNameserver(baseCtx context.Context, externalC
return err
}
+// clientUDPMaxSize returns the maximum UDP response size the client accepts.
+func clientUDPMaxSize(r *dns.Msg) int {
+ if opt := r.IsEdns0(); opt != nil {
+ return int(opt.UDPSize())
+ }
+ return dns.MinMsgSize
+}
+
// ExchangeWithFallback exchanges a DNS message with the upstream server.
// It first tries to use UDP, and if it is truncated, it falls back to TCP.
+// If the inbound request came over TCP (via context), it skips the UDP attempt.
// If the passed context is nil, this will use Exchange instead of ExchangeContext.
func ExchangeWithFallback(ctx context.Context, client *dns.Client, r *dns.Msg, upstream string) (*dns.Msg, time.Duration, error) {
- // MTU - ip + udp headers
- // Note: this could be sent out on an interface that is not ours, but higher MTU settings could break truncation handling.
- client.UDPSize = uint16(currentMTU - (60 + 8))
+ // If the request came in over TCP, go straight to TCP upstream.
+ if dnsProtocolFromContext(ctx) == protoTCP {
+ tcpClient := *client
+ tcpClient.Net = protoTCP
+ rm, t, err := tcpClient.ExchangeContext(ctx, r, upstream)
+ if err != nil {
+ return nil, t, fmt.Errorf("with tcp: %w", err)
+ }
+ setUpstreamProtocol(ctx, protoTCP)
+ return rm, t, nil
+ }
+
+ clientMaxSize := clientUDPMaxSize(r)
+
+ // Cap EDNS0 to our tunnel MTU so the upstream doesn't send a
+ // response larger than our read buffer.
+ // Note: the query could be sent out on an interface that is not ours,
+ // but higher MTU settings could break truncation handling.
+ maxUDPPayload := uint16(currentMTU - ipUDPHeaderSize)
+ client.UDPSize = maxUDPPayload
+ if opt := r.IsEdns0(); opt != nil && opt.UDPSize() > maxUDPPayload {
+ opt.SetUDPSize(maxUDPPayload)
+ }
var (
rm *dns.Msg
@@ -453,25 +547,32 @@ func ExchangeWithFallback(ctx context.Context, client *dns.Client, r *dns.Msg, u
}
if rm == nil || !rm.MsgHdr.Truncated {
+ setUpstreamProtocol(ctx, protoUDP)
return rm, t, nil
}
- log.Tracef("udp response for domain=%s type=%v class=%v is truncated, trying TCP.",
- r.Question[0].Name, r.Question[0].Qtype, r.Question[0].Qclass)
+ // TODO: if the upstream's truncated UDP response already contains more
+ // data than the client's buffer, we could truncate locally and skip
+ // the TCP retry.
- client.Net = "tcp"
+ tcpClient := *client
+ tcpClient.Net = protoTCP
if ctx == nil {
- rm, t, err = client.Exchange(r, upstream)
+ rm, t, err = tcpClient.Exchange(r, upstream)
} else {
- rm, t, err = client.ExchangeContext(ctx, r, upstream)
+ rm, t, err = tcpClient.ExchangeContext(ctx, r, upstream)
}
if err != nil {
return nil, t, fmt.Errorf("with tcp: %w", err)
}
- // TODO: once TCP is implemented, rm.Truncate() if the request came in over UDP
+ setUpstreamProtocol(ctx, protoTCP)
+
+ if rm.Len() > clientMaxSize {
+ rm.Truncate(clientMaxSize)
+ }
return rm, t, nil
}
@@ -479,18 +580,46 @@ func ExchangeWithFallback(ctx context.Context, client *dns.Client, r *dns.Msg, u
// ExchangeWithNetstack performs a DNS exchange using netstack for dialing.
// This is needed when netstack is enabled to reach peer IPs through the tunnel.
func ExchangeWithNetstack(ctx context.Context, nsNet *netstack.Net, r *dns.Msg, upstream string) (*dns.Msg, error) {
- reply, err := netstackExchange(ctx, nsNet, r, upstream, "udp")
+ // If request came in over TCP, go straight to TCP upstream
+ if dnsProtocolFromContext(ctx) == protoTCP {
+ rm, err := netstackExchange(ctx, nsNet, r, upstream, protoTCP)
+ if err != nil {
+ return nil, err
+ }
+ setUpstreamProtocol(ctx, protoTCP)
+ return rm, nil
+ }
+
+ clientMaxSize := clientUDPMaxSize(r)
+
+ // Cap EDNS0 to our tunnel MTU so the upstream doesn't send a
+ // response larger than what we can read over UDP.
+ maxUDPPayload := uint16(currentMTU - ipUDPHeaderSize)
+ if opt := r.IsEdns0(); opt != nil && opt.UDPSize() > maxUDPPayload {
+ opt.SetUDPSize(maxUDPPayload)
+ }
+
+ reply, err := netstackExchange(ctx, nsNet, r, upstream, protoUDP)
if err != nil {
return nil, err
}
- // If response is truncated, retry with TCP
if reply != nil && reply.MsgHdr.Truncated {
- log.Tracef("udp response for domain=%s type=%v class=%v is truncated, trying TCP",
- r.Question[0].Name, r.Question[0].Qtype, r.Question[0].Qclass)
- return netstackExchange(ctx, nsNet, r, upstream, "tcp")
+ rm, err := netstackExchange(ctx, nsNet, r, upstream, protoTCP)
+ if err != nil {
+ return nil, err
+ }
+
+ setUpstreamProtocol(ctx, protoTCP)
+ if rm.Len() > clientMaxSize {
+ rm.Truncate(clientMaxSize)
+ }
+
+ return rm, nil
}
+ setUpstreamProtocol(ctx, protoUDP)
+
return reply, nil
}
@@ -511,7 +640,7 @@ func netstackExchange(ctx context.Context, nsNet *netstack.Net, r *dns.Msg, upst
}
}
- dnsConn := &dns.Conn{Conn: conn}
+ dnsConn := &dns.Conn{Conn: conn, UDPSize: uint16(currentMTU - ipUDPHeaderSize)}
if err := dnsConn.WriteMsg(r); err != nil {
return nil, fmt.Errorf("write %s message: %w", network, err)
diff --git a/client/internal/dns/upstream_android.go b/client/internal/dns/upstream_android.go
index d7cff377b..ee1ca42fe 100644
--- a/client/internal/dns/upstream_android.go
+++ b/client/internal/dns/upstream_android.go
@@ -51,7 +51,7 @@ func (u *upstreamResolver) exchangeWithinVPN(ctx context.Context, upstream strin
upstreamExchangeClient := &dns.Client{
Timeout: ClientTimeout,
}
- return upstreamExchangeClient.ExchangeContext(ctx, r, upstream)
+ return ExchangeWithFallback(ctx, upstreamExchangeClient, r, upstream)
}
// exchangeWithoutVPN protect the UDP socket by Android SDK to avoid to goes through the VPN
@@ -76,7 +76,7 @@ func (u *upstreamResolver) exchangeWithoutVPN(ctx context.Context, upstream stri
Timeout: timeout,
}
- return upstreamExchangeClient.ExchangeContext(ctx, r, upstream)
+ return ExchangeWithFallback(ctx, upstreamExchangeClient, r, upstream)
}
func (u *upstreamResolver) isLocalResolver(upstream string) bool {
diff --git a/client/internal/dns/upstream_test.go b/client/internal/dns/upstream_test.go
index ab164c30b..1797fdad8 100644
--- a/client/internal/dns/upstream_test.go
+++ b/client/internal/dns/upstream_test.go
@@ -475,3 +475,298 @@ func TestFormatFailures(t *testing.T) {
})
}
}
+
+func TestDNSProtocolContext(t *testing.T) {
+ t.Run("roundtrip udp", func(t *testing.T) {
+ ctx := contextWithDNSProtocol(context.Background(), protoUDP)
+ assert.Equal(t, protoUDP, dnsProtocolFromContext(ctx))
+ })
+
+ t.Run("roundtrip tcp", func(t *testing.T) {
+ ctx := contextWithDNSProtocol(context.Background(), protoTCP)
+ assert.Equal(t, protoTCP, dnsProtocolFromContext(ctx))
+ })
+
+ t.Run("missing returns empty", func(t *testing.T) {
+ assert.Equal(t, "", dnsProtocolFromContext(context.Background()))
+ })
+}
+
+func TestExchangeWithFallback_TCPContext(t *testing.T) {
+ // Start a local DNS server that responds on TCP only
+ tcpHandler := dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) {
+ m := new(dns.Msg)
+ m.SetReply(r)
+ m.Answer = append(m.Answer, &dns.A{
+ Hdr: dns.RR_Header{Name: r.Question[0].Name, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 60},
+ A: net.ParseIP("10.0.0.1"),
+ })
+ if err := w.WriteMsg(m); err != nil {
+ t.Logf("write msg: %v", err)
+ }
+ })
+
+ tcpServer := &dns.Server{
+ Addr: "127.0.0.1:0",
+ Net: "tcp",
+ Handler: tcpHandler,
+ }
+
+ tcpLn, err := net.Listen("tcp", "127.0.0.1:0")
+ require.NoError(t, err)
+ tcpServer.Listener = tcpLn
+
+ go func() {
+ if err := tcpServer.ActivateAndServe(); err != nil {
+ t.Logf("tcp server: %v", err)
+ }
+ }()
+ defer func() {
+ _ = tcpServer.Shutdown()
+ }()
+
+ upstream := tcpLn.Addr().String()
+
+ // With TCP context, should connect directly via TCP without trying UDP
+ ctx := contextWithDNSProtocol(context.Background(), protoTCP)
+ client := &dns.Client{Timeout: 2 * time.Second}
+ r := new(dns.Msg).SetQuestion("example.com.", dns.TypeA)
+
+ rm, _, err := ExchangeWithFallback(ctx, client, r, upstream)
+ require.NoError(t, err)
+ require.NotNil(t, rm)
+ require.NotEmpty(t, rm.Answer)
+ assert.Contains(t, rm.Answer[0].String(), "10.0.0.1")
+}
+
+func TestExchangeWithFallback_UDPFallbackToTCP(t *testing.T) {
+ // UDP handler returns a truncated response to trigger TCP retry.
+ udpHandler := dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) {
+ m := new(dns.Msg)
+ m.SetReply(r)
+ m.Truncated = true
+ if err := w.WriteMsg(m); err != nil {
+ t.Logf("write msg: %v", err)
+ }
+ })
+
+ // TCP handler returns the full answer.
+ tcpHandler := dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) {
+ m := new(dns.Msg)
+ m.SetReply(r)
+ m.Answer = append(m.Answer, &dns.A{
+ Hdr: dns.RR_Header{Name: r.Question[0].Name, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 60},
+ A: net.ParseIP("10.0.0.3"),
+ })
+ if err := w.WriteMsg(m); err != nil {
+ t.Logf("write msg: %v", err)
+ }
+ })
+
+ udpPC, err := net.ListenPacket("udp", "127.0.0.1:0")
+ require.NoError(t, err)
+ addr := udpPC.LocalAddr().String()
+
+ udpServer := &dns.Server{
+ PacketConn: udpPC,
+ Net: "udp",
+ Handler: udpHandler,
+ }
+
+ tcpLn, err := net.Listen("tcp", addr)
+ require.NoError(t, err)
+
+ tcpServer := &dns.Server{
+ Listener: tcpLn,
+ Net: "tcp",
+ Handler: tcpHandler,
+ }
+
+ go func() {
+ if err := udpServer.ActivateAndServe(); err != nil {
+ t.Logf("udp server: %v", err)
+ }
+ }()
+ go func() {
+ if err := tcpServer.ActivateAndServe(); err != nil {
+ t.Logf("tcp server: %v", err)
+ }
+ }()
+ defer func() {
+ _ = udpServer.Shutdown()
+ _ = tcpServer.Shutdown()
+ }()
+
+ ctx := context.Background()
+ client := &dns.Client{Timeout: 2 * time.Second}
+ r := new(dns.Msg).SetQuestion("example.com.", dns.TypeA)
+
+ rm, _, err := ExchangeWithFallback(ctx, client, r, addr)
+ require.NoError(t, err, "should fall back to TCP after truncated UDP response")
+ require.NotNil(t, rm)
+ require.NotEmpty(t, rm.Answer, "TCP response should contain the full answer")
+ assert.Contains(t, rm.Answer[0].String(), "10.0.0.3")
+ assert.False(t, rm.Truncated, "TCP response should not be truncated")
+}
+
+func TestExchangeWithFallback_TCPContextSkipsUDP(t *testing.T) {
+ // Start only a TCP server (no UDP). With TCP context it should succeed.
+ tcpHandler := dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) {
+ m := new(dns.Msg)
+ m.SetReply(r)
+ m.Answer = append(m.Answer, &dns.A{
+ Hdr: dns.RR_Header{Name: r.Question[0].Name, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 60},
+ A: net.ParseIP("10.0.0.2"),
+ })
+ if err := w.WriteMsg(m); err != nil {
+ t.Logf("write msg: %v", err)
+ }
+ })
+
+ tcpLn, err := net.Listen("tcp", "127.0.0.1:0")
+ require.NoError(t, err)
+
+ tcpServer := &dns.Server{
+ Listener: tcpLn,
+ Net: "tcp",
+ Handler: tcpHandler,
+ }
+
+ go func() {
+ if err := tcpServer.ActivateAndServe(); err != nil {
+ t.Logf("tcp server: %v", err)
+ }
+ }()
+ defer func() {
+ _ = tcpServer.Shutdown()
+ }()
+
+ upstream := tcpLn.Addr().String()
+
+ // TCP context: should skip UDP entirely and go directly to TCP
+ ctx := contextWithDNSProtocol(context.Background(), protoTCP)
+ client := &dns.Client{Timeout: 2 * time.Second}
+ r := new(dns.Msg).SetQuestion("example.com.", dns.TypeA)
+
+ rm, _, err := ExchangeWithFallback(ctx, client, r, upstream)
+ require.NoError(t, err)
+ require.NotNil(t, rm)
+ require.NotEmpty(t, rm.Answer)
+ assert.Contains(t, rm.Answer[0].String(), "10.0.0.2")
+
+ // Without TCP context, trying to reach a TCP-only server via UDP should fail
+ ctx2 := context.Background()
+ client2 := &dns.Client{Timeout: 500 * time.Millisecond}
+ _, _, err = ExchangeWithFallback(ctx2, client2, r, upstream)
+ assert.Error(t, err, "should fail when no UDP server and no TCP context")
+}
+
+func TestExchangeWithFallback_EDNS0Capped(t *testing.T) {
+ // Verify that a client EDNS0 larger than our MTU-derived limit gets
+ // capped in the outgoing request so the upstream doesn't send a
+ // response larger than our read buffer.
+ var receivedUDPSize uint16
+ udpHandler := dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) {
+ if opt := r.IsEdns0(); opt != nil {
+ receivedUDPSize = opt.UDPSize()
+ }
+ m := new(dns.Msg)
+ m.SetReply(r)
+ m.Answer = append(m.Answer, &dns.A{
+ Hdr: dns.RR_Header{Name: r.Question[0].Name, Rrtype: dns.TypeA, Class: dns.ClassINET, Ttl: 60},
+ A: net.ParseIP("10.0.0.1"),
+ })
+ if err := w.WriteMsg(m); err != nil {
+ t.Logf("write msg: %v", err)
+ }
+ })
+
+ udpPC, err := net.ListenPacket("udp", "127.0.0.1:0")
+ require.NoError(t, err)
+ addr := udpPC.LocalAddr().String()
+
+ udpServer := &dns.Server{PacketConn: udpPC, Net: "udp", Handler: udpHandler}
+ go func() { _ = udpServer.ActivateAndServe() }()
+ t.Cleanup(func() { _ = udpServer.Shutdown() })
+
+ ctx := context.Background()
+ client := &dns.Client{Timeout: 2 * time.Second}
+ r := new(dns.Msg).SetQuestion("example.com.", dns.TypeA)
+ r.SetEdns0(4096, false)
+
+ rm, _, err := ExchangeWithFallback(ctx, client, r, addr)
+ require.NoError(t, err)
+ require.NotNil(t, rm)
+
+ expectedMax := uint16(currentMTU - ipUDPHeaderSize)
+ assert.Equal(t, expectedMax, receivedUDPSize,
+ "upstream should see capped EDNS0, not the client's 4096")
+}
+
+func TestExchangeWithFallback_TCPTruncatesToClientSize(t *testing.T) {
+ // When the client advertises a large EDNS0 (4096) and the upstream
+ // truncates, the TCP response should NOT be truncated since the full
+ // answer fits within the client's original buffer.
+ udpHandler := dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) {
+ m := new(dns.Msg)
+ m.SetReply(r)
+ m.Truncated = true
+ if err := w.WriteMsg(m); err != nil {
+ t.Logf("write msg: %v", err)
+ }
+ })
+
+ tcpHandler := dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) {
+ m := new(dns.Msg)
+ m.SetReply(r)
+ // Add enough records to exceed MTU but fit within 4096
+ for i := range 20 {
+ m.Answer = append(m.Answer, &dns.TXT{
+ Hdr: dns.RR_Header{Name: r.Question[0].Name, Rrtype: dns.TypeTXT, Class: dns.ClassINET, Ttl: 60},
+ Txt: []string{fmt.Sprintf("record-%d-padding-data-to-make-it-longer", i)},
+ })
+ }
+ if err := w.WriteMsg(m); err != nil {
+ t.Logf("write msg: %v", err)
+ }
+ })
+
+ udpPC, err := net.ListenPacket("udp", "127.0.0.1:0")
+ require.NoError(t, err)
+ addr := udpPC.LocalAddr().String()
+
+ udpServer := &dns.Server{PacketConn: udpPC, Net: "udp", Handler: udpHandler}
+ tcpLn, err := net.Listen("tcp", addr)
+ require.NoError(t, err)
+ tcpServer := &dns.Server{Listener: tcpLn, Net: "tcp", Handler: tcpHandler}
+
+ go func() { _ = udpServer.ActivateAndServe() }()
+ go func() { _ = tcpServer.ActivateAndServe() }()
+ t.Cleanup(func() {
+ _ = udpServer.Shutdown()
+ _ = tcpServer.Shutdown()
+ })
+
+ ctx := context.Background()
+ client := &dns.Client{Timeout: 2 * time.Second}
+
+ // Client with large buffer: should get all records without truncation
+ r := new(dns.Msg).SetQuestion("example.com.", dns.TypeTXT)
+ r.SetEdns0(4096, false)
+
+ rm, _, err := ExchangeWithFallback(ctx, client, r, addr)
+ require.NoError(t, err)
+ require.NotNil(t, rm)
+ assert.Len(t, rm.Answer, 20, "large EDNS0 client should get all records")
+ assert.False(t, rm.Truncated, "response should not be truncated for large buffer client")
+
+ // Client with small buffer: should get truncated response
+ r2 := new(dns.Msg).SetQuestion("example.com.", dns.TypeTXT)
+ r2.SetEdns0(512, false)
+
+ rm2, _, err := ExchangeWithFallback(ctx, &dns.Client{Timeout: 2 * time.Second}, r2, addr)
+ require.NoError(t, err)
+ require.NotNil(t, rm2)
+ assert.Less(t, len(rm2.Answer), 20, "small EDNS0 client should get fewer records")
+ assert.True(t, rm2.Truncated, "response should be truncated for small buffer client")
+}
diff --git a/client/internal/dnsfwd/forwarder.go b/client/internal/dnsfwd/forwarder.go
index 5c7cb31fc..2e8ef84ab 100644
--- a/client/internal/dnsfwd/forwarder.go
+++ b/client/internal/dnsfwd/forwarder.go
@@ -237,8 +237,8 @@ func (f *DNSForwarder) writeResponse(logger *log.Entry, w dns.ResponseWriter, re
return
}
- logger.Tracef("response: domain=%s rcode=%s answers=%s took=%s",
- qname, dns.RcodeToString[resp.Rcode], resutil.FormatAnswers(resp.Answer), time.Since(startTime))
+ logger.Tracef("response: domain=%s rcode=%s answers=%s size=%dB took=%s",
+ qname, dns.RcodeToString[resp.Rcode], resutil.FormatAnswers(resp.Answer), resp.Len(), time.Since(startTime))
}
// udpResponseWriter wraps a dns.ResponseWriter to handle UDP-specific truncation.
@@ -263,20 +263,28 @@ func (u *udpResponseWriter) WriteMsg(resp *dns.Msg) error {
func (f *DNSForwarder) handleDNSQueryUDP(w dns.ResponseWriter, query *dns.Msg) {
startTime := time.Now()
- logger := log.WithFields(log.Fields{
+ fields := log.Fields{
"request_id": resutil.GenerateRequestID(),
"dns_id": fmt.Sprintf("%04x", query.Id),
- })
+ }
+ if addr := w.RemoteAddr(); addr != nil {
+ fields["client"] = addr.String()
+ }
+ logger := log.WithFields(fields)
f.handleDNSQuery(logger, &udpResponseWriter{ResponseWriter: w, query: query}, query, startTime)
}
func (f *DNSForwarder) handleDNSQueryTCP(w dns.ResponseWriter, query *dns.Msg) {
startTime := time.Now()
- logger := log.WithFields(log.Fields{
+ fields := log.Fields{
"request_id": resutil.GenerateRequestID(),
"dns_id": fmt.Sprintf("%04x", query.Id),
- })
+ }
+ if addr := w.RemoteAddr(); addr != nil {
+ fields["client"] = addr.String()
+ }
+ logger := log.WithFields(fields)
f.handleDNSQuery(logger, w, query, startTime)
}
diff --git a/client/internal/engine.go b/client/internal/engine.go
index 7b100bd0c..be2d8bbf3 100644
--- a/client/internal/engine.go
+++ b/client/internal/engine.go
@@ -46,6 +46,7 @@ import (
"github.com/netbirdio/netbird/client/internal/peer/guard"
icemaker "github.com/netbirdio/netbird/client/internal/peer/ice"
"github.com/netbirdio/netbird/client/internal/peerstore"
+ "github.com/netbirdio/netbird/client/internal/portforward"
"github.com/netbirdio/netbird/client/internal/profilemanager"
"github.com/netbirdio/netbird/client/internal/relay"
"github.com/netbirdio/netbird/client/internal/rosenpass"
@@ -210,9 +211,10 @@ type Engine struct {
// checks are the client-applied posture checks that need to be evaluated on the client
checks []*mgmProto.Checks
- relayManager *relayClient.Manager
- stateManager *statemanager.Manager
- srWatcher *guard.SRWatcher
+ relayManager *relayClient.Manager
+ stateManager *statemanager.Manager
+ portForwardManager *portforward.Manager
+ srWatcher *guard.SRWatcher
// Sync response persistence (protected by syncRespMux)
syncRespMux sync.RWMutex
@@ -259,26 +261,27 @@ func NewEngine(
mobileDep MobileDependency,
) *Engine {
engine := &Engine{
- clientCtx: clientCtx,
- clientCancel: clientCancel,
- signal: services.SignalClient,
- signaler: peer.NewSignaler(services.SignalClient, config.WgPrivateKey),
- mgmClient: services.MgmClient,
- relayManager: services.RelayManager,
- peerStore: peerstore.NewConnStore(),
- syncMsgMux: &sync.Mutex{},
- config: config,
- mobileDep: mobileDep,
- STUNs: []*stun.URI{},
- TURNs: []*stun.URI{},
- networkSerial: 0,
- statusRecorder: services.StatusRecorder,
- stateManager: services.StateManager,
- checks: services.Checks,
- probeStunTurn: relay.NewStunTurnProbe(relay.DefaultCacheTTL),
- jobExecutor: jobexec.NewExecutor(),
- clientMetrics: services.ClientMetrics,
- updateManager: services.UpdateManager,
+ clientCtx: clientCtx,
+ clientCancel: clientCancel,
+ signal: services.SignalClient,
+ signaler: peer.NewSignaler(services.SignalClient, config.WgPrivateKey),
+ mgmClient: services.MgmClient,
+ relayManager: services.RelayManager,
+ peerStore: peerstore.NewConnStore(),
+ syncMsgMux: &sync.Mutex{},
+ config: config,
+ mobileDep: mobileDep,
+ STUNs: []*stun.URI{},
+ TURNs: []*stun.URI{},
+ networkSerial: 0,
+ statusRecorder: services.StatusRecorder,
+ stateManager: services.StateManager,
+ portForwardManager: portforward.NewManager(),
+ checks: services.Checks,
+ probeStunTurn: relay.NewStunTurnProbe(relay.DefaultCacheTTL),
+ jobExecutor: jobexec.NewExecutor(),
+ clientMetrics: services.ClientMetrics,
+ updateManager: services.UpdateManager,
}
log.Infof("I am: %s", config.WgPrivateKey.PublicKey().String())
@@ -500,7 +503,7 @@ func (e *Engine) Start(netbirdConfig *mgmProto.NetbirdConfig, mgmtURL *url.URL)
e.routeManager.SetRouteChangeListener(e.mobileDep.NetworkChangeListener)
e.dnsServer.SetRouteChecker(func(ip netip.Addr) bool {
- for _, routes := range e.routeManager.GetClientRoutes() {
+ for _, routes := range e.routeManager.GetSelectedClientRoutes() {
for _, r := range routes {
if r.Network.Contains(ip) {
return true
@@ -521,6 +524,11 @@ func (e *Engine) Start(netbirdConfig *mgmProto.NetbirdConfig, mgmtURL *url.URL)
return err
}
+ // Inject firewall into DNS server now that it's available.
+ // The DNS server is created before the firewall because the route manager
+ // depends on the DNS server, and the firewall depends on the wg interface.
+ e.dnsServer.SetFirewall(e.firewall)
+
e.udpMux, err = e.wgInterface.Up()
if err != nil {
log.Errorf("failed to pull up wgInterface [%s]: %s", e.wgInterface.Name(), err.Error())
@@ -532,6 +540,13 @@ func (e *Engine) Start(netbirdConfig *mgmProto.NetbirdConfig, mgmtURL *url.URL)
// conntrack entries from being created before the rules are in place
e.setupWGProxyNoTrack()
+ // Start after interface is up since port may have been resolved from 0 or changed if occupied
+ e.shutdownWg.Add(1)
+ go func() {
+ defer e.shutdownWg.Done()
+ e.portForwardManager.Start(e.ctx, uint16(e.config.WgPort))
+ }()
+
// Set the WireGuard interface for rosenpass after interface is up
if e.rpManager != nil {
e.rpManager.SetInterface(e.wgInterface)
@@ -1535,12 +1550,13 @@ func (e *Engine) createPeerConn(pubKey string, allowedIPs []netip.Prefix, agentV
}
serviceDependencies := peer.ServiceDependencies{
- StatusRecorder: e.statusRecorder,
- Signaler: e.signaler,
- IFaceDiscover: e.mobileDep.IFaceDiscover,
- RelayManager: e.relayManager,
- SrWatcher: e.srWatcher,
- MetricsRecorder: e.clientMetrics,
+ StatusRecorder: e.statusRecorder,
+ Signaler: e.signaler,
+ IFaceDiscover: e.mobileDep.IFaceDiscover,
+ RelayManager: e.relayManager,
+ SrWatcher: e.srWatcher,
+ PortForwardManager: e.portForwardManager,
+ MetricsRecorder: e.clientMetrics,
}
peerConn, err := peer.NewConn(config, serviceDependencies)
if err != nil {
@@ -1697,6 +1713,12 @@ func (e *Engine) close() {
if e.rpManager != nil {
_ = e.rpManager.Close()
}
+
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ defer cancel()
+ if err := e.portForwardManager.GracefullyStop(ctx); err != nil {
+ log.Warnf("failed to gracefully stop port forwarding manager: %s", err)
+ }
}
func (e *Engine) readInitialSettings() ([]*route.Route, *nbdns.Config, bool, error) {
@@ -1800,7 +1822,7 @@ func (e *Engine) newDnsServer(dnsConfig *nbdns.Config) (dns.Server, error) {
return dnsServer, nil
case "ios":
- dnsServer := dns.NewDefaultServerIos(e.ctx, e.wgInterface, e.mobileDep.DnsManager, e.statusRecorder, e.config.DisableDNS)
+ dnsServer := dns.NewDefaultServerIos(e.ctx, e.wgInterface, e.mobileDep.DnsManager, e.mobileDep.HostDNSAddresses, e.statusRecorder, e.config.DisableDNS)
return dnsServer, nil
default:
@@ -1837,6 +1859,11 @@ func (e *Engine) GetExposeManager() *expose.Manager {
return e.exposeManager
}
+// IsBlockInbound returns whether inbound connections are blocked.
+func (e *Engine) IsBlockInbound() bool {
+ return e.config.BlockInbound
+}
+
// GetClientMetrics returns the client metrics
func (e *Engine) GetClientMetrics() *metrics.ClientMetrics {
return e.clientMetrics
diff --git a/client/internal/engine_test.go b/client/internal/engine_test.go
index 77fe9049b..1f6fe384a 100644
--- a/client/internal/engine_test.go
+++ b/client/internal/engine_test.go
@@ -828,7 +828,7 @@ func TestEngine_UpdateNetworkMapWithRoutes(t *testing.T) {
WgPrivateKey: key,
WgPort: 33100,
MTU: iface.DefaultMTU,
- }, EngineServices{
+ }, EngineServices{
SignalClient: &signal.MockClient{},
MgmClient: &mgmt.MockClient{},
RelayManager: relayMgr,
@@ -1035,7 +1035,7 @@ func TestEngine_UpdateNetworkMapWithDNSUpdate(t *testing.T) {
WgPrivateKey: key,
WgPort: 33100,
MTU: iface.DefaultMTU,
- }, EngineServices{
+ }, EngineServices{
SignalClient: &signal.MockClient{},
MgmClient: &mgmt.MockClient{},
RelayManager: relayMgr,
@@ -1538,13 +1538,8 @@ func createEngine(ctx context.Context, cancel context.CancelFunc, setupKey strin
return nil, err
}
- publicKey, err := mgmtClient.GetServerPublicKey()
- if err != nil {
- return nil, err
- }
-
info := system.GetInfo(ctx)
- resp, err := mgmtClient.Register(*publicKey, setupKey, "", info, nil, nil)
+ resp, err := mgmtClient.Register(setupKey, "", info, nil, nil)
if err != nil {
return nil, err
}
@@ -1566,7 +1561,7 @@ func createEngine(ctx context.Context, cancel context.CancelFunc, setupKey strin
}
relayMgr := relayClient.NewManager(ctx, nil, key.PublicKey().String(), iface.DefaultMTU)
-e, err := NewEngine(ctx, cancel, conf, EngineServices{
+ e, err := NewEngine(ctx, cancel, conf, EngineServices{
SignalClient: signalClient,
MgmClient: mgmtClient,
RelayManager: relayMgr,
diff --git a/client/internal/peer/conn.go b/client/internal/peer/conn.go
index bea0725f2..8d1585b3f 100644
--- a/client/internal/peer/conn.go
+++ b/client/internal/peer/conn.go
@@ -22,6 +22,7 @@ import (
icemaker "github.com/netbirdio/netbird/client/internal/peer/ice"
"github.com/netbirdio/netbird/client/internal/peer/id"
"github.com/netbirdio/netbird/client/internal/peer/worker"
+ "github.com/netbirdio/netbird/client/internal/portforward"
"github.com/netbirdio/netbird/client/internal/stdnet"
"github.com/netbirdio/netbird/route"
relayClient "github.com/netbirdio/netbird/shared/relay/client"
@@ -45,6 +46,7 @@ type ServiceDependencies struct {
RelayManager *relayClient.Manager
SrWatcher *guard.SRWatcher
PeerConnDispatcher *dispatcher.ConnectionDispatcher
+ PortForwardManager *portforward.Manager
MetricsRecorder MetricsRecorder
}
@@ -87,16 +89,17 @@ type ConnConfig struct {
}
type Conn struct {
- Log *log.Entry
- mu sync.Mutex
- ctx context.Context
- ctxCancel context.CancelFunc
- config ConnConfig
- statusRecorder *Status
- signaler *Signaler
- iFaceDiscover stdnet.ExternalIFaceDiscover
- relayManager *relayClient.Manager
- srWatcher *guard.SRWatcher
+ Log *log.Entry
+ mu sync.Mutex
+ ctx context.Context
+ ctxCancel context.CancelFunc
+ config ConnConfig
+ statusRecorder *Status
+ signaler *Signaler
+ iFaceDiscover stdnet.ExternalIFaceDiscover
+ relayManager *relayClient.Manager
+ srWatcher *guard.SRWatcher
+ portForwardManager *portforward.Manager
onConnected func(remoteWireGuardKey string, remoteRosenpassPubKey []byte, wireGuardIP string, remoteRosenpassAddr string)
onDisconnected func(remotePeer string)
@@ -145,19 +148,20 @@ func NewConn(config ConnConfig, services ServiceDependencies) (*Conn, error) {
dumpState := newStateDump(config.Key, connLog, services.StatusRecorder)
var conn = &Conn{
- Log: connLog,
- config: config,
- statusRecorder: services.StatusRecorder,
- signaler: services.Signaler,
- iFaceDiscover: services.IFaceDiscover,
- relayManager: services.RelayManager,
- srWatcher: services.SrWatcher,
- statusRelay: worker.NewAtomicStatus(),
- statusICE: worker.NewAtomicStatus(),
- dumpState: dumpState,
- endpointUpdater: NewEndpointUpdater(connLog, config.WgConfig, isController(config)),
- wgWatcher: NewWGWatcher(connLog, config.WgConfig.WgInterface, config.Key, dumpState),
- metricsRecorder: services.MetricsRecorder,
+ Log: connLog,
+ config: config,
+ statusRecorder: services.StatusRecorder,
+ signaler: services.Signaler,
+ iFaceDiscover: services.IFaceDiscover,
+ relayManager: services.RelayManager,
+ srWatcher: services.SrWatcher,
+ portForwardManager: services.PortForwardManager,
+ statusRelay: worker.NewAtomicStatus(),
+ statusICE: worker.NewAtomicStatus(),
+ dumpState: dumpState,
+ endpointUpdater: NewEndpointUpdater(connLog, config.WgConfig, isController(config)),
+ wgWatcher: NewWGWatcher(connLog, config.WgConfig.WgInterface, config.Key, dumpState),
+ metricsRecorder: services.MetricsRecorder,
}
return conn, nil
diff --git a/client/internal/peer/worker_ice.go b/client/internal/peer/worker_ice.go
index edd70fb20..29bf5aaaa 100644
--- a/client/internal/peer/worker_ice.go
+++ b/client/internal/peer/worker_ice.go
@@ -16,6 +16,7 @@ import (
"github.com/netbirdio/netbird/client/iface/udpmux"
"github.com/netbirdio/netbird/client/internal/peer/conntype"
icemaker "github.com/netbirdio/netbird/client/internal/peer/ice"
+ "github.com/netbirdio/netbird/client/internal/portforward"
"github.com/netbirdio/netbird/client/internal/stdnet"
"github.com/netbirdio/netbird/route"
)
@@ -61,6 +62,9 @@ type WorkerICE struct {
// we record the last known state of the ICE agent to avoid duplicate on disconnected events
lastKnownState ice.ConnectionState
+
+ // portForwardAttempted tracks if we've already tried port forwarding this session
+ portForwardAttempted bool
}
func NewWorkerICE(ctx context.Context, log *log.Entry, config ConnConfig, conn *Conn, signaler *Signaler, ifaceDiscover stdnet.ExternalIFaceDiscover, statusRecorder *Status, hasRelayOnLocally bool) (*WorkerICE, error) {
@@ -214,6 +218,8 @@ func (w *WorkerICE) Close() {
}
func (w *WorkerICE) reCreateAgent(dialerCancel context.CancelFunc, candidates []ice.CandidateType) (*icemaker.ThreadSafeAgent, error) {
+ w.portForwardAttempted = false
+
agent, err := icemaker.NewAgent(w.ctx, w.iFaceDiscover, w.config.ICEConfig, candidates, w.localUfrag, w.localPwd)
if err != nil {
return nil, fmt.Errorf("create agent: %w", err)
@@ -370,6 +376,93 @@ func (w *WorkerICE) onICECandidate(candidate ice.Candidate) {
w.log.Errorf("failed signaling candidate to the remote peer %s %s", w.config.Key, err)
}
}()
+
+ if candidate.Type() == ice.CandidateTypeServerReflexive {
+ w.injectPortForwardedCandidate(candidate)
+ }
+}
+
+// injectPortForwardedCandidate signals an additional candidate using the pre-created port mapping.
+// It is a no-op when no port-forward manager is configured or no mapping is
+// ready, and it signals at most one forwarded candidate per agent session
+// (guarded by portForwardAttempted).
+func (w *WorkerICE) injectPortForwardedCandidate(srflxCandidate ice.Candidate) {
+ pfManager := w.conn.portForwardManager
+ if pfManager == nil {
+ return
+ }
+
+ // GetMapping returns a copy, so the fields below are stable for this call.
+ mapping := pfManager.GetMapping()
+ if mapping == nil {
+ return
+ }
+
+ // NOTE(review): portForwardAttempted is read/written under the muxAgent
+ // mutex here, while reCreateAgent resets it to false — confirm that reset
+ // happens under the same lock (or before any candidate callback can fire).
+ w.muxAgent.Lock()
+ if w.portForwardAttempted {
+ w.muxAgent.Unlock()
+ return
+ }
+ w.portForwardAttempted = true
+ w.muxAgent.Unlock()
+
+ forwardedCandidate, err := w.createForwardedCandidate(srflxCandidate, mapping)
+ if err != nil {
+ w.log.Warnf("create forwarded candidate: %v", err)
+ return
+ }
+
+ w.log.Debugf("injecting port-forwarded candidate: %s (mapping: %d -> %d via %s, priority: %d)",
+ forwardedCandidate.String(), mapping.InternalPort, mapping.ExternalPort, mapping.NATType, forwardedCandidate.Priority())
+
+ // Signal asynchronously, mirroring how the regular candidate is signaled
+ // in onICECandidate, so the ICE callback is never blocked on the network.
+ go func() {
+ if err := w.signaler.SignalICECandidate(forwardedCandidate, w.config.Key); err != nil {
+ w.log.Errorf("signal port-forwarded candidate: %v", err)
+ }
+ }()
+}
+
+// createForwardedCandidate creates a new server reflexive candidate with the forwarded port.
+// It uses the NAT gateway's external IP with the forwarded port.
+// The base srflx candidate supplies the network type, component, related
+// address, and (as fallback) the external address.
+func (w *WorkerICE) createForwardedCandidate(srflxCandidate ice.Candidate, mapping *portforward.Mapping) (ice.Candidate, error) {
+ var externalIP string
+ if mapping.ExternalIP != nil && !mapping.ExternalIP.IsUnspecified() {
+ externalIP = mapping.ExternalIP.String()
+ } else {
+ // Fallback to STUN-discovered address if NAT didn't provide external IP
+ externalIP = srflxCandidate.Address()
+ }
+
+ // Per RFC 8445, the related address for srflx is the base (host candidate address).
+ // If the original srflx has unspecified related address, use its own address as base.
+ // NOTE(review): this assumes RelatedAddress() is non-nil for srflx candidates
+ // (true for pion-generated srflx) — a nil guard would be safer; confirm.
+ relAddr := srflxCandidate.RelatedAddress().Address
+ if relAddr == "" || relAddr == "0.0.0.0" || relAddr == "::" {
+ relAddr = srflxCandidate.Address()
+ }
+
+ // Arbitrary +1000 boost on top of RFC 8445 priority to favor port-forwarded candidates
+ // over regular srflx during ICE connectivity checks.
+ priority := srflxCandidate.Priority() + 1000
+
+ candidate, err := ice.NewCandidateServerReflexive(&ice.CandidateServerReflexiveConfig{
+ Network: srflxCandidate.NetworkType().String(),
+ Address: externalIP,
+ Port: int(mapping.ExternalPort),
+ Component: srflxCandidate.Component(),
+ Priority: priority,
+ RelAddr: relAddr,
+ RelPort: int(mapping.InternalPort),
+ })
+ if err != nil {
+ return nil, fmt.Errorf("create candidate: %w", err)
+ }
+
+ // Copy the base candidate's extensions onto the forwarded candidate.
+ // `e` is a loop copy, so mutating it does not alter the source slice;
+ // the (possibly modified) copy is what gets added below.
+ // NOTE(review): the candidate-ID extension is set to the BASE srflx
+ // candidate's ID, presumably to link the forwarded candidate to its origin
+ // on the remote side — confirm this is intended rather than the new
+ // candidate's own ID.
+ for _, e := range srflxCandidate.Extensions() {
+ if e.Key == ice.ExtensionKeyCandidateID {
+ e.Value = srflxCandidate.ID()
+ }
+ if err := candidate.AddExtension(e); err != nil {
+ return nil, fmt.Errorf("add extension: %w", err)
+ }
+ }
+
+ return candidate, nil
+}
func (w *WorkerICE) onICESelectedCandidatePair(agent *icemaker.ThreadSafeAgent, c1, c2 ice.Candidate) {
@@ -411,10 +504,10 @@ func (w *WorkerICE) logSuccessfulPaths(agent *icemaker.ThreadSafeAgent) {
if !lok || !rok {
continue
}
- w.log.Debugf("successful ICE path %s: [%s %s %s] <-> [%s %s %s] rtt=%.3fms",
+ w.log.Debugf("successful ICE path %s: [%s %s %s:%d] <-> [%s %s %s:%d] rtt=%.3fms",
sessionID,
- local.NetworkType(), local.Type(), local.Address(),
- remote.NetworkType(), remote.Type(), remote.Address(),
+ local.NetworkType(), local.Type(), local.Address(), local.Port(),
+ remote.NetworkType(), remote.Type(), remote.Address(), remote.Port(),
stat.CurrentRoundTripTime*1000)
}
}
diff --git a/client/internal/portforward/env.go b/client/internal/portforward/env.go
new file mode 100644
index 000000000..ba83c79bf
--- /dev/null
+++ b/client/internal/portforward/env.go
@@ -0,0 +1,35 @@
+package portforward
+
+import (
+ "os"
+ "strconv"
+
+ log "github.com/sirupsen/logrus"
+)
+
+const (
+ envDisableNATMapper = "NB_DISABLE_NAT_MAPPER"
+ envDisablePCPHealthCheck = "NB_DISABLE_PCP_HEALTH_CHECK"
+)
+
+func isDisabledByEnv() bool {
+ return parseBoolEnv(envDisableNATMapper)
+}
+
+func isHealthCheckDisabled() bool {
+ return parseBoolEnv(envDisablePCPHealthCheck)
+}
+
+func parseBoolEnv(key string) bool {
+ val := os.Getenv(key)
+ if val == "" {
+ return false
+ }
+
+ disabled, err := strconv.ParseBool(val)
+ if err != nil {
+ log.Warnf("failed to parse %s: %v", key, err)
+ return false
+ }
+ return disabled
+}
diff --git a/client/internal/portforward/manager.go b/client/internal/portforward/manager.go
new file mode 100644
index 000000000..b0680160c
--- /dev/null
+++ b/client/internal/portforward/manager.go
@@ -0,0 +1,342 @@
+//go:build !js
+
+package portforward
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "regexp"
+ "sync"
+ "time"
+
+ "github.com/libp2p/go-nat"
+ log "github.com/sirupsen/logrus"
+
+ "github.com/netbirdio/netbird/client/internal/portforward/pcp"
+)
+
+const (
+ defaultMappingTTL = 2 * time.Hour
+ healthCheckInterval = 1 * time.Minute
+ discoveryTimeout = 10 * time.Second
+ mappingDescription = "NetBird"
+)
+
+// upnpErrPermanentLeaseOnly matches UPnP error 725 (OnlyPermanentLeasesSupported)
+// inside the SOAP fault XML, allowing whitespace/newlines between the errorCode
+// tags emitted by different router firmware. Anchoring on the tag avoids false
+// positives when an incidental "725" appears elsewhere in an error string.
+var upnpErrPermanentLeaseOnly = regexp.MustCompile(`<errorCode>\s*725\s*</errorCode>`)
+
+// Mapping represents an active NAT port mapping.
+type Mapping struct {
+ Protocol string
+ InternalPort uint16
+ ExternalPort uint16
+ ExternalIP net.IP
+ NATType string
+ // TTL is the lease duration. Zero means a permanent lease that never expires.
+ TTL time.Duration
+}
+
+// TODO: persist mapping state for crash recovery cleanup of permanent leases.
+// Currently not done because State.Cleanup requires NAT gateway re-discovery,
+// which blocks startup for ~10s when no gateway is present (affects all clients).
+
+type Manager struct {
+ cancel context.CancelFunc
+
+ mapping *Mapping
+ mappingLock sync.Mutex
+
+ wgPort uint16
+
+ done chan struct{}
+ stopCtx chan context.Context
+
+ // protect exported functions
+ mu sync.Mutex
+}
+
+// NewManager creates a new port forwarding manager.
+func NewManager() *Manager {
+ return &Manager{
+ stopCtx: make(chan context.Context, 1),
+ }
+}
+
+func (m *Manager) Start(ctx context.Context, wgPort uint16) {
+ m.mu.Lock()
+ if m.cancel != nil {
+ m.mu.Unlock()
+ return
+ }
+
+ if isDisabledByEnv() {
+ log.Infof("NAT port mapper disabled via %s", envDisableNATMapper)
+ m.mu.Unlock()
+ return
+ }
+
+ if wgPort == 0 {
+ log.Warnf("invalid WireGuard port 0; NAT mapping disabled")
+ m.mu.Unlock()
+ return
+ }
+ m.wgPort = wgPort
+
+ m.done = make(chan struct{})
+ defer close(m.done)
+
+ ctx, m.cancel = context.WithCancel(ctx)
+ m.mu.Unlock()
+
+ gateway, mapping, err := m.setup(ctx)
+ if err != nil {
+ log.Infof("port forwarding setup: %v", err)
+ return
+ }
+
+ m.mappingLock.Lock()
+ m.mapping = mapping
+ m.mappingLock.Unlock()
+
+ m.renewLoop(ctx, gateway, mapping.TTL)
+
+ select {
+ case cleanupCtx := <-m.stopCtx:
+ // block the Start while cleaned up gracefully
+ m.cleanup(cleanupCtx, gateway)
+ default:
+ // return Start immediately and cleanup in background
+ cleanupCtx, cleanupCancel := context.WithTimeout(context.Background(), 10*time.Second)
+ go func() {
+ defer cleanupCancel()
+ m.cleanup(cleanupCtx, gateway)
+ }()
+ }
+}
+
+// GetMapping returns the current mapping if ready, nil otherwise
+func (m *Manager) GetMapping() *Mapping {
+ m.mappingLock.Lock()
+ defer m.mappingLock.Unlock()
+
+ if m.mapping == nil {
+ return nil
+ }
+
+ mapping := *m.mapping
+ return &mapping
+}
+
+// GracefullyStop cancels the manager and attempts to delete the port mapping.
+// After GracefullyStop returns, the manager cannot be restarted.
+// It is safe to call more than once; subsequent calls wait for (or observe)
+// the completed shutdown and return nil.
+func (m *Manager) GracefullyStop(ctx context.Context) error {
+	m.mu.Lock()
+	defer m.mu.Unlock()
+
+	if m.cancel == nil {
+		return nil
+	}
+
+	// Send cleanup context before cancelling, so Start picks it up after renewLoop exits.
+	m.startTearDown(ctx)
+
+	m.cancel()
+	// Deliberately keep m.cancel non-nil: Start guards on it, which is what
+	// enforces the documented "cannot be restarted" contract. Clearing it here
+	// (as before) silently re-enabled Start, and a restarted instance could
+	// consume a stale, already-cancelled cleanup context buffered in stopCtx.
+
+	select {
+	case <-ctx.Done():
+		return ctx.Err()
+	case <-m.done:
+		return nil
+	}
+}
+
+func (m *Manager) setup(ctx context.Context) (nat.NAT, *Mapping, error) {
+ discoverCtx, discoverCancel := context.WithTimeout(ctx, discoveryTimeout)
+ defer discoverCancel()
+
+ gateway, err := discoverGateway(discoverCtx)
+ if err != nil {
+ return nil, nil, fmt.Errorf("discover gateway: %w", err)
+ }
+
+ log.Infof("discovered NAT gateway: %s", gateway.Type())
+
+ mapping, err := m.createMapping(ctx, gateway)
+ if err != nil {
+ return nil, nil, fmt.Errorf("create port mapping: %w", err)
+ }
+ return gateway, mapping, nil
+}
+
+func (m *Manager) createMapping(ctx context.Context, gateway nat.NAT) (*Mapping, error) {
+ ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
+ defer cancel()
+
+ ttl := defaultMappingTTL
+ externalPort, err := gateway.AddPortMapping(ctx, "udp", int(m.wgPort), mappingDescription, ttl)
+ if err != nil {
+ if !isPermanentLeaseRequired(err) {
+ return nil, err
+ }
+ log.Infof("gateway only supports permanent leases, retrying with indefinite duration")
+ ttl = 0
+ externalPort, err = gateway.AddPortMapping(ctx, "udp", int(m.wgPort), mappingDescription, ttl)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ externalIP, err := gateway.GetExternalAddress()
+ if err != nil {
+ log.Debugf("failed to get external address: %v", err)
+ }
+
+ mapping := &Mapping{
+ Protocol: "udp",
+ InternalPort: m.wgPort,
+ ExternalPort: uint16(externalPort),
+ ExternalIP: externalIP,
+ NATType: gateway.Type(),
+ TTL: ttl,
+ }
+
+ log.Infof("created port mapping: %d -> %d via %s (external IP: %s)",
+ m.wgPort, externalPort, gateway.Type(), externalIP)
+ return mapping, nil
+}
+
+func (m *Manager) renewLoop(ctx context.Context, gateway nat.NAT, ttl time.Duration) {
+ if ttl == 0 {
+ // Permanent mappings don't expire, just wait for cancellation
+ // but still run health checks for PCP gateways.
+ m.permanentLeaseLoop(ctx, gateway)
+ return
+ }
+
+ renewTicker := time.NewTicker(ttl / 2)
+ healthTicker := time.NewTicker(healthCheckInterval)
+ defer renewTicker.Stop()
+ defer healthTicker.Stop()
+
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-renewTicker.C:
+ if err := m.renewMapping(ctx, gateway); err != nil {
+ log.Warnf("failed to renew port mapping: %v", err)
+ continue
+ }
+ case <-healthTicker.C:
+ if m.checkHealthAndRecreate(ctx, gateway) {
+ renewTicker.Reset(ttl / 2)
+ }
+ }
+ }
+}
+
+func (m *Manager) permanentLeaseLoop(ctx context.Context, gateway nat.NAT) {
+ healthTicker := time.NewTicker(healthCheckInterval)
+ defer healthTicker.Stop()
+
+ for {
+ select {
+ case <-ctx.Done():
+ return
+ case <-healthTicker.C:
+ m.checkHealthAndRecreate(ctx, gateway)
+ }
+ }
+}
+
+func (m *Manager) checkHealthAndRecreate(ctx context.Context, gateway nat.NAT) bool {
+ if isHealthCheckDisabled() {
+ return false
+ }
+
+ m.mappingLock.Lock()
+ hasMapping := m.mapping != nil
+ m.mappingLock.Unlock()
+
+ if !hasMapping {
+ return false
+ }
+
+ pcpNAT, ok := gateway.(*pcp.NAT)
+ if !ok {
+ return false
+ }
+
+ ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
+ defer cancel()
+
+ epoch, serverRestarted, err := pcpNAT.CheckServerHealth(ctx)
+ if err != nil {
+ log.Debugf("PCP health check failed: %v", err)
+ return false
+ }
+
+ if serverRestarted {
+ log.Warnf("PCP server restart detected (epoch=%d), recreating port mapping", epoch)
+ if err := m.renewMapping(ctx, gateway); err != nil {
+ log.Errorf("failed to recreate port mapping after server restart: %v", err)
+ return false
+ }
+ return true
+ }
+
+ return false
+}
+
+// renewMapping re-requests the port mapping from the gateway to extend its
+// lease, updating the stored external port if the gateway assigned a new one.
+// Called from renewLoop and from checkHealthAndRecreate after a PCP restart.
+func (m *Manager) renewMapping(ctx context.Context, gateway nat.NAT) error {
+	ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
+	defer cancel()
+
+	// Snapshot under the lock: cleanup may set m.mapping to nil concurrently,
+	// and the previous unsynchronized reads of m.mapping raced with both
+	// cleanup (nil dereference) and GetMapping (torn copy).
+	m.mappingLock.Lock()
+	mapping := m.mapping
+	m.mappingLock.Unlock()
+	if mapping == nil {
+		return fmt.Errorf("no active mapping to renew")
+	}
+
+	externalPort, err := gateway.AddPortMapping(ctx, mapping.Protocol, int(mapping.InternalPort), mappingDescription, mapping.TTL)
+	if err != nil {
+		return fmt.Errorf("add port mapping: %w", err)
+	}
+
+	if uint16(externalPort) != mapping.ExternalPort {
+		log.Warnf("external port changed on renewal: %d -> %d (candidate may be stale)", mapping.ExternalPort, externalPort)
+		// Write under the lock so GetMapping never observes a half-updated struct.
+		m.mappingLock.Lock()
+		mapping.ExternalPort = uint16(externalPort)
+		m.mappingLock.Unlock()
+	}
+
+	log.Debugf("renewed port mapping: %d -> %d", mapping.InternalPort, mapping.ExternalPort)
+	return nil
+}
+
+func (m *Manager) cleanup(ctx context.Context, gateway nat.NAT) {
+ m.mappingLock.Lock()
+ mapping := m.mapping
+ m.mapping = nil
+ m.mappingLock.Unlock()
+
+ if mapping == nil {
+ return
+ }
+
+ if err := gateway.DeletePortMapping(ctx, mapping.Protocol, int(mapping.InternalPort)); err != nil {
+ log.Warnf("delete port mapping on stop: %v", err)
+ return
+ }
+
+ log.Infof("deleted port mapping for port %d", mapping.InternalPort)
+}
+
+func (m *Manager) startTearDown(ctx context.Context) {
+ select {
+ case m.stopCtx <- ctx:
+ default:
+ }
+}
+
+// isPermanentLeaseRequired checks if a UPnP error indicates the gateway only supports permanent leases (error 725).
+func isPermanentLeaseRequired(err error) bool {
+ return err != nil && upnpErrPermanentLeaseOnly.MatchString(err.Error())
+}
diff --git a/client/internal/portforward/manager_js.go b/client/internal/portforward/manager_js.go
new file mode 100644
index 000000000..36c55063b
--- /dev/null
+++ b/client/internal/portforward/manager_js.go
@@ -0,0 +1,39 @@
+package portforward
+
+import (
+ "context"
+ "net"
+ "time"
+)
+
+// Mapping represents an active NAT port mapping.
+type Mapping struct {
+ Protocol string
+ InternalPort uint16
+ ExternalPort uint16
+ ExternalIP net.IP
+ NATType string
+ // TTL is the lease duration. Zero means a permanent lease that never expires.
+ TTL time.Duration
+}
+
+// Manager is a stub for js/wasm builds where NAT-PMP/UPnP is not supported.
+type Manager struct{}
+
+// NewManager returns a stub manager for js/wasm builds.
+func NewManager() *Manager {
+ return &Manager{}
+}
+
+// Start is a no-op on js/wasm: NAT-PMP/UPnP is not available in browser environments.
+func (m *Manager) Start(context.Context, uint16) {
+ // no NAT traversal in wasm
+}
+
+// GracefullyStop is a no-op on js/wasm.
+func (m *Manager) GracefullyStop(context.Context) error { return nil }
+
+// GetMapping always returns nil on js/wasm.
+func (m *Manager) GetMapping() *Mapping {
+ return nil
+}
diff --git a/client/internal/portforward/manager_test.go b/client/internal/portforward/manager_test.go
new file mode 100644
index 000000000..1f66f9ccd
--- /dev/null
+++ b/client/internal/portforward/manager_test.go
@@ -0,0 +1,201 @@
+//go:build !js
+
+package portforward
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+)
+
+type mockNAT struct {
+ natType string
+ deviceAddr net.IP
+ externalAddr net.IP
+ internalAddr net.IP
+ mappings map[int]int
+ addMappingErr error
+ deleteMappingErr error
+ onlyPermanentLeases bool
+ lastTimeout time.Duration
+}
+
+func newMockNAT() *mockNAT {
+ return &mockNAT{
+ natType: "Mock-NAT",
+ deviceAddr: net.ParseIP("192.168.1.1"),
+ externalAddr: net.ParseIP("203.0.113.50"),
+ internalAddr: net.ParseIP("192.168.1.100"),
+ mappings: make(map[int]int),
+ }
+}
+
+func (m *mockNAT) Type() string {
+ return m.natType
+}
+
+func (m *mockNAT) GetDeviceAddress() (net.IP, error) {
+ return m.deviceAddr, nil
+}
+
+func (m *mockNAT) GetExternalAddress() (net.IP, error) {
+ return m.externalAddr, nil
+}
+
+func (m *mockNAT) GetInternalAddress() (net.IP, error) {
+ return m.internalAddr, nil
+}
+
+func (m *mockNAT) AddPortMapping(ctx context.Context, protocol string, internalPort int, description string, timeout time.Duration) (int, error) {
+ if m.addMappingErr != nil {
+ return 0, m.addMappingErr
+ }
+ if m.onlyPermanentLeases && timeout != 0 {
+ return 0, fmt.Errorf("SOAP fault. Code: | Explanation: | Detail: 725OnlyPermanentLeasesSupported")
+ }
+ externalPort := internalPort
+ m.mappings[internalPort] = externalPort
+ m.lastTimeout = timeout
+ return externalPort, nil
+}
+
+func (m *mockNAT) DeletePortMapping(ctx context.Context, protocol string, internalPort int) error {
+ if m.deleteMappingErr != nil {
+ return m.deleteMappingErr
+ }
+ delete(m.mappings, internalPort)
+ return nil
+}
+
+func TestManager_CreateMapping(t *testing.T) {
+ m := NewManager()
+ m.wgPort = 51820
+
+ gateway := newMockNAT()
+ mapping, err := m.createMapping(context.Background(), gateway)
+ require.NoError(t, err)
+ require.NotNil(t, mapping)
+
+ assert.Equal(t, "udp", mapping.Protocol)
+ assert.Equal(t, uint16(51820), mapping.InternalPort)
+ assert.Equal(t, uint16(51820), mapping.ExternalPort)
+ assert.Equal(t, "Mock-NAT", mapping.NATType)
+ assert.Equal(t, net.ParseIP("203.0.113.50").To4(), mapping.ExternalIP.To4())
+ assert.Equal(t, defaultMappingTTL, mapping.TTL)
+}
+
+func TestManager_GetMapping_ReturnsNilWhenNotReady(t *testing.T) {
+ m := NewManager()
+ assert.Nil(t, m.GetMapping())
+}
+
+func TestManager_GetMapping_ReturnsCopy(t *testing.T) {
+ m := NewManager()
+ m.mapping = &Mapping{
+ Protocol: "udp",
+ InternalPort: 51820,
+ ExternalPort: 51820,
+ }
+
+ mapping := m.GetMapping()
+ require.NotNil(t, mapping)
+ assert.Equal(t, uint16(51820), mapping.InternalPort)
+
+ // Mutating the returned copy should not affect the manager's mapping.
+ mapping.ExternalPort = 9999
+ assert.Equal(t, uint16(51820), m.GetMapping().ExternalPort)
+}
+
+func TestManager_Cleanup_DeletesMapping(t *testing.T) {
+ m := NewManager()
+ m.mapping = &Mapping{
+ Protocol: "udp",
+ InternalPort: 51820,
+ ExternalPort: 51820,
+ }
+
+ gateway := newMockNAT()
+ // Seed the mock so we can verify deletion.
+ gateway.mappings[51820] = 51820
+
+ m.cleanup(context.Background(), gateway)
+
+ _, exists := gateway.mappings[51820]
+ assert.False(t, exists, "mapping should be deleted from gateway")
+ assert.Nil(t, m.GetMapping(), "in-memory mapping should be cleared")
+}
+
+func TestManager_Cleanup_NilMapping(t *testing.T) {
+ m := NewManager()
+ gateway := newMockNAT()
+
+ // Should not panic or call gateway.
+ m.cleanup(context.Background(), gateway)
+}
+
+
+func TestManager_CreateMapping_PermanentLeaseFallback(t *testing.T) {
+ m := NewManager()
+ m.wgPort = 51820
+
+ gateway := newMockNAT()
+ gateway.onlyPermanentLeases = true
+
+ mapping, err := m.createMapping(context.Background(), gateway)
+ require.NoError(t, err)
+ require.NotNil(t, mapping)
+
+ assert.Equal(t, uint16(51820), mapping.InternalPort)
+ assert.Equal(t, time.Duration(0), mapping.TTL, "should return zero TTL for permanent lease")
+ assert.Equal(t, time.Duration(0), gateway.lastTimeout, "should have retried with zero duration")
+}
+
+func TestIsPermanentLeaseRequired(t *testing.T) {
+ tests := []struct {
+ name string
+ err error
+ expected bool
+ }{
+ {
+ name: "nil error",
+ err: nil,
+ expected: false,
+ },
+ {
+ name: "UPnP error 725",
+ err: fmt.Errorf("SOAP fault. Code: | Detail: 725OnlyPermanentLeasesSupported"),
+ expected: true,
+ },
+ {
+ name: "wrapped error with 725",
+ err: fmt.Errorf("add port mapping: %w", fmt.Errorf("Detail: 725")),
+ expected: true,
+ },
+ {
+ name: "error 725 with newlines in XML",
+ err: fmt.Errorf("\n 725\n"),
+ expected: true,
+ },
+ {
+ name: "bare 725 without XML tag",
+ err: fmt.Errorf("error code 725"),
+ expected: false,
+ },
+ {
+ name: "unrelated error",
+ err: fmt.Errorf("connection refused"),
+ expected: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ assert.Equal(t, tt.expected, isPermanentLeaseRequired(tt.err))
+ })
+ }
+}
diff --git a/client/internal/portforward/pcp/client.go b/client/internal/portforward/pcp/client.go
new file mode 100644
index 000000000..f6d243ef9
--- /dev/null
+++ b/client/internal/portforward/pcp/client.go
@@ -0,0 +1,408 @@
+package pcp
+
+import (
+ "context"
+ "crypto/rand"
+ "errors"
+ "fmt"
+ "net"
+ "net/netip"
+ "sync"
+ "time"
+
+ log "github.com/sirupsen/logrus"
+)
+
+const (
+ defaultTimeout = 3 * time.Second
+ responseBufferSize = 128
+
+ // RFC 6887 Section 8.1.1 retry timing
+ initialRetryDelay = 3 * time.Second
+ maxRetryDelay = 1024 * time.Second
+ maxRetries = 4 // 3s + 6s + 12s + 24s = 45s total worst case
+)
+
+// Client is a PCP protocol client.
+// All methods are safe for concurrent use.
+type Client struct {
+ gateway netip.Addr
+ timeout time.Duration
+
+ mu sync.Mutex
+ // localIP caches the resolved local IP address.
+ localIP netip.Addr
+ // lastEpoch is the last observed server epoch value.
+ lastEpoch uint32
+ // epochTime tracks when lastEpoch was received for state loss detection.
+ epochTime time.Time
+ // externalIP caches the external IP from the last successful MAP response.
+ externalIP netip.Addr
+ // epochStateLost is set when epoch indicates server restart.
+ epochStateLost bool
+}
+
+// NewClient creates a new PCP client for the gateway at the given IP.
+func NewClient(gateway net.IP) *Client {
+ addr, ok := netip.AddrFromSlice(gateway)
+ if !ok {
+ log.Debugf("invalid gateway IP: %v", gateway)
+ }
+ return &Client{
+ gateway: addr.Unmap(),
+ timeout: defaultTimeout,
+ }
+}
+
+// NewClientWithTimeout creates a new PCP client with a custom timeout.
+func NewClientWithTimeout(gateway net.IP, timeout time.Duration) *Client {
+ addr, ok := netip.AddrFromSlice(gateway)
+ if !ok {
+ log.Debugf("invalid gateway IP: %v", gateway)
+ }
+ return &Client{
+ gateway: addr.Unmap(),
+ timeout: timeout,
+ }
+}
+
+// SetLocalIP sets the local IP address to use in PCP requests.
+func (c *Client) SetLocalIP(ip net.IP) {
+ addr, ok := netip.AddrFromSlice(ip)
+ if !ok {
+ log.Debugf("invalid local IP: %v", ip)
+ }
+ c.mu.Lock()
+ c.localIP = addr.Unmap()
+ c.mu.Unlock()
+}
+
+// Gateway returns the gateway IP address.
+func (c *Client) Gateway() net.IP {
+ return c.gateway.AsSlice()
+}
+
+// Announce sends a PCP ANNOUNCE request to discover PCP support.
+// Returns the server's epoch time on success.
+func (c *Client) Announce(ctx context.Context) (epoch uint32, err error) {
+ localIP, err := c.getLocalIP()
+ if err != nil {
+ return 0, fmt.Errorf("get local IP: %w", err)
+ }
+
+ req := buildAnnounceRequest(localIP)
+ resp, err := c.sendRequest(ctx, req)
+ if err != nil {
+ return 0, fmt.Errorf("send announce: %w", err)
+ }
+
+ parsed, err := parseResponse(resp)
+ if err != nil {
+ return 0, fmt.Errorf("parse announce response: %w", err)
+ }
+
+ if parsed.ResultCode != ResultSuccess {
+ return 0, fmt.Errorf("PCP ANNOUNCE failed: %s", ResultCodeString(parsed.ResultCode))
+ }
+
+ c.mu.Lock()
+ if c.updateEpochLocked(parsed.Epoch) {
+ log.Warnf("PCP server epoch indicates state loss - mappings may need refresh")
+ }
+ c.mu.Unlock()
+ return parsed.Epoch, nil
+}
+
+// AddPortMapping requests a port mapping from the PCP server.
+func (c *Client) AddPortMapping(ctx context.Context, protocol string, internalPort int, lifetime time.Duration) (*MapResponse, error) {
+ return c.addPortMappingWithHint(ctx, protocol, internalPort, internalPort, netip.Addr{}, lifetime)
+}
+
+// AddPortMappingWithHint requests a port mapping with suggested external port and IP.
+// Use lifetime <= 0 to delete a mapping.
+func (c *Client) AddPortMappingWithHint(ctx context.Context, protocol string, internalPort, suggestedExtPort int, suggestedExtIP net.IP, lifetime time.Duration) (*MapResponse, error) {
+ var extIP netip.Addr
+ if suggestedExtIP != nil {
+ var ok bool
+ extIP, ok = netip.AddrFromSlice(suggestedExtIP)
+ if !ok {
+ log.Debugf("invalid suggested external IP: %v", suggestedExtIP)
+ }
+ extIP = extIP.Unmap()
+ }
+ return c.addPortMappingWithHint(ctx, protocol, internalPort, suggestedExtPort, extIP, lifetime)
+}
+
+func (c *Client) addPortMappingWithHint(ctx context.Context, protocol string, internalPort, suggestedExtPort int, suggestedExtIP netip.Addr, lifetime time.Duration) (*MapResponse, error) {
+ localIP, err := c.getLocalIP()
+ if err != nil {
+ return nil, fmt.Errorf("get local IP: %w", err)
+ }
+
+ proto, err := protocolNumber(protocol)
+ if err != nil {
+ return nil, fmt.Errorf("parse protocol: %w", err)
+ }
+
+ var nonce [12]byte
+ if _, err := rand.Read(nonce[:]); err != nil {
+ return nil, fmt.Errorf("generate nonce: %w", err)
+ }
+
+ // Convert lifetime to seconds. Lifetime 0 means delete, so only apply
+ // default for positive durations that round to 0 seconds.
+ var lifetimeSec uint32
+ if lifetime > 0 {
+ lifetimeSec = uint32(lifetime.Seconds())
+ if lifetimeSec == 0 {
+ lifetimeSec = DefaultLifetime
+ }
+ }
+
+ req := buildMapRequest(localIP, nonce, proto, uint16(internalPort), uint16(suggestedExtPort), suggestedExtIP, lifetimeSec)
+
+ resp, err := c.sendRequest(ctx, req)
+ if err != nil {
+ return nil, fmt.Errorf("send map request: %w", err)
+ }
+
+ mapResp, err := parseMapResponse(resp)
+ if err != nil {
+ return nil, fmt.Errorf("parse map response: %w", err)
+ }
+
+ if mapResp.Nonce != nonce {
+ return nil, fmt.Errorf("nonce mismatch in response")
+ }
+
+ if mapResp.Protocol != proto {
+ return nil, fmt.Errorf("protocol mismatch: requested %d, got %d", proto, mapResp.Protocol)
+ }
+ if mapResp.InternalPort != uint16(internalPort) {
+ return nil, fmt.Errorf("internal port mismatch: requested %d, got %d", internalPort, mapResp.InternalPort)
+ }
+
+ if mapResp.ResultCode != ResultSuccess {
+ return nil, &Error{
+ Code: mapResp.ResultCode,
+ Message: ResultCodeString(mapResp.ResultCode),
+ }
+ }
+
+ c.mu.Lock()
+ if c.updateEpochLocked(mapResp.Epoch) {
+ log.Warnf("PCP server epoch indicates state loss - mappings may need refresh")
+ }
+ c.cacheExternalIPLocked(mapResp.ExternalIP)
+ c.mu.Unlock()
+ return mapResp, nil
+}
+
// DeletePortMapping removes a port mapping by requesting zero lifetime
// (a zero-lifetime MAP request deletes the mapping per RFC 6887).
func (c *Client) DeletePortMapping(ctx context.Context, protocol string, internalPort int) error {
	if _, err := c.addPortMappingWithHint(ctx, protocol, internalPort, 0, netip.Addr{}, 0); err != nil {
		var pcpErr *Error
		// NOTE(review): NOT_AUTHORIZED is swallowed here, presumably because
		// some gateways return it when asked to delete a mapping they no
		// longer hold — confirm against the gateways this targets.
		if errors.As(err, &pcpErr) && pcpErr.Code == ResultNotAuthorized {
			return nil
		}
		return fmt.Errorf("delete mapping: %w", err)
	}
	return nil
}
+
+// GetExternalAddress returns the external IP address.
+// First checks for a cached value from previous MAP responses.
+// If not cached, creates a short-lived mapping to discover the external IP.
+func (c *Client) GetExternalAddress(ctx context.Context) (net.IP, error) {
+ c.mu.Lock()
+ if c.externalIP.IsValid() {
+ ip := c.externalIP.AsSlice()
+ c.mu.Unlock()
+ return ip, nil
+ }
+ c.mu.Unlock()
+
+ // Use an ephemeral port in the dynamic range (49152-65535).
+ // Port 0 is not valid with UDP/TCP protocols per RFC 6887.
+ ephemeralPort := 49152 + int(uint16(time.Now().UnixNano()))%(65535-49152)
+
+ // Use minimal lifetime (1 second) for discovery.
+ resp, err := c.AddPortMapping(ctx, "udp", ephemeralPort, time.Second)
+ if err != nil {
+ return nil, fmt.Errorf("create temporary mapping: %w", err)
+ }
+
+ if err := c.DeletePortMapping(ctx, "udp", ephemeralPort); err != nil {
+ log.Debugf("cleanup temporary PCP mapping: %v", err)
+ }
+
+ return resp.ExternalIP.AsSlice(), nil
+}
+
+// LastEpoch returns the last observed server epoch value.
+// A decrease in epoch indicates the server may have restarted and mappings may be lost.
+func (c *Client) LastEpoch() uint32 {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ return c.lastEpoch
+}
+
+// EpochStateLost returns true if epoch state loss was detected and clears the flag.
+func (c *Client) EpochStateLost() bool {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ lost := c.epochStateLost
+ c.epochStateLost = false
+ return lost
+}
+
// updateEpochLocked updates the epoch tracking and detects potential state loss.
// Returns true if state loss was detected (server likely restarted).
// Caller must hold c.mu.
func (c *Client) updateEpochLocked(newEpoch uint32) bool {
	now := time.Now()
	stateLost := false

	// RFC 6887 Section 8.5: Detect invalid epoch indicating server state loss.
	// client_delta = time since last response
	// server_delta = epoch change since last response
	// Invalid if: client_delta+2 < server_delta - server_delta/16
	// OR: server_delta+2 < client_delta - client_delta/16
	// The +2 handles quantization, /16 (6.25%) handles clock drift.
	// The check is skipped for the very first response (epochTime zero) and
	// while the last reported epoch was 0, since no baseline exists yet.
	if !c.epochTime.IsZero() && c.lastEpoch > 0 {
		clientDelta := uint32(now.Sub(c.epochTime).Seconds())
		// Unsigned arithmetic is intentional: if the epoch went backwards
		// (server reboot), serverDelta wraps to a huge value and the first
		// comparison below trips — exactly the desired "state lost" outcome.
		serverDelta := newEpoch - c.lastEpoch

		// Check for epoch going backwards or jumping unexpectedly.
		// Subtraction is safe: serverDelta/16 is always <= serverDelta.
		if clientDelta+2 < serverDelta-(serverDelta/16) ||
			serverDelta+2 < clientDelta-(clientDelta/16) {
			stateLost = true
			c.epochStateLost = true
		}
	}

	c.lastEpoch = newEpoch
	c.epochTime = now
	return stateLost
}
+
+// cacheExternalIP stores the external IP from a successful MAP response.
+// Caller must hold c.mu.
+func (c *Client) cacheExternalIPLocked(ip netip.Addr) {
+ if ip.IsValid() && !ip.IsUnspecified() {
+ c.externalIP = ip
+ }
+}
+
+// sendRequest sends a PCP request with retries per RFC 6887 Section 8.1.1.
+func (c *Client) sendRequest(ctx context.Context, req []byte) ([]byte, error) {
+ addr := &net.UDPAddr{IP: c.gateway.AsSlice(), Port: Port}
+
+ var lastErr error
+ delay := initialRetryDelay
+
+ for range maxRetries {
+ resp, err := c.sendOnce(ctx, addr, req)
+ if err == nil {
+ return resp, nil
+ }
+ lastErr = err
+
+ if ctx.Err() != nil {
+ return nil, ctx.Err()
+ }
+
+ // RFC 6887 Section 8.1.1: RT = (1 + RAND) * MIN(2 * RTprev, MRT)
+ // RAND is random between -0.1 and +0.1
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ case <-time.After(retryDelayWithJitter(delay)):
+ }
+ delay = min(delay*2, maxRetryDelay)
+ }
+
+ return nil, fmt.Errorf("PCP request failed after %d retries: %w", maxRetries, lastErr)
+}
+
// retryDelayWithJitter applies RFC 6887 jitter: multiply by (1 + RAND) where RAND is [-0.1, +0.1].
func retryDelayWithJitter(d time.Duration) time.Duration {
	var b [1]byte
	// Error deliberately ignored: jitter is best-effort, and on failure b[0]
	// stays 0, which yields the valid lower bound of 0.9*d.
	_, _ = rand.Read(b[:])
	// Convert byte to range [-0.1, +0.1]: (b/255 * 0.2) - 0.1
	jitter := (float64(b[0])/255.0)*0.2 - 0.1
	return time.Duration(float64(d) * (1 + jitter))
}
+
+func (c *Client) sendOnce(ctx context.Context, addr *net.UDPAddr, req []byte) ([]byte, error) {
+ // Use ListenUDP instead of DialUDP to validate response source address per RFC 6887 §8.3.
+ conn, err := net.ListenUDP("udp", nil)
+ if err != nil {
+ return nil, fmt.Errorf("listen: %w", err)
+ }
+ defer func() {
+ if err := conn.Close(); err != nil {
+ log.Debugf("close UDP connection: %v", err)
+ }
+ }()
+
+ timeout := c.timeout
+ if deadline, ok := ctx.Deadline(); ok {
+ if remaining := time.Until(deadline); remaining < timeout {
+ timeout = remaining
+ }
+ }
+
+ if err := conn.SetDeadline(time.Now().Add(timeout)); err != nil {
+ return nil, fmt.Errorf("set deadline: %w", err)
+ }
+
+ if _, err := conn.WriteToUDP(req, addr); err != nil {
+ return nil, fmt.Errorf("write: %w", err)
+ }
+
+ resp := make([]byte, responseBufferSize)
+ n, from, err := conn.ReadFromUDP(resp)
+ if err != nil {
+ return nil, fmt.Errorf("read: %w", err)
+ }
+
+ // RFC 6887 §8.3: Validate response came from expected PCP server.
+ if !from.IP.Equal(addr.IP) {
+ return nil, fmt.Errorf("response from unexpected source %s (expected %s)", from.IP, addr.IP)
+ }
+
+ return resp[:n], nil
+}
+
+func (c *Client) getLocalIP() (netip.Addr, error) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if !c.localIP.IsValid() {
+ return netip.Addr{}, fmt.Errorf("local IP not set for gateway %s", c.gateway)
+ }
+ return c.localIP, nil
+}
+
+func protocolNumber(protocol string) (uint8, error) {
+ switch protocol {
+ case "udp", "UDP":
+ return ProtoUDP, nil
+ case "tcp", "TCP":
+ return ProtoTCP, nil
+ default:
+ return 0, fmt.Errorf("unsupported protocol: %s", protocol)
+ }
+}
+
+// Error represents a PCP error response.
+type Error struct {
+ Code uint8
+ Message string
+}
+
+func (e *Error) Error() string {
+ return fmt.Sprintf("PCP error: %s (%d)", e.Message, e.Code)
+}
diff --git a/client/internal/portforward/pcp/client_test.go b/client/internal/portforward/pcp/client_test.go
new file mode 100644
index 000000000..79f44a426
--- /dev/null
+++ b/client/internal/portforward/pcp/client_test.go
@@ -0,0 +1,187 @@
+package pcp
+
import (
	"context"
	"net"
	"net/netip"
	"os"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)
+
+func TestAddrConversion(t *testing.T) {
+ tests := []struct {
+ name string
+ addr netip.Addr
+ }{
+ {"IPv4", netip.MustParseAddr("192.168.1.100")},
+ {"IPv4 loopback", netip.MustParseAddr("127.0.0.1")},
+ {"IPv6", netip.MustParseAddr("2001:db8::1")},
+ {"IPv6 loopback", netip.MustParseAddr("::1")},
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ b16 := addrTo16(tt.addr)
+
+ recovered := addrFrom16(b16)
+ assert.Equal(t, tt.addr, recovered, "address should round-trip")
+ })
+ }
+}
+
+func TestBuildAnnounceRequest(t *testing.T) {
+ clientIP := netip.MustParseAddr("192.168.1.100")
+ req := buildAnnounceRequest(clientIP)
+
+ require.Len(t, req, headerSize)
+ assert.Equal(t, byte(Version), req[0], "version")
+ assert.Equal(t, byte(OpAnnounce), req[1], "opcode")
+
+ // Check client IP is properly encoded as IPv4-mapped IPv6
+ assert.Equal(t, byte(0xff), req[18], "IPv4-mapped prefix byte 10")
+ assert.Equal(t, byte(0xff), req[19], "IPv4-mapped prefix byte 11")
+ assert.Equal(t, byte(192), req[20], "IP octet 1")
+ assert.Equal(t, byte(168), req[21], "IP octet 2")
+ assert.Equal(t, byte(1), req[22], "IP octet 3")
+ assert.Equal(t, byte(100), req[23], "IP octet 4")
+}
+
+func TestBuildMapRequest(t *testing.T) {
+ clientIP := netip.MustParseAddr("192.168.1.100")
+ nonce := [12]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}
+ req := buildMapRequest(clientIP, nonce, ProtoUDP, 51820, 51820, netip.Addr{}, 3600)
+
+ require.Len(t, req, mapRequestSize)
+ assert.Equal(t, byte(Version), req[0], "version")
+ assert.Equal(t, byte(OpMap), req[1], "opcode")
+
+ // Lifetime at bytes 4-7
+ assert.Equal(t, uint32(3600), (uint32(req[4])<<24)|(uint32(req[5])<<16)|(uint32(req[6])<<8)|uint32(req[7]), "lifetime")
+
+ // Nonce at bytes 24-35
+ assert.Equal(t, nonce[:], req[24:36], "nonce")
+
+ // Protocol at byte 36
+ assert.Equal(t, byte(ProtoUDP), req[36], "protocol")
+
+ // Internal port at bytes 40-41
+ assert.Equal(t, uint16(51820), (uint16(req[40])<<8)|uint16(req[41]), "internal port")
+
+ // External port at bytes 42-43
+ assert.Equal(t, uint16(51820), (uint16(req[42])<<8)|uint16(req[43]), "external port")
+}
+
+func TestParseResponse(t *testing.T) {
+ // Construct a valid ANNOUNCE response
+ resp := make([]byte, headerSize)
+ resp[0] = Version
+ resp[1] = OpAnnounce | OpReply
+ // Result code = 0 (success)
+ // Lifetime = 0
+ // Epoch = 12345
+ resp[8] = 0
+ resp[9] = 0
+ resp[10] = 0x30
+ resp[11] = 0x39
+
+ parsed, err := parseResponse(resp)
+ require.NoError(t, err)
+ assert.Equal(t, uint8(Version), parsed.Version)
+ assert.Equal(t, uint8(OpAnnounce|OpReply), parsed.Opcode)
+ assert.Equal(t, uint8(ResultSuccess), parsed.ResultCode)
+ assert.Equal(t, uint32(12345), parsed.Epoch)
+}
+
+func TestParseResponseErrors(t *testing.T) {
+ t.Run("too short", func(t *testing.T) {
+ _, err := parseResponse([]byte{1, 2, 3})
+ assert.Error(t, err)
+ })
+
+ t.Run("wrong version", func(t *testing.T) {
+ resp := make([]byte, headerSize)
+ resp[0] = 1 // Wrong version
+ resp[1] = OpReply
+ _, err := parseResponse(resp)
+ assert.Error(t, err)
+ })
+
+ t.Run("missing reply bit", func(t *testing.T) {
+ resp := make([]byte, headerSize)
+ resp[0] = Version
+ resp[1] = OpAnnounce // Missing OpReply bit
+ _, err := parseResponse(resp)
+ assert.Error(t, err)
+ })
+}
+
+func TestResultCodeString(t *testing.T) {
+ assert.Equal(t, "SUCCESS", ResultCodeString(ResultSuccess))
+ assert.Equal(t, "NOT_AUTHORIZED", ResultCodeString(ResultNotAuthorized))
+ assert.Equal(t, "ADDRESS_MISMATCH", ResultCodeString(ResultAddressMismatch))
+ assert.Contains(t, ResultCodeString(255), "UNKNOWN")
+}
+
+func TestProtocolNumber(t *testing.T) {
+ proto, err := protocolNumber("udp")
+ require.NoError(t, err)
+ assert.Equal(t, uint8(ProtoUDP), proto)
+
+ proto, err = protocolNumber("tcp")
+ require.NoError(t, err)
+ assert.Equal(t, uint8(ProtoTCP), proto)
+
+ proto, err = protocolNumber("UDP")
+ require.NoError(t, err)
+ assert.Equal(t, uint8(ProtoUDP), proto)
+
+ _, err = protocolNumber("icmp")
+ assert.Error(t, err)
+}
+
+func TestClientCreation(t *testing.T) {
+ gateway := netip.MustParseAddr("192.168.1.1").AsSlice()
+
+ client := NewClient(gateway)
+ assert.Equal(t, net.IP(gateway), client.Gateway())
+ assert.Equal(t, defaultTimeout, client.timeout)
+
+ clientWithTimeout := NewClientWithTimeout(gateway, 5*time.Second)
+ assert.Equal(t, 5*time.Second, clientWithTimeout.timeout)
+}
+
+func TestNATType(t *testing.T) {
+ n := NewNAT(netip.MustParseAddr("192.168.1.1").AsSlice(), netip.MustParseAddr("192.168.1.100").AsSlice())
+ assert.Equal(t, "PCP", n.Type())
+}
+
+// Integration test - skipped unless PCP_TEST_GATEWAY env is set
+func TestClientIntegration(t *testing.T) {
+ t.Skip("Integration test - run manually with PCP_TEST_GATEWAY=")
+
+ gateway := netip.MustParseAddr("10.0.1.1").AsSlice() // Change to your test gateway
+ localIP := netip.MustParseAddr("10.0.1.100").AsSlice() // Change to your local IP
+
+ client := NewClient(gateway)
+ client.SetLocalIP(localIP)
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+
+ // Test ANNOUNCE
+ epoch, err := client.Announce(ctx)
+ require.NoError(t, err)
+ t.Logf("Server epoch: %d", epoch)
+
+ // Test MAP
+ resp, err := client.AddPortMapping(ctx, "udp", 51820, 1*time.Hour)
+ require.NoError(t, err)
+ t.Logf("Mapping: internal=%d external=%d externalIP=%s",
+ resp.InternalPort, resp.ExternalPort, resp.ExternalIP)
+
+ // Cleanup
+ err = client.DeletePortMapping(ctx, "udp", 51820)
+ require.NoError(t, err)
+}
diff --git a/client/internal/portforward/pcp/nat.go b/client/internal/portforward/pcp/nat.go
new file mode 100644
index 000000000..1dc24274b
--- /dev/null
+++ b/client/internal/portforward/pcp/nat.go
@@ -0,0 +1,209 @@
+package pcp
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "net/netip"
+ "sync"
+ "time"
+
+ log "github.com/sirupsen/logrus"
+
+ "github.com/libp2p/go-nat"
+ "github.com/libp2p/go-netroute"
+)
+
+var _ nat.NAT = (*NAT)(nil)
+
+// NAT implements the go-nat NAT interface using PCP.
+// Supports dual-stack (IPv4 and IPv6) when available.
+// All methods are safe for concurrent use.
+//
+// TODO: IPv6 pinholes use the local IPv6 address. If the address changes
+// (e.g., due to SLAAC rotation or network change), the pinhole becomes stale
+// and needs to be recreated with the new address.
+type NAT struct {
+ client *Client
+
+ mu sync.RWMutex
+ // client6 is the IPv6 PCP client, nil if IPv6 is unavailable.
+ client6 *Client
+ // localIP6 caches the local IPv6 address used for PCP requests.
+ localIP6 netip.Addr
+}
+
+// NewNAT creates a new NAT instance backed by PCP.
+func NewNAT(gateway, localIP net.IP) *NAT {
+ client := NewClient(gateway)
+ client.SetLocalIP(localIP)
+ return &NAT{
+ client: client,
+ }
+}
+
+// Type returns "PCP" as the NAT type.
+func (n *NAT) Type() string {
+ return "PCP"
+}
+
+// GetDeviceAddress returns the gateway IP address.
+func (n *NAT) GetDeviceAddress() (net.IP, error) {
+ return n.client.Gateway(), nil
+}
+
+// GetExternalAddress returns the external IP address.
+func (n *NAT) GetExternalAddress() (net.IP, error) {
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ defer cancel()
+ return n.client.GetExternalAddress(ctx)
+}
+
+// GetInternalAddress returns the local IP address used to communicate with the gateway.
+func (n *NAT) GetInternalAddress() (net.IP, error) {
+ addr, err := n.client.getLocalIP()
+ if err != nil {
+ return nil, err
+ }
+ return addr.AsSlice(), nil
+}
+
+// AddPortMapping creates a port mapping on both IPv4 and IPv6 (if available).
+func (n *NAT) AddPortMapping(ctx context.Context, protocol string, internalPort int, _ string, timeout time.Duration) (int, error) {
+ resp, err := n.client.AddPortMapping(ctx, protocol, internalPort, timeout)
+ if err != nil {
+ return 0, fmt.Errorf("add mapping: %w", err)
+ }
+
+ n.mu.RLock()
+ client6 := n.client6
+ localIP6 := n.localIP6
+ n.mu.RUnlock()
+
+ if client6 == nil {
+ return int(resp.ExternalPort), nil
+ }
+
+ if _, err := client6.AddPortMapping(ctx, protocol, internalPort, timeout); err != nil {
+ log.Warnf("IPv6 PCP mapping failed (continuing with IPv4): %v", err)
+ return int(resp.ExternalPort), nil
+ }
+
+ log.Infof("created IPv6 PCP pinhole: %s:%d", localIP6, internalPort)
+ return int(resp.ExternalPort), nil
+}
+
+// DeletePortMapping removes a port mapping from both IPv4 and IPv6.
+func (n *NAT) DeletePortMapping(ctx context.Context, protocol string, internalPort int) error {
+ err := n.client.DeletePortMapping(ctx, protocol, internalPort)
+
+ n.mu.RLock()
+ client6 := n.client6
+ n.mu.RUnlock()
+
+ if client6 != nil {
+ if err6 := client6.DeletePortMapping(ctx, protocol, internalPort); err6 != nil {
+ log.Warnf("IPv6 PCP delete mapping failed: %v", err6)
+ }
+ }
+
+ if err != nil {
+ return fmt.Errorf("delete mapping: %w", err)
+ }
+ return nil
+}
+
+// CheckServerHealth sends an ANNOUNCE to verify the server is still responsive.
+// Returns the current epoch and whether the server may have restarted (epoch state loss detected).
+func (n *NAT) CheckServerHealth(ctx context.Context) (epoch uint32, serverRestarted bool, err error) {
+ epoch, err = n.client.Announce(ctx)
+ if err != nil {
+ return 0, false, fmt.Errorf("announce: %w", err)
+ }
+ return epoch, n.client.EpochStateLost(), nil
+}
+
+// DiscoverPCP attempts to discover a PCP-capable gateway.
+// Returns a NAT interface if PCP is supported, or an error otherwise.
+// Discovers both IPv4 and IPv6 gateways when available.
+func DiscoverPCP(ctx context.Context) (nat.NAT, error) {
+ gateway, localIP, err := getDefaultGateway()
+ if err != nil {
+ return nil, fmt.Errorf("get default gateway: %w", err)
+ }
+
+ client := NewClient(gateway)
+ client.SetLocalIP(localIP)
+ if _, err := client.Announce(ctx); err != nil {
+ return nil, fmt.Errorf("PCP announce: %w", err)
+ }
+
+ result := &NAT{client: client}
+ discoverIPv6(ctx, result)
+
+ return result, nil
+}
+
+func discoverIPv6(ctx context.Context, result *NAT) {
+ gateway6, localIP6, err := getDefaultGateway6()
+ if err != nil {
+ log.Debugf("IPv6 gateway discovery failed: %v", err)
+ return
+ }
+
+ client6 := NewClient(gateway6)
+ client6.SetLocalIP(localIP6)
+ if _, err := client6.Announce(ctx); err != nil {
+ log.Debugf("PCP IPv6 announce failed: %v", err)
+ return
+ }
+
+ addr, ok := netip.AddrFromSlice(localIP6)
+ if !ok {
+ log.Debugf("invalid IPv6 local IP: %v", localIP6)
+ return
+ }
+ result.mu.Lock()
+ result.client6 = client6
+ result.localIP6 = addr
+ result.mu.Unlock()
+ log.Debugf("PCP IPv6 gateway discovered: %s (local: %s)", gateway6, localIP6)
+}
+
+// getDefaultGateway returns the default IPv4 gateway and local IP using the system routing table.
+func getDefaultGateway() (gateway net.IP, localIP net.IP, err error) {
+ router, err := netroute.New()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ _, gateway, localIP, err = router.Route(net.IPv4zero)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if gateway == nil {
+ return nil, nil, nat.ErrNoNATFound
+ }
+
+ return gateway, localIP, nil
+}
+
+// getDefaultGateway6 returns the default IPv6 gateway IP address using the system routing table.
+func getDefaultGateway6() (gateway net.IP, localIP net.IP, err error) {
+ router, err := netroute.New()
+ if err != nil {
+ return nil, nil, err
+ }
+
+ _, gateway, localIP, err = router.Route(net.IPv6zero)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ if gateway == nil {
+ return nil, nil, nat.ErrNoNATFound
+ }
+
+ return gateway, localIP, nil
+}
diff --git a/client/internal/portforward/pcp/protocol.go b/client/internal/portforward/pcp/protocol.go
new file mode 100644
index 000000000..d81c50c8c
--- /dev/null
+++ b/client/internal/portforward/pcp/protocol.go
@@ -0,0 +1,225 @@
+// Package pcp implements the Port Control Protocol (RFC 6887).
+//
+// # Implemented Features
+//
+// - ANNOUNCE opcode: Discovers PCP server support
+// - MAP opcode: Creates/deletes port mappings (IPv4 NAT) and firewall pinholes (IPv6)
+// - Dual-stack: Simultaneous IPv4 and IPv6 support via separate clients
+// - Nonce validation: Prevents response spoofing
+// - Epoch tracking: Detects server restarts per Section 8.5
+// - RFC-compliant retry timing: 3s initial, exponential backoff to 1024s max (Section 8.1.1)
+//
+// # Not Implemented
+//
+// - PEER opcode: For outbound peer connections (not needed for inbound NAT traversal)
+// - THIRD_PARTY option: For managing mappings on behalf of other devices
+// - PREFER_FAILURE option: Requires exact external port or fail (IPv4 NAT only, not needed for IPv6 pinholing)
+// - FILTER option: To restrict remote peer addresses
+//
+// These optional features are omitted because the primary use case is simple
+// port forwarding for WireGuard, which only requires MAP with default behavior.
+package pcp
+
+import (
+ "encoding/binary"
+ "fmt"
+ "net/netip"
+)
+
+const (
+ // Version is the PCP protocol version (RFC 6887).
+ Version = 2
+
+ // Port is the standard PCP server port.
+ Port = 5351
+
+ // DefaultLifetime is the default requested mapping lifetime in seconds.
+ DefaultLifetime = 7200 // 2 hours
+
+ // Header sizes
+ headerSize = 24
+ mapPayloadSize = 36
+ mapRequestSize = headerSize + mapPayloadSize // 60 bytes
+)
+
+// Opcodes
+const (
+ OpAnnounce = 0
+ OpMap = 1
+ OpPeer = 2
+ OpReply = 0x80 // OR'd with opcode in responses
+)
+
+// Protocol numbers for MAP requests
+const (
+ ProtoUDP = 17
+ ProtoTCP = 6
+)
+
+// Result codes (RFC 6887 Section 7.4)
+const (
+ ResultSuccess = 0
+ ResultUnsuppVersion = 1
+ ResultNotAuthorized = 2
+ ResultMalformedRequest = 3
+ ResultUnsuppOpcode = 4
+ ResultUnsuppOption = 5
+ ResultMalformedOption = 6
+ ResultNetworkFailure = 7
+ ResultNoResources = 8
+ ResultUnsuppProtocol = 9
+ ResultUserExQuota = 10
+ ResultCannotProvideExt = 11
+ ResultAddressMismatch = 12
+ ResultExcessiveRemotePeers = 13
+)
+
+// ResultCodeString returns a human-readable string for a result code.
+func ResultCodeString(code uint8) string {
+ switch code {
+ case ResultSuccess:
+ return "SUCCESS"
+ case ResultUnsuppVersion:
+ return "UNSUPP_VERSION"
+ case ResultNotAuthorized:
+ return "NOT_AUTHORIZED"
+ case ResultMalformedRequest:
+ return "MALFORMED_REQUEST"
+ case ResultUnsuppOpcode:
+ return "UNSUPP_OPCODE"
+ case ResultUnsuppOption:
+ return "UNSUPP_OPTION"
+ case ResultMalformedOption:
+ return "MALFORMED_OPTION"
+ case ResultNetworkFailure:
+ return "NETWORK_FAILURE"
+ case ResultNoResources:
+ return "NO_RESOURCES"
+ case ResultUnsuppProtocol:
+ return "UNSUPP_PROTOCOL"
+ case ResultUserExQuota:
+ return "USER_EX_QUOTA"
+ case ResultCannotProvideExt:
+ return "CANNOT_PROVIDE_EXTERNAL"
+ case ResultAddressMismatch:
+ return "ADDRESS_MISMATCH"
+ case ResultExcessiveRemotePeers:
+ return "EXCESSIVE_REMOTE_PEERS"
+ default:
+ return fmt.Sprintf("UNKNOWN(%d)", code)
+ }
+}
+
+// Response represents a parsed PCP response header.
+type Response struct {
+ Version uint8
+ Opcode uint8
+ ResultCode uint8
+ Lifetime uint32
+ Epoch uint32
+}
+
+// MapResponse contains the full response to a MAP request.
+type MapResponse struct {
+ Response
+ Nonce [12]byte
+ Protocol uint8
+ InternalPort uint16
+ ExternalPort uint16
+ ExternalIP netip.Addr
+}
+
+// addrTo16 converts an address to its 16-byte IPv4-mapped IPv6 representation.
+func addrTo16(addr netip.Addr) [16]byte {
+ if addr.Is4() {
+ return netip.AddrFrom4(addr.As4()).As16()
+ }
+ return addr.As16()
+}
+
+// addrFrom16 extracts an address from a 16-byte representation, unmapping IPv4.
+func addrFrom16(b [16]byte) netip.Addr {
+ return netip.AddrFrom16(b).Unmap()
+}
+
+// buildAnnounceRequest creates a PCP ANNOUNCE request packet.
+func buildAnnounceRequest(clientIP netip.Addr) []byte {
+ req := make([]byte, headerSize)
+ req[0] = Version
+ req[1] = OpAnnounce
+ mapped := addrTo16(clientIP)
+ copy(req[8:24], mapped[:])
+ return req
+}
+
+// buildMapRequest creates a PCP MAP request packet.
+func buildMapRequest(clientIP netip.Addr, nonce [12]byte, protocol uint8, internalPort, suggestedExtPort uint16, suggestedExtIP netip.Addr, lifetime uint32) []byte {
+ req := make([]byte, mapRequestSize)
+
+ // Header
+ req[0] = Version
+ req[1] = OpMap
+ binary.BigEndian.PutUint32(req[4:8], lifetime)
+ mapped := addrTo16(clientIP)
+ copy(req[8:24], mapped[:])
+
+ // MAP payload
+ copy(req[24:36], nonce[:])
+ req[36] = protocol
+ binary.BigEndian.PutUint16(req[40:42], internalPort)
+ binary.BigEndian.PutUint16(req[42:44], suggestedExtPort)
+ if suggestedExtIP.IsValid() {
+ extMapped := addrTo16(suggestedExtIP)
+ copy(req[44:60], extMapped[:])
+ }
+
+ return req
+}
+
+// parseResponse parses the common PCP response header.
+func parseResponse(data []byte) (*Response, error) {
+ if len(data) < headerSize {
+ return nil, fmt.Errorf("response too short: %d bytes", len(data))
+ }
+
+ resp := &Response{
+ Version: data[0],
+ Opcode: data[1],
+ ResultCode: data[3], // Byte 2 is reserved, byte 3 is result code (RFC 6887 §7.2)
+ Lifetime: binary.BigEndian.Uint32(data[4:8]),
+ Epoch: binary.BigEndian.Uint32(data[8:12]),
+ }
+
+ if resp.Version != Version {
+ return nil, fmt.Errorf("unsupported PCP version: %d", resp.Version)
+ }
+
+ if resp.Opcode&OpReply == 0 {
+ return nil, fmt.Errorf("response missing reply bit: opcode=0x%02x", resp.Opcode)
+ }
+
+ return resp, nil
+}
+
+// parseMapResponse parses a complete MAP response.
+func parseMapResponse(data []byte) (*MapResponse, error) {
+ if len(data) < mapRequestSize {
+ return nil, fmt.Errorf("MAP response too short: %d bytes", len(data))
+ }
+
+ resp, err := parseResponse(data)
+ if err != nil {
+ return nil, fmt.Errorf("parse header: %w", err)
+ }
+
+ mapResp := &MapResponse{
+ Response: *resp,
+ Protocol: data[36],
+ InternalPort: binary.BigEndian.Uint16(data[40:42]),
+ ExternalPort: binary.BigEndian.Uint16(data[42:44]),
+ ExternalIP: addrFrom16([16]byte(data[44:60])),
+ }
+ copy(mapResp.Nonce[:], data[24:36])
+
+ return mapResp, nil
+}
diff --git a/client/internal/portforward/state.go b/client/internal/portforward/state.go
new file mode 100644
index 000000000..b1315cdc0
--- /dev/null
+++ b/client/internal/portforward/state.go
@@ -0,0 +1,63 @@
+//go:build !js
+
+package portforward
+
+import (
+ "context"
+ "fmt"
+
+ "github.com/libp2p/go-nat"
+ log "github.com/sirupsen/logrus"
+
+ "github.com/netbirdio/netbird/client/internal/portforward/pcp"
+)
+
+// discoverGateway is the function used for NAT gateway discovery.
+// It can be replaced in tests to avoid real network operations.
+// Tries PCP first, then falls back to NAT-PMP/UPnP.
+var discoverGateway = defaultDiscoverGateway
+
+func defaultDiscoverGateway(ctx context.Context) (nat.NAT, error) {
+ pcpGateway, err := pcp.DiscoverPCP(ctx)
+ if err == nil {
+ return pcpGateway, nil
+ }
+ log.Debugf("PCP discovery failed: %v, trying NAT-PMP/UPnP", err)
+
+ return nat.DiscoverGateway(ctx)
+}
+
+// State is persisted only for crash recovery cleanup
+type State struct {
+ InternalPort uint16 `json:"internal_port,omitempty"`
+ Protocol string `json:"protocol,omitempty"`
+}
+
+func (s *State) Name() string {
+ return "port_forward_state"
+}
+
+// Cleanup implements statemanager.CleanableState for crash recovery
+func (s *State) Cleanup() error {
+ if s.InternalPort == 0 {
+ return nil
+ }
+
+ log.Infof("cleaning up stale port mapping for port %d", s.InternalPort)
+
+ ctx, cancel := context.WithTimeout(context.Background(), discoveryTimeout)
+ defer cancel()
+
+ gateway, err := discoverGateway(ctx)
+ if err != nil {
+ // Discovery failure is not an error - gateway may not exist
+ log.Debugf("cleanup: no gateway found: %v", err)
+ return nil
+ }
+
+ if err := gateway.DeletePortMapping(ctx, s.Protocol, int(s.InternalPort)); err != nil {
+ return fmt.Errorf("delete port mapping: %w", err)
+ }
+
+ return nil
+}
diff --git a/client/internal/profilemanager/config.go b/client/internal/profilemanager/config.go
index f128ee903..20c615d57 100644
--- a/client/internal/profilemanager/config.go
+++ b/client/internal/profilemanager/config.go
@@ -41,7 +41,7 @@ const (
// mgmProber is the subset of management client needed for URL migration probes.
type mgmProber interface {
- GetServerPublicKey() (*wgtypes.Key, error)
+ HealthCheck() error
Close() error
}
@@ -777,8 +777,7 @@ func UpdateOldManagementURL(ctx context.Context, config *Config, configPath stri
}()
// gRPC check
- _, err = client.GetServerPublicKey()
- if err != nil {
+ if err = client.HealthCheck(); err != nil {
log.Infof("couldn't switch to the new Management %s", newURL.String())
return nil, err
}
diff --git a/client/internal/profilemanager/config_test.go b/client/internal/profilemanager/config_test.go
index c3efb48e6..5216f2423 100644
--- a/client/internal/profilemanager/config_test.go
+++ b/client/internal/profilemanager/config_test.go
@@ -17,12 +17,10 @@ import (
"github.com/netbirdio/netbird/util"
)
-type mockMgmProber struct {
- key wgtypes.Key
-}
+type mockMgmProber struct{}
-func (m *mockMgmProber) GetServerPublicKey() (*wgtypes.Key, error) {
- return &m.key, nil
+func (m *mockMgmProber) HealthCheck() error {
+ return nil
}
func (m *mockMgmProber) Close() error { return nil }
@@ -247,11 +245,7 @@ func TestWireguardPortDefaultVsExplicit(t *testing.T) {
func TestUpdateOldManagementURL(t *testing.T) {
origProber := newMgmProber
newMgmProber = func(_ context.Context, _ string, _ wgtypes.Key, _ bool) (mgmProber, error) {
- key, err := wgtypes.GenerateKey()
- if err != nil {
- return nil, err
- }
- return &mockMgmProber{key: key.PublicKey()}, nil
+ return &mockMgmProber{}, nil
}
t.Cleanup(func() { newMgmProber = origProber })
diff --git a/client/internal/routemanager/manager.go b/client/internal/routemanager/manager.go
index 9afe2049d..3923e153b 100644
--- a/client/internal/routemanager/manager.go
+++ b/client/internal/routemanager/manager.go
@@ -52,6 +52,7 @@ type Manager interface {
TriggerSelection(route.HAMap)
GetRouteSelector() *routeselector.RouteSelector
GetClientRoutes() route.HAMap
+ GetSelectedClientRoutes() route.HAMap
GetClientRoutesWithNetID() map[route.NetID][]*route.Route
SetRouteChangeListener(listener listener.NetworkChangeListener)
InitialRouteRange() []string
@@ -167,6 +168,7 @@ func (m *DefaultManager) setupAndroidRoutes(config ManagerConfig) {
NetworkType: route.IPv4Network,
}
cr = append(cr, fakeIPRoute)
+ m.notifier.SetFakeIPRoute(fakeIPRoute)
}
m.notifier.SetInitialClientRoutes(cr, routesForComparison)
@@ -465,6 +467,16 @@ func (m *DefaultManager) GetClientRoutes() route.HAMap {
return maps.Clone(m.clientRoutes)
}
+// GetSelectedClientRoutes returns only the currently selected/active client routes,
+// filtering out deselected exit nodes. Use this instead of GetClientRoutes when checking
+// if traffic should be routed through the tunnel.
+func (m *DefaultManager) GetSelectedClientRoutes() route.HAMap {
+ m.mux.Lock()
+ defer m.mux.Unlock()
+
+ return m.routeSelector.FilterSelectedExitNodes(maps.Clone(m.clientRoutes))
+}
+
// GetClientRoutesWithNetID returns the current routes from the route map, but the keys consist of the network ID only
func (m *DefaultManager) GetClientRoutesWithNetID() map[route.NetID][]*route.Route {
m.mux.Lock()
diff --git a/client/internal/routemanager/mock.go b/client/internal/routemanager/mock.go
index 6b06144b2..66b5e30dd 100644
--- a/client/internal/routemanager/mock.go
+++ b/client/internal/routemanager/mock.go
@@ -18,6 +18,7 @@ type MockManager struct {
TriggerSelectionFunc func(haMap route.HAMap)
GetRouteSelectorFunc func() *routeselector.RouteSelector
GetClientRoutesFunc func() route.HAMap
+ GetSelectedClientRoutesFunc func() route.HAMap
GetClientRoutesWithNetIDFunc func() map[route.NetID][]*route.Route
StopFunc func(manager *statemanager.Manager)
}
@@ -61,7 +62,7 @@ func (m *MockManager) GetRouteSelector() *routeselector.RouteSelector {
return nil
}
-// GetClientRoutes mock implementation of GetClientRoutes from Manager interface
+// GetClientRoutes mock implementation of GetClientRoutes from the Manager interface
func (m *MockManager) GetClientRoutes() route.HAMap {
if m.GetClientRoutesFunc != nil {
return m.GetClientRoutesFunc()
@@ -69,6 +70,14 @@ func (m *MockManager) GetClientRoutes() route.HAMap {
return nil
}
+// GetSelectedClientRoutes mock implementation of GetSelectedClientRoutes from the Manager interface
+func (m *MockManager) GetSelectedClientRoutes() route.HAMap {
+ if m.GetSelectedClientRoutesFunc != nil {
+ return m.GetSelectedClientRoutesFunc()
+ }
+ return nil
+}
+
// GetClientRoutesWithNetID mock implementation of GetClientRoutesWithNetID from Manager interface
func (m *MockManager) GetClientRoutesWithNetID() map[route.NetID][]*route.Route {
if m.GetClientRoutesWithNetIDFunc != nil {
diff --git a/client/internal/routemanager/notifier/notifier_android.go b/client/internal/routemanager/notifier/notifier_android.go
index 3d2784ae1..55e0b7421 100644
--- a/client/internal/routemanager/notifier/notifier_android.go
+++ b/client/internal/routemanager/notifier/notifier_android.go
@@ -16,6 +16,7 @@ import (
type Notifier struct {
initialRoutes []*route.Route
currentRoutes []*route.Route
+ fakeIPRoute *route.Route
listener listener.NetworkChangeListener
listenerMux sync.Mutex
@@ -31,13 +32,17 @@ func (n *Notifier) SetListener(listener listener.NetworkChangeListener) {
n.listener = listener
}
-// SetInitialClientRoutes stores the full initial route set (including fake IP blocks)
-// and a separate comparison set (without fake IP blocks) for diff detection.
+// SetInitialClientRoutes stores the initial route sets for TUN configuration.
func (n *Notifier) SetInitialClientRoutes(initialRoutes []*route.Route, routesForComparison []*route.Route) {
n.initialRoutes = filterStatic(initialRoutes)
n.currentRoutes = filterStatic(routesForComparison)
}
+// SetFakeIPRoute stores the fake IP route to be included in every TUN rebuild.
+func (n *Notifier) SetFakeIPRoute(r *route.Route) {
+ n.fakeIPRoute = r
+}
+
func (n *Notifier) OnNewRoutes(idMap route.HAMap) {
var newRoutes []*route.Route
for _, routes := range idMap {
@@ -69,7 +74,9 @@ func (n *Notifier) notify() {
}
allRoutes := slices.Clone(n.currentRoutes)
- allRoutes = append(allRoutes, n.extraInitialRoutes()...)
+ if n.fakeIPRoute != nil {
+ allRoutes = append(allRoutes, n.fakeIPRoute)
+ }
routeStrings := n.routesToStrings(allRoutes)
sort.Strings(routeStrings)
@@ -78,23 +85,6 @@ func (n *Notifier) notify() {
}(n.listener)
}
-// extraInitialRoutes returns initialRoutes whose network prefix is absent
-// from currentRoutes (e.g. the fake IP block added at setup time).
-func (n *Notifier) extraInitialRoutes() []*route.Route {
- currentNets := make(map[netip.Prefix]struct{}, len(n.currentRoutes))
- for _, r := range n.currentRoutes {
- currentNets[r.Network] = struct{}{}
- }
-
- var extra []*route.Route
- for _, r := range n.initialRoutes {
- if _, ok := currentNets[r.Network]; !ok {
- extra = append(extra, r)
- }
- }
- return extra
-}
-
func filterStatic(routes []*route.Route) []*route.Route {
out := make([]*route.Route, 0, len(routes))
for _, r := range routes {
diff --git a/client/internal/routemanager/notifier/notifier_ios.go b/client/internal/routemanager/notifier/notifier_ios.go
index bb125cfa4..68c85067a 100644
--- a/client/internal/routemanager/notifier/notifier_ios.go
+++ b/client/internal/routemanager/notifier/notifier_ios.go
@@ -34,6 +34,10 @@ func (n *Notifier) SetInitialClientRoutes([]*route.Route, []*route.Route) {
// iOS doesn't care about initial routes
}
+func (n *Notifier) SetFakeIPRoute(*route.Route) {
+ // Not used on iOS
+}
+
func (n *Notifier) OnNewRoutes(route.HAMap) {
// Not used on iOS
}
@@ -53,7 +57,6 @@ func (n *Notifier) OnNewPrefixes(prefixes []netip.Prefix) {
n.currentPrefixes = newNets
n.notify()
}
-
func (n *Notifier) notify() {
n.listenerMux.Lock()
defer n.listenerMux.Unlock()
diff --git a/client/internal/routemanager/notifier/notifier_other.go b/client/internal/routemanager/notifier/notifier_other.go
index 0521e3dc2..97c815cf0 100644
--- a/client/internal/routemanager/notifier/notifier_other.go
+++ b/client/internal/routemanager/notifier/notifier_other.go
@@ -23,6 +23,10 @@ func (n *Notifier) SetInitialClientRoutes([]*route.Route, []*route.Route) {
// Not used on non-mobile platforms
}
+func (n *Notifier) SetFakeIPRoute(*route.Route) {
+ // Not used on non-mobile platforms
+}
+
func (n *Notifier) OnNewRoutes(idMap route.HAMap) {
// Not used on non-mobile platforms
}
diff --git a/client/ios/NetBirdSDK/client.go b/client/ios/NetBirdSDK/client.go
index 3e2da7f4e..043673904 100644
--- a/client/ios/NetBirdSDK/client.go
+++ b/client/ios/NetBirdSDK/client.go
@@ -161,7 +161,11 @@ func (c *Client) Run(fd int32, interfaceName string, envList *EnvList) error {
cfg.WgIface = interfaceName
c.connectClient = internal.NewConnectClient(ctx, cfg, c.recorder)
- return c.connectClient.RunOniOS(fd, c.networkChangeListener, c.dnsManager, c.stateFile)
+ hostDNS := []netip.AddrPort{
+ netip.MustParseAddrPort("9.9.9.9:53"),
+ netip.MustParseAddrPort("149.112.112.112:53"),
+ }
+ return c.connectClient.RunOniOS(fd, c.networkChangeListener, c.dnsManager, hostDNS, c.stateFile)
}
// Stop the internal client and free the resources
diff --git a/client/server/server.go b/client/server/server.go
index 7c1e70692..e12b6df5b 100644
--- a/client/server/server.go
+++ b/client/server/server.go
@@ -1359,6 +1359,10 @@ func (s *Server) ExposeService(req *proto.ExposeServiceRequest, srv proto.Daemon
return gstatus.Errorf(codes.FailedPrecondition, "engine not initialized")
}
+ if engine.IsBlockInbound() {
+ return gstatus.Errorf(codes.FailedPrecondition, "expose requires inbound connections but 'block inbound' is enabled, disable it first")
+ }
+
mgr := engine.GetExposeManager()
if mgr == nil {
return gstatus.Errorf(codes.Internal, "expose manager not available")
diff --git a/client/server/state_generic.go b/client/server/state_generic.go
index 980ba0cda..86475ca42 100644
--- a/client/server/state_generic.go
+++ b/client/server/state_generic.go
@@ -9,6 +9,7 @@ import (
"github.com/netbirdio/netbird/client/ssh/config"
)
+// registerStates registers all states that need crash recovery cleanup.
func registerStates(mgr *statemanager.Manager) {
mgr.RegisterState(&dns.ShutdownState{})
mgr.RegisterState(&systemops.ShutdownState{})
diff --git a/client/server/state_linux.go b/client/server/state_linux.go
index 019477d8e..b193d4dfa 100644
--- a/client/server/state_linux.go
+++ b/client/server/state_linux.go
@@ -11,6 +11,7 @@ import (
"github.com/netbirdio/netbird/client/ssh/config"
)
+// registerStates registers all states that need crash recovery cleanup.
func registerStates(mgr *statemanager.Manager) {
mgr.RegisterState(&dns.ShutdownState{})
mgr.RegisterState(&systemops.ShutdownState{})
diff --git a/client/ssh/proxy/proxy.go b/client/ssh/proxy/proxy.go
index 8897b9c7e..59007f75c 100644
--- a/client/ssh/proxy/proxy.go
+++ b/client/ssh/proxy/proxy.go
@@ -141,7 +141,7 @@ func (p *SSHProxy) runProxySSHServer(jwtToken string) error {
func (p *SSHProxy) handleSSHSession(session ssh.Session) {
ptyReq, winCh, isPty := session.Pty()
- hasCommand := len(session.Command()) > 0
+ hasCommand := session.RawCommand() != ""
sshClient, err := p.getOrCreateBackendClient(session.Context(), session.User())
if err != nil {
@@ -180,7 +180,7 @@ func (p *SSHProxy) handleSSHSession(session ssh.Session) {
}
if hasCommand {
- if err := serverSession.Run(strings.Join(session.Command(), " ")); err != nil {
+ if err := serverSession.Run(session.RawCommand()); err != nil {
log.Debugf("run command: %v", err)
p.handleProxyExitCode(session, err)
}
diff --git a/client/ssh/proxy/proxy_test.go b/client/ssh/proxy/proxy_test.go
index dba2e88da..b33d5f8f4 100644
--- a/client/ssh/proxy/proxy_test.go
+++ b/client/ssh/proxy/proxy_test.go
@@ -1,6 +1,7 @@
package proxy
import (
+ "bytes"
"context"
"crypto/rand"
"crypto/rsa"
@@ -245,6 +246,191 @@ func TestSSHProxy_Connect(t *testing.T) {
cancel()
}
+// TestSSHProxy_CommandQuoting verifies that the proxy preserves shell quoting
+// when forwarding commands to the backend. This is critical for tools like
+// Ansible that send commands such as:
+//
+// /bin/sh -c '( umask 77 && mkdir -p ... ) && sleep 0'
+//
+// The single quotes must be preserved so the backend shell receives the
+// subshell expression as a single argument to -c.
+func TestSSHProxy_CommandQuoting(t *testing.T) {
+ if testing.Short() {
+ t.Skip("Skipping integration test in short mode")
+ }
+
+ sshClient, cleanup := setupProxySSHClient(t)
+ defer cleanup()
+
+ // These commands simulate what the SSH protocol delivers as exec payloads.
+ // When a user types: ssh host '/bin/sh -c "( echo hello )"'
+ // the local shell strips the outer single quotes, and the SSH exec request
+ // contains the raw string: /bin/sh -c "( echo hello )"
+ //
+ // The proxy must forward this string verbatim. Using session.Command()
+ // (shlex.Split + strings.Join) strips the inner double quotes, breaking
+ // the command on the backend.
+ tests := []struct {
+ name string
+ command string
+ expect string
+ }{
+ {
+ name: "subshell_in_double_quotes",
+ command: `/bin/sh -c "( echo from-subshell ) && echo outer"`,
+ expect: "from-subshell\nouter\n",
+ },
+ {
+ name: "printf_with_special_chars",
+ command: `/bin/sh -c "printf '%s\n' 'hello world'"`,
+ expect: "hello world\n",
+ },
+ {
+ name: "nested_command_substitution",
+ command: `/bin/sh -c "echo $(echo nested)"`,
+ expect: "nested\n",
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ session, err := sshClient.NewSession()
+ require.NoError(t, err)
+ defer func() { _ = session.Close() }()
+
+ var stderrBuf bytes.Buffer
+ session.Stderr = &stderrBuf
+
+ outputCh := make(chan []byte, 1)
+ errCh := make(chan error, 1)
+ go func() {
+ output, err := session.Output(tc.command)
+ outputCh <- output
+ errCh <- err
+ }()
+
+ select {
+ case output := <-outputCh:
+ err := <-errCh
+ if stderrBuf.Len() > 0 {
+ t.Logf("stderr: %s", stderrBuf.String())
+ }
+ require.NoError(t, err, "command should succeed: %s", tc.command)
+ assert.Equal(t, tc.expect, string(output), "output mismatch for: %s", tc.command)
+ case <-time.After(5 * time.Second):
+ t.Fatalf("command timed out: %s", tc.command)
+ }
+ })
+ }
+}
+
+// setupProxySSHClient creates a full proxy test environment and returns
+// an SSH client connected through the proxy to a backend NetBird SSH server.
+func setupProxySSHClient(t *testing.T) (*cryptossh.Client, func()) {
+ t.Helper()
+
+ const (
+ issuer = "https://test-issuer.example.com"
+ audience = "test-audience"
+ )
+
+ jwksServer, privateKey, jwksURL := setupJWKSServer(t)
+
+ hostKey, err := nbssh.GeneratePrivateKey(nbssh.ED25519)
+ require.NoError(t, err)
+ hostPubKey, err := nbssh.GeneratePublicKey(hostKey)
+ require.NoError(t, err)
+
+ serverConfig := &server.Config{
+ HostKeyPEM: hostKey,
+ JWT: &server.JWTConfig{
+ Issuer: issuer,
+ Audiences: []string{audience},
+ KeysLocation: jwksURL,
+ },
+ }
+ sshServer := server.New(serverConfig)
+ sshServer.SetAllowRootLogin(true)
+
+ testUsername := testutil.GetTestUsername(t)
+ testJWTUser := "test-username"
+ testUserHash, err := sshuserhash.HashUserID(testJWTUser)
+ require.NoError(t, err)
+
+ authConfig := &sshauth.Config{
+ UserIDClaim: sshauth.DefaultUserIDClaim,
+ AuthorizedUsers: []sshuserhash.UserIDHash{testUserHash},
+ MachineUsers: map[string][]uint32{
+ testUsername: {0},
+ },
+ }
+ sshServer.UpdateSSHAuth(authConfig)
+
+ sshServerAddr := server.StartTestServer(t, sshServer)
+
+ mockDaemon := startMockDaemon(t)
+
+ host, portStr, err := net.SplitHostPort(sshServerAddr)
+ require.NoError(t, err)
+ port, err := strconv.Atoi(portStr)
+ require.NoError(t, err)
+
+ mockDaemon.setHostKey(host, hostPubKey)
+
+ validToken := generateValidJWT(t, privateKey, issuer, audience, testJWTUser)
+ mockDaemon.setJWTToken(validToken)
+
+ proxyInstance, err := New(mockDaemon.addr, host, port, io.Discard, nil)
+ require.NoError(t, err)
+
+ origStdin := os.Stdin
+ origStdout := os.Stdout
+
+ stdinReader, stdinWriter, err := os.Pipe()
+ require.NoError(t, err)
+ stdoutReader, stdoutWriter, err := os.Pipe()
+ require.NoError(t, err)
+
+ os.Stdin = stdinReader
+ os.Stdout = stdoutWriter
+
+ clientConn, proxyConn := net.Pipe()
+
+ go func() { _, _ = io.Copy(stdinWriter, proxyConn) }()
+ go func() { _, _ = io.Copy(proxyConn, stdoutReader) }()
+
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+
+ go func() {
+ _ = proxyInstance.Connect(ctx)
+ }()
+
+ sshConfig := &cryptossh.ClientConfig{
+ User: testutil.GetTestUsername(t),
+ Auth: []cryptossh.AuthMethod{},
+ HostKeyCallback: cryptossh.InsecureIgnoreHostKey(),
+ Timeout: 5 * time.Second,
+ }
+
+ sshClientConn, chans, reqs, err := cryptossh.NewClientConn(clientConn, "test", sshConfig)
+ require.NoError(t, err)
+
+ client := cryptossh.NewClient(sshClientConn, chans, reqs)
+
+ cleanupFn := func() {
+ _ = client.Close()
+ _ = clientConn.Close()
+ cancel()
+ os.Stdin = origStdin
+ os.Stdout = origStdout
+ _ = sshServer.Stop()
+ mockDaemon.stop()
+ jwksServer.Close()
+ }
+
+ return client, cleanupFn
+}
+
type mockDaemonServer struct {
proto.UnimplementedDaemonServiceServer
hostKeys map[string][]byte
diff --git a/client/ssh/server/server.go b/client/ssh/server/server.go
index 4431ae423..82d3b700f 100644
--- a/client/ssh/server/server.go
+++ b/client/ssh/server/server.go
@@ -284,19 +284,21 @@ func (s *Server) closeListener(ln net.Listener) {
// Stop closes the SSH server
func (s *Server) Stop() error {
s.mu.Lock()
- defer s.mu.Unlock()
-
- if s.sshServer == nil {
+ sshServer := s.sshServer
+ if sshServer == nil {
+ s.mu.Unlock()
return nil
}
+ s.sshServer = nil
+ s.listener = nil
+ s.mu.Unlock()
- if err := s.sshServer.Close(); err != nil {
+ // Close outside the lock: session handlers need s.mu for unregisterSession.
+ if err := sshServer.Close(); err != nil {
log.Debugf("close SSH server: %v", err)
}
- s.sshServer = nil
- s.listener = nil
-
+ s.mu.Lock()
maps.Clear(s.sessions)
maps.Clear(s.pendingAuthJWT)
maps.Clear(s.connections)
@@ -307,6 +309,7 @@ func (s *Server) Stop() error {
}
}
maps.Clear(s.remoteForwardListeners)
+ s.mu.Unlock()
return nil
}
diff --git a/client/ssh/server/session_handlers.go b/client/ssh/server/session_handlers.go
index f12a75961..0e531bb96 100644
--- a/client/ssh/server/session_handlers.go
+++ b/client/ssh/server/session_handlers.go
@@ -60,7 +60,7 @@ func (s *Server) sessionHandler(session ssh.Session) {
}
ptyReq, winCh, isPty := session.Pty()
- hasCommand := len(session.Command()) > 0
+ hasCommand := session.RawCommand() != ""
if isPty && !hasCommand {
// ssh - PTY interactive session (login)
diff --git a/client/system/info.go b/client/system/info.go
index 01176e765..f2546cfe6 100644
--- a/client/system/info.go
+++ b/client/system/info.go
@@ -153,6 +153,9 @@ func networkAddresses() ([]NetworkAddress, error) {
var netAddresses []NetworkAddress
for _, iface := range interfaces {
+ if iface.Flags&net.FlagUp == 0 {
+ continue
+ }
if iface.HardwareAddr.String() == "" {
continue
}
diff --git a/client/system/info_freebsd.go b/client/system/info_freebsd.go
index 8e1353151..755172842 100644
--- a/client/system/info_freebsd.go
+++ b/client/system/info_freebsd.go
@@ -43,18 +43,24 @@ func GetInfo(ctx context.Context) *Info {
systemHostname, _ := os.Hostname()
+ addrs, err := networkAddresses()
+ if err != nil {
+ log.Warnf("failed to discover network addresses: %s", err)
+ }
+
return &Info{
- GoOS: runtime.GOOS,
- Kernel: osInfo[0],
- Platform: runtime.GOARCH,
- OS: osName,
- OSVersion: osVersion,
- Hostname: extractDeviceName(ctx, systemHostname),
- CPUs: runtime.NumCPU(),
- NetbirdVersion: version.NetbirdVersion(),
- UIVersion: extractUserAgent(ctx),
- KernelVersion: osInfo[1],
- Environment: env,
+ GoOS: runtime.GOOS,
+ Kernel: osInfo[0],
+ Platform: runtime.GOARCH,
+ OS: osName,
+ OSVersion: osVersion,
+ Hostname: extractDeviceName(ctx, systemHostname),
+ CPUs: runtime.NumCPU(),
+ NetbirdVersion: version.NetbirdVersion(),
+ UIVersion: extractUserAgent(ctx),
+ KernelVersion: osInfo[1],
+ NetworkAddresses: addrs,
+ Environment: env,
}
}
diff --git a/client/ui/debug.go b/client/ui/debug.go
index 29f73a66a..4ebe4d675 100644
--- a/client/ui/debug.go
+++ b/client/ui/debug.go
@@ -24,9 +24,10 @@ import (
// Initial state for the debug collection
type debugInitialState struct {
- wasDown bool
- logLevel proto.LogLevel
- isLevelTrace bool
+ wasDown bool
+ needsRestoreUp bool
+ logLevel proto.LogLevel
+ isLevelTrace bool
}
// Debug collection parameters
@@ -371,46 +372,51 @@ func (s *serviceClient) configureServiceForDebug(
conn proto.DaemonServiceClient,
state *debugInitialState,
enablePersistence bool,
-) error {
+) {
if state.wasDown {
if _, err := conn.Up(s.ctx, &proto.UpRequest{}); err != nil {
- return fmt.Errorf("bring service up: %v", err)
+ log.Warnf("failed to bring service up: %v", err)
+ } else {
+ log.Info("Service brought up for debug")
+ time.Sleep(time.Second * 10)
}
- log.Info("Service brought up for debug")
- time.Sleep(time.Second * 10)
}
if !state.isLevelTrace {
if _, err := conn.SetLogLevel(s.ctx, &proto.SetLogLevelRequest{Level: proto.LogLevel_TRACE}); err != nil {
- return fmt.Errorf("set log level to TRACE: %v", err)
+ log.Warnf("failed to set log level to TRACE: %v", err)
+ } else {
+ log.Info("Log level set to TRACE for debug")
}
- log.Info("Log level set to TRACE for debug")
}
if _, err := conn.Down(s.ctx, &proto.DownRequest{}); err != nil {
- return fmt.Errorf("bring service down: %v", err)
+ log.Warnf("failed to bring service down: %v", err)
+ } else {
+ state.needsRestoreUp = !state.wasDown
+ time.Sleep(time.Second)
}
- time.Sleep(time.Second)
if enablePersistence {
if _, err := conn.SetSyncResponsePersistence(s.ctx, &proto.SetSyncResponsePersistenceRequest{
Enabled: true,
}); err != nil {
- return fmt.Errorf("enable sync response persistence: %v", err)
+ log.Warnf("failed to enable sync response persistence: %v", err)
+ } else {
+ log.Info("Sync response persistence enabled for debug")
}
- log.Info("Sync response persistence enabled for debug")
}
if _, err := conn.Up(s.ctx, &proto.UpRequest{}); err != nil {
- return fmt.Errorf("bring service back up: %v", err)
+ log.Warnf("failed to bring service back up: %v", err)
+ } else {
+ state.needsRestoreUp = false
+ time.Sleep(time.Second * 3)
}
- time.Sleep(time.Second * 3)
if _, err := conn.StartCPUProfile(s.ctx, &proto.StartCPUProfileRequest{}); err != nil {
log.Warnf("failed to start CPU profiling: %v", err)
}
-
- return nil
}
func (s *serviceClient) collectDebugData(
@@ -424,9 +430,7 @@ func (s *serviceClient) collectDebugData(
var wg sync.WaitGroup
startProgressTracker(ctx, &wg, params.duration, progress)
- if err := s.configureServiceForDebug(conn, state, params.enablePersistence); err != nil {
- return err
- }
+ s.configureServiceForDebug(conn, state, params.enablePersistence)
wg.Wait()
progress.progressBar.Hide()
@@ -482,9 +486,17 @@ func (s *serviceClient) createDebugBundleFromCollection(
// Restore service to original state
func (s *serviceClient) restoreServiceState(conn proto.DaemonServiceClient, state *debugInitialState) {
+ if state.needsRestoreUp {
+ if _, err := conn.Up(s.ctx, &proto.UpRequest{}); err != nil {
+ log.Warnf("failed to restore up state: %v", err)
+ } else {
+ log.Info("Service state restored to up")
+ }
+ }
+
if state.wasDown {
if _, err := conn.Down(s.ctx, &proto.DownRequest{}); err != nil {
- log.Errorf("Failed to restore down state: %v", err)
+ log.Warnf("failed to restore down state: %v", err)
} else {
log.Info("Service state restored to down")
}
@@ -492,7 +504,7 @@ func (s *serviceClient) restoreServiceState(conn proto.DaemonServiceClient, stat
if !state.isLevelTrace {
if _, err := conn.SetLogLevel(s.ctx, &proto.SetLogLevelRequest{Level: state.logLevel}); err != nil {
- log.Errorf("Failed to restore log level: %v", err)
+ log.Warnf("failed to restore log level: %v", err)
} else {
log.Info("Log level restored to original setting")
}
diff --git a/combined/cmd/config.go b/combined/cmd/config.go
index 36bb2fe95..3cbe7f172 100644
--- a/combined/cmd/config.go
+++ b/combined/cmd/config.go
@@ -181,9 +181,11 @@ type StoreConfig struct {
// ReverseProxyConfig contains reverse proxy settings
type ReverseProxyConfig struct {
- TrustedHTTPProxies []string `yaml:"trustedHTTPProxies"`
- TrustedHTTPProxiesCount uint `yaml:"trustedHTTPProxiesCount"`
- TrustedPeers []string `yaml:"trustedPeers"`
+ TrustedHTTPProxies []string `yaml:"trustedHTTPProxies"`
+ TrustedHTTPProxiesCount uint `yaml:"trustedHTTPProxiesCount"`
+ TrustedPeers []string `yaml:"trustedPeers"`
+ AccessLogRetentionDays int `yaml:"accessLogRetentionDays"`
+ AccessLogCleanupIntervalHours int `yaml:"accessLogCleanupIntervalHours"`
}
// DefaultConfig returns a CombinedConfig with default values
@@ -649,7 +651,9 @@ func (c *CombinedConfig) ToManagementConfig() (*nbconfig.Config, error) {
// Build reverse proxy config
reverseProxy := nbconfig.ReverseProxy{
- TrustedHTTPProxiesCount: mgmt.ReverseProxy.TrustedHTTPProxiesCount,
+ TrustedHTTPProxiesCount: mgmt.ReverseProxy.TrustedHTTPProxiesCount,
+ AccessLogRetentionDays: mgmt.ReverseProxy.AccessLogRetentionDays,
+ AccessLogCleanupIntervalHours: mgmt.ReverseProxy.AccessLogCleanupIntervalHours,
}
for _, p := range mgmt.ReverseProxy.TrustedHTTPProxies {
if prefix, err := netip.ParsePrefix(p); err == nil {
diff --git a/combined/cmd/root.go b/combined/cmd/root.go
index ea1ff908a..db986b4d4 100644
--- a/combined/cmd/root.go
+++ b/combined/cmd/root.go
@@ -29,6 +29,7 @@ import (
"github.com/netbirdio/netbird/management/server/telemetry"
"github.com/netbirdio/netbird/relay/healthcheck"
relayServer "github.com/netbirdio/netbird/relay/server"
+ "github.com/netbirdio/netbird/relay/server/listener"
"github.com/netbirdio/netbird/relay/server/listener/ws"
sharedMetrics "github.com/netbirdio/netbird/shared/metrics"
"github.com/netbirdio/netbird/shared/relay/auth"
@@ -523,7 +524,7 @@ func createManagementServer(cfg *CombinedConfig, mgmtConfig *nbconfig.Config) (*
func createCombinedHandler(grpcServer *grpc.Server, httpHandler http.Handler, relaySrv *relayServer.Server, meter metric.Meter, cfg *CombinedConfig) http.Handler {
wsProxy := wsproxyserver.New(grpcServer, wsproxyserver.WithOTelMeter(meter))
- var relayAcceptFn func(conn net.Conn)
+ var relayAcceptFn func(conn listener.Conn)
if relaySrv != nil {
relayAcceptFn = relaySrv.RelayAccept()
}
@@ -563,7 +564,7 @@ func createCombinedHandler(grpcServer *grpc.Server, httpHandler http.Handler, re
}
// handleRelayWebSocket handles incoming WebSocket connections for the relay service
-func handleRelayWebSocket(w http.ResponseWriter, r *http.Request, acceptFn func(conn net.Conn), cfg *CombinedConfig) {
+func handleRelayWebSocket(w http.ResponseWriter, r *http.Request, acceptFn func(conn listener.Conn), cfg *CombinedConfig) {
acceptOptions := &websocket.AcceptOptions{
OriginPatterns: []string{"*"},
}
@@ -585,15 +586,9 @@ func handleRelayWebSocket(w http.ResponseWriter, r *http.Request, acceptFn func(
return
}
- lAddr, err := net.ResolveTCPAddr("tcp", cfg.Server.ListenAddress)
- if err != nil {
- _ = wsConn.Close(websocket.StatusInternalError, "internal error")
- return
- }
-
log.Debugf("Relay WS client connected from: %s", rAddr)
- conn := ws.NewConn(wsConn, lAddr, rAddr)
+ conn := ws.NewConn(wsConn, rAddr)
acceptFn(conn)
}
diff --git a/flow/client/client.go b/flow/client/client.go
index 318fcfe1e..8ad637974 100644
--- a/flow/client/client.go
+++ b/flow/client/client.go
@@ -14,7 +14,6 @@ import (
log "github.com/sirupsen/logrus"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
- "google.golang.org/grpc/connectivity"
"google.golang.org/grpc/credentials"
"google.golang.org/grpc/credentials/insecure"
"google.golang.org/grpc/keepalive"
@@ -26,11 +25,22 @@ import (
"github.com/netbirdio/netbird/util/wsproxy"
)
+var ErrClientClosed = errors.New("client is closed")
+
+// minHealthyDuration is the minimum time a stream must survive before a failure
+// resets the backoff timer. Streams that fail faster are considered unhealthy and
+// should not reset backoff, so that MaxElapsedTime can eventually stop retries.
+const minHealthyDuration = 5 * time.Second
+
type GRPCClient struct {
realClient proto.FlowServiceClient
clientConn *grpc.ClientConn
stream proto.FlowService_EventsClient
- streamMu sync.Mutex
+ target string
+ opts []grpc.DialOption
+ closed bool // prevents creating a new conn while Close is in progress
+ receiving bool // guards against concurrent Receive calls
+ mu sync.Mutex // protects clientConn, realClient, stream, closed, and receiving
}
func NewClient(addr, payload, signature string, interval time.Duration) (*GRPCClient, error) {
@@ -65,7 +75,8 @@ func NewClient(addr, payload, signature string, interval time.Duration) (*GRPCCl
grpc.WithDefaultServiceConfig(`{"healthCheckConfig": {"serviceName": ""}}`),
)
- conn, err := grpc.NewClient(fmt.Sprintf("%s:%s", parsedURL.Hostname(), parsedURL.Port()), opts...)
+ target := parsedURL.Host
+ conn, err := grpc.NewClient(target, opts...)
if err != nil {
return nil, fmt.Errorf("creating new grpc client: %w", err)
}
@@ -73,30 +84,73 @@ func NewClient(addr, payload, signature string, interval time.Duration) (*GRPCCl
return &GRPCClient{
realClient: proto.NewFlowServiceClient(conn),
clientConn: conn,
+ target: target,
+ opts: opts,
}, nil
}
func (c *GRPCClient) Close() error {
- c.streamMu.Lock()
- defer c.streamMu.Unlock()
-
+ c.mu.Lock()
+ c.closed = true
c.stream = nil
- if err := c.clientConn.Close(); err != nil && !errors.Is(err, context.Canceled) {
+ conn := c.clientConn
+ c.clientConn = nil
+ c.mu.Unlock()
+
+ if conn == nil {
+ return nil
+ }
+
+ if err := conn.Close(); err != nil && !errors.Is(err, context.Canceled) {
return fmt.Errorf("close client connection: %w", err)
}
return nil
}
+func (c *GRPCClient) Send(event *proto.FlowEvent) error {
+ c.mu.Lock()
+ stream := c.stream
+ c.mu.Unlock()
+
+ if stream == nil {
+ return errors.New("stream not initialized")
+ }
+
+ if err := stream.Send(event); err != nil {
+ return fmt.Errorf("send flow event: %w", err)
+ }
+
+ return nil
+}
+
func (c *GRPCClient) Receive(ctx context.Context, interval time.Duration, msgHandler func(msg *proto.FlowEventAck) error) error {
+ c.mu.Lock()
+ if c.receiving {
+ c.mu.Unlock()
+ return errors.New("concurrent Receive calls are not supported")
+ }
+ c.receiving = true
+ c.mu.Unlock()
+ defer func() {
+ c.mu.Lock()
+ c.receiving = false
+ c.mu.Unlock()
+ }()
+
backOff := defaultBackoff(ctx, interval)
operation := func() error {
- if err := c.establishStreamAndReceive(ctx, msgHandler); err != nil {
- if s, ok := status.FromError(err); ok && s.Code() == codes.Canceled {
- return fmt.Errorf("receive: %w: %w", err, context.Canceled)
- }
+ stream, err := c.establishStream(ctx)
+ if err != nil {
+ log.Errorf("failed to establish flow stream, retrying: %v", err)
+ return c.handleRetryableError(err, time.Time{}, backOff)
+ }
+
+ streamStart := time.Now()
+
+ if err := c.receive(stream, msgHandler); err != nil {
log.Errorf("receive failed: %v", err)
- return fmt.Errorf("receive: %w", err)
+ return c.handleRetryableError(err, streamStart, backOff)
}
return nil
}
@@ -108,37 +162,106 @@ func (c *GRPCClient) Receive(ctx context.Context, interval time.Duration, msgHan
return nil
}
-func (c *GRPCClient) establishStreamAndReceive(ctx context.Context, msgHandler func(msg *proto.FlowEventAck) error) error {
- if c.clientConn.GetState() == connectivity.Shutdown {
- return errors.New("connection to flow receiver has been shut down")
+// handleRetryableError resets the backoff timer if the stream was healthy long
+// enough and recreates the underlying ClientConn so that gRPC's internal
+// subchannel backoff does not accumulate and compete with our own retry timer.
+// A zero streamStart means the stream was never established.
+func (c *GRPCClient) handleRetryableError(err error, streamStart time.Time, backOff backoff.BackOff) error {
+ if isContextDone(err) {
+ return backoff.Permanent(err)
}
- stream, err := c.realClient.Events(ctx, grpc.WaitForReady(true))
- if err != nil {
- return fmt.Errorf("create event stream: %w", err)
+ var permErr *backoff.PermanentError
+ if errors.As(err, &permErr) {
+ return err
}
- err = stream.Send(&proto.FlowEvent{IsInitiator: true})
+ // Reset the backoff so the next retry starts with a short delay instead of
+ // continuing the already-elapsed timer. Only do this if the stream was healthy
+ // long enough; short-lived connect/drop cycles must not defeat MaxElapsedTime.
+ if !streamStart.IsZero() && time.Since(streamStart) >= minHealthyDuration {
+ backOff.Reset()
+ }
+
+ if recreateErr := c.recreateConnection(); recreateErr != nil {
+ log.Errorf("recreate connection: %v", recreateErr)
+ return recreateErr
+ }
+
+ log.Infof("connection recreated, retrying stream")
+ return fmt.Errorf("retrying after error: %w", err)
+}
+
+func (c *GRPCClient) recreateConnection() error {
+ c.mu.Lock()
+ if c.closed {
+ c.mu.Unlock()
+ return backoff.Permanent(ErrClientClosed)
+ }
+
+ conn, err := grpc.NewClient(c.target, c.opts...)
if err != nil {
- log.Infof("failed to send initiator message to flow receiver but will attempt to continue. Error: %s", err)
+ c.mu.Unlock()
+ return fmt.Errorf("create new connection: %w", err)
+ }
+
+ old := c.clientConn
+ c.clientConn = conn
+ c.realClient = proto.NewFlowServiceClient(conn)
+ c.stream = nil
+ c.mu.Unlock()
+
+ _ = old.Close()
+
+ return nil
+}
+
+func (c *GRPCClient) establishStream(ctx context.Context) (proto.FlowService_EventsClient, error) {
+ c.mu.Lock()
+ if c.closed {
+ c.mu.Unlock()
+ return nil, backoff.Permanent(ErrClientClosed)
+ }
+ cl := c.realClient
+ c.mu.Unlock()
+
+ // open stream outside the lock — blocking operation
+ stream, err := cl.Events(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("create event stream: %w", err)
+ }
+ streamReady := false
+ defer func() {
+ if !streamReady {
+ _ = stream.CloseSend()
+ }
+ }()
+
+ if err = stream.Send(&proto.FlowEvent{IsInitiator: true}); err != nil {
+ return nil, fmt.Errorf("send initiator: %w", err)
}
if err = checkHeader(stream); err != nil {
- return fmt.Errorf("check header: %w", err)
+ return nil, fmt.Errorf("check header: %w", err)
}
- c.streamMu.Lock()
+ c.mu.Lock()
+ if c.closed {
+ c.mu.Unlock()
+ return nil, backoff.Permanent(ErrClientClosed)
+ }
c.stream = stream
- c.streamMu.Unlock()
+ c.mu.Unlock()
+ streamReady = true
- return c.receive(stream, msgHandler)
+ return stream, nil
}
func (c *GRPCClient) receive(stream proto.FlowService_EventsClient, msgHandler func(msg *proto.FlowEventAck) error) error {
for {
msg, err := stream.Recv()
if err != nil {
- return fmt.Errorf("receive from stream: %w", err)
+ return err
}
if msg.IsInitiator {
@@ -169,7 +292,7 @@ func checkHeader(stream proto.FlowService_EventsClient) error {
func defaultBackoff(ctx context.Context, interval time.Duration) backoff.BackOff {
return backoff.WithContext(&backoff.ExponentialBackOff{
InitialInterval: 800 * time.Millisecond,
- RandomizationFactor: 1,
+ RandomizationFactor: 0.5,
Multiplier: 1.7,
MaxInterval: interval / 2,
MaxElapsedTime: 3 * 30 * 24 * time.Hour, // 3 months
@@ -178,18 +301,12 @@ func defaultBackoff(ctx context.Context, interval time.Duration) backoff.BackOff
}, ctx)
}
-func (c *GRPCClient) Send(event *proto.FlowEvent) error {
- c.streamMu.Lock()
- stream := c.stream
- c.streamMu.Unlock()
-
- if stream == nil {
- return errors.New("stream not initialized")
+func isContextDone(err error) bool {
+ if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+ return true
}
-
- if err := stream.Send(event); err != nil {
- return fmt.Errorf("send flow event: %w", err)
+ if s, ok := status.FromError(err); ok {
+ return s.Code() == codes.Canceled || s.Code() == codes.DeadlineExceeded
}
-
- return nil
+ return false
}
diff --git a/flow/client/client_test.go b/flow/client/client_test.go
index efe01c003..55157acbc 100644
--- a/flow/client/client_test.go
+++ b/flow/client/client_test.go
@@ -2,8 +2,11 @@ package client_test
import (
"context"
+ "encoding/binary"
"errors"
"net"
+ "sync"
+ "sync/atomic"
"testing"
"time"
@@ -11,6 +14,8 @@ import (
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
+ "google.golang.org/grpc/codes"
+ "google.golang.org/grpc/status"
flow "github.com/netbirdio/netbird/flow/client"
"github.com/netbirdio/netbird/flow/proto"
@@ -18,21 +23,89 @@ import (
type testServer struct {
proto.UnimplementedFlowServiceServer
- events chan *proto.FlowEvent
- acks chan *proto.FlowEventAck
- grpcSrv *grpc.Server
- addr string
+ events chan *proto.FlowEvent
+ acks chan *proto.FlowEventAck
+ grpcSrv *grpc.Server
+ addr string
+ listener *connTrackListener
+ closeStream chan struct{} // signal server to close the stream
+ handlerDone chan struct{} // signaled each time Events() exits
+ handlerStarted chan struct{} // signaled each time Events() begins
+}
+
+// connTrackListener wraps a net.Listener to track accepted connections
+// so tests can forcefully close them to simulate PROTOCOL_ERROR/RST_STREAM.
+type connTrackListener struct {
+ net.Listener
+ mu sync.Mutex
+ conns []net.Conn
+}
+
+func (l *connTrackListener) Accept() (net.Conn, error) {
+ c, err := l.Listener.Accept()
+ if err != nil {
+ return nil, err
+ }
+ l.mu.Lock()
+ l.conns = append(l.conns, c)
+ l.mu.Unlock()
+ return c, nil
+}
+
+// connCount returns the number of connections accepted so far.
+func (l *connTrackListener) connCount() int {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+ return len(l.conns)
+}
+
+// sendRSTStream writes a raw HTTP/2 RST_STREAM frame with PROTOCOL_ERROR
+// (error code 0x1) on every tracked connection. This produces the exact error:
+//
+// rpc error: code = Internal desc = stream terminated by RST_STREAM with error code: PROTOCOL_ERROR
+//
+// HTTP/2 RST_STREAM frame format (9-byte header + 4-byte payload):
+//
+// Length (3 bytes): 0x000004
+// Type (1 byte): 0x03 (RST_STREAM)
+// Flags (1 byte): 0x00
+// Stream ID (4 bytes): target stream (must have bit 31 clear)
+// Error Code (4 bytes): 0x00000001 (PROTOCOL_ERROR)
+func (l *connTrackListener) sendRSTStream(streamID uint32) {
+ l.mu.Lock()
+ defer l.mu.Unlock()
+
+ frame := make([]byte, 13) // 9-byte header + 4-byte payload
+ // Length = 4 (3 bytes, big-endian)
+ frame[0], frame[1], frame[2] = 0, 0, 4
+ // Type = RST_STREAM (0x03)
+ frame[3] = 0x03
+ // Flags = 0
+ frame[4] = 0x00
+ // Stream ID (4 bytes, big-endian, bit 31 reserved = 0)
+ binary.BigEndian.PutUint32(frame[5:9], streamID)
+ // Error Code = PROTOCOL_ERROR (0x1)
+ binary.BigEndian.PutUint32(frame[9:13], 0x1)
+
+ for _, c := range l.conns {
+ _, _ = c.Write(frame)
+ }
}
func newTestServer(t *testing.T) *testServer {
- listener, err := net.Listen("tcp", "127.0.0.1:0")
+ rawListener, err := net.Listen("tcp", "127.0.0.1:0")
require.NoError(t, err)
+ listener := &connTrackListener{Listener: rawListener}
+
s := &testServer{
- events: make(chan *proto.FlowEvent, 100),
- acks: make(chan *proto.FlowEventAck, 100),
- grpcSrv: grpc.NewServer(),
- addr: listener.Addr().String(),
+ events: make(chan *proto.FlowEvent, 100),
+ acks: make(chan *proto.FlowEventAck, 100),
+ grpcSrv: grpc.NewServer(),
+ addr: rawListener.Addr().String(),
+ listener: listener,
+ closeStream: make(chan struct{}, 1),
+ handlerDone: make(chan struct{}, 10),
+ handlerStarted: make(chan struct{}, 10),
}
proto.RegisterFlowServiceServer(s.grpcSrv, s)
@@ -51,11 +124,23 @@ func newTestServer(t *testing.T) *testServer {
}
func (s *testServer) Events(stream proto.FlowService_EventsServer) error {
+ defer func() {
+ select {
+ case s.handlerDone <- struct{}{}:
+ default:
+ }
+ }()
+
err := stream.Send(&proto.FlowEventAck{IsInitiator: true})
if err != nil {
return err
}
+ select {
+ case s.handlerStarted <- struct{}{}:
+ default:
+ }
+
ctx, cancel := context.WithCancel(stream.Context())
defer cancel()
@@ -91,6 +176,8 @@ func (s *testServer) Events(stream proto.FlowService_EventsServer) error {
if err := stream.Send(ack); err != nil {
return err
}
+ case <-s.closeStream:
+ return status.Errorf(codes.Internal, "server closing stream")
case <-ctx.Done():
return ctx.Err()
}
@@ -110,16 +197,13 @@ func TestReceive(t *testing.T) {
assert.NoError(t, err, "failed to close flow")
})
- receivedAcks := make(map[string]bool)
+ var ackCount atomic.Int32
receiveDone := make(chan struct{})
go func() {
err := client.Receive(ctx, 1*time.Second, func(msg *proto.FlowEventAck) error {
if !msg.IsInitiator && len(msg.EventId) > 0 {
- id := string(msg.EventId)
- receivedAcks[id] = true
-
- if len(receivedAcks) >= 3 {
+ if ackCount.Add(1) >= 3 {
close(receiveDone)
}
}
@@ -130,7 +214,11 @@ func TestReceive(t *testing.T) {
}
}()
- time.Sleep(500 * time.Millisecond)
+ select {
+ case <-server.handlerStarted:
+ case <-time.After(3 * time.Second):
+ t.Fatal("timeout waiting for stream to be established")
+ }
for i := 0; i < 3; i++ {
eventID := uuid.New().String()
@@ -153,7 +241,7 @@ func TestReceive(t *testing.T) {
t.Fatal("timeout waiting for acks to be processed")
}
- assert.Equal(t, 3, len(receivedAcks))
+ assert.Equal(t, int32(3), ackCount.Load())
}
func TestReceive_ContextCancellation(t *testing.T) {
@@ -254,3 +342,195 @@ func TestSend(t *testing.T) {
t.Fatal("timeout waiting for ack to be received by flow")
}
}
+
+func TestNewClient_PermanentClose(t *testing.T) {
+ server := newTestServer(t)
+
+ client, err := flow.NewClient("http://"+server.addr, "test-payload", "test-signature", 1*time.Second)
+ require.NoError(t, err)
+
+ err = client.Close()
+ require.NoError(t, err)
+
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ t.Cleanup(cancel)
+
+ done := make(chan error, 1)
+ go func() {
+ done <- client.Receive(ctx, 1*time.Second, func(msg *proto.FlowEventAck) error {
+ return nil
+ })
+ }()
+
+ select {
+ case err := <-done:
+ require.ErrorIs(t, err, flow.ErrClientClosed)
+ case <-time.After(2 * time.Second):
+ t.Fatal("Receive did not return after Close — stuck in retry loop")
+ }
+}
+
+func TestNewClient_CloseVerify(t *testing.T) {
+ server := newTestServer(t)
+
+ client, err := flow.NewClient("http://"+server.addr, "test-payload", "test-signature", 1*time.Second)
+ require.NoError(t, err)
+
+ ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+ t.Cleanup(cancel)
+
+ done := make(chan error, 1)
+ go func() {
+ done <- client.Receive(ctx, 1*time.Second, func(msg *proto.FlowEventAck) error {
+ return nil
+ })
+ }()
+
+ closeDone := make(chan struct{}, 1)
+ go func() {
+ _ = client.Close()
+ closeDone <- struct{}{}
+ }()
+
+ select {
+ case err := <-done:
+ require.Error(t, err)
+ case <-time.After(2 * time.Second):
+ t.Fatal("Receive did not return after Close — stuck in retry loop")
+ }
+
+ select {
+ case <-closeDone:
+ return
+ case <-time.After(2 * time.Second):
+ t.Fatal("Close did not return — blocked in retry loop")
+ }
+
+}
+
+func TestClose_WhileReceiving(t *testing.T) {
+ server := newTestServer(t)
+ client, err := flow.NewClient("http://"+server.addr, "test-payload", "test-signature", 1*time.Second)
+ require.NoError(t, err)
+
+ ctx := context.Background() // no timeout — intentional
+ receiveDone := make(chan struct{})
+ go func() {
+ _ = client.Receive(ctx, 1*time.Second, func(msg *proto.FlowEventAck) error {
+ return nil
+ })
+ close(receiveDone)
+ }()
+
+ // Wait for the server-side handler to confirm the stream is established.
+ select {
+ case <-server.handlerStarted:
+ case <-time.After(3 * time.Second):
+ t.Fatal("timeout waiting for stream to be established")
+ }
+
+ closeDone := make(chan struct{})
+ go func() {
+ _ = client.Close()
+ close(closeDone)
+ }()
+
+ select {
+ case <-closeDone:
+ // Close returned — good
+ case <-time.After(2 * time.Second):
+ t.Fatal("Close blocked forever — Receive stuck in retry loop")
+ }
+
+ select {
+ case <-receiveDone:
+ case <-time.After(2 * time.Second):
+ t.Fatal("Receive did not exit after Close")
+ }
+}
+
+func TestReceive_ProtocolErrorStreamReconnect(t *testing.T) {
+ server := newTestServer(t)
+
+ ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+ t.Cleanup(cancel)
+
+ client, err := flow.NewClient("http://"+server.addr, "test-payload", "test-signature", 1*time.Second)
+ require.NoError(t, err)
+ t.Cleanup(func() {
+ err := client.Close()
+ assert.NoError(t, err, "failed to close flow")
+ })
+
+ // Track acks received before and after server-side stream close
+ var ackCount atomic.Int32
+ receivedFirst := make(chan struct{})
+ receivedAfterReconnect := make(chan struct{})
+
+ go func() {
+ err := client.Receive(ctx, 1*time.Second, func(msg *proto.FlowEventAck) error {
+ if msg.IsInitiator || len(msg.EventId) == 0 {
+ return nil
+ }
+ n := ackCount.Add(1)
+ if n == 1 {
+ close(receivedFirst)
+ }
+ if n == 2 {
+ close(receivedAfterReconnect)
+ }
+ return nil
+ })
+ if err != nil && !errors.Is(err, context.Canceled) {
+ t.Logf("receive error: %v", err)
+ }
+ }()
+
+ // Wait for stream to be established, then send first ack
+ select {
+ case <-server.handlerStarted:
+ case <-time.After(3 * time.Second):
+ t.Fatal("timeout waiting for stream to be established")
+ }
+ server.acks <- &proto.FlowEventAck{EventId: []byte("before-close")}
+
+ select {
+ case <-receivedFirst:
+ case <-time.After(3 * time.Second):
+ t.Fatal("timeout waiting for first ack")
+ }
+
+ // Snapshot connection count before injecting the fault.
+ connsBefore := server.listener.connCount()
+
+ // Send a raw HTTP/2 RST_STREAM frame with PROTOCOL_ERROR on the TCP connection.
+ // gRPC multiplexes streams on stream IDs 1, 3, 5, ... (odd, client-initiated).
+ // Stream ID 1 is the client's first stream (our Events bidi stream).
+ // This produces the exact error the client sees in production:
+ // "stream terminated by RST_STREAM with error code: PROTOCOL_ERROR"
+ server.listener.sendRSTStream(1)
+
+ // Wait for the old Events() handler to fully exit so it can no longer
+ // drain s.acks and drop our injected ack on a broken stream.
+ select {
+ case <-server.handlerDone:
+ case <-time.After(5 * time.Second):
+ t.Fatal("old Events() handler did not exit after RST_STREAM")
+ }
+
+ require.Eventually(t, func() bool {
+ return server.listener.connCount() > connsBefore
+ }, 5*time.Second, 50*time.Millisecond, "client did not open a new TCP connection after RST_STREAM")
+
+ server.acks <- &proto.FlowEventAck{EventId: []byte("after-close")}
+
+ select {
+ case <-receivedAfterReconnect:
+ // Client successfully reconnected and received ack after server-side stream close
+ case <-time.After(5 * time.Second):
+ t.Fatal("timeout waiting for ack after server-side stream close — client did not reconnect")
+ }
+
+ assert.GreaterOrEqual(t, int(ackCount.Load()), 2, "should have received acks before and after stream close")
+ assert.GreaterOrEqual(t, server.listener.connCount(), 2, "client should have created at least 2 TCP connections (original + reconnect)")
+}
diff --git a/go.mod b/go.mod
index 5d495210e..127aa7cb1 100644
--- a/go.mod
+++ b/go.mod
@@ -13,7 +13,7 @@ require (
github.com/onsi/ginkgo v1.16.5
github.com/onsi/gomega v1.27.6
github.com/rs/cors v1.8.0
- github.com/sirupsen/logrus v1.9.3
+ github.com/sirupsen/logrus v1.9.4
github.com/spf13/cobra v1.10.2
github.com/spf13/pflag v1.0.9
github.com/vishvananda/netlink v1.3.1
@@ -24,17 +24,17 @@ require (
golang.zx2c4.com/wireguard/windows v0.5.3
google.golang.org/grpc v1.80.0
google.golang.org/protobuf v1.36.11
- gopkg.in/natefinch/lumberjack.v2 v2.0.0
+ gopkg.in/natefinch/lumberjack.v2 v2.2.1
)
require (
fyne.io/fyne/v2 v2.7.0
fyne.io/systray v1.12.1-0.20260116214250-81f8e1a496f9
github.com/awnumar/memguard v0.23.0
- github.com/aws/aws-sdk-go-v2 v1.36.3
- github.com/aws/aws-sdk-go-v2/config v1.29.14
- github.com/aws/aws-sdk-go-v2/credentials v1.17.67
- github.com/aws/aws-sdk-go-v2/service/s3 v1.79.2
+ github.com/aws/aws-sdk-go-v2 v1.38.3
+ github.com/aws/aws-sdk-go-v2/config v1.31.6
+ github.com/aws/aws-sdk-go-v2/credentials v1.18.10
+ github.com/aws/aws-sdk-go-v2/service/s3 v1.87.3
github.com/c-robinson/iplib v1.0.3
github.com/caddyserver/certmagic v0.21.3
github.com/cilium/ebpf v0.15.0
@@ -42,6 +42,8 @@ require (
github.com/coreos/go-iptables v0.7.0
github.com/coreos/go-oidc/v3 v3.18.0
github.com/creack/pty v1.1.24
+ github.com/crowdsecurity/crowdsec v1.7.7
+ github.com/crowdsecurity/go-cs-bouncer v0.0.21
github.com/dexidp/dex v2.13.0+incompatible
github.com/dexidp/dex/api/v2 v2.4.0
github.com/eko/gocache/lib/v4 v4.2.0
@@ -60,9 +62,10 @@ require (
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.2-0.20240212192251-757544f21357
github.com/hashicorp/go-multierror v1.1.1
github.com/hashicorp/go-secure-stdlib/base62 v0.1.2
- github.com/hashicorp/go-version v1.6.0
+ github.com/hashicorp/go-version v1.7.0
github.com/jackc/pgx/v5 v5.5.5
github.com/libdns/route53 v1.5.0
+ github.com/libp2p/go-nat v0.2.0
github.com/libp2p/go-netroute v0.2.1
github.com/lrh3321/ipset-go v0.0.0-20250619021614-54a0a98ace81
github.com/mdlayher/socket v0.5.1
@@ -110,7 +113,7 @@ require (
go.uber.org/mock v0.6.0
go.uber.org/zap v1.27.0
goauthentik.io/api/v3 v3.2023051.3
- golang.org/x/exp v0.0.0-20240823005443-9b4947da3948
+ golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b
golang.org/x/mobile v0.0.0-20251113184115-a159579294ab
golang.org/x/mod v0.34.0
golang.org/x/net v0.53.0
@@ -143,37 +146,40 @@ require (
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be // indirect
github.com/apapsch/go-jsonmerge/v2 v2.0.0 // indirect
+ github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/awnumar/memcall v0.4.0 // indirect
- github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 // indirect
- github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 // indirect
- github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 // indirect
- github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 // indirect
+ github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1 // indirect
+ github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.6 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.6 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.6 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect
- github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 // indirect
- github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 // indirect
+ github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.6 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.6 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.6 // indirect
+ github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.6 // indirect
github.com/aws/aws-sdk-go-v2/service/route53 v1.42.3 // indirect
- github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 // indirect
- github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 // indirect
- github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 // indirect
- github.com/aws/smithy-go v1.22.2 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sso v1.29.1 // indirect
+ github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.2 // indirect
+ github.com/aws/aws-sdk-go-v2/service/sts v1.38.2 // indirect
+ github.com/aws/smithy-go v1.23.0 // indirect
github.com/beevik/etree v1.6.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect
github.com/caddyserver/zerossl v0.1.3 // indirect
+ github.com/cenkalti/backoff/v5 v5.0.3 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/containerd/log v0.1.0 // indirect
github.com/containerd/platforms v0.2.1 // indirect
github.com/cpuguy83/dockercfg v0.3.2 // indirect
+ github.com/crowdsecurity/go-cs-lib v0.0.25 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/distribution/reference v0.6.0 // indirect
github.com/docker/docker v28.0.1+incompatible // indirect
- github.com/docker/go-connections v0.5.0 // indirect
+ github.com/docker/go-connections v0.6.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
- github.com/ebitengine/purego v0.8.2 // indirect
+ github.com/ebitengine/purego v0.8.4 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/fredbi/uri v1.1.1 // indirect
github.com/fxamacker/cbor/v2 v2.9.1 // indirect
@@ -188,14 +194,26 @@ require (
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-ole/go-ole v1.3.0 // indirect
+ github.com/go-openapi/analysis v0.23.0 // indirect
+ github.com/go-openapi/errors v0.22.2 // indirect
+ github.com/go-openapi/jsonpointer v0.21.1 // indirect
+ github.com/go-openapi/jsonreference v0.21.0 // indirect
+ github.com/go-openapi/loads v0.22.0 // indirect
+ github.com/go-openapi/spec v0.21.0 // indirect
+ github.com/go-openapi/strfmt v0.23.0 // indirect
+ github.com/go-openapi/swag v0.23.1 // indirect
+ github.com/go-openapi/validate v0.24.0 // indirect
github.com/go-sql-driver/mysql v1.9.3 // indirect
github.com/go-text/render v0.2.0 // indirect
github.com/go-text/typesetting v0.2.1 // indirect
github.com/go-viper/mapstructure/v2 v2.5.0 // indirect
github.com/go-webauthn/webauthn v0.16.4 // indirect
github.com/go-webauthn/x v0.2.3 // indirect
+ github.com/goccy/go-yaml v1.18.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
+ github.com/golang-jwt/jwt/v4 v4.5.2 // indirect
github.com/google/btree v1.1.2 // indirect
+ github.com/google/go-querystring v1.1.0 // indirect
github.com/google/go-tpm v0.9.8 // indirect
github.com/google/s2a-go v0.1.9 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.14 // indirect
@@ -212,24 +230,29 @@ require (
github.com/hashicorp/go-uuid v1.0.3 // indirect
github.com/hashicorp/hcl v1.0.1-vault-7 // indirect
github.com/huandu/xstrings v1.5.0 // indirect
+ github.com/huin/goupnp v1.2.0 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect
github.com/jackc/puddle/v2 v2.2.1 // indirect
+ github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jeandeaual/go-locale v0.0.0-20250612000132-0ef82f21eade // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/jonboulle/clockwork v0.5.0 // indirect
+ github.com/josharian/intern v1.0.0 // indirect
github.com/jsummers/gobmp v0.0.0-20230614200233-a9de23ed2e25 // indirect
github.com/kelseyhightower/envconfig v1.4.0 // indirect
github.com/klauspost/compress v1.18.0 // indirect
- github.com/klauspost/cpuid/v2 v2.2.7 // indirect
+ github.com/klauspost/cpuid/v2 v2.2.10 // indirect
+ github.com/koron/go-ssdp v0.0.4 // indirect
github.com/kr/fs v0.1.0 // indirect
github.com/lib/pq v1.12.3 // indirect
github.com/libdns/libdns v0.2.2 // indirect
github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae // indirect
github.com/magiconair/properties v1.8.10 // indirect
+ github.com/mailru/easyjson v0.9.0 // indirect
github.com/mattermost/xml-roundtrip-validator v0.1.0 // indirect
github.com/mattn/go-sqlite3 v1.14.42 // indirect
github.com/mdelapenya/tlscert v0.2.0 // indirect
@@ -249,7 +272,8 @@ require (
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646 // indirect
github.com/nicksnyder/go-i18n/v2 v2.5.1 // indirect
- github.com/nxadm/tail v1.4.8 // indirect
+ github.com/nxadm/tail v1.4.11 // indirect
+ github.com/oklog/ulid v1.3.1 // indirect
github.com/onsi/ginkgo/v2 v2.9.5 // indirect
github.com/openbao/openbao/api/v2 v2.5.1 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
@@ -271,7 +295,7 @@ require (
github.com/russellhaering/goxmldsig v1.6.0 // indirect
github.com/ryanuber/go-glob v1.0.0 // indirect
github.com/rymdport/portal v0.4.2 // indirect
- github.com/shirou/gopsutil/v4 v4.25.1 // indirect
+ github.com/shirou/gopsutil/v4 v4.25.8 // indirect
github.com/shoenig/go-m1cpu v0.2.1 // indirect
github.com/shopspring/decimal v1.4.0 // indirect
github.com/spf13/cast v1.7.0 // indirect
@@ -279,14 +303,15 @@ require (
github.com/srwiley/rasterx v0.0.0-20220730225603-2ab79fcdd4ef // indirect
github.com/stretchr/objx v0.5.2 // indirect
github.com/tinylib/msgp v1.6.3 // indirect
- github.com/tklauser/go-sysconf v0.3.14 // indirect
- github.com/tklauser/numcpus v0.8.0 // indirect
+ github.com/tklauser/go-sysconf v0.3.15 // indirect
+ github.com/tklauser/numcpus v0.10.0 // indirect
github.com/vishvananda/netns v0.0.5 // indirect
github.com/vmihailenco/tagparser/v2 v2.0.0 // indirect
github.com/wlynxg/anet v0.0.5 // indirect
github.com/x448/float16 v0.8.4 // indirect
github.com/yuin/goldmark v1.7.8 // indirect
github.com/zeebo/blake3 v0.2.3 // indirect
+ go.mongodb.org/mongo-driver v1.17.9 // indirect
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0 // indirect
go.opentelemetry.io/otel/sdk v1.43.0 // indirect
@@ -300,6 +325,7 @@ require (
google.golang.org/genproto/googleapis/rpc v0.0.0-20260401024825-9d38bb4040a9 // indirect
gopkg.in/square/go-jose.v2 v2.6.0 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
+ gopkg.in/yaml.v2 v2.4.0 // indirect
)
replace github.com/kardianos/service => github.com/netbirdio/service v0.0.0-20240911161631-f62744f42502
diff --git a/go.sum b/go.sum
index 0ca3b08e8..aa9f349b0 100644
--- a/go.sum
+++ b/go.sum
@@ -42,48 +42,50 @@ github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFI
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
github.com/apapsch/go-jsonmerge/v2 v2.0.0 h1:axGnT1gRIfimI7gJifB699GoE/oq+F2MU7Dml6nw9rQ=
github.com/apapsch/go-jsonmerge/v2 v2.0.0/go.mod h1:lvDnEdqiQrp0O42VQGgmlKpxL1AP2+08jFMw88y4klk=
+github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so=
+github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw=
github.com/awnumar/memcall v0.4.0 h1:B7hgZYdfH6Ot1Goaz8jGne/7i8xD4taZie/PNSFZ29g=
github.com/awnumar/memcall v0.4.0/go.mod h1:8xOx1YbfyuCg3Fy6TO8DK0kZUua3V42/goA5Ru47E8w=
github.com/awnumar/memguard v0.23.0 h1:sJ3a1/SWlcuKIQ7MV+R9p0Pvo9CWsMbGZvcZQtmc68A=
github.com/awnumar/memguard v0.23.0/go.mod h1:olVofBrsPdITtJ2HgxQKrEYEMyIBAIciVG4wNnZhW9M=
-github.com/aws/aws-sdk-go-v2 v1.36.3 h1:mJoei2CxPutQVxaATCzDUjcZEjVRdpsiiXi2o38yqWM=
-github.com/aws/aws-sdk-go-v2 v1.36.3/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg=
-github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 h1:zAybnyUQXIZ5mok5Jqwlf58/TFE7uvd3IAsa1aF9cXs=
-github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10/go.mod h1:qqvMj6gHLR/EXWZw4ZbqlPbQUyenf4h82UQUlKc+l14=
-github.com/aws/aws-sdk-go-v2/config v1.29.14 h1:f+eEi/2cKCg9pqKBoAIwRGzVb70MRKqWX4dg1BDcSJM=
-github.com/aws/aws-sdk-go-v2/config v1.29.14/go.mod h1:wVPHWcIFv3WO89w0rE10gzf17ZYy+UVS1Geq8Iei34g=
-github.com/aws/aws-sdk-go-v2/credentials v1.17.67 h1:9KxtdcIA/5xPNQyZRgUSpYOE6j9Bc4+D7nZua0KGYOM=
-github.com/aws/aws-sdk-go-v2/credentials v1.17.67/go.mod h1:p3C44m+cfnbv763s52gCqrjaqyPikj9Sg47kUVaNZQQ=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30 h1:x793wxmUWVDhshP8WW2mlnXuFrO4cOd3HLBroh1paFw=
-github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.30/go.mod h1:Jpne2tDnYiFascUEs2AWHJL9Yp7A5ZVy3TNyxaAjD6M=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34 h1:ZK5jHhnrioRkUNOc+hOgQKlUL5JeC3S6JgLxtQ+Rm0Q=
-github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.34/go.mod h1:p4VfIceZokChbA9FzMbRGz5OV+lekcVtHlPKEO0gSZY=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34 h1:SZwFm17ZUNNg5Np0ioo/gq8Mn6u9w19Mri8DnJ15Jf0=
-github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.34/go.mod h1:dFZsC0BLo346mvKQLWmoJxT+Sjp+qcVR1tRVHQGOH9Q=
+github.com/aws/aws-sdk-go-v2 v1.38.3 h1:B6cV4oxnMs45fql4yRH+/Po/YU+597zgWqvDpYMturk=
+github.com/aws/aws-sdk-go-v2 v1.38.3/go.mod h1:sDioUELIUO9Znk23YVmIk86/9DOpkbyyVb1i/gUNFXY=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1 h1:i8p8P4diljCr60PpJp6qZXNlgX4m2yQFpYk+9ZT+J4E=
+github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.7.1/go.mod h1:ddqbooRZYNoJ2dsTwOty16rM+/Aqmk/GOXrK8cg7V00=
+github.com/aws/aws-sdk-go-v2/config v1.31.6 h1:a1t8fXY4GT4xjyJExz4knbuoxSCacB5hT/WgtfPyLjo=
+github.com/aws/aws-sdk-go-v2/config v1.31.6/go.mod h1:5ByscNi7R+ztvOGzeUaIu49vkMk2soq5NaH5PYe33MQ=
+github.com/aws/aws-sdk-go-v2/credentials v1.18.10 h1:xdJnXCouCx8Y0NncgoptztUocIYLKeQxrCgN6x9sdhg=
+github.com/aws/aws-sdk-go-v2/credentials v1.18.10/go.mod h1:7tQk08ntj914F/5i9jC4+2HQTAuJirq7m1vZVIhEkWs=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.6 h1:wbjnrrMnKew78/juW7I2BtKQwa1qlf6EjQgS69uYY14=
+github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.6/go.mod h1:AtiqqNrDioJXuUgz3+3T0mBWN7Hro2n9wll2zRUc0ww=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.6 h1:uF68eJA6+S9iVr9WgX1NaRGyQ/6MdIyc4JNUo6TN1FA=
+github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.6/go.mod h1:qlPeVZCGPiobx8wb1ft0GHT5l+dc6ldnwInDFaMvC7Y=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.6 h1:pa1DEC6JoI0zduhZePp3zmhWvk/xxm4NB8Hy/Tlsgos=
+github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.6/go.mod h1:gxEjPebnhWGJoaDdtDkA0JX46VRg1wcTHYe63OfX5pE=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo=
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34 h1:ZNTqv4nIdE/DiBfUUfXcLZ/Spcuz+RjeziUtNJackkM=
-github.com/aws/aws-sdk-go-v2/internal/v4a v1.3.34/go.mod h1:zf7Vcd1ViW7cPqYWEHLHJkS50X0JS2IKz9Cgaj6ugrs=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3 h1:eAh2A4b5IzM/lum78bZ590jy36+d/aFLgKF/4Vd1xPE=
-github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.3/go.mod h1:0yKJC/kb8sAnmlYa6Zs3QVYqaC8ug2AbnNChv5Ox3uA=
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0 h1:lguz0bmOoGzozP9XfRJR1QIayEYo+2vP/No3OfLF0pU=
-github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.7.0/go.mod h1:iu6FSzgt+M2/x3Dk8zhycdIcHjEFb36IS8HVUVFoMg0=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15 h1:dM9/92u2F1JbDaGooxTq18wmmFzbJRfXfVfy96/1CXM=
-github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.15/go.mod h1:SwFBy2vjtA0vZbjjaFtfN045boopadnoVPhu4Fv66vY=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15 h1:moLQUoVq91LiqT1nbvzDukyqAlCv89ZmwaHw/ZFlFZg=
-github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.18.15/go.mod h1:ZH34PJUc8ApjBIfgQCFvkWcUDBtl/WTD+uiYHjd8igA=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.6 h1:R0tNFJqfjHL3900cqhXuwQ+1K4G0xc9Yf8EDbFXCKEw=
+github.com/aws/aws-sdk-go-v2/internal/v4a v1.4.6/go.mod h1:y/7sDdu+aJvPtGXr4xYosdpq9a6T9Z0jkXfugmti0rI=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1 h1:oegbebPEMA/1Jny7kvwejowCaHz1FWZAQ94WXFNCyTM=
+github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.1/go.mod h1:kemo5Myr9ac0U9JfSjMo9yHLtw+pECEHsFtJ9tqCEI8=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.6 h1:hncKj/4gR+TPauZgTAsxOxNcvBayhUlYZ6LO/BYiQ30=
+github.com/aws/aws-sdk-go-v2/service/internal/checksum v1.8.6/go.mod h1:OiIh45tp6HdJDDJGnja0mw8ihQGz3VGrUflLqSL0SmM=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.6 h1:LHS1YAIJXJ4K9zS+1d/xa9JAA9sL2QyXIQCQFQW/X08=
+github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.6/go.mod h1:c9PCiTEuh0wQID5/KqA32J+HAgZxN9tOGXKCiYJjTZI=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.6 h1:nEXUSAwyUfLTgnc9cxlDWy637qsq4UWwp3sNAfl0Z3Y=
+github.com/aws/aws-sdk-go-v2/service/internal/s3shared v1.19.6/go.mod h1:HGzIULx4Ge3Do2V0FaiYKcyKzOqwrhUZgCI77NisswQ=
github.com/aws/aws-sdk-go-v2/service/route53 v1.42.3 h1:MmLCRqP4U4Cw9gJ4bNrCG0mWqEtBlmAVleyelcHARMU=
github.com/aws/aws-sdk-go-v2/service/route53 v1.42.3/go.mod h1:AMPjK2YnRh0YgOID3PqhJA1BRNfXDfGOnSsKHtAe8yA=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.79.2 h1:tWUG+4wZqdMl/znThEk9tcCy8tTMxq8dW0JTgamohrY=
-github.com/aws/aws-sdk-go-v2/service/s3 v1.79.2/go.mod h1:U5SNqwhXB3Xe6F47kXvWihPl/ilGaEDe8HD/50Z9wxc=
-github.com/aws/aws-sdk-go-v2/service/sso v1.25.3 h1:1Gw+9ajCV1jogloEv1RRnvfRFia2cL6c9cuKV2Ps+G8=
-github.com/aws/aws-sdk-go-v2/service/sso v1.25.3/go.mod h1:qs4a9T5EMLl/Cajiw2TcbNt2UNo/Hqlyp+GiuG4CFDI=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1 h1:hXmVKytPfTy5axZ+fYbR5d0cFmC3JvwLm5kM83luako=
-github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.1/go.mod h1:MlYRNmYu/fGPoxBQVvBYr9nyr948aY/WLUvwBMBJubs=
-github.com/aws/aws-sdk-go-v2/service/sts v1.33.19 h1:1XuUZ8mYJw9B6lzAkXhqHlJd/XvaX32evhproijJEZY=
-github.com/aws/aws-sdk-go-v2/service/sts v1.33.19/go.mod h1:cQnB8CUnxbMU82JvlqjKR2HBOm3fe9pWorWBza6MBJ4=
-github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ=
-github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.87.3 h1:ETkfWcXP2KNPLecaDa++5bsQhCRa5M5sLUJa5DWYIIg=
+github.com/aws/aws-sdk-go-v2/service/s3 v1.87.3/go.mod h1:+/3ZTqoYb3Ur7DObD00tarKMLMuKg8iqz5CHEanqTnw=
+github.com/aws/aws-sdk-go-v2/service/sso v1.29.1 h1:8OLZnVJPvjnrxEwHFg9hVUof/P4sibH+Ea4KKuqAGSg=
+github.com/aws/aws-sdk-go-v2/service/sso v1.29.1/go.mod h1:27M3BpVi0C02UiQh1w9nsBEit6pLhlaH3NHna6WUbDE=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.2 h1:gKWSTnqudpo8dAxqBqZnDoDWCiEh/40FziUjr/mo6uA=
+github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.2/go.mod h1:x7+rkNmRoEN1U13A6JE2fXne9EWyJy54o3n6d4mGaXQ=
+github.com/aws/aws-sdk-go-v2/service/sts v1.38.2 h1:YZPjhyaGzhDQEvsffDEcpycq49nl7fiGcfJTIo8BszI=
+github.com/aws/aws-sdk-go-v2/service/sts v1.38.2/go.mod h1:2dIN8qhQfv37BdUYGgEC8Q3tteM3zFxTI1MLO2O3J3c=
+github.com/aws/smithy-go v1.23.0 h1:8n6I3gXzWJB2DxBDnfxgBaSX6oe0d/t10qGz7OKqMCE=
+github.com/aws/smithy-go v1.23.0/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
github.com/beevik/etree v1.6.0 h1:u8Kwy8pp9D9XeITj2Z0XtA5qqZEmtJtuXZRQi+j03eE=
github.com/beevik/etree v1.6.0/go.mod h1:bh4zJxiIr62SOf9pRzN7UUYaEDa9HEKafK25+sLc0Gc=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@@ -103,6 +105,8 @@ github.com/caddyserver/zerossl v0.1.3 h1:onS+pxp3M8HnHpN5MMbOMyNjmTheJyWRaZYwn+Y
github.com/caddyserver/zerossl v0.1.3/go.mod h1:CxA0acn7oEGO6//4rtrRjYgEoa4MFw/XofZnrYwGqG4=
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
+github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cilium/ebpf v0.15.0 h1:7NxJhNiBT3NG8pZJ3c+yfrVdHY8ScgKD27sScgjLMMk=
@@ -122,6 +126,12 @@ github.com/cpuguy83/dockercfg v0.3.2/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHf
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s=
github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE=
+github.com/crowdsecurity/crowdsec v1.7.7 h1:sduZN763iXsrZodocWDrsR//7nLeffGu+RVkkIsbQkE=
+github.com/crowdsecurity/crowdsec v1.7.7/go.mod h1:L1HLGPDnBYCcY+yfSFnuBbQ1G9DHEJN9c+Kevv9F+4Q=
+github.com/crowdsecurity/go-cs-bouncer v0.0.21 h1:arPz0VtdVSaz+auOSfHythzkZVLyy18CzYvYab8UJDU=
+github.com/crowdsecurity/go-cs-bouncer v0.0.21/go.mod h1:4JiH0XXA4KKnnWThItUpe5+heJHWzsLOSA2IWJqUDBA=
+github.com/crowdsecurity/go-cs-lib v0.0.25 h1:Ov6VPW9yV+OPsbAIQk1iTkEWhwkpaG0v3lrBzeqjzj4=
+github.com/crowdsecurity/go-cs-lib v0.0.25/go.mod h1:X0GMJY2CxdA1S09SpuqIKaWQsvRGxXmecUp9cP599dE=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
@@ -132,12 +142,12 @@ github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5Qvfr
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/docker/docker v28.0.1+incompatible h1:FCHjSRdXhNRFjlHMTv4jUNlIBbTeRjrWfeFuJp7jpo0=
github.com/docker/docker v28.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
-github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
-github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
+github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94=
+github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
-github.com/ebitengine/purego v0.8.2 h1:jPPGWs2sZ1UgOSgD2bClL0MJIqu58nOmIcBuXr62z1I=
-github.com/ebitengine/purego v0.8.2/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
+github.com/ebitengine/purego v0.8.4 h1:CF7LEKg5FFOsASUj0+QwaXf8Ht6TlFxg09+S9wz0omw=
+github.com/ebitengine/purego v0.8.4/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ=
github.com/eko/gocache/lib/v4 v4.2.0 h1:MNykyi5Xw+5Wu3+PUrvtOCaKSZM1nUSVftbzmeC7Yuw=
github.com/eko/gocache/lib/v4 v4.2.0/go.mod h1:7ViVmbU+CzDHzRpmB4SXKyyzyuJ8A3UW3/cszpcqB4M=
github.com/eko/gocache/store/go_cache/v4 v4.2.2 h1:tAI9nl6TLoJyKG1ujF0CS0n/IgTEMl+NivxtR5R3/hw=
@@ -158,6 +168,7 @@ github.com/fredbi/uri v1.1.1 h1:xZHJC08GZNIUhbP5ImTHnt5Ya0T8FI2VAwI/37kh2Ko=
github.com/fredbi/uri v1.1.1/go.mod h1:4+DZQ5zBjEwQCDmXW5JdIjz0PUA+yJbvtBv+u+adr5o=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
+github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k=
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0=
github.com/fxamacker/cbor/v2 v2.9.1 h1:2rWm8B193Ll4VdjsJY28jxs70IdDsHRWgQYAI80+rMQ=
@@ -192,6 +203,24 @@ github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-ole/go-ole v1.3.0 h1:Dt6ye7+vXGIKZ7Xtk4s6/xVdGDQynvom7xCFEdWr6uE=
github.com/go-ole/go-ole v1.3.0/go.mod h1:5LS6F96DhAwUc7C+1HLexzMXY1xGRSryjyPPKW6zv78=
+github.com/go-openapi/analysis v0.23.0 h1:aGday7OWupfMs+LbmLZG4k0MYXIANxcuBTYUC03zFCU=
+github.com/go-openapi/analysis v0.23.0/go.mod h1:9mz9ZWaSlV8TvjQHLl2mUW2PbZtemkE8yA5v22ohupo=
+github.com/go-openapi/errors v0.22.2 h1:rdxhzcBUazEcGccKqbY1Y7NS8FDcMyIRr0934jrYnZg=
+github.com/go-openapi/errors v0.22.2/go.mod h1:+n/5UdIqdVnLIJ6Q9Se8HNGUXYaY6CN8ImWzfi/Gzp0=
+github.com/go-openapi/jsonpointer v0.21.1 h1:whnzv/pNXtK2FbX/W9yJfRmE2gsmkfahjMKB0fZvcic=
+github.com/go-openapi/jsonpointer v0.21.1/go.mod h1:50I1STOfbY1ycR8jGz8DaMeLCdXiI6aDteEdRNNzpdk=
+github.com/go-openapi/jsonreference v0.21.0 h1:Rs+Y7hSXT83Jacb7kFyjn4ijOuVGSvOdF2+tg1TRrwQ=
+github.com/go-openapi/jsonreference v0.21.0/go.mod h1:LmZmgsrTkVg9LG4EaHeY8cBDslNPMo06cago5JNLkm4=
+github.com/go-openapi/loads v0.22.0 h1:ECPGd4jX1U6NApCGG1We+uEozOAvXvJSF4nnwHZ8Aco=
+github.com/go-openapi/loads v0.22.0/go.mod h1:yLsaTCS92mnSAZX5WWoxszLj0u+Ojl+Zs5Stn1oF+rs=
+github.com/go-openapi/spec v0.21.0 h1:LTVzPc3p/RzRnkQqLRndbAzjY0d0BCL72A6j3CdL9ZY=
+github.com/go-openapi/spec v0.21.0/go.mod h1:78u6VdPw81XU44qEWGhtr982gJ5BWg2c0I5XwVMotYk=
+github.com/go-openapi/strfmt v0.23.0 h1:nlUS6BCqcnAk0pyhi9Y+kdDVZdZMHfEKQiS4HaMgO/c=
+github.com/go-openapi/strfmt v0.23.0/go.mod h1:NrtIpfKtWIygRkKVsxh7XQMDQW5HKQl6S5ik2elW+K4=
+github.com/go-openapi/swag v0.23.1 h1:lpsStH0n2ittzTnbaSloVZLuB5+fvSY/+hnagBjSNZU=
+github.com/go-openapi/swag v0.23.1/go.mod h1:STZs8TbRvEQQKUA+JZNAm3EWlgaOBGpyFDqQnDHMef0=
+github.com/go-openapi/validate v0.24.0 h1:LdfDKwNbpB6Vn40xhTdNZAnfLECL81w+VX3BumrGD58=
+github.com/go-openapi/validate v0.24.0/go.mod h1:iyeX1sEufmv3nPbBdX3ieNviWnOZaJ1+zquzJEf2BAQ=
github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM=
github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY=
github.com/go-quicktest/qt v1.101.0 h1:O1K29Txy5P2OK0dGo59b7b0LR6wKfIhttaAhHUyn7eI=
@@ -216,10 +245,14 @@ github.com/go-webauthn/webauthn v0.16.4 h1:R9jqR/cYZa7hRquFF7Za/8qoH/K/TIs1/Q/4C
github.com/go-webauthn/webauthn v0.16.4/go.mod h1:SU2ljAgToTV/YLPI0C05QS4qn+e04WpB5g1RMfcZfS4=
github.com/go-webauthn/x v0.2.3 h1:8oArS+Rc1SWFLXhE17KZNx258Z4kUSyaDgsSncCO5RA=
github.com/go-webauthn/x v0.2.3/go.mod h1:tM04GF3V6VYq79AZMl7vbj4q6pz9r7L2criWRzbWhPk=
+github.com/goccy/go-yaml v1.18.0 h1:8W7wMFS12Pcas7KU+VVkaiCng+kG8QiFeFwzFb+rwuw=
+github.com/goccy/go-yaml v1.18.0/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA=
github.com/godbus/dbus/v5 v5.1.0 h1:4KLkAxT3aOY8Li4FRJe/KvhoNFFxo0m6fNuFUO8QJUk=
github.com/godbus/dbus/v5 v5.1.0/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang-jwt/jwt/v4 v4.5.2 h1:YtQM7lnr8iZ+j5q71MGKkNw9Mn7AjHM68uc9g5fXeUI=
+github.com/golang-jwt/jwt/v4 v4.5.2/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0=
github.com/golang-jwt/jwt/v5 v5.3.1 h1:kYf81DTWFe7t+1VvL7eS+jKFVWaUnK9cB1qbwn63YCY=
github.com/golang-jwt/jwt/v5 v5.3.1/go.mod h1:fxCRLWMO43lRc8nhHWY6LGqRcf+1gQWArsqaEUEa5bE=
github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
@@ -243,6 +276,7 @@ github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl76
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
@@ -250,6 +284,8 @@ github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
+github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8=
+github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU=
github.com/google/go-tpm v0.9.8 h1:slArAR9Ft+1ybZu0lBwpSmpwhRXaa85hWtMinMyRAWo=
github.com/google/go-tpm v0.9.8/go.mod h1:h9jEsEECg7gtLis0upRBQU+GhYVH6jMjrFxI8u6bVUY=
github.com/google/go-tpm-tools v0.3.13-0.20230620182252-4639ecce2aba h1:qJEJcuLzH5KDR0gKc0zcktin6KSAwL7+jWKBYceddTc=
@@ -305,13 +341,15 @@ github.com/hashicorp/go-sockaddr v1.0.7/go.mod h1:FZQbEYa1pxkQ7WLpyXJ6cbjpT8q0Yg
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
github.com/hashicorp/go-uuid v1.0.3 h1:2gKiV6YVmrJ1i2CKKa9obLvRieoRGviZFL26PcT/Co8=
github.com/hashicorp/go-uuid v1.0.3/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
-github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek=
-github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
+github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY=
+github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/hcl v1.0.1-vault-7 h1:ag5OxFVy3QYTFTJODRzTKVZ6xvdfLLCA1cy/Y6xGI0I=
github.com/hashicorp/hcl v1.0.1-vault-7/go.mod h1:XYhtn6ijBSAj6n4YqAaf7RBPS4I06AItNorpy+MoQNM=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huandu/xstrings v1.5.0 h1:2ag3IFq9ZDANvthTwTiqSSZLjDc+BedvHPAp5tJy2TI=
github.com/huandu/xstrings v1.5.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE=
+github.com/huin/goupnp v1.2.0 h1:uOKW26NG1hsSSbXIZ1IR7XP9Gjd1U8pnLaCMgntmkmY=
+github.com/huin/goupnp v1.2.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM=
@@ -322,6 +360,8 @@ github.com/jackc/pgx/v5 v5.5.5 h1:amBjrZVmksIdNjxGW/IiIMzxMKZFelXbUoPNb+8sjQw=
github.com/jackc/pgx/v5 v5.5.5/go.mod h1:ez9gk+OAat140fv9ErkZDYFWmXLfV+++K0uAOiwgm1A=
github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk=
github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4=
+github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
+github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jcmturner/aescts/v2 v2.0.0 h1:9YKLH6ey7H4eDBXW8khjYslgyqG2xZikXP0EQFKrle8=
github.com/jcmturner/aescts/v2 v2.0.0/go.mod h1:AiaICIRyfYg35RUkr8yESTqvSy7csK90qZ5xfvvsoNs=
github.com/jcmturner/dnsutils/v2 v2.0.0 h1:lltnkeZGL0wILNvrNiVCR6Ro5PGU/SeBvVO/8c/iPbo=
@@ -346,6 +386,8 @@ github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGw
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/jonboulle/clockwork v0.5.0 h1:Hyh9A8u51kptdkR+cqRpT1EebBwTn1oK9YfGYbdFz6I=
github.com/jonboulle/clockwork v0.5.0/go.mod h1:3mZlmanh0g2NDKO5TWZVJAfofYk64M7XN3SzBPjZF60=
+github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
+github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jsummers/gobmp v0.0.0-20230614200233-a9de23ed2e25 h1:YLvr1eE6cdCqjOe972w/cYF+FjW34v27+9Vo5106B4M=
github.com/jsummers/gobmp v0.0.0-20230614200233-a9de23ed2e25/go.mod h1:kLgvv7o6UM+0QSf0QjAse3wReFDsb9qbZJdfexWlrQw=
@@ -357,8 +399,10 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c=
-github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
-github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
+github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=
+github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
+github.com/koron/go-ssdp v0.0.4 h1:1IDwrghSKYM7yLf7XCzbByg2sJ/JcNOZRXS2jczTwz0=
+github.com/koron/go-ssdp v0.0.4/go.mod h1:oDXq+E5IL5q0U8uSBcoAXzTzInwy5lEgC91HoKtbmZk=
github.com/kr/fs v0.1.0 h1:Jskdu9ieNAYnjxsi0LbQp1ulIKZV1LAFgK1tWhpZgl8=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
@@ -377,6 +421,8 @@ github.com/libdns/libdns v0.2.2 h1:O6ws7bAfRPaBsgAYt8MDe2HcNBGC29hkZ9MX2eUSX3s=
github.com/libdns/libdns v0.2.2/go.mod h1:4Bj9+5CQiNMVGf87wjX4CY3HQJypUHRuLvlsfsZqLWQ=
github.com/libdns/route53 v1.5.0 h1:2SKdpPFl/qgWsXQvsLNJJAoX7rSxlk7zgoL4jnWdXVA=
github.com/libdns/route53 v1.5.0/go.mod h1:joT4hKmaTNKHEwb7GmZ65eoDz1whTu7KKYPS8ZqIh6Q=
+github.com/libp2p/go-nat v0.2.0 h1:Tyz+bUFAYqGyJ/ppPPymMGbIgNRH+WqC5QrT5fKrrGk=
+github.com/libp2p/go-nat v0.2.0/go.mod h1:3MJr+GRpRkyT65EpVPBstXLvOlAPzUVlG6Pwg9ohLJk=
github.com/lrh3321/ipset-go v0.0.0-20250619021614-54a0a98ace81 h1:J56rFEfUTFT9j9CiRXhi1r8lUJ4W5idG3CiaBZGojNU=
github.com/lrh3321/ipset-go v0.0.0-20250619021614-54a0a98ace81/go.mod h1:RD8ML/YdXctQ7qbcizZkw5mZ6l8Ogrl1dodBzVJduwI=
github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
@@ -384,6 +430,8 @@ github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae h1:dIZY4ULFcto4tA
github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k=
github.com/magiconair/properties v1.8.10 h1:s31yESBquKXCV9a/ScB3ESkOjUYYv+X0rg8SYxI99mE=
github.com/magiconair/properties v1.8.10/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
+github.com/mailru/easyjson v0.9.0 h1:PrnmzHw7262yW8sTBwxi1PdJA3Iw/EKBa8psRf7d9a4=
+github.com/mailru/easyjson v0.9.0/go.mod h1:1+xMtQp2MRNVL/V1bOzuP3aP8VNwRW55fQUto+XFtTU=
github.com/mattermost/xml-roundtrip-validator v0.1.0 h1:RXbVD2UAl7A7nOTR4u7E3ILa4IbtvKBHw64LDsmu9hU=
github.com/mattermost/xml-roundtrip-validator v0.1.0/go.mod h1:qccnGMcpgwcNaBnxqpJpWWUiPNr5H3O8eDgGV9gT5To=
github.com/mattn/go-colorable v0.1.14 h1:9A9LHSqF/7dyVVX6g0U9cwm9pG3kP9gSzcuIPHPsaIE=
@@ -454,10 +502,13 @@ github.com/nfnt/resize v0.0.0-20180221191011-83c6a9932646/go.mod h1:jpp1/29i3P1S
github.com/nicksnyder/go-i18n/v2 v2.5.1 h1:IxtPxYsR9Gp60cGXjfuR/llTqV8aYMsC472zD0D1vHk=
github.com/nicksnyder/go-i18n/v2 v2.5.1/go.mod h1:DrhgsSDZxoAfvVrBVLXoxZn/pN5TXqaDbq7ju94viiQ=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
-github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
+github.com/nxadm/tail v1.4.11 h1:8feyoE3OzPrcshW5/MJ4sGESc5cqmGkGCWlco4l0bqY=
+github.com/nxadm/tail v1.4.11/go.mod h1:OTaG3NK980DZzxbRq6lEuzgU+mug70nY11sMd4JXXHc=
github.com/oapi-codegen/runtime v1.1.2 h1:P2+CubHq8fO4Q6fV1tqDBZHCwpVpvPg7oKiYzQgXIyI=
github.com/oapi-codegen/runtime v1.1.2/go.mod h1:SK9X900oXmPWilYR5/WKPzt3Kqxn/uS/+lbpREv+eCg=
+github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4=
+github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/okta/okta-sdk-golang/v2 v2.18.0 h1:cfDasMb7CShbZvOrF6n+DnLevWwiHgedWMGJ8M8xKDc=
github.com/okta/okta-sdk-golang/v2 v2.18.0/go.mod h1:dz30v3ctAiMb7jpsCngGfQUAEGm1/NsWT92uTbNDQIs=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@@ -480,8 +531,8 @@ github.com/oschwald/maxminddb-golang v1.12.0 h1:9FnTOD0YOhP7DGxGsq4glzpGy5+w7pq5
github.com/oschwald/maxminddb-golang v1.12.0/go.mod h1:q0Nob5lTCqyQ8WT6FYgS1L7PXKVVbgiymefNwIjPzgY=
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
-github.com/pelletier/go-toml/v2 v2.0.9 h1:uH2qQXheeefCCkuBBSLi7jCiSmj3VRh2+Goq2N7Xxu0=
-github.com/pelletier/go-toml/v2 v2.0.9/go.mod h1:tJU2Z3ZkXwnxa4DPO899bsyIoywizdUvyaeZurnPPDc=
+github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
+github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
github.com/petermattis/goid v0.0.0-20250303134427-723919f7f203 h1:E7Kmf11E4K7B5hDti2K2NqPb1nlYlGYsu02S1JNd/Bs=
github.com/petermattis/goid v0.0.0-20250303134427-723919f7f203/go.mod h1:pxMtw7cyUw6B2bRH0ZBANSPg+AoSud1I1iyJHI69jH4=
github.com/philhofer/fwd v1.2.0 h1:e6DnBTl7vGY+Gz322/ASL4Gyp1FspeMvx1RNDoToZuM=
@@ -558,8 +609,8 @@ github.com/rymdport/portal v0.4.2 h1:7jKRSemwlTyVHHrTGgQg7gmNPJs88xkbKcIL3NlcmSU
github.com/rymdport/portal v0.4.2/go.mod h1:kFF4jslnJ8pD5uCi17brj/ODlfIidOxlgUDTO5ncnC4=
github.com/shirou/gopsutil/v3 v3.24.4 h1:dEHgzZXt4LMNm+oYELpzl9YCqV65Yr/6SfrvgRBtXeU=
github.com/shirou/gopsutil/v3 v3.24.4/go.mod h1:lTd2mdiOspcqLgAnr9/nGi71NkeMpWKdmhuxm9GusH8=
-github.com/shirou/gopsutil/v4 v4.25.1 h1:QSWkTc+fu9LTAWfkZwZ6j8MSUk4A2LV7rbH0ZqmLjXs=
-github.com/shirou/gopsutil/v4 v4.25.1/go.mod h1:RoUCUpndaJFtT+2zsZzzmhvbfGoDCJ7nFXKJf8GqJbI=
+github.com/shirou/gopsutil/v4 v4.25.8 h1:NnAsw9lN7587WHxjJA9ryDnqhJpFH6A+wagYWTOH970=
+github.com/shirou/gopsutil/v4 v4.25.8/go.mod h1:q9QdMmfAOVIw7a+eF86P7ISEU6ka+NLgkUxlopV4RwI=
github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
github.com/shoenig/go-m1cpu v0.2.1 h1:yqRB4fvOge2+FyRXFkXqsyMoqPazv14Yyy+iyccT2E4=
github.com/shoenig/go-m1cpu v0.2.1/go.mod h1:KkDOw6m3ZJQAPHbrzkZki4hnx+pDRR1Lo+ldA56wD5w=
@@ -568,8 +619,8 @@ github.com/shoenig/test v1.7.0 h1:eWcHtTXa6QLnBvm0jgEabMRN/uJ4DMV3M8xUGgRkZmk=
github.com/shoenig/test v1.7.0/go.mod h1:UxJ6u/x2v/TNs/LoLxBNJRV9DiwBBKYxXSyczsBHFoI=
github.com/shopspring/decimal v1.4.0 h1:bxl37RwXBklmTi0C79JfXCEBD1cqqHt0bbgBAGFp81k=
github.com/shopspring/decimal v1.4.0/go.mod h1:gawqmDU56v4yIKSwfBSFip1HdCCXN8/+DMd9qYNcwME=
-github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
-github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/sirupsen/logrus v1.9.4 h1:TsZE7l11zFCLZnZ+teH4Umoq5BhEIfIzfRDZ1Uzql2w=
+github.com/sirupsen/logrus v1.9.4/go.mod h1:ftWc9WdOfJ0a92nsE2jF5u5ZwH8Bv2zdeOC42RjbV2g=
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966 h1:JIAuq3EEf9cgbU6AtGPK4CTG3Zf6CKMNqf0MHTggAUA=
github.com/skratchdot/open-golang v0.0.0-20200116055534-eef842397966/go.mod h1:sUM3LWHvSMaG192sy56D9F7CNvL7jUJVXoqM1QKLnog=
github.com/songgao/water v0.0.0-20200317203138-2b4b6d7c09d8 h1:TG/diQgUe0pntT/2D9tmUCz4VNwm9MfrtPr0SU2qSX8=
@@ -620,11 +671,11 @@ github.com/ti-mo/netfilter v0.5.2/go.mod h1:Btx3AtFiOVdHReTDmP9AE+hlkOcvIy403u7B
github.com/tinylib/msgp v1.6.3 h1:bCSxiTz386UTgyT1i0MSCvdbWjVW+8sG3PjkGsZQt4s=
github.com/tinylib/msgp v1.6.3/go.mod h1:RSp0LW9oSxFut3KzESt5Voq4GVWyS+PSulT77roAqEA=
github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
-github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU=
-github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY=
+github.com/tklauser/go-sysconf v0.3.15 h1:VE89k0criAymJ/Os65CSn1IXaol+1wrsFHEB8Ol49K4=
+github.com/tklauser/go-sysconf v0.3.15/go.mod h1:Dmjwr6tYFIseJw7a3dRLJfsHAMXZ3nEnL/aZY+0IuI4=
github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
-github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY=
-github.com/tklauser/numcpus v0.8.0/go.mod h1:ZJZlAY+dmR4eut8epnzf0u/VwodKmryxR8txiloSqBE=
+github.com/tklauser/numcpus v0.10.0 h1:18njr6LDBk1zuna922MgdjQuJFjrdppsZG60sHGfjso=
+github.com/tklauser/numcpus v0.10.0/go.mod h1:BiTKazU708GQTYF4mB+cmlpT2Is1gLk7XVuEeem8LsQ=
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
github.com/vishvananda/netlink v1.3.1 h1:3AEMt62VKqz90r0tmNhog0r/PpWKmrEShJU0wJW6bV0=
@@ -655,6 +706,8 @@ github.com/zeebo/blake3 v0.2.3 h1:TFoLXsjeXqRNFxSbk35Dk4YtszE/MQQGK10BH4ptoTg=
github.com/zeebo/blake3 v0.2.3/go.mod h1:mjJjZpnsyIVtVgTOSpJ9vmRE4wgDeyt2HU3qXvvKCaQ=
github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo=
github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4=
+go.mongodb.org/mongo-driver v1.17.9 h1:IexDdCuuNJ3BHrELgBlyaH9p60JXAvdzWR128q+U5tU=
+go.mongodb.org/mongo-driver v1.17.9/go.mod h1:LlOhpH5NUEfhxcAwG0UEkMqwYcc4JU18gtCdGudk/tQ=
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.67.0 h1:yI1/OhfEPy7J9eoa6Sj051C7n5dvpj0QX8g4sRchg04=
@@ -705,8 +758,8 @@ golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/crypto v0.50.0 h1:zO47/JPrL6vsNkINmLoo/PH1gcxpls50DNogFvB5ZGI=
golang.org/x/crypto v0.50.0/go.mod h1:3muZ7vA7PBCE6xgPX7nkzzjiUq87kRItoJQM1Yo8S+Q=
-golang.org/x/exp v0.0.0-20240823005443-9b4947da3948 h1:kx6Ds3MlpiUHKj7syVnbp57++8WpuKPcR5yjLBjvLEA=
-golang.org/x/exp v0.0.0-20240823005443-9b4947da3948/go.mod h1:akd2r19cwCdwSwWeIdzYQGa/EZZyqcOdwWiwj5L5eKQ=
+golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b h1:M2rDM6z3Fhozi9O7NWsxAkg/yqS/lQJ6PmkyIV3YP+o=
+golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8=
golang.org/x/image v0.33.0 h1:LXRZRnv1+zGd5XBUVRFmYEphyyKJjQjCRiOuAP3sZfQ=
golang.org/x/image v0.33.0/go.mod h1:DD3OsTYT9chzuzTQt+zMcOlBHgfoKQb1gry8p76Y1sc=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
@@ -778,8 +831,8 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -881,8 +934,8 @@ gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8
gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
-gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc=
+gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc=
gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI=
gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
diff --git a/infrastructure_files/observability/grafana/dashboards/management.json b/infrastructure_files/observability/grafana/dashboards/management.json
index 95983603f..f116a8bde 100644
--- a/infrastructure_files/observability/grafana/dashboards/management.json
+++ b/infrastructure_files/observability/grafana/dashboards/management.json
@@ -302,7 +302,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "rate(management_account_peer_meta_update_counter_ratio_total{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])",
+ "expr": "rate(management_account_peer_meta_update_counter_total{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])",
"instant": false,
"legendFormat": "{{cluster}}/{{environment}}/{{job}}",
"range": true,
@@ -410,7 +410,7 @@
},
"disableTextWrap": false,
"editorMode": "code",
- "expr": "histogram_quantile(0.5,sum(increase(management_account_get_peer_network_map_duration_ms_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))",
+ "expr": "histogram_quantile(0.5,sum(increase(management_account_get_peer_network_map_duration_ms_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))",
"format": "heatmap",
"fullMetaSearch": false,
"includeNullMetadata": true,
@@ -426,7 +426,7 @@
},
"disableTextWrap": false,
"editorMode": "code",
- "expr": "histogram_quantile(0.9,sum(increase(management_account_get_peer_network_map_duration_ms_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))",
+ "expr": "histogram_quantile(0.9,sum(increase(management_account_get_peer_network_map_duration_ms_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))",
"format": "heatmap",
"fullMetaSearch": false,
"hide": false,
@@ -443,7 +443,7 @@
},
"disableTextWrap": false,
"editorMode": "code",
- "expr": "histogram_quantile(0.99,sum(increase(management_account_get_peer_network_map_duration_ms_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))",
+ "expr": "histogram_quantile(0.99,sum(increase(management_account_get_peer_network_map_duration_ms_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))",
"format": "heatmap",
"fullMetaSearch": false,
"hide": false,
@@ -545,7 +545,7 @@
},
"disableTextWrap": false,
"editorMode": "code",
- "expr": "histogram_quantile(0.5,sum(increase(management_account_update_account_peers_duration_ms_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))",
+ "expr": "histogram_quantile(0.5,sum(increase(management_account_update_account_peers_duration_ms_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))",
"format": "heatmap",
"fullMetaSearch": false,
"includeNullMetadata": true,
@@ -561,7 +561,7 @@
},
"disableTextWrap": false,
"editorMode": "code",
- "expr": "histogram_quantile(0.9,sum(increase(management_account_update_account_peers_duration_ms_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))",
+ "expr": "histogram_quantile(0.9,sum(increase(management_account_update_account_peers_duration_ms_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))",
"format": "heatmap",
"fullMetaSearch": false,
"hide": false,
@@ -578,7 +578,7 @@
},
"disableTextWrap": false,
"editorMode": "code",
- "expr": "histogram_quantile(0.99,sum(increase(management_account_update_account_peers_duration_ms_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))",
+ "expr": "histogram_quantile(0.99,sum(increase(management_account_update_account_peers_duration_ms_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))",
"format": "heatmap",
"fullMetaSearch": false,
"hide": false,
@@ -694,7 +694,7 @@
},
"disableTextWrap": false,
"editorMode": "code",
- "expr": "histogram_quantile(0.5,sum(increase(management_grpc_updatechannel_queue_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))",
+ "expr": "histogram_quantile(0.5,sum(increase(management_grpc_updatechannel_queue_length_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))",
"format": "heatmap",
"fullMetaSearch": false,
"includeNullMetadata": true,
@@ -710,7 +710,7 @@
},
"disableTextWrap": false,
"editorMode": "code",
- "expr": "histogram_quantile(0.9,sum(increase(management_grpc_updatechannel_queue_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))",
+ "expr": "histogram_quantile(0.9,sum(increase(management_grpc_updatechannel_queue_length_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))",
"format": "heatmap",
"fullMetaSearch": false,
"hide": false,
@@ -727,7 +727,7 @@
},
"disableTextWrap": false,
"editorMode": "code",
- "expr": "histogram_quantile(0.99,sum(increase(management_grpc_updatechannel_queue_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))",
+ "expr": "histogram_quantile(0.99,sum(increase(management_grpc_updatechannel_queue_length_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le,cluster,environment,job))",
"format": "heatmap",
"fullMetaSearch": false,
"hide": false,
@@ -841,7 +841,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.50, sum(rate(management_store_persistence_duration_ms_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))",
+ "expr": "histogram_quantile(0.50, sum(rate(management_store_persistence_duration_ms_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))",
"instant": false,
"legendFormat": "p50",
"range": true,
@@ -853,7 +853,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.90, sum(rate(management_store_persistence_duration_ms_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))",
+ "expr": "histogram_quantile(0.90, sum(rate(management_store_persistence_duration_ms_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))",
"hide": false,
"instant": false,
"legendFormat": "p90",
@@ -866,7 +866,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.99, sum(rate(management_store_persistence_duration_ms_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))",
+ "expr": "histogram_quantile(0.99, sum(rate(management_store_persistence_duration_ms_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))",
"hide": false,
"instant": false,
"legendFormat": "p99",
@@ -963,7 +963,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.50, sum(rate(management_store_transaction_duration_ms_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))",
+ "expr": "histogram_quantile(0.50, sum(rate(management_store_transaction_duration_ms_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))",
"instant": false,
"legendFormat": "p50",
"range": true,
@@ -975,7 +975,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.90, sum(rate(management_store_transaction_duration_ms_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))",
+ "expr": "histogram_quantile(0.90, sum(rate(management_store_transaction_duration_ms_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))",
"hide": false,
"instant": false,
"legendFormat": "p90",
@@ -988,7 +988,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.99, sum(rate(management_store_transaction_duration_ms_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))",
+ "expr": "histogram_quantile(0.99, sum(rate(management_store_transaction_duration_ms_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))",
"hide": false,
"instant": false,
"legendFormat": "p99",
@@ -1085,7 +1085,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.50, sum(rate(management_store_global_lock_acquisition_duration_ms_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))",
+ "expr": "histogram_quantile(0.50, sum(rate(management_store_global_lock_acquisition_duration_ms_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))",
"instant": false,
"legendFormat": "p50",
"range": true,
@@ -1097,7 +1097,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.90, sum(rate(management_store_global_lock_acquisition_duration_ms_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))",
+ "expr": "histogram_quantile(0.90, sum(rate(management_store_global_lock_acquisition_duration_ms_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))",
"hide": false,
"instant": false,
"legendFormat": "p90",
@@ -1110,7 +1110,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.99, sum(rate(management_store_global_lock_acquisition_duration_ms_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))",
+ "expr": "histogram_quantile(0.99, sum(rate(management_store_global_lock_acquisition_duration_ms_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (le))",
"hide": false,
"instant": false,
"legendFormat": "p99",
@@ -1221,7 +1221,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "rate(management_idp_authenticate_request_counter_ratio_total{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])",
+ "expr": "rate(management_idp_authenticate_request_counter_total{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])",
"instant": false,
"legendFormat": "{{cluster}}/{{environment}}/{{job}}",
"range": true,
@@ -1317,7 +1317,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "rate(management_idp_get_account_counter_ratio_total{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])",
+ "expr": "rate(management_idp_get_account_counter_total{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])",
"instant": false,
"legendFormat": "{{cluster}}/{{environment}}/{{job}}",
"range": true,
@@ -1413,7 +1413,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "rate(management_idp_update_user_meta_counter_ratio_total{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])",
+ "expr": "rate(management_idp_update_user_meta_counter_total{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])",
"instant": false,
"legendFormat": "{{cluster}}/{{environment}}/{{job}}",
"range": true,
@@ -1523,7 +1523,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum(rate(management_http_request_counter_ratio_total{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",method=~\"GET|OPTIONS\"}[$__rate_interval])) by (job,method)",
+ "expr": "sum(rate(management_http_request_counter_total{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",method=~\"GET|OPTIONS\"}[$__rate_interval])) by (job,method)",
"instant": false,
"legendFormat": "{{method}}",
"range": true,
@@ -1619,7 +1619,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum(rate(management_http_request_counter_ratio_total{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",method=~\"POST|PUT|DELETE\"}[$__rate_interval])) by (job,method)",
+ "expr": "sum(rate(management_http_request_counter_total{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",method=~\"POST|PUT|DELETE\"}[$__rate_interval])) by (job,method)",
"instant": false,
"legendFormat": "{{method}}",
"range": true,
@@ -1715,7 +1715,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.50, sum(rate(management_http_request_duration_ms_total_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",type=~\"read\"}[5m])) by (le))",
+ "expr": "histogram_quantile(0.50, sum(rate(management_http_request_duration_ms_total_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",type=~\"read\"}[5m])) by (le))",
"instant": false,
"legendFormat": "p50",
"range": true,
@@ -1727,7 +1727,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.90, sum(rate(management_http_request_duration_ms_total_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",type=~\"read\"}[5m])) by (le))",
+ "expr": "histogram_quantile(0.90, sum(rate(management_http_request_duration_ms_total_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",type=~\"read\"}[5m])) by (le))",
"hide": false,
"instant": false,
"legendFormat": "p90",
@@ -1740,7 +1740,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.99, sum(rate(management_http_request_duration_ms_total_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",type=~\"read\"}[5m])) by (le))",
+ "expr": "histogram_quantile(0.99, sum(rate(management_http_request_duration_ms_total_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",type=~\"read\"}[5m])) by (le))",
"hide": false,
"instant": false,
"legendFormat": "p99",
@@ -1837,7 +1837,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.50, sum(rate(management_http_request_duration_ms_total_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",type=~\"write\"}[5m])) by (le))",
+ "expr": "histogram_quantile(0.50, sum(rate(management_http_request_duration_ms_total_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",type=~\"write\"}[5m])) by (le))",
"instant": false,
"legendFormat": "p50",
"range": true,
@@ -1849,7 +1849,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.90, sum(rate(management_http_request_duration_ms_total_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",type=~\"write\"}[5m])) by (le))",
+ "expr": "histogram_quantile(0.90, sum(rate(management_http_request_duration_ms_total_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",type=~\"write\"}[5m])) by (le))",
"hide": false,
"instant": false,
"legendFormat": "p90",
@@ -1862,7 +1862,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "histogram_quantile(0.99, sum(rate(management_http_request_duration_ms_total_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",type=~\"write\"}[5m])) by (le))",
+ "expr": "histogram_quantile(0.99, sum(rate(management_http_request_duration_ms_total_milliseconds_bucket{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\",type=~\"write\"}[5m])) by (le))",
"hide": false,
"instant": false,
"legendFormat": "p99",
@@ -1963,7 +1963,7 @@
"uid": "${datasource}"
},
"editorMode": "code",
- "expr": "sum(rate(management_http_request_counter_ratio_total{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (job,exported_endpoint,method)",
+ "expr": "sum(rate(management_http_request_counter_total{cluster=~\"$cluster\",environment=~\"$environment\",job=~\"$job\",host=~\"$host\"}[$__rate_interval])) by (job,exported_endpoint,method)",
"hide": false,
"instant": false,
"legendFormat": "{{method}}-{{exported_endpoint}}",
@@ -3222,7 +3222,7 @@
},
"disableTextWrap": false,
"editorMode": "builder",
- "expr": "sum by(le) (increase(management_grpc_updatechannel_queue_bucket{application=\"management\", environment=\"$environment\", host=~\"$host\"}[$__rate_interval]))",
+ "expr": "sum by(le) (increase(management_grpc_updatechannel_queue_length_bucket{application=\"management\", environment=\"$environment\", host=~\"$host\"}[$__rate_interval]))",
"format": "heatmap",
"fullMetaSearch": false,
"includeNullMetadata": true,
@@ -3323,7 +3323,7 @@
},
"disableTextWrap": false,
"editorMode": "builder",
- "expr": "sum by(le) (increase(management_account_update_account_peers_duration_ms_bucket{application=\"management\", environment=\"$environment\", host=~\"$host\"}[$__rate_interval]))",
+ "expr": "sum by(le) (increase(management_account_update_account_peers_duration_ms_milliseconds_bucket{application=\"management\", environment=\"$environment\", host=~\"$host\"}[$__rate_interval]))",
"format": "heatmap",
"fullMetaSearch": false,
"includeNullMetadata": true,
diff --git a/management/internals/modules/reverseproxy/accesslogs/accesslogentry.go b/management/internals/modules/reverseproxy/accesslogs/accesslogentry.go
index a7f692569..f2ecfd5f9 100644
--- a/management/internals/modules/reverseproxy/accesslogs/accesslogentry.go
+++ b/management/internals/modules/reverseproxy/accesslogs/accesslogentry.go
@@ -1,6 +1,7 @@
package accesslogs
import (
+ "maps"
"net"
"net/netip"
"time"
@@ -37,6 +38,7 @@ type AccessLogEntry struct {
BytesUpload int64 `gorm:"index"`
BytesDownload int64 `gorm:"index"`
Protocol AccessLogProtocol `gorm:"index"`
+ Metadata map[string]string `gorm:"serializer:json"`
}
// FromProto creates an AccessLogEntry from a proto.AccessLog
@@ -55,6 +57,7 @@ func (a *AccessLogEntry) FromProto(serviceLog *proto.AccessLog) {
a.BytesUpload = serviceLog.GetBytesUpload()
a.BytesDownload = serviceLog.GetBytesDownload()
a.Protocol = AccessLogProtocol(serviceLog.GetProtocol())
+ a.Metadata = maps.Clone(serviceLog.GetMetadata())
if sourceIP := serviceLog.GetSourceIp(); sourceIP != "" {
if addr, err := netip.ParseAddr(sourceIP); err == nil {
@@ -117,6 +120,11 @@ func (a *AccessLogEntry) ToAPIResponse() *api.ProxyAccessLog {
protocol = &p
}
+ var metadata *map[string]string
+ if len(a.Metadata) > 0 {
+ metadata = &a.Metadata
+ }
+
return &api.ProxyAccessLog{
Id: a.ID,
ServiceId: a.ServiceID,
@@ -136,5 +144,6 @@ func (a *AccessLogEntry) ToAPIResponse() *api.ProxyAccessLog {
BytesUpload: a.BytesUpload,
BytesDownload: a.BytesDownload,
Protocol: protocol,
+ Metadata: metadata,
}
}
diff --git a/management/internals/modules/reverseproxy/accesslogs/manager/manager.go b/management/internals/modules/reverseproxy/accesslogs/manager/manager.go
index e8d0ce763..59d7704eb 100644
--- a/management/internals/modules/reverseproxy/accesslogs/manager/manager.go
+++ b/management/internals/modules/reverseproxy/accesslogs/manager/manager.go
@@ -106,13 +106,23 @@ func (m *managerImpl) CleanupOldAccessLogs(ctx context.Context, retentionDays in
// StartPeriodicCleanup starts a background goroutine that periodically cleans up old access logs
func (m *managerImpl) StartPeriodicCleanup(ctx context.Context, retentionDays, cleanupIntervalHours int) {
- if retentionDays <= 0 {
- log.WithContext(ctx).Debug("periodic access log cleanup disabled: retention days is 0 or negative")
+ if retentionDays < 0 {
+ log.WithContext(ctx).Debug("periodic access log cleanup disabled: retention days is negative")
return
}
+ if retentionDays == 0 {
+ retentionDays = 7
+ log.WithContext(ctx).Debugf("no retention days specified for access log cleanup, defaulting to %d days", retentionDays)
+ } else {
+ log.WithContext(ctx).Debugf("access log retention period set to %d days", retentionDays)
+ }
+
if cleanupIntervalHours <= 0 {
cleanupIntervalHours = 24
+ log.WithContext(ctx).Debugf("no cleanup interval specified for access log cleanup, defaulting to %d hours", cleanupIntervalHours)
+ } else {
+ log.WithContext(ctx).Debugf("access log cleanup interval set to %d hours", cleanupIntervalHours)
}
cleanupCtx, cancel := context.WithCancel(ctx)
diff --git a/management/internals/modules/reverseproxy/accesslogs/manager/manager_test.go b/management/internals/modules/reverseproxy/accesslogs/manager/manager_test.go
index 8fadef85f..11bf60829 100644
--- a/management/internals/modules/reverseproxy/accesslogs/manager/manager_test.go
+++ b/management/internals/modules/reverseproxy/accesslogs/manager/manager_test.go
@@ -121,7 +121,7 @@ func TestCleanupWithExactBoundary(t *testing.T) {
}
func TestStartPeriodicCleanup(t *testing.T) {
- t.Run("periodic cleanup disabled with zero retention", func(t *testing.T) {
+ t.Run("periodic cleanup disabled with negative retention", func(t *testing.T) {
ctrl := gomock.NewController(t)
defer ctrl.Finish()
@@ -135,7 +135,7 @@ func TestStartPeriodicCleanup(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
- manager.StartPeriodicCleanup(ctx, 0, 1)
+ manager.StartPeriodicCleanup(ctx, -1, 1)
time.Sleep(100 * time.Millisecond)
diff --git a/management/internals/modules/reverseproxy/domain/domain.go b/management/internals/modules/reverseproxy/domain/domain.go
index 859f1c5b2..f65e31a07 100644
--- a/management/internals/modules/reverseproxy/domain/domain.go
+++ b/management/internals/modules/reverseproxy/domain/domain.go
@@ -20,6 +20,9 @@ type Domain struct {
// RequireSubdomain is populated at query time. When true, the domain
// cannot be used bare and a subdomain label must be prepended. Not persisted.
RequireSubdomain *bool `gorm:"-"`
+ // SupportsCrowdSec is populated at query time from proxy cluster capabilities.
+ // Not persisted.
+ SupportsCrowdSec *bool `gorm:"-"`
}
// EventMeta returns activity event metadata for a domain
@@ -30,3 +33,8 @@ func (d *Domain) EventMeta() map[string]any {
"validated": d.Validated,
}
}
+
+func (d *Domain) Copy() *Domain {
+ dCopy := *d
+ return &dCopy
+}
diff --git a/management/internals/modules/reverseproxy/domain/manager/api.go b/management/internals/modules/reverseproxy/domain/manager/api.go
index 640ab28a5..4493ef0ad 100644
--- a/management/internals/modules/reverseproxy/domain/manager/api.go
+++ b/management/internals/modules/reverseproxy/domain/manager/api.go
@@ -48,6 +48,7 @@ func domainToApi(d *domain.Domain) api.ReverseProxyDomain {
Validated: d.Validated,
SupportsCustomPorts: d.SupportsCustomPorts,
RequireSubdomain: d.RequireSubdomain,
+ SupportsCrowdsec: d.SupportsCrowdSec,
}
if d.TargetCluster != "" {
resp.TargetCluster = &d.TargetCluster
diff --git a/management/internals/modules/reverseproxy/domain/manager/manager.go b/management/internals/modules/reverseproxy/domain/manager/manager.go
index c6c41bfe5..2c4c1372e 100644
--- a/management/internals/modules/reverseproxy/domain/manager/manager.go
+++ b/management/internals/modules/reverseproxy/domain/manager/manager.go
@@ -33,6 +33,7 @@ type proxyManager interface {
GetActiveClusterAddresses(ctx context.Context) ([]string, error)
ClusterSupportsCustomPorts(ctx context.Context, clusterAddr string) *bool
ClusterRequireSubdomain(ctx context.Context, clusterAddr string) *bool
+ ClusterSupportsCrowdSec(ctx context.Context, clusterAddr string) *bool
}
type Manager struct {
@@ -90,6 +91,7 @@ func (m Manager) GetDomains(ctx context.Context, accountID, userID string) ([]*d
}
d.SupportsCustomPorts = m.proxyManager.ClusterSupportsCustomPorts(ctx, cluster)
d.RequireSubdomain = m.proxyManager.ClusterRequireSubdomain(ctx, cluster)
+ d.SupportsCrowdSec = m.proxyManager.ClusterSupportsCrowdSec(ctx, cluster)
ret = append(ret, d)
}
@@ -105,6 +107,7 @@ func (m Manager) GetDomains(ctx context.Context, accountID, userID string) ([]*d
}
if d.TargetCluster != "" {
cd.SupportsCustomPorts = m.proxyManager.ClusterSupportsCustomPorts(ctx, d.TargetCluster)
+ cd.SupportsCrowdSec = m.proxyManager.ClusterSupportsCrowdSec(ctx, d.TargetCluster)
}
// Custom domains never require a subdomain by default since
// the account owns them and should be able to use the bare domain.
diff --git a/management/internals/modules/reverseproxy/proxy/manager.go b/management/internals/modules/reverseproxy/proxy/manager.go
index 0368b84de..aa7cd8630 100644
--- a/management/internals/modules/reverseproxy/proxy/manager.go
+++ b/management/internals/modules/reverseproxy/proxy/manager.go
@@ -18,6 +18,7 @@ type Manager interface {
GetActiveClusters(ctx context.Context) ([]Cluster, error)
ClusterSupportsCustomPorts(ctx context.Context, clusterAddr string) *bool
ClusterRequireSubdomain(ctx context.Context, clusterAddr string) *bool
+ ClusterSupportsCrowdSec(ctx context.Context, clusterAddr string) *bool
CleanupStale(ctx context.Context, inactivityDuration time.Duration) error
}
diff --git a/management/internals/modules/reverseproxy/proxy/manager/manager.go b/management/internals/modules/reverseproxy/proxy/manager/manager.go
index a92fffab9..d13334e83 100644
--- a/management/internals/modules/reverseproxy/proxy/manager/manager.go
+++ b/management/internals/modules/reverseproxy/proxy/manager/manager.go
@@ -18,6 +18,7 @@ type store interface {
GetActiveProxyClusters(ctx context.Context) ([]proxy.Cluster, error)
GetClusterSupportsCustomPorts(ctx context.Context, clusterAddr string) *bool
GetClusterRequireSubdomain(ctx context.Context, clusterAddr string) *bool
+ GetClusterSupportsCrowdSec(ctx context.Context, clusterAddr string) *bool
CleanupStaleProxies(ctx context.Context, inactivityDuration time.Duration) error
}
@@ -138,6 +139,12 @@ func (m Manager) ClusterRequireSubdomain(ctx context.Context, clusterAddr string
return m.store.GetClusterRequireSubdomain(ctx, clusterAddr)
}
+// ClusterSupportsCrowdSec returns whether all active proxies in the cluster
+// have CrowdSec configured (unanimous). Returns nil when no proxy has reported capabilities.
+func (m Manager) ClusterSupportsCrowdSec(ctx context.Context, clusterAddr string) *bool {
+ return m.store.GetClusterSupportsCrowdSec(ctx, clusterAddr)
+}
+
// CleanupStale removes proxies that haven't sent heartbeat in the specified duration
func (m Manager) CleanupStale(ctx context.Context, inactivityDuration time.Duration) error {
if err := m.store.CleanupStaleProxies(ctx, inactivityDuration); err != nil {
diff --git a/management/internals/modules/reverseproxy/proxy/manager_mock.go b/management/internals/modules/reverseproxy/proxy/manager_mock.go
index 97466c503..282ca0ba5 100644
--- a/management/internals/modules/reverseproxy/proxy/manager_mock.go
+++ b/management/internals/modules/reverseproxy/proxy/manager_mock.go
@@ -78,6 +78,20 @@ func (mr *MockManagerMockRecorder) ClusterRequireSubdomain(ctx, clusterAddr inte
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClusterRequireSubdomain", reflect.TypeOf((*MockManager)(nil).ClusterRequireSubdomain), ctx, clusterAddr)
}
+// ClusterSupportsCrowdSec mocks base method.
+func (m *MockManager) ClusterSupportsCrowdSec(ctx context.Context, clusterAddr string) *bool {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "ClusterSupportsCrowdSec", ctx, clusterAddr)
+ ret0, _ := ret[0].(*bool)
+ return ret0
+}
+
+// ClusterSupportsCrowdSec indicates an expected call of ClusterSupportsCrowdSec.
+func (mr *MockManagerMockRecorder) ClusterSupportsCrowdSec(ctx, clusterAddr interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ClusterSupportsCrowdSec", reflect.TypeOf((*MockManager)(nil).ClusterSupportsCrowdSec), ctx, clusterAddr)
+}
+
// Connect mocks base method.
func (m *MockManager) Connect(ctx context.Context, proxyID, clusterAddress, ipAddress string, capabilities *Capabilities) error {
m.ctrl.T.Helper()
diff --git a/management/internals/modules/reverseproxy/proxy/proxy.go b/management/internals/modules/reverseproxy/proxy/proxy.go
index 4102e50fe..339c82446 100644
--- a/management/internals/modules/reverseproxy/proxy/proxy.go
+++ b/management/internals/modules/reverseproxy/proxy/proxy.go
@@ -11,6 +11,8 @@ type Capabilities struct {
// RequireSubdomain indicates whether a subdomain label is required in
// front of the cluster domain.
RequireSubdomain *bool
+ // SupportsCrowdsec indicates whether this proxy has CrowdSec configured.
+ SupportsCrowdsec *bool
}
// Proxy represents a reverse proxy instance
diff --git a/management/internals/modules/reverseproxy/service/manager/l4_port_test.go b/management/internals/modules/reverseproxy/service/manager/l4_port_test.go
index 4a7647d90..28461641d 100644
--- a/management/internals/modules/reverseproxy/service/manager/l4_port_test.go
+++ b/management/internals/modules/reverseproxy/service/manager/l4_port_test.go
@@ -81,12 +81,13 @@ func setupL4Test(t *testing.T, customPortsSupported *bool) (*Manager, store.Stor
mockCaps := proxy.NewMockManager(ctrl)
mockCaps.EXPECT().ClusterSupportsCustomPorts(gomock.Any(), testCluster).Return(customPortsSupported).AnyTimes()
mockCaps.EXPECT().ClusterRequireSubdomain(gomock.Any(), testCluster).Return((*bool)(nil)).AnyTimes()
+ mockCaps.EXPECT().ClusterSupportsCrowdSec(gomock.Any(), testCluster).Return((*bool)(nil)).AnyTimes()
accountMgr := &mock_server.MockAccountManager{
StoreEventFunc: func(_ context.Context, _, _, _ string, _ activity.ActivityDescriber, _ map[string]any) {},
UpdateAccountPeersFunc: func(_ context.Context, _ string) {},
- GetGroupByNameFunc: func(ctx context.Context, accountID, groupName string) (*types.Group, error) {
- return testStore.GetGroupByName(ctx, store.LockingStrengthNone, groupName, accountID)
+ GetGroupByNameFunc: func(ctx context.Context, groupName, accountID, userID string) (*types.Group, error) {
+ return testStore.GetGroupByName(ctx, store.LockingStrengthNone, accountID, groupName)
},
}
diff --git a/management/internals/modules/reverseproxy/service/manager/manager.go b/management/internals/modules/reverseproxy/service/manager/manager.go
index 989187826..ed9d4201b 100644
--- a/management/internals/modules/reverseproxy/service/manager/manager.go
+++ b/management/internals/modules/reverseproxy/service/manager/manager.go
@@ -1119,7 +1119,7 @@ func (m *Manager) getGroupIDsFromNames(ctx context.Context, accountID string, gr
}
groupIDs := make([]string, 0, len(groupNames))
for _, groupName := range groupNames {
- g, err := m.accountManager.GetGroupByName(ctx, groupName, accountID)
+ g, err := m.accountManager.GetGroupByName(ctx, groupName, accountID, activity.SystemInitiator)
if err != nil {
return nil, fmt.Errorf("failed to get group by name %s: %w", groupName, err)
}
diff --git a/management/internals/modules/reverseproxy/service/manager/manager_test.go b/management/internals/modules/reverseproxy/service/manager/manager_test.go
index f6e532118..69d48f10a 100644
--- a/management/internals/modules/reverseproxy/service/manager/manager_test.go
+++ b/management/internals/modules/reverseproxy/service/manager/manager_test.go
@@ -698,8 +698,8 @@ func setupIntegrationTest(t *testing.T) (*Manager, store.Store) {
accountMgr := &mock_server.MockAccountManager{
StoreEventFunc: func(_ context.Context, _, _, _ string, _ activity.ActivityDescriber, _ map[string]any) {},
UpdateAccountPeersFunc: func(_ context.Context, _ string) {},
- GetGroupByNameFunc: func(ctx context.Context, accountID, groupName string) (*types.Group, error) {
- return testStore.GetGroupByName(ctx, store.LockingStrengthNone, groupName, accountID)
+ GetGroupByNameFunc: func(ctx context.Context, groupName, accountID, userID string) (*types.Group, error) {
+ return testStore.GetGroupByName(ctx, store.LockingStrengthNone, accountID, groupName)
},
}
diff --git a/management/internals/modules/reverseproxy/service/service.go b/management/internals/modules/reverseproxy/service/service.go
index 60b36917c..769e037bc 100644
--- a/management/internals/modules/reverseproxy/service/service.go
+++ b/management/internals/modules/reverseproxy/service/service.go
@@ -113,6 +113,7 @@ type AccessRestrictions struct {
BlockedCIDRs []string `json:"blocked_cidrs,omitempty" gorm:"serializer:json"`
AllowedCountries []string `json:"allowed_countries,omitempty" gorm:"serializer:json"`
BlockedCountries []string `json:"blocked_countries,omitempty" gorm:"serializer:json"`
+ CrowdSecMode string `json:"crowdsec_mode,omitempty" gorm:"serializer:json"`
}
// Copy returns a deep copy of the AccessRestrictions.
@@ -122,6 +123,7 @@ func (r AccessRestrictions) Copy() AccessRestrictions {
BlockedCIDRs: slices.Clone(r.BlockedCIDRs),
AllowedCountries: slices.Clone(r.AllowedCountries),
BlockedCountries: slices.Clone(r.BlockedCountries),
+ CrowdSecMode: r.CrowdSecMode,
}
}
@@ -555,7 +557,11 @@ func (s *Service) FromAPIRequest(req *api.ServiceRequest, accountID string) erro
}
if req.AccessRestrictions != nil {
- s.Restrictions = restrictionsFromAPI(req.AccessRestrictions)
+ restrictions, err := restrictionsFromAPI(req.AccessRestrictions)
+ if err != nil {
+ return err
+ }
+ s.Restrictions = restrictions
}
return nil
@@ -631,9 +637,9 @@ func authFromAPI(reqAuth *api.ServiceAuthConfig) AuthConfig {
return auth
}
-func restrictionsFromAPI(r *api.AccessRestrictions) AccessRestrictions {
+func restrictionsFromAPI(r *api.AccessRestrictions) (AccessRestrictions, error) {
if r == nil {
- return AccessRestrictions{}
+ return AccessRestrictions{}, nil
}
var res AccessRestrictions
if r.AllowedCidrs != nil {
@@ -648,11 +654,19 @@ func restrictionsFromAPI(r *api.AccessRestrictions) AccessRestrictions {
if r.BlockedCountries != nil {
res.BlockedCountries = *r.BlockedCountries
}
- return res
+ if r.CrowdsecMode != nil {
+ if !r.CrowdsecMode.Valid() {
+ return AccessRestrictions{}, fmt.Errorf("invalid crowdsec_mode %q", *r.CrowdsecMode)
+ }
+ res.CrowdSecMode = string(*r.CrowdsecMode)
+ }
+ return res, nil
}
func restrictionsToAPI(r AccessRestrictions) *api.AccessRestrictions {
- if len(r.AllowedCIDRs) == 0 && len(r.BlockedCIDRs) == 0 && len(r.AllowedCountries) == 0 && len(r.BlockedCountries) == 0 {
+ if len(r.AllowedCIDRs) == 0 && len(r.BlockedCIDRs) == 0 &&
+ len(r.AllowedCountries) == 0 && len(r.BlockedCountries) == 0 &&
+ r.CrowdSecMode == "" {
return nil
}
res := &api.AccessRestrictions{}
@@ -668,11 +682,17 @@ func restrictionsToAPI(r AccessRestrictions) *api.AccessRestrictions {
if len(r.BlockedCountries) > 0 {
res.BlockedCountries = &r.BlockedCountries
}
+ if r.CrowdSecMode != "" {
+ mode := api.AccessRestrictionsCrowdsecMode(r.CrowdSecMode)
+ res.CrowdsecMode = &mode
+ }
return res
}
func restrictionsToProto(r AccessRestrictions) *proto.AccessRestrictions {
- if len(r.AllowedCIDRs) == 0 && len(r.BlockedCIDRs) == 0 && len(r.AllowedCountries) == 0 && len(r.BlockedCountries) == 0 {
+ if len(r.AllowedCIDRs) == 0 && len(r.BlockedCIDRs) == 0 &&
+ len(r.AllowedCountries) == 0 && len(r.BlockedCountries) == 0 &&
+ r.CrowdSecMode == "" {
return nil
}
return &proto.AccessRestrictions{
@@ -680,6 +700,7 @@ func restrictionsToProto(r AccessRestrictions) *proto.AccessRestrictions {
BlockedCidrs: r.BlockedCIDRs,
AllowedCountries: r.AllowedCountries,
BlockedCountries: r.BlockedCountries,
+ CrowdsecMode: r.CrowdSecMode,
}
}
@@ -988,7 +1009,20 @@ const (
+// validateCrowdSecMode checks that mode is empty or one of the supported CrowdSec modes.
+func validateCrowdSecMode(mode string) error {
+	switch mode {
+	case "", "off", "enforce", "observe":
+		return nil
+	default:
+		return fmt.Errorf("crowdsec_mode %q is invalid", mode)
+	}
+}
// validateAccessRestrictions validates and normalizes access restriction
// entries. Country codes are uppercased in place.
func validateAccessRestrictions(r *AccessRestrictions) error {
+ if err := validateCrowdSecMode(r.CrowdSecMode); err != nil {
+ return err
+ }
+
if len(r.AllowedCIDRs) > maxCIDREntries {
return fmt.Errorf("allowed_cidrs: exceeds maximum of %d entries", maxCIDREntries)
}
@@ -1002,35 +1036,37 @@ func validateAccessRestrictions(r *AccessRestrictions) error {
return fmt.Errorf("blocked_countries: exceeds maximum of %d entries", maxCountryEntries)
}
- for i, raw := range r.AllowedCIDRs {
+ if err := validateCIDRList("allowed_cidrs", r.AllowedCIDRs); err != nil {
+ return err
+ }
+ if err := validateCIDRList("blocked_cidrs", r.BlockedCIDRs); err != nil {
+ return err
+ }
+ if err := normalizeCountryList("allowed_countries", r.AllowedCountries); err != nil {
+ return err
+ }
+ return normalizeCountryList("blocked_countries", r.BlockedCountries)
+}
+
+func validateCIDRList(field string, cidrs []string) error {
+ for i, raw := range cidrs {
prefix, err := netip.ParsePrefix(raw)
if err != nil {
- return fmt.Errorf("allowed_cidrs[%d]: %w", i, err)
+ return fmt.Errorf("%s[%d]: %w", field, i, err)
}
if prefix != prefix.Masked() {
- return fmt.Errorf("allowed_cidrs[%d]: %q has host bits set, use %s instead", i, raw, prefix.Masked())
+ return fmt.Errorf("%s[%d]: %q has host bits set, use %s instead", field, i, raw, prefix.Masked())
}
}
- for i, raw := range r.BlockedCIDRs {
- prefix, err := netip.ParsePrefix(raw)
- if err != nil {
- return fmt.Errorf("blocked_cidrs[%d]: %w", i, err)
- }
- if prefix != prefix.Masked() {
- return fmt.Errorf("blocked_cidrs[%d]: %q has host bits set, use %s instead", i, raw, prefix.Masked())
- }
- }
- for i, code := range r.AllowedCountries {
+ return nil
+}
+
+func normalizeCountryList(field string, codes []string) error {
+ for i, code := range codes {
if len(code) != 2 {
- return fmt.Errorf("allowed_countries[%d]: %q must be a 2-letter ISO 3166-1 alpha-2 code", i, code)
+ return fmt.Errorf("%s[%d]: %q must be a 2-letter ISO 3166-1 alpha-2 code", field, i, code)
}
- r.AllowedCountries[i] = strings.ToUpper(code)
- }
- for i, code := range r.BlockedCountries {
- if len(code) != 2 {
- return fmt.Errorf("blocked_countries[%d]: %q must be a 2-letter ISO 3166-1 alpha-2 code", i, code)
- }
- r.BlockedCountries[i] = strings.ToUpper(code)
+ codes[i] = strings.ToUpper(code)
}
return nil
}
diff --git a/management/internals/server/config/config.go b/management/internals/server/config/config.go
index 0ba393263..fb9c842b7 100644
--- a/management/internals/server/config/config.go
+++ b/management/internals/server/config/config.go
@@ -203,7 +203,7 @@ type ReverseProxy struct {
// AccessLogRetentionDays specifies the number of days to retain access logs.
// Logs older than this duration will be automatically deleted during cleanup.
- // A value of 0 or negative means logs are kept indefinitely (no cleanup).
+ // A value of 0 will default to 7 days. Negative means logs are kept indefinitely (no cleanup).
AccessLogRetentionDays int
// AccessLogCleanupIntervalHours specifies how often (in hours) to run the cleanup routine.
diff --git a/management/internals/shared/grpc/proxy.go b/management/internals/shared/grpc/proxy.go
index 07732cea6..a5e352e75 100644
--- a/management/internals/shared/grpc/proxy.go
+++ b/management/internals/shared/grpc/proxy.go
@@ -188,6 +188,7 @@ func (s *ProxyServiceServer) GetMappingUpdate(req *proto.GetMappingUpdateRequest
caps = &proxy.Capabilities{
SupportsCustomPorts: c.SupportsCustomPorts,
RequireSubdomain: c.RequireSubdomain,
+ SupportsCrowdsec: c.SupportsCrowdsec,
}
}
if err := s.proxyManager.Connect(ctx, proxyID, proxyAddress, peerInfo, caps); err != nil {
diff --git a/management/server/account.go b/management/server/account.go
index 75db36a5f..d90b46659 100644
--- a/management/server/account.go
+++ b/management/server/account.go
@@ -742,11 +742,6 @@ func (am *DefaultAccountManager) DeleteAccount(ctx context.Context, accountID, u
return status.Errorf(status.Internal, "failed to build user infos for account %s: %v", accountID, err)
}
- err = am.serviceManager.DeleteAllServices(ctx, accountID, userID)
- if err != nil {
- return status.Errorf(status.Internal, "failed to delete service %s: %v", accountID, err)
- }
-
for _, otherUser := range account.Users {
if otherUser.Id == userID {
continue
diff --git a/management/server/account/manager.go b/management/server/account/manager.go
index 45af63ae8..b4516d512 100644
--- a/management/server/account/manager.go
+++ b/management/server/account/manager.go
@@ -75,7 +75,7 @@ type Manager interface {
GetUsersFromAccount(ctx context.Context, accountID, userID string) (map[string]*types.UserInfo, error)
GetGroup(ctx context.Context, accountId, groupID, userID string) (*types.Group, error)
GetAllGroups(ctx context.Context, accountID, userID string) ([]*types.Group, error)
- GetGroupByName(ctx context.Context, groupName, accountID string) (*types.Group, error)
+ GetGroupByName(ctx context.Context, groupName, accountID, userID string) (*types.Group, error)
CreateGroup(ctx context.Context, accountID, userID string, group *types.Group) error
UpdateGroup(ctx context.Context, accountID, userID string, group *types.Group) error
CreateGroups(ctx context.Context, accountID, userID string, newGroups []*types.Group) error
diff --git a/management/server/account/manager_mock.go b/management/server/account/manager_mock.go
index 90700c795..36e5fe39f 100644
--- a/management/server/account/manager_mock.go
+++ b/management/server/account/manager_mock.go
@@ -736,18 +736,18 @@ func (mr *MockManagerMockRecorder) GetGroup(ctx, accountId, groupID, userID inte
}
// GetGroupByName mocks base method.
-func (m *MockManager) GetGroupByName(ctx context.Context, groupName, accountID string) (*types.Group, error) {
+func (m *MockManager) GetGroupByName(ctx context.Context, groupName, accountID, userID string) (*types.Group, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetGroupByName", ctx, groupName, accountID)
+ ret := m.ctrl.Call(m, "GetGroupByName", ctx, groupName, accountID, userID)
ret0, _ := ret[0].(*types.Group)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetGroupByName indicates an expected call of GetGroupByName.
-func (mr *MockManagerMockRecorder) GetGroupByName(ctx, groupName, accountID interface{}) *gomock.Call {
+func (mr *MockManagerMockRecorder) GetGroupByName(ctx, groupName, accountID, userID interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGroupByName", reflect.TypeOf((*MockManager)(nil).GetGroupByName), ctx, groupName, accountID)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGroupByName", reflect.TypeOf((*MockManager)(nil).GetGroupByName), ctx, groupName, accountID, userID)
}
// GetIdentityProvider mocks base method.
diff --git a/management/server/account_request_buffer.go b/management/server/account_request_buffer.go
index ac53a9fa8..e1672c2d0 100644
--- a/management/server/account_request_buffer.go
+++ b/management/server/account_request_buffer.go
@@ -63,20 +63,20 @@ func (ac *AccountRequestBuffer) GetAccountWithBackpressure(ctx context.Context,
 log.WithContext(ctx).Tracef("requesting account %s with backpressure", accountID)
 startTime := time.Now()
- select {
- case <-ctx.Done():
- return nil, ctx.Err()
- case ac.getAccountRequestCh <- req:
- }
-
- select {
- case <-ctx.Done():
- return nil, ctx.Err()
- case result := <-req.ResultChan:
- log.WithContext(ctx).Tracef("got account with backpressure after %s", time.Since(startTime))
- return result.Account, result.Err
- }
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ case ac.getAccountRequestCh <- req:
+ }
+
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ case result := <-req.ResultChan:
+ log.WithContext(ctx).Tracef("got account with backpressure after %s", time.Since(startTime))
+ return result.Account, result.Err
+ }
}
func (ac *AccountRequestBuffer) processGetAccountBatch(ctx context.Context, accountID string) {
diff --git a/management/server/account_test.go b/management/server/account_test.go
index 548cf31d4..2f0533281 100644
--- a/management/server/account_test.go
+++ b/management/server/account_test.go
@@ -15,7 +15,6 @@ import (
"time"
"github.com/golang/mock/gomock"
- "github.com/netbirdio/netbird/shared/management/status"
"github.com/prometheus/client_golang/prometheus/push"
log "github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
@@ -23,6 +22,9 @@ import (
"go.opentelemetry.io/otel/metric/noop"
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
+ "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/domain"
+ "github.com/netbirdio/netbird/shared/management/status"
+
nbdns "github.com/netbirdio/netbird/dns"
"github.com/netbirdio/netbird/management/internals/controllers/network_map"
"github.com/netbirdio/netbird/management/internals/controllers/network_map/controller"
@@ -1815,6 +1817,13 @@ func TestAccount_Copy(t *testing.T) {
Targets: []*service.Target{},
},
},
+ Domains: []*domain.Domain{
+ {
+ ID: "domain1",
+ Domain: "test.com",
+ AccountID: "account1",
+ },
+ },
NetworkMapCache: &types.NetworkMapBuilder{},
}
account.InitOnce()
diff --git a/management/server/group.go b/management/server/group.go
index 326b167cf..7b5b9b86c 100644
--- a/management/server/group.go
+++ b/management/server/group.go
@@ -61,7 +61,10 @@ func (am *DefaultAccountManager) GetAllGroups(ctx context.Context, accountID, us
}
// GetGroupByName filters all groups in an account by name and returns the one with the most peers
-func (am *DefaultAccountManager) GetGroupByName(ctx context.Context, groupName, accountID string) (*types.Group, error) {
+func (am *DefaultAccountManager) GetGroupByName(ctx context.Context, groupName, accountID, userID string) (*types.Group, error) {
+ if err := am.CheckGroupPermissions(ctx, accountID, userID); err != nil {
+ return nil, err
+ }
return am.Store.GetGroupByName(ctx, store.LockingStrengthNone, accountID, groupName)
}
diff --git a/management/server/http/handlers/groups/groups_handler.go b/management/server/http/handlers/groups/groups_handler.go
index 56ccc9d0b..f8d161a87 100644
--- a/management/server/http/handlers/groups/groups_handler.go
+++ b/management/server/http/handlers/groups/groups_handler.go
@@ -52,7 +52,7 @@ func (h *handler) getAllGroups(w http.ResponseWriter, r *http.Request) {
groupName := r.URL.Query().Get("name")
if groupName != "" {
// Get single group by name
- group, err := h.accountManager.GetGroupByName(r.Context(), groupName, accountID)
+ group, err := h.accountManager.GetGroupByName(r.Context(), groupName, accountID, userID)
if err != nil {
util.WriteError(r.Context(), err, w)
return
@@ -118,7 +118,7 @@ func (h *handler) updateGroup(w http.ResponseWriter, r *http.Request) {
return
}
- allGroup, err := h.accountManager.GetGroupByName(r.Context(), "All", accountID)
+ allGroup, err := h.accountManager.GetGroupByName(r.Context(), "All", accountID, userID)
if err != nil {
util.WriteError(r.Context(), err, w)
return
diff --git a/management/server/http/handlers/groups/groups_handler_test.go b/management/server/http/handlers/groups/groups_handler_test.go
index 458a15c11..c7b4cbcdd 100644
--- a/management/server/http/handlers/groups/groups_handler_test.go
+++ b/management/server/http/handlers/groups/groups_handler_test.go
@@ -71,7 +71,7 @@ func initGroupTestData(initGroups ...*types.Group) *handler {
return groups, nil
},
- GetGroupByNameFunc: func(ctx context.Context, groupName, _ string) (*types.Group, error) {
+ GetGroupByNameFunc: func(ctx context.Context, groupName, _, _ string) (*types.Group, error) {
if groupName == "All" {
return &types.Group{ID: "id-all", Name: "All", Issued: types.GroupIssuedAPI}, nil
}
diff --git a/management/server/migration/migration.go b/management/server/migration/migration.go
index 29555ed0c..7a51cc200 100644
--- a/management/server/migration/migration.go
+++ b/management/server/migration/migration.go
@@ -489,6 +489,102 @@ func MigrateJsonToTable[T any](ctx context.Context, db *gorm.DB, columnName stri
return nil
}
+// hasForeignKey checks whether a foreign key constraint exists on the given table and column.
+func hasForeignKey(db *gorm.DB, table, column string) bool {
+ var count int64
+
+ switch db.Name() {
+ case "postgres":
+ db.Raw(`
+ SELECT COUNT(*) FROM information_schema.key_column_usage kcu
+ JOIN information_schema.table_constraints tc
+ ON tc.constraint_name = kcu.constraint_name
+ AND tc.table_schema = kcu.table_schema
+ WHERE tc.constraint_type = 'FOREIGN KEY'
+ AND kcu.table_name = ?
+ AND kcu.column_name = ?
+ `, table, column).Scan(&count)
+ case "mysql":
+ db.Raw(`
+ SELECT COUNT(*) FROM information_schema.key_column_usage
+ WHERE table_schema = DATABASE()
+ AND table_name = ?
+ AND column_name = ?
+ AND referenced_table_name IS NOT NULL
+ `, table, column).Scan(&count)
+ default: // sqlite
+ type fkInfo struct {
+ From string
+ }
+ var fks []fkInfo
+ db.Raw(fmt.Sprintf("PRAGMA foreign_key_list(%s)", table)).Scan(&fks)
+ for _, fk := range fks {
+ if fk.From == column {
+ return true
+ }
+ }
+ return false
+ }
+
+ return count > 0
+}
+
+// CleanupOrphanedResources deletes rows from the table of model T where the foreign
+// key column (fkColumn) references a row in the table of model R that no longer exists.
+func CleanupOrphanedResources[T any, R any](ctx context.Context, db *gorm.DB, fkColumn string) error {
+ var model T
+ var refModel R
+
+ if !db.Migrator().HasTable(&model) {
+ log.WithContext(ctx).Debugf("table for %T does not exist, no cleanup needed", model)
+ return nil
+ }
+
+ if !db.Migrator().HasTable(&refModel) {
+ log.WithContext(ctx).Debugf("referenced table for %T does not exist, no cleanup needed", refModel)
+ return nil
+ }
+
+ stmtT := &gorm.Statement{DB: db}
+ if err := stmtT.Parse(&model); err != nil {
+ return fmt.Errorf("parse model %T: %w", model, err)
+ }
+ childTable := stmtT.Schema.Table
+
+ stmtR := &gorm.Statement{DB: db}
+ if err := stmtR.Parse(&refModel); err != nil {
+ return fmt.Errorf("parse reference model %T: %w", refModel, err)
+ }
+ parentTable := stmtR.Schema.Table
+
+ if !db.Migrator().HasColumn(&model, fkColumn) {
+ log.WithContext(ctx).Debugf("column %s does not exist in table %s, no cleanup needed", fkColumn, childTable)
+ return nil
+ }
+
+ // If a foreign key constraint already exists on the column, the DB itself
+ // enforces referential integrity and orphaned rows cannot exist.
+ if hasForeignKey(db, childTable, fkColumn) {
+ log.WithContext(ctx).Debugf("foreign key constraint for %s already exists on %s, no cleanup needed", fkColumn, childTable)
+ return nil
+ }
+
+ result := db.Exec(
+ fmt.Sprintf(
+ "DELETE FROM %s WHERE %s NOT IN (SELECT id FROM %s)",
+ childTable, fkColumn, parentTable,
+ ),
+ )
+ if result.Error != nil {
+ return fmt.Errorf("cleanup orphaned rows in %s: %w", childTable, result.Error)
+ }
+
+ log.WithContext(ctx).Infof("Cleaned up %d orphaned rows from %s where %s had no matching row in %s",
+ result.RowsAffected, childTable, fkColumn, parentTable)
+
+ return nil
+}
+
func RemoveDuplicatePeerKeys(ctx context.Context, db *gorm.DB) error {
if !db.Migrator().HasTable("peers") {
log.WithContext(ctx).Debug("peers table does not exist, skipping duplicate key cleanup")
diff --git a/management/server/migration/migration_test.go b/management/server/migration/migration_test.go
index c1be8a3a3..5e00976c2 100644
--- a/management/server/migration/migration_test.go
+++ b/management/server/migration/migration_test.go
@@ -441,3 +441,197 @@ func TestRemoveDuplicatePeerKeys_NoTable(t *testing.T) {
err := migration.RemoveDuplicatePeerKeys(context.Background(), db)
require.NoError(t, err, "Should not fail when table does not exist")
}
+
+type testParent struct {
+ ID string `gorm:"primaryKey"`
+}
+
+func (testParent) TableName() string {
+ return "test_parents"
+}
+
+type testChild struct {
+ ID string `gorm:"primaryKey"`
+ ParentID string
+}
+
+func (testChild) TableName() string {
+ return "test_children"
+}
+
+type testChildWithFK struct {
+ ID string `gorm:"primaryKey"`
+ ParentID string `gorm:"index"`
+ Parent *testParent `gorm:"foreignKey:ParentID"`
+}
+
+func (testChildWithFK) TableName() string {
+ return "test_children"
+}
+
+func setupOrphanTestDB(t *testing.T, models ...any) *gorm.DB {
+ t.Helper()
+ db := setupDatabase(t)
+ for _, m := range models {
+ _ = db.Migrator().DropTable(m)
+ }
+ err := db.AutoMigrate(models...)
+ require.NoError(t, err, "Failed to auto-migrate tables")
+ return db
+}
+
+func TestCleanupOrphanedResources_NoChildTable(t *testing.T) {
+ db := setupDatabase(t)
+ _ = db.Migrator().DropTable(&testChild{})
+ _ = db.Migrator().DropTable(&testParent{})
+
+ err := migration.CleanupOrphanedResources[testChild, testParent](context.Background(), db, "parent_id")
+ require.NoError(t, err, "Should not fail when child table does not exist")
+}
+
+func TestCleanupOrphanedResources_NoParentTable(t *testing.T) {
+ db := setupDatabase(t)
+ _ = db.Migrator().DropTable(&testParent{})
+ _ = db.Migrator().DropTable(&testChild{})
+
+ err := db.AutoMigrate(&testChild{})
+ require.NoError(t, err)
+
+ err = migration.CleanupOrphanedResources[testChild, testParent](context.Background(), db, "parent_id")
+ require.NoError(t, err, "Should not fail when parent table does not exist")
+}
+
+func TestCleanupOrphanedResources_EmptyTables(t *testing.T) {
+ db := setupOrphanTestDB(t, &testParent{}, &testChild{})
+
+ err := migration.CleanupOrphanedResources[testChild, testParent](context.Background(), db, "parent_id")
+ require.NoError(t, err, "Should not fail on empty tables")
+
+ var count int64
+ db.Model(&testChild{}).Count(&count)
+ assert.Equal(t, int64(0), count)
+}
+
+func TestCleanupOrphanedResources_NoOrphans(t *testing.T) {
+ db := setupOrphanTestDB(t, &testParent{}, &testChild{})
+
+ require.NoError(t, db.Create(&testParent{ID: "p1"}).Error)
+ require.NoError(t, db.Create(&testParent{ID: "p2"}).Error)
+ require.NoError(t, db.Create(&testChild{ID: "c1", ParentID: "p1"}).Error)
+ require.NoError(t, db.Create(&testChild{ID: "c2", ParentID: "p2"}).Error)
+
+ err := migration.CleanupOrphanedResources[testChild, testParent](context.Background(), db, "parent_id")
+ require.NoError(t, err)
+
+ var count int64
+ db.Model(&testChild{}).Count(&count)
+ assert.Equal(t, int64(2), count, "All children should remain when no orphans")
+}
+
+func TestCleanupOrphanedResources_AllOrphans(t *testing.T) {
+ db := setupOrphanTestDB(t, &testParent{}, &testChild{})
+
+ require.NoError(t, db.Exec("INSERT INTO test_children (id, parent_id) VALUES (?, ?)", "c1", "gone1").Error)
+ require.NoError(t, db.Exec("INSERT INTO test_children (id, parent_id) VALUES (?, ?)", "c2", "gone2").Error)
+ require.NoError(t, db.Exec("INSERT INTO test_children (id, parent_id) VALUES (?, ?)", "c3", "gone3").Error)
+
+ err := migration.CleanupOrphanedResources[testChild, testParent](context.Background(), db, "parent_id")
+ require.NoError(t, err)
+
+ var count int64
+ db.Model(&testChild{}).Count(&count)
+ assert.Equal(t, int64(0), count, "All orphaned children should be deleted")
+}
+
+func TestCleanupOrphanedResources_MixedValidAndOrphaned(t *testing.T) {
+ db := setupOrphanTestDB(t, &testParent{}, &testChild{})
+
+ require.NoError(t, db.Create(&testParent{ID: "p1"}).Error)
+ require.NoError(t, db.Create(&testParent{ID: "p2"}).Error)
+
+ require.NoError(t, db.Create(&testChild{ID: "c1", ParentID: "p1"}).Error)
+ require.NoError(t, db.Create(&testChild{ID: "c2", ParentID: "p2"}).Error)
+ require.NoError(t, db.Create(&testChild{ID: "c3", ParentID: "p1"}).Error)
+
+ require.NoError(t, db.Exec("INSERT INTO test_children (id, parent_id) VALUES (?, ?)", "c4", "gone1").Error)
+ require.NoError(t, db.Exec("INSERT INTO test_children (id, parent_id) VALUES (?, ?)", "c5", "gone2").Error)
+
+ err := migration.CleanupOrphanedResources[testChild, testParent](context.Background(), db, "parent_id")
+ require.NoError(t, err)
+
+ var remaining []testChild
+ require.NoError(t, db.Order("id").Find(&remaining).Error)
+
+ assert.Len(t, remaining, 3, "Only valid children should remain")
+ assert.Equal(t, "c1", remaining[0].ID)
+ assert.Equal(t, "c2", remaining[1].ID)
+ assert.Equal(t, "c3", remaining[2].ID)
+}
+
+func TestCleanupOrphanedResources_Idempotent(t *testing.T) {
+ db := setupOrphanTestDB(t, &testParent{}, &testChild{})
+
+ require.NoError(t, db.Create(&testParent{ID: "p1"}).Error)
+ require.NoError(t, db.Create(&testChild{ID: "c1", ParentID: "p1"}).Error)
+ require.NoError(t, db.Exec("INSERT INTO test_children (id, parent_id) VALUES (?, ?)", "c2", "gone").Error)
+
+ ctx := context.Background()
+
+ err := migration.CleanupOrphanedResources[testChild, testParent](ctx, db, "parent_id")
+ require.NoError(t, err)
+
+ var count int64
+ db.Model(&testChild{}).Count(&count)
+ assert.Equal(t, int64(1), count)
+
+ err = migration.CleanupOrphanedResources[testChild, testParent](ctx, db, "parent_id")
+ require.NoError(t, err)
+
+ db.Model(&testChild{}).Count(&count)
+ assert.Equal(t, int64(1), count, "Count should remain the same after second run")
+}
+
+func TestCleanupOrphanedResources_SkipsWhenForeignKeyExists(t *testing.T) {
+ engine := os.Getenv("NETBIRD_STORE_ENGINE")
+ if engine != "postgres" && engine != "mysql" {
+ t.Skip("FK constraint early-exit test requires postgres or mysql")
+ }
+
+ db := setupDatabase(t)
+ _ = db.Migrator().DropTable(&testChildWithFK{})
+ _ = db.Migrator().DropTable(&testParent{})
+
+ err := db.AutoMigrate(&testParent{}, &testChildWithFK{})
+ require.NoError(t, err)
+
+ require.NoError(t, db.Create(&testParent{ID: "p1"}).Error)
+ require.NoError(t, db.Create(&testParent{ID: "p2"}).Error)
+ require.NoError(t, db.Create(&testChildWithFK{ID: "c1", ParentID: "p1"}).Error)
+ require.NoError(t, db.Create(&testChildWithFK{ID: "c2", ParentID: "p2"}).Error)
+
+ switch engine {
+ case "postgres":
+ require.NoError(t, db.Exec("ALTER TABLE test_children DROP CONSTRAINT fk_test_children_parent").Error)
+ require.NoError(t, db.Exec("DELETE FROM test_parents WHERE id = ?", "p2").Error)
+ require.NoError(t, db.Exec(
+ "ALTER TABLE test_children ADD CONSTRAINT fk_test_children_parent "+
+ "FOREIGN KEY (parent_id) REFERENCES test_parents(id) NOT VALID",
+ ).Error)
+ case "mysql":
+ require.NoError(t, db.Exec("SET FOREIGN_KEY_CHECKS = 0").Error)
+ require.NoError(t, db.Exec("ALTER TABLE test_children DROP FOREIGN KEY fk_test_children_parent").Error)
+ require.NoError(t, db.Exec("DELETE FROM test_parents WHERE id = ?", "p2").Error)
+ require.NoError(t, db.Exec(
+ "ALTER TABLE test_children ADD CONSTRAINT fk_test_children_parent "+
+ "FOREIGN KEY (parent_id) REFERENCES test_parents(id)",
+ ).Error)
+ require.NoError(t, db.Exec("SET FOREIGN_KEY_CHECKS = 1").Error)
+ }
+
+ err = migration.CleanupOrphanedResources[testChildWithFK, testParent](context.Background(), db, "parent_id")
+ require.NoError(t, err)
+
+ var count int64
+ db.Model(&testChildWithFK{}).Count(&count)
+ assert.Equal(t, int64(2), count, "Both rows should survive — migration must skip when FK constraint exists")
+}
diff --git a/management/server/mock_server/account_mock.go b/management/server/mock_server/account_mock.go
index afd2021ac..ff369355e 100644
--- a/management/server/mock_server/account_mock.go
+++ b/management/server/mock_server/account_mock.go
@@ -46,7 +46,7 @@ type MockAccountManager struct {
AddPeerFunc func(ctx context.Context, accountID string, setupKey string, userId string, peer *nbpeer.Peer, temporary bool) (*nbpeer.Peer, *types.NetworkMap, []*posture.Checks, error)
GetGroupFunc func(ctx context.Context, accountID, groupID, userID string) (*types.Group, error)
GetAllGroupsFunc func(ctx context.Context, accountID, userID string) ([]*types.Group, error)
- GetGroupByNameFunc func(ctx context.Context, accountID, groupName string) (*types.Group, error)
+ GetGroupByNameFunc func(ctx context.Context, groupName, accountID, userID string) (*types.Group, error)
SaveGroupFunc func(ctx context.Context, accountID, userID string, group *types.Group, create bool) error
SaveGroupsFunc func(ctx context.Context, accountID, userID string, groups []*types.Group, create bool) error
DeleteGroupFunc func(ctx context.Context, accountID, userId, groupID string) error
@@ -406,9 +406,9 @@ func (am *MockAccountManager) AddPeer(
}
// GetGroupByName mock implementation of GetGroupByName from server.AccountManager interface
-func (am *MockAccountManager) GetGroupByName(ctx context.Context, accountID, groupName string) (*types.Group, error) {
+func (am *MockAccountManager) GetGroupByName(ctx context.Context, groupName, accountID, userID string) (*types.Group, error) {
if am.GetGroupByNameFunc != nil {
- return am.GetGroupByNameFunc(ctx, accountID, groupName)
+ return am.GetGroupByNameFunc(ctx, groupName, accountID, userID)
}
return nil, status.Errorf(codes.Unimplemented, "method GetGroupByName is not implemented")
}
diff --git a/management/server/store/sql_store.go b/management/server/store/sql_store.go
index ee1947b18..8189548b7 100644
--- a/management/server/store/sql_store.go
+++ b/management/server/store/sql_store.go
@@ -396,6 +396,11 @@ func (s *SqlStore) DeleteAccount(ctx context.Context, account *types.Account) er
return result.Error
}
+ result = tx.Select(clause.Associations).Delete(account.Services, "account_id = ?", account.Id)
+ if result.Error != nil {
+ return result.Error
+ }
+
result = tx.Select(clause.Associations).Delete(account)
if result.Error != nil {
return result.Error
@@ -1012,10 +1017,10 @@ func (s *SqlStore) GetAccountsCounter(ctx context.Context) (int64, error) {
// GetCustomDomainsCounts returns the total and validated custom domain counts.
func (s *SqlStore) GetCustomDomainsCounts(ctx context.Context) (int64, int64, error) {
var total, validated int64
- if err := s.db.WithContext(ctx).Model(&domain.Domain{}).Count(&total).Error; err != nil {
+ if err := s.db.WithContext(ctx).Model(&domain.Domain{}).Count(&total).Error; err != nil {
return 0, 0, err
}
- if err := s.db.WithContext(ctx).Model(&domain.Domain{}).Where("validated = ?", true).Count(&validated).Error; err != nil {
+ if err := s.db.WithContext(ctx).Model(&domain.Domain{}).Where("validated = ?", true).Count(&validated).Error; err != nil {
return 0, 0, err
}
return total, validated, nil
@@ -2080,7 +2085,8 @@ func (s *SqlStore) getPostureChecks(ctx context.Context, accountID string) ([]*p
func (s *SqlStore) getServices(ctx context.Context, accountID string) ([]*rpservice.Service, error) {
const serviceQuery = `SELECT id, account_id, name, domain, enabled, auth,
meta_created_at, meta_certificate_issued_at, meta_status, proxy_cluster,
- pass_host_header, rewrite_redirects, session_private_key, session_public_key
+ pass_host_header, rewrite_redirects, session_private_key, session_public_key,
+ mode, listen_port, port_auto_assigned, source, source_peer, terminated
FROM services WHERE account_id = $1`
const targetsQuery = `SELECT id, account_id, service_id, path, host, port, protocol,
@@ -2097,6 +2103,9 @@ func (s *SqlStore) getServices(ctx context.Context, accountID string) ([]*rpserv
var auth []byte
var createdAt, certIssuedAt sql.NullTime
var status, proxyCluster, sessionPrivateKey, sessionPublicKey sql.NullString
+ var mode, source, sourcePeer sql.NullString
+ var terminated, portAutoAssigned sql.NullBool
+ var listenPort sql.NullInt64
err := row.Scan(
&s.ID,
&s.AccountID,
@@ -2112,6 +2121,12 @@ func (s *SqlStore) getServices(ctx context.Context, accountID string) ([]*rpserv
&s.RewriteRedirects,
&sessionPrivateKey,
&sessionPublicKey,
+ &mode,
+ &listenPort,
+ &portAutoAssigned,
+ &source,
+ &sourcePeer,
+ &terminated,
)
if err != nil {
return nil, err
@@ -2143,7 +2158,24 @@ func (s *SqlStore) getServices(ctx context.Context, accountID string) ([]*rpserv
if sessionPublicKey.Valid {
s.SessionPublicKey = sessionPublicKey.String
}
-
+ if mode.Valid {
+ s.Mode = mode.String
+ }
+ if source.Valid {
+ s.Source = source.String
+ }
+ if sourcePeer.Valid {
+ s.SourcePeer = sourcePeer.String
+ }
+ if terminated.Valid {
+ s.Terminated = terminated.Bool
+ }
+ if portAutoAssigned.Valid {
+ s.PortAutoAssigned = portAutoAssigned.Bool
+ }
+ if listenPort.Valid {
+ s.ListenPort = uint16(listenPort.Int64)
+ }
s.Targets = []*rpservice.Target{}
return &s, nil
})
@@ -4410,7 +4442,7 @@ func (s *SqlStore) DeletePAT(ctx context.Context, userID, patID string) error {
// GetProxyAccessTokenByHashedToken retrieves a proxy access token by its hashed value.
func (s *SqlStore) GetProxyAccessTokenByHashedToken(ctx context.Context, lockStrength LockingStrength, hashedToken types.HashedProxyToken) (*types.ProxyAccessToken, error) {
- tx := s.db.WithContext(ctx)
+ tx := s.db.WithContext(ctx)
if lockStrength != LockingStrengthNone {
tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)})
}
@@ -4429,7 +4461,7 @@ func (s *SqlStore) GetProxyAccessTokenByHashedToken(ctx context.Context, lockStr
// GetAllProxyAccessTokens retrieves all proxy access tokens.
func (s *SqlStore) GetAllProxyAccessTokens(ctx context.Context, lockStrength LockingStrength) ([]*types.ProxyAccessToken, error) {
- tx := s.db.WithContext(ctx)
+ tx := s.db.WithContext(ctx)
if lockStrength != LockingStrengthNone {
tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)})
}
@@ -4445,7 +4477,7 @@ func (s *SqlStore) GetAllProxyAccessTokens(ctx context.Context, lockStrength Loc
// SaveProxyAccessToken saves a proxy access token to the database.
func (s *SqlStore) SaveProxyAccessToken(ctx context.Context, token *types.ProxyAccessToken) error {
- if result := s.db.WithContext(ctx).Create(token); result.Error != nil {
+ if result := s.db.WithContext(ctx).Create(token); result.Error != nil {
return status.Errorf(status.Internal, "save proxy access token: %v", result.Error)
}
return nil
@@ -4453,7 +4485,7 @@ func (s *SqlStore) SaveProxyAccessToken(ctx context.Context, token *types.ProxyA
// RevokeProxyAccessToken revokes a proxy access token by its ID.
func (s *SqlStore) RevokeProxyAccessToken(ctx context.Context, tokenID string) error {
- result := s.db.WithContext(ctx).Model(&types.ProxyAccessToken{}).Where(idQueryCondition, tokenID).Update("revoked", true)
+ result := s.db.WithContext(ctx).Model(&types.ProxyAccessToken{}).Where(idQueryCondition, tokenID).Update("revoked", true)
if result.Error != nil {
return status.Errorf(status.Internal, "revoke proxy access token: %v", result.Error)
}
@@ -4467,7 +4499,7 @@ func (s *SqlStore) RevokeProxyAccessToken(ctx context.Context, tokenID string) e
// MarkProxyAccessTokenUsed updates the last used timestamp for a proxy access token.
func (s *SqlStore) MarkProxyAccessTokenUsed(ctx context.Context, tokenID string) error {
- result := s.db.WithContext(ctx).Model(&types.ProxyAccessToken{}).
+ result := s.db.WithContext(ctx).Model(&types.ProxyAccessToken{}).
Where(idQueryCondition, tokenID).
Update("last_used", time.Now().UTC())
if result.Error != nil {
@@ -5136,7 +5168,7 @@ func (s *SqlStore) EphemeralServiceExists(ctx context.Context, lockStrength Lock
// GetServicesByClusterAndPort returns services matching the given proxy cluster, mode, and listen port.
func (s *SqlStore) GetServicesByClusterAndPort(ctx context.Context, lockStrength LockingStrength, proxyCluster string, mode string, listenPort uint16) ([]*rpservice.Service, error) {
- tx := s.db.WithContext(ctx)
+ tx := s.db.WithContext(ctx)
if lockStrength != LockingStrengthNone {
tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)})
}
@@ -5152,7 +5184,7 @@ func (s *SqlStore) GetServicesByClusterAndPort(ctx context.Context, lockStrength
// GetServicesByCluster returns all services for the given proxy cluster.
func (s *SqlStore) GetServicesByCluster(ctx context.Context, lockStrength LockingStrength, proxyCluster string) ([]*rpservice.Service, error) {
- tx := s.db.WithContext(ctx)
+ tx := s.db.WithContext(ctx)
if lockStrength != LockingStrengthNone {
tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)})
}
@@ -5262,7 +5294,7 @@ func (s *SqlStore) GetAccountAccessLogs(ctx context.Context, lockStrength Lockin
var logs []*accesslogs.AccessLogEntry
var totalCount int64
- baseQuery := s.db.WithContext(ctx).
+ baseQuery := s.db.WithContext(ctx).
Model(&accesslogs.AccessLogEntry{}).
Where(accountIDCondition, accountID)
@@ -5273,7 +5305,7 @@ func (s *SqlStore) GetAccountAccessLogs(ctx context.Context, lockStrength Lockin
return nil, 0, status.Errorf(status.Internal, "failed to count access logs")
}
- query := s.db.WithContext(ctx).
+ query := s.db.WithContext(ctx).
Where(accountIDCondition, accountID)
query = s.applyAccessLogFilters(query, filter)
@@ -5310,7 +5342,7 @@ func (s *SqlStore) GetAccountAccessLogs(ctx context.Context, lockStrength Lockin
// DeleteOldAccessLogs deletes all access logs older than the specified time
func (s *SqlStore) DeleteOldAccessLogs(ctx context.Context, olderThan time.Time) (int64, error) {
- result := s.db.WithContext(ctx).
+ result := s.db.WithContext(ctx).
Where("timestamp < ?", olderThan).
Delete(&accesslogs.AccessLogEntry{})
@@ -5399,7 +5431,7 @@ func (s *SqlStore) GetServiceTargetByTargetID(ctx context.Context, lockStrength
// SaveProxy saves or updates a proxy in the database
func (s *SqlStore) SaveProxy(ctx context.Context, p *proxy.Proxy) error {
- result := s.db.WithContext(ctx).Save(p)
+ result := s.db.WithContext(ctx).Save(p)
if result.Error != nil {
log.WithContext(ctx).Errorf("failed to save proxy: %v", result.Error)
return status.Errorf(status.Internal, "failed to save proxy")
@@ -5411,7 +5443,7 @@ func (s *SqlStore) SaveProxy(ctx context.Context, p *proxy.Proxy) error {
func (s *SqlStore) UpdateProxyHeartbeat(ctx context.Context, proxyID, clusterAddress, ipAddress string) error {
now := time.Now()
- result := s.db.WithContext(ctx).
+ result := s.db.WithContext(ctx).
Model(&proxy.Proxy{}).
Where("id = ? AND status = ?", proxyID, "connected").
Update("last_seen", now)
@@ -5430,7 +5462,7 @@ func (s *SqlStore) UpdateProxyHeartbeat(ctx context.Context, proxyID, clusterAdd
ConnectedAt: &now,
Status: "connected",
}
- if err := s.db.WithContext(ctx).Save(p).Error; err != nil {
+ if err := s.db.WithContext(ctx).Save(p).Error; err != nil {
log.WithContext(ctx).Errorf("failed to create proxy on heartbeat: %v", err)
return status.Errorf(status.Internal, "failed to create proxy on heartbeat")
}
@@ -5443,7 +5475,7 @@ func (s *SqlStore) UpdateProxyHeartbeat(ctx context.Context, proxyID, clusterAdd
func (s *SqlStore) GetActiveProxyClusterAddresses(ctx context.Context) ([]string, error) {
var addresses []string
- result := s.db.WithContext(ctx).
+ result := s.db.WithContext(ctx).
Model(&proxy.Proxy{}).
Where("status = ? AND last_seen > ?", "connected", time.Now().Add(-proxyActiveThreshold)).
Distinct("cluster_address").
@@ -5482,6 +5514,7 @@ const proxyActiveThreshold = 2 * time.Minute
var validCapabilityColumns = map[string]struct{}{
"supports_custom_ports": {},
"require_subdomain": {},
+ "supports_crowdsec": {},
}
// GetClusterSupportsCustomPorts returns whether any active proxy in the cluster
@@ -5496,6 +5529,59 @@ func (s *SqlStore) GetClusterRequireSubdomain(ctx context.Context, clusterAddr s
return s.getClusterCapability(ctx, clusterAddr, "require_subdomain")
}
+// GetClusterSupportsCrowdSec returns whether all active proxies in the cluster
+// have CrowdSec configured. Returns nil when no proxy reported the capability.
+// Unlike other capabilities that use ANY-true (for rolling upgrades), CrowdSec
+// requires unanimous support: a single unconfigured proxy would let requests
+// bypass reputation checks.
+func (s *SqlStore) GetClusterSupportsCrowdSec(ctx context.Context, clusterAddr string) *bool {
+	return s.getClusterUnanimousCapability(ctx, clusterAddr, "supports_crowdsec") // column must be registered in validCapabilityColumns
+}
+
+// getClusterUnanimousCapability returns an aggregated boolean capability
+// requiring all active proxies in the cluster to report true.
+func (s *SqlStore) getClusterUnanimousCapability(ctx context.Context, clusterAddr, column string) *bool {
+	if _, ok := validCapabilityColumns[column]; !ok { // whitelist guard: column is concatenated into raw SQL below
+		log.WithContext(ctx).Errorf("invalid capability column: %s", column)
+		return nil
+	}
+
+	var result struct {
+		Total    int64 // number of active proxies in the cluster
+		Reported int64 // active proxies with a non-NULL value for the column
+		AllTrue  bool  // true when every active proxy reports the capability as true
+	}
+
+	// All active proxies must have reported the capability (no NULLs) and all
+	// must report true. A single unreported or false proxy means the cluster
+	// does not unanimously support the capability.
+	err := s.db.WithContext(ctx).
+		Model(&proxy.Proxy{}).
+		Select("COUNT(*) AS total, "+
+			"COUNT(CASE WHEN "+column+" IS NOT NULL THEN 1 END) AS reported, "+
+			"COUNT(*) > 0 AND COUNT(*) = COUNT(CASE WHEN "+column+" = true THEN 1 END) AS all_true").
+		Where("cluster_address = ? AND status = ? AND last_seen > ?",
+			clusterAddr, "connected", time.Now().Add(-proxyActiveThreshold)).
+		Scan(&result).Error
+
+	if err != nil {
+		log.WithContext(ctx).Errorf("query cluster capability %s for %s: %v", column, clusterAddr, err)
+		return nil
+	}
+
+	if result.Total == 0 || result.Reported == 0 { // no active proxies, or none reported: capability unknown
+		return nil
+	}
+
+	// If any proxy has not reported (NULL), we can't confirm unanimous support.
+	if result.Reported < result.Total {
+		v := false
+		return &v
+	}
+
+	return &result.AllTrue
+}
+
// getClusterCapability returns an aggregated boolean capability for the given
// cluster. It checks active (connected, recently seen) proxies and returns:
// - *true if any proxy in the cluster has the capability set to true,
@@ -5512,7 +5598,7 @@ func (s *SqlStore) getClusterCapability(ctx context.Context, clusterAddr, column
AnyTrue bool
}
- err := s.db.WithContext(ctx).
+ err := s.db.
Model(&proxy.Proxy{}).
Select("COUNT(CASE WHEN "+column+" IS NOT NULL THEN 1 END) > 0 AS has_capability, "+
"COALESCE(MAX(CASE WHEN "+column+" = true THEN 1 ELSE 0 END), 0) = 1 AS any_true").
@@ -5536,7 +5622,7 @@ func (s *SqlStore) getClusterCapability(ctx context.Context, clusterAddr, column
func (s *SqlStore) CleanupStaleProxies(ctx context.Context, inactivityDuration time.Duration) error {
cutoffTime := time.Now().Add(-inactivityDuration)
- result := s.db.WithContext(ctx).
+ result := s.db.
Where("last_seen < ?", cutoffTime).
Delete(&proxy.Proxy{})
diff --git a/management/server/store/sql_store_test.go b/management/server/store/sql_store_test.go
index bafa63580..8ea6c2ae5 100644
--- a/management/server/store/sql_store_test.go
+++ b/management/server/store/sql_store_test.go
@@ -22,6 +22,8 @@ import (
"github.com/stretchr/testify/require"
nbdns "github.com/netbirdio/netbird/dns"
+ proxydomain "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/domain"
+ rpservice "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service"
"github.com/netbirdio/netbird/management/internals/modules/zones"
"github.com/netbirdio/netbird/management/internals/modules/zones/records"
resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types"
@@ -350,6 +352,35 @@ func TestSqlite_DeleteAccount(t *testing.T) {
},
}
+ account.Services = []*rpservice.Service{
+ {
+ ID: "service_id",
+ AccountID: account.Id,
+ Name: "test service",
+ Domain: "svc.example.com",
+ Enabled: true,
+ Targets: []*rpservice.Target{
+ {
+ AccountID: account.Id,
+ ServiceID: "service_id",
+ Host: "localhost",
+ Port: 8080,
+ Protocol: "http",
+ Enabled: true,
+ },
+ },
+ },
+ }
+
+ account.Domains = []*proxydomain.Domain{
+ {
+ ID: "domain_id",
+ Domain: "custom.example.com",
+ AccountID: account.Id,
+ Validated: true,
+ },
+ }
+
err = store.SaveAccount(context.Background(), account)
require.NoError(t, err)
@@ -411,6 +442,20 @@ func TestSqlite_DeleteAccount(t *testing.T) {
require.NoError(t, err, "expecting no error after removing DeleteAccount when searching for network resources")
require.Len(t, resources, 0, "expecting no network resources to be found after DeleteAccount")
}
+
+ domains, err := store.ListCustomDomains(context.Background(), account.Id)
+ require.NoError(t, err, "expecting no error after DeleteAccount when searching for custom domains")
+ require.Len(t, domains, 0, "expecting no custom domains to be found after DeleteAccount")
+
+ var services []*rpservice.Service
+ err = store.(*SqlStore).db.Model(&rpservice.Service{}).Find(&services, "account_id = ?", account.Id).Error
+ require.NoError(t, err, "expecting no error after DeleteAccount when searching for services")
+ require.Len(t, services, 0, "expecting no services to be found after DeleteAccount")
+
+ var targets []*rpservice.Target
+ err = store.(*SqlStore).db.Model(&rpservice.Target{}).Find(&targets, "account_id = ?", account.Id).Error
+ require.NoError(t, err, "expecting no error after DeleteAccount when searching for service targets")
+ require.Len(t, targets, 0, "expecting no service targets to be found after DeleteAccount")
}
func Test_GetAccount(t *testing.T) {
diff --git a/management/server/store/sqlstore_bench_test.go b/management/server/store/sqlstore_bench_test.go
index f2abafceb..81c4b33ae 100644
--- a/management/server/store/sqlstore_bench_test.go
+++ b/management/server/store/sqlstore_bench_test.go
@@ -20,6 +20,7 @@ import (
"github.com/stretchr/testify/assert"
nbdns "github.com/netbirdio/netbird/dns"
+ "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/domain"
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service"
resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types"
routerTypes "github.com/netbirdio/netbird/management/server/networks/routers/types"
@@ -265,6 +266,7 @@ func setupBenchmarkDB(b testing.TB) (*SqlStore, func(), string) {
&nbdns.NameServerGroup{}, &posture.Checks{}, &networkTypes.Network{},
&routerTypes.NetworkRouter{}, &resourceTypes.NetworkResource{},
&types.AccountOnboarding{}, &service.Service{}, &service.Target{},
+ &domain.Domain{},
}
for i := len(models) - 1; i >= 0; i-- {
diff --git a/management/server/store/store.go b/management/server/store/store.go
index e24a1efef..0d8b0678a 100644
--- a/management/server/store/store.go
+++ b/management/server/store/store.go
@@ -121,7 +121,7 @@ type Store interface {
GetAccountGroups(ctx context.Context, lockStrength LockingStrength, accountID string) ([]*types.Group, error)
GetResourceGroups(ctx context.Context, lockStrength LockingStrength, accountID, resourceID string) ([]*types.Group, error)
GetGroupByID(ctx context.Context, lockStrength LockingStrength, accountID, groupID string) (*types.Group, error)
- GetGroupByName(ctx context.Context, lockStrength LockingStrength, groupName, accountID string) (*types.Group, error)
+ GetGroupByName(ctx context.Context, lockStrength LockingStrength, accountID, groupName string) (*types.Group, error)
GetGroupsByIDs(ctx context.Context, lockStrength LockingStrength, accountID string, groupIDs []string) (map[string]*types.Group, error)
CreateGroups(ctx context.Context, accountID string, groups []*types.Group) error
UpdateGroups(ctx context.Context, accountID string, groups []*types.Group) error
@@ -289,6 +289,7 @@ type Store interface {
GetActiveProxyClusters(ctx context.Context) ([]proxy.Cluster, error)
GetClusterSupportsCustomPorts(ctx context.Context, clusterAddr string) *bool
GetClusterRequireSubdomain(ctx context.Context, clusterAddr string) *bool
+ GetClusterSupportsCrowdSec(ctx context.Context, clusterAddr string) *bool
CleanupStaleProxies(ctx context.Context, inactivityDuration time.Duration) error
GetCustomDomainsCounts(ctx context.Context) (total int64, validated int64, err error)
@@ -448,6 +449,12 @@ func getMigrationsPreAuto(ctx context.Context) []migrationFunc {
func(db *gorm.DB) error {
return migration.RemoveDuplicatePeerKeys(ctx, db)
},
+ func(db *gorm.DB) error {
+ return migration.CleanupOrphanedResources[rpservice.Service, types.Account](ctx, db, "account_id")
+ },
+ func(db *gorm.DB) error {
+ return migration.CleanupOrphanedResources[domain.Domain, types.Account](ctx, db, "account_id")
+ },
}
}
diff --git a/management/server/store/store_mock.go b/management/server/store/store_mock.go
index a8648aed7..beee13d96 100644
--- a/management/server/store/store_mock.go
+++ b/management/server/store/store_mock.go
@@ -165,34 +165,19 @@ func (mr *MockStoreMockRecorder) CleanupStaleProxies(ctx, inactivityDuration int
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanupStaleProxies", reflect.TypeOf((*MockStore)(nil).CleanupStaleProxies), ctx, inactivityDuration)
}
-// GetClusterSupportsCustomPorts mocks base method.
-func (m *MockStore) GetClusterSupportsCustomPorts(ctx context.Context, clusterAddr string) *bool {
+// GetClusterSupportsCrowdSec mocks base method.
+func (m *MockStore) GetClusterSupportsCrowdSec(ctx context.Context, clusterAddr string) *bool {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetClusterSupportsCustomPorts", ctx, clusterAddr)
+ ret := m.ctrl.Call(m, "GetClusterSupportsCrowdSec", ctx, clusterAddr)
ret0, _ := ret[0].(*bool)
return ret0
}
-// GetClusterSupportsCustomPorts indicates an expected call of GetClusterSupportsCustomPorts.
-func (mr *MockStoreMockRecorder) GetClusterSupportsCustomPorts(ctx, clusterAddr interface{}) *gomock.Call {
+// GetClusterSupportsCrowdSec indicates an expected call of GetClusterSupportsCrowdSec.
+func (mr *MockStoreMockRecorder) GetClusterSupportsCrowdSec(ctx, clusterAddr interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClusterSupportsCustomPorts", reflect.TypeOf((*MockStore)(nil).GetClusterSupportsCustomPorts), ctx, clusterAddr)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClusterSupportsCrowdSec", reflect.TypeOf((*MockStore)(nil).GetClusterSupportsCrowdSec), ctx, clusterAddr)
}
-
-// GetClusterRequireSubdomain mocks base method.
-func (m *MockStore) GetClusterRequireSubdomain(ctx context.Context, clusterAddr string) *bool {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetClusterRequireSubdomain", ctx, clusterAddr)
- ret0, _ := ret[0].(*bool)
- return ret0
-}
-
-// GetClusterRequireSubdomain indicates an expected call of GetClusterRequireSubdomain.
-func (mr *MockStoreMockRecorder) GetClusterRequireSubdomain(ctx, clusterAddr interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClusterRequireSubdomain", reflect.TypeOf((*MockStore)(nil).GetClusterRequireSubdomain), ctx, clusterAddr)
-}
-
// Close mocks base method.
func (m *MockStore) Close(ctx context.Context) error {
m.ctrl.T.Helper()
@@ -1389,6 +1374,34 @@ func (mr *MockStoreMockRecorder) GetAnyAccountID(ctx interface{}) *gomock.Call {
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetAnyAccountID", reflect.TypeOf((*MockStore)(nil).GetAnyAccountID), ctx)
}
+// GetClusterRequireSubdomain mocks base method.
+func (m *MockStore) GetClusterRequireSubdomain(ctx context.Context, clusterAddr string) *bool {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetClusterRequireSubdomain", ctx, clusterAddr)
+ ret0, _ := ret[0].(*bool)
+ return ret0
+}
+
+// GetClusterRequireSubdomain indicates an expected call of GetClusterRequireSubdomain.
+func (mr *MockStoreMockRecorder) GetClusterRequireSubdomain(ctx, clusterAddr interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClusterRequireSubdomain", reflect.TypeOf((*MockStore)(nil).GetClusterRequireSubdomain), ctx, clusterAddr)
+}
+
+// GetClusterSupportsCustomPorts mocks base method.
+func (m *MockStore) GetClusterSupportsCustomPorts(ctx context.Context, clusterAddr string) *bool {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetClusterSupportsCustomPorts", ctx, clusterAddr)
+ ret0, _ := ret[0].(*bool)
+ return ret0
+}
+
+// GetClusterSupportsCustomPorts indicates an expected call of GetClusterSupportsCustomPorts.
+func (mr *MockStoreMockRecorder) GetClusterSupportsCustomPorts(ctx, clusterAddr interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClusterSupportsCustomPorts", reflect.TypeOf((*MockStore)(nil).GetClusterSupportsCustomPorts), ctx, clusterAddr)
+}
+
// GetCustomDomain mocks base method.
func (m *MockStore) GetCustomDomain(ctx context.Context, accountID, domainID string) (*domain.Domain, error) {
m.ctrl.T.Helper()
@@ -1466,18 +1479,18 @@ func (mr *MockStoreMockRecorder) GetGroupByID(ctx, lockStrength, accountID, grou
}
// GetGroupByName mocks base method.
-func (m *MockStore) GetGroupByName(ctx context.Context, lockStrength LockingStrength, groupName, accountID string) (*types2.Group, error) {
+func (m *MockStore) GetGroupByName(ctx context.Context, lockStrength LockingStrength, accountID, groupName string) (*types2.Group, error) {
m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetGroupByName", ctx, lockStrength, groupName, accountID)
+ ret := m.ctrl.Call(m, "GetGroupByName", ctx, lockStrength, accountID, groupName)
ret0, _ := ret[0].(*types2.Group)
ret1, _ := ret[1].(error)
return ret0, ret1
}
// GetGroupByName indicates an expected call of GetGroupByName.
-func (mr *MockStoreMockRecorder) GetGroupByName(ctx, lockStrength, groupName, accountID interface{}) *gomock.Call {
+func (mr *MockStoreMockRecorder) GetGroupByName(ctx, lockStrength, accountID, groupName interface{}) *gomock.Call {
mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGroupByName", reflect.TypeOf((*MockStore)(nil).GetGroupByName), ctx, lockStrength, groupName, accountID)
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetGroupByName", reflect.TypeOf((*MockStore)(nil).GetGroupByName), ctx, lockStrength, accountID, groupName)
}
// GetGroupsByIDs mocks base method.
@@ -1974,6 +1987,21 @@ func (mr *MockStoreMockRecorder) GetRouteByID(ctx, lockStrength, accountID, rout
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRouteByID", reflect.TypeOf((*MockStore)(nil).GetRouteByID), ctx, lockStrength, accountID, routeID)
}
+// GetRoutingPeerNetworks mocks base method.
+func (m *MockStore) GetRoutingPeerNetworks(ctx context.Context, accountID, peerID string) ([]string, error) {
+ m.ctrl.T.Helper()
+ ret := m.ctrl.Call(m, "GetRoutingPeerNetworks", ctx, accountID, peerID)
+ ret0, _ := ret[0].([]string)
+ ret1, _ := ret[1].(error)
+ return ret0, ret1
+}
+
+// GetRoutingPeerNetworks indicates an expected call of GetRoutingPeerNetworks.
+func (mr *MockStoreMockRecorder) GetRoutingPeerNetworks(ctx, accountID, peerID interface{}) *gomock.Call {
+ mr.mock.ctrl.T.Helper()
+ return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRoutingPeerNetworks", reflect.TypeOf((*MockStore)(nil).GetRoutingPeerNetworks), ctx, accountID, peerID)
+}
+
// GetServiceByDomain mocks base method.
func (m *MockStore) GetServiceByDomain(ctx context.Context, domain string) (*service.Service, error) {
m.ctrl.T.Helper()
@@ -2361,21 +2389,6 @@ func (mr *MockStoreMockRecorder) IncrementSetupKeyUsage(ctx, setupKeyID interfac
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "IncrementSetupKeyUsage", reflect.TypeOf((*MockStore)(nil).IncrementSetupKeyUsage), ctx, setupKeyID)
}
-// GetRoutingPeerNetworks mocks base method.
-func (m *MockStore) GetRoutingPeerNetworks(ctx context.Context, accountID, peerID string) ([]string, error) {
- m.ctrl.T.Helper()
- ret := m.ctrl.Call(m, "GetRoutingPeerNetworks", ctx, accountID, peerID)
- ret0, _ := ret[0].([]string)
- ret1, _ := ret[1].(error)
- return ret0, ret1
-}
-
-// GetRoutingPeerNetworks indicates an expected call of GetRoutingPeerNetworks.
-func (mr *MockStoreMockRecorder) GetRoutingPeerNetworks(ctx, accountID, peerID interface{}) *gomock.Call {
- mr.mock.ctrl.T.Helper()
- return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetRoutingPeerNetworks", reflect.TypeOf((*MockStore)(nil).GetRoutingPeerNetworks), ctx, accountID, peerID)
-}
-
// IsPrimaryAccount mocks base method.
func (m *MockStore) IsPrimaryAccount(ctx context.Context, accountID string) (bool, string, error) {
m.ctrl.T.Helper()
diff --git a/management/server/telemetry/http_api_metrics.go b/management/server/telemetry/http_api_metrics.go
index c50ed1e51..28e8457e2 100644
--- a/management/server/telemetry/http_api_metrics.go
+++ b/management/server/telemetry/http_api_metrics.go
@@ -183,7 +183,18 @@ func (m *HTTPMiddleware) Handler(h http.Handler) http.Handler {
w := WrapResponseWriter(rw)
+ handlerDone := make(chan struct{})
+ context.AfterFunc(ctx, func() {
+ select {
+ case <-handlerDone:
+ default:
+ log.Debugf("HTTP request context canceled mid-flight: %v %v (reqID=%s, after %v, cause: %v)",
+ r.Method, r.URL.Path, reqID, time.Since(reqStart), context.Cause(ctx))
+ }
+ })
+
h.ServeHTTP(w, r.WithContext(ctx))
+ close(handlerDone)
userAuth, err := nbContext.GetUserAuthFromContext(r.Context())
if err == nil {
diff --git a/management/server/types/account.go b/management/server/types/account.go
index 269fc7a88..c448813db 100644
--- a/management/server/types/account.go
+++ b/management/server/types/account.go
@@ -18,6 +18,7 @@ import (
"github.com/netbirdio/netbird/client/ssh/auth"
nbdns "github.com/netbirdio/netbird/dns"
+ proxydomain "github.com/netbirdio/netbird/management/internals/modules/reverseproxy/domain"
"github.com/netbirdio/netbird/management/internals/modules/reverseproxy/service"
"github.com/netbirdio/netbird/management/internals/modules/zones"
"github.com/netbirdio/netbird/management/internals/modules/zones/records"
@@ -101,6 +102,7 @@ type Account struct {
DNSSettings DNSSettings `gorm:"embedded;embeddedPrefix:dns_settings_"`
PostureChecks []*posture.Checks `gorm:"foreignKey:AccountID;references:id"`
Services []*service.Service `gorm:"foreignKey:AccountID;references:id"`
+ Domains []*proxydomain.Domain `gorm:"foreignKey:AccountID;references:id"`
// Settings is a dictionary of Account settings
Settings *Settings `gorm:"embedded;embeddedPrefix:settings_"`
Networks []*networkTypes.Network `gorm:"foreignKey:AccountID;references:id"`
@@ -911,6 +913,11 @@ func (a *Account) Copy() *Account {
services = append(services, svc.Copy())
}
+ domains := []*proxydomain.Domain{}
+ for _, domain := range a.Domains {
+ domains = append(domains, domain.Copy())
+ }
+
return &Account{
Id: a.Id,
CreatedBy: a.CreatedBy,
@@ -936,6 +943,7 @@ func (a *Account) Copy() *Account {
Onboarding: a.Onboarding,
NetworkMapCache: a.NetworkMapCache,
nmapInitOnce: a.nmapInitOnce,
+ Domains: domains,
}
}
diff --git a/management/server/types/networkmap_benchmark_test.go b/management/server/types/networkmap_benchmark_test.go
new file mode 100644
index 000000000..38272e7b0
--- /dev/null
+++ b/management/server/types/networkmap_benchmark_test.go
@@ -0,0 +1,217 @@
+package types_test
+
+import (
+ "context"
+ "fmt"
+ "os"
+ "testing"
+
+ nbdns "github.com/netbirdio/netbird/dns"
+ "github.com/netbirdio/netbird/management/server/types"
+)
+
+type benchmarkScale struct {
+ name string
+ peers int
+ groups int
+}
+
+var defaultScales = []benchmarkScale{
+ {"100peers_5groups", 100, 5},
+ {"500peers_20groups", 500, 20},
+ {"1000peers_50groups", 1000, 50},
+ {"5000peers_100groups", 5000, 100},
+ {"10000peers_200groups", 10000, 200},
+ {"20000peers_200groups", 20000, 200},
+ {"30000peers_300groups", 30000, 300},
+}
+
+func skipCIBenchmark(b *testing.B) {
+	if os.Getenv("CI") == "true" { // CI runners conventionally export CI=true — confirm for this project's pipeline
+		b.Skip("Skipping benchmark in CI")
+	}
+}
+
+// ──────────────────────────────────────────────────────────────────────────────
+// Single Peer Network Map Generation
+// ──────────────────────────────────────────────────────────────────────────────
+
+// BenchmarkNetworkMapGeneration_Components benchmarks the components-based approach for a single peer.
+func BenchmarkNetworkMapGeneration_Components(b *testing.B) {
+	skipCIBenchmark(b)
+	for _, scale := range defaultScales {
+		b.Run(scale.name, func(b *testing.B) {
+			account, validatedPeers := scalableTestAccount(scale.peers, scale.groups)
+			ctx := context.Background()
+			resourcePolicies := account.GetResourcePoliciesMap() // precomputed once per sub-benchmark, outside the timed loop
+			routers := account.GetResourceRoutersMap()
+			groupIDToUserIDs := account.GetActiveGroupUsers()
+
+			b.ReportAllocs()
+			b.ResetTimer() // exclude account construction and map precomputation from the measurement
+			for range b.N {
+				_ = account.GetPeerNetworkMapFromComponents(ctx, "peer-0", nbdns.CustomZone{}, nil, validatedPeers, resourcePolicies, routers, nil, groupIDToUserIDs)
+			}
+		})
+	}
+}
+
+// ──────────────────────────────────────────────────────────────────────────────
+// All Peers (UpdateAccountPeers hot path)
+// ──────────────────────────────────────────────────────────────────────────────
+
+// BenchmarkNetworkMapGeneration_AllPeers benchmarks generating network maps for ALL peers.
+func BenchmarkNetworkMapGeneration_AllPeers(b *testing.B) {
+ skipCIBenchmark(b)
+ scales := []benchmarkScale{
+ {"100peers_5groups", 100, 5},
+ {"500peers_20groups", 500, 20},
+ {"1000peers_50groups", 1000, 50},
+ {"5000peers_100groups", 5000, 100},
+ }
+
+ for _, scale := range scales {
+ account, validatedPeers := scalableTestAccount(scale.peers, scale.groups)
+ ctx := context.Background()
+
+ peerIDs := make([]string, 0, len(account.Peers))
+ for peerID := range account.Peers {
+ peerIDs = append(peerIDs, peerID)
+ }
+
+ b.Run("components/"+scale.name, func(b *testing.B) {
+ resourcePolicies := account.GetResourcePoliciesMap()
+ routers := account.GetResourceRoutersMap()
+ groupIDToUserIDs := account.GetActiveGroupUsers()
+ b.ReportAllocs()
+ b.ResetTimer()
+ for range b.N {
+ for _, peerID := range peerIDs {
+ _ = account.GetPeerNetworkMapFromComponents(ctx, peerID, nbdns.CustomZone{}, nil, validatedPeers, resourcePolicies, routers, nil, groupIDToUserIDs)
+ }
+ }
+ })
+ }
+}
+
+// ──────────────────────────────────────────────────────────────────────────────
+// Sub-operations
+// ──────────────────────────────────────────────────────────────────────────────
+
+// BenchmarkNetworkMapGeneration_ComponentsCreation benchmarks components extraction.
+func BenchmarkNetworkMapGeneration_ComponentsCreation(b *testing.B) {
+ skipCIBenchmark(b)
+ for _, scale := range defaultScales {
+ b.Run(scale.name, func(b *testing.B) {
+ account, validatedPeers := scalableTestAccount(scale.peers, scale.groups)
+ ctx := context.Background()
+ resourcePolicies := account.GetResourcePoliciesMap()
+ routers := account.GetResourceRoutersMap()
+ groupIDToUserIDs := account.GetActiveGroupUsers()
+ b.ReportAllocs()
+ b.ResetTimer()
+ for range b.N {
+ _ = account.GetPeerNetworkMapComponents(ctx, "peer-0", nbdns.CustomZone{}, nil, validatedPeers, resourcePolicies, routers, groupIDToUserIDs)
+ }
+ })
+ }
+}
+
+// BenchmarkNetworkMapGeneration_ComponentsCalculation benchmarks calculation from pre-built components.
+func BenchmarkNetworkMapGeneration_ComponentsCalculation(b *testing.B) {
+ skipCIBenchmark(b)
+ for _, scale := range defaultScales {
+ b.Run(scale.name, func(b *testing.B) {
+ account, validatedPeers := scalableTestAccount(scale.peers, scale.groups)
+ ctx := context.Background()
+ resourcePolicies := account.GetResourcePoliciesMap()
+ routers := account.GetResourceRoutersMap()
+ groupIDToUserIDs := account.GetActiveGroupUsers()
+ components := account.GetPeerNetworkMapComponents(ctx, "peer-0", nbdns.CustomZone{}, nil, validatedPeers, resourcePolicies, routers, groupIDToUserIDs)
+ b.ReportAllocs()
+ b.ResetTimer()
+ for range b.N {
+ _ = types.CalculateNetworkMapFromComponents(ctx, components)
+ }
+ })
+ }
+}
+
+// BenchmarkNetworkMapGeneration_PrecomputeMaps benchmarks precomputed map costs.
+func BenchmarkNetworkMapGeneration_PrecomputeMaps(b *testing.B) {
+ skipCIBenchmark(b)
+ for _, scale := range defaultScales {
+ b.Run("ResourcePoliciesMap/"+scale.name, func(b *testing.B) {
+ account, _ := scalableTestAccount(scale.peers, scale.groups)
+ b.ReportAllocs()
+ b.ResetTimer()
+ for range b.N {
+ _ = account.GetResourcePoliciesMap()
+ }
+ })
+ b.Run("ResourceRoutersMap/"+scale.name, func(b *testing.B) {
+ account, _ := scalableTestAccount(scale.peers, scale.groups)
+ b.ReportAllocs()
+ b.ResetTimer()
+ for range b.N {
+ _ = account.GetResourceRoutersMap()
+ }
+ })
+ b.Run("ActiveGroupUsers/"+scale.name, func(b *testing.B) {
+ account, _ := scalableTestAccount(scale.peers, scale.groups)
+ b.ReportAllocs()
+ b.ResetTimer()
+ for range b.N {
+ _ = account.GetActiveGroupUsers()
+ }
+ })
+ }
+}
+
+// ──────────────────────────────────────────────────────────────────────────────
+// Scaling Analysis
+// ──────────────────────────────────────────────────────────────────────────────
+
+// BenchmarkNetworkMapGeneration_GroupScaling tests group count impact on performance.
+func BenchmarkNetworkMapGeneration_GroupScaling(b *testing.B) {
+ skipCIBenchmark(b)
+ groupCounts := []int{1, 5, 20, 50, 100, 200, 500}
+ for _, numGroups := range groupCounts {
+ b.Run(fmt.Sprintf("components_%dgroups", numGroups), func(b *testing.B) {
+ account, validatedPeers := scalableTestAccount(1000, numGroups)
+ ctx := context.Background()
+ resourcePolicies := account.GetResourcePoliciesMap()
+ routers := account.GetResourceRoutersMap()
+ groupIDToUserIDs := account.GetActiveGroupUsers()
+ b.ReportAllocs()
+ b.ResetTimer()
+ for range b.N {
+ _ = account.GetPeerNetworkMapFromComponents(ctx, "peer-0", nbdns.CustomZone{}, nil, validatedPeers, resourcePolicies, routers, nil, groupIDToUserIDs)
+ }
+ })
+ }
+}
+
+// BenchmarkNetworkMapGeneration_PeerScaling tests peer count impact on performance.
+func BenchmarkNetworkMapGeneration_PeerScaling(b *testing.B) {
+	skipCIBenchmark(b)
+	peerCounts := []int{50, 100, 500, 1000, 2000, 5000, 10000, 20000, 30000}
+	for _, numPeers := range peerCounts {
+		numGroups := numPeers / 20 // keep roughly 20 peers per group as the peer count grows
+		if numGroups < 1 {
+			numGroups = 1
+		}
+		b.Run(fmt.Sprintf("components_%dpeers", numPeers), func(b *testing.B) {
+			account, validatedPeers := scalableTestAccount(numPeers, numGroups)
+			ctx := context.Background()
+			resourcePolicies := account.GetResourcePoliciesMap() // precomputed once per sub-benchmark, outside the timed loop
+			routers := account.GetResourceRoutersMap()
+			groupIDToUserIDs := account.GetActiveGroupUsers()
+			b.ReportAllocs()
+			b.ResetTimer() // exclude account construction and map precomputation from the measurement
+			for range b.N {
+				_ = account.GetPeerNetworkMapFromComponents(ctx, "peer-0", nbdns.CustomZone{}, nil, validatedPeers, resourcePolicies, routers, nil, groupIDToUserIDs)
+			}
+		})
+	}
+}
diff --git a/management/server/types/networkmap_components_correctness_test.go b/management/server/types/networkmap_components_correctness_test.go
new file mode 100644
index 000000000..5cd41ff10
--- /dev/null
+++ b/management/server/types/networkmap_components_correctness_test.go
@@ -0,0 +1,1192 @@
+package types_test
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "net/netip"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ nbdns "github.com/netbirdio/netbird/dns"
+ resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types"
+ routerTypes "github.com/netbirdio/netbird/management/server/networks/routers/types"
+ networkTypes "github.com/netbirdio/netbird/management/server/networks/types"
+ nbpeer "github.com/netbirdio/netbird/management/server/peer"
+ "github.com/netbirdio/netbird/management/server/posture"
+ "github.com/netbirdio/netbird/management/server/types"
+ "github.com/netbirdio/netbird/route"
+)
+
+// scalableTestAccountWithoutDefaultPolicy creates an account without the blanket "Allow All" policy.
+// Use this for tests that need to verify feature-specific connectivity in isolation.
+// It returns the account and the set of validated peer IDs produced by the builder.
+func scalableTestAccountWithoutDefaultPolicy(numPeers, numGroups int) (*types.Account, map[string]struct{}) {
+	return buildScalableTestAccount(numPeers, numGroups, false)
+}
+
+// scalableTestAccount creates a realistic account with a blanket "Allow All" policy
+// plus per-group policies, routes, network resources, posture checks, and DNS settings.
+// It returns the account and the set of validated peer IDs produced by the builder.
+func scalableTestAccount(numPeers, numGroups int) (*types.Account, map[string]struct{}) {
+	return buildScalableTestAccount(numPeers, numGroups, true)
+}
+
+// buildScalableTestAccount is the core builder. When withDefaultPolicy is true it adds
+// a blanket group-all <-> group-all allow rule; when false the only policies are the
+// per-group ones, so tests can verify feature-specific connectivity in isolation.
+// It returns the populated account plus the set of validated peer IDs (every peer
+// except the last, which tests rely on as the non-validated case).
+func buildScalableTestAccount(numPeers, numGroups int, withDefaultPolicy bool) (*types.Account, map[string]struct{}) {
+	// The code below divides and takes modulo by numGroups; normalize a
+	// non-positive group count so callers cannot trigger a division by zero.
+	if numGroups < 1 {
+		numGroups = 1
+	}
+
+	peers := make(map[string]*nbpeer.Peer, numPeers)
+	allGroupPeers := make([]string, 0, numPeers)
+
+	for i := range numPeers {
+		peerID := fmt.Sprintf("peer-%d", i)
+		// Unique CGNAT-range (100.64.0.0/10) address derived from the peer index.
+		ip := net.IP{100, byte(64 + i/65536), byte((i / 256) % 256), byte(i % 256)}
+		// Even-indexed peers run 0.40.0 (passes the 0.26.0 min-version posture
+		// check below); odd-indexed peers run 0.25.0 (fails it).
+		wtVersion := "0.25.0"
+		if i%2 == 0 {
+			wtVersion = "0.40.0"
+		}
+
+		p := &nbpeer.Peer{
+			ID:       peerID,
+			IP:       ip,
+			Key:      fmt.Sprintf("key-%s", peerID),
+			DNSLabel: fmt.Sprintf("peer%d", i),
+			Status:   &nbpeer.PeerStatus{Connected: true, LastSeen: time.Now()},
+			UserID:   "user-admin",
+			Meta:     nbpeer.PeerSystemMeta{WtVersion: wtVersion, GoOS: "linux"},
+		}
+
+		// The second-to-last peer is login-expired (last login two hours ago,
+		// account expiration one hour) for the expiration tests.
+		if i == numPeers-2 {
+			p.LoginExpirationEnabled = true
+			pastTimestamp := time.Now().Add(-2 * time.Hour)
+			p.LastLogin = &pastTimestamp
+		}
+
+		peers[peerID] = p
+		allGroupPeers = append(allGroupPeers, peerID)
+	}
+
+	groups := make(map[string]*types.Group, numGroups+1)
+	groups["group-all"] = &types.Group{ID: "group-all", Name: "All", Peers: allGroupPeers}
+
+	// Partition peers into contiguous per-index groups of roughly equal size.
+	peersPerGroup := numPeers / numGroups
+	if peersPerGroup < 1 {
+		peersPerGroup = 1
+	}
+
+	for g := range numGroups {
+		groupID := fmt.Sprintf("group-%d", g)
+		groupPeers := make([]string, 0, peersPerGroup)
+		start := g * peersPerGroup
+		end := start + peersPerGroup
+		if end > numPeers {
+			end = numPeers
+		}
+		for i := start; i < end; i++ {
+			groupPeers = append(groupPeers, fmt.Sprintf("peer-%d", i))
+		}
+		groups[groupID] = &types.Group{ID: groupID, Name: fmt.Sprintf("Group %d", g), Peers: groupPeers}
+	}
+
+	policies := make([]*types.Policy, 0, numGroups+2)
+	if withDefaultPolicy {
+		policies = append(policies, &types.Policy{
+			ID: "policy-all", Name: "Default-Allow", Enabled: true,
+			Rules: []*types.PolicyRule{{
+				ID: "rule-all", Name: "Allow All", Enabled: true, Action: types.PolicyTrafficActionAccept,
+				Protocol: types.PolicyRuleProtocolALL, Bidirectional: true,
+				Sources: []string{"group-all"}, Destinations: []string{"group-all"},
+			}},
+		})
+	}
+
+	// One TCP/8080 allow policy per group, each targeting the next group
+	// (ring topology), so cross-group connectivity exists without the default policy.
+	for g := range numGroups {
+		groupID := fmt.Sprintf("group-%d", g)
+		dstGroup := fmt.Sprintf("group-%d", (g+1)%numGroups)
+		policies = append(policies, &types.Policy{
+			ID: fmt.Sprintf("policy-%d", g), Name: fmt.Sprintf("Policy %d", g), Enabled: true,
+			Rules: []*types.PolicyRule{{
+				ID: fmt.Sprintf("rule-%d", g), Name: fmt.Sprintf("Rule %d", g), Enabled: true,
+				Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolTCP,
+				Bidirectional: true,
+				Ports:         []string{"8080"},
+				Sources: []string{groupID}, Destinations: []string{dstGroup},
+			}},
+		})
+	}
+
+	// A TCP/5432 drop policy between the first two groups, when both exist.
+	if numGroups >= 2 {
+		policies = append(policies, &types.Policy{
+			ID: "policy-drop", Name: "Drop DB traffic", Enabled: true,
+			Rules: []*types.PolicyRule{{
+				ID: "rule-drop", Name: "Drop DB", Enabled: true, Action: types.PolicyTrafficActionDrop,
+				Protocol: types.PolicyRuleProtocolTCP, Ports: []string{"5432"}, Bidirectional: true,
+				Sources: []string{"group-0"}, Destinations: []string{"group-1"},
+			}},
+		})
+	}
+
+	// Up to 20 routes, served by peers from the upper half of the index range.
+	numRoutes := numGroups
+	if numRoutes > 20 {
+		numRoutes = 20
+	}
+	routes := make(map[route.ID]*route.Route, numRoutes)
+	for r := range numRoutes {
+		routeID := route.ID(fmt.Sprintf("route-%d", r))
+		peerIdx := (numPeers / 2) + r
+		if peerIdx >= numPeers {
+			peerIdx = numPeers - 1
+		}
+		routePeerID := fmt.Sprintf("peer-%d", peerIdx)
+		groupID := fmt.Sprintf("group-%d", r%numGroups)
+		routes[routeID] = &route.Route{
+			ID:                  routeID,
+			Network:             netip.MustParsePrefix(fmt.Sprintf("10.%d.0.0/16", r)),
+			Peer:                peers[routePeerID].Key,
+			PeerID:              routePeerID,
+			Description:         fmt.Sprintf("Route %d", r),
+			Enabled:             true,
+			PeerGroups:          []string{groupID},
+			Groups:              []string{"group-all"},
+			AccessControlGroups: []string{groupID},
+			AccountID:           "test-account",
+		}
+	}
+
+	// Network resources: one per two groups, clamped to [1, 50]; routing peers
+	// come from the top quarter of the index range.
+	numResources := numGroups / 2
+	if numResources < 1 {
+		numResources = 1
+	}
+	if numResources > 50 {
+		numResources = 50
+	}
+
+	networkResources := make([]*resourceTypes.NetworkResource, 0, numResources)
+	networksList := make([]*networkTypes.Network, 0, numResources)
+	networkRouters := make([]*routerTypes.NetworkRouter, 0, numResources)
+
+	routingPeerStart := numPeers * 3 / 4
+	for nr := range numResources {
+		netID := fmt.Sprintf("net-%d", nr)
+		resID := fmt.Sprintf("res-%d", nr)
+		routerPeerIdx := routingPeerStart + nr
+		if routerPeerIdx >= numPeers {
+			routerPeerIdx = numPeers - 1
+		}
+		routerPeerID := fmt.Sprintf("peer-%d", routerPeerIdx)
+
+		networksList = append(networksList, &networkTypes.Network{ID: netID, Name: fmt.Sprintf("Network %d", nr), AccountID: "test-account"})
+		networkResources = append(networkResources, &resourceTypes.NetworkResource{
+			ID: resID, NetworkID: netID, AccountID: "test-account", Enabled: true,
+			Address: fmt.Sprintf("svc-%d.netbird.cloud", nr),
+		})
+		networkRouters = append(networkRouters, &routerTypes.NetworkRouter{
+			ID: fmt.Sprintf("router-%d", nr), NetworkID: netID, Peer: routerPeerID,
+			Enabled: true, AccountID: "test-account",
+		})
+
+		// Resource access is gated by the min-version posture check, so the
+		// posture tests can compare passing vs failing peers.
+		policies = append(policies, &types.Policy{
+			ID: fmt.Sprintf("policy-res-%d", nr), Name: fmt.Sprintf("Resource Policy %d", nr), Enabled: true,
+			SourcePostureChecks: []string{"posture-check-ver"},
+			Rules: []*types.PolicyRule{{
+				ID: fmt.Sprintf("rule-res-%d", nr), Name: fmt.Sprintf("Allow Resource %d", nr), Enabled: true,
+				Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolALL, Bidirectional: true,
+				Sources:             []string{fmt.Sprintf("group-%d", nr%numGroups)},
+				DestinationResource: types.Resource{ID: resID},
+			}},
+		})
+	}
+
+	account := &types.Account{
+		Id:       "test-account",
+		Peers:    peers,
+		Groups:   groups,
+		Policies: policies,
+		Routes:   routes,
+		Users: map[string]*types.User{
+			"user-admin": {Id: "user-admin", Role: types.UserRoleAdmin, IsServiceUser: false, AccountID: "test-account"},
+		},
+		Network: &types.Network{
+			Identifier: "net-test", Net: net.IPNet{IP: net.IP{100, 64, 0, 0}, Mask: net.CIDRMask(10, 32)}, Serial: 1,
+		},
+		DNSSettings: types.DNSSettings{DisabledManagementGroups: []string{}},
+		NameServerGroups: map[string]*nbdns.NameServerGroup{
+			"ns-group-main": {
+				ID: "ns-group-main", Name: "Main NS", Enabled: true, Groups: []string{"group-all"},
+				NameServers: []nbdns.NameServer{{IP: netip.MustParseAddr("8.8.8.8"), NSType: nbdns.UDPNameServerType, Port: 53}},
+			},
+		},
+		PostureChecks: []*posture.Checks{
+			{ID: "posture-check-ver", Name: "Check version", Checks: posture.ChecksDefinition{
+				NBVersionCheck: &posture.NBVersionCheck{MinVersion: "0.26.0"},
+			}},
+		},
+		NetworkResources: networkResources,
+		Networks:         networksList,
+		NetworkRouters:   networkRouters,
+		Settings:         &types.Settings{PeerLoginExpirationEnabled: true, PeerLoginExpiration: 1 * time.Hour},
+	}
+
+	for _, p := range account.Policies {
+		p.AccountID = account.Id
+	}
+	for _, r := range account.Routes {
+		r.AccountID = account.Id
+	}
+
+	// Every peer is validated except the last one, which tests use as the
+	// non-validated case.
+	validatedPeers := make(map[string]struct{}, numPeers)
+	for i := range numPeers {
+		peerID := fmt.Sprintf("peer-%d", i)
+		if i != numPeers-1 {
+			validatedPeers[peerID] = struct{}{}
+		}
+	}
+
+	return account, validatedPeers
+}
+
+// componentsNetworkMap is a convenience wrapper for GetPeerNetworkMapFromComponents.
+func componentsNetworkMap(account *types.Account, peerID string, validatedPeers map[string]struct{}) *types.NetworkMap {
+	ctx := context.Background()
+	resourcePolicies := account.GetResourcePoliciesMap()
+	resourceRouters := account.GetResourceRoutersMap()
+	groupUsers := account.GetActiveGroupUsers()
+	return account.GetPeerNetworkMapFromComponents(ctx, peerID, nbdns.CustomZone{}, nil, validatedPeers, resourcePolicies, resourceRouters, nil, groupUsers)
+}
+
+// ──────────────────────────────────────────────────────────────────────────────
+// 1. PEER VISIBILITY & GROUPS
+// ──────────────────────────────────────────────────────────────────────────────
+
+// TestComponents_PeerVisibility checks that a validated peer sees every other
+// validated, non-expired peer (active plus offline accounts for the rest).
+func TestComponents_PeerVisibility(t *testing.T) {
+	account, validated := scalableTestAccount(100, 5)
+	networkMap := componentsNetworkMap(account, "peer-0", validated)
+	require.NotNil(t, networkMap)
+	expected := len(validated) - 1 - len(networkMap.OfflinePeers)
+	assert.Equal(t, expected, len(networkMap.Peers), "peer should see all other validated non-expired peers")
+}
+
+// TestComponents_PeerDoesNotSeeItself verifies the requesting peer is never
+// listed in its own peer list.
+func TestComponents_PeerDoesNotSeeItself(t *testing.T) {
+	account, validated := scalableTestAccount(50, 5)
+	networkMap := componentsNetworkMap(account, "peer-0", validated)
+	require.NotNil(t, networkMap)
+	for _, remote := range networkMap.Peers {
+		assert.NotEqual(t, "peer-0", remote.ID, "peer should not see itself")
+	}
+}
+
+// TestComponents_IntraGroupConnectivity verifies that peers in the same group
+// can see each other.
+func TestComponents_IntraGroupConnectivity(t *testing.T) {
+	account, validated := scalableTestAccount(20, 2)
+	networkMap := componentsNetworkMap(account, "peer-0", validated)
+	require.NotNil(t, networkMap)
+
+	found := false
+	for _, remote := range networkMap.Peers {
+		if remote.ID == "peer-5" {
+			found = true
+			break
+		}
+	}
+	assert.True(t, found, "peer-0 should see peer-5 from same group")
+}
+
+// TestComponents_CrossGroupConnectivity verifies that the per-group ring
+// policies alone (no default allow-all) connect adjacent groups.
+func TestComponents_CrossGroupConnectivity(t *testing.T) {
+	// Without default policy, only per-group policies provide connectivity
+	account, validated := scalableTestAccountWithoutDefaultPolicy(20, 2)
+	networkMap := componentsNetworkMap(account, "peer-0", validated)
+	require.NotNil(t, networkMap)
+
+	found := false
+	for _, remote := range networkMap.Peers {
+		if remote.ID == "peer-10" {
+			found = true
+			break
+		}
+	}
+	assert.True(t, found, "peer-0 should see peer-10 from cross-group policy")
+}
+
+// TestComponents_BidirectionalPolicy verifies that a bidirectional policy makes
+// both endpoints visible to each other.
+func TestComponents_BidirectionalPolicy(t *testing.T) {
+	// Without default policy so bidirectional visibility comes only from per-group policies
+	account, validated := scalableTestAccountWithoutDefaultPolicy(100, 5)
+
+	sees := func(mapOwner, target string) bool {
+		networkMap := componentsNetworkMap(account, mapOwner, validated)
+		require.NotNil(t, networkMap)
+		for _, remote := range networkMap.Peers {
+			if remote.ID == target {
+				return true
+			}
+		}
+		return false
+	}
+
+	assert.True(t, sees("peer-0", "peer-20"), "peer-0 should see peer-20 via bidirectional policy")
+	assert.True(t, sees("peer-20", "peer-0"), "peer-20 should see peer-0 via bidirectional policy")
+}
+
+// ──────────────────────────────────────────────────────────────────────────────
+// 2. PEER EXPIRATION & ACCOUNT SETTINGS
+// ──────────────────────────────────────────────────────────────────────────────
+
+// TestComponents_ExpiredPeerInOfflineList verifies that the login-expired peer
+// (peer-98, the second-to-last of 100) appears only in OfflinePeers.
+func TestComponents_ExpiredPeerInOfflineList(t *testing.T) {
+	account, validated := scalableTestAccount(100, 5)
+	networkMap := componentsNetworkMap(account, "peer-0", validated)
+	require.NotNil(t, networkMap)
+
+	foundOffline := false
+	for _, offline := range networkMap.OfflinePeers {
+		if offline.ID == "peer-98" {
+			foundOffline = true
+			break
+		}
+	}
+	assert.True(t, foundOffline, "expired peer should be in OfflinePeers")
+	for _, active := range networkMap.Peers {
+		assert.NotEqual(t, "peer-98", active.ID, "expired peer should not be in active Peers")
+	}
+}
+
+// TestComponents_ExpirationDisabledSetting verifies that disabling login
+// expiration at the account level keeps the expired peer active.
+func TestComponents_ExpirationDisabledSetting(t *testing.T) {
+	account, validated := scalableTestAccount(100, 5)
+	account.Settings.PeerLoginExpirationEnabled = false
+
+	networkMap := componentsNetworkMap(account, "peer-0", validated)
+	require.NotNil(t, networkMap)
+
+	found := false
+	for _, remote := range networkMap.Peers {
+		if remote.ID == "peer-98" {
+			found = true
+			break
+		}
+	}
+	assert.True(t, found, "with expiration disabled, peer-98 should be in active Peers")
+}
+
+// TestComponents_LoginExpiration_PeerLevel verifies that manually expiring a
+// specific peer's login moves it into OfflinePeers.
+func TestComponents_LoginExpiration_PeerLevel(t *testing.T) {
+	account, validated := scalableTestAccount(20, 2)
+	account.Settings.PeerLoginExpirationEnabled = true
+	account.Settings.PeerLoginExpiration = 1 * time.Hour
+
+	// Push peer-5's last login past the one-hour expiration window.
+	pastLogin := time.Now().Add(-2 * time.Hour)
+	account.Peers["peer-5"].LastLogin = &pastLogin
+	account.Peers["peer-5"].LoginExpirationEnabled = true
+
+	networkMap := componentsNetworkMap(account, "peer-0", validated)
+	require.NotNil(t, networkMap)
+
+	foundOffline := false
+	for _, offline := range networkMap.OfflinePeers {
+		if offline.ID == "peer-5" {
+			foundOffline = true
+			break
+		}
+	}
+	assert.True(t, foundOffline, "login-expired peer should be in OfflinePeers")
+	for _, active := range networkMap.Peers {
+		assert.NotEqual(t, "peer-5", active.ID, "login-expired peer should not be in active Peers")
+	}
+}
+
+// TestComponents_NetworkSerial verifies the account's network serial is carried
+// into the generated map unchanged.
+func TestComponents_NetworkSerial(t *testing.T) {
+	account, validated := scalableTestAccount(50, 5)
+	account.Network.Serial = 42
+	networkMap := componentsNetworkMap(account, "peer-0", validated)
+	require.NotNil(t, networkMap)
+	assert.Equal(t, uint64(42), networkMap.Network.Serial, "network serial should match")
+}
+
+// ──────────────────────────────────────────────────────────────────────────────
+// 3. NON-VALIDATED PEERS
+// ──────────────────────────────────────────────────────────────────────────────
+
+// TestComponents_NonValidatedPeerExcluded verifies that the non-validated peer
+// (peer-99, last of 100) is absent from both the active and offline lists.
+func TestComponents_NonValidatedPeerExcluded(t *testing.T) {
+	account, validated := scalableTestAccount(100, 5)
+	networkMap := componentsNetworkMap(account, "peer-0", validated)
+	require.NotNil(t, networkMap)
+	for _, remote := range networkMap.Peers {
+		assert.NotEqual(t, "peer-99", remote.ID, "non-validated peer should not appear in Peers")
+	}
+	for _, offline := range networkMap.OfflinePeers {
+		assert.NotEqual(t, "peer-99", offline.ID, "non-validated peer should not appear in OfflinePeers")
+	}
+}
+
+// TestComponents_NonValidatedTargetPeerGetsEmptyMap verifies that requesting a
+// map for the non-validated peer itself yields an empty result.
+func TestComponents_NonValidatedTargetPeerGetsEmptyMap(t *testing.T) {
+	account, validated := scalableTestAccount(100, 5)
+	networkMap := componentsNetworkMap(account, "peer-99", validated)
+	require.NotNil(t, networkMap)
+	assert.Empty(t, networkMap.Peers)
+	assert.Empty(t, networkMap.FirewallRules)
+}
+
+// TestComponents_NonExistentPeerGetsEmptyMap verifies that an unknown peer ID
+// yields an empty map rather than a panic or nil.
+func TestComponents_NonExistentPeerGetsEmptyMap(t *testing.T) {
+	account, validated := scalableTestAccount(100, 5)
+	networkMap := componentsNetworkMap(account, "peer-does-not-exist", validated)
+	require.NotNil(t, networkMap)
+	assert.Empty(t, networkMap.Peers)
+	assert.Empty(t, networkMap.FirewallRules)
+}
+
+// ──────────────────────────────────────────────────────────────────────────────
+// 4. POLICIES & FIREWALL RULES
+// ──────────────────────────────────────────────────────────────────────────────
+
+// TestComponents_FirewallRulesGenerated verifies that the account's policies
+// produce at least one firewall rule.
+func TestComponents_FirewallRulesGenerated(t *testing.T) {
+	account, validated := scalableTestAccount(100, 5)
+	networkMap := componentsNetworkMap(account, "peer-0", validated)
+	require.NotNil(t, networkMap)
+	assert.NotEmpty(t, networkMap.FirewallRules, "should have firewall rules from policies")
+}
+
+// TestComponents_DropPolicyGeneratesDropRules verifies that the TCP/5432 drop
+// policy materializes as at least one drop-action firewall rule.
+func TestComponents_DropPolicyGeneratesDropRules(t *testing.T) {
+	account, validated := scalableTestAccount(100, 5)
+	networkMap := componentsNetworkMap(account, "peer-0", validated)
+	require.NotNil(t, networkMap)
+
+	dropSeen := false
+	for _, rule := range networkMap.FirewallRules {
+		if rule.Action == string(types.PolicyTrafficActionDrop) {
+			dropSeen = true
+			break
+		}
+	}
+	assert.True(t, dropSeen, "should have at least one drop firewall rule")
+}
+
+// TestComponents_DisabledPolicyIgnored verifies that disabling every policy
+// removes all connectivity and firewall rules from the map.
+func TestComponents_DisabledPolicyIgnored(t *testing.T) {
+	account, validated := scalableTestAccount(50, 2)
+	for _, policy := range account.Policies {
+		policy.Enabled = false
+	}
+	networkMap := componentsNetworkMap(account, "peer-0", validated)
+	require.NotNil(t, networkMap)
+	assert.Empty(t, networkMap.Peers, "disabled policies should yield no peers")
+	assert.Empty(t, networkMap.FirewallRules, "disabled policies should yield no firewall rules")
+}
+
+// TestComponents_PortPolicy checks that both the TCP/8080 allow policy and the
+// TCP/5432 drop policy materialize as port-specific firewall rules.
+func TestComponents_PortPolicy(t *testing.T) {
+	account, validated := scalableTestAccount(50, 2)
+	networkMap := componentsNetworkMap(account, "peer-0", validated)
+	require.NotNil(t, networkMap)
+
+	seenPorts := make(map[string]bool)
+	for _, rule := range networkMap.FirewallRules {
+		seenPorts[rule.Port] = true
+	}
+	assert.True(t, seenPorts["8080"], "should have firewall rule for port 8080")
+	assert.True(t, seenPorts["5432"], "should have firewall rule for port 5432 (drop policy)")
+}
+
+// TestComponents_PortRangePolicy checks that a PortRanges rule produces a
+// firewall rule carrying the 8000-9000 range.
+func TestComponents_PortRangePolicy(t *testing.T) {
+	account, validated := scalableTestAccount(50, 2)
+	// NOTE(review): the client version is bumped here — presumably port-range
+	// rules require a newer agent; confirm against the rule-generation code.
+	account.Peers["peer-0"].Meta.WtVersion = "0.50.0"
+
+	account.Policies = append(account.Policies, &types.Policy{
+		ID: "policy-port-range", Name: "Port Range", Enabled: true, AccountID: "test-account",
+		Rules: []*types.PolicyRule{{
+			ID: "rule-port-range", Name: "Port Range Rule", Enabled: true,
+			Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolTCP,
+			Bidirectional: true,
+			PortRanges:    []types.RulePortRange{{Start: 8000, End: 9000}},
+			Sources: []string{"group-0"}, Destinations: []string{"group-1"},
+		}},
+	})
+
+	networkMap := componentsNetworkMap(account, "peer-0", validated)
+	require.NotNil(t, networkMap)
+
+	found := false
+	for _, rule := range networkMap.FirewallRules {
+		if rule.PortRange.Start == 8000 && rule.PortRange.End == 9000 {
+			found = true
+			break
+		}
+	}
+	assert.True(t, found, "should have firewall rule with port range 8000-9000")
+}
+
+func TestComponents_FirewallRuleDirection(t *testing.T) {
+ account, validatedPeers := scalableTestAccount(50, 2)
+ nm := componentsNetworkMap(account, "peer-0", validatedPeers)
+ require.NotNil(t, nm)
+
+ hasIn, hasOut := false, false
+ for _, rule := range nm.FirewallRules {
+ if rule.Direction == types.FirewallRuleDirectionIN {
+ hasIn = true
+ }
+ if rule.Direction == types.FirewallRuleDirectionOUT {
+ hasOut = true
+ }
+ }
+ assert.True(t, hasIn, "should have inbound firewall rules")
+ assert.True(t, hasOut, "should have outbound firewall rules")
+}
+
+// ──────────────────────────────────────────────────────────────────────────────
+// 5. ROUTES
+// ──────────────────────────────────────────────────────────────────────────────
+
+// TestComponents_RoutesIncluded verifies that the account's routes reach the
+// generated network map.
+func TestComponents_RoutesIncluded(t *testing.T) {
+	account, validated := scalableTestAccount(100, 5)
+	networkMap := componentsNetworkMap(account, "peer-0", validated)
+	require.NotNil(t, networkMap)
+	assert.NotEmpty(t, networkMap.Routes, "should have routes")
+}
+
+// TestComponents_DisabledRouteExcluded verifies that routes disabled on the
+// account never appear in the generated network map.
+func TestComponents_DisabledRouteExcluded(t *testing.T) {
+	account, validatedPeers := scalableTestAccount(50, 2)
+	disabled := make(map[route.ID]struct{}, len(account.Routes))
+	for id, r := range account.Routes {
+		r.Enabled = false
+		disabled[id] = struct{}{}
+	}
+	nm := componentsNetworkMap(account, "peer-0", validatedPeers)
+	require.NotNil(t, nm)
+	for _, r := range nm.Routes {
+		// The original Enabled-only check was vacuous once every account route
+		// was disabled (only always-enabled resource routes could remain), so
+		// additionally assert that none of the disabled routes leaked through.
+		assert.True(t, r.Enabled, "only enabled routes should appear")
+		_, wasDisabled := disabled[r.ID]
+		assert.False(t, wasDisabled, "disabled route %s should not appear in the network map", r.ID)
+	}
+}
+
+// TestComponents_RoutesFirewallRulesForACG verifies that routes carrying
+// AccessControlGroups produce route firewall rules.
+func TestComponents_RoutesFirewallRulesForACG(t *testing.T) {
+	account, validated := scalableTestAccount(100, 5)
+	networkMap := componentsNetworkMap(account, "peer-0", validated)
+	require.NotNil(t, networkMap)
+	assert.NotEmpty(t, networkMap.RoutesFirewallRules, "should have route firewall rules for access-controlled routes")
+}
+
+// TestComponents_HARouteDeduplication verifies that two enabled routes for the
+// same prefix (an HA pair) collapse into a single entry in the network map.
+func TestComponents_HARouteDeduplication(t *testing.T) {
+	account, validated := scalableTestAccount(50, 5)
+
+	haPrefix := netip.MustParsePrefix("172.16.0.0/16")
+	account.Routes["route-ha-1"] = &route.Route{
+		ID: "route-ha-1", Network: haPrefix, PeerID: "peer-10",
+		Peer: account.Peers["peer-10"].Key, Enabled: true, Metric: 100,
+		Groups: []string{"group-all"}, PeerGroups: []string{"group-0"}, AccountID: "test-account",
+	}
+	account.Routes["route-ha-2"] = &route.Route{
+		ID: "route-ha-2", Network: haPrefix, PeerID: "peer-20",
+		Peer: account.Peers["peer-20"].Key, Enabled: true, Metric: 200,
+		Groups: []string{"group-all"}, PeerGroups: []string{"group-1"}, AccountID: "test-account",
+	}
+
+	networkMap := componentsNetworkMap(account, "peer-0", validated)
+	require.NotNil(t, networkMap)
+
+	matching := 0
+	for _, r := range networkMap.Routes {
+		if r.Network == haPrefix {
+			matching++
+		}
+	}
+	// Components deduplicates HA routes with the same HA unique ID, returning one entry per HA group
+	assert.Equal(t, 1, matching, "HA routes with same network should be deduplicated into one entry")
+}
+
+// ──────────────────────────────────────────────────────────────────────────────
+// 6. NETWORK RESOURCES & ROUTERS
+// ──────────────────────────────────────────────────────────────────────────────
+
+func TestComponents_NetworkResourceRoutes_RouterPeer(t *testing.T) {
+ account, validatedPeers := scalableTestAccount(100, 5)
+
+ var routerPeerID string
+ for _, nr := range account.NetworkRouters {
+ routerPeerID = nr.Peer
+ break
+ }
+ require.NotEmpty(t, routerPeerID)
+
+ nm := componentsNetworkMap(account, routerPeerID, validatedPeers)
+ require.NotNil(t, nm)
+ assert.NotEmpty(t, nm.Peers, "router peer should see source peers")
+}
+
+func TestComponents_NetworkResourceRoutes_SourcePeerSeesRouterPeer(t *testing.T) {
+ account, validatedPeers := scalableTestAccount(100, 5)
+
+ var routerPeerID string
+ for _, nr := range account.NetworkRouters {
+ routerPeerID = nr.Peer
+ break
+ }
+ require.NotEmpty(t, routerPeerID)
+
+ nm := componentsNetworkMap(account, "peer-0", validatedPeers)
+ require.NotNil(t, nm)
+
+ peerIDs := make(map[string]bool, len(nm.Peers))
+ for _, p := range nm.Peers {
+ peerIDs[p.ID] = true
+ }
+ assert.True(t, peerIDs[routerPeerID], "source peer should see router peer for network resource")
+}
+
+// TestComponents_DisabledNetworkResourceIgnored disables every network resource
+// and checks that map generation still succeeds.
+func TestComponents_DisabledNetworkResourceIgnored(t *testing.T) {
+	account, validatedPeers := scalableTestAccount(50, 5)
+	for _, nr := range account.NetworkResources {
+		nr.Enabled = false
+	}
+	nm := componentsNetworkMap(account, "peer-0", validatedPeers)
+	require.NotNil(t, nm)
+	// NOTE(review): this only asserts that a map is still produced; it does not
+	// verify that the disabled resources' routes are actually absent — consider
+	// strengthening the assertion.
+	assert.NotNil(t, nm.Network)
+}
+
+// ──────────────────────────────────────────────────────────────────────────────
+// 7. POSTURE CHECKS
+// ──────────────────────────────────────────────────────────────────────────────
+
+// TestComponents_PostureCheckFiltering_PassingPeer verifies that a peer passing
+// the version posture check receives routes, including resource routes.
+func TestComponents_PostureCheckFiltering_PassingPeer(t *testing.T) {
+	account, validated := scalableTestAccount(100, 5)
+	networkMap := componentsNetworkMap(account, "peer-0", validated)
+	require.NotNil(t, networkMap)
+	assert.NotEmpty(t, networkMap.Routes, "passing peer should have routes including resource routes")
+}
+
+// TestComponents_PostureCheckFiltering_FailingPeer compares a peer that passes
+// the minimum-version posture check against one that fails it.
+func TestComponents_PostureCheckFiltering_FailingPeer(t *testing.T) {
+	// peer-0 has version 0.40.0 (passes posture check >= 0.26.0)
+	// peer-1 has version 0.25.0 (fails posture check >= 0.26.0)
+	// Resource policies require posture-check-ver, so the failing peer
+	// should not see the router peer for those resources.
+	account, validated := scalableTestAccountWithoutDefaultPolicy(100, 5)
+
+	passingMap := componentsNetworkMap(account, "peer-0", validated)
+	failingMap := componentsNetworkMap(account, "peer-1", validated)
+	require.NotNil(t, passingMap)
+	require.NotNil(t, failingMap)
+
+	// The passing peer should have more peers visible (including resource router peers)
+	// than the failing peer, because the failing peer is excluded from resource policies.
+	assert.Greater(t, len(passingMap.Peers), len(failingMap.Peers),
+		"passing peer (0.40.0) should see more peers than failing peer (0.25.0) due to posture-gated resource policies")
+}
+
+// TestComponents_MultiplePostureChecks verifies that a policy gated by two
+// posture checks only admits peers that pass both.
+func TestComponents_MultiplePostureChecks(t *testing.T) {
+	account, validatedPeers := scalableTestAccountWithoutDefaultPolicy(50, 2)
+
+	// Clear all existing policies so the multi-posture policy added below is
+	// the only source of connectivity.
+	account.Policies = []*types.Policy{}
+
+	// Set kernel version on peers so the OS posture check can evaluate
+	for _, p := range account.Peers {
+		p.Meta.KernelVersion = "5.15.0"
+	}
+
+	account.PostureChecks = append(account.PostureChecks, &posture.Checks{
+		ID: "posture-check-os", Name: "Check OS",
+		Checks: posture.ChecksDefinition{
+			OSVersionCheck: &posture.OSVersionCheck{Linux: &posture.MinKernelVersionCheck{MinKernelVersion: "0.0.1"}},
+		},
+	})
+	account.Policies = append(account.Policies, &types.Policy{
+		ID: "policy-multi-posture", Name: "Multi Posture", Enabled: true, AccountID: "test-account",
+		SourcePostureChecks: []string{"posture-check-ver", "posture-check-os"},
+		Rules: []*types.PolicyRule{{
+			ID: "rule-multi-posture", Name: "Multi Check Rule", Enabled: true,
+			Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolALL,
+			Bidirectional: true,
+			Sources: []string{"group-0"}, Destinations: []string{"group-1"},
+		}},
+	})
+
+	// peer-0 (0.40.0, kernel 5.15.0) passes both checks, should see group-1 peers
+	nm0 := componentsNetworkMap(account, "peer-0", validatedPeers)
+	require.NotNil(t, nm0)
+	assert.NotEmpty(t, nm0.Peers, "peer passing both posture checks should see destination peers")
+
+	// peer-1 (0.25.0, kernel 5.15.0) fails version check, should NOT see group-1 peers
+	nm1 := componentsNetworkMap(account, "peer-1", validatedPeers)
+	require.NotNil(t, nm1)
+	assert.Empty(t, nm1.Peers,
+		"peer failing posture check should see no peers when posture-gated policy is the only connectivity")
+}
+
+// ──────────────────────────────────────────────────────────────────────────────
+// 8. DNS
+// ──────────────────────────────────────────────────────────────────────────────
+
+// TestComponents_DNSConfigEnabled verifies the DNS service is on and the main
+// nameserver group is distributed.
+func TestComponents_DNSConfigEnabled(t *testing.T) {
+	account, validated := scalableTestAccount(100, 5)
+	networkMap := componentsNetworkMap(account, "peer-0", validated)
+	require.NotNil(t, networkMap)
+	assert.True(t, networkMap.DNSConfig.ServiceEnable, "DNS should be enabled")
+	assert.NotEmpty(t, networkMap.DNSConfig.NameServerGroups, "should have nameserver groups")
+}
+
+// TestComponents_DNSDisabledByManagementGroup verifies that listing a peer's
+// group under DisabledManagementGroups turns DNS off for that peer.
+func TestComponents_DNSDisabledByManagementGroup(t *testing.T) {
+	account, validated := scalableTestAccount(100, 5)
+	account.DNSSettings.DisabledManagementGroups = []string{"group-all"}
+
+	networkMap := componentsNetworkMap(account, "peer-0", validated)
+	require.NotNil(t, networkMap)
+	assert.False(t, networkMap.DNSConfig.ServiceEnable, "DNS should be disabled for peer in disabled group")
+}
+
+// TestComponents_DNSNameServerGroupDistribution verifies that a group-scoped
+// nameserver group reaches only peers of that group.
+func TestComponents_DNSNameServerGroupDistribution(t *testing.T) {
+	account, validated := scalableTestAccount(20, 2)
+	account.NameServerGroups["ns-group-0"] = &nbdns.NameServerGroup{
+		ID: "ns-group-0", Name: "Group 0 NS", Enabled: true, Groups: []string{"group-0"},
+		NameServers: []nbdns.NameServer{{IP: netip.MustParseAddr("1.1.1.1"), NSType: nbdns.UDPNameServerType, Port: 53}},
+	}
+
+	receivesGroup0NS := func(peerID string) bool {
+		networkMap := componentsNetworkMap(account, peerID, validated)
+		require.NotNil(t, networkMap)
+		for _, nsGroup := range networkMap.DNSConfig.NameServerGroups {
+			if nsGroup.ID == "ns-group-0" {
+				return true
+			}
+		}
+		return false
+	}
+
+	assert.True(t, receivesGroup0NS("peer-0"), "peer-0 in group-0 should receive ns-group-0")
+	assert.False(t, receivesGroup0NS("peer-10"), "peer-10 in group-1 should NOT receive ns-group-0")
+}
+
+// TestComponents_DNSCustomZone checks that map generation succeeds when a
+// custom DNS zone with peer records is supplied.
+func TestComponents_DNSCustomZone(t *testing.T) {
+	account, validated := scalableTestAccount(20, 2)
+	zone := nbdns.CustomZone{
+		Domain: "netbird.cloud.",
+		Records: []nbdns.SimpleRecord{
+			{Name: "peer0.netbird.cloud.", Type: 1, Class: "IN", TTL: 300, RData: account.Peers["peer-0"].IP.String()},
+			{Name: "peer1.netbird.cloud.", Type: 1, Class: "IN", TTL: 300, RData: account.Peers["peer-1"].IP.String()},
+		},
+	}
+
+	networkMap := account.GetPeerNetworkMapFromComponents(
+		context.Background(), "peer-0", zone, nil,
+		validated, account.GetResourcePoliciesMap(), account.GetResourceRoutersMap(),
+		nil, account.GetActiveGroupUsers(),
+	)
+	require.NotNil(t, networkMap)
+	assert.True(t, networkMap.DNSConfig.ServiceEnable)
+}
+
+// ──────────────────────────────────────────────────────────────────────────────
+// 9. SSH
+// ──────────────────────────────────────────────────────────────────────────────
+
+// TestComponents_SSHPolicy verifies that a NetBird-SSH policy enables SSH on
+// the destination peer's network map.
+func TestComponents_SSHPolicy(t *testing.T) {
+	account, validated := scalableTestAccount(20, 2)
+	account.Groups["ssh-users"] = &types.Group{ID: "ssh-users", Name: "SSH Users", Peers: []string{}}
+	sshPolicy := &types.Policy{
+		ID: "policy-ssh", Name: "SSH Access", Enabled: true, AccountID: "test-account",
+		Rules: []*types.PolicyRule{{
+			ID: "rule-ssh", Name: "Allow SSH", Enabled: true,
+			Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolNetbirdSSH,
+			Bidirectional: false,
+			Sources: []string{"group-0"}, Destinations: []string{"group-1"},
+			AuthorizedGroups: map[string][]string{"ssh-users": {"root"}},
+		}},
+	}
+	account.Policies = append(account.Policies, sshPolicy)
+
+	// peer-10 belongs to group-1, the destination of the SSH rule.
+	networkMap := componentsNetworkMap(account, "peer-10", validated)
+	require.NotNil(t, networkMap)
+	assert.True(t, networkMap.EnableSSH, "SSH should be enabled for destination peer of SSH policy")
+}
+
+// TestComponents_SSHNotEnabledWithoutPolicy verifies SSH stays off when no SSH
+// policy targets the peer.
+func TestComponents_SSHNotEnabledWithoutPolicy(t *testing.T) {
+	account, validated := scalableTestAccount(20, 2)
+	networkMap := componentsNetworkMap(account, "peer-0", validated)
+	require.NotNil(t, networkMap)
+	assert.False(t, networkMap.EnableSSH, "SSH should not be enabled without SSH policy")
+}
+
+// ──────────────────────────────────────────────────────────────────────────────
+// 10. CROSS-PEER CONSISTENCY
+// ──────────────────────────────────────────────────────────────────────────────
+
+// TestComponents_AllPeersGetValidMaps verifies that every validated peer gets a
+// non-nil map with a consistent network serial and non-empty peer list.
+func TestComponents_AllPeersGetValidMaps(t *testing.T) {
+	account, validated := scalableTestAccount(50, 5)
+	// The validated set contains exactly the account peers that are validated,
+	// so iterating it directly is equivalent to filtering account.Peers.
+	for peerID := range validated {
+		networkMap := componentsNetworkMap(account, peerID, validated)
+		require.NotNil(t, networkMap, "network map should not be nil for %s", peerID)
+		assert.Equal(t, account.Network.Serial, networkMap.Network.Serial, "serial mismatch for %s", peerID)
+		assert.NotEmpty(t, networkMap.Peers, "validated peer %s should see other peers", peerID)
+	}
+}
+
+// TestComponents_LargeScaleMapGeneration verifies that components can generate maps
+// at larger scales without errors and with consistent output.
+func TestComponents_LargeScaleMapGeneration(t *testing.T) {
+	for _, scale := range []struct{ peers, groups int }{{500, 20}, {1000, 50}} {
+		t.Run(fmt.Sprintf("%dpeers_%dgroups", scale.peers, scale.groups), func(t *testing.T) {
+			account, validated := scalableTestAccount(scale.peers, scale.groups)
+			sampled := []string{"peer-0", fmt.Sprintf("peer-%d", scale.peers/4), fmt.Sprintf("peer-%d", scale.peers/2)}
+			for _, peerID := range sampled {
+				networkMap := componentsNetworkMap(account, peerID, validated)
+				require.NotNil(t, networkMap, "network map should not be nil for %s", peerID)
+				assert.NotEmpty(t, networkMap.Peers, "peer %s should see other peers at scale", peerID)
+				assert.NotEmpty(t, networkMap.Routes, "peer %s should have routes at scale", peerID)
+				assert.Equal(t, account.Network.Serial, networkMap.Network.Serial, "serial mismatch for %s", peerID)
+			}
+		})
+	}
+}
+
+// ──────────────────────────────────────────────────────────────────────────────
+// 11. PEER-AS-RESOURCE POLICIES
+// ──────────────────────────────────────────────────────────────────────────────
+
+// TestComponents_PeerAsSourceResource verifies that a policy with SourceResource.Type=Peer
+// targets only that specific peer as the source.
+func TestComponents_PeerAsSourceResource(t *testing.T) {
+	account, validatedPeers := scalableTestAccount(20, 2)
+
+	policy := &types.Policy{
+		ID:        "policy-peer-src",
+		Name:      "Peer Source Resource",
+		Enabled:   true,
+		AccountID: "test-account",
+		Rules: []*types.PolicyRule{{
+			ID:             "rule-peer-src",
+			Name:           "Peer Source Rule",
+			Enabled:        true,
+			Action:         types.PolicyTrafficActionAccept,
+			Protocol:       types.PolicyRuleProtocolTCP,
+			Bidirectional:  true,
+			Ports:          []string{"443"},
+			SourceResource: types.Resource{ID: "peer-0", Type: types.ResourceTypePeer},
+			Destinations:   []string{"group-1"},
+		}},
+	}
+	account.Policies = append(account.Policies, policy)
+
+	// peer-0 is the source resource and must receive the port-443 rule.
+	nm0 := componentsNetworkMap(account, "peer-0", validatedPeers)
+	require.NotNil(t, nm0)
+
+	portSeen := false
+	for _, fwRule := range nm0.FirewallRules {
+		if fwRule.Port == "443" {
+			portSeen = true
+			break
+		}
+	}
+	assert.True(t, portSeen, "peer-0 as source resource should have port 443 rule")
+}
+
+// TestComponents_PeerAsDestinationResource verifies that a policy with DestinationResource.Type=Peer
+// targets only that specific peer as the destination.
+func TestComponents_PeerAsDestinationResource(t *testing.T) {
+	account, validatedPeers := scalableTestAccountWithoutDefaultPolicy(20, 2)
+
+	policy := &types.Policy{
+		ID:        "policy-peer-dst",
+		Name:      "Peer Dest Resource",
+		Enabled:   true,
+		AccountID: "test-account",
+		Rules: []*types.PolicyRule{{
+			ID:                  "rule-peer-dst",
+			Name:                "Peer Dest Rule",
+			Enabled:             true,
+			Action:              types.PolicyTrafficActionAccept,
+			Protocol:            types.PolicyRuleProtocolTCP,
+			Bidirectional:       true,
+			Ports:               []string{"443"},
+			Sources:             []string{"group-0"},
+			DestinationResource: types.Resource{ID: "peer-15", Type: types.ResourceTypePeer},
+		}},
+	}
+	account.Policies = append(account.Policies, policy)
+
+	// peer-0 belongs to group-0 (the source side) and must see peer-15.
+	nm0 := componentsNetworkMap(account, "peer-0", validatedPeers)
+	require.NotNil(t, nm0)
+
+	foundTarget := false
+	for _, remote := range nm0.Peers {
+		if remote.ID == "peer-15" {
+			foundTarget = true
+			break
+		}
+	}
+	assert.True(t, foundTarget, "peer-0 should see peer-15 via peer-as-destination-resource policy")
+}
+
+// ──────────────────────────────────────────────────────────────────────────────
+// 12. MULTIPLE RULES PER POLICY
+// ──────────────────────────────────────────────────────────────────────────────
+
+// TestComponents_MultipleRulesPerPolicy verifies a policy with multiple rules generates
+// firewall rules for each.
+func TestComponents_MultipleRulesPerPolicy(t *testing.T) {
+	account, validatedPeers := scalableTestAccount(20, 2)
+
+	account.Policies = append(account.Policies, &types.Policy{
+		ID: "policy-multi-rule", Name: "Multi Rule Policy", Enabled: true, AccountID: "test-account",
+		Rules: []*types.PolicyRule{
+			{
+				ID: "rule-http", Name: "Allow HTTP", Enabled: true,
+				Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolTCP,
+				Bidirectional: true, Ports: []string{"80"},
+				Sources: []string{"group-0"}, Destinations: []string{"group-1"},
+			},
+			{
+				ID: "rule-https", Name: "Allow HTTPS", Enabled: true,
+				Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolTCP,
+				Bidirectional: true, Ports: []string{"443"},
+				Sources: []string{"group-0"}, Destinations: []string{"group-1"},
+			},
+		},
+	})
+
+	nm := componentsNetworkMap(account, "peer-0", validatedPeers)
+	require.NotNil(t, nm)
+
+	// Collect the distinct ports across all generated firewall rules.
+	seenPorts := make(map[string]bool, len(nm.FirewallRules))
+	for _, fwRule := range nm.FirewallRules {
+		seenPorts[fwRule.Port] = true
+	}
+	assert.True(t, seenPorts["80"], "should have firewall rule for port 80 from first rule")
+	assert.True(t, seenPorts["443"], "should have firewall rule for port 443 from second rule")
+}
+
+// ──────────────────────────────────────────────────────────────────────────────
+// 13. SSH AUTHORIZED USERS CONTENT
+// ──────────────────────────────────────────────────────────────────────────────
+
+// TestComponents_SSHAuthorizedUsersContent verifies that SSH policies populate
+// the AuthorizedUsers map with the correct users and machine mappings.
+func TestComponents_SSHAuthorizedUsersContent(t *testing.T) {
+	account, validatedPeers := scalableTestAccount(20, 2)
+
+	account.Users["user-dev"] = &types.User{Id: "user-dev", Role: types.UserRoleUser, AccountID: "test-account", AutoGroups: []string{"ssh-users"}}
+	account.Groups["ssh-users"] = &types.Group{ID: "ssh-users", Name: "SSH Users", Peers: []string{}}
+
+	account.Policies = append(account.Policies, &types.Policy{
+		ID: "policy-ssh", Name: "SSH Access", Enabled: true, AccountID: "test-account",
+		Rules: []*types.PolicyRule{{
+			ID: "rule-ssh", Name: "Allow SSH", Enabled: true,
+			Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolNetbirdSSH,
+			Bidirectional: false,
+			Sources: []string{"group-0"}, Destinations: []string{"group-1"},
+			AuthorizedGroups: map[string][]string{"ssh-users": {"root", "admin"}},
+		}},
+	})
+
+	// peer-10 sits in group-1, the SSH destination side.
+	nm := componentsNetworkMap(account, "peer-10", validatedPeers)
+	require.NotNil(t, nm)
+	assert.True(t, nm.EnableSSH, "SSH should be enabled")
+	assert.NotNil(t, nm.AuthorizedUsers, "AuthorizedUsers should not be nil")
+	assert.NotEmpty(t, nm.AuthorizedUsers, "AuthorizedUsers should have entries")
+
+	// At least one of the configured machine users must appear as a key.
+	_, rootMapped := nm.AuthorizedUsers["root"]
+	_, adminMapped := nm.AuthorizedUsers["admin"]
+	assert.True(t, rootMapped || adminMapped, "AuthorizedUsers should contain 'root' or 'admin' machine user mapping")
+}
+
+// TestComponents_SSHLegacyImpliedSSH verifies that a non-SSH ALL protocol policy with
+// SSHEnabled peer implies legacy SSH access.
+func TestComponents_SSHLegacyImpliedSSH(t *testing.T) {
+	account, validatedPeers := scalableTestAccount(20, 2)
+
+	// Enable SSH on the destination peer
+	account.Peers["peer-10"].SSHEnabled = true
+
+	// The default "Allow All" policy with Protocol=ALL + SSHEnabled peer should imply SSH
+	// (legacy behavior: no explicit NetbirdSSH rule is required on the policy).
+	nm := componentsNetworkMap(account, "peer-10", validatedPeers)
+	require.NotNil(t, nm)
+	assert.True(t, nm.EnableSSH, "SSH should be implied by ALL protocol policy with SSHEnabled peer")
+}
+
+// ──────────────────────────────────────────────────────────────────────────────
+// 14. ROUTE DEFAULT PERMIT (no AccessControlGroups)
+// ──────────────────────────────────────────────────────────────────────────────
+
+// TestComponents_RouteDefaultPermit verifies that a route without AccessControlGroups
+// generates default permit firewall rules (0.0.0.0/0 source).
+func TestComponents_RouteDefaultPermit(t *testing.T) {
+	account, validatedPeers := scalableTestAccount(20, 2)
+
+	// Add a route without ACGs — this peer is the routing peer
+	routingPeerID := "peer-5"
+	account.Routes["route-no-acg"] = &route.Route{
+		ID: "route-no-acg", Network: netip.MustParsePrefix("192.168.99.0/24"),
+		PeerID: routingPeerID, Peer: account.Peers[routingPeerID].Key,
+		Enabled: true, Groups: []string{"group-all"}, PeerGroups: []string{"group-0"},
+		AccessControlGroups: []string{},
+		AccountID:           "test-account",
+	}
+
+	// The routing peer should get default permit route firewall rules
+	nm := componentsNetworkMap(account, routingPeerID, validatedPeers)
+	require.NotNil(t, nm)
+
+	// Labeled break: the previous inner break only exited the SourceRanges
+	// loop, so the scan pointlessly kept iterating rules after a match.
+	hasDefaultPermit := false
+scan:
+	for _, rfr := range nm.RoutesFirewallRules {
+		for _, src := range rfr.SourceRanges {
+			if src == "0.0.0.0/0" || src == "::/0" {
+				hasDefaultPermit = true
+				break scan
+			}
+		}
+	}
+	assert.True(t, hasDefaultPermit, "route without ACG should have default permit rule with 0.0.0.0/0 source")
+}
+
+// ──────────────────────────────────────────────────────────────────────────────
+// 15. MULTIPLE ROUTERS PER NETWORK
+// ──────────────────────────────────────────────────────────────────────────────
+
+// TestComponents_MultipleRoutersPerNetwork verifies that a network resource
+// with multiple routers provides routes through all available routers.
+func TestComponents_MultipleRoutersPerNetwork(t *testing.T) {
+ account, validatedPeers := scalableTestAccountWithoutDefaultPolicy(20, 2)
+
+ netID := "net-multi-router"
+ resID := "res-multi-router"
+ account.Networks = append(account.Networks, &networkTypes.Network{ID: netID, Name: "Multi Router Network", AccountID: "test-account"})
+ account.NetworkResources = append(account.NetworkResources, &resourceTypes.NetworkResource{
+ ID: resID, NetworkID: netID, AccountID: "test-account", Enabled: true,
+ Address: "multi-svc.netbird.cloud",
+ })
+ account.NetworkRouters = append(account.NetworkRouters,
+ &routerTypes.NetworkRouter{ID: "router-a", NetworkID: netID, Peer: "peer-5", Enabled: true, AccountID: "test-account", Metric: 100},
+ &routerTypes.NetworkRouter{ID: "router-b", NetworkID: netID, Peer: "peer-15", Enabled: true, AccountID: "test-account", Metric: 200},
+ )
+ account.Policies = append(account.Policies, &types.Policy{
+ ID: "policy-multi-router-res", Name: "Multi Router Resource", Enabled: true, AccountID: "test-account",
+ Rules: []*types.PolicyRule{{
+ ID: "rule-multi-router-res", Name: "Allow Multi Router", Enabled: true,
+ Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolALL, Bidirectional: true,
+ Sources: []string{"group-0"}, DestinationResource: types.Resource{ID: resID},
+ }},
+ })
+
+ // peer-0 is in group-0 (source), should see both router peers
+ nm := componentsNetworkMap(account, "peer-0", validatedPeers)
+ require.NotNil(t, nm)
+
+ peerIDs := make(map[string]bool, len(nm.Peers))
+ for _, p := range nm.Peers {
+ peerIDs[p.ID] = true
+ }
+ assert.True(t, peerIDs["peer-5"], "source peer should see router-a (peer-5)")
+ assert.True(t, peerIDs["peer-15"], "source peer should see router-b (peer-15)")
+}
+
+// ──────────────────────────────────────────────────────────────────────────────
+// 16. PEER-AS-NAMESERVER EXCLUSION
+// ──────────────────────────────────────────────────────────────────────────────
+
+// TestComponents_PeerIsNameserverExcludedFromNSGroup verifies that a peer serving
+// as a nameserver does not receive its own NS group in DNS config.
+func TestComponents_PeerIsNameserverExcludedFromNSGroup(t *testing.T) {
+	account, validatedPeers := scalableTestAccount(20, 2)
+
+	// Make peer-0 the nameserver. Use To4() so the byte indexing below is
+	// correct regardless of whether the net.IP holds the address in 4- or
+	// 16-byte form — raw nsIP[0..3] silently reads header bytes of the
+	// 16-byte representation. (Assumes Peer.IP is a net.IP — TODO confirm.)
+	nsIP := account.Peers["peer-0"].IP.To4()
+	require.NotNil(t, nsIP, "test expects peer-0 to have an IPv4 address")
+	account.NameServerGroups["ns-self"] = &nbdns.NameServerGroup{
+		ID: "ns-self", Name: "Self NS", Enabled: true, Groups: []string{"group-all"},
+		NameServers: []nbdns.NameServer{{IP: netip.AddrFrom4([4]byte{nsIP[0], nsIP[1], nsIP[2], nsIP[3]}), NSType: nbdns.UDPNameServerType, Port: 53}},
+	}
+
+	nm := componentsNetworkMap(account, "peer-0", validatedPeers)
+	require.NotNil(t, nm)
+
+	hasSelfNS := false
+	for _, ns := range nm.DNSConfig.NameServerGroups {
+		if ns.ID == "ns-self" {
+			hasSelfNS = true
+			break
+		}
+	}
+	assert.False(t, hasSelfNS, "peer serving as nameserver should NOT receive its own NS group")
+
+	// peer-10 is NOT the nameserver, so it should receive the NS group.
+	nm10 := componentsNetworkMap(account, "peer-10", validatedPeers)
+	require.NotNil(t, nm10)
+	hasNSForPeer10 := false
+	for _, ns := range nm10.DNSConfig.NameServerGroups {
+		if ns.ID == "ns-self" {
+			hasNSForPeer10 = true
+			break
+		}
+	}
+	assert.True(t, hasNSForPeer10, "non-nameserver peer should receive the NS group")
+}
+
+// ──────────────────────────────────────────────────────────────────────────────
+// 17. DOMAIN NETWORK RESOURCES
+// ──────────────────────────────────────────────────────────────────────────────
+
+// TestComponents_DomainNetworkResource verifies that domain-based network resources
+// produce routes with the correct domain configuration.
+func TestComponents_DomainNetworkResource(t *testing.T) {
+ account, validatedPeers := scalableTestAccountWithoutDefaultPolicy(20, 2)
+
+ netID := "net-domain"
+ resID := "res-domain"
+ account.Networks = append(account.Networks, &networkTypes.Network{ID: netID, Name: "Domain Network", AccountID: "test-account"})
+ account.NetworkResources = append(account.NetworkResources, &resourceTypes.NetworkResource{
+ ID: resID, NetworkID: netID, AccountID: "test-account", Enabled: true,
+ Address: "api.example.com", Type: "domain",
+ })
+ account.NetworkRouters = append(account.NetworkRouters, &routerTypes.NetworkRouter{
+ ID: "router-domain", NetworkID: netID, Peer: "peer-5", Enabled: true, AccountID: "test-account",
+ })
+ account.Policies = append(account.Policies, &types.Policy{
+ ID: "policy-domain-res", Name: "Domain Resource Policy", Enabled: true, AccountID: "test-account",
+ Rules: []*types.PolicyRule{{
+ ID: "rule-domain-res", Name: "Allow Domain", Enabled: true,
+ Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolALL, Bidirectional: true,
+ Sources: []string{"group-0"}, DestinationResource: types.Resource{ID: resID},
+ }},
+ })
+
+ // peer-0 is source, should get route to the domain resource via peer-5
+ nm := componentsNetworkMap(account, "peer-0", validatedPeers)
+ require.NotNil(t, nm)
+
+ peerIDs := make(map[string]bool, len(nm.Peers))
+ for _, p := range nm.Peers {
+ peerIDs[p.ID] = true
+ }
+ assert.True(t, peerIDs["peer-5"], "source peer should see domain resource router peer")
+}
+
+// ──────────────────────────────────────────────────────────────────────────────
+// 18. DISABLED RULE WITHIN ENABLED POLICY
+// ──────────────────────────────────────────────────────────────────────────────
+
+// TestComponents_DisabledRuleInEnabledPolicy verifies that a disabled rule within
+// an enabled policy does not generate firewall rules.
+func TestComponents_DisabledRuleInEnabledPolicy(t *testing.T) {
+	account, validatedPeers := scalableTestAccount(20, 2)
+
+	account.Policies = append(account.Policies, &types.Policy{
+		ID: "policy-mixed-rules", Name: "Mixed Rules", Enabled: true, AccountID: "test-account",
+		Rules: []*types.PolicyRule{
+			{
+				ID: "rule-enabled", Name: "Enabled Rule", Enabled: true,
+				Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolTCP,
+				Bidirectional: true, Ports: []string{"3000"},
+				Sources: []string{"group-0"}, Destinations: []string{"group-1"},
+			},
+			{
+				ID: "rule-disabled", Name: "Disabled Rule", Enabled: false,
+				Action: types.PolicyTrafficActionAccept, Protocol: types.PolicyRuleProtocolTCP,
+				Bidirectional: true, Ports: []string{"3001"},
+				Sources: []string{"group-0"}, Destinations: []string{"group-1"},
+			},
+		},
+	})
+
+	nm := componentsNetworkMap(account, "peer-0", validatedPeers)
+	require.NotNil(t, nm)
+
+	// Collect the distinct ports across all generated firewall rules.
+	seenPorts := make(map[string]bool, len(nm.FirewallRules))
+	for _, fwRule := range nm.FirewallRules {
+		seenPorts[fwRule.Port] = true
+	}
+	assert.True(t, seenPorts["3000"], "enabled rule should generate firewall rule for port 3000")
+	assert.False(t, seenPorts["3001"], "disabled rule should NOT generate firewall rule for port 3001")
+}
diff --git a/proxy/cmd/proxy/cmd/root.go b/proxy/cmd/proxy/cmd/root.go
index 1c36ee334..ec8980ad9 100644
--- a/proxy/cmd/proxy/cmd/root.go
+++ b/proxy/cmd/proxy/cmd/root.go
@@ -35,7 +35,7 @@ var (
)
var (
- logLevel string
+ logLevel string
debugLogs bool
mgmtAddr string
addr string
@@ -64,6 +64,8 @@ var (
supportsCustomPorts bool
requireSubdomain bool
geoDataDir string
+ crowdsecAPIURL string
+ crowdsecAPIKey string
)
var rootCmd = &cobra.Command{
@@ -106,6 +108,8 @@ func init() {
rootCmd.Flags().DurationVar(&maxDialTimeout, "max-dial-timeout", envDurationOrDefault("NB_PROXY_MAX_DIAL_TIMEOUT", 0), "Cap per-service backend dial timeout (0 = no cap)")
rootCmd.Flags().DurationVar(&maxSessionIdleTimeout, "max-session-idle-timeout", envDurationOrDefault("NB_PROXY_MAX_SESSION_IDLE_TIMEOUT", 0), "Cap per-service session idle timeout (0 = no cap)")
rootCmd.Flags().StringVar(&geoDataDir, "geo-data-dir", envStringOrDefault("NB_PROXY_GEO_DATA_DIR", "/var/lib/netbird/geolocation"), "Directory for the GeoLite2 MMDB file (auto-downloaded if missing)")
+ rootCmd.Flags().StringVar(&crowdsecAPIURL, "crowdsec-api-url", envStringOrDefault("NB_PROXY_CROWDSEC_API_URL", ""), "CrowdSec LAPI URL for IP reputation checks")
+ rootCmd.Flags().StringVar(&crowdsecAPIKey, "crowdsec-api-key", envStringOrDefault("NB_PROXY_CROWDSEC_API_KEY", ""), "CrowdSec bouncer API key")
}
// Execute runs the root command.
@@ -187,6 +191,8 @@ func runServer(cmd *cobra.Command, args []string) error {
MaxDialTimeout: maxDialTimeout,
MaxSessionIdleTimeout: maxSessionIdleTimeout,
GeoDataDir: geoDataDir,
+ CrowdSecAPIURL: crowdsecAPIURL,
+ CrowdSecAPIKey: crowdsecAPIKey,
}
ctx, stop := signal.NotifyContext(context.Background(), syscall.SIGTERM, syscall.SIGINT)
diff --git a/proxy/internal/accesslog/logger.go b/proxy/internal/accesslog/logger.go
index 3ed3275b5..3283f61db 100644
--- a/proxy/internal/accesslog/logger.go
+++ b/proxy/internal/accesslog/logger.go
@@ -2,6 +2,7 @@ package accesslog
import (
"context"
+ "maps"
"net/netip"
"sync"
"sync/atomic"
@@ -126,6 +127,7 @@ type logEntry struct {
BytesUpload int64
BytesDownload int64
Protocol Protocol
+ Metadata map[string]string
}
// Protocol identifies the transport protocol of an access log entry.
@@ -150,8 +152,10 @@ type L4Entry struct {
BytesDownload int64
// DenyReason, when non-empty, indicates the connection was denied.
// Values match the HTTP auth mechanism strings: "ip_restricted",
- // "country_restricted", "geo_unavailable".
+ // "country_restricted", "geo_unavailable", "crowdsec_ban", etc.
DenyReason string
+ // Metadata carries extra context about the connection (e.g. CrowdSec verdict).
+ Metadata map[string]string
}
// LogL4 sends an access log entry for a layer-4 connection (TCP or UDP).
@@ -167,6 +171,7 @@ func (l *Logger) LogL4(entry L4Entry) {
DurationMs: entry.DurationMs,
BytesUpload: entry.BytesUpload,
BytesDownload: entry.BytesDownload,
+ Metadata: maps.Clone(entry.Metadata),
}
if entry.DenyReason != "" {
if !l.allowDenyLog(entry.ServiceID, entry.DenyReason) {
@@ -258,6 +263,7 @@ func (l *Logger) log(entry logEntry) {
BytesUpload: entry.BytesUpload,
BytesDownload: entry.BytesDownload,
Protocol: string(entry.Protocol),
+ Metadata: entry.Metadata,
},
}); err != nil {
l.logger.WithFields(log.Fields{
diff --git a/proxy/internal/accesslog/middleware.go b/proxy/internal/accesslog/middleware.go
index 81c790b17..5a0684c19 100644
--- a/proxy/internal/accesslog/middleware.go
+++ b/proxy/internal/accesslog/middleware.go
@@ -82,6 +82,7 @@ func (l *Logger) Middleware(next http.Handler) http.Handler {
BytesUpload: bytesUpload,
BytesDownload: bytesDownload,
Protocol: ProtocolHTTP,
+ Metadata: capturedData.GetMetadata(),
}
l.logger.Debugf("response: request_id=%s method=%s host=%s path=%s status=%d duration=%dms source=%s origin=%s service=%s account=%s",
requestID, r.Method, host, r.URL.Path, sw.status, duration.Milliseconds(), sourceIp, capturedData.GetOrigin(), capturedData.GetServiceID(), capturedData.GetAccountID())
diff --git a/proxy/internal/auth/middleware.go b/proxy/internal/auth/middleware.go
index 670cafb68..055e4510f 100644
--- a/proxy/internal/auth/middleware.go
+++ b/proxy/internal/auth/middleware.go
@@ -167,6 +167,20 @@ func (mw *Middleware) checkIPRestrictions(w http.ResponseWriter, r *http.Request
return true
}
+ if verdict.IsCrowdSec() {
+ if cd := proxy.CapturedDataFromContext(r.Context()); cd != nil {
+ cd.SetMetadata("crowdsec_verdict", verdict.String())
+ if config.IPRestrictions.IsObserveOnly(verdict) {
+ cd.SetMetadata("crowdsec_mode", "observe")
+ }
+ }
+ }
+
+ if config.IPRestrictions.IsObserveOnly(verdict) {
+ mw.logger.Debugf("CrowdSec observe: would block %s for %s (%s)", clientIP, r.Host, verdict)
+ return true
+ }
+
reason := verdict.String()
mw.blockIPRestriction(r, reason)
http.Error(w, "Forbidden", http.StatusForbidden)
@@ -358,6 +372,12 @@ func (mw *Middleware) authenticateWithSchemes(w http.ResponseWriter, r *http.Req
cd.SetAuthMethod(attemptedMethod)
}
}
+
+ if oidcURL, ok := methods[auth.MethodOIDC.String()]; ok && len(methods) == 1 && oidcURL != "" {
+ http.Redirect(w, r, oidcURL, http.StatusFound)
+ return
+ }
+
web.ServeHTTP(w, r, map[string]any{"methods": methods}, http.StatusUnauthorized)
}
diff --git a/proxy/internal/auth/middleware_test.go b/proxy/internal/auth/middleware_test.go
index 6063f070e..16d09800c 100644
--- a/proxy/internal/auth/middleware_test.go
+++ b/proxy/internal/auth/middleware_test.go
@@ -669,7 +669,7 @@ func TestCheckIPRestrictions_UnparseableAddress(t *testing.T) {
mw := NewMiddleware(log.StandardLogger(), nil, nil)
err := mw.AddDomain("example.com", nil, "", 0, "acc1", "svc1",
- restrict.ParseFilter([]string{"10.0.0.0/8"}, nil, nil, nil))
+ restrict.ParseFilter(restrict.FilterConfig{AllowedCIDRs: []string{"10.0.0.0/8"}}))
require.NoError(t, err)
handler := mw.Protect(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -705,7 +705,7 @@ func TestCheckIPRestrictions_UsesCapturedDataClientIP(t *testing.T) {
mw := NewMiddleware(log.StandardLogger(), nil, nil)
err := mw.AddDomain("example.com", nil, "", 0, "acc1", "svc1",
- restrict.ParseFilter([]string{"203.0.113.0/24"}, nil, nil, nil))
+ restrict.ParseFilter(restrict.FilterConfig{AllowedCIDRs: []string{"203.0.113.0/24"}}))
require.NoError(t, err)
handler := mw.Protect(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -746,7 +746,7 @@ func TestCheckIPRestrictions_NilGeoWithCountryRules(t *testing.T) {
mw := NewMiddleware(log.StandardLogger(), nil, nil)
err := mw.AddDomain("example.com", nil, "", 0, "acc1", "svc1",
- restrict.ParseFilter(nil, nil, []string{"US"}, nil))
+ restrict.ParseFilter(restrict.FilterConfig{AllowedCountries: []string{"US"}}))
require.NoError(t, err)
handler := mw.Protect(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
@@ -761,6 +761,56 @@ func TestCheckIPRestrictions_NilGeoWithCountryRules(t *testing.T) {
assert.Equal(t, http.StatusForbidden, rr.Code, "country restrictions with nil geo must deny")
}
+func TestProtect_OIDCOnlyRedirectsDirectly(t *testing.T) {
+ mw := NewMiddleware(log.StandardLogger(), nil, nil)
+ kp := generateTestKeyPair(t)
+
+ oidcURL := "https://idp.example.com/authorize?client_id=abc"
+ scheme := &stubScheme{
+ method: auth.MethodOIDC,
+ authFn: func(_ *http.Request) (string, string, error) {
+ return "", oidcURL, nil
+ },
+ }
+ require.NoError(t, mw.AddDomain("example.com", []Scheme{scheme}, kp.PublicKey, time.Hour, "", "", nil))
+
+ handler := mw.Protect(newPassthroughHandler())
+
+ req := httptest.NewRequest(http.MethodGet, "http://example.com/", nil)
+ rec := httptest.NewRecorder()
+ handler.ServeHTTP(rec, req)
+
+ assert.Equal(t, http.StatusFound, rec.Code, "should redirect directly to IdP")
+ assert.Equal(t, oidcURL, rec.Header().Get("Location"))
+}
+
+func TestProtect_OIDCWithOtherMethodShowsLoginPage(t *testing.T) {
+ mw := NewMiddleware(log.StandardLogger(), nil, nil)
+ kp := generateTestKeyPair(t)
+
+ oidcScheme := &stubScheme{
+ method: auth.MethodOIDC,
+ authFn: func(_ *http.Request) (string, string, error) {
+ return "", "https://idp.example.com/authorize", nil
+ },
+ }
+ pinScheme := &stubScheme{
+ method: auth.MethodPIN,
+ authFn: func(_ *http.Request) (string, string, error) {
+ return "", "pin", nil
+ },
+ }
+ require.NoError(t, mw.AddDomain("example.com", []Scheme{oidcScheme, pinScheme}, kp.PublicKey, time.Hour, "", "", nil))
+
+ handler := mw.Protect(newPassthroughHandler())
+
+ req := httptest.NewRequest(http.MethodGet, "http://example.com/", nil)
+ rec := httptest.NewRecorder()
+ handler.ServeHTTP(rec, req)
+
+ assert.Equal(t, http.StatusUnauthorized, rec.Code, "should show login page when multiple methods exist")
+}
+
// mockAuthenticator is a minimal mock for the authenticator gRPC interface
// used by the Header scheme.
type mockAuthenticator struct {
diff --git a/proxy/internal/crowdsec/bouncer.go b/proxy/internal/crowdsec/bouncer.go
new file mode 100644
index 000000000..06a452520
--- /dev/null
+++ b/proxy/internal/crowdsec/bouncer.go
@@ -0,0 +1,251 @@
+// Package crowdsec provides a CrowdSec stream bouncer that maintains a local
+// decision cache for IP reputation checks.
+package crowdsec
+
+import (
+ "context"
+ "errors"
+ "net/netip"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/crowdsecurity/crowdsec/pkg/models"
+ csbouncer "github.com/crowdsecurity/go-cs-bouncer"
+ log "github.com/sirupsen/logrus"
+
+ "github.com/netbirdio/netbird/proxy/internal/restrict"
+)
+
+// Bouncer wraps a CrowdSec StreamBouncer, maintaining a local cache of
+// active decisions for fast IP lookups. It implements restrict.CrowdSecChecker.
+type Bouncer struct {
+	// mu guards ips and prefixes, which together form the decision cache.
+	mu sync.RWMutex
+	// ips holds single-address decisions; keys are always Unmap()ed.
+	ips map[netip.Addr]*restrict.CrowdSecDecision
+	// prefixes holds range decisions, keyed by normalized (unmapped, masked) prefixes.
+	prefixes map[netip.Prefix]*restrict.CrowdSecDecision
+	// ready flips to true after the first decision batch has been applied.
+	ready atomic.Bool
+
+	apiURL string
+	apiKey string
+	// tickerInterval overrides the LAPI poll interval; zero selects the 10s default in Start.
+	tickerInterval time.Duration
+	logger         *log.Entry
+
+	// lifeMu protects cancel and done from concurrent Start/Stop calls.
+	lifeMu sync.Mutex
+	cancel context.CancelFunc
+	// done is closed once all stream goroutines have exited.
+	done chan struct{}
+}
+
+// Interface guard: *Bouncer must satisfy restrict.CrowdSecChecker.
+var _ restrict.CrowdSecChecker = (*Bouncer)(nil)
+
+// NewBouncer creates a bouncer but does not start the stream.
+func NewBouncer(apiURL, apiKey string, logger *log.Entry) *Bouncer {
+	b := &Bouncer{
+		apiURL: apiURL,
+		apiKey: apiKey,
+		logger: logger,
+	}
+	b.ips = make(map[netip.Addr]*restrict.CrowdSecDecision)
+	b.prefixes = make(map[netip.Prefix]*restrict.CrowdSecDecision)
+	return b
+}
+
+// Start launches the background goroutine that streams decisions from the
+// CrowdSec LAPI. The stream runs until Stop is called or ctx is cancelled.
+// It returns an error if the bouncer is already running or the initial LAPI
+// handshake fails; after a handshake failure a later Start may retry.
+func (b *Bouncer) Start(ctx context.Context) error {
+	interval := b.tickerInterval
+	if interval == 0 {
+		interval = 10 * time.Second
+	}
+
+	ctx, cancel := context.WithCancel(ctx)
+	done := make(chan struct{})
+
+	// Claim the lifecycle slot BEFORE touching any shared state. Previously
+	// the cache reset and stream.Init() ran first, so a second (or
+	// concurrent) Start wiped the running bouncer's decisions and ready
+	// flag before the "already started" check fired.
+	b.lifeMu.Lock()
+	if b.cancel != nil {
+		b.lifeMu.Unlock()
+		cancel()
+		return errors.New("bouncer already started")
+	}
+	b.cancel = cancel
+	b.done = done
+	b.lifeMu.Unlock()
+
+	// releaseSlot undoes the claim if startup fails below.
+	releaseSlot := func() {
+		b.lifeMu.Lock()
+		b.cancel = nil
+		b.done = nil
+		b.lifeMu.Unlock()
+		cancel()
+	}
+
+	stream := &csbouncer.StreamBouncer{
+		APIKey:              b.apiKey,
+		APIUrl:              b.apiURL,
+		TickerInterval:      interval.String(),
+		UserAgent:           "netbird-proxy/1.0",
+		Scopes:              []string{"ip", "range"},
+		RetryInitialConnect: true,
+	}
+
+	b.logger.Infof("connecting to CrowdSec LAPI at %s", b.apiURL)
+
+	if err := stream.Init(); err != nil {
+		releaseSlot()
+		return err
+	}
+
+	// Reset state from any previous (stopped) run.
+	b.mu.Lock()
+	b.ips = make(map[netip.Addr]*restrict.CrowdSecDecision)
+	b.prefixes = make(map[netip.Prefix]*restrict.CrowdSecDecision)
+	b.mu.Unlock()
+	b.ready.Store(false)
+
+	var wg sync.WaitGroup
+	wg.Add(2)
+
+	go func() {
+		defer wg.Done()
+		if err := stream.Run(ctx); err != nil && ctx.Err() == nil {
+			b.logger.Errorf("CrowdSec stream ended: %v", err)
+		}
+	}()
+
+	go func() {
+		defer wg.Done()
+		b.consumeStream(ctx, stream)
+	}()
+
+	go func() {
+		wg.Wait()
+		close(done)
+	}()
+
+	return nil
+}
+
+// Stop cancels the stream and waits for all goroutines to finish.
+// Calling Stop on a bouncer that was never started is a no-op.
+func (b *Bouncer) Stop() {
+	b.lifeMu.Lock()
+	cancel, done := b.cancel, b.done
+	b.cancel = nil
+	b.lifeMu.Unlock()
+
+	if cancel == nil {
+		return
+	}
+	cancel()
+	<-done
+}
+
+// Ready returns true after the first batch of decisions has been processed.
+// Until then CheckIP runs against an empty (or partially synced) cache, so
+// callers deciding whether to fail open or closed should consult this first.
+func (b *Bouncer) Ready() bool {
+	return b.ready.Load()
+}
+
+// CheckIP looks up addr in the local decision cache and returns the matching
+// decision, or nil when no active decision covers the address.
+//
+// Range lookups are O(1) in the number of stored ranges: instead of scanning
+// every prefix, the map is probed once per possible prefix length of the
+// address (33 probes for IPv4, 129 for IPv6), most-specific first so the
+// narrowest overlapping range wins.
+func (b *Bouncer) CheckIP(addr netip.Addr) *restrict.CrowdSecDecision {
+	addr = addr.Unmap()
+
+	b.mu.RLock()
+	defer b.mu.RUnlock()
+
+	// Exact single-address decisions take precedence over ranges.
+	if dec, found := b.ips[addr]; found {
+		return dec
+	}
+
+	// BitLen is 32 for IPv4 and 128 for IPv6, matching the probe counts above.
+	for bits := addr.BitLen(); bits >= 0; bits-- {
+		if dec, found := b.prefixes[netip.PrefixFrom(addr, bits).Masked()]; found {
+			return dec
+		}
+	}
+
+	return nil
+}
+
+// consumeStream applies decision batches from the stream until the context
+// is cancelled or the stream channel closes. The ready flag flips after the
+// first batch has been applied.
+func (b *Bouncer) consumeStream(ctx context.Context, stream *csbouncer.StreamBouncer) {
+	synced := false
+	for {
+		select {
+		case <-ctx.Done():
+			return
+		case resp, ok := <-stream.Stream:
+			if !ok {
+				return
+			}
+			b.mu.Lock()
+			b.applyDeleted(resp.Deleted)
+			b.applyNew(resp.New)
+			b.mu.Unlock()
+
+			if !synced {
+				synced = true
+				b.ready.Store(true)
+				b.logger.Info("CrowdSec bouncer synced initial decisions")
+			}
+		}
+	}
+}
+
+// applyDeleted removes expired or revoked decisions from the cache.
+// Callers must hold b.mu.
+func (b *Bouncer) applyDeleted(decisions []*models.Decision) {
+	for _, d := range decisions {
+		if d.Value == nil || d.Scope == nil {
+			continue
+		}
+		prefix, addr, isRange, err := parseDecisionValue(*d.Scope, *d.Value)
+		if err != nil {
+			b.logger.Debugf("skip unparsable CrowdSec deletion %q: %v", *d.Value, err)
+			continue
+		}
+		if isRange {
+			delete(b.prefixes, prefix)
+		} else {
+			delete(b.ips, addr)
+		}
+	}
+}
+
+// applyNew inserts fresh decisions into the cache. Callers must hold b.mu.
+func (b *Bouncer) applyNew(decisions []*models.Decision) {
+	for _, d := range decisions {
+		if d.Value == nil || d.Type == nil || d.Scope == nil {
+			continue
+		}
+		prefix, addr, isRange, err := parseDecisionValue(*d.Scope, *d.Value)
+		if err != nil {
+			b.logger.Debugf("skip unparsable CrowdSec decision %q: %v", *d.Value, err)
+			continue
+		}
+		dec := &restrict.CrowdSecDecision{Type: restrict.DecisionType(*d.Type)}
+		if isRange {
+			b.prefixes[prefix] = dec
+		} else {
+			b.ips[addr] = dec
+		}
+	}
+}
+
+// parseDecisionValue interprets a CrowdSec decision value as either a CIDR
+// range (scope "range", or any value containing a slash) or a single IP.
+// Exactly one of the returned prefix/addr is meaningful, selected by isRange.
+// Results are normalized (v4-mapped addresses unmapped, host bits zeroed) so
+// they are valid cache keys matching CheckIP's probe logic. Previously this
+// parsing was duplicated in applyNew and applyDeleted, risking divergence;
+// EqualFold also avoids the ToLower allocation on every decision.
+func parseDecisionValue(scope, value string) (netip.Prefix, netip.Addr, bool, error) {
+	if strings.EqualFold(scope, "range") || strings.Contains(value, "/") {
+		p, err := netip.ParsePrefix(value)
+		if err != nil {
+			return netip.Prefix{}, netip.Addr{}, true, err
+		}
+		return normalizePrefix(p), netip.Addr{}, true, nil
+	}
+	a, err := netip.ParseAddr(value)
+	if err != nil {
+		return netip.Prefix{}, netip.Addr{}, false, err
+	}
+	return netip.Prefix{}, a.Unmap(), false, nil
+}
+
+// normalizePrefix unmaps v4-mapped-v6 addresses and zeros host bits so
+// the prefix is a valid map key that matches CheckIP's probe logic.
+func normalizePrefix(p netip.Prefix) netip.Prefix {
+	return netip.PrefixFrom(p.Addr().Unmap(), p.Bits()).Masked()
+}
diff --git a/proxy/internal/crowdsec/bouncer_test.go b/proxy/internal/crowdsec/bouncer_test.go
new file mode 100644
index 000000000..3bd8aa068
--- /dev/null
+++ b/proxy/internal/crowdsec/bouncer_test.go
@@ -0,0 +1,337 @@
+package crowdsec
+
+import (
+ "context"
+ "encoding/json"
+ "net/http"
+ "net/http/httptest"
+ "net/netip"
+ "sync"
+ "testing"
+ "time"
+
+ "github.com/crowdsecurity/crowdsec/pkg/models"
+ log "github.com/sirupsen/logrus"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+
+ "github.com/netbirdio/netbird/proxy/internal/restrict"
+)
+
+func TestBouncer_CheckIP_Empty(t *testing.T) {
+ b := newTestBouncer()
+ b.ready.Store(true)
+
+ assert.Nil(t, b.CheckIP(netip.MustParseAddr("1.2.3.4")))
+}
+
+func TestBouncer_CheckIP_ExactMatch(t *testing.T) {
+ b := newTestBouncer()
+ b.ready.Store(true)
+ b.ips[netip.MustParseAddr("10.0.0.1")] = &restrict.CrowdSecDecision{Type: restrict.DecisionBan}
+
+ d := b.CheckIP(netip.MustParseAddr("10.0.0.1"))
+ require.NotNil(t, d)
+ assert.Equal(t, restrict.DecisionBan, d.Type)
+
+ assert.Nil(t, b.CheckIP(netip.MustParseAddr("10.0.0.2")))
+}
+
+func TestBouncer_CheckIP_PrefixMatch(t *testing.T) {
+ b := newTestBouncer()
+ b.ready.Store(true)
+ b.prefixes[netip.MustParsePrefix("192.168.1.0/24")] = &restrict.CrowdSecDecision{Type: restrict.DecisionBan}
+
+ d := b.CheckIP(netip.MustParseAddr("192.168.1.100"))
+ require.NotNil(t, d)
+ assert.Equal(t, restrict.DecisionBan, d.Type)
+
+ assert.Nil(t, b.CheckIP(netip.MustParseAddr("192.168.2.1")))
+}
+
+func TestBouncer_CheckIP_UnmapsV4InV6(t *testing.T) {
+ b := newTestBouncer()
+ b.ready.Store(true)
+ b.ips[netip.MustParseAddr("10.0.0.1")] = &restrict.CrowdSecDecision{Type: restrict.DecisionBan}
+
+ d := b.CheckIP(netip.MustParseAddr("::ffff:10.0.0.1"))
+ require.NotNil(t, d)
+ assert.Equal(t, restrict.DecisionBan, d.Type)
+}
+
+func TestBouncer_Ready(t *testing.T) {
+ b := newTestBouncer()
+ assert.False(t, b.Ready())
+
+ b.ready.Store(true)
+ assert.True(t, b.Ready())
+}
+
+func TestBouncer_CheckIP_ExactBeforePrefix(t *testing.T) {
+ b := newTestBouncer()
+ b.ready.Store(true)
+ b.ips[netip.MustParseAddr("10.0.0.1")] = &restrict.CrowdSecDecision{Type: restrict.DecisionCaptcha}
+ b.prefixes[netip.MustParsePrefix("10.0.0.0/8")] = &restrict.CrowdSecDecision{Type: restrict.DecisionBan}
+
+ d := b.CheckIP(netip.MustParseAddr("10.0.0.1"))
+ require.NotNil(t, d)
+ assert.Equal(t, restrict.DecisionCaptcha, d.Type)
+
+ d2 := b.CheckIP(netip.MustParseAddr("10.0.0.2"))
+ require.NotNil(t, d2)
+ assert.Equal(t, restrict.DecisionBan, d2.Type)
+}
+
+func TestBouncer_ApplyNew_IP(t *testing.T) {
+ b := newTestBouncer()
+
+ b.applyNew(makeDecisions(
+ decision{scope: "ip", value: "1.2.3.4", dtype: "ban", scenario: "test/brute"},
+ decision{scope: "ip", value: "5.6.7.8", dtype: "captcha", scenario: "test/crawl"},
+ ))
+
+ require.Len(t, b.ips, 2)
+ assert.Equal(t, restrict.DecisionBan, b.ips[netip.MustParseAddr("1.2.3.4")].Type)
+ assert.Equal(t, restrict.DecisionCaptcha, b.ips[netip.MustParseAddr("5.6.7.8")].Type)
+}
+
+func TestBouncer_ApplyNew_Range(t *testing.T) {
+ b := newTestBouncer()
+
+ b.applyNew(makeDecisions(
+ decision{scope: "range", value: "10.0.0.0/8", dtype: "ban"},
+ ))
+
+ require.Len(t, b.prefixes, 1)
+ assert.NotNil(t, b.prefixes[netip.MustParsePrefix("10.0.0.0/8")])
+}
+
+func TestBouncer_ApplyDeleted_IP(t *testing.T) {
+ b := newTestBouncer()
+ b.ips[netip.MustParseAddr("1.2.3.4")] = &restrict.CrowdSecDecision{Type: restrict.DecisionBan}
+ b.ips[netip.MustParseAddr("5.6.7.8")] = &restrict.CrowdSecDecision{Type: restrict.DecisionBan}
+
+ b.applyDeleted(makeDecisions(
+ decision{scope: "ip", value: "1.2.3.4", dtype: "ban"},
+ ))
+
+ assert.Len(t, b.ips, 1)
+ assert.Nil(t, b.ips[netip.MustParseAddr("1.2.3.4")])
+ assert.NotNil(t, b.ips[netip.MustParseAddr("5.6.7.8")])
+}
+
+func TestBouncer_ApplyDeleted_Range(t *testing.T) {
+ b := newTestBouncer()
+ b.prefixes[netip.MustParsePrefix("10.0.0.0/8")] = &restrict.CrowdSecDecision{Type: restrict.DecisionBan}
+ b.prefixes[netip.MustParsePrefix("192.168.0.0/16")] = &restrict.CrowdSecDecision{Type: restrict.DecisionBan}
+
+ b.applyDeleted(makeDecisions(
+ decision{scope: "range", value: "10.0.0.0/8", dtype: "ban"},
+ ))
+
+ require.Len(t, b.prefixes, 1)
+ assert.NotNil(t, b.prefixes[netip.MustParsePrefix("192.168.0.0/16")])
+}
+
+func TestBouncer_ApplyNew_OverwritesExisting(t *testing.T) {
+ b := newTestBouncer()
+ b.ips[netip.MustParseAddr("1.2.3.4")] = &restrict.CrowdSecDecision{Type: restrict.DecisionBan}
+
+ b.applyNew(makeDecisions(
+ decision{scope: "ip", value: "1.2.3.4", dtype: "captcha"},
+ ))
+
+ assert.Equal(t, restrict.DecisionCaptcha, b.ips[netip.MustParseAddr("1.2.3.4")].Type)
+}
+
+func TestBouncer_ApplyNew_SkipsInvalid(t *testing.T) {
+ b := newTestBouncer()
+
+ b.applyNew(makeDecisions(
+ decision{scope: "ip", value: "not-an-ip", dtype: "ban"},
+ decision{scope: "range", value: "also-not-valid", dtype: "ban"},
+ ))
+
+ assert.Empty(t, b.ips)
+ assert.Empty(t, b.prefixes)
+}
+
+// TestBouncer_StreamIntegration tests the full flow: fake LAPI → StreamBouncer → Bouncer cache → CheckIP.
+func TestBouncer_StreamIntegration(t *testing.T) {
+ lapi := newFakeLAPI()
+ ts := httptest.NewServer(lapi)
+ defer ts.Close()
+
+ // Seed the LAPI with initial decisions.
+ lapi.setDecisions(
+ decision{scope: "ip", value: "1.2.3.4", dtype: "ban", scenario: "crowdsecurity/ssh-bf"},
+ decision{scope: "range", value: "10.0.0.0/8", dtype: "ban", scenario: "crowdsecurity/http-probing"},
+ decision{scope: "ip", value: "5.5.5.5", dtype: "captcha", scenario: "crowdsecurity/http-crawl"},
+ )
+
+ b := NewBouncer(ts.URL, "test-key", log.NewEntry(log.StandardLogger()))
+ b.tickerInterval = 200 * time.Millisecond
+
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ require.NoError(t, b.Start(ctx))
+ defer b.Stop()
+
+ // Wait for initial sync.
+ require.Eventually(t, b.Ready, 5*time.Second, 50*time.Millisecond, "bouncer should become ready")
+
+ // Verify decisions are cached.
+ d := b.CheckIP(netip.MustParseAddr("1.2.3.4"))
+ require.NotNil(t, d, "1.2.3.4 should be banned")
+ assert.Equal(t, restrict.DecisionBan, d.Type)
+
+ d2 := b.CheckIP(netip.MustParseAddr("10.1.2.3"))
+ require.NotNil(t, d2, "10.1.2.3 should match range ban")
+ assert.Equal(t, restrict.DecisionBan, d2.Type)
+
+ d3 := b.CheckIP(netip.MustParseAddr("5.5.5.5"))
+ require.NotNil(t, d3, "5.5.5.5 should have captcha")
+ assert.Equal(t, restrict.DecisionCaptcha, d3.Type)
+
+ assert.Nil(t, b.CheckIP(netip.MustParseAddr("9.9.9.9")), "unknown IP should be nil")
+
+ // Simulate a delta update: delete one IP, add a new one.
+ lapi.setDelta(
+ []decision{{scope: "ip", value: "1.2.3.4", dtype: "ban"}},
+ []decision{{scope: "ip", value: "2.3.4.5", dtype: "throttle", scenario: "crowdsecurity/http-flood"}},
+ )
+
+ // Wait for the delta to be picked up.
+ require.Eventually(t, func() bool {
+ return b.CheckIP(netip.MustParseAddr("2.3.4.5")) != nil
+ }, 5*time.Second, 50*time.Millisecond, "new decision should appear")
+
+ assert.Nil(t, b.CheckIP(netip.MustParseAddr("1.2.3.4")), "deleted decision should be gone")
+
+ d4 := b.CheckIP(netip.MustParseAddr("2.3.4.5"))
+ require.NotNil(t, d4)
+ assert.Equal(t, restrict.DecisionThrottle, d4.Type)
+
+ // Range ban should still be active.
+ assert.NotNil(t, b.CheckIP(netip.MustParseAddr("10.99.99.99")))
+}
+
+// Helpers
+
+func newTestBouncer() *Bouncer {
+ return &Bouncer{
+ ips: make(map[netip.Addr]*restrict.CrowdSecDecision),
+ prefixes: make(map[netip.Prefix]*restrict.CrowdSecDecision),
+ logger: log.NewEntry(log.StandardLogger()),
+ }
+}
+
+type decision struct {
+ scope string
+ value string
+ dtype string
+ scenario string
+}
+
+func makeDecisions(decs ...decision) []*models.Decision {
+ out := make([]*models.Decision, len(decs))
+ for i, d := range decs {
+ out[i] = &models.Decision{
+ Scope: strPtr(d.scope),
+ Value: strPtr(d.value),
+ Type: strPtr(d.dtype),
+ Scenario: strPtr(d.scenario),
+ Duration: strPtr("1h"),
+ Origin: strPtr("cscli"),
+ }
+ }
+ return out
+}
+
+func strPtr(s string) *string { return &s }
+
+// fakeLAPI is a minimal fake CrowdSec LAPI that serves /v1/decisions/stream.
+type fakeLAPI struct {
+ mu sync.Mutex
+ initial []decision
+ newDelta []decision
+ delDelta []decision
+ served bool // true after the initial snapshot has been served
+}
+
+func newFakeLAPI() *fakeLAPI {
+ return &fakeLAPI{}
+}
+
+func (f *fakeLAPI) setDecisions(decs ...decision) {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ f.initial = decs
+ f.served = false
+}
+
+func (f *fakeLAPI) setDelta(deleted, added []decision) {
+ f.mu.Lock()
+ defer f.mu.Unlock()
+ f.delDelta = deleted
+ f.newDelta = added
+}
+
+func (f *fakeLAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ if r.URL.Path != "/v1/decisions/stream" {
+ http.NotFound(w, r)
+ return
+ }
+
+ f.mu.Lock()
+ defer f.mu.Unlock()
+
+ resp := streamResponse{}
+
+ if !f.served {
+ for _, d := range f.initial {
+ resp.New = append(resp.New, toLAPIDecision(d))
+ }
+ f.served = true
+ } else {
+ for _, d := range f.delDelta {
+ resp.Deleted = append(resp.Deleted, toLAPIDecision(d))
+ }
+ for _, d := range f.newDelta {
+ resp.New = append(resp.New, toLAPIDecision(d))
+ }
+ // Clear delta after serving once.
+ f.delDelta = nil
+ f.newDelta = nil
+ }
+
+ w.Header().Set("Content-Type", "application/json")
+ json.NewEncoder(w).Encode(resp) //nolint:errcheck
+}
+
+// streamResponse mirrors the CrowdSec LAPI /v1/decisions/stream JSON structure.
+type streamResponse struct {
+ New []*lapiDecision `json:"new"`
+ Deleted []*lapiDecision `json:"deleted"`
+}
+
+type lapiDecision struct {
+ Duration *string `json:"duration"`
+ Origin *string `json:"origin"`
+ Scenario *string `json:"scenario"`
+ Scope *string `json:"scope"`
+ Type *string `json:"type"`
+ Value *string `json:"value"`
+}
+
+func toLAPIDecision(d decision) *lapiDecision {
+ return &lapiDecision{
+ Duration: strPtr("1h"),
+ Origin: strPtr("cscli"),
+ Scenario: strPtr(d.scenario),
+ Scope: strPtr(d.scope),
+ Type: strPtr(d.dtype),
+ Value: strPtr(d.value),
+ }
+}
diff --git a/proxy/internal/crowdsec/registry.go b/proxy/internal/crowdsec/registry.go
new file mode 100644
index 000000000..652fb6f9f
--- /dev/null
+++ b/proxy/internal/crowdsec/registry.go
@@ -0,0 +1,103 @@
+package crowdsec
+
+import (
+ "context"
+ "sync"
+
+ log "github.com/sirupsen/logrus"
+
+ "github.com/netbirdio/netbird/proxy/internal/types"
+)
+
+// Registry manages a single shared Bouncer instance with reference counting.
+// The bouncer starts when the first service acquires it and stops when the
+// last service releases it.
+type Registry struct {
+ mu sync.Mutex
+ bouncer *Bouncer
+ refs map[types.ServiceID]struct{}
+ apiURL string
+ apiKey string
+ logger *log.Entry
+ cancel context.CancelFunc
+}
+
+// NewRegistry creates a registry. The bouncer is not started until Acquire is called.
+func NewRegistry(apiURL, apiKey string, logger *log.Entry) *Registry {
+ return &Registry{
+ apiURL: apiURL,
+ apiKey: apiKey,
+ logger: logger,
+ refs: make(map[types.ServiceID]struct{}),
+ }
+}
+
+// Available returns true when the LAPI URL and API key are configured.
+func (r *Registry) Available() bool {
+ return r.apiURL != "" && r.apiKey != ""
+}
+
+// Acquire registers svcID as a consumer and starts the bouncer if this is the
+// first consumer. Returns the shared Bouncer (which implements the restrict
+// package's CrowdSecChecker interface). Returns nil if not Available.
+func (r *Registry) Acquire(svcID types.ServiceID) *Bouncer {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ if !r.Available() {
+ return nil
+ }
+
+ if _, exists := r.refs[svcID]; exists {
+ return r.bouncer
+ }
+
+ if r.bouncer == nil {
+ r.startLocked()
+ }
+
+ // startLocked may fail, leaving r.bouncer nil.
+ if r.bouncer == nil {
+ return nil
+ }
+
+ r.refs[svcID] = struct{}{}
+ return r.bouncer
+}
+
+// Release removes svcID as a consumer. Stops the bouncer when the last
+// consumer releases.
+func (r *Registry) Release(svcID types.ServiceID) {
+ r.mu.Lock()
+ defer r.mu.Unlock()
+
+ delete(r.refs, svcID)
+
+ if len(r.refs) == 0 && r.bouncer != nil {
+ r.stopLocked()
+ }
+}
+
+func (r *Registry) startLocked() {
+ b := NewBouncer(r.apiURL, r.apiKey, r.logger)
+
+ ctx, cancel := context.WithCancel(context.Background())
+ r.cancel = cancel
+
+ if err := b.Start(ctx); err != nil {
+ r.logger.Errorf("failed to start CrowdSec bouncer: %v", err)
+ cancel()
+ return
+ }
+
+ r.bouncer = b
+ r.logger.Info("CrowdSec bouncer started")
+}
+
+func (r *Registry) stopLocked() {
+ r.bouncer.Stop()
+ r.cancel()
+ r.bouncer = nil
+ r.cancel = nil
+ r.logger.Info("CrowdSec bouncer stopped")
+}
diff --git a/proxy/internal/crowdsec/registry_test.go b/proxy/internal/crowdsec/registry_test.go
new file mode 100644
index 000000000..f1567b186
--- /dev/null
+++ b/proxy/internal/crowdsec/registry_test.go
@@ -0,0 +1,66 @@
+package crowdsec
+
+import (
+ "testing"
+
+ log "github.com/sirupsen/logrus"
+ "github.com/stretchr/testify/assert"
+
+ "github.com/netbirdio/netbird/proxy/internal/types"
+)
+
+func TestRegistry_Available(t *testing.T) {
+ r := NewRegistry("http://localhost:8080/", "test-key", log.NewEntry(log.StandardLogger()))
+ assert.True(t, r.Available())
+
+ r2 := NewRegistry("", "", log.NewEntry(log.StandardLogger()))
+ assert.False(t, r2.Available())
+
+ r3 := NewRegistry("http://localhost:8080/", "", log.NewEntry(log.StandardLogger()))
+ assert.False(t, r3.Available())
+}
+
+func TestRegistry_Acquire_NotAvailable(t *testing.T) {
+ r := NewRegistry("", "", log.NewEntry(log.StandardLogger()))
+ b := r.Acquire("svc-1")
+ assert.Nil(t, b)
+}
+
+func TestRegistry_Acquire_Idempotent(t *testing.T) {
+ r := newTestRegistry()
+
+ b1 := r.Acquire("svc-1")
+ // No real LAPI is listening; this test only verifies the ref tracking.
+ // Acquire records a ref only when startLocked leaves a non-nil bouncer.
+ _ = b1
+
+ assert.Len(t, r.refs, 1)
+
+ // Second acquire of same service should not add another ref.
+ r.Acquire("svc-1")
+ assert.Len(t, r.refs, 1)
+}
+
+func TestRegistry_Release_Removes(t *testing.T) {
+ r := newTestRegistry()
+ r.refs[types.ServiceID("svc-1")] = struct{}{}
+
+ r.Release("svc-1")
+ assert.Empty(t, r.refs)
+}
+
+func TestRegistry_Release_Noop(t *testing.T) {
+ r := newTestRegistry()
+ // Releasing a service that was never acquired should not panic.
+ r.Release("nonexistent")
+ assert.Empty(t, r.refs)
+}
+
+func newTestRegistry() *Registry {
+ return &Registry{
+ apiURL: "http://localhost:8080/",
+ apiKey: "test-key",
+ logger: log.NewEntry(log.StandardLogger()),
+ refs: make(map[types.ServiceID]struct{}),
+ }
+}
diff --git a/proxy/internal/proxy/context.go b/proxy/internal/proxy/context.go
index d3f67dc57..a888ad9ed 100644
--- a/proxy/internal/proxy/context.go
+++ b/proxy/internal/proxy/context.go
@@ -2,6 +2,7 @@ package proxy
import (
"context"
+ "maps"
"net/netip"
"sync"
@@ -52,6 +53,7 @@ type CapturedData struct {
clientIP netip.Addr
userID string
authMethod string
+ metadata map[string]string
}
// NewCapturedData creates a CapturedData with the given request ID.
@@ -150,6 +152,23 @@ func (c *CapturedData) GetAuthMethod() string {
return c.authMethod
}
+// SetMetadata sets a key-value pair in the metadata map.
+func (c *CapturedData) SetMetadata(key, value string) {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+ if c.metadata == nil {
+ c.metadata = make(map[string]string)
+ }
+ c.metadata[key] = value
+}
+
+// GetMetadata returns a copy of the metadata map.
+func (c *CapturedData) GetMetadata() map[string]string {
+ c.mu.RLock()
+ defer c.mu.RUnlock()
+ return maps.Clone(c.metadata)
+}
+
// WithCapturedData adds a CapturedData struct to the context.
func WithCapturedData(ctx context.Context, data *CapturedData) context.Context {
return context.WithValue(ctx, capturedDataKey, data)
diff --git a/proxy/internal/restrict/restrict.go b/proxy/internal/restrict/restrict.go
index a0d99ce93..f3e0fa695 100644
--- a/proxy/internal/restrict/restrict.go
+++ b/proxy/internal/restrict/restrict.go
@@ -12,12 +12,44 @@ import (
"github.com/netbirdio/netbird/proxy/internal/geolocation"
)
+// defaultLogger is used when no logger is provided to ParseFilter.
+var defaultLogger = log.NewEntry(log.StandardLogger())
+
// GeoResolver resolves an IP address to geographic information.
type GeoResolver interface {
LookupAddr(addr netip.Addr) geolocation.Result
Available() bool
}
+// DecisionType is the type of CrowdSec remediation action.
+type DecisionType string
+
+const (
+ DecisionBan DecisionType = "ban"
+ DecisionCaptcha DecisionType = "captcha"
+ DecisionThrottle DecisionType = "throttle"
+)
+
+// CrowdSecDecision holds the type of a CrowdSec decision.
+type CrowdSecDecision struct {
+ Type DecisionType
+}
+
+// CrowdSecChecker queries CrowdSec decisions for an IP address.
+type CrowdSecChecker interface {
+ CheckIP(addr netip.Addr) *CrowdSecDecision
+ Ready() bool
+}
+
+// CrowdSecMode is the per-service enforcement mode.
+type CrowdSecMode string
+
+const (
+ CrowdSecOff CrowdSecMode = ""
+ CrowdSecEnforce CrowdSecMode = "enforce"
+ CrowdSecObserve CrowdSecMode = "observe"
+)
+
// Filter evaluates IP restrictions. CIDR checks are performed first
// (cheap), followed by country lookups (more expensive) only when needed.
type Filter struct {
@@ -25,32 +57,55 @@ type Filter struct {
BlockedCIDRs []netip.Prefix
AllowedCountries []string
BlockedCountries []string
+ CrowdSec CrowdSecChecker
+ CrowdSecMode CrowdSecMode
}
-// ParseFilter builds a Filter from the raw string slices. Returns nil
-// if all slices are empty.
-func ParseFilter(allowedCIDRs, blockedCIDRs, allowedCountries, blockedCountries []string) *Filter {
- if len(allowedCIDRs) == 0 && len(blockedCIDRs) == 0 &&
- len(allowedCountries) == 0 && len(blockedCountries) == 0 {
+// FilterConfig holds the raw configuration for building a Filter.
+type FilterConfig struct {
+ AllowedCIDRs []string
+ BlockedCIDRs []string
+ AllowedCountries []string
+ BlockedCountries []string
+ CrowdSec CrowdSecChecker
+ CrowdSecMode CrowdSecMode
+ Logger *log.Entry
+}
+
+// ParseFilter builds a Filter from the config. Returns nil if no restrictions
+// are configured.
+func ParseFilter(cfg FilterConfig) *Filter {
+ hasCS := cfg.CrowdSecMode == CrowdSecEnforce || cfg.CrowdSecMode == CrowdSecObserve
+ if len(cfg.AllowedCIDRs) == 0 && len(cfg.BlockedCIDRs) == 0 &&
+ len(cfg.AllowedCountries) == 0 && len(cfg.BlockedCountries) == 0 && !hasCS {
return nil
}
- f := &Filter{
- AllowedCountries: normalizeCountryCodes(allowedCountries),
- BlockedCountries: normalizeCountryCodes(blockedCountries),
+ logger := cfg.Logger
+ if logger == nil {
+ logger = defaultLogger
}
- for _, cidr := range allowedCIDRs {
+
+ f := &Filter{
+ AllowedCountries: normalizeCountryCodes(cfg.AllowedCountries),
+ BlockedCountries: normalizeCountryCodes(cfg.BlockedCountries),
+ }
+ if hasCS {
+ f.CrowdSec = cfg.CrowdSec
+ f.CrowdSecMode = cfg.CrowdSecMode
+ }
+ for _, cidr := range cfg.AllowedCIDRs {
prefix, err := netip.ParsePrefix(cidr)
if err != nil {
- log.Warnf("skip invalid allowed CIDR %q: %v", cidr, err)
+ logger.Warnf("skip invalid allowed CIDR %q: %v", cidr, err)
continue
}
f.AllowedCIDRs = append(f.AllowedCIDRs, prefix.Masked())
}
- for _, cidr := range blockedCIDRs {
+ for _, cidr := range cfg.BlockedCIDRs {
prefix, err := netip.ParsePrefix(cidr)
if err != nil {
- log.Warnf("skip invalid blocked CIDR %q: %v", cidr, err)
+ logger.Warnf("skip invalid blocked CIDR %q: %v", cidr, err)
continue
}
f.BlockedCIDRs = append(f.BlockedCIDRs, prefix.Masked())
@@ -82,6 +137,15 @@ const (
// DenyGeoUnavailable indicates that country restrictions are configured
// but the geo lookup is unavailable.
DenyGeoUnavailable
+ // DenyCrowdSecBan indicates a CrowdSec "ban" decision.
+ DenyCrowdSecBan
+ // DenyCrowdSecCaptcha indicates a CrowdSec "captcha" decision.
+ DenyCrowdSecCaptcha
+ // DenyCrowdSecThrottle indicates a CrowdSec "throttle" decision.
+ DenyCrowdSecThrottle
+ // DenyCrowdSecUnavailable indicates enforce mode while the CrowdSec
+ // checker is missing or has not completed its initial sync.
+ DenyCrowdSecUnavailable
)
// String returns the deny reason string matching the HTTP auth mechanism names.
@@ -95,14 +159,42 @@ func (v Verdict) String() string {
return "country_restricted"
case DenyGeoUnavailable:
return "geo_unavailable"
+ case DenyCrowdSecBan:
+ return "crowdsec_ban"
+ case DenyCrowdSecCaptcha:
+ return "crowdsec_captcha"
+ case DenyCrowdSecThrottle:
+ return "crowdsec_throttle"
+ case DenyCrowdSecUnavailable:
+ return "crowdsec_unavailable"
default:
return "unknown"
}
}
+// IsCrowdSec returns true when the verdict originates from a CrowdSec check.
+func (v Verdict) IsCrowdSec() bool {
+ switch v {
+ case DenyCrowdSecBan, DenyCrowdSecCaptcha, DenyCrowdSecThrottle, DenyCrowdSecUnavailable:
+ return true
+ default:
+ return false
+ }
+}
+
+// IsObserveOnly returns true when v is a CrowdSec verdict and the filter is in
+// observe mode. Callers should log the verdict but not block the request.
+func (f *Filter) IsObserveOnly(v Verdict) bool {
+ if f == nil {
+ return false
+ }
+ return v.IsCrowdSec() && f.CrowdSecMode == CrowdSecObserve
+}
+
// Check evaluates whether addr is permitted. CIDR rules are evaluated
// first because they are O(n) prefix comparisons. Country rules run
-// only when CIDR checks pass and require a geo lookup.
+// only when CIDR checks pass and require a geo lookup. CrowdSec checks
+// run last.
func (f *Filter) Check(addr netip.Addr, geo GeoResolver) Verdict {
if f == nil {
return Allow
@@ -115,7 +207,10 @@ func (f *Filter) Check(addr netip.Addr, geo GeoResolver) Verdict {
if v := f.checkCIDR(addr); v != Allow {
return v
}
- return f.checkCountry(addr, geo)
+ if v := f.checkCountry(addr, geo); v != Allow {
+ return v
+ }
+ return f.checkCrowdSec(addr)
}
func (f *Filter) checkCIDR(addr netip.Addr) Verdict {
@@ -173,11 +268,48 @@ func (f *Filter) checkCountry(addr netip.Addr, geo GeoResolver) Verdict {
return Allow
}
+func (f *Filter) checkCrowdSec(addr netip.Addr) Verdict {
+ if f.CrowdSecMode == CrowdSecOff {
+ return Allow
+ }
+
+ // Checker nil with enforce means CrowdSec was requested but the proxy
+ // has no LAPI configured. Fail-closed.
+ if f.CrowdSec == nil {
+ if f.CrowdSecMode == CrowdSecEnforce {
+ return DenyCrowdSecUnavailable
+ }
+ return Allow
+ }
+
+ if !f.CrowdSec.Ready() {
+ if f.CrowdSecMode == CrowdSecEnforce {
+ return DenyCrowdSecUnavailable
+ }
+ return Allow
+ }
+
+ d := f.CrowdSec.CheckIP(addr)
+ if d == nil {
+ return Allow
+ }
+
+ switch d.Type {
+ case DecisionCaptcha:
+ return DenyCrowdSecCaptcha
+ case DecisionThrottle:
+ return DenyCrowdSecThrottle
+ default:
+ return DenyCrowdSecBan
+ }
+}
+
// HasRestrictions returns true if any restriction rules are configured.
func (f *Filter) HasRestrictions() bool {
if f == nil {
return false
}
return len(f.AllowedCIDRs) > 0 || len(f.BlockedCIDRs) > 0 ||
- len(f.AllowedCountries) > 0 || len(f.BlockedCountries) > 0
+ len(f.AllowedCountries) > 0 || len(f.BlockedCountries) > 0 ||
+ f.CrowdSecMode == CrowdSecEnforce || f.CrowdSecMode == CrowdSecObserve
}
diff --git a/proxy/internal/restrict/restrict_test.go b/proxy/internal/restrict/restrict_test.go
index 17a5848d8..abaa1afdc 100644
--- a/proxy/internal/restrict/restrict_test.go
+++ b/proxy/internal/restrict/restrict_test.go
@@ -29,21 +29,21 @@ func TestFilter_Check_NilFilter(t *testing.T) {
}
func TestFilter_Check_AllowedCIDR(t *testing.T) {
- f := ParseFilter([]string{"10.0.0.0/8"}, nil, nil, nil)
+ f := ParseFilter(FilterConfig{AllowedCIDRs: []string{"10.0.0.0/8"}})
assert.Equal(t, Allow, f.Check(netip.MustParseAddr("10.1.2.3"), nil))
assert.Equal(t, DenyCIDR, f.Check(netip.MustParseAddr("192.168.1.1"), nil))
}
func TestFilter_Check_BlockedCIDR(t *testing.T) {
- f := ParseFilter(nil, []string{"10.0.0.0/8"}, nil, nil)
+ f := ParseFilter(FilterConfig{BlockedCIDRs: []string{"10.0.0.0/8"}})
assert.Equal(t, DenyCIDR, f.Check(netip.MustParseAddr("10.1.2.3"), nil))
assert.Equal(t, Allow, f.Check(netip.MustParseAddr("192.168.1.1"), nil))
}
func TestFilter_Check_AllowedAndBlockedCIDR(t *testing.T) {
- f := ParseFilter([]string{"10.0.0.0/8"}, []string{"10.1.0.0/16"}, nil, nil)
+ f := ParseFilter(FilterConfig{AllowedCIDRs: []string{"10.0.0.0/8"}, BlockedCIDRs: []string{"10.1.0.0/16"}})
assert.Equal(t, Allow, f.Check(netip.MustParseAddr("10.2.3.4"), nil), "allowed by allowlist, not in blocklist")
assert.Equal(t, DenyCIDR, f.Check(netip.MustParseAddr("10.1.2.3"), nil), "allowed by allowlist but in blocklist")
@@ -56,7 +56,7 @@ func TestFilter_Check_AllowedCountry(t *testing.T) {
"2.2.2.2": "DE",
"3.3.3.3": "CN",
})
- f := ParseFilter(nil, nil, []string{"US", "DE"}, nil)
+ f := ParseFilter(FilterConfig{AllowedCountries: []string{"US", "DE"}})
assert.Equal(t, Allow, f.Check(netip.MustParseAddr("1.1.1.1"), geo), "US in allowlist")
assert.Equal(t, Allow, f.Check(netip.MustParseAddr("2.2.2.2"), geo), "DE in allowlist")
@@ -69,7 +69,7 @@ func TestFilter_Check_BlockedCountry(t *testing.T) {
"2.2.2.2": "RU",
"3.3.3.3": "US",
})
- f := ParseFilter(nil, nil, nil, []string{"CN", "RU"})
+ f := ParseFilter(FilterConfig{BlockedCountries: []string{"CN", "RU"}})
assert.Equal(t, DenyCountry, f.Check(netip.MustParseAddr("1.1.1.1"), geo), "CN in blocklist")
assert.Equal(t, DenyCountry, f.Check(netip.MustParseAddr("2.2.2.2"), geo), "RU in blocklist")
@@ -83,7 +83,7 @@ func TestFilter_Check_AllowedAndBlockedCountry(t *testing.T) {
"3.3.3.3": "CN",
})
// Allow US and DE, but block DE explicitly.
- f := ParseFilter(nil, nil, []string{"US", "DE"}, []string{"DE"})
+ f := ParseFilter(FilterConfig{AllowedCountries: []string{"US", "DE"}, BlockedCountries: []string{"DE"}})
assert.Equal(t, Allow, f.Check(netip.MustParseAddr("1.1.1.1"), geo), "US allowed and not blocked")
assert.Equal(t, DenyCountry, f.Check(netip.MustParseAddr("2.2.2.2"), geo), "DE allowed but also blocked, block wins")
@@ -94,7 +94,7 @@ func TestFilter_Check_UnknownCountryWithAllowlist(t *testing.T) {
geo := newMockGeo(map[string]string{
"1.1.1.1": "US",
})
- f := ParseFilter(nil, nil, []string{"US"}, nil)
+ f := ParseFilter(FilterConfig{AllowedCountries: []string{"US"}})
assert.Equal(t, Allow, f.Check(netip.MustParseAddr("1.1.1.1"), geo), "known US in allowlist")
assert.Equal(t, DenyCountry, f.Check(netip.MustParseAddr("9.9.9.9"), geo), "unknown country denied when allowlist is active")
@@ -104,34 +104,34 @@ func TestFilter_Check_UnknownCountryWithBlocklistOnly(t *testing.T) {
geo := newMockGeo(map[string]string{
"1.1.1.1": "CN",
})
- f := ParseFilter(nil, nil, nil, []string{"CN"})
+ f := ParseFilter(FilterConfig{BlockedCountries: []string{"CN"}})
assert.Equal(t, DenyCountry, f.Check(netip.MustParseAddr("1.1.1.1"), geo), "known CN in blocklist")
assert.Equal(t, Allow, f.Check(netip.MustParseAddr("9.9.9.9"), geo), "unknown country allowed when only blocklist is active")
}
func TestFilter_Check_CountryWithoutGeo(t *testing.T) {
- f := ParseFilter(nil, nil, []string{"US"}, nil)
+ f := ParseFilter(FilterConfig{AllowedCountries: []string{"US"}})
assert.Equal(t, DenyGeoUnavailable, f.Check(netip.MustParseAddr("1.2.3.4"), nil), "nil geo with country allowlist")
}
func TestFilter_Check_CountryBlocklistWithoutGeo(t *testing.T) {
- f := ParseFilter(nil, nil, nil, []string{"CN"})
+ f := ParseFilter(FilterConfig{BlockedCountries: []string{"CN"}})
assert.Equal(t, DenyGeoUnavailable, f.Check(netip.MustParseAddr("1.2.3.4"), nil), "nil geo with country blocklist")
}
func TestFilter_Check_GeoUnavailable(t *testing.T) {
geo := &unavailableGeo{}
- f := ParseFilter(nil, nil, []string{"US"}, nil)
+ f := ParseFilter(FilterConfig{AllowedCountries: []string{"US"}})
assert.Equal(t, DenyGeoUnavailable, f.Check(netip.MustParseAddr("1.2.3.4"), geo), "unavailable geo with country allowlist")
- f2 := ParseFilter(nil, nil, nil, []string{"CN"})
+ f2 := ParseFilter(FilterConfig{BlockedCountries: []string{"CN"}})
assert.Equal(t, DenyGeoUnavailable, f2.Check(netip.MustParseAddr("1.2.3.4"), geo), "unavailable geo with country blocklist")
}
func TestFilter_Check_CIDROnlySkipsGeo(t *testing.T) {
- f := ParseFilter([]string{"10.0.0.0/8"}, nil, nil, nil)
+ f := ParseFilter(FilterConfig{AllowedCIDRs: []string{"10.0.0.0/8"}})
// CIDR-only filter should never touch geo, so nil geo is fine.
assert.Equal(t, Allow, f.Check(netip.MustParseAddr("10.1.2.3"), nil))
@@ -143,7 +143,7 @@ func TestFilter_Check_CIDRAllowThenCountryBlock(t *testing.T) {
"10.1.2.3": "CN",
"10.2.3.4": "US",
})
- f := ParseFilter([]string{"10.0.0.0/8"}, nil, nil, []string{"CN"})
+ f := ParseFilter(FilterConfig{AllowedCIDRs: []string{"10.0.0.0/8"}, BlockedCountries: []string{"CN"}})
assert.Equal(t, DenyCountry, f.Check(netip.MustParseAddr("10.1.2.3"), geo), "CIDR allowed but country blocked")
assert.Equal(t, Allow, f.Check(netip.MustParseAddr("10.2.3.4"), geo), "CIDR allowed and country not blocked")
@@ -151,12 +151,12 @@ func TestFilter_Check_CIDRAllowThenCountryBlock(t *testing.T) {
}
func TestParseFilter_Empty(t *testing.T) {
- f := ParseFilter(nil, nil, nil, nil)
+ f := ParseFilter(FilterConfig{})
assert.Nil(t, f)
}
func TestParseFilter_InvalidCIDR(t *testing.T) {
- f := ParseFilter([]string{"invalid", "10.0.0.0/8"}, nil, nil, nil)
+ f := ParseFilter(FilterConfig{AllowedCIDRs: []string{"invalid", "10.0.0.0/8"}})
assert.NotNil(t, f)
assert.Len(t, f.AllowedCIDRs, 1, "invalid CIDR should be skipped")
@@ -166,12 +166,12 @@ func TestParseFilter_InvalidCIDR(t *testing.T) {
func TestFilter_HasRestrictions(t *testing.T) {
assert.False(t, (*Filter)(nil).HasRestrictions())
assert.False(t, (&Filter{}).HasRestrictions())
- assert.True(t, ParseFilter([]string{"10.0.0.0/8"}, nil, nil, nil).HasRestrictions())
- assert.True(t, ParseFilter(nil, nil, []string{"US"}, nil).HasRestrictions())
+ assert.True(t, ParseFilter(FilterConfig{AllowedCIDRs: []string{"10.0.0.0/8"}}).HasRestrictions())
+ assert.True(t, ParseFilter(FilterConfig{AllowedCountries: []string{"US"}}).HasRestrictions())
}
func TestFilter_Check_IPv6CIDR(t *testing.T) {
- f := ParseFilter([]string{"2001:db8::/32"}, nil, nil, nil)
+ f := ParseFilter(FilterConfig{AllowedCIDRs: []string{"2001:db8::/32"}})
assert.Equal(t, Allow, f.Check(netip.MustParseAddr("2001:db8::1"), nil), "v6 addr in v6 allowlist")
assert.Equal(t, DenyCIDR, f.Check(netip.MustParseAddr("2001:db9::1"), nil), "v6 addr not in v6 allowlist")
@@ -179,7 +179,7 @@ func TestFilter_Check_IPv6CIDR(t *testing.T) {
}
func TestFilter_Check_IPv4MappedIPv6(t *testing.T) {
- f := ParseFilter([]string{"10.0.0.0/8"}, nil, nil, nil)
+ f := ParseFilter(FilterConfig{AllowedCIDRs: []string{"10.0.0.0/8"}})
// A v4-mapped-v6 address like ::ffff:10.1.2.3 must match a v4 CIDR.
v4mapped := netip.MustParseAddr("::ffff:10.1.2.3")
@@ -191,7 +191,7 @@ func TestFilter_Check_IPv4MappedIPv6(t *testing.T) {
}
func TestFilter_Check_MixedV4V6CIDRs(t *testing.T) {
- f := ParseFilter([]string{"10.0.0.0/8", "2001:db8::/32"}, nil, nil, nil)
+ f := ParseFilter(FilterConfig{AllowedCIDRs: []string{"10.0.0.0/8", "2001:db8::/32"}})
assert.Equal(t, Allow, f.Check(netip.MustParseAddr("10.1.2.3"), nil), "v4 in v4 CIDR")
assert.Equal(t, Allow, f.Check(netip.MustParseAddr("2001:db8::1"), nil), "v6 in v6 CIDR")
@@ -202,7 +202,7 @@ func TestFilter_Check_MixedV4V6CIDRs(t *testing.T) {
func TestParseFilter_CanonicalizesNonMaskedCIDR(t *testing.T) {
// 1.1.1.1/24 has host bits set; ParseFilter should canonicalize to 1.1.1.0/24.
- f := ParseFilter([]string{"1.1.1.1/24"}, nil, nil, nil)
+ f := ParseFilter(FilterConfig{AllowedCIDRs: []string{"1.1.1.1/24"}})
assert.Equal(t, netip.MustParsePrefix("1.1.1.0/24"), f.AllowedCIDRs[0])
// Verify it still matches correctly.
@@ -264,7 +264,7 @@ func TestFilter_Check_CountryCodeCaseInsensitive(t *testing.T) {
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
- f := ParseFilter(nil, nil, tc.allowedCountries, tc.blockedCountries)
+ f := ParseFilter(FilterConfig{AllowedCountries: tc.allowedCountries, BlockedCountries: tc.blockedCountries})
got := f.Check(netip.MustParseAddr(tc.addr), geo)
assert.Equal(t, tc.want, got)
})
@@ -275,4 +275,252 @@ func TestFilter_Check_CountryCodeCaseInsensitive(t *testing.T) {
type unavailableGeo struct{}
func (u *unavailableGeo) LookupAddr(_ netip.Addr) geolocation.Result { return geolocation.Result{} }
-func (u *unavailableGeo) Available() bool { return false }
+func (u *unavailableGeo) Available() bool { return false }
+
+// mockCrowdSec is a test implementation of CrowdSecChecker.
+type mockCrowdSec struct {
+ decisions map[string]*CrowdSecDecision
+ ready bool
+}
+
+func (m *mockCrowdSec) CheckIP(addr netip.Addr) *CrowdSecDecision {
+ return m.decisions[addr.Unmap().String()]
+}
+
+func (m *mockCrowdSec) Ready() bool { return m.ready }
+
+func TestFilter_CrowdSec_Enforce_Ban(t *testing.T) {
+ cs := &mockCrowdSec{
+ decisions: map[string]*CrowdSecDecision{"1.2.3.4": {Type: DecisionBan}},
+ ready: true,
+ }
+ f := ParseFilter(FilterConfig{CrowdSec: cs, CrowdSecMode: CrowdSecEnforce})
+
+ assert.Equal(t, DenyCrowdSecBan, f.Check(netip.MustParseAddr("1.2.3.4"), nil))
+ assert.Equal(t, Allow, f.Check(netip.MustParseAddr("5.6.7.8"), nil))
+}
+
+func TestFilter_CrowdSec_Enforce_Captcha(t *testing.T) {
+ cs := &mockCrowdSec{
+ decisions: map[string]*CrowdSecDecision{"1.2.3.4": {Type: DecisionCaptcha}},
+ ready: true,
+ }
+ f := ParseFilter(FilterConfig{CrowdSec: cs, CrowdSecMode: CrowdSecEnforce})
+
+ assert.Equal(t, DenyCrowdSecCaptcha, f.Check(netip.MustParseAddr("1.2.3.4"), nil))
+}
+
+func TestFilter_CrowdSec_Enforce_Throttle(t *testing.T) {
+ cs := &mockCrowdSec{
+ decisions: map[string]*CrowdSecDecision{"1.2.3.4": {Type: DecisionThrottle}},
+ ready: true,
+ }
+ f := ParseFilter(FilterConfig{CrowdSec: cs, CrowdSecMode: CrowdSecEnforce})
+
+ assert.Equal(t, DenyCrowdSecThrottle, f.Check(netip.MustParseAddr("1.2.3.4"), nil))
+}
+
+func TestFilter_CrowdSec_Observe_DoesNotBlock(t *testing.T) {
+ cs := &mockCrowdSec{
+ decisions: map[string]*CrowdSecDecision{"1.2.3.4": {Type: DecisionBan}},
+ ready: true,
+ }
+ f := ParseFilter(FilterConfig{CrowdSec: cs, CrowdSecMode: CrowdSecObserve})
+
+ verdict := f.Check(netip.MustParseAddr("1.2.3.4"), nil)
+ assert.Equal(t, DenyCrowdSecBan, verdict, "verdict should be ban")
+ assert.True(t, f.IsObserveOnly(verdict), "should be observe-only")
+}
+
+func TestFilter_CrowdSec_Enforce_NotReady(t *testing.T) {
+ cs := &mockCrowdSec{ready: false}
+ f := ParseFilter(FilterConfig{CrowdSec: cs, CrowdSecMode: CrowdSecEnforce})
+
+ assert.Equal(t, DenyCrowdSecUnavailable, f.Check(netip.MustParseAddr("1.2.3.4"), nil))
+}
+
+func TestFilter_CrowdSec_Observe_NotReady_Allows(t *testing.T) {
+ cs := &mockCrowdSec{ready: false}
+ f := ParseFilter(FilterConfig{CrowdSec: cs, CrowdSecMode: CrowdSecObserve})
+
+ assert.Equal(t, Allow, f.Check(netip.MustParseAddr("1.2.3.4"), nil))
+}
+
+func TestFilter_CrowdSec_Off(t *testing.T) {
+ cs := &mockCrowdSec{
+ decisions: map[string]*CrowdSecDecision{"1.2.3.4": {Type: DecisionBan}},
+ ready: true,
+ }
+ f := ParseFilter(FilterConfig{CrowdSec: cs, CrowdSecMode: CrowdSecOff})
+
+ // CrowdSecOff means the filter is nil (no restrictions).
+ assert.Nil(t, f)
+}
+
+func TestFilter_IsObserveOnly(t *testing.T) {
+ f := &Filter{CrowdSecMode: CrowdSecObserve}
+ assert.True(t, f.IsObserveOnly(DenyCrowdSecBan))
+ assert.True(t, f.IsObserveOnly(DenyCrowdSecCaptcha))
+ assert.True(t, f.IsObserveOnly(DenyCrowdSecThrottle))
+ assert.True(t, f.IsObserveOnly(DenyCrowdSecUnavailable))
+ assert.False(t, f.IsObserveOnly(DenyCIDR))
+ assert.False(t, f.IsObserveOnly(Allow))
+
+ f2 := &Filter{CrowdSecMode: CrowdSecEnforce}
+ assert.False(t, f2.IsObserveOnly(DenyCrowdSecBan))
+}
+
+// TestFilter_LayerInteraction exercises the evaluation order across all three
+// restriction layers: CIDR -> Country -> CrowdSec. Each layer can only further
+// restrict; no layer can relax a denial from an earlier layer.
+//
+// Layer order | Behavior
+// ---------------|-------------------------------------------------------
+// 1. CIDR | Allowlist narrows to specific ranges, blocklist removes
+// | specific ranges. Deny here → stop, CrowdSec never runs.
+// 2. Country | Allowlist/blocklist by geo. Deny here → stop.
+// 3. CrowdSec | IP reputation. Can block IPs that passed layers 1-2.
+// | Observe mode: verdict returned but caller doesn't block.
+func TestFilter_LayerInteraction(t *testing.T) {
+ bannedIP := "10.1.2.3"
+ cleanIP := "10.2.3.4"
+ outsideIP := "192.168.1.1"
+
+ cs := &mockCrowdSec{
+ decisions: map[string]*CrowdSecDecision{bannedIP: {Type: DecisionBan}},
+ ready: true,
+ }
+ geo := newMockGeo(map[string]string{
+ bannedIP: "US",
+ cleanIP: "US",
+ outsideIP: "CN",
+ })
+
+ tests := []struct {
+ name string
+ config FilterConfig
+ addr string
+ want Verdict
+ }{
+ // CIDR allowlist + CrowdSec enforce: CrowdSec blocks inside allowed range
+ {
+ name: "allowed CIDR + CrowdSec banned",
+ config: FilterConfig{AllowedCIDRs: []string{"10.0.0.0/8"}, CrowdSec: cs, CrowdSecMode: CrowdSecEnforce},
+ addr: bannedIP,
+ want: DenyCrowdSecBan,
+ },
+ {
+ name: "allowed CIDR + CrowdSec clean",
+ config: FilterConfig{AllowedCIDRs: []string{"10.0.0.0/8"}, CrowdSec: cs, CrowdSecMode: CrowdSecEnforce},
+ addr: cleanIP,
+ want: Allow,
+ },
+ {
+ name: "CIDR deny stops before CrowdSec",
+ config: FilterConfig{AllowedCIDRs: []string{"10.0.0.0/8"}, CrowdSec: cs, CrowdSecMode: CrowdSecEnforce},
+ addr: outsideIP,
+ want: DenyCIDR,
+ },
+
+ // CIDR blocklist + CrowdSec enforce: blocklist blocks first, CrowdSec blocks remaining
+ {
+ name: "blocked CIDR stops before CrowdSec",
+ config: FilterConfig{BlockedCIDRs: []string{"10.1.0.0/16"}, CrowdSec: cs, CrowdSecMode: CrowdSecEnforce},
+ addr: bannedIP,
+ want: DenyCIDR,
+ },
+ {
+ name: "not in blocklist + CrowdSec clean",
+ config: FilterConfig{BlockedCIDRs: []string{"10.1.0.0/16"}, CrowdSec: cs, CrowdSecMode: CrowdSecEnforce},
+ addr: cleanIP,
+ want: Allow,
+ },
+
+ // Country allowlist + CrowdSec enforce
+ {
+ name: "allowed country + CrowdSec banned",
+ config: FilterConfig{AllowedCountries: []string{"US"}, CrowdSec: cs, CrowdSecMode: CrowdSecEnforce},
+ addr: bannedIP,
+ want: DenyCrowdSecBan,
+ },
+ {
+ name: "country deny stops before CrowdSec",
+ config: FilterConfig{AllowedCountries: []string{"US"}, CrowdSec: cs, CrowdSecMode: CrowdSecEnforce},
+ addr: outsideIP,
+ want: DenyCountry,
+ },
+
+ // All three layers: CIDR allowlist + country blocklist + CrowdSec
+ {
+ name: "all layers: CIDR allow + country allow + CrowdSec ban",
+ config: FilterConfig{
+ AllowedCIDRs: []string{"10.0.0.0/8"},
+ BlockedCountries: []string{"CN"},
+ CrowdSec: cs,
+ CrowdSecMode: CrowdSecEnforce,
+ },
+ addr: bannedIP, // 10.x (CIDR ok), US (country ok), banned (CrowdSec deny)
+ want: DenyCrowdSecBan,
+ },
+ {
+ name: "all layers: CIDR deny short-circuits everything",
+ config: FilterConfig{
+ AllowedCIDRs: []string{"10.0.0.0/8"},
+ BlockedCountries: []string{"CN"},
+ CrowdSec: cs,
+ CrowdSecMode: CrowdSecEnforce,
+ },
+ addr: outsideIP, // 192.x (CIDR deny)
+ want: DenyCIDR,
+ },
+
+ // Observe mode: verdict returned but IsObserveOnly is true
+ {
+ name: "observe mode: CrowdSec banned inside allowed CIDR",
+ config: FilterConfig{AllowedCIDRs: []string{"10.0.0.0/8"}, CrowdSec: cs, CrowdSecMode: CrowdSecObserve},
+ addr: bannedIP,
+ want: DenyCrowdSecBan, // verdict is ban, caller checks IsObserveOnly
+ },
+ }
+
+ for _, tc := range tests {
+ t.Run(tc.name, func(t *testing.T) {
+ f := ParseFilter(tc.config)
+ got := f.Check(netip.MustParseAddr(tc.addr), geo)
+ assert.Equal(t, tc.want, got)
+
+ // Verify observe mode flag when applicable.
+ if tc.config.CrowdSecMode == CrowdSecObserve && got.IsCrowdSec() {
+ assert.True(t, f.IsObserveOnly(got), "observe mode verdict should be observe-only")
+ }
+ if tc.config.CrowdSecMode == CrowdSecEnforce && got.IsCrowdSec() {
+ assert.False(t, f.IsObserveOnly(got), "enforce mode verdict should not be observe-only")
+ }
+ })
+ }
+}
+
+func TestFilter_CrowdSec_Enforce_NilChecker(t *testing.T) {
+ // LAPI not configured: checker is nil but mode is enforce. Must fail closed.
+ f := ParseFilter(FilterConfig{CrowdSec: nil, CrowdSecMode: CrowdSecEnforce})
+
+ assert.Equal(t, DenyCrowdSecUnavailable, f.Check(netip.MustParseAddr("1.2.3.4"), nil))
+}
+
+func TestFilter_CrowdSec_Observe_NilChecker(t *testing.T) {
+ // LAPI not configured: checker is nil but mode is observe. Must allow.
+ f := ParseFilter(FilterConfig{CrowdSec: nil, CrowdSecMode: CrowdSecObserve})
+
+ assert.Equal(t, Allow, f.Check(netip.MustParseAddr("1.2.3.4"), nil))
+}
+
+func TestFilter_HasRestrictions_CrowdSec(t *testing.T) {
+ cs := &mockCrowdSec{ready: true}
+ f := ParseFilter(FilterConfig{CrowdSec: cs, CrowdSecMode: CrowdSecEnforce})
+ assert.True(t, f.HasRestrictions())
+
+ // Enforce mode without checker (LAPI not configured): still has restrictions
+ // because Check() will fail-closed with DenyCrowdSecUnavailable.
+ f2 := ParseFilter(FilterConfig{CrowdSec: nil, CrowdSecMode: CrowdSecEnforce})
+ assert.True(t, f2.HasRestrictions())
+}
diff --git a/proxy/internal/tcp/router.go b/proxy/internal/tcp/router.go
index 8255c36d3..9f8660aeb 100644
--- a/proxy/internal/tcp/router.go
+++ b/proxy/internal/tcp/router.go
@@ -479,9 +479,14 @@ func (r *Router) checkRestrictions(conn net.Conn, route Route) restrict.Verdict
// On success (nil error), both conn and backend are closed by the relay.
func (r *Router) relayTCP(ctx context.Context, conn net.Conn, sni SNIHost, route Route) error {
if verdict := r.checkRestrictions(conn, route); verdict != restrict.Allow {
- r.logger.Debugf("connection from %s rejected by access restrictions: %s", conn.RemoteAddr(), verdict)
- r.logL4Deny(route, conn, verdict)
- return errAccessRestricted
+ if route.Filter != nil && route.Filter.IsObserveOnly(verdict) {
+ r.logger.Debugf("CrowdSec observe: would block %s for %s (%s)", conn.RemoteAddr(), sni, verdict)
+ r.logL4Deny(route, conn, verdict, true)
+ } else {
+ r.logger.Debugf("connection from %s rejected by access restrictions: %s", conn.RemoteAddr(), verdict)
+ r.logL4Deny(route, conn, verdict, false)
+ return errAccessRestricted
+ }
}
svcCtx, err := r.acquireRelay(ctx, route)
@@ -610,7 +615,7 @@ func (r *Router) logL4Entry(route Route, conn net.Conn, duration time.Duration,
}
// logL4Deny sends an access log entry for a denied connection.
-func (r *Router) logL4Deny(route Route, conn net.Conn, verdict restrict.Verdict) {
+func (r *Router) logL4Deny(route Route, conn net.Conn, verdict restrict.Verdict, observeOnly bool) {
r.mu.RLock()
al := r.accessLog
r.mu.RUnlock()
@@ -621,14 +626,22 @@ func (r *Router) logL4Deny(route Route, conn net.Conn, verdict restrict.Verdict)
sourceIP, _ := addrFromConn(conn)
- al.LogL4(accesslog.L4Entry{
+ entry := accesslog.L4Entry{
AccountID: route.AccountID,
ServiceID: route.ServiceID,
Protocol: route.Protocol,
Host: route.Domain,
SourceIP: sourceIP,
DenyReason: verdict.String(),
- })
+ }
+ if verdict.IsCrowdSec() {
+ entry.Metadata = map[string]string{"crowdsec_verdict": verdict.String()}
+ if observeOnly {
+ entry.Metadata["crowdsec_mode"] = "observe"
+ entry.DenyReason = ""
+ }
+ }
+ al.LogL4(entry)
}
// getOrCreateServiceCtxLocked returns the context for a service, creating one
diff --git a/proxy/internal/tcp/router_test.go b/proxy/internal/tcp/router_test.go
index 189cdc622..93b6560f4 100644
--- a/proxy/internal/tcp/router_test.go
+++ b/proxy/internal/tcp/router_test.go
@@ -1686,7 +1686,7 @@ func (f *fakeConn) RemoteAddr() net.Addr { return f.remote }
func TestCheckRestrictions_UnparseableAddress(t *testing.T) {
router := NewPortRouter(log.StandardLogger(), nil)
- filter := restrict.ParseFilter([]string{"10.0.0.0/8"}, nil, nil, nil)
+ filter := restrict.ParseFilter(restrict.FilterConfig{AllowedCIDRs: []string{"10.0.0.0/8"}})
route := Route{Filter: filter}
conn := &fakeConn{remote: fakeAddr("not-an-ip")}
@@ -1695,7 +1695,7 @@ func TestCheckRestrictions_UnparseableAddress(t *testing.T) {
func TestCheckRestrictions_NilRemoteAddr(t *testing.T) {
router := NewPortRouter(log.StandardLogger(), nil)
- filter := restrict.ParseFilter([]string{"10.0.0.0/8"}, nil, nil, nil)
+ filter := restrict.ParseFilter(restrict.FilterConfig{AllowedCIDRs: []string{"10.0.0.0/8"}})
route := Route{Filter: filter}
conn := &fakeConn{remote: nil}
@@ -1704,7 +1704,7 @@ func TestCheckRestrictions_NilRemoteAddr(t *testing.T) {
func TestCheckRestrictions_AllowedAndDenied(t *testing.T) {
router := NewPortRouter(log.StandardLogger(), nil)
- filter := restrict.ParseFilter([]string{"10.0.0.0/8"}, nil, nil, nil)
+ filter := restrict.ParseFilter(restrict.FilterConfig{AllowedCIDRs: []string{"10.0.0.0/8"}})
route := Route{Filter: filter}
allowed := &fakeConn{remote: &net.TCPAddr{IP: net.IPv4(10, 1, 2, 3), Port: 1234}}
@@ -1724,7 +1724,7 @@ func TestCheckRestrictions_NilFilter(t *testing.T) {
func TestCheckRestrictions_IPv4MappedIPv6(t *testing.T) {
router := NewPortRouter(log.StandardLogger(), nil)
- filter := restrict.ParseFilter([]string{"10.0.0.0/8"}, nil, nil, nil)
+ filter := restrict.ParseFilter(restrict.FilterConfig{AllowedCIDRs: []string{"10.0.0.0/8"}})
route := Route{Filter: filter}
// net.IPv4() returns a 16-byte v4-in-v6 representation internally.
diff --git a/proxy/internal/udp/relay.go b/proxy/internal/udp/relay.go
index d20ecf48b..8293bfe81 100644
--- a/proxy/internal/udp/relay.go
+++ b/proxy/internal/udp/relay.go
@@ -336,8 +336,13 @@ func (r *Relay) checkAccessRestrictions(addr net.Addr) error {
return fmt.Errorf("parse client address %s for restriction check: %w", addr, err)
}
if v := r.filter.Check(clientIP, r.geo); v != restrict.Allow {
- r.logDeny(clientIP, v)
- return fmt.Errorf("access restricted for %s", addr)
+ if r.filter.IsObserveOnly(v) {
+ r.logger.Debugf("CrowdSec observe: would block %s (%s)", clientIP, v)
+ r.logDeny(clientIP, v, true)
+ } else {
+ r.logDeny(clientIP, v, false)
+ return fmt.Errorf("access restricted for %s", addr)
+ }
}
return nil
}
@@ -498,19 +503,27 @@ func (r *Relay) logSessionEnd(sess *session) {
}
// logDeny sends an access log entry for a denied UDP packet.
-func (r *Relay) logDeny(clientIP netip.Addr, verdict restrict.Verdict) {
+func (r *Relay) logDeny(clientIP netip.Addr, verdict restrict.Verdict, observeOnly bool) {
if r.accessLog == nil {
return
}
- r.accessLog.LogL4(accesslog.L4Entry{
+ entry := accesslog.L4Entry{
AccountID: r.accountID,
ServiceID: r.serviceID,
Protocol: accesslog.ProtocolUDP,
Host: r.domain,
SourceIP: clientIP,
DenyReason: verdict.String(),
- })
+ }
+ if verdict.IsCrowdSec() {
+ entry.Metadata = map[string]string{"crowdsec_verdict": verdict.String()}
+ if observeOnly {
+ entry.Metadata["crowdsec_mode"] = "observe"
+ entry.DenyReason = ""
+ }
+ }
+ r.accessLog.LogL4(entry)
}
// Close stops the relay, waits for all session goroutines to exit,
diff --git a/proxy/management_integration_test.go b/proxy/management_integration_test.go
index 796cad622..17510f37e 100644
--- a/proxy/management_integration_test.go
+++ b/proxy/management_integration_test.go
@@ -228,6 +228,10 @@ func (m *testProxyManager) ClusterRequireSubdomain(_ context.Context, _ string)
return nil
}
+func (m *testProxyManager) ClusterSupportsCrowdSec(_ context.Context, _ string) *bool {
+ return nil
+}
+
func (m *testProxyManager) CleanupStale(_ context.Context, _ time.Duration) error {
return nil
}
diff --git a/proxy/server.go b/proxy/server.go
index acfe3c12d..fbd0d058e 100644
--- a/proxy/server.go
+++ b/proxy/server.go
@@ -42,6 +42,7 @@ import (
"github.com/netbirdio/netbird/proxy/internal/auth"
"github.com/netbirdio/netbird/proxy/internal/certwatch"
"github.com/netbirdio/netbird/proxy/internal/conntrack"
+ "github.com/netbirdio/netbird/proxy/internal/crowdsec"
"github.com/netbirdio/netbird/proxy/internal/debug"
"github.com/netbirdio/netbird/proxy/internal/geolocation"
proxygrpc "github.com/netbirdio/netbird/proxy/internal/grpc"
@@ -100,6 +101,13 @@ type Server struct {
geo restrict.GeoResolver
geoRaw *geolocation.Lookup
+ // crowdsecRegistry manages the shared CrowdSec bouncer lifecycle.
+ crowdsecRegistry *crowdsec.Registry
+ // crowdsecServices tracks which services have CrowdSec enabled for
+ // proper acquire/release lifecycle management.
+ crowdsecMu sync.Mutex
+ crowdsecServices map[types.ServiceID]bool
+
// routerReady is closed once mainRouter is fully initialized.
// The mapping worker waits on this before processing updates.
routerReady chan struct{}
@@ -175,6 +183,10 @@ type Server struct {
// GeoDataDir is the directory containing GeoLite2 MMDB files for
// country-based access restrictions. Empty disables geo lookups.
GeoDataDir string
+ // CrowdSecAPIURL is the CrowdSec LAPI URL. Empty disables CrowdSec.
+ CrowdSecAPIURL string
+ // CrowdSecAPIKey is the CrowdSec bouncer API key. Empty disables CrowdSec.
+ CrowdSecAPIKey string
// MaxSessionIdleTimeout caps the per-service session idle timeout.
// Zero means no cap (the proxy honors whatever management sends).
// Set via NB_PROXY_MAX_SESSION_IDLE_TIMEOUT for shared deployments.
@@ -275,6 +287,9 @@ func (s *Server) ListenAndServe(ctx context.Context, addr string) (err error) {
// management connectivity from the first stream connection.
s.healthChecker = health.NewChecker(s.Logger, s.netbird)
+ s.crowdsecRegistry = crowdsec.NewRegistry(s.CrowdSecAPIURL, s.CrowdSecAPIKey, log.NewEntry(s.Logger))
+ s.crowdsecServices = make(map[types.ServiceID]bool)
+
go s.newManagementMappingWorker(runCtx, s.mgmtClient)
tlsConfig, err := s.configureTLS(ctx)
@@ -763,6 +778,22 @@ func (s *Server) shutdownServices() {
s.Logger.Debugf("close geolocation: %v", err)
}
}
+
+ s.shutdownCrowdSec()
+}
+
+func (s *Server) shutdownCrowdSec() {
+ if s.crowdsecRegistry == nil {
+ return
+ }
+ s.crowdsecMu.Lock()
+ services := maps.Clone(s.crowdsecServices)
+ maps.Clear(s.crowdsecServices)
+ s.crowdsecMu.Unlock()
+
+ for svcID := range services {
+ s.crowdsecRegistry.Release(svcID)
+ }
}
// resolveDialFunc returns a DialContextFunc that dials through the
@@ -916,6 +947,7 @@ func (s *Server) newManagementMappingWorker(ctx context.Context, client proto.Pr
s.healthChecker.SetManagementConnected(false)
}
+ supportsCrowdSec := s.crowdsecRegistry.Available()
mappingClient, err := client.GetMappingUpdate(ctx, &proto.GetMappingUpdateRequest{
ProxyId: s.ID,
Version: s.Version,
@@ -924,6 +956,7 @@ func (s *Server) newManagementMappingWorker(ctx context.Context, client proto.Pr
Capabilities: &proto.ProxyCapabilities{
SupportsCustomPorts: &s.SupportsCustomPorts,
RequireSubdomain: &s.RequireSubdomain,
+ SupportsCrowdsec: &supportsCrowdSec,
},
})
if err != nil {
@@ -1159,7 +1192,7 @@ func (s *Server) setupTCPMapping(ctx context.Context, mapping *proto.ProxyMappin
ProxyProtocol: s.l4ProxyProtocol(mapping),
DialTimeout: s.l4DialTimeout(mapping),
SessionIdleTimeout: s.clampIdleTimeout(l4SessionIdleTimeout(mapping)),
- Filter: parseRestrictions(mapping),
+ Filter: s.parseRestrictions(mapping),
})
s.portMu.Lock()
@@ -1234,7 +1267,7 @@ func (s *Server) setupTLSMapping(ctx context.Context, mapping *proto.ProxyMappin
ProxyProtocol: s.l4ProxyProtocol(mapping),
DialTimeout: s.l4DialTimeout(mapping),
SessionIdleTimeout: s.clampIdleTimeout(l4SessionIdleTimeout(mapping)),
- Filter: parseRestrictions(mapping),
+ Filter: s.parseRestrictions(mapping),
})
if tlsPort != s.mainPort {
@@ -1268,12 +1301,51 @@ func (s *Server) serviceKeyForMapping(mapping *proto.ProxyMapping) roundtrip.Ser
// parseRestrictions converts a proto mapping's access restrictions into
// a restrict.Filter. Returns nil if the mapping has no restrictions.
-func parseRestrictions(mapping *proto.ProxyMapping) *restrict.Filter {
+func (s *Server) parseRestrictions(mapping *proto.ProxyMapping) *restrict.Filter {
r := mapping.GetAccessRestrictions()
if r == nil {
return nil
}
- return restrict.ParseFilter(r.GetAllowedCidrs(), r.GetBlockedCidrs(), r.GetAllowedCountries(), r.GetBlockedCountries())
+
+ svcID := types.ServiceID(mapping.GetId())
+ csMode := restrict.CrowdSecMode(r.GetCrowdsecMode())
+
+ var checker restrict.CrowdSecChecker
+ if csMode == restrict.CrowdSecEnforce || csMode == restrict.CrowdSecObserve {
+ if b := s.crowdsecRegistry.Acquire(svcID); b != nil {
+ checker = b
+ s.crowdsecMu.Lock()
+ s.crowdsecServices[svcID] = true
+ s.crowdsecMu.Unlock()
+ } else {
+ s.Logger.Warnf("service %s requests CrowdSec mode %q but proxy has no CrowdSec configured", svcID, csMode)
+ // Keep the mode: restrict.Filter will fail-closed for enforce (DenyCrowdSecUnavailable)
+ // and allow for observe.
+ }
+ }
+
+ return restrict.ParseFilter(restrict.FilterConfig{
+ AllowedCIDRs: r.GetAllowedCidrs(),
+ BlockedCIDRs: r.GetBlockedCidrs(),
+ AllowedCountries: r.GetAllowedCountries(),
+ BlockedCountries: r.GetBlockedCountries(),
+ CrowdSec: checker,
+ CrowdSecMode: csMode,
+ Logger: log.NewEntry(s.Logger),
+ })
+}
+
+// releaseCrowdSec releases the CrowdSec bouncer reference for the given
+// service if it had one.
+func (s *Server) releaseCrowdSec(svcID types.ServiceID) {
+ s.crowdsecMu.Lock()
+ had := s.crowdsecServices[svcID]
+ delete(s.crowdsecServices, svcID)
+ s.crowdsecMu.Unlock()
+
+ if had {
+ s.crowdsecRegistry.Release(svcID)
+ }
}
// warnIfGeoUnavailable logs a warning if the mapping has country restrictions
@@ -1388,7 +1460,7 @@ func (s *Server) addUDPRelay(ctx context.Context, mapping *proto.ProxyMapping, t
DialTimeout: s.l4DialTimeout(mapping),
SessionTTL: s.clampIdleTimeout(l4SessionIdleTimeout(mapping)),
AccessLog: s.accessLog,
- Filter: parseRestrictions(mapping),
+ Filter: s.parseRestrictions(mapping),
Geo: s.geo,
})
relay.SetObserver(s.meter)
@@ -1425,7 +1497,7 @@ func (s *Server) updateMapping(ctx context.Context, mapping *proto.ProxyMapping)
schemes = append(schemes, auth.NewHeader(s.mgmtClient, svcID, accountID, ha.GetHeader()))
}
- ipRestrictions := parseRestrictions(mapping)
+ ipRestrictions := s.parseRestrictions(mapping)
s.warnIfGeoUnavailable(mapping.GetDomain(), mapping.GetAccessRestrictions())
maxSessionAge := time.Duration(mapping.GetAuth().GetMaxSessionAgeSeconds()) * time.Second
@@ -1507,6 +1579,9 @@ func (s *Server) cleanupMappingRoutes(mapping *proto.ProxyMapping) {
// UDP relay cleanup (idempotent).
s.removeUDPRelay(svcID)
+ // Release CrowdSec after all routes are removed so the shared bouncer
+ // isn't stopped while stale filters can still be reached by in-flight requests.
+ s.releaseCrowdSec(svcID)
}
// removeUDPRelay stops and removes a UDP relay by service ID.
diff --git a/proxy/web/package-lock.json b/proxy/web/package-lock.json
index d16196d77..1611323a7 100644
--- a/proxy/web/package-lock.json
+++ b/proxy/web/package-lock.json
@@ -15,7 +15,7 @@
"tailwind-merge": "^2.6.0"
},
"devDependencies": {
- "@eslint/js": "^9.39.1",
+ "@eslint/js": "9.39.2",
"@tailwindcss/vite": "^4.1.18",
"@types/node": "^24.10.1",
"@types/react": "^19.2.5",
@@ -29,7 +29,7 @@
"tsx": "^4.21.0",
"typescript": "~5.9.3",
"typescript-eslint": "^8.46.4",
- "vite": "^7.2.4"
+ "vite": "7.3.2"
}
},
"node_modules/@babel/code-frame": {
@@ -1024,9 +1024,9 @@
"license": "MIT"
},
"node_modules/@rollup/rollup-android-arm-eabi": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.57.1.tgz",
- "integrity": "sha512-A6ehUVSiSaaliTxai040ZpZ2zTevHYbvu/lDoeAteHI8QnaosIzm4qwtezfRg1jOYaUmnzLX1AOD6Z+UJjtifg==",
+ "version": "4.60.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.60.0.tgz",
+ "integrity": "sha512-WOhNW9K8bR3kf4zLxbfg6Pxu2ybOUbB2AjMDHSQx86LIF4rH4Ft7vmMwNt0loO0eonglSNy4cpD3MKXXKQu0/A==",
"cpu": [
"arm"
],
@@ -1038,9 +1038,9 @@
]
},
"node_modules/@rollup/rollup-android-arm64": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.57.1.tgz",
- "integrity": "sha512-dQaAddCY9YgkFHZcFNS/606Exo8vcLHwArFZ7vxXq4rigo2bb494/xKMMwRRQW6ug7Js6yXmBZhSBRuBvCCQ3w==",
+ "version": "4.60.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.60.0.tgz",
+ "integrity": "sha512-u6JHLll5QKRvjciE78bQXDmqRqNs5M/3GVqZeMwvmjaNODJih/WIrJlFVEihvV0MiYFmd+ZyPr9wxOVbPAG2Iw==",
"cpu": [
"arm64"
],
@@ -1052,9 +1052,9 @@
]
},
"node_modules/@rollup/rollup-darwin-arm64": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.57.1.tgz",
- "integrity": "sha512-crNPrwJOrRxagUYeMn/DZwqN88SDmwaJ8Cvi/TN1HnWBU7GwknckyosC2gd0IqYRsHDEnXf328o9/HC6OkPgOg==",
+ "version": "4.60.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.60.0.tgz",
+ "integrity": "sha512-qEF7CsKKzSRc20Ciu2Zw1wRrBz4g56F7r/vRwY430UPp/nt1x21Q/fpJ9N5l47WWvJlkNCPJz3QRVw008fi7yA==",
"cpu": [
"arm64"
],
@@ -1066,9 +1066,9 @@
]
},
"node_modules/@rollup/rollup-darwin-x64": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.57.1.tgz",
- "integrity": "sha512-Ji8g8ChVbKrhFtig5QBV7iMaJrGtpHelkB3lsaKzadFBe58gmjfGXAOfI5FV0lYMH8wiqsxKQ1C9B0YTRXVy4w==",
+ "version": "4.60.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.60.0.tgz",
+ "integrity": "sha512-WADYozJ4QCnXCH4wPB+3FuGmDPoFseVCUrANmA5LWwGmC6FL14BWC7pcq+FstOZv3baGX65tZ378uT6WG8ynTw==",
"cpu": [
"x64"
],
@@ -1080,9 +1080,9 @@
]
},
"node_modules/@rollup/rollup-freebsd-arm64": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.57.1.tgz",
- "integrity": "sha512-R+/WwhsjmwodAcz65guCGFRkMb4gKWTcIeLy60JJQbXrJ97BOXHxnkPFrP+YwFlaS0m+uWJTstrUA9o+UchFug==",
+ "version": "4.60.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-arm64/-/rollup-freebsd-arm64-4.60.0.tgz",
+ "integrity": "sha512-6b8wGHJlDrGeSE3aH5mGNHBjA0TTkxdoNHik5EkvPHCt351XnigA4pS7Wsj/Eo9Y8RBU6f35cjN9SYmCFBtzxw==",
"cpu": [
"arm64"
],
@@ -1094,9 +1094,9 @@
]
},
"node_modules/@rollup/rollup-freebsd-x64": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.57.1.tgz",
- "integrity": "sha512-IEQTCHeiTOnAUC3IDQdzRAGj3jOAYNr9kBguI7MQAAZK3caezRrg0GxAb6Hchg4lxdZEI5Oq3iov/w/hnFWY9Q==",
+ "version": "4.60.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-freebsd-x64/-/rollup-freebsd-x64-4.60.0.tgz",
+ "integrity": "sha512-h25Ga0t4jaylMB8M/JKAyrvvfxGRjnPQIR8lnCayyzEjEOx2EJIlIiMbhpWxDRKGKF8jbNH01NnN663dH638mA==",
"cpu": [
"x64"
],
@@ -1108,9 +1108,9 @@
]
},
"node_modules/@rollup/rollup-linux-arm-gnueabihf": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.57.1.tgz",
- "integrity": "sha512-F8sWbhZ7tyuEfsmOxwc2giKDQzN3+kuBLPwwZGyVkLlKGdV1nvnNwYD0fKQ8+XS6hp9nY7B+ZeK01EBUE7aHaw==",
+ "version": "4.60.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.60.0.tgz",
+ "integrity": "sha512-RzeBwv0B3qtVBWtcuABtSuCzToo2IEAIQrcyB/b2zMvBWVbjo8bZDjACUpnaafaxhTw2W+imQbP2BD1usasK4g==",
"cpu": [
"arm"
],
@@ -1122,9 +1122,9 @@
]
},
"node_modules/@rollup/rollup-linux-arm-musleabihf": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.57.1.tgz",
- "integrity": "sha512-rGfNUfn0GIeXtBP1wL5MnzSj98+PZe/AXaGBCRmT0ts80lU5CATYGxXukeTX39XBKsxzFpEeK+Mrp9faXOlmrw==",
+ "version": "4.60.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.60.0.tgz",
+ "integrity": "sha512-Sf7zusNI2CIU1HLzuu9Tc5YGAHEZs5Lu7N1ssJG4Tkw6e0MEsN7NdjUDDfGNHy2IU+ENyWT+L2obgWiguWibWQ==",
"cpu": [
"arm"
],
@@ -1136,9 +1136,9 @@
]
},
"node_modules/@rollup/rollup-linux-arm64-gnu": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.57.1.tgz",
- "integrity": "sha512-MMtej3YHWeg/0klK2Qodf3yrNzz6CGjo2UntLvk2RSPlhzgLvYEB3frRvbEF2wRKh1Z2fDIg9KRPe1fawv7C+g==",
+ "version": "4.60.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.60.0.tgz",
+ "integrity": "sha512-DX2x7CMcrJzsE91q7/O02IJQ5/aLkVtYFryqCjduJhUfGKG6yJV8hxaw8pZa93lLEpPTP/ohdN4wFz7yp/ry9A==",
"cpu": [
"arm64"
],
@@ -1150,9 +1150,9 @@
]
},
"node_modules/@rollup/rollup-linux-arm64-musl": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.57.1.tgz",
- "integrity": "sha512-1a/qhaaOXhqXGpMFMET9VqwZakkljWHLmZOX48R0I/YLbhdxr1m4gtG1Hq7++VhVUmf+L3sTAf9op4JlhQ5u1Q==",
+ "version": "4.60.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.60.0.tgz",
+ "integrity": "sha512-09EL+yFVbJZlhcQfShpswwRZ0Rg+z/CsSELFCnPt3iK+iqwGsI4zht3secj5vLEs957QvFFXnzAT0FFPIxSrkQ==",
"cpu": [
"arm64"
],
@@ -1164,9 +1164,9 @@
]
},
"node_modules/@rollup/rollup-linux-loong64-gnu": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.57.1.tgz",
- "integrity": "sha512-QWO6RQTZ/cqYtJMtxhkRkidoNGXc7ERPbZN7dVW5SdURuLeVU7lwKMpo18XdcmpWYd0qsP1bwKPf7DNSUinhvA==",
+ "version": "4.60.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-gnu/-/rollup-linux-loong64-gnu-4.60.0.tgz",
+ "integrity": "sha512-i9IcCMPr3EXm8EQg5jnja0Zyc1iFxJjZWlb4wr7U2Wx/GrddOuEafxRdMPRYVaXjgbhvqalp6np07hN1w9kAKw==",
"cpu": [
"loong64"
],
@@ -1178,9 +1178,9 @@
]
},
"node_modules/@rollup/rollup-linux-loong64-musl": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.57.1.tgz",
- "integrity": "sha512-xpObYIf+8gprgWaPP32xiN5RVTi/s5FCR+XMXSKmhfoJjrpRAjCuuqQXyxUa/eJTdAE6eJ+KDKaoEqjZQxh3Gw==",
+ "version": "4.60.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-loong64-musl/-/rollup-linux-loong64-musl-4.60.0.tgz",
+ "integrity": "sha512-DGzdJK9kyJ+B78MCkWeGnpXJ91tK/iKA6HwHxF4TAlPIY7GXEvMe8hBFRgdrR9Ly4qebR/7gfUs9y2IoaVEyog==",
"cpu": [
"loong64"
],
@@ -1192,9 +1192,9 @@
]
},
"node_modules/@rollup/rollup-linux-ppc64-gnu": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.57.1.tgz",
- "integrity": "sha512-4BrCgrpZo4hvzMDKRqEaW1zeecScDCR+2nZ86ATLhAoJ5FQ+lbHVD3ttKe74/c7tNT9c6F2viwB3ufwp01Oh2w==",
+ "version": "4.60.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-gnu/-/rollup-linux-ppc64-gnu-4.60.0.tgz",
+ "integrity": "sha512-RwpnLsqC8qbS8z1H1AxBA1H6qknR4YpPR9w2XX0vo2Sz10miu57PkNcnHVaZkbqyw/kUWfKMI73jhmfi9BRMUQ==",
"cpu": [
"ppc64"
],
@@ -1206,9 +1206,9 @@
]
},
"node_modules/@rollup/rollup-linux-ppc64-musl": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.57.1.tgz",
- "integrity": "sha512-NOlUuzesGauESAyEYFSe3QTUguL+lvrN1HtwEEsU2rOwdUDeTMJdO5dUYl/2hKf9jWydJrO9OL/XSSf65R5+Xw==",
+ "version": "4.60.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-ppc64-musl/-/rollup-linux-ppc64-musl-4.60.0.tgz",
+ "integrity": "sha512-Z8pPf54Ly3aqtdWC3G4rFigZgNvd+qJlOE52fmko3KST9SoGfAdSRCwyoyG05q1HrrAblLbk1/PSIV+80/pxLg==",
"cpu": [
"ppc64"
],
@@ -1220,9 +1220,9 @@
]
},
"node_modules/@rollup/rollup-linux-riscv64-gnu": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.57.1.tgz",
- "integrity": "sha512-ptA88htVp0AwUUqhVghwDIKlvJMD/fmL/wrQj99PRHFRAG6Z5nbWoWG4o81Nt9FT+IuqUQi+L31ZKAFeJ5Is+A==",
+ "version": "4.60.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.60.0.tgz",
+ "integrity": "sha512-3a3qQustp3COCGvnP4SvrMHnPQ9d1vzCakQVRTliaz8cIp/wULGjiGpbcqrkv0WrHTEp8bQD/B3HBjzujVWLOA==",
"cpu": [
"riscv64"
],
@@ -1234,9 +1234,9 @@
]
},
"node_modules/@rollup/rollup-linux-riscv64-musl": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.57.1.tgz",
- "integrity": "sha512-S51t7aMMTNdmAMPpBg7OOsTdn4tySRQvklmL3RpDRyknk87+Sp3xaumlatU+ppQ+5raY7sSTcC2beGgvhENfuw==",
+ "version": "4.60.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-musl/-/rollup-linux-riscv64-musl-4.60.0.tgz",
+ "integrity": "sha512-pjZDsVH/1VsghMJ2/kAaxt6dL0psT6ZexQVrijczOf+PeP2BUqTHYejk3l6TlPRydggINOeNRhvpLa0AYpCWSQ==",
"cpu": [
"riscv64"
],
@@ -1248,9 +1248,9 @@
]
},
"node_modules/@rollup/rollup-linux-s390x-gnu": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.57.1.tgz",
- "integrity": "sha512-Bl00OFnVFkL82FHbEqy3k5CUCKH6OEJL54KCyx2oqsmZnFTR8IoNqBF+mjQVcRCT5sB6yOvK8A37LNm/kPJiZg==",
+ "version": "4.60.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.60.0.tgz",
+ "integrity": "sha512-3ObQs0BhvPgiUVZrN7gqCSvmFuMWvWvsjG5ayJ3Lraqv+2KhOsp+pUbigqbeWqueGIsnn+09HBw27rJ+gYK4VQ==",
"cpu": [
"s390x"
],
@@ -1262,9 +1262,9 @@
]
},
"node_modules/@rollup/rollup-linux-x64-gnu": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.57.1.tgz",
- "integrity": "sha512-ABca4ceT4N+Tv/GtotnWAeXZUZuM/9AQyCyKYyKnpk4yoA7QIAuBt6Hkgpw8kActYlew2mvckXkvx0FfoInnLg==",
+ "version": "4.60.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.60.0.tgz",
+ "integrity": "sha512-EtylprDtQPdS5rXvAayrNDYoJhIz1/vzN2fEubo3yLE7tfAw+948dO0g4M0vkTVFhKojnF+n6C8bDNe+gDRdTg==",
"cpu": [
"x64"
],
@@ -1276,9 +1276,9 @@
]
},
"node_modules/@rollup/rollup-linux-x64-musl": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.57.1.tgz",
- "integrity": "sha512-HFps0JeGtuOR2convgRRkHCekD7j+gdAuXM+/i6kGzQtFhlCtQkpwtNzkNj6QhCDp7DRJ7+qC/1Vg2jt5iSOFw==",
+ "version": "4.60.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.60.0.tgz",
+ "integrity": "sha512-k09oiRCi/bHU9UVFqD17r3eJR9bn03TyKraCrlz5ULFJGdJGi7VOmm9jl44vOJvRJ6P7WuBi/s2A97LxxHGIdw==",
"cpu": [
"x64"
],
@@ -1290,9 +1290,9 @@
]
},
"node_modules/@rollup/rollup-openbsd-x64": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.57.1.tgz",
- "integrity": "sha512-H+hXEv9gdVQuDTgnqD+SQffoWoc0Of59AStSzTEj/feWTBAnSfSD3+Dql1ZruJQxmykT/JVY0dE8Ka7z0DH1hw==",
+ "version": "4.60.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-openbsd-x64/-/rollup-openbsd-x64-4.60.0.tgz",
+ "integrity": "sha512-1o/0/pIhozoSaDJoDcec+IVLbnRtQmHwPV730+AOD29lHEEo4F5BEUB24H0OBdhbBBDwIOSuf7vgg0Ywxdfiiw==",
"cpu": [
"x64"
],
@@ -1304,9 +1304,9 @@
]
},
"node_modules/@rollup/rollup-openharmony-arm64": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.57.1.tgz",
- "integrity": "sha512-4wYoDpNg6o/oPximyc/NG+mYUejZrCU2q+2w6YZqrAs2UcNUChIZXjtafAiiZSUc7On8v5NyNj34Kzj/Ltk6dQ==",
+ "version": "4.60.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-openharmony-arm64/-/rollup-openharmony-arm64-4.60.0.tgz",
+ "integrity": "sha512-pESDkos/PDzYwtyzB5p/UoNU/8fJo68vcXM9ZW2V0kjYayj1KaaUfi1NmTUTUpMn4UhU4gTuK8gIaFO4UGuMbA==",
"cpu": [
"arm64"
],
@@ -1318,9 +1318,9 @@
]
},
"node_modules/@rollup/rollup-win32-arm64-msvc": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.57.1.tgz",
- "integrity": "sha512-O54mtsV/6LW3P8qdTcamQmuC990HDfR71lo44oZMZlXU4tzLrbvTii87Ni9opq60ds0YzuAlEr/GNwuNluZyMQ==",
+ "version": "4.60.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.60.0.tgz",
+ "integrity": "sha512-hj1wFStD7B1YBeYmvY+lWXZ7ey73YGPcViMShYikqKT1GtstIKQAtfUI6yrzPjAy/O7pO0VLXGmUVWXQMaYgTQ==",
"cpu": [
"arm64"
],
@@ -1332,9 +1332,9 @@
]
},
"node_modules/@rollup/rollup-win32-ia32-msvc": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.57.1.tgz",
- "integrity": "sha512-P3dLS+IerxCT/7D2q2FYcRdWRl22dNbrbBEtxdWhXrfIMPP9lQhb5h4Du04mdl5Woq05jVCDPCMF7Ub0NAjIew==",
+ "version": "4.60.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.60.0.tgz",
+ "integrity": "sha512-SyaIPFoxmUPlNDq5EHkTbiKzmSEmq/gOYFI/3HHJ8iS/v1mbugVa7dXUzcJGQfoytp9DJFLhHH4U3/eTy2Bq4w==",
"cpu": [
"ia32"
],
@@ -1346,9 +1346,9 @@
]
},
"node_modules/@rollup/rollup-win32-x64-gnu": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.57.1.tgz",
- "integrity": "sha512-VMBH2eOOaKGtIJYleXsi2B8CPVADrh+TyNxJ4mWPnKfLB/DBUmzW+5m1xUrcwWoMfSLagIRpjUFeW5CO5hyciQ==",
+ "version": "4.60.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-gnu/-/rollup-win32-x64-gnu-4.60.0.tgz",
+ "integrity": "sha512-RdcryEfzZr+lAr5kRm2ucN9aVlCCa2QNq4hXelZxb8GG0NJSazq44Z3PCCc8wISRuCVnGs0lQJVX5Vp6fKA+IA==",
"cpu": [
"x64"
],
@@ -1360,9 +1360,9 @@
]
},
"node_modules/@rollup/rollup-win32-x64-msvc": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.57.1.tgz",
- "integrity": "sha512-mxRFDdHIWRxg3UfIIAwCm6NzvxG0jDX/wBN6KsQFTvKFqqg9vTrWUE68qEjHt19A5wwx5X5aUi2zuZT7YR0jrA==",
+ "version": "4.60.0",
+ "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.60.0.tgz",
+ "integrity": "sha512-PrsWNQ8BuE00O3Xsx3ALh2Df8fAj9+cvvX9AIA6o4KpATR98c9mud4XtDWVvsEuyia5U4tVSTKygawyJkjm60w==",
"cpu": [
"x64"
],
@@ -1926,9 +1926,9 @@
}
},
"node_modules/@typescript-eslint/typescript-estree/node_modules/brace-expansion": {
- "version": "2.0.2",
- "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.2.tgz",
- "integrity": "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ==",
+ "version": "2.0.3",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.3.tgz",
+ "integrity": "sha512-MCV/fYJEbqx68aE58kv2cA/kiky1G8vux3OR6/jbS+jIMe/6fJWa0DTzJU7dqijOWYwHi1t29FlfYI9uytqlpA==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -1936,13 +1936,13 @@
}
},
"node_modules/@typescript-eslint/typescript-estree/node_modules/minimatch": {
- "version": "9.0.5",
- "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.5.tgz",
- "integrity": "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow==",
+ "version": "9.0.9",
+ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.9.tgz",
+ "integrity": "sha512-OBwBN9AL4dqmETlpS2zasx+vTeWclWzkblfZk7KTA5j3jeOONz/tRCnZomUyvNg83wL5Zv9Ss6HMJXAgL8R2Yg==",
"dev": true,
"license": "ISC",
"dependencies": {
- "brace-expansion": "^2.0.1"
+ "brace-expansion": "^2.0.2"
},
"engines": {
"node": ">=16 || 14 >=14.17"
@@ -2052,9 +2052,9 @@
}
},
"node_modules/ajv": {
- "version": "6.12.6",
- "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz",
- "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==",
+ "version": "6.14.0",
+ "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.14.0.tgz",
+ "integrity": "sha512-IWrosm/yrn43eiKqkfkHis7QioDleaXQHdDVPKg0FSwwd/DuvyX79TZnFOnYpB7dcsFAMmtFztZuXPDvSePkFw==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -2109,9 +2109,9 @@
}
},
"node_modules/brace-expansion": {
- "version": "1.1.12",
- "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.12.tgz",
- "integrity": "sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==",
+ "version": "1.1.13",
+ "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.13.tgz",
+ "integrity": "sha512-9ZLprWS6EENmhEOpjCYW2c8VkmOvckIJZfkr7rBW6dObmfgJ/L1GpSYW5Hpo9lDz4D1+n0Ckz8rU7FwHDQiG/w==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -2657,9 +2657,9 @@
}
},
"node_modules/flatted": {
- "version": "3.3.3",
- "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.3.tgz",
- "integrity": "sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==",
+ "version": "3.4.2",
+ "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.4.2.tgz",
+ "integrity": "sha512-PjDse7RzhcPkIJwy5t7KPWQSZ9cAbzQXcafsetQoD7sOJRQlGikNbx7yZp2OotDnJyrDcbyRq3Ttb18iYOqkxA==",
"dev": true,
"license": "ISC"
},
@@ -3243,9 +3243,9 @@
}
},
"node_modules/minimatch": {
- "version": "3.1.2",
- "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz",
- "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==",
+ "version": "3.1.5",
+ "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.5.tgz",
+ "integrity": "sha512-VgjWUsnnT6n+NUk6eZq77zeFdpW2LWDzP6zFGrCbHXiYNul5Dzqk2HHQ5uFH2DNW5Xbp8+jVzaeNt94ssEEl4w==",
"dev": true,
"license": "ISC",
"dependencies": {
@@ -3386,9 +3386,9 @@
"license": "ISC"
},
"node_modules/picomatch": {
- "version": "4.0.3",
- "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz",
- "integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
+ "version": "4.0.4",
+ "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.4.tgz",
+ "integrity": "sha512-QP88BAKvMam/3NxH6vj2o21R6MjxZUAd6nlwAS/pnGvN9IVLocLHxGYIzFhg6fUQ+5th6P4dv4eW9jX3DSIj7A==",
"dev": true,
"license": "MIT",
"peer": true,
@@ -3501,9 +3501,9 @@
}
},
"node_modules/rollup": {
- "version": "4.57.1",
- "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.57.1.tgz",
- "integrity": "sha512-oQL6lgK3e2QZeQ7gcgIkS2YZPg5slw37hYufJ3edKlfQSGGm8ICoxswK15ntSzF/a8+h7ekRy7k7oWc3BQ7y8A==",
+ "version": "4.60.0",
+ "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.60.0.tgz",
+ "integrity": "sha512-yqjxruMGBQJ2gG4HtjZtAfXArHomazDHoFwFFmZZl0r7Pdo7qCIXKqKHZc8yeoMgzJJ+pO6pEEHa+V7uzWlrAQ==",
"dev": true,
"license": "MIT",
"dependencies": {
@@ -3517,31 +3517,31 @@
"npm": ">=8.0.0"
},
"optionalDependencies": {
- "@rollup/rollup-android-arm-eabi": "4.57.1",
- "@rollup/rollup-android-arm64": "4.57.1",
- "@rollup/rollup-darwin-arm64": "4.57.1",
- "@rollup/rollup-darwin-x64": "4.57.1",
- "@rollup/rollup-freebsd-arm64": "4.57.1",
- "@rollup/rollup-freebsd-x64": "4.57.1",
- "@rollup/rollup-linux-arm-gnueabihf": "4.57.1",
- "@rollup/rollup-linux-arm-musleabihf": "4.57.1",
- "@rollup/rollup-linux-arm64-gnu": "4.57.1",
- "@rollup/rollup-linux-arm64-musl": "4.57.1",
- "@rollup/rollup-linux-loong64-gnu": "4.57.1",
- "@rollup/rollup-linux-loong64-musl": "4.57.1",
- "@rollup/rollup-linux-ppc64-gnu": "4.57.1",
- "@rollup/rollup-linux-ppc64-musl": "4.57.1",
- "@rollup/rollup-linux-riscv64-gnu": "4.57.1",
- "@rollup/rollup-linux-riscv64-musl": "4.57.1",
- "@rollup/rollup-linux-s390x-gnu": "4.57.1",
- "@rollup/rollup-linux-x64-gnu": "4.57.1",
- "@rollup/rollup-linux-x64-musl": "4.57.1",
- "@rollup/rollup-openbsd-x64": "4.57.1",
- "@rollup/rollup-openharmony-arm64": "4.57.1",
- "@rollup/rollup-win32-arm64-msvc": "4.57.1",
- "@rollup/rollup-win32-ia32-msvc": "4.57.1",
- "@rollup/rollup-win32-x64-gnu": "4.57.1",
- "@rollup/rollup-win32-x64-msvc": "4.57.1",
+ "@rollup/rollup-android-arm-eabi": "4.60.0",
+ "@rollup/rollup-android-arm64": "4.60.0",
+ "@rollup/rollup-darwin-arm64": "4.60.0",
+ "@rollup/rollup-darwin-x64": "4.60.0",
+ "@rollup/rollup-freebsd-arm64": "4.60.0",
+ "@rollup/rollup-freebsd-x64": "4.60.0",
+ "@rollup/rollup-linux-arm-gnueabihf": "4.60.0",
+ "@rollup/rollup-linux-arm-musleabihf": "4.60.0",
+ "@rollup/rollup-linux-arm64-gnu": "4.60.0",
+ "@rollup/rollup-linux-arm64-musl": "4.60.0",
+ "@rollup/rollup-linux-loong64-gnu": "4.60.0",
+ "@rollup/rollup-linux-loong64-musl": "4.60.0",
+ "@rollup/rollup-linux-ppc64-gnu": "4.60.0",
+ "@rollup/rollup-linux-ppc64-musl": "4.60.0",
+ "@rollup/rollup-linux-riscv64-gnu": "4.60.0",
+ "@rollup/rollup-linux-riscv64-musl": "4.60.0",
+ "@rollup/rollup-linux-s390x-gnu": "4.60.0",
+ "@rollup/rollup-linux-x64-gnu": "4.60.0",
+ "@rollup/rollup-linux-x64-musl": "4.60.0",
+ "@rollup/rollup-openbsd-x64": "4.60.0",
+ "@rollup/rollup-openharmony-arm64": "4.60.0",
+ "@rollup/rollup-win32-arm64-msvc": "4.60.0",
+ "@rollup/rollup-win32-ia32-msvc": "4.60.0",
+ "@rollup/rollup-win32-x64-gnu": "4.60.0",
+ "@rollup/rollup-win32-x64-msvc": "4.60.0",
"fsevents": "~2.3.2"
}
},
@@ -3803,9 +3803,9 @@
}
},
"node_modules/vite": {
- "version": "7.3.1",
- "resolved": "https://registry.npmjs.org/vite/-/vite-7.3.1.tgz",
- "integrity": "sha512-w+N7Hifpc3gRjZ63vYBXA56dvvRlNWRczTdmCBBa+CotUzAPf5b7YMdMR/8CQoeYE5LX3W4wj6RYTgonm1b9DA==",
+ "version": "7.3.2",
+ "resolved": "https://registry.npmjs.org/vite/-/vite-7.3.2.tgz",
+ "integrity": "sha512-Bby3NOsna2jsjfLVOHKes8sGwgl4TT0E6vvpYgnAYDIF/tie7MRaFthmKuHx1NSXjiTueXH3do80FMQgvEktRg==",
"dev": true,
"license": "MIT",
"peer": true,
diff --git a/proxy/web/package.json b/proxy/web/package.json
index 97ec1ec0d..9a7c84ed4 100644
--- a/proxy/web/package.json
+++ b/proxy/web/package.json
@@ -17,7 +17,7 @@
"tailwind-merge": "^2.6.0"
},
"devDependencies": {
- "@eslint/js": "^9.39.1",
+ "@eslint/js": "9.39.2",
"@tailwindcss/vite": "^4.1.18",
"@types/node": "^24.10.1",
"@types/react": "^19.2.5",
@@ -31,6 +31,6 @@
"tsx": "^4.21.0",
"typescript": "~5.9.3",
"typescript-eslint": "^8.46.4",
- "vite": "^7.2.4"
+ "vite": "7.3.2"
}
}
diff --git a/relay/server/handshake.go b/relay/server/handshake.go
index 8c3ee1899..067888406 100644
--- a/relay/server/handshake.go
+++ b/relay/server/handshake.go
@@ -1,11 +1,13 @@
package server
import (
+ "context"
"fmt"
- "net"
+ "time"
log "github.com/sirupsen/logrus"
+ "github.com/netbirdio/netbird/relay/server/listener"
"github.com/netbirdio/netbird/shared/relay/messages"
//nolint:staticcheck
"github.com/netbirdio/netbird/shared/relay/messages/address"
@@ -13,6 +15,12 @@ import (
authmsg "github.com/netbirdio/netbird/shared/relay/messages/auth"
)
+const (
+ // handshakeTimeout bounds how long a connection may remain in the
+ // pre-authentication handshake phase before being closed.
+ handshakeTimeout = 10 * time.Second
+)
+
type Validator interface {
Validate(any) error
// Deprecated: Use Validate instead.
@@ -58,7 +66,7 @@ func marshalResponseHelloMsg(instanceURL string) ([]byte, error) {
}
type handshake struct {
- conn net.Conn
+ conn listener.Conn
validator Validator
preparedMsg *preparedMsg
@@ -66,9 +74,9 @@ type handshake struct {
peerID *messages.PeerID
}
-func (h *handshake) handshakeReceive() (*messages.PeerID, error) {
+func (h *handshake) handshakeReceive(ctx context.Context) (*messages.PeerID, error) {
buf := make([]byte, messages.MaxHandshakeSize)
- n, err := h.conn.Read(buf)
+ n, err := h.conn.Read(ctx, buf)
if err != nil {
return nil, fmt.Errorf("read from %s: %w", h.conn.RemoteAddr(), err)
}
@@ -103,7 +111,7 @@ func (h *handshake) handshakeReceive() (*messages.PeerID, error) {
return peerID, nil
}
-func (h *handshake) handshakeResponse() error {
+func (h *handshake) handshakeResponse(ctx context.Context) error {
var responseMsg []byte
if h.handshakeMethodAuth {
responseMsg = h.preparedMsg.responseAuthMsg
@@ -111,7 +119,7 @@ func (h *handshake) handshakeResponse() error {
responseMsg = h.preparedMsg.responseHelloMsg
}
- if _, err := h.conn.Write(responseMsg); err != nil {
+ if _, err := h.conn.Write(ctx, responseMsg); err != nil {
return fmt.Errorf("handshake response write to %s (%s): %w", h.peerID, h.conn.RemoteAddr(), err)
}
diff --git a/relay/server/listener/conn.go b/relay/server/listener/conn.go
new file mode 100644
index 000000000..ef0869594
--- /dev/null
+++ b/relay/server/listener/conn.go
@@ -0,0 +1,14 @@
+package listener
+
+import (
+ "context"
+ "net"
+)
+
+// Conn is the relay connection contract implemented by WS and QUIC transports.
+type Conn interface {
+ Read(ctx context.Context, b []byte) (n int, err error)
+ Write(ctx context.Context, b []byte) (n int, err error)
+ RemoteAddr() net.Addr
+ Close() error
+}
diff --git a/relay/server/listener/listener.go b/relay/server/listener/listener.go
deleted file mode 100644
index 0a79182f4..000000000
--- a/relay/server/listener/listener.go
+++ /dev/null
@@ -1,14 +0,0 @@
-package listener
-
-import (
- "context"
- "net"
-
- "github.com/netbirdio/netbird/relay/protocol"
-)
-
-type Listener interface {
- Listen(func(conn net.Conn)) error
- Shutdown(ctx context.Context) error
- Protocol() protocol.Protocol
-}
diff --git a/relay/server/listener/quic/conn.go b/relay/server/listener/quic/conn.go
index 6e2201bf7..d8dafcd1f 100644
--- a/relay/server/listener/quic/conn.go
+++ b/relay/server/listener/quic/conn.go
@@ -3,33 +3,26 @@ package quic
import (
"context"
"errors"
- "fmt"
"net"
"sync"
- "time"
"github.com/quic-go/quic-go"
)
type Conn struct {
- session *quic.Conn
- closed bool
- closedMu sync.Mutex
- ctx context.Context
- ctxCancel context.CancelFunc
+ session *quic.Conn
+ closed bool
+ closedMu sync.Mutex
}
func NewConn(session *quic.Conn) *Conn {
- ctx, cancel := context.WithCancel(context.Background())
return &Conn{
- session: session,
- ctx: ctx,
- ctxCancel: cancel,
+ session: session,
}
}
-func (c *Conn) Read(b []byte) (n int, err error) {
- dgram, err := c.session.ReceiveDatagram(c.ctx)
+func (c *Conn) Read(ctx context.Context, b []byte) (n int, err error) {
+ dgram, err := c.session.ReceiveDatagram(ctx)
if err != nil {
return 0, c.remoteCloseErrHandling(err)
}
@@ -38,33 +31,17 @@ func (c *Conn) Read(b []byte) (n int, err error) {
return n, nil
}
-func (c *Conn) Write(b []byte) (int, error) {
+func (c *Conn) Write(_ context.Context, b []byte) (int, error) {
if err := c.session.SendDatagram(b); err != nil {
return 0, c.remoteCloseErrHandling(err)
}
return len(b), nil
}
-func (c *Conn) LocalAddr() net.Addr {
- return c.session.LocalAddr()
-}
-
func (c *Conn) RemoteAddr() net.Addr {
return c.session.RemoteAddr()
}
-func (c *Conn) SetReadDeadline(t time.Time) error {
- return nil
-}
-
-func (c *Conn) SetWriteDeadline(t time.Time) error {
- return fmt.Errorf("SetWriteDeadline is not implemented")
-}
-
-func (c *Conn) SetDeadline(t time.Time) error {
- return fmt.Errorf("SetDeadline is not implemented")
-}
-
func (c *Conn) Close() error {
c.closedMu.Lock()
if c.closed {
@@ -74,8 +51,6 @@ func (c *Conn) Close() error {
c.closed = true
c.closedMu.Unlock()
- c.ctxCancel() // Cancel the context
-
sessionErr := c.session.CloseWithError(0, "normal closure")
return sessionErr
}
diff --git a/relay/server/listener/quic/listener.go b/relay/server/listener/quic/listener.go
index 797223e74..68f0e03c0 100644
--- a/relay/server/listener/quic/listener.go
+++ b/relay/server/listener/quic/listener.go
@@ -5,12 +5,12 @@ import (
"crypto/tls"
"errors"
"fmt"
- "net"
"github.com/quic-go/quic-go"
log "github.com/sirupsen/logrus"
"github.com/netbirdio/netbird/relay/protocol"
+ relaylistener "github.com/netbirdio/netbird/relay/server/listener"
nbRelay "github.com/netbirdio/netbird/shared/relay"
)
@@ -25,7 +25,7 @@ type Listener struct {
listener *quic.Listener
}
-func (l *Listener) Listen(acceptFn func(conn net.Conn)) error {
+func (l *Listener) Listen(acceptFn func(conn relaylistener.Conn)) error {
quicCfg := &quic.Config{
EnableDatagrams: true,
InitialPacketSize: nbRelay.QUICInitialPacketSize,
diff --git a/relay/server/listener/ws/conn.go b/relay/server/listener/ws/conn.go
index d5bce56f7..c22b5719d 100644
--- a/relay/server/listener/ws/conn.go
+++ b/relay/server/listener/ws/conn.go
@@ -18,25 +18,21 @@ const (
type Conn struct {
*websocket.Conn
- lAddr *net.TCPAddr
rAddr *net.TCPAddr
closed bool
closedMu sync.Mutex
- ctx context.Context
}
-func NewConn(wsConn *websocket.Conn, lAddr, rAddr *net.TCPAddr) *Conn {
+func NewConn(wsConn *websocket.Conn, rAddr *net.TCPAddr) *Conn {
return &Conn{
Conn: wsConn,
- lAddr: lAddr,
rAddr: rAddr,
- ctx: context.Background(),
}
}
-func (c *Conn) Read(b []byte) (n int, err error) {
- t, r, err := c.Reader(c.ctx)
+func (c *Conn) Read(ctx context.Context, b []byte) (n int, err error) {
+ t, r, err := c.Reader(ctx)
if err != nil {
return 0, c.ioErrHandling(err)
}
@@ -56,34 +52,18 @@ func (c *Conn) Read(b []byte) (n int, err error) {
// Write writes a binary message with the given payload.
// It does not block until fill the internal buffer.
// If the buffer filled up, wait until the buffer is drained or timeout.
-func (c *Conn) Write(b []byte) (int, error) {
- ctx, ctxCancel := context.WithTimeout(c.ctx, writeTimeout)
+func (c *Conn) Write(ctx context.Context, b []byte) (int, error) {
+ ctx, ctxCancel := context.WithTimeout(ctx, writeTimeout)
defer ctxCancel()
err := c.Conn.Write(ctx, websocket.MessageBinary, b)
return len(b), err
}
-func (c *Conn) LocalAddr() net.Addr {
- return c.lAddr
-}
-
func (c *Conn) RemoteAddr() net.Addr {
return c.rAddr
}
-func (c *Conn) SetReadDeadline(t time.Time) error {
- return fmt.Errorf("SetReadDeadline is not implemented")
-}
-
-func (c *Conn) SetWriteDeadline(t time.Time) error {
- return fmt.Errorf("SetWriteDeadline is not implemented")
-}
-
-func (c *Conn) SetDeadline(t time.Time) error {
- return fmt.Errorf("SetDeadline is not implemented")
-}
-
func (c *Conn) Close() error {
c.closedMu.Lock()
c.closed = true
diff --git a/relay/server/listener/ws/listener.go b/relay/server/listener/ws/listener.go
index 12219e29b..ba175f901 100644
--- a/relay/server/listener/ws/listener.go
+++ b/relay/server/listener/ws/listener.go
@@ -7,11 +7,13 @@ import (
"fmt"
"net"
"net/http"
+ "time"
"github.com/coder/websocket"
log "github.com/sirupsen/logrus"
"github.com/netbirdio/netbird/relay/protocol"
+ relaylistener "github.com/netbirdio/netbird/relay/server/listener"
"github.com/netbirdio/netbird/shared/relay"
)
@@ -27,18 +29,19 @@ type Listener struct {
TLSConfig *tls.Config
server *http.Server
- acceptFn func(conn net.Conn)
+ acceptFn func(conn relaylistener.Conn)
}
-func (l *Listener) Listen(acceptFn func(conn net.Conn)) error {
+func (l *Listener) Listen(acceptFn func(conn relaylistener.Conn)) error {
l.acceptFn = acceptFn
mux := http.NewServeMux()
mux.HandleFunc(URLPath, l.onAccept)
l.server = &http.Server{
- Addr: l.Address,
- Handler: mux,
- TLSConfig: l.TLSConfig,
+ Addr: l.Address,
+ Handler: mux,
+ TLSConfig: l.TLSConfig,
+ ReadHeaderTimeout: 5 * time.Second,
}
log.Infof("WS server listening address: %s", l.Address)
@@ -93,18 +96,9 @@ func (l *Listener) onAccept(w http.ResponseWriter, r *http.Request) {
return
}
- lAddr, err := net.ResolveTCPAddr("tcp", l.server.Addr)
- if err != nil {
- err = wsConn.Close(websocket.StatusInternalError, "internal error")
- if err != nil {
- log.Errorf("failed to close ws connection: %s", err)
- }
- return
- }
-
log.Infof("WS client connected from: %s", rAddr)
- conn := NewConn(wsConn, lAddr, rAddr)
+ conn := NewConn(wsConn, rAddr)
l.acceptFn(conn)
}
diff --git a/relay/server/peer.go b/relay/server/peer.go
index c5ff41857..8376cdfa7 100644
--- a/relay/server/peer.go
+++ b/relay/server/peer.go
@@ -10,6 +10,7 @@ import (
log "github.com/sirupsen/logrus"
"github.com/netbirdio/netbird/relay/metrics"
+ "github.com/netbirdio/netbird/relay/server/listener"
"github.com/netbirdio/netbird/relay/server/store"
"github.com/netbirdio/netbird/shared/relay/healthcheck"
"github.com/netbirdio/netbird/shared/relay/messages"
@@ -26,11 +27,14 @@ type Peer struct {
metrics *metrics.Metrics
log *log.Entry
id messages.PeerID
- conn net.Conn
+ conn listener.Conn
connMu sync.RWMutex
store *store.Store
notifier *store.PeerNotifier
+ ctx context.Context
+ ctxCancel context.CancelFunc
+
peersListener *store.Listener
// between the online peer collection step and the notification sending should not be sent offline notifications from another thread
@@ -38,14 +42,17 @@ type Peer struct {
}
// NewPeer creates a new Peer instance and prepare custom logging
-func NewPeer(metrics *metrics.Metrics, id messages.PeerID, conn net.Conn, store *store.Store, notifier *store.PeerNotifier) *Peer {
+func NewPeer(metrics *metrics.Metrics, id messages.PeerID, conn listener.Conn, store *store.Store, notifier *store.PeerNotifier) *Peer {
+ ctx, cancel := context.WithCancel(context.Background())
p := &Peer{
- metrics: metrics,
- log: log.WithField("peer_id", id.String()),
- id: id,
- conn: conn,
- store: store,
- notifier: notifier,
+ metrics: metrics,
+ log: log.WithField("peer_id", id.String()),
+ id: id,
+ conn: conn,
+ store: store,
+ notifier: notifier,
+ ctx: ctx,
+ ctxCancel: cancel,
}
return p
@@ -57,6 +64,7 @@ func NewPeer(metrics *metrics.Metrics, id messages.PeerID, conn net.Conn, store
func (p *Peer) Work() {
p.peersListener = p.notifier.NewListener(p.sendPeersOnline, p.sendPeersWentOffline)
defer func() {
+ p.ctxCancel()
p.notifier.RemoveListener(p.peersListener)
if err := p.conn.Close(); err != nil && !errors.Is(err, net.ErrClosed) {
@@ -64,8 +72,7 @@ func (p *Peer) Work() {
}
}()
- ctx, cancel := context.WithCancel(context.Background())
- defer cancel()
+ ctx := p.ctx
hc := healthcheck.NewSender(p.log)
go hc.StartHealthCheck(ctx)
@@ -73,7 +80,7 @@ func (p *Peer) Work() {
buf := make([]byte, bufferSize)
for {
- n, err := p.conn.Read(buf)
+ n, err := p.conn.Read(ctx, buf)
if err != nil {
if !errors.Is(err, net.ErrClosed) {
p.log.Errorf("failed to read message: %s", err)
@@ -131,10 +138,10 @@ func (p *Peer) handleMsgType(ctx context.Context, msgType messages.MsgType, hc *
}
// Write writes data to the connection
-func (p *Peer) Write(b []byte) (int, error) {
+func (p *Peer) Write(ctx context.Context, b []byte) (int, error) {
p.connMu.RLock()
defer p.connMu.RUnlock()
- return p.conn.Write(b)
+ return p.conn.Write(ctx, b)
}
// CloseGracefully closes the connection with the peer gracefully. Send a close message to the client and close the
@@ -147,6 +154,7 @@ func (p *Peer) CloseGracefully(ctx context.Context) {
p.log.Errorf("failed to send close message to peer: %s", p.String())
}
+ p.ctxCancel()
if err := p.conn.Close(); err != nil {
p.log.Errorf(errCloseConn, err)
}
@@ -156,6 +164,7 @@ func (p *Peer) Close() {
p.connMu.Lock()
defer p.connMu.Unlock()
+ p.ctxCancel()
if err := p.conn.Close(); err != nil {
p.log.Errorf(errCloseConn, err)
}
@@ -170,26 +179,15 @@ func (p *Peer) writeWithTimeout(ctx context.Context, buf []byte) error {
ctx, cancel := context.WithTimeout(ctx, 3*time.Second)
defer cancel()
- writeDone := make(chan struct{})
- var err error
- go func() {
- _, err = p.conn.Write(buf)
- close(writeDone)
- }()
-
- select {
- case <-ctx.Done():
- return ctx.Err()
- case <-writeDone:
- return err
- }
+ _, err := p.conn.Write(ctx, buf)
+ return err
}
func (p *Peer) handleHealthcheckEvents(ctx context.Context, hc *healthcheck.Sender) {
for {
select {
case <-hc.HealthCheck:
- _, err := p.Write(messages.MarshalHealthcheck())
+ _, err := p.Write(ctx, messages.MarshalHealthcheck())
if err != nil {
p.log.Errorf("failed to send healthcheck message: %s", err)
return
@@ -228,12 +226,12 @@ func (p *Peer) handleTransportMsg(msg []byte) {
return
}
- n, err := dp.Write(msg)
+ n, err := dp.Write(dp.ctx, msg)
if err != nil {
p.log.Errorf("failed to write transport message to: %s", dp.String())
return
}
- p.metrics.TransferBytesSent.Add(context.Background(), int64(n))
+ p.metrics.TransferBytesSent.Add(p.ctx, int64(n))
}
func (p *Peer) handleSubscribePeerState(msg []byte) {
@@ -276,7 +274,7 @@ func (p *Peer) sendPeersOnline(peers []messages.PeerID) {
}
for n, msg := range msgs {
- if _, err := p.Write(msg); err != nil {
+ if _, err := p.Write(p.ctx, msg); err != nil {
p.log.Errorf("failed to write %d. peers offline message: %s", n, err)
}
}
@@ -293,7 +291,7 @@ func (p *Peer) sendPeersWentOffline(peers []messages.PeerID) {
}
for n, msg := range msgs {
- if _, err := p.Write(msg); err != nil {
+ if _, err := p.Write(p.ctx, msg); err != nil {
p.log.Errorf("failed to write %d. peers offline message: %s", n, err)
}
}
diff --git a/relay/server/relay.go b/relay/server/relay.go
index bb355f58f..56add8bea 100644
--- a/relay/server/relay.go
+++ b/relay/server/relay.go
@@ -3,7 +3,6 @@ package server
import (
"context"
"fmt"
- "net"
"net/url"
"sync"
"time"
@@ -13,11 +12,20 @@ import (
"go.opentelemetry.io/otel/metric"
"github.com/netbirdio/netbird/relay/healthcheck/peerid"
+ "github.com/netbirdio/netbird/relay/protocol"
+ "github.com/netbirdio/netbird/relay/server/listener"
+
//nolint:staticcheck
"github.com/netbirdio/netbird/relay/metrics"
"github.com/netbirdio/netbird/relay/server/store"
)
+type Listener interface {
+ Listen(func(conn listener.Conn)) error
+ Shutdown(ctx context.Context) error
+ Protocol() protocol.Protocol
+}
+
type Config struct {
Meter metric.Meter
ExposedAddress string
@@ -109,7 +117,7 @@ func NewRelay(config Config) (*Relay, error) {
}
// Accept start to handle a new peer connection
-func (r *Relay) Accept(conn net.Conn) {
+func (r *Relay) Accept(conn listener.Conn) {
acceptTime := time.Now()
r.closeMu.RLock()
defer r.closeMu.RUnlock()
@@ -117,12 +125,15 @@ func (r *Relay) Accept(conn net.Conn) {
return
}
+ hsCtx, hsCancel := context.WithTimeout(context.Background(), handshakeTimeout)
+ defer hsCancel()
+
h := handshake{
conn: conn,
validator: r.validator,
preparedMsg: r.preparedMsg,
}
- peerID, err := h.handshakeReceive()
+ peerID, err := h.handshakeReceive(hsCtx)
if err != nil {
if peerid.IsHealthCheck(peerID) {
log.Debugf("health check connection from %s", conn.RemoteAddr())
@@ -154,7 +165,7 @@ func (r *Relay) Accept(conn net.Conn) {
r.metrics.PeerDisconnected(peer.String())
}()
- if err := h.handshakeResponse(); err != nil {
+ if err := h.handshakeResponse(hsCtx); err != nil {
log.Errorf("failed to send handshake response, close peer: %s", err)
peer.Close()
}
diff --git a/relay/server/server.go b/relay/server/server.go
index a0f7eb73c..340da55b8 100644
--- a/relay/server/server.go
+++ b/relay/server/server.go
@@ -3,7 +3,6 @@ package server
import (
"context"
"crypto/tls"
- "net"
"net/url"
"sync"
@@ -31,7 +30,7 @@ type ListenerConfig struct {
// In a new HTTP connection, the server will accept the connection and pass it to the Relay server via the Accept method.
type Server struct {
relay *Relay
- listeners []listener.Listener
+ listeners []Listener
listenerMux sync.Mutex
}
@@ -56,7 +55,7 @@ func NewServer(config Config) (*Server, error) {
}
return &Server{
relay: relay,
- listeners: make([]listener.Listener, 0, 2),
+ listeners: make([]Listener, 0, 2),
}, nil
}
@@ -86,7 +85,7 @@ func (r *Server) Listen(cfg ListenerConfig) error {
wg := sync.WaitGroup{}
for _, l := range r.listeners {
wg.Add(1)
- go func(listener listener.Listener) {
+ go func(listener Listener) {
defer wg.Done()
errChan <- listener.Listen(r.relay.Accept)
}(l)
@@ -139,6 +138,6 @@ func (r *Server) InstanceURL() url.URL {
// RelayAccept returns the relay's Accept function for handling incoming connections.
// This allows external HTTP handlers to route connections to the relay without
// starting the relay's own listeners.
-func (r *Server) RelayAccept() func(conn net.Conn) {
+func (r *Server) RelayAccept() func(conn listener.Conn) {
return r.relay.Accept
}
diff --git a/shared/management/client/client.go b/shared/management/client/client.go
index a15301223..18efba87b 100644
--- a/shared/management/client/client.go
+++ b/shared/management/client/client.go
@@ -4,8 +4,6 @@ import (
"context"
"io"
- "golang.zx2c4.com/wireguard/wgctrl/wgtypes"
-
"github.com/netbirdio/netbird/client/system"
"github.com/netbirdio/netbird/shared/management/domain"
"github.com/netbirdio/netbird/shared/management/proto"
@@ -16,14 +14,18 @@ type Client interface {
io.Closer
Sync(ctx context.Context, sysInfo *system.Info, msgHandler func(msg *proto.SyncResponse) error) error
Job(ctx context.Context, msgHandler func(msg *proto.JobRequest) *proto.JobResponse) error
- GetServerPublicKey() (*wgtypes.Key, error)
- Register(serverKey wgtypes.Key, setupKey string, jwtToken string, sysInfo *system.Info, sshKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error)
- Login(serverKey wgtypes.Key, sysInfo *system.Info, sshKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error)
- GetDeviceAuthorizationFlow(serverKey wgtypes.Key) (*proto.DeviceAuthorizationFlow, error)
- GetPKCEAuthorizationFlow(serverKey wgtypes.Key) (*proto.PKCEAuthorizationFlow, error)
+ Register(setupKey string, jwtToken string, sysInfo *system.Info, sshKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error)
+ Login(sysInfo *system.Info, sshKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error)
+ GetDeviceAuthorizationFlow() (*proto.DeviceAuthorizationFlow, error)
+ GetPKCEAuthorizationFlow() (*proto.PKCEAuthorizationFlow, error)
GetNetworkMap(sysInfo *system.Info) (*proto.NetworkMap, error)
GetServerURL() string
+ // IsHealthy returns the current connection status without blocking.
+ // Used by the engine to monitor connectivity in the background.
IsHealthy() bool
+ // HealthCheck actively probes the management server and returns an error if unreachable.
+ // Used to validate connectivity before committing configuration changes.
+ HealthCheck() error
SyncMeta(sysInfo *system.Info) error
Logout() error
CreateExpose(ctx context.Context, req ExposeRequest) (*ExposeResponse, error)
diff --git a/shared/management/client/client_test.go b/shared/management/client/client_test.go
index 01957154c..f5edb6b95 100644
--- a/shared/management/client/client_test.go
+++ b/shared/management/client/client_test.go
@@ -189,7 +189,7 @@ func closeManagementSilently(s *grpc.Server, listener net.Listener) {
}
}
-func TestClient_GetServerPublicKey(t *testing.T) {
+func TestClient_HealthCheck(t *testing.T) {
testKey, err := wgtypes.GenerateKey()
if err != nil {
t.Fatal(err)
@@ -203,12 +203,8 @@ func TestClient_GetServerPublicKey(t *testing.T) {
t.Fatal(err)
}
- key, err := client.GetServerPublicKey()
- if err != nil {
- t.Error("couldn't retrieve management public key")
- }
- if key == nil {
- t.Error("got an empty management public key")
+ if err := client.HealthCheck(); err != nil {
+ t.Errorf("health check failed: %v", err)
}
}
@@ -225,12 +221,8 @@ func TestClient_LoginUnregistered_ShouldThrow_401(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- key, err := client.GetServerPublicKey()
- if err != nil {
- t.Fatal(err)
- }
sysInfo := system.GetInfo(context.TODO())
- _, err = client.Login(*key, sysInfo, nil, nil)
+ _, err = client.Login(sysInfo, nil, nil)
if err == nil {
t.Error("expecting err on unregistered login, got nil")
}
@@ -253,12 +245,8 @@ func TestClient_LoginRegistered(t *testing.T) {
t.Fatal(err)
}
- key, err := client.GetServerPublicKey()
- if err != nil {
- t.Error(err)
- }
info := system.GetInfo(context.TODO())
- resp, err := client.Register(*key, ValidKey, "", info, nil, nil)
+ resp, err := client.Register(ValidKey, "", info, nil, nil)
if err != nil {
t.Error(err)
}
@@ -282,13 +270,8 @@ func TestClient_Sync(t *testing.T) {
t.Fatal(err)
}
- serverKey, err := client.GetServerPublicKey()
- if err != nil {
- t.Error(err)
- }
-
info := system.GetInfo(context.TODO())
- _, err = client.Register(*serverKey, ValidKey, "", info, nil, nil)
+ _, err = client.Register(ValidKey, "", info, nil, nil)
if err != nil {
t.Error(err)
}
@@ -304,7 +287,7 @@ func TestClient_Sync(t *testing.T) {
}
info = system.GetInfo(context.TODO())
- _, err = remoteClient.Register(*serverKey, ValidKey, "", info, nil, nil)
+ _, err = remoteClient.Register(ValidKey, "", info, nil, nil)
if err != nil {
t.Fatal(err)
}
@@ -364,11 +347,6 @@ func Test_SystemMetaDataFromClient(t *testing.T) {
t.Fatalf("error while creating testClient: %v", err)
}
- key, err := testClient.GetServerPublicKey()
- if err != nil {
- t.Fatalf("error while getting server public key from testclient, %v", err)
- }
-
var actualMeta *mgmtProto.PeerSystemMeta
var actualValidKey string
var wg sync.WaitGroup
@@ -405,7 +383,7 @@ func Test_SystemMetaDataFromClient(t *testing.T) {
}
info := system.GetInfo(context.TODO())
- _, err = testClient.Register(*key, ValidKey, "", info, nil, nil)
+ _, err = testClient.Register(ValidKey, "", info, nil, nil)
if err != nil {
t.Errorf("error while trying to register client: %v", err)
}
@@ -505,7 +483,7 @@ func Test_GetDeviceAuthorizationFlow(t *testing.T) {
}
mgmtMockServer.GetDeviceAuthorizationFlowFunc = func(ctx context.Context, req *mgmtProto.EncryptedMessage) (*mgmtProto.EncryptedMessage, error) {
- encryptedResp, err := encryption.EncryptMessage(serverKey, client.key, expectedFlowInfo)
+ encryptedResp, err := encryption.EncryptMessage(client.key.PublicKey(), serverKey, expectedFlowInfo)
if err != nil {
return nil, err
}
@@ -517,7 +495,7 @@ func Test_GetDeviceAuthorizationFlow(t *testing.T) {
}, nil
}
- flowInfo, err := client.GetDeviceAuthorizationFlow(serverKey)
+ flowInfo, err := client.GetDeviceAuthorizationFlow()
if err != nil {
t.Error("error while retrieving device auth flow information")
}
@@ -551,7 +529,7 @@ func Test_GetPKCEAuthorizationFlow(t *testing.T) {
}
mgmtMockServer.GetPKCEAuthorizationFlowFunc = func(ctx context.Context, req *mgmtProto.EncryptedMessage) (*mgmtProto.EncryptedMessage, error) {
- encryptedResp, err := encryption.EncryptMessage(serverKey, client.key, expectedFlowInfo)
+ encryptedResp, err := encryption.EncryptMessage(client.key.PublicKey(), serverKey, expectedFlowInfo)
if err != nil {
return nil, err
}
@@ -563,7 +541,7 @@ func Test_GetPKCEAuthorizationFlow(t *testing.T) {
}, nil
}
- flowInfo, err := client.GetPKCEAuthorizationFlow(serverKey)
+ flowInfo, err := client.GetPKCEAuthorizationFlow()
if err != nil {
t.Error("error while retrieving pkce auth flow information")
}
diff --git a/shared/management/client/grpc.go b/shared/management/client/grpc.go
index 252199498..a01e51abc 100644
--- a/shared/management/client/grpc.go
+++ b/shared/management/client/grpc.go
@@ -202,7 +202,7 @@ func (c *GrpcClient) withMgmtStream(
return fmt.Errorf("connection to management is not ready and in %s state", connState)
}
- serverPubKey, err := c.GetServerPublicKey()
+ serverPubKey, err := c.getServerPublicKey()
if err != nil {
log.Debugf(errMsgMgmtPublicKey, err)
return err
@@ -404,7 +404,7 @@ func (c *GrpcClient) handleSyncStream(ctx context.Context, serverPubKey wgtypes.
// GetNetworkMap return with the network map
func (c *GrpcClient) GetNetworkMap(sysInfo *system.Info) (*proto.NetworkMap, error) {
- serverPubKey, err := c.GetServerPublicKey()
+ serverPubKey, err := c.getServerPublicKey()
if err != nil {
log.Debugf("failed getting Management Service public key: %s", err)
return nil, err
@@ -490,18 +490,24 @@ func (c *GrpcClient) receiveUpdatesEvents(stream proto.ManagementService_SyncCli
}
}
-// GetServerPublicKey returns server's WireGuard public key (used later for encrypting messages sent to the server)
-func (c *GrpcClient) GetServerPublicKey() (*wgtypes.Key, error) {
+// HealthCheck actively probes the management server and returns an error if unreachable.
+// Used to validate connectivity before committing configuration changes.
+func (c *GrpcClient) HealthCheck() error {
if !c.ready() {
- return nil, errors.New(errMsgNoMgmtConnection)
+ return errors.New(errMsgNoMgmtConnection)
}
+ _, err := c.getServerPublicKey()
+ return err
+}
+
+// getServerPublicKey fetches the server's WireGuard public key.
+func (c *GrpcClient) getServerPublicKey() (*wgtypes.Key, error) {
mgmCtx, cancel := context.WithTimeout(c.ctx, 5*time.Second)
defer cancel()
resp, err := c.realClient.GetServerKey(mgmCtx, &proto.Empty{})
if err != nil {
- log.Errorf("failed while getting Management Service public key: %v", err)
- return nil, fmt.Errorf("failed while getting Management Service public key")
+ return nil, fmt.Errorf("failed getting Management Service public key: %w", err)
}
serverKey, err := wgtypes.ParseKey(resp.Key)
@@ -512,7 +518,8 @@ func (c *GrpcClient) GetServerPublicKey() (*wgtypes.Key, error) {
return &serverKey, nil
}
-// IsHealthy probes the gRPC connection and returns false on errors
+// IsHealthy returns the current connection status without blocking.
+// Used by the engine to monitor connectivity in the background.
func (c *GrpcClient) IsHealthy() bool {
switch c.conn.GetState() {
case connectivity.TransientFailure:
@@ -538,12 +545,17 @@ func (c *GrpcClient) IsHealthy() bool {
return true
}
-func (c *GrpcClient) login(serverKey wgtypes.Key, req *proto.LoginRequest) (*proto.LoginResponse, error) {
+func (c *GrpcClient) login(req *proto.LoginRequest) (*proto.LoginResponse, error) {
if !c.ready() {
return nil, errors.New(errMsgNoMgmtConnection)
}
- loginReq, err := encryption.EncryptMessage(serverKey, c.key, req)
+ serverKey, err := c.getServerPublicKey()
+ if err != nil {
+ return nil, err
+ }
+
+ loginReq, err := encryption.EncryptMessage(*serverKey, c.key, req)
if err != nil {
log.Errorf("failed to encrypt message: %s", err)
return nil, err
@@ -577,7 +589,7 @@ func (c *GrpcClient) login(serverKey wgtypes.Key, req *proto.LoginRequest) (*pro
}
loginResp := &proto.LoginResponse{}
- err = encryption.DecryptMessage(serverKey, c.key, resp.Body, loginResp)
+ err = encryption.DecryptMessage(*serverKey, c.key, resp.Body, loginResp)
if err != nil {
log.Errorf("failed to decrypt login response: %s", err)
return nil, err
@@ -589,34 +601,40 @@ func (c *GrpcClient) login(serverKey wgtypes.Key, req *proto.LoginRequest) (*pro
// Register registers peer on Management Server. It actually calls a Login endpoint with a provided setup key
// Takes care of encrypting and decrypting messages.
// This method will also collect system info and send it with the request (e.g. hostname, os, etc)
-func (c *GrpcClient) Register(serverKey wgtypes.Key, setupKey string, jwtToken string, sysInfo *system.Info, pubSSHKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error) {
+func (c *GrpcClient) Register(setupKey string, jwtToken string, sysInfo *system.Info, pubSSHKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error) {
keys := &proto.PeerKeys{
SshPubKey: pubSSHKey,
WgPubKey: []byte(c.key.PublicKey().String()),
}
- return c.login(serverKey, &proto.LoginRequest{SetupKey: setupKey, Meta: infoToMetaData(sysInfo), JwtToken: jwtToken, PeerKeys: keys, DnsLabels: dnsLabels.ToPunycodeList()})
+ return c.login(&proto.LoginRequest{SetupKey: setupKey, Meta: infoToMetaData(sysInfo), JwtToken: jwtToken, PeerKeys: keys, DnsLabels: dnsLabels.ToPunycodeList()})
}
// Login attempts login to Management Server. Takes care of encrypting and decrypting messages.
-func (c *GrpcClient) Login(serverKey wgtypes.Key, sysInfo *system.Info, pubSSHKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error) {
+func (c *GrpcClient) Login(sysInfo *system.Info, pubSSHKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error) {
keys := &proto.PeerKeys{
SshPubKey: pubSSHKey,
WgPubKey: []byte(c.key.PublicKey().String()),
}
- return c.login(serverKey, &proto.LoginRequest{Meta: infoToMetaData(sysInfo), PeerKeys: keys, DnsLabels: dnsLabels.ToPunycodeList()})
+ return c.login(&proto.LoginRequest{Meta: infoToMetaData(sysInfo), PeerKeys: keys, DnsLabels: dnsLabels.ToPunycodeList()})
}
// GetDeviceAuthorizationFlow returns a device authorization flow information.
// It also takes care of encrypting and decrypting messages.
-func (c *GrpcClient) GetDeviceAuthorizationFlow(serverKey wgtypes.Key) (*proto.DeviceAuthorizationFlow, error) {
+func (c *GrpcClient) GetDeviceAuthorizationFlow() (*proto.DeviceAuthorizationFlow, error) {
if !c.ready() {
return nil, fmt.Errorf("no connection to management in order to get device authorization flow")
}
+
+ serverKey, err := c.getServerPublicKey()
+ if err != nil {
+ return nil, err
+ }
+
mgmCtx, cancel := context.WithTimeout(c.ctx, time.Second*2)
defer cancel()
message := &proto.DeviceAuthorizationFlowRequest{}
- encryptedMSG, err := encryption.EncryptMessage(serverKey, c.key, message)
+ encryptedMSG, err := encryption.EncryptMessage(*serverKey, c.key, message)
if err != nil {
return nil, err
}
@@ -630,7 +648,7 @@ func (c *GrpcClient) GetDeviceAuthorizationFlow(serverKey wgtypes.Key) (*proto.D
}
flowInfoResp := &proto.DeviceAuthorizationFlow{}
- err = encryption.DecryptMessage(serverKey, c.key, resp.Body, flowInfoResp)
+ err = encryption.DecryptMessage(*serverKey, c.key, resp.Body, flowInfoResp)
if err != nil {
errWithMSG := fmt.Errorf("failed to decrypt device authorization flow message: %s", err)
log.Error(errWithMSG)
@@ -642,15 +660,21 @@ func (c *GrpcClient) GetDeviceAuthorizationFlow(serverKey wgtypes.Key) (*proto.D
// GetPKCEAuthorizationFlow returns a pkce authorization flow information.
// It also takes care of encrypting and decrypting messages.
-func (c *GrpcClient) GetPKCEAuthorizationFlow(serverKey wgtypes.Key) (*proto.PKCEAuthorizationFlow, error) {
+func (c *GrpcClient) GetPKCEAuthorizationFlow() (*proto.PKCEAuthorizationFlow, error) {
if !c.ready() {
return nil, fmt.Errorf("no connection to management in order to get pkce authorization flow")
}
+
+ serverKey, err := c.getServerPublicKey()
+ if err != nil {
+ return nil, err
+ }
+
mgmCtx, cancel := context.WithTimeout(c.ctx, time.Second*2)
defer cancel()
message := &proto.PKCEAuthorizationFlowRequest{}
- encryptedMSG, err := encryption.EncryptMessage(serverKey, c.key, message)
+ encryptedMSG, err := encryption.EncryptMessage(*serverKey, c.key, message)
if err != nil {
return nil, err
}
@@ -664,7 +688,7 @@ func (c *GrpcClient) GetPKCEAuthorizationFlow(serverKey wgtypes.Key) (*proto.PKC
}
flowInfoResp := &proto.PKCEAuthorizationFlow{}
- err = encryption.DecryptMessage(serverKey, c.key, resp.Body, flowInfoResp)
+ err = encryption.DecryptMessage(*serverKey, c.key, resp.Body, flowInfoResp)
if err != nil {
errWithMSG := fmt.Errorf("failed to decrypt pkce authorization flow message: %s", err)
log.Error(errWithMSG)
@@ -681,7 +705,7 @@ func (c *GrpcClient) SyncMeta(sysInfo *system.Info) error {
return errors.New(errMsgNoMgmtConnection)
}
- serverPubKey, err := c.GetServerPublicKey()
+ serverPubKey, err := c.getServerPublicKey()
if err != nil {
log.Debugf(errMsgMgmtPublicKey, err)
return err
@@ -724,7 +748,7 @@ func (c *GrpcClient) notifyConnected() {
}
func (c *GrpcClient) Logout() error {
- serverKey, err := c.GetServerPublicKey()
+ serverKey, err := c.getServerPublicKey()
if err != nil {
return fmt.Errorf("get server public key: %w", err)
}
@@ -751,7 +775,7 @@ func (c *GrpcClient) Logout() error {
// CreateExpose calls the management server to create a new expose service.
func (c *GrpcClient) CreateExpose(ctx context.Context, req ExposeRequest) (*ExposeResponse, error) {
- serverPubKey, err := c.GetServerPublicKey()
+ serverPubKey, err := c.getServerPublicKey()
if err != nil {
return nil, err
}
@@ -787,7 +811,7 @@ func (c *GrpcClient) CreateExpose(ctx context.Context, req ExposeRequest) (*Expo
// RenewExpose extends the TTL of an active expose session on the management server.
func (c *GrpcClient) RenewExpose(ctx context.Context, domain string) error {
- serverPubKey, err := c.GetServerPublicKey()
+ serverPubKey, err := c.getServerPublicKey()
if err != nil {
return err
}
@@ -810,7 +834,7 @@ func (c *GrpcClient) RenewExpose(ctx context.Context, domain string) error {
// StopExpose terminates an active expose session on the management server.
func (c *GrpcClient) StopExpose(ctx context.Context, domain string) error {
- serverPubKey, err := c.GetServerPublicKey()
+ serverPubKey, err := c.getServerPublicKey()
if err != nil {
return err
}
diff --git a/shared/management/client/mock.go b/shared/management/client/mock.go
index 548e379e8..361e8ffad 100644
--- a/shared/management/client/mock.go
+++ b/shared/management/client/mock.go
@@ -3,8 +3,6 @@ package client
import (
"context"
- "golang.zx2c4.com/wireguard/wgctrl/wgtypes"
-
"github.com/netbirdio/netbird/client/system"
"github.com/netbirdio/netbird/shared/management/domain"
"github.com/netbirdio/netbird/shared/management/proto"
@@ -14,12 +12,12 @@ import (
type MockClient struct {
CloseFunc func() error
SyncFunc func(ctx context.Context, sysInfo *system.Info, msgHandler func(msg *proto.SyncResponse) error) error
- GetServerPublicKeyFunc func() (*wgtypes.Key, error)
- RegisterFunc func(serverKey wgtypes.Key, setupKey string, jwtToken string, info *system.Info, sshKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error)
- LoginFunc func(serverKey wgtypes.Key, info *system.Info, sshKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error)
- GetDeviceAuthorizationFlowFunc func(serverKey wgtypes.Key) (*proto.DeviceAuthorizationFlow, error)
- GetPKCEAuthorizationFlowFunc func(serverKey wgtypes.Key) (*proto.PKCEAuthorizationFlow, error)
+ RegisterFunc func(setupKey string, jwtToken string, info *system.Info, sshKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error)
+ LoginFunc func(info *system.Info, sshKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error)
+ GetDeviceAuthorizationFlowFunc func() (*proto.DeviceAuthorizationFlow, error)
+ GetPKCEAuthorizationFlowFunc func() (*proto.PKCEAuthorizationFlow, error)
GetServerURLFunc func() string
+ HealthCheckFunc func() error
SyncMetaFunc func(sysInfo *system.Info) error
LogoutFunc func() error
JobFunc func(ctx context.Context, msgHandler func(msg *proto.JobRequest) *proto.JobResponse) error
@@ -53,39 +51,39 @@ func (m *MockClient) Job(ctx context.Context, msgHandler func(msg *proto.JobRequ
return m.JobFunc(ctx, msgHandler)
}
-func (m *MockClient) GetServerPublicKey() (*wgtypes.Key, error) {
- if m.GetServerPublicKeyFunc == nil {
- return nil, nil
- }
- return m.GetServerPublicKeyFunc()
-}
-
-func (m *MockClient) Register(serverKey wgtypes.Key, setupKey string, jwtToken string, info *system.Info, sshKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error) {
+func (m *MockClient) Register(setupKey string, jwtToken string, info *system.Info, sshKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error) {
if m.RegisterFunc == nil {
return nil, nil
}
- return m.RegisterFunc(serverKey, setupKey, jwtToken, info, sshKey, dnsLabels)
+ return m.RegisterFunc(setupKey, jwtToken, info, sshKey, dnsLabels)
}
-func (m *MockClient) Login(serverKey wgtypes.Key, info *system.Info, sshKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error) {
+func (m *MockClient) Login(info *system.Info, sshKey []byte, dnsLabels domain.List) (*proto.LoginResponse, error) {
if m.LoginFunc == nil {
return nil, nil
}
- return m.LoginFunc(serverKey, info, sshKey, dnsLabels)
+ return m.LoginFunc(info, sshKey, dnsLabels)
}
-func (m *MockClient) GetDeviceAuthorizationFlow(serverKey wgtypes.Key) (*proto.DeviceAuthorizationFlow, error) {
+func (m *MockClient) GetDeviceAuthorizationFlow() (*proto.DeviceAuthorizationFlow, error) {
if m.GetDeviceAuthorizationFlowFunc == nil {
return nil, nil
}
- return m.GetDeviceAuthorizationFlowFunc(serverKey)
+ return m.GetDeviceAuthorizationFlowFunc()
}
-func (m *MockClient) GetPKCEAuthorizationFlow(serverKey wgtypes.Key) (*proto.PKCEAuthorizationFlow, error) {
+func (m *MockClient) GetPKCEAuthorizationFlow() (*proto.PKCEAuthorizationFlow, error) {
if m.GetPKCEAuthorizationFlowFunc == nil {
return nil, nil
}
- return m.GetPKCEAuthorizationFlowFunc(serverKey)
+ return m.GetPKCEAuthorizationFlowFunc()
+}
+
+func (m *MockClient) HealthCheck() error {
+ if m.HealthCheckFunc == nil {
+ return nil
+ }
+ return m.HealthCheckFunc()
}
// GetNetworkMap mock implementation of GetNetworkMap from Client interface.
diff --git a/shared/management/http/api/openapi.yml b/shared/management/http/api/openapi.yml
index 766fdf0de..0b855db67 100644
--- a/shared/management/http/api/openapi.yml
+++ b/shared/management/http/api/openapi.yml
@@ -2860,6 +2860,11 @@ components:
type: string
description: "Protocol type: http, tcp, or udp"
example: "http"
+ metadata:
+ type: object
+ additionalProperties:
+ type: string
+ description: "Extra context about the request (e.g. crowdsec_verdict)"
required:
- id
- service_id
@@ -3258,6 +3263,14 @@ components:
pattern: '^[a-zA-Z]{2}$'
example: "DE"
description: ISO 3166-1 alpha-2 country codes to block.
+ crowdsec_mode:
+ type: string
+ enum:
+ - "off"
+ - "enforce"
+ - "observe"
+ default: "off"
+ description: CrowdSec IP reputation mode. Only available when the proxy cluster supports CrowdSec.
PasswordAuthConfig:
type: object
properties:
@@ -3361,6 +3374,10 @@ components:
type: boolean
description: Whether a subdomain label is required in front of this domain. When true, the domain cannot be used bare.
example: false
+ supports_crowdsec:
+ type: boolean
+ description: Whether the proxy cluster has CrowdSec configured
+ example: false
required:
- id
- domain
diff --git a/shared/management/http/api/types.gen.go b/shared/management/http/api/types.gen.go
index 14bb6ee03..0317b8183 100644
--- a/shared/management/http/api/types.gen.go
+++ b/shared/management/http/api/types.gen.go
@@ -17,6 +17,27 @@ const (
TokenAuthScopes = "TokenAuth.Scopes"
)
+// Defines values for AccessRestrictionsCrowdsecMode.
+const (
+ AccessRestrictionsCrowdsecModeEnforce AccessRestrictionsCrowdsecMode = "enforce"
+ AccessRestrictionsCrowdsecModeObserve AccessRestrictionsCrowdsecMode = "observe"
+ AccessRestrictionsCrowdsecModeOff AccessRestrictionsCrowdsecMode = "off"
+)
+
+// Valid indicates whether the value is a known member of the AccessRestrictionsCrowdsecMode enum.
+func (e AccessRestrictionsCrowdsecMode) Valid() bool {
+ switch e {
+ case AccessRestrictionsCrowdsecModeEnforce:
+ return true
+ case AccessRestrictionsCrowdsecModeObserve:
+ return true
+ case AccessRestrictionsCrowdsecModeOff:
+ return true
+ default:
+ return false
+ }
+}
+
// Defines values for CreateAzureIntegrationRequestHost.
const (
CreateAzureIntegrationRequestHostMicrosoftCom CreateAzureIntegrationRequestHost = "microsoft.com"
@@ -1326,8 +1347,14 @@ type AccessRestrictions struct {
// BlockedCountries ISO 3166-1 alpha-2 country codes to block.
BlockedCountries *[]string `json:"blocked_countries,omitempty"`
+
+ // CrowdsecMode CrowdSec IP reputation mode. Only available when the proxy cluster supports CrowdSec.
+ CrowdsecMode *AccessRestrictionsCrowdsecMode `json:"crowdsec_mode,omitempty"`
}
+// AccessRestrictionsCrowdsecMode CrowdSec IP reputation mode. Only available when the proxy cluster supports CrowdSec.
+type AccessRestrictionsCrowdsecMode string
+
// AccessiblePeer defines model for AccessiblePeer.
type AccessiblePeer struct {
// CityName Commonly used English name of the city
@@ -3680,6 +3707,9 @@ type ProxyAccessLog struct {
// Id Unique identifier for the access log entry
Id string `json:"id"`
+ // Metadata Extra context about the request (e.g. crowdsec_verdict)
+ Metadata *map[string]string `json:"metadata,omitempty"`
+
// Method HTTP method of the request
Method string `json:"method"`
@@ -3759,6 +3789,9 @@ type ReverseProxyDomain struct {
// RequireSubdomain Whether a subdomain label is required in front of this domain. When true, the domain cannot be used bare.
RequireSubdomain *bool `json:"require_subdomain,omitempty"`
+ // SupportsCrowdsec Whether the proxy cluster has CrowdSec configured
+ SupportsCrowdsec *bool `json:"supports_crowdsec,omitempty"`
+
// SupportsCustomPorts Whether the cluster supports binding arbitrary TCP/UDP ports
SupportsCustomPorts *bool `json:"supports_custom_ports,omitempty"`
diff --git a/shared/management/proto/proxy_service.pb.go b/shared/management/proto/proxy_service.pb.go
index 81637f69e..1095b6411 100644
--- a/shared/management/proto/proxy_service.pb.go
+++ b/shared/management/proto/proxy_service.pb.go
@@ -186,6 +186,8 @@ type ProxyCapabilities struct {
// Whether the proxy requires a subdomain label in front of its cluster domain.
// When true, accounts cannot use the cluster domain bare.
RequireSubdomain *bool `protobuf:"varint,2,opt,name=require_subdomain,json=requireSubdomain,proto3,oneof" json:"require_subdomain,omitempty"`
+ // Whether the proxy has CrowdSec configured and can enforce IP reputation checks.
+ SupportsCrowdsec *bool `protobuf:"varint,3,opt,name=supports_crowdsec,json=supportsCrowdsec,proto3,oneof" json:"supports_crowdsec,omitempty"`
}
func (x *ProxyCapabilities) Reset() {
@@ -234,6 +236,13 @@ func (x *ProxyCapabilities) GetRequireSubdomain() bool {
return false
}
+func (x *ProxyCapabilities) GetSupportsCrowdsec() bool {
+ if x != nil && x.SupportsCrowdsec != nil {
+ return *x.SupportsCrowdsec
+ }
+ return false
+}
+
// GetMappingUpdateRequest is sent to initialise a mapping stream.
type GetMappingUpdateRequest struct {
state protoimpl.MessageState
@@ -679,6 +688,8 @@ type AccessRestrictions struct {
BlockedCidrs []string `protobuf:"bytes,2,rep,name=blocked_cidrs,json=blockedCidrs,proto3" json:"blocked_cidrs,omitempty"`
AllowedCountries []string `protobuf:"bytes,3,rep,name=allowed_countries,json=allowedCountries,proto3" json:"allowed_countries,omitempty"`
BlockedCountries []string `protobuf:"bytes,4,rep,name=blocked_countries,json=blockedCountries,proto3" json:"blocked_countries,omitempty"`
+ // CrowdSec IP reputation mode: "", "off", "enforce", or "observe".
+ CrowdsecMode string `protobuf:"bytes,5,opt,name=crowdsec_mode,json=crowdsecMode,proto3" json:"crowdsec_mode,omitempty"`
}
func (x *AccessRestrictions) Reset() {
@@ -741,6 +752,13 @@ func (x *AccessRestrictions) GetBlockedCountries() []string {
return nil
}
+func (x *AccessRestrictions) GetCrowdsecMode() string {
+ if x != nil {
+ return x.CrowdsecMode
+ }
+ return ""
+}
+
type ProxyMapping struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -990,6 +1008,8 @@ type AccessLog struct {
BytesUpload int64 `protobuf:"varint,14,opt,name=bytes_upload,json=bytesUpload,proto3" json:"bytes_upload,omitempty"`
BytesDownload int64 `protobuf:"varint,15,opt,name=bytes_download,json=bytesDownload,proto3" json:"bytes_download,omitempty"`
Protocol string `protobuf:"bytes,16,opt,name=protocol,proto3" json:"protocol,omitempty"`
+ // Extra key-value metadata for the access log entry (e.g. crowdsec_verdict, scenario).
+ Metadata map[string]string `protobuf:"bytes,17,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
}
func (x *AccessLog) Reset() {
@@ -1136,6 +1156,13 @@ func (x *AccessLog) GetProtocol() string {
return ""
}
+func (x *AccessLog) GetMetadata() map[string]string {
+ if x != nil {
+ return x.Metadata
+ }
+ return nil
+}
+
type AuthenticateRequest struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@@ -1952,7 +1979,7 @@ var file_proxy_service_proto_rawDesc = []byte{
0x75, 0x66, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f,
- 0x74, 0x6f, 0x22, 0xae, 0x01, 0x0a, 0x11, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x43, 0x61, 0x70, 0x61,
+ 0x74, 0x6f, 0x22, 0xf6, 0x01, 0x0a, 0x11, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x43, 0x61, 0x70, 0x61,
0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x12, 0x37, 0x0a, 0x15, 0x73, 0x75, 0x70, 0x70,
0x6f, 0x72, 0x74, 0x73, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x70, 0x6f, 0x72, 0x74,
0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x48, 0x00, 0x52, 0x13, 0x73, 0x75, 0x70, 0x70, 0x6f,
@@ -1960,324 +1987,338 @@ var file_proxy_service_proto_rawDesc = []byte{
0x01, 0x12, 0x30, 0x0a, 0x11, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x62,
0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x48, 0x01, 0x52, 0x10,
0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x53, 0x75, 0x62, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e,
- 0x88, 0x01, 0x01, 0x42, 0x18, 0x0a, 0x16, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x73,
- 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x42, 0x14, 0x0a,
- 0x12, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x62, 0x64, 0x6f, 0x6d,
- 0x61, 0x69, 0x6e, 0x22, 0xe6, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69,
- 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x19, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x09, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65,
- 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72,
- 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f,
- 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73,
- 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12,
- 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, 0x41, 0x0a, 0x0c, 0x63, 0x61, 0x70,
- 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32,
- 0x1d, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f,
- 0x78, 0x79, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x0c,
- 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x22, 0x82, 0x01, 0x0a,
- 0x18, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74,
- 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x32, 0x0a, 0x07, 0x6d, 0x61, 0x70,
- 0x70, 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e,
- 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x4d, 0x61, 0x70,
- 0x70, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x6d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a,
- 0x15, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x63, 0x6f,
- 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x69, 0x6e,
- 0x69, 0x74, 0x69, 0x61, 0x6c, 0x53, 0x79, 0x6e, 0x63, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74,
- 0x65, 0x22, 0xce, 0x03, 0x0a, 0x11, 0x50, 0x61, 0x74, 0x68, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74,
- 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x26, 0x0a, 0x0f, 0x73, 0x6b, 0x69, 0x70, 0x5f,
- 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x69, 0x66, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08,
- 0x52, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x54, 0x6c, 0x73, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x12,
- 0x42, 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f,
- 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
- 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74,
- 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65,
- 0x6f, 0x75, 0x74, 0x12, 0x3e, 0x0a, 0x0c, 0x70, 0x61, 0x74, 0x68, 0x5f, 0x72, 0x65, 0x77, 0x72,
- 0x69, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x1b, 0x2e, 0x6d, 0x61, 0x6e, 0x61,
- 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x77, 0x72, 0x69,
- 0x74, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x0b, 0x70, 0x61, 0x74, 0x68, 0x52, 0x65, 0x77, 0x72,
- 0x69, 0x74, 0x65, 0x12, 0x57, 0x0a, 0x0e, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x68, 0x65,
- 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x6d, 0x61,
- 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x54, 0x61, 0x72,
- 0x67, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f,
- 0x6d, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0d, 0x63,
- 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x12, 0x25, 0x0a, 0x0e,
- 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x05,
- 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f,
- 0x63, 0x6f, 0x6c, 0x12, 0x4b, 0x0a, 0x14, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69,
- 0x64, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
- 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x73, 0x65,
- 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x6c, 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74,
- 0x1a, 0x40, 0x0a, 0x12, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72,
- 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
- 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02,
- 0x38, 0x01, 0x22, 0x72, 0x0a, 0x0b, 0x50, 0x61, 0x74, 0x68, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e,
- 0x67, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18,
- 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x37, 0x0a,
- 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d,
- 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x74, 0x68,
- 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f,
- 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x47, 0x0a, 0x0a, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72,
- 0x41, 0x75, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c,
- 0x68, 0x61, 0x73, 0x68, 0x65, 0x64, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x0b, 0x68, 0x61, 0x73, 0x68, 0x65, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x22,
- 0xe5, 0x01, 0x0a, 0x0e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69,
- 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65,
- 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e,
- 0x4b, 0x65, 0x79, 0x12, 0x35, 0x0a, 0x17, 0x6d, 0x61, 0x78, 0x5f, 0x73, 0x65, 0x73, 0x73, 0x69,
- 0x6f, 0x6e, 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x73, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x02,
- 0x20, 0x01, 0x28, 0x03, 0x52, 0x14, 0x6d, 0x61, 0x78, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e,
- 0x41, 0x67, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61,
- 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x70, 0x61,
- 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69, 0x6e, 0x18, 0x04, 0x20,
- 0x01, 0x28, 0x08, 0x52, 0x03, 0x70, 0x69, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x6f, 0x69, 0x64, 0x63,
- 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x6f, 0x69, 0x64, 0x63, 0x12, 0x39, 0x0a, 0x0c,
- 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x73, 0x18, 0x06, 0x20, 0x03,
- 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e,
- 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x52, 0x0b, 0x68, 0x65, 0x61, 0x64,
- 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x73, 0x22, 0xb8, 0x01, 0x0a, 0x12, 0x41, 0x63, 0x63, 0x65,
- 0x73, 0x73, 0x52, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x23,
- 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x5f, 0x63, 0x69, 0x64, 0x72, 0x73, 0x18,
- 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x43, 0x69,
- 0x64, 0x72, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x5f, 0x63,
- 0x69, 0x64, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x62, 0x6c, 0x6f, 0x63,
- 0x6b, 0x65, 0x64, 0x43, 0x69, 0x64, 0x72, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x61, 0x6c, 0x6c, 0x6f,
- 0x77, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20,
- 0x03, 0x28, 0x09, 0x52, 0x10, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x43, 0x6f, 0x75, 0x6e,
- 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2b, 0x0a, 0x11, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64,
- 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09,
- 0x52, 0x10, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x72, 0x69,
- 0x65, 0x73, 0x22, 0xe6, 0x03, 0x0a, 0x0c, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x4d, 0x61, 0x70, 0x70,
- 0x69, 0x6e, 0x67, 0x12, 0x36, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x0e, 0x32, 0x22, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50,
- 0x72, 0x6f, 0x78, 0x79, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74,
- 0x65, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69,
- 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61,
- 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f,
- 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61,
- 0x69, 0x6e, 0x12, 0x2b, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b,
- 0x32, 0x17, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x61,
- 0x74, 0x68, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12,
- 0x1d, 0x0a, 0x0a, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x06, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x2e,
- 0x0a, 0x04, 0x61, 0x75, 0x74, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d,
- 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e,
- 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x61, 0x75, 0x74, 0x68, 0x12, 0x28,
- 0x0a, 0x10, 0x70, 0x61, 0x73, 0x73, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64,
- 0x65, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x61, 0x73, 0x73, 0x48, 0x6f,
- 0x73, 0x74, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x72, 0x65, 0x77, 0x72,
- 0x69, 0x74, 0x65, 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x18, 0x09, 0x20,
- 0x01, 0x28, 0x08, 0x52, 0x10, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x64, 0x69,
- 0x72, 0x65, 0x63, 0x74, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x0a, 0x20,
- 0x01, 0x28, 0x09, 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6c, 0x69, 0x73,
- 0x74, 0x65, 0x6e, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a,
- 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x4f, 0x0a, 0x13, 0x61, 0x63,
- 0x63, 0x65, 0x73, 0x73, 0x5f, 0x72, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x69, 0x6f, 0x6e,
- 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65,
- 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x65, 0x73, 0x74, 0x72,
- 0x69, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x12, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52,
- 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x3f, 0x0a, 0x14, 0x53,
- 0x65, 0x6e, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75,
- 0x65, 0x73, 0x74, 0x12, 0x27, 0x0a, 0x03, 0x6c, 0x6f, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b,
- 0x32, 0x15, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x63,
- 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x03, 0x6c, 0x6f, 0x67, 0x22, 0x17, 0x0a, 0x15,
- 0x53, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x73,
- 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x86, 0x04, 0x0a, 0x09, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73,
- 0x4c, 0x6f, 0x67, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
- 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e,
- 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61,
- 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x15, 0x0a,
- 0x06, 0x6c, 0x6f, 0x67, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6c,
- 0x6f, 0x67, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f,
- 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e,
- 0x74, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69,
- 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
- 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x06,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x75,
- 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52,
- 0x0a, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6d,
- 0x65, 0x74, 0x68, 0x6f, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74,
- 0x68, 0x6f, 0x64, 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f,
- 0x63, 0x6f, 0x64, 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x6f, 0x75, 0x72,
- 0x63, 0x65, 0x5f, 0x69, 0x70, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x6f, 0x75,
- 0x72, 0x63, 0x65, 0x49, 0x70, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x6d, 0x65,
- 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x61,
- 0x75, 0x74, 0x68, 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x12, 0x17, 0x0a, 0x07,
- 0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75,
- 0x73, 0x65, 0x72, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, 0x75,
- 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x61, 0x75, 0x74,
- 0x68, 0x53, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x79, 0x74, 0x65,
- 0x73, 0x5f, 0x75, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b,
- 0x62, 0x79, 0x74, 0x65, 0x73, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x62,
- 0x79, 0x74, 0x65, 0x73, 0x5f, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x0f, 0x20,
- 0x01, 0x28, 0x03, 0x52, 0x0d, 0x62, 0x79, 0x74, 0x65, 0x73, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f,
- 0x61, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x10,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x22, 0xf8,
- 0x01, 0x0a, 0x13, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52,
+ 0x88, 0x01, 0x01, 0x12, 0x30, 0x0a, 0x11, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x5f,
+ 0x63, 0x72, 0x6f, 0x77, 0x64, 0x73, 0x65, 0x63, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x48, 0x02,
+ 0x52, 0x10, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x43, 0x72, 0x6f, 0x77, 0x64, 0x73,
+ 0x65, 0x63, 0x88, 0x01, 0x01, 0x42, 0x18, 0x0a, 0x16, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72,
+ 0x74, 0x73, 0x5f, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x73, 0x42,
+ 0x14, 0x0a, 0x12, 0x5f, 0x72, 0x65, 0x71, 0x75, 0x69, 0x72, 0x65, 0x5f, 0x73, 0x75, 0x62, 0x64,
+ 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x42, 0x14, 0x0a, 0x12, 0x5f, 0x73, 0x75, 0x70, 0x70, 0x6f, 0x72,
+ 0x74, 0x73, 0x5f, 0x63, 0x72, 0x6f, 0x77, 0x64, 0x73, 0x65, 0x63, 0x22, 0xe6, 0x01, 0x0a, 0x17,
+ 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65,
+ 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x78, 0x79,
+ 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x70, 0x72, 0x6f, 0x78, 0x79,
+ 0x49, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x39, 0x0a, 0x0a,
+ 0x73, 0x74, 0x61, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x73, 0x74,
+ 0x61, 0x72, 0x74, 0x65, 0x64, 0x41, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65,
+ 0x73, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73,
+ 0x73, 0x12, 0x41, 0x0a, 0x0c, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69, 0x74, 0x69, 0x65,
+ 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65,
+ 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x43, 0x61, 0x70, 0x61, 0x62, 0x69,
+ 0x6c, 0x69, 0x74, 0x69, 0x65, 0x73, 0x52, 0x0c, 0x63, 0x61, 0x70, 0x61, 0x62, 0x69, 0x6c, 0x69,
+ 0x74, 0x69, 0x65, 0x73, 0x22, 0x82, 0x01, 0x0a, 0x18, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70,
+ 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x12, 0x32, 0x0a, 0x07, 0x6d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x18, 0x01, 0x20, 0x03,
+ 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e,
+ 0x50, 0x72, 0x6f, 0x78, 0x79, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x52, 0x07, 0x6d, 0x61,
+ 0x70, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x32, 0x0a, 0x15, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c,
+ 0x5f, 0x73, 0x79, 0x6e, 0x63, 0x5f, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x13, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x53, 0x79, 0x6e,
+ 0x63, 0x43, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x22, 0xce, 0x03, 0x0a, 0x11, 0x50, 0x61,
+ 0x74, 0x68, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12,
+ 0x26, 0x0a, 0x0f, 0x73, 0x6b, 0x69, 0x70, 0x5f, 0x74, 0x6c, 0x73, 0x5f, 0x76, 0x65, 0x72, 0x69,
+ 0x66, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x73, 0x6b, 0x69, 0x70, 0x54, 0x6c,
+ 0x73, 0x56, 0x65, 0x72, 0x69, 0x66, 0x79, 0x12, 0x42, 0x0a, 0x0f, 0x72, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b,
+ 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
+ 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x0e, 0x72, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x12, 0x3e, 0x0a, 0x0c, 0x70,
+ 0x61, 0x74, 0x68, 0x5f, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
+ 0x0e, 0x32, 0x1b, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50,
+ 0x61, 0x74, 0x68, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x52, 0x0b,
+ 0x70, 0x61, 0x74, 0x68, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x12, 0x57, 0x0a, 0x0e, 0x63,
+ 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20,
+ 0x03, 0x28, 0x0b, 0x32, 0x30, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74,
+ 0x2e, 0x50, 0x61, 0x74, 0x68, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4f, 0x70, 0x74, 0x69, 0x6f,
+ 0x6e, 0x73, 0x2e, 0x43, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73,
+ 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x0d, 0x63, 0x75, 0x73, 0x74, 0x6f, 0x6d, 0x48, 0x65, 0x61,
+ 0x64, 0x65, 0x72, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x5f, 0x70, 0x72,
+ 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x70, 0x72,
+ 0x6f, 0x78, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x4b, 0x0a, 0x14, 0x73,
+ 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x69, 0x64, 0x6c, 0x65, 0x5f, 0x74, 0x69, 0x6d, 0x65,
+ 0x6f, 0x75, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
+ 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x44, 0x75, 0x72, 0x61,
+ 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x12, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x49, 0x64, 0x6c,
+ 0x65, 0x54, 0x69, 0x6d, 0x65, 0x6f, 0x75, 0x74, 0x1a, 0x40, 0x0a, 0x12, 0x43, 0x75, 0x73, 0x74,
+ 0x6f, 0x6d, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x73, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10,
+ 0x0a, 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79,
+ 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x72, 0x0a, 0x0b, 0x50, 0x61,
+ 0x74, 0x68, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74,
+ 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a,
+ 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x74,
+ 0x61, 0x72, 0x67, 0x65, 0x74, 0x12, 0x37, 0x0a, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d,
+ 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x54, 0x61, 0x72, 0x67, 0x65, 0x74, 0x4f, 0x70,
+ 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x07, 0x6f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x47,
+ 0x0a, 0x0a, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x06,
+ 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x68, 0x65,
+ 0x61, 0x64, 0x65, 0x72, 0x12, 0x21, 0x0a, 0x0c, 0x68, 0x61, 0x73, 0x68, 0x65, 0x64, 0x5f, 0x76,
+ 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x68, 0x61, 0x73, 0x68,
+ 0x65, 0x64, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xe5, 0x01, 0x0a, 0x0e, 0x41, 0x75, 0x74, 0x68,
+ 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x1f, 0x0a, 0x0b, 0x73, 0x65,
+ 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0a, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x4b, 0x65, 0x79, 0x12, 0x35, 0x0a, 0x17, 0x6d,
+ 0x61, 0x78, 0x5f, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x61, 0x67, 0x65, 0x5f, 0x73,
+ 0x65, 0x63, 0x6f, 0x6e, 0x64, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x14, 0x6d, 0x61,
+ 0x78, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x41, 0x67, 0x65, 0x53, 0x65, 0x63, 0x6f, 0x6e,
+ 0x64, 0x73, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x03,
+ 0x20, 0x01, 0x28, 0x08, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x10,
+ 0x0a, 0x03, 0x70, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x03, 0x70, 0x69, 0x6e,
+ 0x12, 0x12, 0x0a, 0x04, 0x6f, 0x69, 0x64, 0x63, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04,
+ 0x6f, 0x69, 0x64, 0x63, 0x12, 0x39, 0x0a, 0x0c, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x61,
+ 0x75, 0x74, 0x68, 0x73, 0x18, 0x06, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e,
+ 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x75,
+ 0x74, 0x68, 0x52, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x73, 0x22,
+ 0xdd, 0x01, 0x0a, 0x12, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x65, 0x73, 0x74, 0x72, 0x69,
+ 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65,
+ 0x64, 0x5f, 0x63, 0x69, 0x64, 0x72, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0c, 0x61,
+ 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x43, 0x69, 0x64, 0x72, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x62,
+ 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x5f, 0x63, 0x69, 0x64, 0x72, 0x73, 0x18, 0x02, 0x20, 0x03,
+ 0x28, 0x09, 0x52, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x43, 0x69, 0x64, 0x72, 0x73,
+ 0x12, 0x2b, 0x0a, 0x11, 0x61, 0x6c, 0x6c, 0x6f, 0x77, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x75, 0x6e,
+ 0x74, 0x72, 0x69, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x61, 0x6c, 0x6c,
+ 0x6f, 0x77, 0x65, 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x2b, 0x0a,
+ 0x11, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x65, 0x64, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x72, 0x69,
+ 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x09, 0x52, 0x10, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x65,
+ 0x64, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x72, 0x69, 0x65, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x63, 0x72,
+ 0x6f, 0x77, 0x64, 0x73, 0x65, 0x63, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0c, 0x63, 0x72, 0x6f, 0x77, 0x64, 0x73, 0x65, 0x63, 0x4d, 0x6f, 0x64, 0x65, 0x22,
+ 0xe6, 0x03, 0x0a, 0x0c, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67,
+ 0x12, 0x36, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x22,
+ 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x78,
+ 0x79, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x79,
+ 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x02,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f,
+ 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63,
+ 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69,
+ 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12,
+ 0x2b, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x05, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x17, 0x2e,
+ 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x74, 0x68, 0x4d,
+ 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1d, 0x0a, 0x0a,
+ 0x61, 0x75, 0x74, 0x68, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x09, 0x61, 0x75, 0x74, 0x68, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x2e, 0x0a, 0x04, 0x61,
+ 0x75, 0x74, 0x68, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x6d, 0x61, 0x6e, 0x61,
+ 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63,
+ 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x04, 0x61, 0x75, 0x74, 0x68, 0x12, 0x28, 0x0a, 0x10, 0x70,
+ 0x61, 0x73, 0x73, 0x5f, 0x68, 0x6f, 0x73, 0x74, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x18,
+ 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0e, 0x70, 0x61, 0x73, 0x73, 0x48, 0x6f, 0x73, 0x74, 0x48,
+ 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x2b, 0x0a, 0x11, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65,
+ 0x5f, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x73, 0x18, 0x09, 0x20, 0x01, 0x28, 0x08,
+ 0x52, 0x10, 0x72, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x52, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63,
+ 0x74, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09,
+ 0x52, 0x04, 0x6d, 0x6f, 0x64, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x6c, 0x69, 0x73, 0x74, 0x65, 0x6e,
+ 0x5f, 0x70, 0x6f, 0x72, 0x74, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0a, 0x6c, 0x69, 0x73,
+ 0x74, 0x65, 0x6e, 0x50, 0x6f, 0x72, 0x74, 0x12, 0x4f, 0x0a, 0x13, 0x61, 0x63, 0x63, 0x65, 0x73,
+ 0x73, 0x5f, 0x72, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0c,
+ 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e,
+ 0x74, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x65, 0x73, 0x74, 0x72, 0x69, 0x63, 0x74,
+ 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x12, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x52, 0x65, 0x73, 0x74,
+ 0x72, 0x69, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x22, 0x3f, 0x0a, 0x14, 0x53, 0x65, 0x6e, 0x64,
+ 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x27, 0x0a, 0x03, 0x6c, 0x6f, 0x67, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x15, 0x2e,
+ 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73,
+ 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x03, 0x6c, 0x6f, 0x67, 0x22, 0x17, 0x0a, 0x15, 0x53, 0x65, 0x6e,
+ 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x22, 0x84, 0x05, 0x0a, 0x09, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67,
+ 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
+ 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52,
+ 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x15, 0x0a, 0x06, 0x6c, 0x6f,
+ 0x67, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x6c, 0x6f, 0x67, 0x49,
+ 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18,
+ 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64,
+ 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x04,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x64, 0x12,
+ 0x12, 0x0a, 0x04, 0x68, 0x6f, 0x73, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68,
+ 0x6f, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x70, 0x61, 0x74, 0x68, 0x18, 0x06, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, 0x12, 0x1f, 0x0a, 0x0b, 0x64, 0x75, 0x72, 0x61, 0x74,
+ 0x69, 0x6f, 0x6e, 0x5f, 0x6d, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0a, 0x64, 0x75,
+ 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4d, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x68,
+ 0x6f, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64,
+ 0x12, 0x23, 0x0a, 0x0d, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x5f, 0x63, 0x6f, 0x64,
+ 0x65, 0x18, 0x09, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x72, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73,
+ 0x65, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x1b, 0x0a, 0x09, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f,
+ 0x69, 0x70, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65,
+ 0x49, 0x70, 0x12, 0x25, 0x0a, 0x0e, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x6d, 0x65, 0x63, 0x68, 0x61,
+ 0x6e, 0x69, 0x73, 0x6d, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x61, 0x75, 0x74, 0x68,
+ 0x4d, 0x65, 0x63, 0x68, 0x61, 0x6e, 0x69, 0x73, 0x6d, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65,
+ 0x72, 0x5f, 0x69, 0x64, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x73, 0x65, 0x72,
+ 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x61, 0x75, 0x74, 0x68, 0x5f, 0x73, 0x75, 0x63, 0x63, 0x65,
+ 0x73, 0x73, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x61, 0x75, 0x74, 0x68, 0x53, 0x75,
+ 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x75,
+ 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x62, 0x79, 0x74,
+ 0x65, 0x73, 0x55, 0x70, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x25, 0x0a, 0x0e, 0x62, 0x79, 0x74, 0x65,
+ 0x73, 0x5f, 0x64, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x03,
+ 0x52, 0x0d, 0x62, 0x79, 0x74, 0x65, 0x73, 0x44, 0x6f, 0x77, 0x6e, 0x6c, 0x6f, 0x61, 0x64, 0x12,
+ 0x1a, 0x0a, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x18, 0x10, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x08, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x63, 0x6f, 0x6c, 0x12, 0x3f, 0x0a, 0x08, 0x6d,
+ 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x11, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x23, 0x2e,
+ 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x63, 0x63, 0x65, 0x73,
+ 0x73, 0x4c, 0x6f, 0x67, 0x2e, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74,
+ 0x72, 0x79, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x1a, 0x3b, 0x0a, 0x0d,
+ 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a,
+ 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12,
+ 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05,
+ 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0xf8, 0x01, 0x0a, 0x13, 0x41, 0x75,
+ 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
+ 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69,
+ 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64,
+ 0x12, 0x39, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x03, 0x20, 0x01,
+ 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e,
+ 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48,
+ 0x00, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x12, 0x2a, 0x0a, 0x03, 0x70,
+ 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67,
+ 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x48, 0x00, 0x52, 0x03, 0x70, 0x69, 0x6e, 0x12, 0x40, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65,
+ 0x72, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x6d,
+ 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72,
+ 0x41, 0x75, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0a, 0x68,
+ 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x42, 0x09, 0x0a, 0x07, 0x72, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x22, 0x57, 0x0a, 0x11, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x75,
+ 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21, 0x0a, 0x0c, 0x68, 0x65, 0x61,
+ 0x64, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
+ 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, 0x0b,
+ 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
+ 0x09, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x2d, 0x0a,
+ 0x0f, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0x1e, 0x0a, 0x0a,
+ 0x50, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x70, 0x69,
+ 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x70, 0x69, 0x6e, 0x22, 0x55, 0x0a, 0x14,
+ 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70,
+ 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18,
+ 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x23,
+ 0x0a, 0x0d, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18,
+ 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x6f,
+ 0x6b, 0x65, 0x6e, 0x22, 0xf3, 0x01, 0x0a, 0x17, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74,
+ 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
+ 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x64, 0x12, 0x1d,
+ 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x2f, 0x0a,
+ 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x17, 0x2e,
+ 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x72, 0x6f, 0x78, 0x79,
+ 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x2d,
+ 0x0a, 0x12, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x5f, 0x69, 0x73,
+ 0x73, 0x75, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x63, 0x65, 0x72, 0x74,
+ 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x49, 0x73, 0x73, 0x75, 0x65, 0x64, 0x12, 0x28, 0x0a,
+ 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x05,
+ 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73,
+ 0x73, 0x61, 0x67, 0x65, 0x88, 0x01, 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x65, 0x72, 0x72, 0x6f,
+ 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x1a, 0x0a, 0x18, 0x53, 0x65, 0x6e,
+ 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xb8, 0x01, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65,
+ 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01,
+ 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x49, 0x64, 0x12,
+ 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20,
+ 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x14,
+ 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74,
+ 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x30, 0x0a, 0x14, 0x77, 0x69, 0x72, 0x65, 0x67, 0x75, 0x61, 0x72,
+ 0x64, 0x5f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x18, 0x04, 0x20, 0x01,
+ 0x28, 0x09, 0x52, 0x12, 0x77, 0x69, 0x72, 0x65, 0x67, 0x75, 0x61, 0x72, 0x64, 0x50, 0x75, 0x62,
+ 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65,
+ 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72,
+ 0x22, 0x6f, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50,
+ 0x65, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73,
+ 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75,
+ 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x28, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d,
+ 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c,
+ 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x88, 0x01, 0x01, 0x42,
+ 0x10, 0x0a, 0x0e, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67,
+ 0x65, 0x22, 0x65, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, 0x43, 0x55, 0x52, 0x4c, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e,
0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f,
- 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x39, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72,
- 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65,
- 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64,
- 0x12, 0x2a, 0x0a, 0x03, 0x70, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x16, 0x2e,
- 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x50, 0x69, 0x6e, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x48, 0x00, 0x52, 0x03, 0x70, 0x69, 0x6e, 0x12, 0x40, 0x0a, 0x0b,
- 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x61, 0x75, 0x74, 0x68, 0x18, 0x05, 0x20, 0x01, 0x28,
- 0x0b, 0x32, 0x1d, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x48,
- 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
- 0x48, 0x00, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x42, 0x09,
- 0x0a, 0x07, 0x72, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x22, 0x57, 0x0a, 0x11, 0x48, 0x65, 0x61,
- 0x64, 0x65, 0x72, 0x41, 0x75, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x21,
- 0x0a, 0x0c, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01,
- 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x56, 0x61, 0x6c, 0x75,
- 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x6e, 0x61, 0x6d, 0x65,
- 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x4e, 0x61,
- 0x6d, 0x65, 0x22, 0x2d, 0x0a, 0x0f, 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72,
- 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72,
- 0x64, 0x22, 0x1e, 0x0a, 0x0a, 0x50, 0x69, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x10, 0x0a, 0x03, 0x70, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x70, 0x69,
- 0x6e, 0x22, 0x55, 0x0a, 0x14, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74,
- 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63,
- 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63,
- 0x65, 0x73, 0x73, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74,
- 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x65, 0x73, 0x73,
- 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0xf3, 0x01, 0x0a, 0x17, 0x53, 0x65, 0x6e,
- 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71,
- 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f,
- 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
- 0x65, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69,
- 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74,
- 0x49, 0x64, 0x12, 0x2f, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x0e, 0x32, 0x17, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e,
- 0x50, 0x72, 0x6f, 0x78, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61,
- 0x74, 0x75, 0x73, 0x12, 0x2d, 0x0a, 0x12, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61,
- 0x74, 0x65, 0x5f, 0x69, 0x73, 0x73, 0x75, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52,
- 0x11, 0x63, 0x65, 0x72, 0x74, 0x69, 0x66, 0x69, 0x63, 0x61, 0x74, 0x65, 0x49, 0x73, 0x73, 0x75,
- 0x65, 0x64, 0x12, 0x28, 0x0a, 0x0d, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73,
- 0x61, 0x67, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, 0x52, 0x0c, 0x65, 0x72, 0x72,
- 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x88, 0x01, 0x01, 0x42, 0x10, 0x0a, 0x0e,
- 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x1a,
- 0x0a, 0x18, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, 0x61,
- 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0xb8, 0x01, 0x0a, 0x16, 0x43,
- 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
- 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69,
- 0x63, 0x65, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f,
- 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e,
- 0x74, 0x49, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x03, 0x20, 0x01,
- 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x12, 0x30, 0x0a, 0x14, 0x77, 0x69, 0x72,
- 0x65, 0x67, 0x75, 0x61, 0x72, 0x64, 0x5f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65,
- 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x77, 0x69, 0x72, 0x65, 0x67, 0x75, 0x61,
- 0x72, 0x64, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x12, 0x18, 0x0a, 0x07, 0x63,
- 0x6c, 0x75, 0x73, 0x74, 0x65, 0x72, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x63, 0x6c,
- 0x75, 0x73, 0x74, 0x65, 0x72, 0x22, 0x6f, 0x0a, 0x17, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50,
- 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
- 0x12, 0x18, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28,
- 0x08, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x73, 0x73, 0x12, 0x28, 0x0a, 0x0d, 0x65, 0x72,
- 0x72, 0x6f, 0x72, 0x5f, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
- 0x09, 0x48, 0x00, 0x52, 0x0c, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x4d, 0x65, 0x73, 0x73, 0x61, 0x67,
- 0x65, 0x88, 0x01, 0x01, 0x42, 0x10, 0x0a, 0x0e, 0x5f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6d,
- 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x65, 0x0a, 0x11, 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44,
- 0x43, 0x55, 0x52, 0x4c, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x0e, 0x0a, 0x02, 0x69,
- 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x61,
- 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x09, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65,
- 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x0b, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63, 0x74, 0x55, 0x72, 0x6c, 0x22, 0x26, 0x0a,
- 0x12, 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, 0x43, 0x55, 0x52, 0x4c, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
- 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x55, 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74,
- 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
- 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x65, 0x73, 0x73, 0x69,
- 0x6f, 0x6e, 0x5f, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c,
- 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x8c, 0x01, 0x0a,
- 0x17, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e,
- 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x69,
- 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x12, 0x17,
- 0x0a, 0x07, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52,
- 0x06, 0x75, 0x73, 0x65, 0x72, 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x75, 0x73, 0x65, 0x72, 0x5f,
- 0x65, 0x6d, 0x61, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x75, 0x73, 0x65,
- 0x72, 0x45, 0x6d, 0x61, 0x69, 0x6c, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x6e, 0x69, 0x65, 0x64,
- 0x5f, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64,
- 0x65, 0x6e, 0x69, 0x65, 0x64, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x2a, 0x64, 0x0a, 0x16, 0x50,
- 0x72, 0x6f, 0x78, 0x79, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74,
- 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x17, 0x0a, 0x13, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f,
- 0x54, 0x59, 0x50, 0x45, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x00, 0x12, 0x18,
- 0x0a, 0x14, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x4f,
- 0x44, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x55, 0x50, 0x44, 0x41,
- 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x52, 0x45, 0x4d, 0x4f, 0x56, 0x45, 0x44, 0x10,
- 0x02, 0x2a, 0x46, 0x0a, 0x0f, 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65,
- 0x4d, 0x6f, 0x64, 0x65, 0x12, 0x18, 0x0a, 0x14, 0x50, 0x41, 0x54, 0x48, 0x5f, 0x52, 0x45, 0x57,
- 0x52, 0x49, 0x54, 0x45, 0x5f, 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x00, 0x12, 0x19,
- 0x0a, 0x15, 0x50, 0x41, 0x54, 0x48, 0x5f, 0x52, 0x45, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x50,
- 0x52, 0x45, 0x53, 0x45, 0x52, 0x56, 0x45, 0x10, 0x01, 0x2a, 0xc8, 0x01, 0x0a, 0x0b, 0x50, 0x72,
- 0x6f, 0x78, 0x79, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x14, 0x50, 0x52, 0x4f,
- 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e,
- 0x47, 0x10, 0x00, 0x12, 0x17, 0x0a, 0x13, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41,
- 0x54, 0x55, 0x53, 0x5f, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01, 0x12, 0x23, 0x0a, 0x1f,
- 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x54, 0x55, 0x4e,
- 0x4e, 0x45, 0x4c, 0x5f, 0x4e, 0x4f, 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10,
- 0x02, 0x12, 0x24, 0x0a, 0x20, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55,
- 0x53, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x50, 0x45,
- 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x03, 0x12, 0x23, 0x0a, 0x1f, 0x50, 0x52, 0x4f, 0x58, 0x59,
- 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43,
- 0x41, 0x54, 0x45, 0x5f, 0x46, 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12,
- 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x45, 0x52, 0x52,
- 0x4f, 0x52, 0x10, 0x05, 0x32, 0xfc, 0x04, 0x0a, 0x0c, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x53, 0x65,
- 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x5f, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70,
- 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x23, 0x2e, 0x6d, 0x61, 0x6e, 0x61,
- 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e,
- 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24,
- 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x4d,
- 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70,
- 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x54, 0x0a, 0x0d, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x63,
- 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x12, 0x20, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65,
- 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c,
- 0x6f, 0x67, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x61, 0x6e, 0x61,
- 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73,
- 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c,
- 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x1f, 0x2e, 0x6d,
- 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e,
- 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e,
- 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65,
- 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12,
- 0x5d, 0x0a, 0x10, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64,
- 0x61, 0x74, 0x65, 0x12, 0x23, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74,
- 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74,
- 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67,
- 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73,
- 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5a,
- 0x0a, 0x0f, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65, 0x65,
- 0x72, 0x12, 0x22, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43,
- 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65,
- 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65,
- 0x6e, 0x74, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65,
- 0x65, 0x72, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x0a, 0x47, 0x65,
- 0x74, 0x4f, 0x49, 0x44, 0x43, 0x55, 0x52, 0x4c, 0x12, 0x1d, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67,
- 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, 0x43, 0x55, 0x52, 0x4c,
- 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65,
- 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, 0x43, 0x55, 0x52, 0x4c, 0x52,
- 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5a, 0x0a, 0x0f, 0x56, 0x61, 0x6c, 0x69, 0x64,
- 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x2e, 0x6d, 0x61, 0x6e,
- 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65,
- 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23,
- 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x56, 0x61, 0x6c, 0x69,
+ 0x75, 0x6e, 0x74, 0x49, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x72, 0x65, 0x64, 0x69, 0x72, 0x65, 0x63,
+ 0x74, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x72, 0x65, 0x64,
+ 0x69, 0x72, 0x65, 0x63, 0x74, 0x55, 0x72, 0x6c, 0x22, 0x26, 0x0a, 0x12, 0x47, 0x65, 0x74, 0x4f,
+ 0x49, 0x44, 0x43, 0x55, 0x52, 0x4c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x10,
+ 0x0a, 0x03, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c,
+ 0x22, 0x55, 0x0a, 0x16, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73,
+ 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f,
+ 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61,
+ 0x69, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x73, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x5f, 0x74, 0x6f,
+ 0x6b, 0x65, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x73, 0x65, 0x73, 0x73, 0x69,
+ 0x6f, 0x6e, 0x54, 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x8c, 0x01, 0x0a, 0x17, 0x56, 0x61, 0x6c, 0x69,
0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f,
- 0x6e, 0x73, 0x65, 0x42, 0x08, 0x5a, 0x06, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70,
- 0x72, 0x6f, 0x74, 0x6f, 0x33,
+ 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
+ 0x28, 0x08, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x75, 0x73, 0x65,
+ 0x72, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x75, 0x73, 0x65, 0x72,
+ 0x49, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x75, 0x73, 0x65, 0x72, 0x5f, 0x65, 0x6d, 0x61, 0x69, 0x6c,
+ 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x75, 0x73, 0x65, 0x72, 0x45, 0x6d, 0x61, 0x69,
+ 0x6c, 0x12, 0x23, 0x0a, 0x0d, 0x64, 0x65, 0x6e, 0x69, 0x65, 0x64, 0x5f, 0x72, 0x65, 0x61, 0x73,
+ 0x6f, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x64, 0x65, 0x6e, 0x69, 0x65, 0x64,
+ 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x2a, 0x64, 0x0a, 0x16, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x4d,
+ 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x54, 0x79, 0x70, 0x65,
+ 0x12, 0x17, 0x0a, 0x13, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f,
+ 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x00, 0x12, 0x18, 0x0a, 0x14, 0x55, 0x50, 0x44,
+ 0x41, 0x54, 0x45, 0x5f, 0x54, 0x59, 0x50, 0x45, 0x5f, 0x4d, 0x4f, 0x44, 0x49, 0x46, 0x49, 0x45,
+ 0x44, 0x10, 0x01, 0x12, 0x17, 0x0a, 0x13, 0x55, 0x50, 0x44, 0x41, 0x54, 0x45, 0x5f, 0x54, 0x59,
+ 0x50, 0x45, 0x5f, 0x52, 0x45, 0x4d, 0x4f, 0x56, 0x45, 0x44, 0x10, 0x02, 0x2a, 0x46, 0x0a, 0x0f,
+ 0x50, 0x61, 0x74, 0x68, 0x52, 0x65, 0x77, 0x72, 0x69, 0x74, 0x65, 0x4d, 0x6f, 0x64, 0x65, 0x12,
+ 0x18, 0x0a, 0x14, 0x50, 0x41, 0x54, 0x48, 0x5f, 0x52, 0x45, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f,
+ 0x44, 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x00, 0x12, 0x19, 0x0a, 0x15, 0x50, 0x41, 0x54,
+ 0x48, 0x5f, 0x52, 0x45, 0x57, 0x52, 0x49, 0x54, 0x45, 0x5f, 0x50, 0x52, 0x45, 0x53, 0x45, 0x52,
+ 0x56, 0x45, 0x10, 0x01, 0x2a, 0xc8, 0x01, 0x0a, 0x0b, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x53, 0x74,
+ 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x14, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54,
+ 0x41, 0x54, 0x55, 0x53, 0x5f, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47, 0x10, 0x00, 0x12, 0x17,
+ 0x0a, 0x13, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x41,
+ 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x01, 0x12, 0x23, 0x0a, 0x1f, 0x50, 0x52, 0x4f, 0x58, 0x59,
+ 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x54, 0x55, 0x4e, 0x4e, 0x45, 0x4c, 0x5f, 0x4e,
+ 0x4f, 0x54, 0x5f, 0x43, 0x52, 0x45, 0x41, 0x54, 0x45, 0x44, 0x10, 0x02, 0x12, 0x24, 0x0a, 0x20,
+ 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x45, 0x52,
+ 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x50, 0x45, 0x4e, 0x44, 0x49, 0x4e, 0x47,
+ 0x10, 0x03, 0x12, 0x23, 0x0a, 0x1f, 0x50, 0x52, 0x4f, 0x58, 0x59, 0x5f, 0x53, 0x54, 0x41, 0x54,
+ 0x55, 0x53, 0x5f, 0x43, 0x45, 0x52, 0x54, 0x49, 0x46, 0x49, 0x43, 0x41, 0x54, 0x45, 0x5f, 0x46,
+ 0x41, 0x49, 0x4c, 0x45, 0x44, 0x10, 0x04, 0x12, 0x16, 0x0a, 0x12, 0x50, 0x52, 0x4f, 0x58, 0x59,
+ 0x5f, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x05, 0x32,
+ 0xfc, 0x04, 0x0a, 0x0c, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
+ 0x12, 0x5f, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70,
+ 0x64, 0x61, 0x74, 0x65, 0x12, 0x23, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e,
+ 0x74, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e, 0x67, 0x55, 0x70, 0x64, 0x61,
+ 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x6d, 0x61, 0x6e, 0x61,
+ 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x65, 0x74, 0x4d, 0x61, 0x70, 0x70, 0x69, 0x6e,
+ 0x67, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30,
+ 0x01, 0x12, 0x54, 0x0a, 0x0d, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c,
+ 0x6f, 0x67, 0x12, 0x20, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e,
+ 0x53, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52, 0x65, 0x71,
+ 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e,
+ 0x74, 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x4c, 0x6f, 0x67, 0x52,
+ 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x51, 0x0a, 0x0c, 0x41, 0x75, 0x74, 0x68, 0x65,
+ 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x12, 0x1f, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65,
+ 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61, 0x74,
+ 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x20, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67,
+ 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x41, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, 0x69, 0x63, 0x61,
+ 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5d, 0x0a, 0x10, 0x53, 0x65,
+ 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12, 0x23,
+ 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x65, 0x6e, 0x64,
+ 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75,
+ 0x65, 0x73, 0x74, 0x1a, 0x24, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74,
+ 0x2e, 0x53, 0x65, 0x6e, 0x64, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x55, 0x70, 0x64, 0x61, 0x74,
+ 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x5a, 0x0a, 0x0f, 0x43, 0x72, 0x65,
+ 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65, 0x65, 0x72, 0x12, 0x22, 0x2e, 0x6d,
+ 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65,
+ 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74,
+ 0x1a, 0x23, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x72,
+ 0x65, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x78, 0x79, 0x50, 0x65, 0x65, 0x72, 0x52, 0x65, 0x73,
+ 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x4b, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, 0x43,
+ 0x55, 0x52, 0x4c, 0x12, 0x1d, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74,
+ 0x2e, 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, 0x43, 0x55, 0x52, 0x4c, 0x52, 0x65, 0x71, 0x75, 0x65,
+ 0x73, 0x74, 0x1a, 0x1e, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e,
+ 0x47, 0x65, 0x74, 0x4f, 0x49, 0x44, 0x43, 0x55, 0x52, 0x4c, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
+ 0x73, 0x65, 0x12, 0x5a, 0x0a, 0x0f, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65,
+ 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x22, 0x2e, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65,
+ 0x6e, 0x74, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53, 0x65, 0x73, 0x73, 0x69,
+ 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x6d, 0x61, 0x6e, 0x61,
+ 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x53,
+ 0x65, 0x73, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x42, 0x08,
+ 0x5a, 0x06, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@@ -2293,7 +2334,7 @@ func file_proxy_service_proto_rawDescGZIP() []byte {
}
var file_proxy_service_proto_enumTypes = make([]protoimpl.EnumInfo, 3)
-var file_proxy_service_proto_msgTypes = make([]protoimpl.MessageInfo, 26)
+var file_proxy_service_proto_msgTypes = make([]protoimpl.MessageInfo, 27)
var file_proxy_service_proto_goTypes = []interface{}{
(ProxyMappingUpdateType)(0), // 0: management.ProxyMappingUpdateType
(PathRewriteMode)(0), // 1: management.PathRewriteMode
@@ -2324,17 +2365,18 @@ var file_proxy_service_proto_goTypes = []interface{}{
(*ValidateSessionRequest)(nil), // 26: management.ValidateSessionRequest
(*ValidateSessionResponse)(nil), // 27: management.ValidateSessionResponse
nil, // 28: management.PathTargetOptions.CustomHeadersEntry
- (*timestamppb.Timestamp)(nil), // 29: google.protobuf.Timestamp
- (*durationpb.Duration)(nil), // 30: google.protobuf.Duration
+ nil, // 29: management.AccessLog.MetadataEntry
+ (*timestamppb.Timestamp)(nil), // 30: google.protobuf.Timestamp
+ (*durationpb.Duration)(nil), // 31: google.protobuf.Duration
}
var file_proxy_service_proto_depIdxs = []int32{
- 29, // 0: management.GetMappingUpdateRequest.started_at:type_name -> google.protobuf.Timestamp
+ 30, // 0: management.GetMappingUpdateRequest.started_at:type_name -> google.protobuf.Timestamp
3, // 1: management.GetMappingUpdateRequest.capabilities:type_name -> management.ProxyCapabilities
11, // 2: management.GetMappingUpdateResponse.mapping:type_name -> management.ProxyMapping
- 30, // 3: management.PathTargetOptions.request_timeout:type_name -> google.protobuf.Duration
+ 31, // 3: management.PathTargetOptions.request_timeout:type_name -> google.protobuf.Duration
1, // 4: management.PathTargetOptions.path_rewrite:type_name -> management.PathRewriteMode
28, // 5: management.PathTargetOptions.custom_headers:type_name -> management.PathTargetOptions.CustomHeadersEntry
- 30, // 6: management.PathTargetOptions.session_idle_timeout:type_name -> google.protobuf.Duration
+ 31, // 6: management.PathTargetOptions.session_idle_timeout:type_name -> google.protobuf.Duration
6, // 7: management.PathMapping.options:type_name -> management.PathTargetOptions
8, // 8: management.Authentication.header_auths:type_name -> management.HeaderAuth
0, // 9: management.ProxyMapping.type:type_name -> management.ProxyMappingUpdateType
@@ -2342,30 +2384,31 @@ var file_proxy_service_proto_depIdxs = []int32{
9, // 11: management.ProxyMapping.auth:type_name -> management.Authentication
10, // 12: management.ProxyMapping.access_restrictions:type_name -> management.AccessRestrictions
14, // 13: management.SendAccessLogRequest.log:type_name -> management.AccessLog
- 29, // 14: management.AccessLog.timestamp:type_name -> google.protobuf.Timestamp
- 17, // 15: management.AuthenticateRequest.password:type_name -> management.PasswordRequest
- 18, // 16: management.AuthenticateRequest.pin:type_name -> management.PinRequest
- 16, // 17: management.AuthenticateRequest.header_auth:type_name -> management.HeaderAuthRequest
- 2, // 18: management.SendStatusUpdateRequest.status:type_name -> management.ProxyStatus
- 4, // 19: management.ProxyService.GetMappingUpdate:input_type -> management.GetMappingUpdateRequest
- 12, // 20: management.ProxyService.SendAccessLog:input_type -> management.SendAccessLogRequest
- 15, // 21: management.ProxyService.Authenticate:input_type -> management.AuthenticateRequest
- 20, // 22: management.ProxyService.SendStatusUpdate:input_type -> management.SendStatusUpdateRequest
- 22, // 23: management.ProxyService.CreateProxyPeer:input_type -> management.CreateProxyPeerRequest
- 24, // 24: management.ProxyService.GetOIDCURL:input_type -> management.GetOIDCURLRequest
- 26, // 25: management.ProxyService.ValidateSession:input_type -> management.ValidateSessionRequest
- 5, // 26: management.ProxyService.GetMappingUpdate:output_type -> management.GetMappingUpdateResponse
- 13, // 27: management.ProxyService.SendAccessLog:output_type -> management.SendAccessLogResponse
- 19, // 28: management.ProxyService.Authenticate:output_type -> management.AuthenticateResponse
- 21, // 29: management.ProxyService.SendStatusUpdate:output_type -> management.SendStatusUpdateResponse
- 23, // 30: management.ProxyService.CreateProxyPeer:output_type -> management.CreateProxyPeerResponse
- 25, // 31: management.ProxyService.GetOIDCURL:output_type -> management.GetOIDCURLResponse
- 27, // 32: management.ProxyService.ValidateSession:output_type -> management.ValidateSessionResponse
- 26, // [26:33] is the sub-list for method output_type
- 19, // [19:26] is the sub-list for method input_type
- 19, // [19:19] is the sub-list for extension type_name
- 19, // [19:19] is the sub-list for extension extendee
- 0, // [0:19] is the sub-list for field type_name
+ 30, // 14: management.AccessLog.timestamp:type_name -> google.protobuf.Timestamp
+ 29, // 15: management.AccessLog.metadata:type_name -> management.AccessLog.MetadataEntry
+ 17, // 16: management.AuthenticateRequest.password:type_name -> management.PasswordRequest
+ 18, // 17: management.AuthenticateRequest.pin:type_name -> management.PinRequest
+ 16, // 18: management.AuthenticateRequest.header_auth:type_name -> management.HeaderAuthRequest
+ 2, // 19: management.SendStatusUpdateRequest.status:type_name -> management.ProxyStatus
+ 4, // 20: management.ProxyService.GetMappingUpdate:input_type -> management.GetMappingUpdateRequest
+ 12, // 21: management.ProxyService.SendAccessLog:input_type -> management.SendAccessLogRequest
+ 15, // 22: management.ProxyService.Authenticate:input_type -> management.AuthenticateRequest
+ 20, // 23: management.ProxyService.SendStatusUpdate:input_type -> management.SendStatusUpdateRequest
+ 22, // 24: management.ProxyService.CreateProxyPeer:input_type -> management.CreateProxyPeerRequest
+ 24, // 25: management.ProxyService.GetOIDCURL:input_type -> management.GetOIDCURLRequest
+ 26, // 26: management.ProxyService.ValidateSession:input_type -> management.ValidateSessionRequest
+ 5, // 27: management.ProxyService.GetMappingUpdate:output_type -> management.GetMappingUpdateResponse
+ 13, // 28: management.ProxyService.SendAccessLog:output_type -> management.SendAccessLogResponse
+ 19, // 29: management.ProxyService.Authenticate:output_type -> management.AuthenticateResponse
+ 21, // 30: management.ProxyService.SendStatusUpdate:output_type -> management.SendStatusUpdateResponse
+ 23, // 31: management.ProxyService.CreateProxyPeer:output_type -> management.CreateProxyPeerResponse
+ 25, // 32: management.ProxyService.GetOIDCURL:output_type -> management.GetOIDCURLResponse
+ 27, // 33: management.ProxyService.ValidateSession:output_type -> management.ValidateSessionResponse
+ 27, // [27:34] is the sub-list for method output_type
+ 20, // [20:27] is the sub-list for method input_type
+ 20, // [20:20] is the sub-list for extension type_name
+ 20, // [20:20] is the sub-list for extension extendee
+ 0, // [0:20] is the sub-list for field type_name
}
func init() { file_proxy_service_proto_init() }
@@ -2689,7 +2732,7 @@ func file_proxy_service_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_proxy_service_proto_rawDesc,
NumEnums: 3,
- NumMessages: 26,
+ NumMessages: 27,
NumExtensions: 0,
NumServices: 1,
},
diff --git a/shared/management/proto/proxy_service.proto b/shared/management/proto/proxy_service.proto
index f77071eb0..e359f0cbd 100644
--- a/shared/management/proto/proxy_service.proto
+++ b/shared/management/proto/proxy_service.proto
@@ -34,6 +34,8 @@ message ProxyCapabilities {
// Whether the proxy requires a subdomain label in front of its cluster domain.
// When true, accounts cannot use the cluster domain bare.
optional bool require_subdomain = 2;
+ // Whether the proxy has CrowdSec configured and can enforce IP reputation checks.
+ optional bool supports_crowdsec = 3;
}
// GetMappingUpdateRequest is sent to initialise a mapping stream.
@@ -104,6 +106,8 @@ message AccessRestrictions {
repeated string blocked_cidrs = 2;
repeated string allowed_countries = 3;
repeated string blocked_countries = 4;
+ // CrowdSec IP reputation mode: "", "off", "enforce", or "observe".
+ string crowdsec_mode = 5;
}
message ProxyMapping {
@@ -152,6 +156,8 @@ message AccessLog {
int64 bytes_upload = 14;
int64 bytes_download = 15;
string protocol = 16;
+ // Extra key-value metadata for the access log entry (e.g. crowdsec_verdict, scenario).
+ map<string, string> metadata = 17;
}
message AuthenticateRequest {
diff --git a/shared/relay/client/client.go b/shared/relay/client/client.go
index ed1b63435..b10b05617 100644
--- a/shared/relay/client/client.go
+++ b/shared/relay/client/client.go
@@ -333,7 +333,7 @@ func (c *Client) connect(ctx context.Context) (*RelayAddr, error) {
dialers := c.getDialers()
rd := dialer.NewRaceDial(c.log, dialer.DefaultConnectionTimeout, c.connectionURL, dialers...)
- conn, err := rd.Dial()
+ conn, err := rd.Dial(ctx)
if err != nil {
return nil, err
}
diff --git a/shared/relay/client/dialer/race_dialer.go b/shared/relay/client/dialer/race_dialer.go
index 0550fc63e..34359d17e 100644
--- a/shared/relay/client/dialer/race_dialer.go
+++ b/shared/relay/client/dialer/race_dialer.go
@@ -40,10 +40,10 @@ func NewRaceDial(log *log.Entry, connectionTimeout time.Duration, serverURL stri
}
}
-func (r *RaceDial) Dial() (net.Conn, error) {
+func (r *RaceDial) Dial(ctx context.Context) (net.Conn, error) {
connChan := make(chan dialResult, len(r.dialerFns))
winnerConn := make(chan net.Conn, 1)
- abortCtx, abort := context.WithCancel(context.Background())
+ abortCtx, abort := context.WithCancel(ctx)
defer abort()
for _, dfn := range r.dialerFns {
diff --git a/shared/relay/client/dialer/race_dialer_test.go b/shared/relay/client/dialer/race_dialer_test.go
index d216ec5e7..aa18df578 100644
--- a/shared/relay/client/dialer/race_dialer_test.go
+++ b/shared/relay/client/dialer/race_dialer_test.go
@@ -78,7 +78,7 @@ func TestRaceDialEmptyDialers(t *testing.T) {
serverURL := "test.server.com"
rd := NewRaceDial(logger, DefaultConnectionTimeout, serverURL)
- conn, err := rd.Dial()
+ conn, err := rd.Dial(context.Background())
if err == nil {
t.Errorf("Expected an error with empty dialers, got nil")
}
@@ -104,7 +104,7 @@ func TestRaceDialSingleSuccessfulDialer(t *testing.T) {
}
rd := NewRaceDial(logger, DefaultConnectionTimeout, serverURL, mockDialer)
- conn, err := rd.Dial()
+ conn, err := rd.Dial(context.Background())
if err != nil {
t.Errorf("Expected no error, got %v", err)
}
@@ -137,7 +137,7 @@ func TestRaceDialMultipleDialersWithOneSuccess(t *testing.T) {
}
rd := NewRaceDial(logger, DefaultConnectionTimeout, serverURL, mockDialer1, mockDialer2)
- conn, err := rd.Dial()
+ conn, err := rd.Dial(context.Background())
if err != nil {
t.Errorf("Expected no error, got %v", err)
}
@@ -160,7 +160,7 @@ func TestRaceDialTimeout(t *testing.T) {
}
rd := NewRaceDial(logger, 3*time.Second, serverURL, mockDialer)
- conn, err := rd.Dial()
+ conn, err := rd.Dial(context.Background())
if err == nil {
t.Errorf("Expected an error, got nil")
}
@@ -188,7 +188,7 @@ func TestRaceDialAllDialersFail(t *testing.T) {
}
rd := NewRaceDial(logger, DefaultConnectionTimeout, serverURL, mockDialer1, mockDialer2)
- conn, err := rd.Dial()
+ conn, err := rd.Dial(context.Background())
if err == nil {
t.Errorf("Expected an error, got nil")
}
@@ -230,7 +230,7 @@ func TestRaceDialFirstSuccessfulDialerWins(t *testing.T) {
}
rd := NewRaceDial(logger, DefaultConnectionTimeout, serverURL, mockDialer1, mockDialer2)
- conn, err := rd.Dial()
+ conn, err := rd.Dial(context.Background())
if err != nil {
t.Errorf("Expected no error, got %v", err)
}