Merge pull request #60 from LaurenceJJones/split/upstream-dev-relay-worker-scaling

perf(relay): scale packet workers and queue depth for throughput
This commit is contained in:
Owen Schwartz
2026-03-18 15:56:46 -07:00
committed by GitHub

View File

@@ -9,6 +9,7 @@ import (
	"io"
	"net"
	"net/http"
	"runtime"
	"sync"
	"time"
@@ -167,7 +168,7 @@ func NewUDPProxyServer(parentCtx context.Context, addr, serverURL string, privat
		addr:        addr,
		serverURL:   serverURL,
		privateKey:  privateKey,
		packetChan:  make(chan Packet, 50000), // Increased from 1000 to handle high throughput
		ReachableAt: reachableAt,
		ctx:         ctx,
		cancel:      cancel,
@@ -192,8 +193,13 @@ func (s *UDPProxyServer) Start() error {
	s.conn = conn
	logger.Info("UDP server listening on %s", s.addr)

	// Start worker goroutines based on CPU cores for better parallelism.
	// At high throughput (160+ Mbps), we need many workers to avoid bottlenecks.
	workerCount := runtime.NumCPU() * 10
	if workerCount < 20 {
		workerCount = 20 // Minimum 20 workers
	}
	logger.Info("Starting %d packet workers (CPUs: %d)", workerCount, runtime.NumCPU())
	for i := 0; i < workerCount; i++ {
		go s.packetWorker()
	}