enhancement: base context + errgroup; propagate cancellation; graceful shutdown
- main: add base context via signal.NotifyContext; establish errgroup and use it to supervise background tasks; convert ticker to context-aware periodicBandwidthCheck; run HTTP server under errgroup and add graceful shutdown; treat context.Canceled as normal exit
- relay: thread parent context through UDPProxyServer; add cancel func; make packet reader, workers, and cleanup tickers exit on ctx.Done; Stop cancels, closes listener and downstream UDP connections, and closes packet channel to drain workers
- proxy: drop earlier parent context hook for SNI proxy per review; rely on existing Stop() for graceful shutdown

Benefits:
- unified lifecycle and deterministic shutdown across components
- prevents leaked goroutines/tickers and closes sockets cleanly
- consolidated error handling via g.Wait(), with context cancellation treated as non-error
- sets foundation for child errgroups and future structured concurrency
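The pattern is easier to see outside the diff. Below is a minimal, self-contained sketch of the same lifecycle structure (signal-derived base context, supervising errgroup, context-aware ticker, graceful HTTP shutdown); the address, durations, and log calls are illustrative, not Gerbil's:

package main

import (
	"context"
	"errors"
	"log"
	"net/http"
	"os"
	"os/signal"
	"syscall"
	"time"

	"golang.org/x/sync/errgroup"
)

func main() {
	// Base context: canceled on SIGINT/SIGTERM.
	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
	defer stop()

	// Child errgroup: the first non-nil error (or the signal) cancels groupCtx.
	group, groupCtx := errgroup.WithContext(ctx)

	// Context-aware periodic task, mirroring periodicBandwidthCheck.
	group.Go(func() error {
		ticker := time.NewTicker(10 * time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				log.Println("periodic work")
			case <-groupCtx.Done():
				return groupCtx.Err()
			}
		}
	})

	// HTTP server supervised by the group; ErrServerClosed is the normal
	// result of a graceful Shutdown, so it is not treated as a failure.
	server := &http.Server{Addr: ":8080"}
	group.Go(func() error {
		if err := server.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
			return err
		}
		return nil
	})
	group.Go(func() error {
		<-groupCtx.Done()
		shutdownCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()
		return server.Shutdown(shutdownCtx)
	})

	// Cancellation is the normal shutdown path, not an error.
	if err := group.Wait(); err != nil && !errors.Is(err, context.Canceled) {
		log.Fatalf("service exited with error: %v", err)
	}
}

Because errgroup.WithContext cancels groupCtx as soon as any supervised function returns a non-nil error, a failing component tears the others down, and a signal does the same through the parent context.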
main.go (75 lines changed)

@@ -2,7 +2,9 @@ package main
 
 import (
 	"bytes"
+	"context"
 	"encoding/json"
+	"errors"
 	"flag"
 	"fmt"
 	"io"
@@ -21,6 +23,7 @@ import (
 	"github.com/fosrl/gerbil/proxy"
 	"github.com/fosrl/gerbil/relay"
 	"github.com/vishvananda/netlink"
+	"golang.org/x/sync/errgroup"
 	"golang.zx2c4.com/wireguard/wgctrl"
 	"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
 )
@@ -217,6 +220,10 @@ func main() {
 	logger.Init()
 	logger.GetLogger().SetLevel(parseLogLevel(logLevel))
 
+	// Base context for the application; cancel on SIGINT/SIGTERM
+	ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
+	defer stop()
+
 	// try to parse as http://host:port and set the listenAddr to the :port from this reachableAt.
 	if reachableAt != "" && listenAddr == "" {
 		if strings.HasPrefix(reachableAt, "http://") || strings.HasPrefix(reachableAt, "https://") {
@@ -324,10 +331,16 @@ func main() {
 	// Ensure the WireGuard peers exist
 	ensureWireguardPeers(wgconfig.Peers)
 
-	go periodicBandwidthCheck(remoteConfigURL + "/gerbil/receive-bandwidth")
+	// Child error group derived from base context
+	group, groupCtx := errgroup.WithContext(ctx)
+
+	// Periodic bandwidth reporting
+	group.Go(func() error {
+		return periodicBandwidthCheck(groupCtx, remoteConfigURL+"/gerbil/receive-bandwidth")
+	})
 
 	// Start the UDP proxy server
-	proxyRelay = relay.NewUDPProxyServer(":21820", remoteConfigURL, key, reachableAt)
+	proxyRelay = relay.NewUDPProxyServer(groupCtx, ":21820", remoteConfigURL, key, reachableAt)
 	err = proxyRelay.Start()
 	if err != nil {
 		logger.Fatal("Failed to start UDP proxy server: %v", err)
@@ -371,18 +384,39 @@ func main() {
 	http.HandleFunc("/update-local-snis", handleUpdateLocalSNIs)
 	logger.Info("Starting HTTP server on %s", listenAddr)
 
-	// Run HTTP server in a goroutine
-	go func() {
-		if err := http.ListenAndServe(listenAddr, nil); err != nil {
-			logger.Error("HTTP server failed: %v", err)
-		}
-	}()
+	// HTTP server with graceful shutdown on context cancel
+	server := &http.Server{
+		Addr:    listenAddr,
+		Handler: nil,
+	}
+	group.Go(func() error {
+		// http.ErrServerClosed is returned on graceful shutdown; not an error for us
+		if err := server.ListenAndServe(); err != nil && !errors.Is(err, http.ErrServerClosed) {
+			return err
+		}
+		return nil
+	})
+	group.Go(func() error {
+		<-groupCtx.Done()
+		shutdownCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+		defer cancel()
+		_ = server.Shutdown(shutdownCtx)
+		// Stop background components as the context is canceled
+		if proxySNI != nil {
+			_ = proxySNI.Stop()
+		}
+		if proxyRelay != nil {
+			proxyRelay.Stop()
+		}
+		return nil
+	})
 
-	// Keep the main goroutine running
-	sigCh := make(chan os.Signal, 1)
-	signal.Notify(sigCh, syscall.SIGINT, syscall.SIGTERM)
-	<-sigCh
-	logger.Info("Shutting down servers...")
+	// Wait for all goroutines to finish
+	if err := group.Wait(); err != nil && !errors.Is(err, context.Canceled) {
+		logger.Error("Service exited with error: %v", err)
+	} else if errors.Is(err, context.Canceled) {
+		logger.Info("Context cancelled, shutting down")
+	}
 }
 
 func loadRemoteConfig(url string, key wgtypes.Key, reachableAt string) (WgConfig, error) {
@@ -639,7 +673,7 @@ func ensureMSSClamping() error {
 		if out, err := addCmd.CombinedOutput(); err != nil {
 			errMsg := fmt.Sprintf("Failed to add MSS clamping rule for chain %s: %v (output: %s)",
 				chain, err, string(out))
-			logger.Error(errMsg)
+			logger.Error("%s", errMsg)
 			errors = append(errors, fmt.Errorf("%s", errMsg))
 			continue
 		}
@@ -656,7 +690,7 @@ func ensureMSSClamping() error {
 		if out, err := checkCmd.CombinedOutput(); err != nil {
 			errMsg := fmt.Sprintf("Rule verification failed for chain %s: %v (output: %s)",
 				chain, err, string(out))
-			logger.Error(errMsg)
+			logger.Error("%s", errMsg)
 			errors = append(errors, fmt.Errorf("%s", errMsg))
 			continue
 		}
@@ -977,13 +1011,18 @@ func handleUpdateLocalSNIs(w http.ResponseWriter, r *http.Request) {
 	})
 }
 
-func periodicBandwidthCheck(endpoint string) {
+func periodicBandwidthCheck(ctx context.Context, endpoint string) error {
 	ticker := time.NewTicker(10 * time.Second)
 	defer ticker.Stop()
 
-	for range ticker.C {
-		if err := reportPeerBandwidth(endpoint); err != nil {
-			logger.Info("Failed to report peer bandwidth: %v", err)
+	for {
+		select {
+		case <-ticker.C:
+			if err := reportPeerBandwidth(endpoint); err != nil {
+				logger.Info("Failed to report peer bandwidth: %v", err)
+			}
+		case <-ctx.Done():
+			return ctx.Err()
 		}
 	}
 }
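One subtlety in the main.go hunks above: periodicBandwidthCheck now returns ctx.Err(), so a signal-driven shutdown surfaces from group.Wait() as context.Canceled, which main deliberately filters out as a clean exit. A toy reproduction of that flow (hypothetical tick helper, shortened durations):

package main

import (
	"context"
	"errors"
	"fmt"
	"time"

	"golang.org/x/sync/errgroup"
)

// tick stands in for periodicBandwidthCheck: it returns ctx.Err() once canceled.
func tick(ctx context.Context) error {
	t := time.NewTicker(50 * time.Millisecond)
	defer t.Stop()
	for {
		select {
		case <-t.C:
			// periodic work would happen here
		case <-ctx.Done():
			return ctx.Err() // context.Canceled on graceful shutdown
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	group, groupCtx := errgroup.WithContext(ctx)
	group.Go(func() error { return tick(groupCtx) })

	time.Sleep(120 * time.Millisecond)
	cancel() // stands in for SIGINT/SIGTERM arriving via signal.NotifyContext

	err := group.Wait()
	fmt.Println(errors.Is(err, context.Canceled)) // true: treated as a clean exit
}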
relay/relay.go (168 lines changed)
@@ -1,6 +1,7 @@
 package relay
 
 import (
+	"context"
 	"bytes"
 	"encoding/binary"
 	"encoding/json"
@@ -112,6 +113,8 @@ type UDPProxyServer struct {
 	connections sync.Map // map[string]*DestinationConn where key is destination "ip:port"
 	privateKey  wgtypes.Key
 	packetChan  chan Packet
+	ctx         context.Context
+	cancel      context.CancelFunc
 
 	// Session tracking for WireGuard peers
 	// Key format: "senderIndex:receiverIndex"
@@ -123,14 +126,17 @@ type UDPProxyServer struct {
 	ReachableAt string
 }
 
-// NewUDPProxyServer initializes the server with a buffered packet channel.
-func NewUDPProxyServer(addr, serverURL string, privateKey wgtypes.Key, reachableAt string) *UDPProxyServer {
+// NewUDPProxyServer initializes the server with a buffered packet channel and derived context.
+func NewUDPProxyServer(parentCtx context.Context, addr, serverURL string, privateKey wgtypes.Key, reachableAt string) *UDPProxyServer {
+	ctx, cancel := context.WithCancel(parentCtx)
 	return &UDPProxyServer{
 		addr:        addr,
 		serverURL:   serverURL,
 		privateKey:  privateKey,
 		packetChan:  make(chan Packet, 1000),
 		ReachableAt: reachableAt,
+		ctx:         ctx,
+		cancel:      cancel,
 	}
 }
 
@@ -177,17 +183,51 @@ func (s *UDPProxyServer) Start() error {
 }
 
 func (s *UDPProxyServer) Stop() {
-	s.conn.Close()
+	// Signal all background goroutines to stop
+	if s.cancel != nil {
+		s.cancel()
+	}
+	// Close listener to unblock reads
+	if s.conn != nil {
+		_ = s.conn.Close()
+	}
+	// Close all downstream UDP connections
+	s.connections.Range(func(key, value interface{}) bool {
+		if dc, ok := value.(*DestinationConn); ok && dc.conn != nil {
+			_ = dc.conn.Close()
+		}
+		return true
+	})
+	// Close packet channel to stop workers
+	select {
+	case <-s.ctx.Done():
+	default:
+	}
+	close(s.packetChan)
 }
 
 // readPackets continuously reads from the UDP socket and pushes packets into the channel.
 func (s *UDPProxyServer) readPackets() {
 	for {
+		// Exit promptly if context is canceled
+		select {
+		case <-s.ctx.Done():
+			return
+		default:
+		}
 		buf := bufferPool.Get().([]byte)
 		n, remoteAddr, err := s.conn.ReadFromUDP(buf)
 		if err != nil {
-			logger.Error("Error reading UDP packet: %v", err)
-			continue
+			// If we're shutting down, exit
+			select {
+			case <-s.ctx.Done():
+				bufferPool.Put(buf[:1500])
+				return
+			default:
+				logger.Error("Error reading UDP packet: %v", err)
+				bufferPool.Put(buf[:1500])
+				continue
+			}
 		}
 		s.packetChan <- Packet{data: buf[:n], remoteAddr: remoteAddr, n: n}
 	}
@@ -588,49 +628,67 @@ func (s *UDPProxyServer) handleResponses(conn *net.UDPConn, destAddr *net.UDPAddr
 // Add a cleanup method to periodically remove idle connections
 func (s *UDPProxyServer) cleanupIdleConnections() {
 	ticker := time.NewTicker(5 * time.Minute)
-	for range ticker.C {
-		now := time.Now()
-		s.connections.Range(func(key, value interface{}) bool {
-			destConn := value.(*DestinationConn)
-			if now.Sub(destConn.lastUsed) > 10*time.Minute {
-				destConn.conn.Close()
-				s.connections.Delete(key)
-			}
-			return true
-		})
+	defer ticker.Stop()
+	for {
+		select {
+		case <-ticker.C:
+			now := time.Now()
+			s.connections.Range(func(key, value interface{}) bool {
+				destConn := value.(*DestinationConn)
+				if now.Sub(destConn.lastUsed) > 10*time.Minute {
+					destConn.conn.Close()
+					s.connections.Delete(key)
+				}
+				return true
+			})
+		case <-s.ctx.Done():
+			return
+		}
 	}
 }
 
 // New method to periodically remove idle sessions
 func (s *UDPProxyServer) cleanupIdleSessions() {
 	ticker := time.NewTicker(5 * time.Minute)
-	for range ticker.C {
-		now := time.Now()
-		s.wgSessions.Range(func(key, value interface{}) bool {
-			session := value.(*WireGuardSession)
-			if now.Sub(session.LastSeen) > 15*time.Minute {
-				s.wgSessions.Delete(key)
-				logger.Debug("Removed idle session: %s", key)
-			}
-			return true
-		})
+	defer ticker.Stop()
+	for {
+		select {
+		case <-ticker.C:
+			now := time.Now()
+			s.wgSessions.Range(func(key, value interface{}) bool {
+				session := value.(*WireGuardSession)
+				if now.Sub(session.LastSeen) > 15*time.Minute {
+					s.wgSessions.Delete(key)
+					logger.Debug("Removed idle session: %s", key)
+				}
+				return true
+			})
+		case <-s.ctx.Done():
+			return
+		}
 	}
 }
 
 // New method to periodically remove idle proxy mappings
 func (s *UDPProxyServer) cleanupIdleProxyMappings() {
 	ticker := time.NewTicker(10 * time.Minute)
-	for range ticker.C {
-		now := time.Now()
-		s.proxyMappings.Range(func(key, value interface{}) bool {
-			mapping := value.(ProxyMapping)
-			// Remove mappings that haven't been used in 30 minutes
-			if now.Sub(mapping.LastUsed) > 30*time.Minute {
-				s.proxyMappings.Delete(key)
-				logger.Debug("Removed idle proxy mapping: %s", key)
-			}
-			return true
-		})
+	defer ticker.Stop()
+	for {
+		select {
+		case <-ticker.C:
+			now := time.Now()
+			s.proxyMappings.Range(func(key, value interface{}) bool {
+				mapping := value.(ProxyMapping)
+				// Remove mappings that haven't been used in 30 minutes
+				if now.Sub(mapping.LastUsed) > 30*time.Minute {
+					s.proxyMappings.Delete(key)
+					logger.Debug("Removed idle proxy mapping: %s", key)
+				}
+				return true
+			})
+		case <-s.ctx.Done():
+			return
+		}
	}
 }
@@ -943,23 +1001,29 @@ func (s *UDPProxyServer) tryRebuildSession(pattern *CommunicationPattern) {
 // cleanupIdleCommunicationPatterns periodically removes idle communication patterns
 func (s *UDPProxyServer) cleanupIdleCommunicationPatterns() {
 	ticker := time.NewTicker(10 * time.Minute)
-	for range ticker.C {
-		now := time.Now()
-		s.commPatterns.Range(func(key, value interface{}) bool {
-			pattern := value.(*CommunicationPattern)
-
-			// Get the most recent activity
-			lastActivity := pattern.LastFromClient
-			if pattern.LastFromDest.After(lastActivity) {
-				lastActivity = pattern.LastFromDest
-			}
-
-			// Remove patterns that haven't had activity in 20 minutes
-			if now.Sub(lastActivity) > 20*time.Minute {
-				s.commPatterns.Delete(key)
-				logger.Debug("Removed idle communication pattern: %s", key)
-			}
-			return true
-		})
+	defer ticker.Stop()
+	for {
+		select {
+		case <-ticker.C:
+			now := time.Now()
+			s.commPatterns.Range(func(key, value interface{}) bool {
+				pattern := value.(*CommunicationPattern)
+
+				// Get the most recent activity
+				lastActivity := pattern.LastFromClient
+				if pattern.LastFromDest.After(lastActivity) {
+					lastActivity = pattern.LastFromDest
+				}
+
+				// Remove patterns that haven't had activity in 20 minutes
+				if now.Sub(lastActivity) > 20*time.Minute {
+					s.commPatterns.Delete(key)
+					logger.Debug("Removed idle communication pattern: %s", key)
+				}
+				return true
+			})
+		case <-s.ctx.Done():
+			return
+		}
 	}
 }
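The ordering inside Stop() above is deliberate: cancel first so readPackets and the cleanup tickers stop producing, close the sockets to unblock any pending read, then close packetChan so the workers drain whatever is queued and exit. A reduced sketch of that drain-on-close step, with simplified types and hypothetical names:

package main

import (
	"fmt"
	"sync"
)

type packet struct{ n int }

// worker ranges over the packet channel; the loop ends only when the
// channel is closed and fully drained, so no queued packet is dropped.
func worker(id int, ch <-chan packet, wg *sync.WaitGroup) {
	defer wg.Done()
	for p := range ch {
		fmt.Printf("worker %d handled packet %d\n", id, p.n)
	}
}

func main() {
	ch := make(chan packet, 8)
	var wg sync.WaitGroup
	for i := 0; i < 2; i++ {
		wg.Add(1)
		go worker(i, ch, &wg)
	}
	for i := 0; i < 5; i++ {
		ch <- packet{n: i}
	}
	close(ch) // Stop(): after cancel and socket close, no new sends can occur
	wg.Wait() // workers exit once the buffered packets are drained
}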