Merge pull request #117 from fosrl/dev

1.5.0
This commit is contained in:
Owen Schwartz
2026-04-22 12:13:09 -07:00
committed by GitHub
8 changed files with 285 additions and 39 deletions

1
.github/CODEOWNERS vendored Normal file
View File

@@ -0,0 +1 @@
* @oschwartz10612 @miloschwartz

View File

@@ -1,4 +1,5 @@
# Olm
Olm is being phased out in favor of the [Pangolin CLI](https://github.com/fosrl/cli) and is only meant for advanced use cases.
Olm is a [WireGuard](https://www.wireguard.com/) tunnel client designed to securely connect your computer to Newt sites running on remote networks.

View File

@@ -205,12 +205,13 @@ func (o *Olm) handleConnect(msg websocket.WSMessage) {
// Register JIT handler: when the DNS proxy resolves a local record, check whether
// the owning site is already connected and, if not, initiate a JIT connection.
o.dnsProxy.SetJITHandler(func(siteId int) {
if o.peerManager == nil || o.websocket == nil {
pm := o.getPeerManager()
if pm == nil || o.websocket == nil {
return
}
// Site already has an active peer connection - nothing to do.
if _, exists := o.peerManager.GetPeer(siteId); exists {
if _, exists := pm.GetPeer(siteId); exists {
return
}

View File

@@ -32,21 +32,27 @@ func (o *Olm) handleWgPeerAddData(msg websocket.WSMessage) {
return
}
if _, exists := o.peerManager.GetPeer(addSubnetsData.SiteId); !exists {
pm := o.getPeerManager()
if pm == nil {
logger.Debug("Ignoring add-remote-subnets-aliases message: peerManager is nil (shutdown in progress)")
return
}
if _, exists := pm.GetPeer(addSubnetsData.SiteId); !exists {
logger.Debug("Peer %d not found for removing remote subnets and aliases", addSubnetsData.SiteId)
return
}
// Add new subnets
for _, subnet := range addSubnetsData.RemoteSubnets {
if err := o.peerManager.AddRemoteSubnet(addSubnetsData.SiteId, subnet); err != nil {
if err := pm.AddRemoteSubnet(addSubnetsData.SiteId, subnet); err != nil {
logger.Error("Failed to add allowed IP %s: %v", subnet, err)
}
}
// Add new aliases
for _, alias := range addSubnetsData.Aliases {
if err := o.peerManager.AddAlias(addSubnetsData.SiteId, alias); err != nil {
if err := pm.AddAlias(addSubnetsData.SiteId, alias); err != nil {
logger.Error("Failed to add alias %s: %v", alias.Alias, err)
}
}
@@ -73,21 +79,27 @@ func (o *Olm) handleWgPeerRemoveData(msg websocket.WSMessage) {
return
}
if _, exists := o.peerManager.GetPeer(removeSubnetsData.SiteId); !exists {
pm := o.getPeerManager()
if pm == nil {
logger.Debug("Ignoring remove-remote-subnets-aliases message: peerManager is nil (shutdown in progress)")
return
}
if _, exists := pm.GetPeer(removeSubnetsData.SiteId); !exists {
logger.Debug("Peer %d not found for removing remote subnets and aliases", removeSubnetsData.SiteId)
return
}
// Remove subnets
for _, subnet := range removeSubnetsData.RemoteSubnets {
if err := o.peerManager.RemoveRemoteSubnet(removeSubnetsData.SiteId, subnet); err != nil {
if err := pm.RemoveRemoteSubnet(removeSubnetsData.SiteId, subnet); err != nil {
logger.Error("Failed to remove allowed IP %s: %v", subnet, err)
}
}
// Remove aliases
for _, alias := range removeSubnetsData.Aliases {
if err := o.peerManager.RemoveAlias(removeSubnetsData.SiteId, alias.Alias); err != nil {
if err := pm.RemoveAlias(removeSubnetsData.SiteId, alias.Alias); err != nil {
logger.Error("Failed to remove alias %s: %v", alias.Alias, err)
}
}
@@ -114,7 +126,13 @@ func (o *Olm) handleWgPeerUpdateData(msg websocket.WSMessage) {
return
}
if _, exists := o.peerManager.GetPeer(updateSubnetsData.SiteId); !exists {
pm := o.getPeerManager()
if pm == nil {
logger.Debug("Ignoring update-remote-subnets-aliases message: peerManager is nil (shutdown in progress)")
return
}
if _, exists := pm.GetPeer(updateSubnetsData.SiteId); !exists {
logger.Debug("Peer %d not found for updating remote subnets and aliases", updateSubnetsData.SiteId)
return
}
@@ -123,14 +141,14 @@ func (o *Olm) handleWgPeerUpdateData(msg websocket.WSMessage) {
// This ensures that if an old and new subnet are the same on different peers,
// the route won't be temporarily removed
for _, subnet := range updateSubnetsData.NewRemoteSubnets {
if err := o.peerManager.AddRemoteSubnet(updateSubnetsData.SiteId, subnet); err != nil {
if err := pm.AddRemoteSubnet(updateSubnetsData.SiteId, subnet); err != nil {
logger.Error("Failed to add allowed IP %s: %v", subnet, err)
}
}
// Remove old subnets after new ones are added
for _, subnet := range updateSubnetsData.OldRemoteSubnets {
if err := o.peerManager.RemoveRemoteSubnet(updateSubnetsData.SiteId, subnet); err != nil {
if err := pm.RemoveRemoteSubnet(updateSubnetsData.SiteId, subnet); err != nil {
logger.Error("Failed to remove allowed IP %s: %v", subnet, err)
}
}
@@ -139,14 +157,14 @@ func (o *Olm) handleWgPeerUpdateData(msg websocket.WSMessage) {
// This ensures that if an old and new alias share the same IP, the IP won't be
// temporarily removed from the allowed IPs list
for _, alias := range updateSubnetsData.NewAliases {
if err := o.peerManager.AddAlias(updateSubnetsData.SiteId, alias); err != nil {
if err := pm.AddAlias(updateSubnetsData.SiteId, alias); err != nil {
logger.Error("Failed to add alias %s: %v", alias.Alias, err)
}
}
// Remove old aliases after new ones are added
for _, alias := range updateSubnetsData.OldAliases {
if err := o.peerManager.RemoveAlias(updateSubnetsData.SiteId, alias.Alias); err != nil {
if err := pm.RemoveAlias(updateSubnetsData.SiteId, alias.Alias); err != nil {
logger.Error("Failed to remove alias %s: %v", alias.Alias, err)
}
}
@@ -163,7 +181,8 @@ func (o *Olm) handleSync(msg websocket.WSMessage) {
return
}
if o.peerManager == nil {
pm := o.getPeerManager()
if pm == nil {
logger.Warn("Peer manager not initialized, ignoring sync request")
return
}
@@ -190,7 +209,7 @@ func (o *Olm) handleSync(msg websocket.WSMessage) {
}
// Get all current peers
currentPeers := o.peerManager.GetAllPeers()
currentPeers := pm.GetAllPeers()
currentPeerMap := make(map[int]peers.SiteConfig)
for _, peer := range currentPeers {
currentPeerMap[peer.SiteId] = peer
@@ -200,7 +219,7 @@ func (o *Olm) handleSync(msg websocket.WSMessage) {
for siteId := range currentPeerMap {
if _, exists := expectedPeers[siteId]; !exists {
logger.Info("Sync: Removing peer for site %d (no longer in expected config)", siteId)
if err := o.peerManager.RemovePeer(siteId); err != nil {
if err := pm.RemovePeer(siteId); err != nil {
logger.Error("Sync: Failed to remove peer %d: %v", siteId, err)
} else {
// Remove any exit nodes associated with this peer from hole punching
@@ -301,7 +320,7 @@ func (o *Olm) handleSync(msg websocket.WSMessage) {
siteConfig.Aliases = expectedSite.Aliases
}
if err := o.peerManager.UpdatePeer(siteConfig); err != nil {
if err := pm.UpdatePeer(siteConfig); err != nil {
logger.Error("Sync: Failed to update peer %d: %v", siteId, err)
} else {
// If the endpoint changed, trigger holepunch to refresh NAT mappings

View File

@@ -47,6 +47,7 @@ type Olm struct {
websocket *websocket.Client
holePunchManager *holepunch.Manager
peerManager *peers.PeerManager
peerManagerMu sync.RWMutex
// Power mode management
currentPowerMode string
powerModeMu sync.Mutex
@@ -76,6 +77,15 @@ type Olm struct {
tunnelWg sync.WaitGroup
}
// getPeerManager safely returns the current peerManager under a read-lock.
// Callers must check the returned value for nil before using it.
func (o *Olm) getPeerManager() *peers.PeerManager {
	o.peerManagerMu.RLock()
	defer o.peerManagerMu.RUnlock()
	return o.peerManager
}
// initTunnelInfo creates the shared UDP socket and holepunch manager.
// This is used during initial tunnel setup and when switching organizations.
func (o *Olm) initTunnelInfo(clientID string) error {
@@ -457,7 +467,7 @@ func (o *Olm) StartTunnel(config TunnelConfig) {
"userToken": userToken,
"fingerprint": o.fingerprint,
"postures": o.postures,
}, 2*time.Second, 10)
}, 2*time.Second, 20)
// Invoke onRegistered callback if configured
if o.olmConfig.OnRegistered != nil {
@@ -591,10 +601,12 @@ func (o *Olm) Close() {
}
// Close() also calls Stop() internally
o.peerManagerMu.Lock()
if o.peerManager != nil {
o.peerManager.Close()
o.peerManager = nil
}
o.peerManagerMu.Unlock()
if o.uapiListener != nil {
_ = o.uapiListener.Close()
@@ -806,14 +818,14 @@ func (o *Olm) SetPowerMode(mode string) error {
lowPowerInterval := 10 * time.Minute
if o.peerManager != nil {
peerMonitor := o.peerManager.GetPeerMonitor()
if pm := o.getPeerManager(); pm != nil {
peerMonitor := pm.GetPeerMonitor()
if peerMonitor != nil {
peerMonitor.SetPeerInterval(lowPowerInterval, lowPowerInterval)
peerMonitor.SetPeerHolepunchInterval(lowPowerInterval, lowPowerInterval)
logger.Info("Set monitoring intervals to 10 minutes for low power mode")
}
o.peerManager.UpdateAllPeersPersistentKeepalive(0) // disable
pm.UpdateAllPeersPersistentKeepalive(0) // disable
}
if o.holePunchManager != nil {
@@ -858,14 +870,14 @@ func (o *Olm) SetPowerMode(mode string) error {
}
// Restore intervals and reconnect websocket
if o.peerManager != nil {
peerMonitor := o.peerManager.GetPeerMonitor()
if pm := o.getPeerManager(); pm != nil {
peerMonitor := pm.GetPeerMonitor()
if peerMonitor != nil {
peerMonitor.ResetPeerHolepunchInterval()
peerMonitor.ResetPeerInterval()
}
o.peerManager.UpdateAllPeersPersistentKeepalive(5)
pm.UpdateAllPeersPersistentKeepalive(5)
}
if o.holePunchManager != nil {

View File

@@ -20,7 +20,8 @@ func (o *Olm) handleWgPeerAdd(msg websocket.WSMessage) {
return
}
if o.peerManager == nil {
pm := o.getPeerManager()
if pm == nil {
logger.Debug("Ignoring add-peer message: peerManager is nil (shutdown in progress)")
return
}
@@ -64,7 +65,7 @@ func (o *Olm) handleWgPeerAdd(msg websocket.WSMessage) {
_ = o.holePunchManager.TriggerHolePunch() // Trigger immediate hole punch attempt so that if the peer decides to relay we have already punched close to when we need it
if err := o.peerManager.AddPeer(siteConfigMsg.SiteConfig); err != nil {
if err := pm.AddPeer(siteConfigMsg.SiteConfig); err != nil {
logger.Error("Failed to add peer: %v", err)
return
}
@@ -81,7 +82,8 @@ func (o *Olm) handleWgPeerRemove(msg websocket.WSMessage) {
return
}
if o.peerManager == nil {
pm := o.getPeerManager()
if pm == nil {
logger.Debug("Ignoring remove-peer message: peerManager is nil (shutdown in progress)")
return
}
@@ -98,7 +100,7 @@ func (o *Olm) handleWgPeerRemove(msg websocket.WSMessage) {
return
}
if err := o.peerManager.RemovePeer(removeData.SiteId); err != nil {
if err := pm.RemovePeer(removeData.SiteId); err != nil {
logger.Error("Failed to remove peer: %v", err)
return
}
@@ -123,7 +125,8 @@ func (o *Olm) handleWgPeerUpdate(msg websocket.WSMessage) {
return
}
if o.peerManager == nil {
pm := o.getPeerManager()
if pm == nil {
logger.Debug("Ignoring update-peer message: peerManager is nil (shutdown in progress)")
return
}
@@ -141,7 +144,7 @@ func (o *Olm) handleWgPeerUpdate(msg websocket.WSMessage) {
}
// Get existing peer from PeerManager
existingPeer, exists := o.peerManager.GetPeer(updateData.SiteId)
existingPeer, exists := pm.GetPeer(updateData.SiteId)
if !exists {
logger.Warn("Peer with site ID %d not found", updateData.SiteId)
return
@@ -169,7 +172,7 @@ func (o *Olm) handleWgPeerUpdate(msg websocket.WSMessage) {
siteConfig.RemoteSubnets = updateData.RemoteSubnets
}
if err := o.peerManager.UpdatePeer(siteConfig); err != nil {
if err := pm.UpdatePeer(siteConfig); err != nil {
logger.Error("Failed to update peer: %v", err)
return
}
@@ -188,7 +191,8 @@ func (o *Olm) handleWgPeerRelay(msg websocket.WSMessage) {
logger.Debug("Received relay-peer message: %v", msg.Data)
// Check if peerManager is still valid (may be nil during shutdown)
if o.peerManager == nil {
pm := o.getPeerManager()
if pm == nil {
logger.Debug("Ignoring relay message: peerManager is nil (shutdown in progress)")
return
}
@@ -208,7 +212,7 @@ func (o *Olm) handleWgPeerRelay(msg websocket.WSMessage) {
return
}
if monitor := o.peerManager.GetPeerMonitor(); monitor != nil {
if monitor := pm.GetPeerMonitor(); monitor != nil {
monitor.CancelRelaySend(relayData.ChainId)
}
@@ -222,14 +226,15 @@ func (o *Olm) handleWgPeerRelay(msg websocket.WSMessage) {
// Update HTTP server to mark this peer as using relay
o.apiServer.UpdatePeerRelayStatus(relayData.SiteId, relayData.RelayEndpoint, true)
o.peerManager.RelayPeer(relayData.SiteId, primaryRelay, relayData.RelayPort)
pm.RelayPeer(relayData.SiteId, primaryRelay, relayData.RelayPort)
}
func (o *Olm) handleWgPeerUnrelay(msg websocket.WSMessage) {
logger.Debug("Received unrelay-peer message: %v", msg.Data)
// Check if peerManager is still valid (may be nil during shutdown)
if o.peerManager == nil {
pm := o.getPeerManager()
if pm == nil {
logger.Debug("Ignoring unrelay message: peerManager is nil (shutdown in progress)")
return
}
@@ -249,7 +254,7 @@ func (o *Olm) handleWgPeerUnrelay(msg websocket.WSMessage) {
return
}
if monitor := o.peerManager.GetPeerMonitor(); monitor != nil {
if monitor := pm.GetPeerMonitor(); monitor != nil {
monitor.CancelRelaySend(relayData.ChainId)
}
@@ -262,7 +267,7 @@ func (o *Olm) handleWgPeerUnrelay(msg websocket.WSMessage) {
// Update HTTP server to mark this peer as using relay
o.apiServer.UpdatePeerRelayStatus(relayData.SiteId, relayData.Endpoint, false)
o.peerManager.UnRelayPeer(relayData.SiteId, primaryRelay)
pm.UnRelayPeer(relayData.SiteId, primaryRelay)
}
func (o *Olm) handleWgPeerHolepunchAddSite(msg websocket.WSMessage) {
@@ -317,7 +322,12 @@ func (o *Olm) handleWgPeerHolepunchAddSite(msg websocket.WSMessage) {
}
// Get existing peer from PeerManager
_, exists := o.peerManager.GetPeer(handshakeData.SiteId)
pm := o.getPeerManager()
if pm == nil {
logger.Debug("Ignoring peer-handshake message: peerManager is nil (shutdown in progress)")
return
}
_, exists := pm.GetPeer(handshakeData.SiteId)
if exists {
logger.Warn("Peer with site ID %d already added", handshakeData.SiteId)
return

View File

@@ -6,6 +6,7 @@ import (
"strconv"
"strings"
"sync"
"time"
"github.com/fosrl/newt/bind"
"github.com/fosrl/newt/logger"
@@ -54,6 +55,9 @@ type PeerManager struct {
publicDNS []string
PersistentKeepalive int
routeOptimizerStop chan struct{}
optimizerTrigger chan struct{}
}
// NewPeerManager creates a new PeerManager with an internal PeerMonitor
@@ -80,6 +84,8 @@ func NewPeerManager(config PeerManagerConfig) *PeerManager {
config.PublicDNS,
)
pm.optimizerTrigger = make(chan struct{}, 1)
return pm
}
@@ -856,10 +862,12 @@ func (pm *PeerManager) Start() {
if pm.peerMonitor != nil {
pm.peerMonitor.Start()
}
pm.startRouteOptimizer()
}
// Stop stops the peer monitor
func (pm *PeerManager) Stop() {
pm.stopRouteOptimizer()
if pm.peerMonitor != nil {
pm.peerMonitor.Stop()
}
@@ -867,6 +875,7 @@ func (pm *PeerManager) Stop() {
// Close stops the peer monitor and cleans up resources
func (pm *PeerManager) Close() {
pm.stopRouteOptimizer()
if pm.peerMonitor != nil {
pm.peerMonitor.Close()
pm.peerMonitor = nil
@@ -928,3 +937,166 @@ endpoint=%s`, util.FixKey(peer.PublicKey), endpoint)
logger.Info("Switched peer %d back to direct connection at %s", siteId, endpoint)
return nil
}
// isBetterConnection reports whether connection quality (a) beats (b).
// Ranking, highest priority first: a connected peer beats a disconnected
// one; among connected peers a direct link beats a relayed one; within the
// same connectivity class the lower round-trip time wins. An RTT of zero
// means "unknown": an unknown candidate never displaces the incumbent,
// while a known RTT always beats an unknown one.
func isBetterConnection(aConn bool, aRelay bool, aRTT time.Duration,
	bConn bool, bRelay bool, bRTT time.Duration) bool {
	switch {
	case aConn != bConn:
		return aConn // connectivity dominates everything else
	case !aConn:
		return false // both offline: no preference either way
	case aRelay != bRelay:
		return !aRelay // direct link outranks a relayed one
	case aRTT == 0:
		return false // candidate RTT unknown: keep the incumbent
	case bRTT == 0:
		return true // incumbent RTT unknown: prefer measured data
	default:
		return aRTT < bRTT
	}
}
// selectBestOwner returns the siteId of the best site to own the given IP,
// based on connection quality. Must be called with pm.mu held.
//
// Quality ordering comes from isBetterConnection (connected > disconnected,
// direct > relayed, then lower RTT). Ties are broken deterministically in
// favor of the lowest siteId: without a stable tie-break, Go's randomized
// map iteration could crown a different winner on every call for
// equal-quality sites, causing optimizeRoutes to flap ownership (and the
// resulting WireGuard allowed-IP churn) between equivalent peers.
// Returns -1 when claims is empty.
func (pm *PeerManager) selectBestOwner(claims map[int]bool) int {
	bestSiteId := -1
	var bestConn, bestRelay bool
	var bestRTT time.Duration

	for siteId := range claims {
		conn, relay, rtt := pm.peerMonitor.GetConnectionQuality(siteId)
		switch {
		case bestSiteId < 0:
			// First candidate seen: take it unconditionally.
		case isBetterConnection(conn, relay, rtt, bestConn, bestRelay, bestRTT):
			// Strictly better quality than the current best.
		case !isBetterConnection(bestConn, bestRelay, bestRTT, conn, relay, rtt) && siteId < bestSiteId:
			// Neither side is strictly better (a tie): pick the lower id
			// so the result is stable across map iteration orders.
		default:
			continue
		}
		bestSiteId = siteId
		bestConn = conn
		bestRelay = relay
		bestRTT = rtt
	}
	return bestSiteId
}
// getWireGuardAllowedIPs returns the full set of IPs that should be in WireGuard
// for a peer: server IP /32 plus all shared IPs it currently owns.
// Must be called with pm.mu held. Returns nil for an unknown siteId.
func (pm *PeerManager) getWireGuardAllowedIPs(siteId int) []string {
	peer, ok := pm.peers[siteId]
	if !ok {
		return nil
	}
	// The peer's own server address is always present as a host route;
	// strip any existing prefix length before pinning it to /32.
	host, _, _ := strings.Cut(peer.ServerIP, "/")
	result := []string{host + "/32"}
	// Append every shared CIDR whose current owner is this site.
	for cidr, ownerSiteId := range pm.allowedIPOwners {
		if ownerSiteId == siteId {
			result = append(result, cidr)
		}
	}
	return result
}
// transferOwnership moves WireGuard ownership of cidr from fromSiteId to toSiteId.
// Must be called with pm.mu held.
//
// Order matters: the owner map is updated first so that
// getWireGuardAllowedIPs(fromSiteId) no longer includes cidr when the old
// owner's allowed-IP set is recomputed. The old owner's WireGuard entry is
// then trimmed, and finally cidr is added to the new owner.
//
// NOTE(review): if AddAllowedIP on the new owner fails, the owner map already
// points at toSiteId while neither peer carries cidr in WireGuard — the
// transfer is not atomic on that path. Confirm whether a subsequent run of
// the route optimizer is the intended recovery mechanism.
func (pm *PeerManager) transferOwnership(cidr string, fromSiteId int, toSiteId int) error {
	// Update owner map first
	pm.allowedIPOwners[cidr] = toSiteId
	// Remove cidr from old owner's WireGuard allowed IPs.
	// A missing fromPeer is tolerated: the previous owner may already be gone.
	if fromPeer, exists := pm.peers[fromSiteId]; exists {
		remaining := pm.getWireGuardAllowedIPs(fromSiteId) // cidr is no longer in owners, so it won't appear here
		if err := RemoveAllowedIP(pm.device, fromPeer.PublicKey, remaining); err != nil {
			// Revert
			pm.allowedIPOwners[cidr] = fromSiteId
			return fmt.Errorf("remove IP %s from site %d: %v", cidr, fromSiteId, err)
		}
	}
	// Add cidr to new owner's WireGuard allowed IPs
	if toPeer, exists := pm.peers[toSiteId]; exists {
		if err := AddAllowedIP(pm.device, toPeer.PublicKey, cidr); err != nil {
			return fmt.Errorf("add IP %s to site %d: %v", cidr, toSiteId, err)
		}
	}
	return nil
}
// optimizeRoutes evaluates all shared IPs and reassigns ownership to the best site.
// It walks every contested CIDR (claimed by more than one site), asks
// selectBestOwner for the preferred site, and moves WireGuard ownership there
// when it differs from the current assignment.
func (pm *PeerManager) optimizeRoutes() {
	pm.mu.Lock()
	defer pm.mu.Unlock()

	for cidr, claimants := range pm.allowedIPClaims {
		// Uncontested CIDRs never need rebalancing.
		if len(claimants) < 2 {
			continue
		}

		owner, owned := pm.allowedIPOwners[cidr]
		best := pm.selectBestOwner(claimants)
		if best < 0 {
			continue
		}

		switch {
		case owned && owner == best:
			// Already routed through the preferred site; nothing to do.
		case !owned:
			// No current owner: assign the CIDR to the best site outright.
			pm.allowedIPOwners[cidr] = best
			if peer, ok := pm.peers[best]; ok {
				if err := AddAllowedIP(pm.device, peer.PublicKey, cidr); err != nil {
					logger.Error("Failed to assign IP %s to site %d: %v", cidr, best, err)
				}
			}
		default:
			logger.Info("Route optimizer: moving %s from site %d to site %d", cidr, owner, best)
			if err := pm.transferOwnership(cidr, owner, best); err != nil {
				logger.Error("Failed to transfer ownership of %s from site %d to site %d: %v",
					cidr, owner, best, err)
			}
		}
	}
}
// startRouteOptimizer registers the status-change callback and launches the
// optimizer goroutine. The goroutine re-evaluates routes whenever a peer's
// connection status changes (via optimizerTrigger) and on a 5-second ticker
// as a fallback, until the stop channel is closed.
//
// The stop channel is captured in a local variable before the goroutine is
// launched: stopRouteOptimizer closes the channel and then sets
// pm.routeOptimizerStop to nil, so having the goroutine re-read the struct
// field on every select iteration would be a data race — and a receive from
// a nil channel blocks forever, leaking the goroutine if the nil write were
// observed. The captured local is immune to both problems.
func (pm *PeerManager) startRouteOptimizer() {
	stop := make(chan struct{})
	pm.routeOptimizerStop = stop

	// Trigger optimization whenever any peer's connection status changes.
	// The send is non-blocking: the buffered trigger channel coalesces bursts.
	if pm.peerMonitor != nil {
		pm.peerMonitor.SetStatusChangeCallback(func(_ int) {
			select {
			case pm.optimizerTrigger <- struct{}{}:
			default:
			}
		})
	}

	go func() {
		ticker := time.NewTicker(5 * time.Second)
		defer ticker.Stop()
		for {
			select {
			case <-stop:
				return
			case <-pm.optimizerTrigger:
				pm.optimizeRoutes()
			case <-ticker.C:
				pm.optimizeRoutes()
			}
		}
	}()
}
// stopRouteOptimizer stops the route optimizer goroutine if it is running.
// Safe to call when the optimizer was never started.
func (pm *PeerManager) stopRouteOptimizer() {
	if pm.routeOptimizerStop == nil {
		return
	}
	close(pm.routeOptimizerStop)
	pm.routeOptimizerStop = nil
}

View File

@@ -85,7 +85,9 @@ type PeerMonitor struct {
apiServer *api.API
// WG connection status tracking
wgConnectionStatus map[int]bool // siteID -> WG connected status
wgConnectionStatus map[int]bool // siteID -> WG connected status
wgConnectionRTT map[int]time.Duration // siteID -> last known RTT
statusChangeCallback func(siteId int) // called when any peer's connection status changes
}
// NewPeerMonitor creates a new peer monitor with the given callback
@@ -122,6 +124,7 @@ func NewPeerMonitor(wsClient *websocket.Client, middleDev *middleDevice.MiddleDe
rapidTestMaxAttempts: 5, // 5 attempts = ~1-1.5 seconds total
apiServer: apiServer,
wgConnectionStatus: make(map[int]bool),
wgConnectionRTT: make(map[int]time.Duration),
// Exponential backoff settings for holepunch monitor
defaultHolepunchMinInterval: 2 * time.Second,
defaultHolepunchMaxInterval: 30 * time.Second,
@@ -392,6 +395,9 @@ func (pm *PeerMonitor) handleConnectionStatusChange(siteID int, status Connectio
pm.mutex.Lock()
previousStatus, exists := pm.wgConnectionStatus[siteID]
pm.wgConnectionStatus[siteID] = status.Connected
if status.Connected && status.RTT > 0 {
pm.wgConnectionRTT[siteID] = status.RTT
}
isRelayed := pm.relayedPeers[siteID]
endpoint := pm.holepunchEndpoints[siteID]
pm.mutex.Unlock()
@@ -409,6 +415,11 @@ func (pm *PeerMonitor) handleConnectionStatusChange(siteID int, status Connectio
if pm.apiServer != nil {
pm.apiServer.UpdatePeerStatus(siteID, status.Connected, status.RTT, endpoint, isRelayed)
}
// Notify route optimizer of status change
if pm.statusChangeCallback != nil {
pm.statusChangeCallback(siteID)
}
}
// sendRelay sends a relay message to the server with retry, keyed by chainId
@@ -521,6 +532,25 @@ func (pm *PeerMonitor) IsPeerRelayed(siteID int) bool {
return pm.relayedPeers[siteID]
}
// SetStatusChangeCallback registers a callback that is invoked whenever a peer's
// WireGuard connection status changes (connected/disconnected). The callback must
// be non-blocking (e.g., send to a buffered channel).
func (pm *PeerMonitor) SetStatusChangeCallback(fn func(siteId int)) {
	pm.mutex.Lock()
	pm.statusChangeCallback = fn
	pm.mutex.Unlock()
}
// GetConnectionQuality returns the current connection quality metrics for a peer.
// Missing entries yield zero values (disconnected, not relayed, unknown RTT),
// since all three lookups are plain map reads.
func (pm *PeerMonitor) GetConnectionQuality(siteId int) (connected bool, relayed bool, rtt time.Duration) {
	pm.mutex.Lock()
	status := pm.wgConnectionStatus[siteId]
	viaRelay := pm.relayedPeers[siteId]
	lastRTT := pm.wgConnectionRTT[siteId]
	pm.mutex.Unlock()
	return status, viaRelay, lastRTT
}
// startHolepunchMonitor starts the holepunch connection monitoring
// Note: This function assumes the mutex is already held by the caller (called from Start())
func (pm *PeerMonitor) startHolepunchMonitor() error {