From ba91478b89832990cb366e2362a21c93e5ce698e Mon Sep 17 00:00:00 2001
From: Owen
Date: Mon, 12 Jan 2026 12:29:42 -0800
Subject: [PATCH] Try to reduce CPU when idle

---
 dns/dns_proxy.go         | 76 ++++++++++++++++------------------------
 peers/monitor/monitor.go | 67 +++++++++++++++--------------------
 2 files changed, 60 insertions(+), 83 deletions(-)

diff --git a/dns/dns_proxy.go b/dns/dns_proxy.go
index 748a5a9..d010bc6 100644
--- a/dns/dns_proxy.go
+++ b/dns/dns_proxy.go
@@ -599,12 +599,12 @@ func (p *DNSProxy) runTunnelPacketSender() {
 	defer p.wg.Done()
 	logger.Debug("DNS tunnel packet sender goroutine started")
 
-	ticker := time.NewTicker(1 * time.Millisecond)
-	defer ticker.Stop()
-
 	for {
-		select {
-		case <-p.ctx.Done():
+		// Use blocking ReadContext instead of polling - much more CPU efficient
+		// This will block until a packet is available or context is cancelled
+		pkt := p.tunnelEp.ReadContext(p.ctx)
+		if pkt == nil {
+			// Context was cancelled or endpoint closed
 			logger.Debug("DNS tunnel packet sender exiting")
 			// Drain any remaining packets
 			for {
 				pkt := p.tunnelEp.Read()
 				if pkt == nil {
 					break
 				}
 				pkt.DecRef()
 			}
 			return
-		case <-ticker.C:
-			// Try to read packets
-			for i := 0; i < 10; i++ {
-				pkt := p.tunnelEp.Read()
-				if pkt == nil {
-					break
-				}
-
-				// Extract packet data
-				slices := pkt.AsSlices()
-				if len(slices) > 0 {
-					var totalSize int
-					for _, slice := range slices {
-						totalSize += len(slice)
-					}
-
-					buf := make([]byte, totalSize)
-					pos := 0
-					for _, slice := range slices {
-						copy(buf[pos:], slice)
-						pos += len(slice)
-					}
-
-					// Inject into MiddleDevice (outbound to WG)
-					p.middleDevice.InjectOutbound(buf)
-				}
-
-				pkt.DecRef()
-			}
 		}
+
+		// Extract packet data
+		slices := pkt.AsSlices()
+		if len(slices) > 0 {
+			var totalSize int
+			for _, slice := range slices {
+				totalSize += len(slice)
+			}
+
+			buf := make([]byte, totalSize)
+			pos := 0
+			for _, slice := range slices {
+				copy(buf[pos:], slice)
+				pos += len(slice)
+			}
+
+			// Inject into MiddleDevice (outbound to WG)
+			p.middleDevice.InjectOutbound(buf)
+		}
+
+		pkt.DecRef()
 	}
 }
@@ -657,18 +649,12 @@ func (p *DNSProxy) runPacketSender() {
 	const offset = 16
 
 	for {
-		select {
-		case <-p.ctx.Done():
-			return
-		default:
-		}
-
-		// Read packets from netstack endpoint
-		pkt := p.ep.Read()
+		// Use blocking ReadContext instead of polling - much more CPU efficient
+		// This will block until a packet is available or context is cancelled
+		pkt := p.ep.ReadContext(p.ctx)
 		if pkt == nil {
-			// No packet available, small sleep to avoid busy loop
-			time.Sleep(1 * time.Millisecond)
-			continue
+			// Context was cancelled or endpoint closed
+			return
 		}
 
 		// Extract packet data as slices
diff --git a/peers/monitor/monitor.go b/peers/monitor/monitor.go
index 1ec267e..45dd090 100644
--- a/peers/monitor/monitor.go
+++ b/peers/monitor/monitor.go
@@ -42,7 +42,7 @@ type PeerMonitor struct {
 	stack       *stack.Stack
 	ep          *channel.Endpoint
 	activePorts map[uint16]bool
-	portsLock   sync.Mutex
+	portsLock   sync.RWMutex
 	nsCtx       context.Context
 	nsCancel    context.CancelFunc
 	nsWg        sync.WaitGroup
@@ -809,9 +809,9 @@ func (pm *PeerMonitor) handlePacket(packet []byte) bool {
 	}
 
 	// Check if we are listening on this port
-	pm.portsLock.Lock()
+	pm.portsLock.RLock()
 	active := pm.activePorts[uint16(port)]
-	pm.portsLock.Unlock()
+	pm.portsLock.RUnlock()
 
 	if !active {
 		return false
 	}
@@ -842,13 +842,12 @@ func (pm *PeerMonitor) runPacketSender() {
 	defer pm.nsWg.Done()
 	logger.Debug("PeerMonitor: Packet sender goroutine started")
 
-	// Use a ticker to periodically check for packets without blocking indefinitely
-	ticker := time.NewTicker(10 * time.Millisecond)
-	defer ticker.Stop()
-
 	for {
-		select {
-		case <-pm.nsCtx.Done():
+		// Use blocking ReadContext instead of polling - much more CPU efficient
+		// This will block until a packet is available or context is cancelled
+		pkt := pm.ep.ReadContext(pm.nsCtx)
+		if pkt == nil {
+			// Context was cancelled or endpoint closed
 			logger.Debug("PeerMonitor: Packet sender context cancelled, draining packets")
 			// Drain any remaining packets before exiting
 			for {
 				pkt := pm.ep.Read()
 				if pkt == nil {
 					break
 				}
 				pkt.DecRef()
 			}
 			logger.Debug("PeerMonitor: Packet sender goroutine exiting")
 			return
-		case <-ticker.C:
-			// Try to read packets in batches
-			for i := 0; i < 10; i++ {
-				pkt := pm.ep.Read()
-				if pkt == nil {
-					break
-				}
-
-				// Extract packet data
-				slices := pkt.AsSlices()
-				if len(slices) > 0 {
-					var totalSize int
-					for _, slice := range slices {
-						totalSize += len(slice)
-					}
-
-					buf := make([]byte, totalSize)
-					pos := 0
-					for _, slice := range slices {
-						copy(buf[pos:], slice)
-						pos += len(slice)
-					}
-
-					// Inject into MiddleDevice (outbound to WG)
-					pm.middleDev.InjectOutbound(buf)
-				}
-
-				pkt.DecRef()
-			}
 		}
+
+		// Extract packet data
+		slices := pkt.AsSlices()
+		if len(slices) > 0 {
+			var totalSize int
+			for _, slice := range slices {
+				totalSize += len(slice)
+			}
+
+			buf := make([]byte, totalSize)
+			pos := 0
+			for _, slice := range slices {
+				copy(buf[pos:], slice)
+				pos += len(slice)
+			}
+
+			// Inject into MiddleDevice (outbound to WG)
+			pm.middleDev.InjectOutbound(buf)
+		}
+
+		pkt.DecRef()
 	}
 }
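
Note on the pattern: both files previously woke a goroutine every 1-10 ms to
poll Endpoint.Read(), burning CPU while idle. gvisor's channel.Endpoint also
exposes ReadContext, which parks the goroutine until a packet arrives or the
context is cancelled, and the drain loop on shutdown releases any packets
still queued so their references are not leaked. Below is a minimal,
dependency-free sketch of the same pattern; the endpoint type, its backing
channel, and pump are illustrative stand-ins for channel.Endpoint and
runPacketSender, not code from this repo.

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// endpoint models a packet queue with the two read styles used in the patch.
// It is a hypothetical simplification of gvisor's channel.Endpoint.
type endpoint struct{ ch chan []byte }

// Read is the non-blocking poll: nil means "no packet right now".
func (e *endpoint) Read() []byte {
	select {
	case p := <-e.ch:
		return p
	default:
		return nil
	}
}

// ReadContext blocks until a packet is available or ctx is cancelled,
// so an idle reader consumes no CPU.
func (e *endpoint) ReadContext(ctx context.Context) []byte {
	select {
	case p := <-e.ch:
		return p
	case <-ctx.Done():
		return nil
	}
}

// pump has the shape of runPacketSender after this patch: block for a
// packet, and on shutdown drain whatever is still queued before returning.
func pump(ctx context.Context, ep *endpoint, wg *sync.WaitGroup) {
	defer wg.Done()
	for {
		pkt := ep.ReadContext(ctx)
		if pkt == nil {
			// Context cancelled: drain remaining packets, then exit.
			for p := ep.Read(); p != nil; p = ep.Read() {
				fmt.Printf("drained %d bytes\n", len(p))
			}
			return
		}
		fmt.Printf("forwarded %d bytes\n", len(pkt))
	}
}

func main() {
	ep := &endpoint{ch: make(chan []byte, 8)}
	ctx, cancel := context.WithCancel(context.Background())
	var wg sync.WaitGroup
	wg.Add(1)
	go pump(ctx, ep, &wg)

	ep.ch <- []byte("hello")
	time.Sleep(10 * time.Millisecond) // let the pump forward it
	ep.ch <- []byte("late") // races shutdown: forwarded or drained, never lost
	cancel()
	wg.Wait()
}

The sync.RWMutex change in monitor.go is independent of the read loops: the
port lookup on the hot packet path now takes a shared RLock, so concurrent
packets no longer serialize on the map read.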