mirror of
https://github.com/fosrl/newt.git
synced 2026-04-10 20:06:38 +00:00
Merge pull request #277 from LaurenceJJones/refactor/proxy-udp-buffer-pool
perf(proxy): add sync.Pool for UDP buffers
This commit is contained in:
@@ -23,9 +23,30 @@ import (
|
|||||||
|
|
||||||
// Package-level proxy constants.
const (
	errUnsupportedProtoFmt = "unsupported protocol: %s"

	maxUDPPacketSize = 65507 // Maximum UDP packet size
)

// udpBufferPool provides reusable buffers for UDP packet handling.
// This reduces GC pressure from frequent large allocations.
var udpBufferPool = sync.Pool{
	New: func() any {
		b := make([]byte, maxUDPPacketSize)
		return &b
	},
}
|
||||||
|
|
||||||
|
// getUDPBuffer retrieves a buffer from the pool.
|
||||||
|
func getUDPBuffer() *[]byte {
|
||||||
|
return udpBufferPool.Get().(*[]byte)
|
||||||
|
}
|
||||||
|
|
||||||
|
// putUDPBuffer clears and returns a buffer to the pool.
|
||||||
|
func putUDPBuffer(buf *[]byte) {
|
||||||
|
// Clear the buffer to prevent data leakage
|
||||||
|
clear(*buf)
|
||||||
|
udpBufferPool.Put(buf)
|
||||||
|
}
|
||||||
|
|
||||||
// Target represents a proxy target with its address and port
|
// Target represents a proxy target with its address and port
|
||||||
type Target struct {
|
type Target struct {
|
||||||
Address string
|
Address string
|
||||||
@@ -555,7 +576,9 @@ func (pm *ProxyManager) handleTCPProxy(listener net.Listener, targetAddr string)
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (pm *ProxyManager) handleUDPProxy(conn *gonet.UDPConn, targetAddr string) {
|
func (pm *ProxyManager) handleUDPProxy(conn *gonet.UDPConn, targetAddr string) {
|
||||||
buffer := make([]byte, maxUDPPacketSize) // Max UDP packet size
|
bufPtr := getUDPBuffer()
|
||||||
|
defer putUDPBuffer(bufPtr)
|
||||||
|
buffer := *bufPtr
|
||||||
clientConns := make(map[string]*net.UDPConn)
|
clientConns := make(map[string]*net.UDPConn)
|
||||||
var clientsMutex sync.RWMutex
|
var clientsMutex sync.RWMutex
|
||||||
|
|
||||||
@@ -638,7 +661,10 @@ func (pm *ProxyManager) handleUDPProxy(conn *gonet.UDPConn, targetAddr string) {
|
|||||||
go func(clientKey string, targetConn *net.UDPConn, remoteAddr net.Addr, tunnelID string) {
|
go func(clientKey string, targetConn *net.UDPConn, remoteAddr net.Addr, tunnelID string) {
|
||||||
start := time.Now()
|
start := time.Now()
|
||||||
result := "success"
|
result := "success"
|
||||||
|
bufPtr := getUDPBuffer()
|
||||||
defer func() {
|
defer func() {
|
||||||
|
// Return buffer to pool first
|
||||||
|
putUDPBuffer(bufPtr)
|
||||||
// Always clean up when this goroutine exits
|
// Always clean up when this goroutine exits
|
||||||
clientsMutex.Lock()
|
clientsMutex.Lock()
|
||||||
if storedConn, exists := clientConns[clientKey]; exists && storedConn == targetConn {
|
if storedConn, exists := clientConns[clientKey]; exists && storedConn == targetConn {
|
||||||
@@ -653,7 +679,7 @@ func (pm *ProxyManager) handleUDPProxy(conn *gonet.UDPConn, targetAddr string) {
|
|||||||
telemetry.IncProxyConnectionEvent(context.Background(), tunnelID, "udp", telemetry.ProxyConnectionClosed)
|
telemetry.IncProxyConnectionEvent(context.Background(), tunnelID, "udp", telemetry.ProxyConnectionClosed)
|
||||||
}()
|
}()
|
||||||
|
|
||||||
buffer := make([]byte, maxUDPPacketSize)
|
buffer := *bufPtr
|
||||||
for {
|
for {
|
||||||
n, _, err := targetConn.ReadFromUDP(buffer)
|
n, _, err := targetConn.ReadFromUDP(buffer)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|||||||
Reference in New Issue
Block a user