mirror of
https://github.com/netbirdio/netbird.git
synced 2026-04-16 15:26:40 +00:00
put grpc endpoint on management and send test exposed service
This commit is contained in:
@@ -150,6 +150,11 @@ func (s *BaseServer) GRPCServer() *grpc.Server {
|
||||
}
|
||||
mgmtProto.RegisterManagementServiceServer(gRPCAPIHandler, srv)
|
||||
|
||||
// Register ProxyService for proxy connections
|
||||
proxyService := nbgrpc.NewProxyServiceServer()
|
||||
mgmtProto.RegisterProxyServiceServer(gRPCAPIHandler, proxyService)
|
||||
log.Info("ProxyService registered on gRPC server")
|
||||
|
||||
return gRPCAPIHandler
|
||||
})
|
||||
}
|
||||
|
||||
242
management/internals/shared/grpc/proxy_service.go
Normal file
242
management/internals/shared/grpc/proxy_service.go
Normal file
@@ -0,0 +1,242 @@
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/peer"
|
||||
"google.golang.org/grpc/status"
|
||||
"google.golang.org/protobuf/types/known/timestamppb"
|
||||
|
||||
"github.com/netbirdio/netbird/shared/management/proto"
|
||||
)
|
||||
|
||||
// ProxyServiceServer implements the ProxyService gRPC server.
//
// It tracks every connected proxy in connectedProxies so that service
// updates can be fanned out to all of them via SendServiceUpdate.
type ProxyServiceServer struct {
	proto.UnimplementedProxyServiceServer

	// Map of connected proxies: proxy_id -> *proxyConnection.
	connectedProxies sync.Map

	// Channel for broadcasting service updates to all proxies.
	// NOTE(review): this channel is allocated in NewProxyServiceServer but
	// nothing in this file reads from or writes to it — confirm whether it
	// is still needed or should be removed.
	updatesChan chan *proto.ServiceUpdate
}
|
||||
|
||||
// proxyConnection represents a connected proxy and the per-connection state
// needed to push messages to it.
type proxyConnection struct {
	proxyID  string                          // identifier sent by the proxy in its ProxyHello
	stream   proto.ProxyService_StreamServer // bidirectional stream to this proxy
	sendChan chan *proto.ManagementMessage   // buffered outbound queue drained by sender()
	ctx      context.Context                 // canceled when the connection is torn down
	cancel   context.CancelFunc
	// NOTE(review): mu is never locked anywhere in this file — confirm it is
	// actually needed or remove it.
	mu sync.RWMutex
}
|
||||
|
||||
// NewProxyServiceServer creates a new proxy service server
|
||||
func NewProxyServiceServer() *ProxyServiceServer {
|
||||
return &ProxyServiceServer{
|
||||
updatesChan: make(chan *proto.ServiceUpdate, 100),
|
||||
}
|
||||
}
|
||||
|
||||
// Stream handles the bidirectional stream with proxy clients
|
||||
func (s *ProxyServiceServer) Stream(stream proto.ProxyService_StreamServer) error {
|
||||
ctx := stream.Context()
|
||||
|
||||
peerInfo := ""
|
||||
if p, ok := peer.FromContext(ctx); ok {
|
||||
peerInfo = p.Addr.String()
|
||||
}
|
||||
|
||||
log.Infof("New proxy connection from %s", peerInfo)
|
||||
|
||||
firstMsg, err := stream.Recv()
|
||||
if err != nil {
|
||||
log.Errorf("Failed to receive ProxyHello: %v", err)
|
||||
return status.Errorf(codes.InvalidArgument, "expected ProxyHello message")
|
||||
}
|
||||
|
||||
hello := firstMsg.GetHello()
|
||||
if hello == nil {
|
||||
log.Error("First message is not ProxyHello")
|
||||
return status.Errorf(codes.InvalidArgument, "first message must be ProxyHello")
|
||||
}
|
||||
|
||||
proxyID := hello.GetProxyId()
|
||||
if proxyID == "" {
|
||||
return status.Errorf(codes.InvalidArgument, "proxy_id is required")
|
||||
}
|
||||
|
||||
log.Infof("Proxy %s connected (version: %s, started: %s)",
|
||||
proxyID, hello.GetVersion(), hello.GetStartedAt().AsTime())
|
||||
|
||||
connCtx, cancel := context.WithCancel(ctx)
|
||||
conn := &proxyConnection{
|
||||
proxyID: proxyID,
|
||||
stream: stream,
|
||||
sendChan: make(chan *proto.ManagementMessage, 100),
|
||||
ctx: connCtx,
|
||||
cancel: cancel,
|
||||
}
|
||||
|
||||
s.connectedProxies.Store(proxyID, conn)
|
||||
defer func() {
|
||||
s.connectedProxies.Delete(proxyID)
|
||||
cancel()
|
||||
log.Infof("Proxy %s disconnected", proxyID)
|
||||
}()
|
||||
|
||||
if err := s.sendSnapshot(conn); err != nil {
|
||||
log.Errorf("Failed to send snapshot to proxy %s: %v", proxyID, err)
|
||||
return err
|
||||
}
|
||||
|
||||
errChan := make(chan error, 2)
|
||||
go s.sender(conn, errChan)
|
||||
|
||||
go s.receiver(conn, errChan)
|
||||
|
||||
select {
|
||||
case err := <-errChan:
|
||||
return err
|
||||
case <-connCtx.Done():
|
||||
return connCtx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
// sendSnapshot sends the initial snapshot of all services to a newly
// connected proxy. It is the first message the proxy receives after its
// ProxyHello is accepted.
func (s *ProxyServiceServer) sendSnapshot(conn *proxyConnection) error {
	// TODO: Get actual services from database/store
	// For now, sending test service
	testService := &proto.ExposedServiceConfig{
		Id:     "test",
		Domain: "test.netbird.io",
		PathMappings: map[string]string{
			"/": "100.116.118.156:8181",
		},
		SetupKey: "some-key",
		Auth: &proto.AuthConfig{
			AuthType: &proto.AuthConfig_BearerAuth{
				BearerAuth: &proto.BearerAuthConfig{
					Enabled: true,
				},
			},
		},
	}

	snapshot := &proto.ServicesSnapshot{
		Services:  []*proto.ExposedServiceConfig{testService},
		Timestamp: timestamppb.Now(),
	}

	msg := &proto.ManagementMessage{
		Payload: &proto.ManagementMessage_Snapshot{
			Snapshot: snapshot,
		},
	}

	log.Infof("Sending snapshot to proxy %s with %d services", conn.proxyID, len(snapshot.Services))

	// Send directly on the stream rather than via conn.sendChan: the sender
	// goroutine has not been started yet when Stream calls this.
	if err := conn.stream.Send(msg); err != nil {
		return status.Errorf(codes.Internal, "failed to send snapshot: %v", err)
	}

	return nil
}
|
||||
|
||||
// sender handles sending messages to proxy
|
||||
func (s *ProxyServiceServer) sender(conn *proxyConnection, errChan chan<- error) {
|
||||
for {
|
||||
select {
|
||||
case msg := <-conn.sendChan:
|
||||
if err := conn.stream.Send(msg); err != nil {
|
||||
log.Errorf("Failed to send message to proxy %s: %v", conn.proxyID, err)
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
case <-conn.ctx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// receiver handles receiving messages from proxy
|
||||
func (s *ProxyServiceServer) receiver(conn *proxyConnection, errChan chan<- error) {
|
||||
for {
|
||||
msg, err := conn.stream.Recv()
|
||||
if err == io.EOF {
|
||||
log.Infof("Proxy %s closed connection", conn.proxyID)
|
||||
errChan <- nil
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
log.Errorf("Failed to receive from proxy %s: %v", conn.proxyID, err)
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
|
||||
// Handle different message types
|
||||
switch payload := msg.GetPayload().(type) {
|
||||
case *proto.ProxyMessage_RequestData:
|
||||
s.handleAccessLog(conn.proxyID, payload.RequestData)
|
||||
case *proto.ProxyMessage_Hello:
|
||||
log.Warnf("Received unexpected ProxyHello from %s after initial handshake", conn.proxyID)
|
||||
default:
|
||||
log.Warnf("Received unknown message type from proxy %s", conn.proxyID)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// handleAccessLog processes access log from proxy
|
||||
func (s *ProxyServiceServer) handleAccessLog(proxyID string, data *proto.ProxyRequestData) {
|
||||
log.WithFields(log.Fields{
|
||||
"proxy_id": proxyID,
|
||||
"service_id": data.GetServiceId(),
|
||||
"host": data.GetHost(),
|
||||
"path": data.GetPath(),
|
||||
"method": data.GetMethod(),
|
||||
"response_code": data.GetResponseCode(),
|
||||
"duration_ms": data.GetDurationMs(),
|
||||
"source_ip": data.GetSourceIp(),
|
||||
"auth_mechanism": data.GetAuthMechanism(),
|
||||
"user_id": data.GetUserId(),
|
||||
"auth_success": data.GetAuthSuccess(),
|
||||
}).Info("Access log from proxy")
|
||||
|
||||
// TODO: Store access log in database/metrics system
|
||||
}
|
||||
|
||||
// SendServiceUpdate broadcasts a service update to all connected proxies
|
||||
// This should be called by management when services are created/updated/removed
|
||||
func (s *ProxyServiceServer) SendServiceUpdate(update *proto.ServiceUpdate) {
|
||||
updateMsg := &proto.ManagementMessage{
|
||||
Payload: &proto.ManagementMessage_Update{
|
||||
Update: update,
|
||||
},
|
||||
}
|
||||
|
||||
// Send to all connected proxies
|
||||
s.connectedProxies.Range(func(key, value interface{}) bool {
|
||||
conn := value.(*proxyConnection)
|
||||
select {
|
||||
case conn.sendChan <- updateMsg:
|
||||
log.Debugf("Sent service update to proxy %s", conn.proxyID)
|
||||
default:
|
||||
log.Warnf("Failed to send service update to proxy %s (channel full)", conn.proxyID)
|
||||
}
|
||||
return true
|
||||
})
|
||||
}
|
||||
|
||||
// GetConnectedProxies returns list of connected proxy IDs
|
||||
func (s *ProxyServiceServer) GetConnectedProxies() []string {
|
||||
var proxies []string
|
||||
s.connectedProxies.Range(func(key, value interface{}) bool {
|
||||
proxies = append(proxies, key.(string))
|
||||
return true
|
||||
})
|
||||
return proxies
|
||||
}
|
||||
325
proxy/pkg/grpc/client.go
Normal file
325
proxy/pkg/grpc/client.go
Normal file
@@ -0,0 +1,325 @@
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/credentials/insecure"
|
||||
"google.golang.org/grpc/keepalive"
|
||||
"google.golang.org/protobuf/types/known/timestamppb"
|
||||
|
||||
"github.com/netbirdio/netbird/shared/management/proto"
|
||||
)
|
||||
|
||||
const (
	// reconnectInterval is how long the client waits between reconnection
	// attempts to the management server.
	reconnectInterval = 5 * time.Second
	// proxyVersion is the client version reported in the ProxyHello message.
	proxyVersion = "0.1.0"
)
|
||||
|
||||
// ServiceUpdateHandler is called when services are added/updated/removed.
// A returned error is only logged by the client; it does not stop the stream.
type ServiceUpdateHandler func(update *proto.ServiceUpdate) error

// Client manages the gRPC connection to the management server, reconnecting
// automatically until Stop is called.
type Client struct {
	proxyID              string // identifier sent in ProxyHello
	managementURL        string // management address; http(s):// scheme is stripped before dialing
	conn                 *grpc.ClientConn
	stream               proto.ProxyService_StreamClient
	serviceUpdateHandler ServiceUpdateHandler
	accessLogChan        chan *proto.ProxyRequestData // buffered outbound access-log queue
	ctx                  context.Context              // client lifetime; canceled by Stop
	cancel               context.CancelFunc
	mu                   sync.RWMutex // guards conn, stream and connected
	connected            bool
}
|
||||
|
||||
// ClientConfig holds client configuration.
type ClientConfig struct {
	ProxyID       string // identifier reported to management
	ManagementURL string // management server address
	// ServiceUpdateHandler receives the initial snapshot (replayed as CREATED
	// updates) and subsequent incremental updates. May be nil, in which case
	// updates are logged and dropped.
	ServiceUpdateHandler ServiceUpdateHandler
}
|
||||
|
||||
// NewClient creates a new gRPC client for proxy-management communication
|
||||
func NewClient(config ClientConfig) *Client {
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
|
||||
return &Client{
|
||||
proxyID: config.ProxyID,
|
||||
managementURL: config.ManagementURL,
|
||||
serviceUpdateHandler: config.ServiceUpdateHandler,
|
||||
accessLogChan: make(chan *proto.ProxyRequestData, 1000),
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
}
|
||||
}
|
||||
|
||||
// Start connects to management server and maintains connection
|
||||
func (c *Client) Start() error {
|
||||
go c.connectionLoop()
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop cancels the client context, closes the send side of the stream and the
// underlying gRPC connection. It returns the connection Close error, if any.
func (c *Client) Stop() error {
	// Cancel first so connectionLoop and the sender/receiver goroutines
	// observe shutdown before the connection is torn down.
	c.cancel()
	c.mu.Lock()
	defer c.mu.Unlock()

	if c.stream != nil {
		// Try to close stream gracefully
		_ = c.stream.CloseSend()
	}

	if c.conn != nil {
		return c.conn.Close()
	}

	return nil
}
|
||||
|
||||
// SendAccessLog queues an access log to be sent to management
|
||||
func (c *Client) SendAccessLog(log *proto.ProxyRequestData) {
|
||||
select {
|
||||
case c.accessLogChan <- log:
|
||||
default:
|
||||
// Channel full, drop log
|
||||
}
|
||||
}
|
||||
|
||||
// IsConnected returns whether client is connected to management
|
||||
func (c *Client) IsConnected() bool {
|
||||
c.mu.RLock()
|
||||
defer c.mu.RUnlock()
|
||||
return c.connected
|
||||
}
|
||||
|
||||
// connectionLoop maintains connection to management server
|
||||
func (c *Client) connectionLoop() {
|
||||
for {
|
||||
select {
|
||||
case <-c.ctx.Done():
|
||||
return
|
||||
default:
|
||||
}
|
||||
|
||||
log.Infof("Connecting to management server at %s", c.managementURL)
|
||||
|
||||
if err := c.connect(); err != nil {
|
||||
log.Errorf("Failed to connect to management: %v", err)
|
||||
c.setConnected(false)
|
||||
|
||||
select {
|
||||
case <-c.ctx.Done():
|
||||
return
|
||||
case <-time.After(reconnectInterval):
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
// Handle connection
|
||||
if err := c.handleConnection(); err != nil {
|
||||
log.Errorf("Connection error: %v", err)
|
||||
c.setConnected(false)
|
||||
}
|
||||
|
||||
// Reconnect after delay
|
||||
select {
|
||||
case <-c.ctx.Done():
|
||||
return
|
||||
case <-time.After(reconnectInterval):
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// connect establishes the gRPC connection and bidirectional stream to the
// management server and performs the ProxyHello handshake. On success the
// client is marked connected; on failure any partially created connection is
// closed before returning.
func (c *Client) connect() error {
	// Strip scheme from URL if present (gRPC doesn't use http:// or https://)
	target := c.managementURL
	target = strings.TrimPrefix(target, "http://")
	target = strings.TrimPrefix(target, "https://")

	// Create gRPC connection
	opts := []grpc.DialOption{
		grpc.WithTransportCredentials(insecure.NewCredentials()), // TODO: Add TLS
		grpc.WithKeepaliveParams(keepalive.ClientParameters{
			Time:                20 * time.Second,
			Timeout:             10 * time.Second,
			PermitWithoutStream: true,
		}),
	}

	// NOTE(review): grpc.Dial is deprecated in recent grpc-go in favor of
	// grpc.NewClient — consider migrating when the module's grpc version
	// supports it.
	conn, err := grpc.Dial(target, opts...)
	if err != nil {
		return fmt.Errorf("failed to dial: %w", err)
	}

	c.mu.Lock()
	c.conn = conn
	c.mu.Unlock()

	// Create stream; it is bound to the client-lifetime context, so it ends
	// when Stop cancels c.ctx.
	client := proto.NewProxyServiceClient(conn)
	stream, err := client.Stream(c.ctx)
	if err != nil {
		conn.Close()
		return fmt.Errorf("failed to create stream: %w", err)
	}

	c.mu.Lock()
	c.stream = stream
	c.mu.Unlock()

	// Send ProxyHello
	hello := &proto.ProxyMessage{
		Payload: &proto.ProxyMessage_Hello{
			Hello: &proto.ProxyHello{
				ProxyId:   c.proxyID,
				Version:   proxyVersion,
				StartedAt: timestamppb.Now(),
			},
		},
	}

	if err := stream.Send(hello); err != nil {
		conn.Close()
		return fmt.Errorf("failed to send hello: %w", err)
	}

	c.setConnected(true)
	log.Info("Successfully connected to management server")

	return nil
}
|
||||
|
||||
// handleConnection runs the sender and receiver goroutines for the current
// stream and blocks until the first of them reports a result.
//
// NOTE(review): both goroutines are bound to the client-lifetime context
// (c.ctx) rather than a per-connection one — after a stream dies, the
// surviving goroutine may linger until its next stream operation fails.
// Confirm whether a per-connection context should be introduced.
func (c *Client) handleConnection() error {
	// Buffered for both goroutines so the loser of the race does not block.
	errChan := make(chan error, 2)

	// Start sender goroutine
	go c.sender(errChan)

	// Start receiver goroutine
	go c.receiver(errChan)

	// Wait for error
	return <-errChan
}
|
||||
|
||||
// sender sends messages to management
|
||||
func (c *Client) sender(errChan chan<- error) {
|
||||
for {
|
||||
select {
|
||||
case <-c.ctx.Done():
|
||||
errChan <- c.ctx.Err()
|
||||
return
|
||||
|
||||
case accessLog := <-c.accessLogChan:
|
||||
msg := &proto.ProxyMessage{
|
||||
Payload: &proto.ProxyMessage_RequestData{
|
||||
RequestData: accessLog,
|
||||
},
|
||||
}
|
||||
|
||||
c.mu.RLock()
|
||||
stream := c.stream
|
||||
c.mu.RUnlock()
|
||||
|
||||
if stream == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := stream.Send(msg); err != nil {
|
||||
log.Errorf("Failed to send access log: %v", err)
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// receiver receives messages from management
|
||||
func (c *Client) receiver(errChan chan<- error) {
|
||||
for {
|
||||
c.mu.RLock()
|
||||
stream := c.stream
|
||||
c.mu.RUnlock()
|
||||
|
||||
if stream == nil {
|
||||
errChan <- fmt.Errorf("stream is nil")
|
||||
return
|
||||
}
|
||||
|
||||
msg, err := stream.Recv()
|
||||
if err == io.EOF {
|
||||
log.Info("Management server closed connection")
|
||||
errChan <- io.EOF
|
||||
return
|
||||
}
|
||||
if err != nil {
|
||||
log.Errorf("Failed to receive: %v", err)
|
||||
errChan <- err
|
||||
return
|
||||
}
|
||||
|
||||
// Handle message
|
||||
switch payload := msg.GetPayload().(type) {
|
||||
case *proto.ManagementMessage_Snapshot:
|
||||
c.handleSnapshot(payload.Snapshot)
|
||||
case *proto.ManagementMessage_Update:
|
||||
c.handleServiceUpdate(payload.Update)
|
||||
default:
|
||||
log.Warnf("Received unknown message type")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// handleSnapshot processes initial services snapshot
|
||||
func (c *Client) handleSnapshot(snapshot *proto.ServicesSnapshot) {
|
||||
log.Infof("Received services snapshot with %d services", len(snapshot.Services))
|
||||
|
||||
if c.serviceUpdateHandler == nil {
|
||||
log.Warn("No service update handler configured")
|
||||
return
|
||||
}
|
||||
|
||||
// Process each service as a CREATED update
|
||||
for _, service := range snapshot.Services {
|
||||
update := &proto.ServiceUpdate{
|
||||
Type: proto.ServiceUpdate_CREATED,
|
||||
Service: service,
|
||||
ServiceId: service.Id,
|
||||
}
|
||||
|
||||
if err := c.serviceUpdateHandler(update); err != nil {
|
||||
log.Errorf("Failed to handle service %s: %v", service.Id, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// handleServiceUpdate processes incremental service update
|
||||
func (c *Client) handleServiceUpdate(update *proto.ServiceUpdate) {
|
||||
log.Infof("Received service update: %s %s", update.Type, update.ServiceId)
|
||||
|
||||
if c.serviceUpdateHandler == nil {
|
||||
log.Warn("No service update handler configured")
|
||||
return
|
||||
}
|
||||
|
||||
if err := c.serviceUpdateHandler(update); err != nil {
|
||||
log.Errorf("Failed to handle service update: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// setConnected updates connected status
|
||||
func (c *Client) setConnected(connected bool) {
|
||||
c.mu.Lock()
|
||||
defer c.mu.Unlock()
|
||||
c.connected = connected
|
||||
}
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,159 +0,0 @@
|
||||
syntax = "proto3";
|
||||
|
||||
package proxy;
|
||||
|
||||
option go_package = "github.com/netbirdio/netbird/proxy/pkg/grpc/proto";
|
||||
|
||||
import "google/protobuf/timestamp.proto";
|
||||
|
||||
// ProxyService defines the bidirectional streaming service
|
||||
// The proxy runs this service, control service connects as client
|
||||
service ProxyService {
|
||||
// Stream establishes a bidirectional stream between proxy and control service
|
||||
// Control service (client) sends ControlMessage, Proxy (server) sends ProxyMessage
|
||||
rpc Stream(stream ControlMessage) returns (stream ProxyMessage);
|
||||
}
|
||||
|
||||
// ProxyMessage represents messages sent from proxy to control service
|
||||
message ProxyMessage {
|
||||
oneof message {
|
||||
ProxyStats stats = 1;
|
||||
ProxyEvent event = 2;
|
||||
ProxyLog log = 3;
|
||||
ProxyHeartbeat heartbeat = 4;
|
||||
ProxyRequestData request_data = 5;
|
||||
}
|
||||
}
|
||||
|
||||
// ControlMessage represents messages sent from control service to proxy
|
||||
message ControlMessage {
|
||||
oneof message {
|
||||
ControlEvent event = 1;
|
||||
ControlCommand command = 2;
|
||||
ControlConfig config = 3;
|
||||
ExposedServiceEvent exposed_service = 4;
|
||||
}
|
||||
}
|
||||
|
||||
// ProxyStats contains proxy statistics
|
||||
message ProxyStats {
|
||||
google.protobuf.Timestamp timestamp = 1;
|
||||
uint64 total_requests = 2;
|
||||
uint64 active_connections = 3;
|
||||
uint64 bytes_sent = 4;
|
||||
uint64 bytes_received = 5;
|
||||
double cpu_usage = 6;
|
||||
double memory_usage_mb = 7;
|
||||
map<string, uint64> status_code_counts = 8;
|
||||
}
|
||||
|
||||
// ProxyEvent represents events from the proxy
|
||||
message ProxyEvent {
|
||||
google.protobuf.Timestamp timestamp = 1;
|
||||
EventType type = 2;
|
||||
string message = 3;
|
||||
map<string, string> metadata = 4;
|
||||
|
||||
enum EventType {
|
||||
UNKNOWN = 0;
|
||||
STARTED = 1;
|
||||
STOPPED = 2;
|
||||
ERROR = 3;
|
||||
BACKEND_UNAVAILABLE = 4;
|
||||
BACKEND_RECOVERED = 5;
|
||||
CONFIG_UPDATED = 6;
|
||||
}
|
||||
}
|
||||
|
||||
// ProxyLog represents log entries
|
||||
message ProxyLog {
|
||||
google.protobuf.Timestamp timestamp = 1;
|
||||
LogLevel level = 2;
|
||||
string message = 3;
|
||||
map<string, string> fields = 4;
|
||||
|
||||
enum LogLevel {
|
||||
DEBUG = 0;
|
||||
INFO = 1;
|
||||
WARN = 2;
|
||||
ERROR = 3;
|
||||
}
|
||||
}
|
||||
|
||||
// ProxyHeartbeat is sent periodically to keep connection alive
|
||||
message ProxyHeartbeat {
|
||||
google.protobuf.Timestamp timestamp = 1;
|
||||
string proxy_id = 2;
|
||||
}
|
||||
|
||||
// ControlEvent represents events from control service
|
||||
message ControlEvent {
|
||||
google.protobuf.Timestamp timestamp = 1;
|
||||
string event_id = 2;
|
||||
string message = 3;
|
||||
}
|
||||
|
||||
// ControlCommand represents commands sent to proxy
|
||||
message ControlCommand {
|
||||
string command_id = 1;
|
||||
CommandType type = 2;
|
||||
map<string, string> parameters = 3;
|
||||
|
||||
enum CommandType {
|
||||
UNKNOWN = 0;
|
||||
RELOAD_CONFIG = 1;
|
||||
ENABLE_DEBUG = 2;
|
||||
DISABLE_DEBUG = 3;
|
||||
GET_STATS = 4;
|
||||
SHUTDOWN = 5;
|
||||
}
|
||||
}
|
||||
|
||||
// ControlConfig contains configuration updates from control service
|
||||
message ControlConfig {
|
||||
string config_version = 1;
|
||||
map<string, string> settings = 2;
|
||||
}
|
||||
|
||||
// ExposedServiceEvent represents exposed service lifecycle events
|
||||
message ExposedServiceEvent {
|
||||
google.protobuf.Timestamp timestamp = 1;
|
||||
EventType type = 2;
|
||||
string service_id = 3;
|
||||
PeerConfig peer_config = 4;
|
||||
UpstreamConfig upstream_config = 5;
|
||||
|
||||
enum EventType {
|
||||
UNKNOWN = 0;
|
||||
CREATED = 1;
|
||||
UPDATED = 2;
|
||||
REMOVED = 3;
|
||||
}
|
||||
}
|
||||
|
||||
// PeerConfig contains WireGuard peer configuration
|
||||
message PeerConfig {
|
||||
string peer_id = 1;
|
||||
string public_key = 2;
|
||||
repeated string allowed_ips = 3;
|
||||
string endpoint = 4;
|
||||
string tunnel_ip = 5;
|
||||
uint32 persistent_keepalive = 6;
|
||||
}
|
||||
|
||||
// UpstreamConfig contains reverse proxy upstream configuration
|
||||
message UpstreamConfig {
|
||||
string domain = 1;
|
||||
map<string, string> path_mappings = 2; // path -> port
|
||||
}
|
||||
|
||||
// ProxyRequestData contains metadata about requests routed through the reverse proxy
|
||||
message ProxyRequestData {
|
||||
google.protobuf.Timestamp timestamp = 1;
|
||||
string service_id = 2;
|
||||
string path = 3;
|
||||
int64 duration_ms = 4;
|
||||
string method = 5; // HTTP method (GET, POST, PUT, DELETE, etc.)
|
||||
int32 response_code = 6;
|
||||
string source_ip = 7;
|
||||
}
|
||||
@@ -1,137 +0,0 @@
|
||||
// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
|
||||
|
||||
package proto
|
||||
|
||||
import (
|
||||
context "context"
|
||||
grpc "google.golang.org/grpc"
|
||||
codes "google.golang.org/grpc/codes"
|
||||
status "google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the grpc package it is being compiled against.
|
||||
// Requires gRPC-Go v1.32.0 or later.
|
||||
const _ = grpc.SupportPackageIsVersion7
|
||||
|
||||
// ProxyServiceClient is the client API for ProxyService service.
|
||||
//
|
||||
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream.
|
||||
type ProxyServiceClient interface {
|
||||
// Stream establishes a bidirectional stream between proxy and control service
|
||||
// Control service (client) sends ControlMessage, Proxy (server) sends ProxyMessage
|
||||
Stream(ctx context.Context, opts ...grpc.CallOption) (ProxyService_StreamClient, error)
|
||||
}
|
||||
|
||||
type proxyServiceClient struct {
|
||||
cc grpc.ClientConnInterface
|
||||
}
|
||||
|
||||
func NewProxyServiceClient(cc grpc.ClientConnInterface) ProxyServiceClient {
|
||||
return &proxyServiceClient{cc}
|
||||
}
|
||||
|
||||
func (c *proxyServiceClient) Stream(ctx context.Context, opts ...grpc.CallOption) (ProxyService_StreamClient, error) {
|
||||
stream, err := c.cc.NewStream(ctx, &ProxyService_ServiceDesc.Streams[0], "/proxy.ProxyService/Stream", opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
x := &proxyServiceStreamClient{stream}
|
||||
return x, nil
|
||||
}
|
||||
|
||||
type ProxyService_StreamClient interface {
|
||||
Send(*ControlMessage) error
|
||||
Recv() (*ProxyMessage, error)
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
type proxyServiceStreamClient struct {
|
||||
grpc.ClientStream
|
||||
}
|
||||
|
||||
func (x *proxyServiceStreamClient) Send(m *ControlMessage) error {
|
||||
return x.ClientStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func (x *proxyServiceStreamClient) Recv() (*ProxyMessage, error) {
|
||||
m := new(ProxyMessage)
|
||||
if err := x.ClientStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// ProxyServiceServer is the server API for ProxyService service.
|
||||
// All implementations must embed UnimplementedProxyServiceServer
|
||||
// for forward compatibility
|
||||
type ProxyServiceServer interface {
|
||||
// Stream establishes a bidirectional stream between proxy and control service
|
||||
// Control service (client) sends ControlMessage, Proxy (server) sends ProxyMessage
|
||||
Stream(ProxyService_StreamServer) error
|
||||
mustEmbedUnimplementedProxyServiceServer()
|
||||
}
|
||||
|
||||
// UnimplementedProxyServiceServer must be embedded to have forward compatible implementations.
|
||||
type UnimplementedProxyServiceServer struct {
|
||||
}
|
||||
|
||||
func (UnimplementedProxyServiceServer) Stream(ProxyService_StreamServer) error {
|
||||
return status.Errorf(codes.Unimplemented, "method Stream not implemented")
|
||||
}
|
||||
func (UnimplementedProxyServiceServer) mustEmbedUnimplementedProxyServiceServer() {}
|
||||
|
||||
// UnsafeProxyServiceServer may be embedded to opt out of forward compatibility for this service.
|
||||
// Use of this interface is not recommended, as added methods to ProxyServiceServer will
|
||||
// result in compilation errors.
|
||||
type UnsafeProxyServiceServer interface {
|
||||
mustEmbedUnimplementedProxyServiceServer()
|
||||
}
|
||||
|
||||
func RegisterProxyServiceServer(s grpc.ServiceRegistrar, srv ProxyServiceServer) {
|
||||
s.RegisterService(&ProxyService_ServiceDesc, srv)
|
||||
}
|
||||
|
||||
func _ProxyService_Stream_Handler(srv interface{}, stream grpc.ServerStream) error {
|
||||
return srv.(ProxyServiceServer).Stream(&proxyServiceStreamServer{stream})
|
||||
}
|
||||
|
||||
type ProxyService_StreamServer interface {
|
||||
Send(*ProxyMessage) error
|
||||
Recv() (*ControlMessage, error)
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
type proxyServiceStreamServer struct {
|
||||
grpc.ServerStream
|
||||
}
|
||||
|
||||
func (x *proxyServiceStreamServer) Send(m *ProxyMessage) error {
|
||||
return x.ServerStream.SendMsg(m)
|
||||
}
|
||||
|
||||
func (x *proxyServiceStreamServer) Recv() (*ControlMessage, error) {
|
||||
m := new(ControlMessage)
|
||||
if err := x.ServerStream.RecvMsg(m); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
// ProxyService_ServiceDesc is the grpc.ServiceDesc for ProxyService service.
|
||||
// It's only intended for direct use with grpc.RegisterService,
|
||||
// and not to be introspected or modified (even as a copy)
|
||||
var ProxyService_ServiceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "proxy.ProxyService",
|
||||
HandlerType: (*ProxyServiceServer)(nil),
|
||||
Methods: []grpc.MethodDesc{},
|
||||
Streams: []grpc.StreamDesc{
|
||||
{
|
||||
StreamName: "Stream",
|
||||
Handler: _ProxyService_Stream_Handler,
|
||||
ServerStreams: true,
|
||||
ClientStreams: true,
|
||||
},
|
||||
},
|
||||
Metadata: "pkg/grpc/proto/proxy.proto",
|
||||
}
|
||||
@@ -1,274 +0,0 @@
|
||||
package grpc
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/keepalive"
|
||||
|
||||
pb "github.com/netbirdio/netbird/proxy/pkg/grpc/proto"
|
||||
)
|
||||
|
||||
// StreamHandler handles incoming messages from control service
|
||||
type StreamHandler interface {
|
||||
HandleControlEvent(ctx context.Context, event *pb.ControlEvent) error
|
||||
HandleControlCommand(ctx context.Context, command *pb.ControlCommand) error
|
||||
HandleControlConfig(ctx context.Context, config *pb.ControlConfig) error
|
||||
HandleExposedServiceEvent(ctx context.Context, event *pb.ExposedServiceEvent) error
|
||||
}
|
||||
|
||||
// Server represents the gRPC server running on the proxy
|
||||
type Server struct {
|
||||
pb.UnimplementedProxyServiceServer
|
||||
|
||||
listenAddr string
|
||||
grpcServer *grpc.Server
|
||||
handler StreamHandler
|
||||
|
||||
mu sync.RWMutex
|
||||
streams map[string]*StreamContext
|
||||
isRunning bool
|
||||
}
|
||||
|
||||
// StreamContext holds the context for each active stream
|
||||
type StreamContext struct {
|
||||
stream pb.ProxyService_StreamServer
|
||||
sendChan chan *pb.ProxyMessage
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
controlID string // ID of the connected control service
|
||||
}
|
||||
|
||||
// Config holds gRPC server configuration
|
||||
type Config struct {
|
||||
ListenAddr string
|
||||
Handler StreamHandler
|
||||
}
|
||||
|
||||
// NewServer creates a new gRPC server
|
||||
func NewServer(config Config) *Server {
|
||||
return &Server{
|
||||
listenAddr: config.ListenAddr,
|
||||
handler: config.Handler,
|
||||
streams: make(map[string]*StreamContext),
|
||||
}
|
||||
}
|
||||
|
||||
// Start starts the gRPC server
|
||||
func (s *Server) Start() error {
|
||||
s.mu.Lock()
|
||||
if s.isRunning {
|
||||
s.mu.Unlock()
|
||||
return fmt.Errorf("gRPC server already running")
|
||||
}
|
||||
s.isRunning = true
|
||||
s.mu.Unlock()
|
||||
|
||||
lis, err := net.Listen("tcp", s.listenAddr)
|
||||
if err != nil {
|
||||
s.mu.Lock()
|
||||
s.isRunning = false
|
||||
s.mu.Unlock()
|
||||
return fmt.Errorf("failed to listen: %w", err)
|
||||
}
|
||||
|
||||
s.grpcServer = grpc.NewServer(
|
||||
grpc.KeepaliveParams(keepalive.ServerParameters{
|
||||
Time: 30 * time.Second,
|
||||
Timeout: 10 * time.Second,
|
||||
}),
|
||||
grpc.KeepaliveEnforcementPolicy(keepalive.EnforcementPolicy{
|
||||
MinTime: 10 * time.Second,
|
||||
PermitWithoutStream: true,
|
||||
}),
|
||||
)
|
||||
|
||||
pb.RegisterProxyServiceServer(s.grpcServer, s)
|
||||
|
||||
log.Infof("gRPC server listening on %s", s.listenAddr)
|
||||
|
||||
if err := s.grpcServer.Serve(lis); err != nil {
|
||||
s.mu.Lock()
|
||||
s.isRunning = false
|
||||
s.mu.Unlock()
|
||||
return fmt.Errorf("failed to serve: %w", err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stop gracefully stops the gRPC server
|
||||
func (s *Server) Stop(ctx context.Context) error {
|
||||
s.mu.Lock()
|
||||
if !s.isRunning {
|
||||
s.mu.Unlock()
|
||||
return fmt.Errorf("gRPC server not running")
|
||||
}
|
||||
s.mu.Unlock()
|
||||
|
||||
log.Info("Stopping gRPC server...")
|
||||
|
||||
s.mu.Lock()
|
||||
for _, streamCtx := range s.streams {
|
||||
streamCtx.cancel()
|
||||
close(streamCtx.sendChan)
|
||||
}
|
||||
s.streams = make(map[string]*StreamContext)
|
||||
s.mu.Unlock()
|
||||
|
||||
stopped := make(chan struct{})
|
||||
go func() {
|
||||
s.grpcServer.GracefulStop()
|
||||
close(stopped)
|
||||
}()
|
||||
|
||||
select {
|
||||
case <-stopped:
|
||||
log.Info("gRPC server stopped gracefully")
|
||||
case <-ctx.Done():
|
||||
log.Warn("gRPC server graceful stop timeout, forcing stop")
|
||||
s.grpcServer.Stop()
|
||||
}
|
||||
|
||||
s.mu.Lock()
|
||||
s.isRunning = false
|
||||
s.mu.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Stream implements the bidirectional streaming RPC
|
||||
// The control service connects as client, proxy is server
|
||||
// Control service sends ControlMessage, Proxy sends ProxyMessage
|
||||
func (s *Server) Stream(stream pb.ProxyService_StreamServer) error {
|
||||
ctx, cancel := context.WithCancel(stream.Context())
|
||||
defer cancel()
|
||||
|
||||
controlID := fmt.Sprintf("control-%d", time.Now().Unix())
|
||||
|
||||
streamCtx := &StreamContext{
|
||||
stream: stream,
|
||||
sendChan: make(chan *pb.ProxyMessage, 100),
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
controlID: controlID,
|
||||
}
|
||||
|
||||
s.mu.Lock()
|
||||
s.streams[controlID] = streamCtx
|
||||
s.mu.Unlock()
|
||||
|
||||
log.Infof("Control service connected: %s", controlID)
|
||||
|
||||
sendDone := make(chan error, 1)
|
||||
go s.sendLoop(streamCtx, sendDone)
|
||||
|
||||
recvDone := make(chan error, 1)
|
||||
go s.receiveLoop(streamCtx, recvDone)
|
||||
|
||||
select {
|
||||
case err := <-sendDone:
|
||||
log.Infof("Control service %s send loop ended: %v", controlID, err)
|
||||
return err
|
||||
case err := <-recvDone:
|
||||
log.Infof("Control service %s receive loop ended: %v", controlID, err)
|
||||
return err
|
||||
case <-ctx.Done():
|
||||
log.Infof("Control service %s context done: %v", controlID, ctx.Err())
|
||||
return ctx.Err()
|
||||
}
|
||||
}
|
||||
|
||||
// sendLoop handles sending ProxyMessages to the control service
|
||||
func (s *Server) sendLoop(streamCtx *StreamContext, done chan<- error) {
|
||||
for {
|
||||
select {
|
||||
case msg, ok := <-streamCtx.sendChan:
|
||||
if !ok {
|
||||
done <- nil
|
||||
return
|
||||
}
|
||||
|
||||
if err := streamCtx.stream.Send(msg); err != nil {
|
||||
log.Errorf("Failed to send message to control service: %v", err)
|
||||
done <- err
|
||||
return
|
||||
}
|
||||
|
||||
case <-streamCtx.ctx.Done():
|
||||
done <- streamCtx.ctx.Err()
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// receiveLoop handles receiving ControlMessages from the control service
|
||||
func (s *Server) receiveLoop(streamCtx *StreamContext, done chan<- error) {
|
||||
for {
|
||||
controlMsg, err := streamCtx.stream.Recv()
|
||||
if err != nil {
|
||||
log.Debugf("Stream receive error: %v", err)
|
||||
done <- err
|
||||
return
|
||||
}
|
||||
|
||||
switch m := controlMsg.Message.(type) {
|
||||
case *pb.ControlMessage_Event:
|
||||
if s.handler != nil {
|
||||
if err := s.handler.HandleControlEvent(streamCtx.ctx, m.Event); err != nil {
|
||||
log.Errorf("Failed to handle control event: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
case *pb.ControlMessage_Command:
|
||||
if s.handler != nil {
|
||||
if err := s.handler.HandleControlCommand(streamCtx.ctx, m.Command); err != nil {
|
||||
log.Errorf("Failed to handle control command: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
case *pb.ControlMessage_Config:
|
||||
if s.handler != nil {
|
||||
if err := s.handler.HandleControlConfig(streamCtx.ctx, m.Config); err != nil {
|
||||
log.Errorf("Failed to handle control config: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
case *pb.ControlMessage_ExposedService:
|
||||
if s.handler != nil {
|
||||
if err := s.handler.HandleExposedServiceEvent(streamCtx.ctx, m.ExposedService); err != nil {
|
||||
log.Errorf("Failed to handle exposed service event: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
default:
|
||||
log.Warnf("Received unknown control message type: %T", m)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// SendProxyMessage sends a ProxyMessage to all connected control services
|
||||
func (s *Server) SendProxyMessage(msg *pb.ProxyMessage) {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
|
||||
for _, streamCtx := range s.streams {
|
||||
select {
|
||||
case streamCtx.sendChan <- msg:
|
||||
default:
|
||||
log.Warn("Send channel full, dropping message")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// GetActiveStreams returns the number of active streams
|
||||
func (s *Server) GetActiveStreams() int {
|
||||
s.mu.RLock()
|
||||
defer s.mu.RUnlock()
|
||||
return len(s.streams)
|
||||
}
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
log "github.com/sirupsen/logrus"
|
||||
"google.golang.org/protobuf/types/known/timestamppb"
|
||||
@@ -13,13 +12,13 @@ import (
|
||||
"github.com/netbirdio/netbird/proxy/internal/auth/methods"
|
||||
"github.com/netbirdio/netbird/proxy/internal/reverseproxy"
|
||||
grpcpkg "github.com/netbirdio/netbird/proxy/pkg/grpc"
|
||||
pb "github.com/netbirdio/netbird/proxy/pkg/grpc/proto"
|
||||
pb "github.com/netbirdio/netbird/shared/management/proto"
|
||||
)
|
||||
|
||||
// Server represents the reverse proxy server with integrated gRPC control server
|
||||
// Server represents the reverse proxy server with integrated gRPC client
|
||||
type Server struct {
|
||||
config Config
|
||||
grpcServer *grpcpkg.Server
|
||||
grpcClient *grpcpkg.Client
|
||||
proxy *reverseproxy.Proxy
|
||||
|
||||
mu sync.RWMutex
|
||||
@@ -69,7 +68,6 @@ type UpstreamConfig struct {
|
||||
|
||||
// NewServer creates a new reverse proxy server instance
|
||||
func NewServer(config Config) (*Server, error) {
|
||||
// Validate config
|
||||
if err := config.Validate(); err != nil {
|
||||
return nil, fmt.Errorf("invalid configuration: %w", err)
|
||||
}
|
||||
@@ -86,41 +84,41 @@ func NewServer(config Config) (*Server, error) {
|
||||
exposedServices: make(map[string]*ExposedServiceConfig),
|
||||
}
|
||||
|
||||
// Create reverse proxy using embedded config
|
||||
proxy, err := reverseproxy.New(config.ReverseProxy)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create reverse proxy: %w", err)
|
||||
}
|
||||
|
||||
// Set request data callback
|
||||
proxy.SetRequestCallback(func(data reverseproxy.RequestData) {
|
||||
log.WithFields(log.Fields{
|
||||
"service_id": data.ServiceID,
|
||||
"host": data.Host,
|
||||
"method": data.Method,
|
||||
"path": data.Path,
|
||||
"response_code": data.ResponseCode,
|
||||
"duration_ms": data.DurationMs,
|
||||
"source_ip": data.SourceIP,
|
||||
"auth_mechanism": data.AuthMechanism,
|
||||
"user_id": data.UserID,
|
||||
"auth_success": data.AuthSuccess,
|
||||
}).Info("Access log received")
|
||||
})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create reverse proxy: %w", err)
|
||||
}
|
||||
server.proxy = proxy
|
||||
|
||||
// Create gRPC server if enabled
|
||||
if config.EnableGRPC && config.GRPCListenAddress != "" {
|
||||
grpcConfig := grpcpkg.Config{
|
||||
ListenAddr: config.GRPCListenAddress,
|
||||
Handler: server, // Server implements StreamHandler interface
|
||||
}
|
||||
server.grpcServer = grpcpkg.NewServer(grpcConfig)
|
||||
if config.ReverseProxy.ManagementURL == "" {
|
||||
return nil, fmt.Errorf("management URL is required")
|
||||
}
|
||||
|
||||
grpcClient := grpcpkg.NewClient(grpcpkg.ClientConfig{
|
||||
ProxyID: config.ProxyID,
|
||||
ManagementURL: config.ReverseProxy.ManagementURL,
|
||||
ServiceUpdateHandler: server.handleServiceUpdate,
|
||||
})
|
||||
server.grpcClient = grpcClient
|
||||
|
||||
// Set request data callback to send access logs to management
|
||||
proxy.SetRequestCallback(func(data reverseproxy.RequestData) {
|
||||
accessLog := &pb.ProxyRequestData{
|
||||
Timestamp: timestamppb.Now(),
|
||||
ServiceId: data.ServiceID,
|
||||
Host: data.Host,
|
||||
Path: data.Path,
|
||||
DurationMs: data.DurationMs,
|
||||
Method: data.Method,
|
||||
ResponseCode: data.ResponseCode,
|
||||
SourceIp: data.SourceIP,
|
||||
AuthMechanism: data.AuthMechanism,
|
||||
UserId: data.UserID,
|
||||
AuthSuccess: data.AuthSuccess,
|
||||
}
|
||||
server.grpcClient.SendAccessLog(accessLog)
|
||||
})
|
||||
|
||||
return server, nil
|
||||
}
|
||||
|
||||
@@ -136,7 +134,6 @@ func (s *Server) Start() error {
|
||||
|
||||
log.Infof("Starting proxy reverse proxy server on %s", s.config.ReverseProxy.ListenAddress)
|
||||
|
||||
// Start reverse proxy
|
||||
if err := s.proxy.Start(); err != nil {
|
||||
s.mu.Lock()
|
||||
s.isRunning = false
|
||||
@@ -144,47 +141,20 @@ func (s *Server) Start() error {
|
||||
return fmt.Errorf("failed to start reverse proxy: %w", err)
|
||||
}
|
||||
|
||||
// Start gRPC server if configured
|
||||
if s.grpcServer != nil {
|
||||
s.mu.Lock()
|
||||
s.grpcRunning = true
|
||||
s.mu.Unlock()
|
||||
|
||||
if err := s.grpcClient.Start(); err != nil {
|
||||
s.mu.Lock()
|
||||
s.grpcRunning = true
|
||||
s.isRunning = false
|
||||
s.grpcRunning = false
|
||||
s.mu.Unlock()
|
||||
|
||||
go func() {
|
||||
log.Infof("Starting gRPC control server on %s", s.config.GRPCListenAddress)
|
||||
if err := s.grpcServer.Start(); err != nil {
|
||||
log.Errorf("gRPC server error: %v", err)
|
||||
s.mu.Lock()
|
||||
s.grpcRunning = false
|
||||
s.mu.Unlock()
|
||||
}
|
||||
}()
|
||||
|
||||
// Send started event
|
||||
time.Sleep(100 * time.Millisecond) // Give gRPC server time to start
|
||||
s.sendProxyEvent(pb.ProxyEvent_STARTED, "Proxy server started")
|
||||
return fmt.Errorf("failed to start gRPC client: %w", err)
|
||||
}
|
||||
|
||||
// Enable Bearer authentication for the test route
|
||||
// OIDC configuration is set globally in the proxy config above
|
||||
testAuthConfig := &auth.Config{
|
||||
Bearer: &methods.BearerConfig{
|
||||
Enabled: true,
|
||||
},
|
||||
}
|
||||
|
||||
// Register main protected route with auth
|
||||
// The /auth/callback endpoint is automatically handled globally for all routes
|
||||
if err := s.proxy.AddRoute(
|
||||
&reverseproxy.RouteConfig{
|
||||
ID: "test",
|
||||
Domain: "test.netbird.io",
|
||||
PathMappings: map[string]string{"/": "100.116.118.156:8181"},
|
||||
AuthConfig: testAuthConfig,
|
||||
SetupKey: "88B2382A-93D2-47A9-A80F-D0055D741636",
|
||||
}); err != nil {
|
||||
log.Warn("Failed to add test route: ", err)
|
||||
}
|
||||
log.Info("Proxy started and connected to management")
|
||||
log.Info("Waiting for service configurations from management...")
|
||||
|
||||
<-s.shutdownCtx.Done()
|
||||
return nil
|
||||
@@ -208,17 +178,12 @@ func (s *Server) Stop(ctx context.Context) error {
|
||||
defer cancel()
|
||||
}
|
||||
|
||||
// Send stopped event before shutdown
|
||||
if s.grpcServer != nil && s.grpcRunning {
|
||||
s.sendProxyEvent(pb.ProxyEvent_STOPPED, "Proxy server shutting down")
|
||||
}
|
||||
var proxyErr, grpcErr error
|
||||
|
||||
var caddyErr, grpcErr error
|
||||
|
||||
// Shutdown gRPC server first
|
||||
if s.grpcServer != nil && s.grpcRunning {
|
||||
if err := s.grpcServer.Stop(ctx); err != nil {
|
||||
grpcErr = fmt.Errorf("gRPC server shutdown failed: %w", err)
|
||||
// Stop gRPC client first
|
||||
if s.grpcRunning {
|
||||
if err := s.grpcClient.Stop(); err != nil {
|
||||
grpcErr = fmt.Errorf("gRPC client shutdown failed: %w", err)
|
||||
log.Error(grpcErr)
|
||||
}
|
||||
s.mu.Lock()
|
||||
@@ -228,16 +193,16 @@ func (s *Server) Stop(ctx context.Context) error {
|
||||
|
||||
// Shutdown reverse proxy
|
||||
if err := s.proxy.Stop(ctx); err != nil {
|
||||
caddyErr = fmt.Errorf("reverse proxy shutdown failed: %w", err)
|
||||
log.Error(caddyErr)
|
||||
proxyErr = fmt.Errorf("reverse proxy shutdown failed: %w", err)
|
||||
log.Error(proxyErr)
|
||||
}
|
||||
|
||||
s.mu.Lock()
|
||||
s.isRunning = false
|
||||
s.mu.Unlock()
|
||||
|
||||
if caddyErr != nil {
|
||||
return caddyErr
|
||||
if proxyErr != nil {
|
||||
return proxyErr
|
||||
}
|
||||
if grpcErr != nil {
|
||||
return grpcErr
|
||||
@@ -259,119 +224,173 @@ func (s *Server) GetConfig() Config {
|
||||
return s.config
|
||||
}
|
||||
|
||||
// GetStats returns a copy of current statistics
|
||||
func (s *Server) GetStats() *pb.ProxyStats {
|
||||
s.stats.mu.RLock()
|
||||
defer s.stats.mu.RUnlock()
|
||||
|
||||
return &pb.ProxyStats{
|
||||
Timestamp: timestamppb.Now(),
|
||||
TotalRequests: s.stats.totalRequests,
|
||||
ActiveConnections: s.stats.activeConns,
|
||||
BytesSent: s.stats.bytesSent,
|
||||
BytesReceived: s.stats.bytesReceived,
|
||||
}
|
||||
}
|
||||
|
||||
// StreamHandler interface implementation
|
||||
|
||||
// HandleControlEvent handles incoming control events
|
||||
// This is where ExposedService events will be routed
|
||||
func (s *Server) HandleControlEvent(ctx context.Context, event *pb.ControlEvent) error {
|
||||
// handleServiceUpdate processes service updates from management
|
||||
func (s *Server) handleServiceUpdate(update *pb.ServiceUpdate) error {
|
||||
log.WithFields(log.Fields{
|
||||
"event_id": event.EventId,
|
||||
"message": event.Message,
|
||||
}).Info("Received control event")
|
||||
"service_id": update.ServiceId,
|
||||
"type": update.Type.String(),
|
||||
}).Info("Received service update from management")
|
||||
|
||||
// TODO: Parse event type and route to appropriate handler
|
||||
// if event.Type == "ExposedServiceCreated" {
|
||||
// return s.handleExposedServiceCreated(ctx, event)
|
||||
// } else if event.Type == "ExposedServiceUpdated" {
|
||||
// return s.handleExposedServiceUpdated(ctx, event)
|
||||
// } else if event.Type == "ExposedServiceRemoved" {
|
||||
// return s.handleExposedServiceRemoved(ctx, event)
|
||||
// }
|
||||
switch update.Type {
|
||||
case pb.ServiceUpdate_CREATED:
|
||||
if update.Service == nil {
|
||||
return fmt.Errorf("service config is nil for CREATED update")
|
||||
}
|
||||
return s.addServiceFromProto(update.Service)
|
||||
|
||||
return nil
|
||||
}
|
||||
case pb.ServiceUpdate_UPDATED:
|
||||
if update.Service == nil {
|
||||
return fmt.Errorf("service config is nil for UPDATED update")
|
||||
}
|
||||
return s.updateServiceFromProto(update.Service)
|
||||
|
||||
// HandleControlCommand handles incoming control commands
|
||||
func (s *Server) HandleControlCommand(ctx context.Context, command *pb.ControlCommand) error {
|
||||
log.WithFields(log.Fields{
|
||||
"command_id": command.CommandId,
|
||||
"type": command.Type.String(),
|
||||
}).Info("Received control command")
|
||||
|
||||
switch command.Type {
|
||||
case pb.ControlCommand_GET_STATS:
|
||||
// Stats are automatically sent, just log
|
||||
log.Debug("Stats requested via command")
|
||||
case pb.ControlCommand_RELOAD_CONFIG:
|
||||
log.Info("Config reload requested (not implemented yet)")
|
||||
case pb.ControlCommand_ENABLE_DEBUG:
|
||||
log.SetLevel(log.DebugLevel)
|
||||
log.Info("Debug logging enabled")
|
||||
case pb.ControlCommand_DISABLE_DEBUG:
|
||||
log.SetLevel(log.InfoLevel)
|
||||
log.Info("Debug logging disabled")
|
||||
case pb.ControlCommand_SHUTDOWN:
|
||||
log.Warn("Shutdown command received")
|
||||
go func() {
|
||||
time.Sleep(1 * time.Second)
|
||||
s.cancelFunc() // Trigger graceful shutdown
|
||||
}()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// HandleControlConfig handles incoming configuration updates
|
||||
func (s *Server) HandleControlConfig(ctx context.Context, config *pb.ControlConfig) error {
|
||||
log.WithFields(log.Fields{
|
||||
"config_version": config.ConfigVersion,
|
||||
"settings": config.Settings,
|
||||
}).Info("Received config update")
|
||||
return nil
|
||||
}
|
||||
|
||||
// HandleExposedServiceEvent handles exposed service lifecycle events
|
||||
func (s *Server) HandleExposedServiceEvent(ctx context.Context, event *pb.ExposedServiceEvent) error {
|
||||
log.WithFields(log.Fields{
|
||||
"service_id": event.ServiceId,
|
||||
"type": event.Type.String(),
|
||||
}).Info("Received exposed service event")
|
||||
|
||||
// Convert proto types to internal types
|
||||
peerConfig := &PeerConfig{
|
||||
PeerID: event.PeerConfig.PeerId,
|
||||
PublicKey: event.PeerConfig.PublicKey,
|
||||
AllowedIPs: event.PeerConfig.AllowedIps,
|
||||
Endpoint: event.PeerConfig.Endpoint,
|
||||
TunnelIP: event.PeerConfig.TunnelIp,
|
||||
}
|
||||
|
||||
upstreamConfig := &UpstreamConfig{
|
||||
Domain: event.UpstreamConfig.Domain,
|
||||
PathMappings: event.UpstreamConfig.PathMappings,
|
||||
}
|
||||
|
||||
// Route to appropriate handler based on event type
|
||||
switch event.Type {
|
||||
case pb.ExposedServiceEvent_CREATED:
|
||||
return s.handleExposedServiceCreated(event.ServiceId, peerConfig, upstreamConfig)
|
||||
|
||||
case pb.ExposedServiceEvent_UPDATED:
|
||||
return s.handleExposedServiceUpdated(event.ServiceId, peerConfig, upstreamConfig)
|
||||
|
||||
case pb.ExposedServiceEvent_REMOVED:
|
||||
return s.handleExposedServiceRemoved(event.ServiceId)
|
||||
case pb.ServiceUpdate_REMOVED:
|
||||
return s.removeService(update.ServiceId)
|
||||
|
||||
default:
|
||||
return fmt.Errorf("unknown exposed service event type: %v", event.Type)
|
||||
return fmt.Errorf("unknown service update type: %v", update.Type)
|
||||
}
|
||||
}
|
||||
|
||||
// Exposed Service Handlers
|
||||
// addServiceFromProto adds a service from proto config
|
||||
func (s *Server) addServiceFromProto(serviceConfig *pb.ExposedServiceConfig) error {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
// Check if service already exists
|
||||
if _, exists := s.exposedServices[serviceConfig.Id]; exists {
|
||||
log.Warnf("Service %s already exists, updating instead", serviceConfig.Id)
|
||||
return s.updateServiceFromProtoLocked(serviceConfig)
|
||||
}
|
||||
|
||||
log.WithFields(log.Fields{
|
||||
"service_id": serviceConfig.Id,
|
||||
"domain": serviceConfig.Domain,
|
||||
}).Info("Adding service from management")
|
||||
|
||||
// Convert proto auth config to internal auth config
|
||||
var authConfig *auth.Config
|
||||
if serviceConfig.Auth != nil {
|
||||
authConfig = convertProtoAuthConfig(serviceConfig.Auth)
|
||||
}
|
||||
|
||||
// Add route to proxy
|
||||
route := &reverseproxy.RouteConfig{
|
||||
ID: serviceConfig.Id,
|
||||
Domain: serviceConfig.Domain,
|
||||
PathMappings: serviceConfig.PathMappings,
|
||||
AuthConfig: authConfig,
|
||||
SetupKey: serviceConfig.SetupKey,
|
||||
}
|
||||
|
||||
if err := s.proxy.AddRoute(route); err != nil {
|
||||
return fmt.Errorf("failed to add route: %w", err)
|
||||
}
|
||||
|
||||
// Store service config (simplified, no peer config for now)
|
||||
s.exposedServices[serviceConfig.Id] = &ExposedServiceConfig{
|
||||
ServiceID: serviceConfig.Id,
|
||||
UpstreamConfig: &UpstreamConfig{
|
||||
Domain: serviceConfig.Domain,
|
||||
PathMappings: serviceConfig.PathMappings,
|
||||
},
|
||||
}
|
||||
|
||||
log.Infof("Service %s added successfully", serviceConfig.Id)
|
||||
return nil
|
||||
}
|
||||
|
||||
// updateServiceFromProto updates an existing service from proto config
|
||||
func (s *Server) updateServiceFromProto(serviceConfig *pb.ExposedServiceConfig) error {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
return s.updateServiceFromProtoLocked(serviceConfig)
|
||||
}
|
||||
|
||||
func (s *Server) updateServiceFromProtoLocked(serviceConfig *pb.ExposedServiceConfig) error {
|
||||
log.WithFields(log.Fields{
|
||||
"service_id": serviceConfig.Id,
|
||||
"domain": serviceConfig.Domain,
|
||||
}).Info("Updating service from management")
|
||||
|
||||
// Convert proto auth config to internal auth config
|
||||
var authConfig *auth.Config
|
||||
if serviceConfig.Auth != nil {
|
||||
authConfig = convertProtoAuthConfig(serviceConfig.Auth)
|
||||
}
|
||||
|
||||
// Update route in proxy
|
||||
route := &reverseproxy.RouteConfig{
|
||||
ID: serviceConfig.Id,
|
||||
Domain: serviceConfig.Domain,
|
||||
PathMappings: serviceConfig.PathMappings,
|
||||
AuthConfig: authConfig,
|
||||
SetupKey: serviceConfig.SetupKey,
|
||||
}
|
||||
|
||||
if err := s.proxy.UpdateRoute(route); err != nil {
|
||||
return fmt.Errorf("failed to update route: %w", err)
|
||||
}
|
||||
|
||||
// Update service config
|
||||
s.exposedServices[serviceConfig.Id] = &ExposedServiceConfig{
|
||||
ServiceID: serviceConfig.Id,
|
||||
UpstreamConfig: &UpstreamConfig{
|
||||
Domain: serviceConfig.Domain,
|
||||
PathMappings: serviceConfig.PathMappings,
|
||||
},
|
||||
}
|
||||
|
||||
log.Infof("Service %s updated successfully", serviceConfig.Id)
|
||||
return nil
|
||||
}
|
||||
|
||||
// removeService removes a service
|
||||
func (s *Server) removeService(serviceID string) error {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
log.WithFields(log.Fields{
|
||||
"service_id": serviceID,
|
||||
}).Info("Removing service from management")
|
||||
|
||||
// Remove route from proxy
|
||||
if err := s.proxy.RemoveRoute(serviceID); err != nil {
|
||||
return fmt.Errorf("failed to remove route: %w", err)
|
||||
}
|
||||
|
||||
// Remove service config
|
||||
delete(s.exposedServices, serviceID)
|
||||
|
||||
log.Infof("Service %s removed successfully", serviceID)
|
||||
return nil
|
||||
}
|
||||
|
||||
// convertProtoAuthConfig converts proto auth config to internal auth config
|
||||
func convertProtoAuthConfig(protoAuth *pb.AuthConfig) *auth.Config {
|
||||
authConfig := &auth.Config{}
|
||||
|
||||
switch authType := protoAuth.AuthType.(type) {
|
||||
case *pb.AuthConfig_BasicAuth:
|
||||
authConfig.BasicAuth = &methods.BasicAuthConfig{
|
||||
Username: authType.BasicAuth.Username,
|
||||
Password: authType.BasicAuth.Password,
|
||||
}
|
||||
case *pb.AuthConfig_PinAuth:
|
||||
authConfig.PIN = &methods.PINConfig{
|
||||
PIN: authType.PinAuth.Pin,
|
||||
Header: authType.PinAuth.Header,
|
||||
}
|
||||
case *pb.AuthConfig_BearerAuth:
|
||||
authConfig.Bearer = &methods.BearerConfig{
|
||||
Enabled: authType.BearerAuth.Enabled,
|
||||
}
|
||||
}
|
||||
|
||||
return authConfig
|
||||
}
|
||||
|
||||
// Exposed Service Handlers (deprecated - keeping for backwards compatibility)
|
||||
|
||||
// handleExposedServiceCreated handles the creation of a new exposed service
|
||||
func (s *Server) handleExposedServiceCreated(serviceID string, peerConfig *PeerConfig, upstreamConfig *UpstreamConfig) error {
|
||||
@@ -538,17 +557,6 @@ func (s *Server) GetExposedService(serviceID string) (*ExposedServiceConfig, err
|
||||
return service, nil
|
||||
}
|
||||
|
||||
// Helper methods
|
||||
|
||||
func (s *Server) sendProxyEvent(eventType pb.ProxyEvent_EventType, message string) {
|
||||
// This would typically be called to send events
|
||||
// The actual sending happens via the gRPC stream
|
||||
log.WithFields(log.Fields{
|
||||
"type": eventType.String(),
|
||||
"message": message,
|
||||
}).Debug("Proxy event")
|
||||
}
|
||||
|
||||
// Stats methods
|
||||
|
||||
func (s *Stats) IncrementRequests() {
|
||||
|
||||
@@ -14,4 +14,5 @@ cd "$script_path"
|
||||
go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.26
|
||||
go install google.golang.org/grpc/cmd/protoc-gen-go-grpc@v1.1
|
||||
protoc -I ./ ./management.proto --go_out=../ --go-grpc_out=../
|
||||
protoc -I ./ ./proxy_service.proto --go_out=../ --go-grpc_out=../
|
||||
cd "$old_pwd"
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||
// versions:
|
||||
// protoc-gen-go v1.26.0
|
||||
// protoc v6.33.3
|
||||
// protoc v6.33.0
|
||||
// source: management.proto
|
||||
|
||||
package proto
|
||||
@@ -1607,17 +1607,19 @@ func (x *FlowConfig) GetDnsCollection() bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// JWTConfig represents JWT authentication configuration
|
||||
// JWTConfig represents JWT authentication configuration for validating tokens.
|
||||
type JWTConfig struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Issuer string `protobuf:"bytes,1,opt,name=issuer,proto3" json:"issuer,omitempty"`
|
||||
Issuer string `protobuf:"bytes,1,opt,name=issuer,proto3" json:"issuer,omitempty"`
|
||||
// Deprecated: audience is kept for backwards compatibility only. Use audiences instead in the client code but populate this field.
|
||||
Audience string `protobuf:"bytes,2,opt,name=audience,proto3" json:"audience,omitempty"`
|
||||
KeysLocation string `protobuf:"bytes,3,opt,name=keysLocation,proto3" json:"keysLocation,omitempty"`
|
||||
MaxTokenAge int64 `protobuf:"varint,4,opt,name=maxTokenAge,proto3" json:"maxTokenAge,omitempty"`
|
||||
// audiences
|
||||
// audiences contains the list of valid audiences for JWT validation.
|
||||
// Tokens matching any audience in this list are considered valid.
|
||||
Audiences []string `protobuf:"bytes,5,rep,name=audiences,proto3" json:"audiences,omitempty"`
|
||||
}
|
||||
|
||||
|
||||
Reference in New Issue
Block a user