mirror of
https://github.com/netbirdio/netbird.git
synced 2026-04-24 11:16:38 +00:00
[management] Cache peer snapshot + consolidate auth reads on Sync hot path
Trim the fast-path Sync handler by removing two DB round trips on cache hit:
1. Consolidate GetUserIDByPeerKey + GetAccountIDByPeerPubKey into a single
GetPeerAuthInfoByPubKey store call. Both looked up the same peer row by
pubkey and returned one column each; the new method SELECTs both columns
in one query. AccountManager exposes it as GetPeerAuthInfo.
2. Extend peerSyncEntry with AccountID, PeerID, PeerKey, Ephemeral and a
HasUser flag so the cache carries everything the fast path needs. On
cache hit with a matching metaHash:
- The Sync handler skips GetPeerAuthInfo entirely (entry.AccountID and
entry.HasUser drive the loginFilter gate).
- commitFastPath skips GetPeerByPeerPubKey by using the cached peer
snapshot for OnPeerConnectedWithPeer.
Old cache entries from pre-step-2 shape still decode (missing fields zero
out) but IsComplete() returns false, so they fall through to the slow path
and get rewritten with the full shape on first pass. No migration needed.
Expected impact on a pathological 16.8 s Sync observed in production:
roughly 6 s saved by consolidating the two auth reads into one round trip,
skipping the pre-fast-path GetPeerAuthInfo on cache hit, and skipping
GetPeerByPeerPubKey in commitFastPath.
Cache misses and cold starts remain on the slow path, unchanged.
Account-serial, ExtraSettings and peer-group caching — the remaining
synchronous DB reads — are deliberately left for a follow-up so the
invalidation design can be proven incrementally.
This commit is contained in:
@@ -261,19 +261,39 @@ func (s *Server) Sync(req *proto.EncryptedMessage, srv proto.ManagementService_S
|
||||
realIP := getRealIP(ctx)
|
||||
sRealIP := realIP.String()
|
||||
peerMeta := extractPeerMeta(ctx, syncReq.GetMeta())
|
||||
|
||||
getUserIDStart := time.Now()
|
||||
userID, err := s.accountManager.GetUserIDByPeerKey(ctx, peerKey.String())
|
||||
if err != nil {
|
||||
s.syncSem.Add(-1)
|
||||
if errStatus, ok := internalStatus.FromError(err); ok && errStatus.Type() == internalStatus.NotFound {
|
||||
return status.Errorf(codes.PermissionDenied, "peer is not registered")
|
||||
}
|
||||
return mapError(ctx, err)
|
||||
}
|
||||
log.WithContext(ctx).Debugf("fast path: GetUserIDByPeerKey took %s", time.Since(getUserIDStart))
|
||||
|
||||
metahashed := metaHash(peerMeta, sRealIP)
|
||||
|
||||
// Fast path authorisation short-circuit: if the peer-sync cache has a
|
||||
// complete entry whose metaHash still matches the incoming request, we can
|
||||
// skip GetPeerAuthInfo entirely. The entry carries AccountID and HasUser
|
||||
// so we have everything the loginFilter gate and the rest of the handler
|
||||
// need. On any mismatch we fall back to the DB read below.
|
||||
var (
|
||||
userID string
|
||||
accountID string
|
||||
)
|
||||
cachedEntry, cachedEntryHit := s.lookupPeerAuthFromCache(peerKey.String(), metahashed, peerMeta.GoOS)
|
||||
if cachedEntryHit {
|
||||
accountID = cachedEntry.AccountID
|
||||
if cachedEntry.HasUser {
|
||||
userID = "cached"
|
||||
}
|
||||
log.WithContext(ctx).Debugf("fast path: GetPeerAuthInfo skipped (cache hit)")
|
||||
} else {
|
||||
authInfoStart := time.Now()
|
||||
uid, aid, err := s.accountManager.GetPeerAuthInfo(ctx, peerKey.String())
|
||||
if err != nil {
|
||||
s.syncSem.Add(-1)
|
||||
if errStatus, ok := internalStatus.FromError(err); ok && errStatus.Type() == internalStatus.NotFound {
|
||||
return status.Errorf(codes.PermissionDenied, "peer is not registered")
|
||||
}
|
||||
return mapError(ctx, err)
|
||||
}
|
||||
userID = uid
|
||||
accountID = aid
|
||||
log.WithContext(ctx).Debugf("fast path: GetPeerAuthInfo took %s", time.Since(authInfoStart))
|
||||
}
|
||||
|
||||
if userID == "" && !s.loginFilter.allowLogin(peerKey.String(), metahashed) {
|
||||
if s.appMetrics != nil {
|
||||
s.appMetrics.GRPCMetrics().CountSyncRequestBlocked()
|
||||
@@ -294,21 +314,6 @@ func (s *Server) Sync(req *proto.EncryptedMessage, srv proto.ManagementService_S
|
||||
// nolint:staticcheck
|
||||
ctx = context.WithValue(ctx, nbContext.PeerIDKey, peerKey.String())
|
||||
|
||||
getAccountIDStart := time.Now()
|
||||
accountID, err := s.accountManager.GetAccountIDForPeerKey(ctx, peerKey.String())
|
||||
if err != nil {
|
||||
// nolint:staticcheck
|
||||
ctx = context.WithValue(ctx, nbContext.AccountIDKey, "UNKNOWN")
|
||||
log.WithContext(ctx).Tracef("peer %s is not registered", peerKey.String())
|
||||
if errStatus, ok := internalStatus.FromError(err); ok && errStatus.Type() == internalStatus.NotFound {
|
||||
s.syncSem.Add(-1)
|
||||
return status.Errorf(codes.PermissionDenied, "peer is not registered")
|
||||
}
|
||||
s.syncSem.Add(-1)
|
||||
return err
|
||||
}
|
||||
log.WithContext(ctx).Debugf("fast path: GetAccountIDForPeerKey took %s", time.Since(getAccountIDStart))
|
||||
|
||||
// nolint:staticcheck
|
||||
ctx = context.WithValue(ctx, nbContext.AccountIDKey, accountID)
|
||||
|
||||
@@ -348,7 +353,7 @@ func (s *Server) Sync(req *proto.EncryptedMessage, srv proto.ManagementService_S
|
||||
s.cancelPeerRoutinesWithoutLock(ctx, accountID, peer, syncStart)
|
||||
return err
|
||||
}
|
||||
s.recordPeerSyncEntry(peerKey.String(), netMap, metahash)
|
||||
s.recordPeerSyncEntry(peerKey.String(), netMap, metahash, peer)
|
||||
|
||||
updates, err := s.networkMapController.OnPeerConnected(ctx, accountID, peer.ID)
|
||||
if err != nil {
|
||||
@@ -522,7 +527,7 @@ func (s *Server) sendUpdate(ctx context.Context, accountID string, peerKey wgtyp
|
||||
return status.Errorf(codes.Internal, "failed sending update message")
|
||||
}
|
||||
if update.MessageType == network_map.MessageTypeNetworkMap {
|
||||
s.recordPeerSyncEntryFromUpdate(peerKey.String(), update, peerMetaHash)
|
||||
s.recordPeerSyncEntryFromUpdate(peerKey.String(), update, peerMetaHash, peer)
|
||||
}
|
||||
log.WithContext(ctx).Debugf("sent an update to peer %s", peerKey.String())
|
||||
return nil
|
||||
|
||||
@@ -29,6 +29,8 @@ type peerGroupFetcher func(ctx context.Context, accountID, peerID string) ([]str
|
||||
|
||||
// peerSyncEntry records what the server last delivered to a peer on Sync so we
|
||||
// can decide whether the next Sync can skip the full network map computation.
|
||||
// It also carries the minimum peer/auth metadata needed to run the fast path
|
||||
// without a DB round-trip on cache hit.
|
||||
type peerSyncEntry struct {
|
||||
// Serial is the NetworkMap.Serial the server last included in a full map
|
||||
// delivered to this peer.
|
||||
@@ -36,6 +38,68 @@ type peerSyncEntry struct {
|
||||
// MetaHash is the metaHash() value of the peer metadata at the time of that
|
||||
// delivery, used to detect a meta change on reconnect.
|
||||
MetaHash uint64
|
||||
// AccountID is the peer's account ID. Cached so the Sync hot path can skip
|
||||
// GetPeerAuthInfo on cache hit.
|
||||
AccountID string
|
||||
// PeerID is the peer's internal ID, needed for network-map subscription
|
||||
// and update-channel routing.
|
||||
PeerID string
|
||||
// PeerKey mirrors the cache key (peer's wireguard pubkey) so the peer
|
||||
// snapshot carries everything required by cancelPeerRoutines without a
|
||||
// second store lookup.
|
||||
PeerKey string
|
||||
// Ephemeral is the peer's ephemeral flag, used by EphemeralPeersManager
|
||||
// on subscribe/unsubscribe.
|
||||
Ephemeral bool
|
||||
// HasUser is true if the peer is user-owned (peer.UserID != ""). Used in
|
||||
// place of GetUserIDByPeerKey's result to drive the loginFilter gate on
|
||||
// cache hit.
|
||||
HasUser bool
|
||||
}
|
||||
|
||||
// IsComplete reports whether the entry has every field the pure-cache fast
|
||||
// path needs. Entries written by older code (before step 2) will carry only
|
||||
// Serial and MetaHash and must fall back to the slow path so the cache is
|
||||
// repopulated with the full shape.
|
||||
func (e peerSyncEntry) IsComplete() bool {
|
||||
return e.AccountID != "" && e.PeerID != "" && e.PeerKey != ""
|
||||
}
|
||||
|
||||
// PeerSnapshot reconstructs the minimum *nbpeer.Peer needed by
|
||||
// OnPeerConnectedWithPeer, EphemeralPeersManager, handleUpdates,
|
||||
// cancelPeerRoutines, and buildFastPathResponse.
|
||||
func (e peerSyncEntry) PeerSnapshot() *nbpeer.Peer {
|
||||
return &nbpeer.Peer{
|
||||
ID: e.PeerID,
|
||||
Key: e.PeerKey,
|
||||
AccountID: e.AccountID,
|
||||
Ephemeral: e.Ephemeral,
|
||||
}
|
||||
}
|
||||
|
||||
// lookupPeerAuthFromCache checks whether the peer-sync cache holds a complete
|
||||
// entry for this peer with a matching metaHash, so the Sync handler can skip
|
||||
// the pre-fast-path GetPeerAuthInfo store read. Returns hit=false whenever
|
||||
// the fast path is disabled, the peer is Android, the cache is empty, the
|
||||
// entry is from an older shape without snapshot fields, or metaHash differs.
|
||||
func (s *Server) lookupPeerAuthFromCache(peerPubKey string, incomingMetaHash uint64, goOS string) (peerSyncEntry, bool) {
|
||||
if s.peerSerialCache == nil {
|
||||
return peerSyncEntry{}, false
|
||||
}
|
||||
if !s.fastPathFlag.Enabled() {
|
||||
return peerSyncEntry{}, false
|
||||
}
|
||||
if strings.EqualFold(goOS, "android") {
|
||||
return peerSyncEntry{}, false
|
||||
}
|
||||
entry, hit := s.peerSerialCache.Get(peerPubKey)
|
||||
if !hit || !entry.IsComplete() {
|
||||
return peerSyncEntry{}, false
|
||||
}
|
||||
if entry.MetaHash != incomingMetaHash {
|
||||
return peerSyncEntry{}, false
|
||||
}
|
||||
return entry, true
|
||||
}
|
||||
|
||||
// shouldSkipNetworkMap reports whether a Sync request from this peer can be
|
||||
@@ -169,7 +233,11 @@ func (s *Server) tryFastPathSync(
|
||||
}
|
||||
log.WithContext(ctx).Debugf("fast path: eligibility check (hit) took %s", time.Since(eligibilityStart))
|
||||
|
||||
peer, updates, committed := s.commitFastPath(ctx, accountID, peerKey, realIP, syncStart)
|
||||
var cachedPeer *nbpeer.Peer
|
||||
if cached.IsComplete() {
|
||||
cachedPeer = cached.PeerSnapshot()
|
||||
}
|
||||
peer, updates, committed := s.commitFastPath(ctx, accountID, peerKey, realIP, syncStart, cachedPeer)
|
||||
if !committed {
|
||||
return false, nil
|
||||
}
|
||||
@@ -177,32 +245,43 @@ func (s *Server) tryFastPathSync(
|
||||
return true, s.runFastPathSync(ctx, reqStart, syncStart, accountID, peerKey, peer, updates, peerMetaHash, srv, unlock)
|
||||
}
|
||||
|
||||
// commitFastPath fetches the peer, subscribes it to network-map updates and
|
||||
// marks the peer connected. It relies on the same eventual-consistency
|
||||
// guarantee as the slow path: a concurrent writer's broadcast may race the
|
||||
// subscription, but any subsequent serial change reaches the subscribed peer
|
||||
// via its update channel, and a reconnect with a stale cached serial falls
|
||||
// through to the slow path on the next Sync. Returns committed=false on any
|
||||
// failure that should not block the slow path from running.
|
||||
// commitFastPath subscribes the peer to network-map updates and marks it
|
||||
// connected. When cachedPeer is non-nil (cache hit with a complete entry),
|
||||
// the expensive GetPeerByPeerPubKey store call is skipped and the cached
|
||||
// snapshot is used instead.
|
||||
//
|
||||
// It relies on the same eventual-consistency guarantee as the slow path: a
|
||||
// concurrent writer's broadcast may race the subscription, but any subsequent
|
||||
// serial change reaches the subscribed peer via its update channel, and a
|
||||
// reconnect with a stale cached serial falls through to the slow path on the
|
||||
// next Sync. Returns committed=false on any failure that should not block
|
||||
// the slow path from running.
|
||||
func (s *Server) commitFastPath(
|
||||
ctx context.Context,
|
||||
accountID string,
|
||||
peerKey wgtypes.Key,
|
||||
realIP net.IP,
|
||||
syncStart time.Time,
|
||||
cachedPeer *nbpeer.Peer,
|
||||
) (*nbpeer.Peer, chan *network_map.UpdateMessage, bool) {
|
||||
commitStart := time.Now()
|
||||
defer func() {
|
||||
log.WithContext(ctx).Debugf("fast path: commitFastPath took %s", time.Since(commitStart))
|
||||
}()
|
||||
|
||||
getPeerStart := time.Now()
|
||||
peer, err := s.accountManager.GetStore().GetPeerByPeerPubKey(ctx, store.LockingStrengthNone, peerKey.String())
|
||||
if err != nil {
|
||||
log.WithContext(ctx).Debugf("fast path: lookup peer %s: %v", peerKey.String(), err)
|
||||
return nil, nil, false
|
||||
var peer *nbpeer.Peer
|
||||
if cachedPeer != nil {
|
||||
peer = cachedPeer
|
||||
} else {
|
||||
getPeerStart := time.Now()
|
||||
p, err := s.accountManager.GetStore().GetPeerByPeerPubKey(ctx, store.LockingStrengthNone, peerKey.String())
|
||||
if err != nil {
|
||||
log.WithContext(ctx).Debugf("fast path: lookup peer %s: %v", peerKey.String(), err)
|
||||
return nil, nil, false
|
||||
}
|
||||
log.WithContext(ctx).Debugf("fast path: GetPeerByPeerPubKey took %s", time.Since(getPeerStart))
|
||||
peer = p
|
||||
}
|
||||
log.WithContext(ctx).Debugf("fast path: GetPeerByPeerPubKey took %s", time.Since(getPeerStart))
|
||||
|
||||
onConnectedStart := time.Now()
|
||||
updates, err := s.networkMapController.OnPeerConnectedWithPeer(ctx, accountID, peer)
|
||||
@@ -297,7 +376,9 @@ func (s *Server) fetchPeerGroups(ctx context.Context, accountID, peerID string)
|
||||
// recordPeerSyncEntry writes the serial just delivered to this peer so a
|
||||
// subsequent reconnect can take the fast path. Called after the slow path's
|
||||
// sendInitialSync has pushed a full map. A nil cache disables the fast path.
|
||||
func (s *Server) recordPeerSyncEntry(peerKey string, netMap *nbtypes.NetworkMap, peerMetaHash uint64) {
|
||||
// peer is required so the cached entry carries the snapshot fields the
|
||||
// pure-cache fast path needs (AccountID, PeerID, Key, Ephemeral, HasUser).
|
||||
func (s *Server) recordPeerSyncEntry(peerKey string, netMap *nbtypes.NetworkMap, peerMetaHash uint64, peer *nbpeer.Peer) {
|
||||
if s.peerSerialCache == nil {
|
||||
return
|
||||
}
|
||||
@@ -311,13 +392,13 @@ func (s *Server) recordPeerSyncEntry(peerKey string, netMap *nbtypes.NetworkMap,
|
||||
if serial == 0 {
|
||||
return
|
||||
}
|
||||
s.peerSerialCache.Set(peerKey, peerSyncEntry{Serial: serial, MetaHash: peerMetaHash})
|
||||
s.peerSerialCache.Set(peerKey, newPeerSyncEntry(serial, peerMetaHash, peer))
|
||||
}
|
||||
|
||||
// recordPeerSyncEntryFromUpdate is the sendUpdate equivalent of
|
||||
// recordPeerSyncEntry: it extracts the serial from a streamed NetworkMap update
|
||||
// so the cache stays in sync with what the peer most recently received.
|
||||
func (s *Server) recordPeerSyncEntryFromUpdate(peerKey string, update *network_map.UpdateMessage, peerMetaHash uint64) {
|
||||
func (s *Server) recordPeerSyncEntryFromUpdate(peerKey string, update *network_map.UpdateMessage, peerMetaHash uint64, peer *nbpeer.Peer) {
|
||||
if s.peerSerialCache == nil || update == nil || update.Update == nil || update.Update.NetworkMap == nil {
|
||||
return
|
||||
}
|
||||
@@ -328,7 +409,25 @@ func (s *Server) recordPeerSyncEntryFromUpdate(peerKey string, update *network_m
|
||||
if serial == 0 {
|
||||
return
|
||||
}
|
||||
s.peerSerialCache.Set(peerKey, peerSyncEntry{Serial: serial, MetaHash: peerMetaHash})
|
||||
s.peerSerialCache.Set(peerKey, newPeerSyncEntry(serial, peerMetaHash, peer))
|
||||
}
|
||||
|
||||
// newPeerSyncEntry builds a cache entry with every field the pure-cache
|
||||
// fast path needs. peer may be nil (very old call sites), in which case the
|
||||
// entry is written without the snapshot fields and will fail IsComplete().
|
||||
func newPeerSyncEntry(serial, metaHash uint64, peer *nbpeer.Peer) peerSyncEntry {
|
||||
entry := peerSyncEntry{
|
||||
Serial: serial,
|
||||
MetaHash: metaHash,
|
||||
}
|
||||
if peer != nil {
|
||||
entry.AccountID = peer.AccountID
|
||||
entry.PeerID = peer.ID
|
||||
entry.PeerKey = peer.Key
|
||||
entry.Ephemeral = peer.Ephemeral
|
||||
entry.HasUser = peer.UserID != ""
|
||||
}
|
||||
return entry
|
||||
}
|
||||
|
||||
// invalidatePeerSyncEntry is called after a successful Login so the next Sync
|
||||
|
||||
@@ -2287,3 +2287,9 @@ func (am *DefaultAccountManager) savePeerIPUpdate(ctx context.Context, transacti
|
||||
func (am *DefaultAccountManager) GetUserIDByPeerKey(ctx context.Context, peerKey string) (string, error) {
|
||||
return am.Store.GetUserIDByPeerKey(ctx, store.LockingStrengthNone, peerKey)
|
||||
}
|
||||
|
||||
// GetPeerAuthInfo returns the userID and accountID for a peer in a single
|
||||
// store call. Used by the Sync hot path to collapse two lookups into one.
|
||||
func (am *DefaultAccountManager) GetPeerAuthInfo(ctx context.Context, peerKey string) (string, string, error) {
|
||||
return am.Store.GetPeerAuthInfoByPubKey(ctx, store.LockingStrengthNone, peerKey)
|
||||
}
|
||||
|
||||
@@ -134,6 +134,9 @@ type Manager interface {
|
||||
GetOwnerInfo(ctx context.Context, accountId string) (*types.UserInfo, error)
|
||||
GetCurrentUserInfo(ctx context.Context, userAuth auth.UserAuth) (*users.UserInfoWithPermissions, error)
|
||||
GetUserIDByPeerKey(ctx context.Context, peerKey string) (string, error)
|
||||
// GetPeerAuthInfo returns the userID and accountID for a peer in a single
|
||||
// store call. Used by the Sync hot path to collapse two lookups into one.
|
||||
GetPeerAuthInfo(ctx context.Context, peerKey string) (userID, accountID string, err error)
|
||||
GetIdentityProvider(ctx context.Context, accountID, idpID, userID string) (*types.IdentityProvider, error)
|
||||
GetIdentityProviders(ctx context.Context, accountID, userID string) ([]*types.IdentityProvider, error)
|
||||
CreateIdentityProvider(ctx context.Context, accountID, userID string, idp *types.IdentityProvider) (*types.IdentityProvider, error)
|
||||
|
||||
@@ -900,6 +900,22 @@ func (mr *MockManagerMockRecorder) GetPeer(ctx, accountID, peerID, userID interf
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPeer", reflect.TypeOf((*MockManager)(nil).GetPeer), ctx, accountID, peerID, userID)
|
||||
}
|
||||
|
||||
// GetPeerAuthInfo mocks base method.
|
||||
func (m *MockManager) GetPeerAuthInfo(ctx context.Context, peerKey string) (string, string, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetPeerAuthInfo", ctx, peerKey)
|
||||
ret0, _ := ret[0].(string)
|
||||
ret1, _ := ret[1].(string)
|
||||
ret2, _ := ret[2].(error)
|
||||
return ret0, ret1, ret2
|
||||
}
|
||||
|
||||
// GetPeerAuthInfo indicates an expected call of GetPeerAuthInfo.
|
||||
func (mr *MockManagerMockRecorder) GetPeerAuthInfo(ctx, peerKey interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPeerAuthInfo", reflect.TypeOf((*MockManager)(nil).GetPeerAuthInfo), ctx, peerKey)
|
||||
}
|
||||
|
||||
// GetPeerGroups mocks base method.
|
||||
func (m *MockManager) GetPeerGroups(ctx context.Context, accountID, peerID string) ([]*types.Group, error) {
|
||||
m.ctrl.T.Helper()
|
||||
|
||||
@@ -1084,6 +1084,20 @@ func (am *MockAccountManager) GetUserIDByPeerKey(ctx context.Context, peerKey st
|
||||
return "something", nil
|
||||
}
|
||||
|
||||
// GetPeerAuthInfo mocks GetPeerAuthInfo of the AccountManager interface by
|
||||
// delegating to GetUserIDByPeerKey and GetAccountIDForPeerKey.
|
||||
func (am *MockAccountManager) GetPeerAuthInfo(ctx context.Context, peerKey string) (string, string, error) {
|
||||
userID, err := am.GetUserIDByPeerKey(ctx, peerKey)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
accountID, err := am.GetAccountIDForPeerKey(ctx, peerKey)
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
return userID, accountID, nil
|
||||
}
|
||||
|
||||
// GetIdentityProvider mocks GetIdentityProvider of the AccountManager interface
|
||||
func (am *MockAccountManager) GetIdentityProvider(ctx context.Context, accountID, idpID, userID string) (*types.IdentityProvider, error) {
|
||||
if am.GetIdentityProviderFunc != nil {
|
||||
|
||||
@@ -4685,6 +4685,33 @@ func (s *SqlStore) GetUserIDByPeerKey(ctx context.Context, lockStrength LockingS
|
||||
return userID, nil
|
||||
}
|
||||
|
||||
// GetPeerAuthInfoByPubKey returns the user_id and account_id for a peer in a
|
||||
// single SELECT. Used by the Sync hot path to replace the back-to-back
|
||||
// GetUserIDByPeerKey + GetAccountIDByPeerPubKey calls.
|
||||
func (s *SqlStore) GetPeerAuthInfoByPubKey(ctx context.Context, lockStrength LockingStrength, peerKey string) (string, string, error) {
|
||||
tx := s.db
|
||||
if lockStrength != LockingStrengthNone {
|
||||
tx = tx.Clauses(clause.Locking{Strength: string(lockStrength)})
|
||||
}
|
||||
|
||||
var row struct {
|
||||
UserID string
|
||||
AccountID string
|
||||
}
|
||||
result := tx.Model(&nbpeer.Peer{}).
|
||||
Select("user_id", "account_id").
|
||||
Take(&row, GetKeyQueryCondition(s), peerKey)
|
||||
|
||||
if result.Error != nil {
|
||||
if errors.Is(result.Error, gorm.ErrRecordNotFound) {
|
||||
return "", "", status.Errorf(status.NotFound, "peer not found: index lookup failed")
|
||||
}
|
||||
return "", "", status.Errorf(status.Internal, "failed to get peer auth info by peer key")
|
||||
}
|
||||
|
||||
return row.UserID, row.AccountID, nil
|
||||
}
|
||||
|
||||
func (s *SqlStore) CreateZone(ctx context.Context, zone *zones.Zone) error {
|
||||
result := s.db.Create(zone)
|
||||
if result.Error != nil {
|
||||
|
||||
@@ -230,6 +230,10 @@ type Store interface {
|
||||
// SetFieldEncrypt sets the field encryptor for encrypting sensitive user data.
|
||||
SetFieldEncrypt(enc *crypt.FieldEncrypt)
|
||||
GetUserIDByPeerKey(ctx context.Context, lockStrength LockingStrength, peerKey string) (string, error)
|
||||
// GetPeerAuthInfoByPubKey returns the userID and accountID for a peer in a
|
||||
// single query, replacing the pattern of calling GetUserIDByPeerKey and
|
||||
// GetAccountIDByPeerPubKey back-to-back on the Sync hot path.
|
||||
GetPeerAuthInfoByPubKey(ctx context.Context, lockStrength LockingStrength, peerKey string) (userID, accountID string, err error)
|
||||
|
||||
CreateZone(ctx context.Context, zone *zones.Zone) error
|
||||
UpdateZone(ctx context.Context, zone *zones.Zone) error
|
||||
|
||||
@@ -165,19 +165,6 @@ func (mr *MockStoreMockRecorder) CleanupStaleProxies(ctx, inactivityDuration int
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CleanupStaleProxies", reflect.TypeOf((*MockStore)(nil).CleanupStaleProxies), ctx, inactivityDuration)
|
||||
}
|
||||
|
||||
// GetClusterSupportsCrowdSec mocks base method.
|
||||
func (m *MockStore) GetClusterSupportsCrowdSec(ctx context.Context, clusterAddr string) *bool {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetClusterSupportsCrowdSec", ctx, clusterAddr)
|
||||
ret0, _ := ret[0].(*bool)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// GetClusterSupportsCrowdSec indicates an expected call of GetClusterSupportsCrowdSec.
|
||||
func (mr *MockStoreMockRecorder) GetClusterSupportsCrowdSec(ctx, clusterAddr interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClusterSupportsCrowdSec", reflect.TypeOf((*MockStore)(nil).GetClusterSupportsCrowdSec), ctx, clusterAddr)
|
||||
}
|
||||
// Close mocks base method.
|
||||
func (m *MockStore) Close(ctx context.Context) error {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -1388,6 +1375,20 @@ func (mr *MockStoreMockRecorder) GetClusterRequireSubdomain(ctx, clusterAddr int
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClusterRequireSubdomain", reflect.TypeOf((*MockStore)(nil).GetClusterRequireSubdomain), ctx, clusterAddr)
|
||||
}
|
||||
|
||||
// GetClusterSupportsCrowdSec mocks base method.
|
||||
func (m *MockStore) GetClusterSupportsCrowdSec(ctx context.Context, clusterAddr string) *bool {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetClusterSupportsCrowdSec", ctx, clusterAddr)
|
||||
ret0, _ := ret[0].(*bool)
|
||||
return ret0
|
||||
}
|
||||
|
||||
// GetClusterSupportsCrowdSec indicates an expected call of GetClusterSupportsCrowdSec.
|
||||
func (mr *MockStoreMockRecorder) GetClusterSupportsCrowdSec(ctx, clusterAddr interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetClusterSupportsCrowdSec", reflect.TypeOf((*MockStore)(nil).GetClusterSupportsCrowdSec), ctx, clusterAddr)
|
||||
}
|
||||
|
||||
// GetClusterSupportsCustomPorts mocks base method.
|
||||
func (m *MockStore) GetClusterSupportsCustomPorts(ctx context.Context, clusterAddr string) *bool {
|
||||
m.ctrl.T.Helper()
|
||||
@@ -1687,6 +1688,22 @@ func (mr *MockStoreMockRecorder) GetPATByID(ctx, lockStrength, userID, patID int
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPATByID", reflect.TypeOf((*MockStore)(nil).GetPATByID), ctx, lockStrength, userID, patID)
|
||||
}
|
||||
|
||||
// GetPeerAuthInfoByPubKey mocks base method.
|
||||
func (m *MockStore) GetPeerAuthInfoByPubKey(ctx context.Context, lockStrength LockingStrength, peerKey string) (string, string, error) {
|
||||
m.ctrl.T.Helper()
|
||||
ret := m.ctrl.Call(m, "GetPeerAuthInfoByPubKey", ctx, lockStrength, peerKey)
|
||||
ret0, _ := ret[0].(string)
|
||||
ret1, _ := ret[1].(string)
|
||||
ret2, _ := ret[2].(error)
|
||||
return ret0, ret1, ret2
|
||||
}
|
||||
|
||||
// GetPeerAuthInfoByPubKey indicates an expected call of GetPeerAuthInfoByPubKey.
|
||||
func (mr *MockStoreMockRecorder) GetPeerAuthInfoByPubKey(ctx, lockStrength, peerKey interface{}) *gomock.Call {
|
||||
mr.mock.ctrl.T.Helper()
|
||||
return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetPeerAuthInfoByPubKey", reflect.TypeOf((*MockStore)(nil).GetPeerAuthInfoByPubKey), ctx, lockStrength, peerKey)
|
||||
}
|
||||
|
||||
// GetPeerByID mocks base method.
|
||||
func (m *MockStore) GetPeerByID(ctx context.Context, lockStrength LockingStrength, accountID, peerID string) (*peer.Peer, error) {
|
||||
m.ctrl.T.Helper()
|
||||
|
||||
Reference in New Issue
Block a user