management: add optional gorm query cache (in-memory or Redis) for the SQL store

This commit is contained in:
Pascal Fischer
2025-10-13 16:29:04 +02:00
parent 0d2e67983a
commit f0e8cd578d
15 changed files with 498 additions and 17 deletions

View File

@@ -17,7 +17,7 @@ type Peer struct {
// ID is an internal ID of the peer
ID string `gorm:"primaryKey"`
// AccountID is a reference to Account that this object belongs
AccountID string `json:"-" gorm:"index"`
AccountID string `gorm:"index"`
// WireGuard public key
Key string `gorm:"index"`
// IP address of the Peer

View File

@@ -7,8 +7,9 @@ import (
"regexp"
"github.com/hashicorp/go-version"
"github.com/netbirdio/netbird/shared/management/http/api"
nbpeer "github.com/netbirdio/netbird/management/server/peer"
"github.com/netbirdio/netbird/shared/management/http/api"
"github.com/netbirdio/netbird/shared/management/status"
)
@@ -45,7 +46,7 @@ type Checks struct {
Description string
// AccountID is a reference to the Account that this object belongs
AccountID string `json:"-" gorm:"index"`
AccountID string `gorm:"index"`
// Checks is a set of objects that perform the actual checks
Checks ChecksDefinition `gorm:"serializer:json"`

48
management/server/store/cache/memory.go vendored Normal file
View File

@@ -0,0 +1,48 @@
package cache

import (
	"context"
	"sync"

	"github.com/go-gorm/caches/v4"
)

// MemoryCacher is an in-process implementation of caches.Cacher backed by a
// sync.Map. The zero value is ready to use; the backing map is created lazily
// on first access.
type MemoryCacher struct {
	// mu guards replacement of store (lazy initialization and Invalidate).
	// Without it, concurrent Get/Store calls could each observe a nil store
	// and create separate maps, losing entries, and Invalidate's pointer swap
	// would race with readers.
	mu    sync.Mutex
	store *sync.Map
}

// getStore returns the current backing map, creating it on first use.
func (c *MemoryCacher) getStore() *sync.Map {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.store == nil {
		c.store = &sync.Map{}
	}
	return c.store
}

// Get returns the cached query for key, or (nil, nil) on a cache miss.
// Entries are stored as the marshaled []byte produced by Store.
func (c *MemoryCacher) Get(ctx context.Context, key string, q *caches.Query[any]) (*caches.Query[any], error) {
	val, ok := c.getStore().Load(key)
	if !ok {
		return nil, nil
	}
	if err := q.Unmarshal(val.([]byte)); err != nil {
		return nil, err
	}
	return q, nil
}

// Store serializes val and caches it under key, replacing any existing entry.
func (c *MemoryCacher) Store(ctx context.Context, key string, val *caches.Query[any]) error {
	res, err := val.Marshal()
	if err != nil {
		return err
	}
	c.getStore().Store(key, res)
	return nil
}

// Invalidate drops all cached entries by swapping in a fresh map.
func (c *MemoryCacher) Invalidate(ctx context.Context) error {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.store = &sync.Map{}
	return nil
}

73
management/server/store/cache/redis.go vendored Normal file
View File

@@ -0,0 +1,73 @@
package cache

import (
	"context"
	"errors"
	"fmt"
	"time"

	"github.com/go-gorm/caches/v4"
	"github.com/redis/go-redis/v9"
)

// cacheTTL bounds how long a cached query result lives in Redis, so stale
// entries expire even if an invalidation is missed.
const cacheTTL = 300 * time.Second

// RedisCacher implements caches.Cacher on top of a Redis instance, allowing
// multiple management instances to share one cache (and its invalidations).
type RedisCacher struct {
	rdb *redis.Client
}

// NewRedisCacher returns a RedisCacher backed by rdb.
func NewRedisCacher(rdb *redis.Client) *RedisCacher {
	return &RedisCacher{rdb: rdb}
}

// Get returns the cached query for key, or (nil, nil) on a cache miss.
func (c *RedisCacher) Get(ctx context.Context, key string, q *caches.Query[any]) (*caches.Query[any], error) {
	res, err := c.rdb.Get(ctx, key).Result()
	// errors.Is handles wrapped errors; a plain == comparison can miss
	// redis.Nil when the client wraps it.
	if errors.Is(err, redis.Nil) {
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	if err := q.Unmarshal([]byte(res)); err != nil {
		return nil, err
	}
	return q, nil
}

// Store serializes val and writes it to Redis under key with cacheTTL.
// The write error is returned (the previous version silently dropped it,
// hiding cache outages from callers).
func (c *RedisCacher) Store(ctx context.Context, key string, val *caches.Query[any]) error {
	res, err := val.Marshal()
	if err != nil {
		return err
	}
	return c.rdb.Set(ctx, key, res, cacheTTL).Err()
}

// Invalidate removes every cache entry carrying the plugin's key prefix,
// using SCAN (cursor iteration) to avoid blocking Redis the way KEYS would.
func (c *RedisCacher) Invalidate(ctx context.Context) error {
	var (
		cursor uint64
		keys   []string
	)
	for {
		var (
			k   []string
			err error
		)
		k, cursor, err = c.rdb.Scan(ctx, cursor, fmt.Sprintf("%s*", caches.IdentifierPrefix), 0).Result()
		if err != nil {
			return err
		}
		keys = append(keys, k...)
		if cursor == 0 {
			break
		}
	}
	if len(keys) > 0 {
		if _, err := c.rdb.Del(ctx, keys...).Result(); err != nil {
			return err
		}
	}
	return nil
}

View File

@@ -15,6 +15,8 @@ import (
"sync"
"time"
"github.com/go-gorm/caches/v4"
"github.com/redis/go-redis/v9"
log "github.com/sirupsen/logrus"
"gorm.io/driver/mysql"
"gorm.io/driver/postgres"
@@ -30,6 +32,7 @@ import (
networkTypes "github.com/netbirdio/netbird/management/server/networks/types"
nbpeer "github.com/netbirdio/netbird/management/server/peer"
"github.com/netbirdio/netbird/management/server/posture"
"github.com/netbirdio/netbird/management/server/store/cache"
"github.com/netbirdio/netbird/management/server/telemetry"
"github.com/netbirdio/netbird/management/server/types"
"github.com/netbirdio/netbird/management/server/util"
@@ -46,11 +49,15 @@ const (
accountAndIDsQueryCondition = "account_id = ? AND id IN ?"
accountIDCondition = "account_id = ?"
peerNotFoundFMT = "peer %s not found"
storeCacheEnabledEnv = "NB_STORE_CACHE_ENABLED"
storeCacheRedisAddrEnv = "NB_STORE_CACHE_REDIS_ADDR"
)
// SqlStore represents an account storage backed by a Sql DB persisted to disk
type SqlStore struct {
db *gorm.DB
uncachedDB *gorm.DB
globalAccountLock sync.Mutex
metrics telemetry.AppMetrics
installationPK int
@@ -66,6 +73,13 @@ type migrationFunc func(*gorm.DB) error
// NewSqlStore creates a new SqlStore instance.
func NewSqlStore(ctx context.Context, db *gorm.DB, storeEngine types.Engine, metrics telemetry.AppMetrics, skipMigration bool) (*SqlStore, error) {
if os.Getenv(storeCacheEnabledEnv) == "true" {
err := configureStoreCache(ctx, db)
if err != nil {
return nil, fmt.Errorf("failed to configure store cache: %w", err)
}
}
sql, err := db.DB()
if err != nil {
return nil, err
@@ -116,6 +130,26 @@ func NewSqlStore(ctx context.Context, db *gorm.DB, storeEngine types.Engine, met
return &SqlStore{db: db, storeEngine: storeEngine, metrics: metrics, installationPK: 1}, nil
}
// configureStoreCache registers the gorm query-cache plugin on db. When
// NB_STORE_CACHE_REDIS_ADDR is set, a shared Redis cache is used (required
// for multi-instance deployments so invalidations propagate between
// instances); otherwise an in-memory cache local to this process is used.
func configureStoreCache(ctx context.Context, db *gorm.DB) error {
	var cacher caches.Cacher = &cache.MemoryCacher{}
	if addr := os.Getenv(storeCacheRedisAddrEnv); addr != "" {
		opt, err := redis.ParseURL(addr)
		if err != nil {
			// Deliberately omit the raw URL: it may embed credentials
			// (redis://user:pass@host) and must not end up in logs.
			return fmt.Errorf("failed to parse redis url: %w", err)
		}
		cacher = cache.NewRedisCacher(redis.NewClient(opt))
		// Log only the host:port, not the full URL, for the same reason.
		log.WithContext(ctx).Infof("using redis store cache at %s", opt.Addr)
	} else {
		log.WithContext(ctx).Infof("using in-memory store cache")
	}
	cachesPlugin := &caches.Caches{Conf: &caches.Config{
		Cacher: cacher,
	}}
	return db.Use(cachesPlugin)
}
func GetKeyQueryCondition(s *SqlStore) string {
if s.storeEngine == types.MysqlStoreEngine {
return mysqlKeyQueryCondition

View File

@@ -20,6 +20,8 @@ import (
log "github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/testcontainers/testcontainers-go"
testcontainersredis "github.com/testcontainers/testcontainers-go/modules/redis"
nbdns "github.com/netbirdio/netbird/dns"
resourceTypes "github.com/netbirdio/netbird/management/server/networks/resources/types"
@@ -3717,3 +3719,320 @@ func TestSqlStore_GetPeersByGroupIDs(t *testing.T) {
})
}
}
// TestSqlStore_CacheHit verifies that with the store cache enabled
// (NB_STORE_CACHE_ENABLED=true) a repeated read of the same peer is served
// without additional database work.
func TestSqlStore_CacheHit(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip("The SQLite store is not properly supported by Windows yet")
	}

	// Must be set before the store is built; NewSqlStore reads this env var
	// to decide whether to install the cache plugin.
	t.Setenv(storeCacheEnabledEnv, "true")

	store, cleanUp, err := NewTestStoreFromSQL(context.Background(), "../testdata/store.sql", t.TempDir())
	// NOTE(review): cleanUp is registered before the error check — if
	// NewTestStoreFromSQL can return a nil cleanup func on error this would
	// panic during cleanup; confirm the helper always returns a non-nil func.
	t.Cleanup(cleanUp)
	require.NoError(t, err)

	ctx := context.Background()
	// Fixed IDs seeded by ../testdata/store.sql.
	accountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b"
	peerID := "ct286bi7qv930dsrrug0"

	sqlStore := store.(*SqlStore)

	// First call - should hit the database
	peer1, err := sqlStore.GetPeerByID(ctx, LockingStrengthShare, accountID, peerID)
	require.NoError(t, err)
	require.NotNil(t, peer1)

	// Get the underlying database connection
	db, err := sqlStore.db.DB()
	require.NoError(t, err)

	// Get DB stats before second call
	statsBefore := db.Stats()

	// Second call - should hit the cache, not the database
	peer2, err := sqlStore.GetPeerByID(ctx, LockingStrengthShare, accountID, peerID)
	require.NoError(t, err)
	require.NotNil(t, peer2)

	// Get DB stats after second call
	statsAfter := db.Stats()

	// Verify no additional database connections were opened for the cached query
	// The OpenConnections count should be the same or very similar
	// NOTE(review): InUse connection counts are an indirect proxy for a cache
	// hit — pooled connection reuse would also keep them equal; a query
	// counter on the DB would assert this more directly.
	assert.Equal(t, statsBefore.InUse, statsAfter.InUse, "Cache hit should not open new database connections")

	// Verify both peers are equal
	assert.Equal(t, peer1.ID, peer2.ID)
	assert.Equal(t, peer1.Name, peer2.Name)
}
// TestSqlStore_CacheInvalidationAcrossInstances verifies that two store
// instances sharing one Redis cache and one SQLite database see each other's
// writes: a peer update through store1 must invalidate the cached entry so
// store2 reads the fresh row. Requires Docker (testcontainers Redis).
func TestSqlStore_CacheInvalidationAcrossInstances(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip("The SQLite store is not properly supported by Windows yet")
	}

	t.Setenv(storeCacheEnabledEnv, "true")

	ctx := context.Background()

	// Start Redis container for shared cache
	redisContainer, err := testcontainersredis.RunContainer(ctx, testcontainers.WithImage("redis:7"))
	require.NoError(t, err)
	defer func() {
		if err := redisContainer.Terminate(ctx); err != nil {
			t.Logf("failed to terminate container: %s", err)
		}
	}()

	redisURL, err := redisContainer.ConnectionString(ctx)
	require.NoError(t, err)

	// Set the Redis URL environment variable for both stores
	t.Setenv(storeCacheRedisAddrEnv, redisURL)

	// Create a shared SQLite database in a temp directory with cache=shared mode
	// This allows multiple connections to the same database
	tempDir := t.TempDir()

	// Create first store instance with shared database
	store1, cleanUp1, err := NewTestStoreFromSQL(ctx, "../testdata/store.sql", tempDir)
	// NOTE(review): cleanup registered before the error check — confirm the
	// helper never returns a nil cleanup func on error.
	t.Cleanup(cleanUp1)
	require.NoError(t, err)

	// Create second store instance connecting to the SAME database file
	// Both stores will share the same underlying database AND the same Redis cache
	store2, cleanUp2, err := NewTestStoreFromSQL(ctx, "", tempDir)
	t.Cleanup(cleanUp2)
	require.NoError(t, err)

	// Fixed IDs seeded by ../testdata/store.sql.
	accountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b"
	peerID := "ct286bi7qv930dsrrug0"

	// Store 1: Fetch peer (populates cache)
	peer1, err := store1.GetPeerByID(ctx, LockingStrengthShare, accountID, peerID)
	require.NoError(t, err)
	require.NotNil(t, peer1)

	// Store 2: Fetch same peer (should use cache)
	peer2, err := store2.GetPeerByID(ctx, LockingStrengthShare, accountID, peerID)
	require.NoError(t, err)
	require.NotNil(t, peer2)
	assert.Equal(t, peer1.ID, peer2.ID)

	// Store 1: Modify the peer
	peer1.Name = "updated-peer-name"
	err = store1.SavePeer(ctx, accountID, peer1)
	require.NoError(t, err)

	// Store 2: Fetch the peer again - should get updated data (cache was invalidated)
	peer2Updated, err := store2.GetPeerByID(ctx, LockingStrengthShare, accountID, peerID)
	require.NoError(t, err)
	require.NotNil(t, peer2Updated)

	// Verify the name was updated via cache invalidation
	assert.Equal(t, "updated-peer-name", peer2Updated.Name,
		"Cache should have been invalidated, store 2 should see the update from store 1")
}
// TestSqlStore_CacheGetAccountWithAssociations verifies that GetAccount —
// which loads an account together with its associated objects — works through
// the shared Redis cache, and that a SaveAccount via one store instance
// invalidates the cached account so a second instance reads the update.
// Requires Docker (testcontainers Redis).
func TestSqlStore_CacheGetAccountWithAssociations(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip("The SQLite store is not properly supported by Windows yet")
	}

	t.Setenv(storeCacheEnabledEnv, "true")

	ctx := context.Background()

	// Start Redis container for shared cache
	redisContainer, err := testcontainersredis.RunContainer(ctx, testcontainers.WithImage("redis:7"))
	require.NoError(t, err)
	defer func() {
		if err := redisContainer.Terminate(ctx); err != nil {
			t.Logf("failed to terminate container: %s", err)
		}
	}()

	redisURL, err := redisContainer.ConnectionString(ctx)
	require.NoError(t, err)

	// Set the Redis URL environment variable for both stores
	t.Setenv(storeCacheRedisAddrEnv, redisURL)

	// Create a shared SQLite database in a temp directory with cache=shared mode
	// This allows multiple connections to the same database
	tempDir := t.TempDir()

	// Create first store instance with shared database
	// (empty SQL path: start from an empty schema; the account is created below)
	store1, cleanUp1, err := NewTestStoreFromSQL(ctx, "", tempDir)
	// NOTE(review): cleanup registered before the error check — confirm the
	// helper never returns a nil cleanup func on error.
	t.Cleanup(cleanUp1)
	require.NoError(t, err)

	// Create second store instance connecting to the SAME database file
	// Both stores will share the same underlying database AND the same Redis cache
	store2, cleanUp2, err := NewTestStoreFromSQL(ctx, "", tempDir)
	t.Cleanup(cleanUp2)
	require.NoError(t, err)

	accountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b"
	userID := "edafee4e-63fb-11ec-90d6-0242ac120003"

	// Create a fresh account
	account := newAccountWithId(ctx, accountID, userID, "test.com")
	err = store1.SaveAccount(ctx, account)
	require.NoError(t, err)

	// Store 1: Fetch account (populates cache)
	account1, err := store1.GetAccount(ctx, accountID)
	require.NoError(t, err)
	require.NotNil(t, account1)

	// Store 2: Fetch same account (should use cache)
	account2, err := store2.GetAccount(ctx, accountID)
	require.NoError(t, err)
	require.NotNil(t, account2)
	assert.Equal(t, account1.Id, account2.Id)

	// Store 1: Modify the account
	account1.Domain = "updated-domain.example.com"
	err = store1.SaveAccount(ctx, account1)
	require.NoError(t, err)

	// Store 2: Fetch the account again - should get updated data (cache was invalidated)
	account2Updated, err := store2.GetAccount(ctx, accountID)
	require.NoError(t, err)
	require.NotNil(t, account2Updated)

	// Verify the domain was updated via cache invalidation
	assert.Equal(t, "updated-domain.example.com", account2Updated.Domain,
		"Cache should have been invalidated, store 2 should see the update from store 1")
}
// TestSqlStore_CacheGetGroupWithAssociations verifies that group reads with
// associated peers (the GroupPeers join) survive caching: both store
// instances see the peer list, and a group update plus peer removal through
// store1 invalidates the cache so store2 observes the new name and
// membership. Requires Docker (testcontainers Redis).
func TestSqlStore_CacheGetGroupWithAssociations(t *testing.T) {
	if runtime.GOOS == "windows" {
		t.Skip("The SQLite store is not properly supported by Windows yet")
	}

	t.Setenv(storeCacheEnabledEnv, "true")

	ctx := context.Background()

	// Start Redis container for shared cache
	redisContainer, err := testcontainersredis.RunContainer(ctx, testcontainers.WithImage("redis:7"))
	require.NoError(t, err)
	defer func() {
		if err := redisContainer.Terminate(ctx); err != nil {
			t.Logf("failed to terminate container: %s", err)
		}
	}()

	redisURL, err := redisContainer.ConnectionString(ctx)
	require.NoError(t, err)

	// Set the Redis URL environment variable for both stores
	t.Setenv(storeCacheRedisAddrEnv, redisURL)

	// Create a shared SQLite database in a temp directory with cache=shared mode
	// This allows multiple connections to the same database
	tempDir := t.TempDir()

	// Create first store instance with shared database
	store1, cleanUp1, err := NewTestStoreFromSQL(ctx, "", tempDir)
	// NOTE(review): cleanup registered before the error check — confirm the
	// helper never returns a nil cleanup func on error.
	t.Cleanup(cleanUp1)
	require.NoError(t, err)

	// Create second store instance connecting to the SAME database file
	// Both stores will share the same underlying database AND the same Redis cache
	store2, cleanUp2, err := NewTestStoreFromSQL(ctx, "", tempDir)
	t.Cleanup(cleanUp2)
	require.NoError(t, err)

	accountID := "bf1c8084-ba50-4ce7-9439-34653001fc3b"
	userID := "edafee4e-63fb-11ec-90d6-0242ac120003"

	// Create a fresh account
	account := newAccountWithId(ctx, accountID, userID, "test.com")

	// Add peers to the account
	peer1 := &nbpeer.Peer{
		Key:       "peer-key-1",
		ID:        "peer-id-1",
		IP:        net.IP{100, 64, 0, 1},
		Meta:      nbpeer.PeerSystemMeta{Hostname: "test-peer-1"},
		Name:      "Test Peer 1",
		DNSLabel:  "test-peer-1",
		Status:    &nbpeer.PeerStatus{Connected: false, LastSeen: time.Now().UTC()},
		CreatedAt: time.Now().UTC(),
		UserID:    userID,
	}
	account.Peers[peer1.ID] = peer1

	peer2 := &nbpeer.Peer{
		Key:       "peer-key-2",
		ID:        "peer-id-2",
		IP:        net.IP{100, 64, 0, 2},
		Meta:      nbpeer.PeerSystemMeta{Hostname: "test-peer-2"},
		Name:      "Test Peer 2",
		DNSLabel:  "test-peer-2",
		Status:    &nbpeer.PeerStatus{Connected: false, LastSeen: time.Now().UTC()},
		CreatedAt: time.Now().UTC(),
		UserID:    userID,
	}
	account.Peers[peer2.ID] = peer2

	// Create a group with peers (SaveAccount will convert to GroupPeers)
	group := &types.Group{
		ID:        "group-id-1",
		AccountID: accountID,
		Name:      "Test Group",
		Issued:    "api",
		Peers:     []string{peer1.ID, peer2.ID},
		Resources: []types.Resource{},
	}
	account.Groups = map[string]*types.Group{
		group.ID: group,
	}

	// Save the account with all data using store1
	err = store1.SaveAccount(ctx, account)
	require.NoError(t, err)

	// Store 1: Fetch group (populates cache)
	group1, err := store1.GetGroupByID(ctx, LockingStrengthShare, accountID, group.ID)
	require.NoError(t, err)
	require.NotNil(t, group1)
	require.NotEmpty(t, group1.Peers, "First call should load Peers (converted from GroupPeers)")
	require.Len(t, group1.Peers, 2, "First call should load both Peers")

	// Store 2: Fetch same group (should use cache)
	group2, err := store2.GetGroupByID(ctx, LockingStrengthShare, accountID, group.ID)
	require.NoError(t, err)
	require.NotNil(t, group2)
	require.NotEmpty(t, group2.Peers, "Cached group should have Peers")
	require.Len(t, group2.Peers, 2, "Cached group should have both Peers")

	// Verify data matches between both stores
	assert.Equal(t, len(group1.Peers), len(group2.Peers))
	assert.Equal(t, group1.Name, group2.Name)
	assert.ElementsMatch(t, group1.Peers, group2.Peers)

	// Modify the group with store1 (update name and remove one peer using UpdateGroup and RemovePeerFromGroup)
	group1.Name = "Modified Group Name"
	err = store1.UpdateGroup(ctx, group1)
	require.NoError(t, err)

	// Remove peer2 from the group
	err = store1.RemovePeerFromGroup(ctx, peer2.ID, group.ID)
	require.NoError(t, err)

	// Store2: Fetch the modified group (should get updated data, not stale cache)
	group3, err := store2.GetGroupByID(ctx, LockingStrengthShare, accountID, group.ID)
	require.NoError(t, err)
	require.NotNil(t, group3)

	// Verify the updated data is visible from store2
	assert.Equal(t, "Modified Group Name", group3.Name, "Store2 should see the updated group name")
	assert.Len(t, group3.Peers, 1, "Store2 should see only one peer after modification")
	assert.Contains(t, group3.Peers, peer1.ID, "Store2 should see peer1")
	assert.NotContains(t, group3.Peers, peer2.ID, "Store2 should NOT see peer2 after removal")
}

View File

@@ -4,7 +4,8 @@ CREATE TABLE `setup_keys` (`id` text,`account_id` text,`key` text,`name` text,`t
CREATE TABLE `peers` (`id` text,`account_id` text,`key` text,`setup_key` text,`ip` text,`meta_hostname` text,`meta_go_os` text,`meta_kernel` text,`meta_core` text,`meta_platform` text,`meta_os` text,`meta_os_version` text,`meta_wt_version` text,`meta_ui_version` text,`meta_kernel_version` text,`meta_network_addresses` text,`meta_system_serial_number` text,`meta_system_product_name` text,`meta_system_manufacturer` text,`meta_environment` text,`meta_files` text,`name` text,`dns_label` text,`peer_status_last_seen` datetime,`peer_status_connected` numeric,`peer_status_login_expired` numeric,`peer_status_requires_approval` numeric,`user_id` text,`ssh_key` text,`ssh_enabled` numeric,`login_expiration_enabled` numeric,`last_login` datetime,`created_at` datetime,`ephemeral` numeric,`location_connection_ip` text,`location_country_code` text,`location_city_name` text,`location_geo_name_id` integer,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_peers_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`));
CREATE TABLE `users` (`id` text,`account_id` text,`role` text,`is_service_user` numeric,`non_deletable` numeric,`service_user_name` text,`auto_groups` text,`blocked` numeric,`last_login` datetime,`created_at` datetime,`issued` text DEFAULT "api",`integration_ref_id` integer,`integration_ref_integration_type` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_users_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`));
CREATE TABLE `personal_access_tokens` (`id` text,`user_id` text,`name` text,`hashed_token` text,`expiration_date` datetime,`created_by` text,`created_at` datetime,`last_used` datetime DEFAULT NULL,PRIMARY KEY (`id`),CONSTRAINT `fk_users_pa_ts_g` FOREIGN KEY (`user_id`) REFERENCES `users`(`id`));
CREATE TABLE `groups` (`id` text,`account_id` text,`name` text,`issued` text,`peers` text,`integration_ref_id` integer,`integration_ref_integration_type` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_groups_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`));
CREATE TABLE `groups` (`id` text,`account_id` text,`name` text,`issued` text,`resources` text,`integration_ref_id` integer,`integration_ref_integration_type` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_groups_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`));
CREATE TABLE `group_peers` (`account_id` text,`group_id` text,`peer_id` text,PRIMARY KEY (`group_id`,`peer_id`),CONSTRAINT `fk_groups_group_peers` FOREIGN KEY (`group_id`) REFERENCES `groups`(`id`) ON DELETE CASCADE);
CREATE TABLE `policies` (`id` text,`account_id` text,`name` text,`description` text,`enabled` numeric,`source_posture_checks` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_policies` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`));
CREATE TABLE `policy_rules` (`id` text,`policy_id` text,`name` text,`description` text,`enabled` numeric,`action` text,`destinations` text,`sources` text,`bidirectional` numeric,`protocol` text,`ports` text,`port_ranges` text,PRIMARY KEY (`id`),CONSTRAINT `fk_policies_rules` FOREIGN KEY (`policy_id`) REFERENCES `policies`(`id`) ON DELETE CASCADE);
CREATE TABLE `routes` (`id` text,`account_id` text,`network` text,`domains` text,`keep_route` numeric,`net_id` text,`description` text,`peer` text,`peer_groups` text,`network_type` integer,`masquerade` numeric,`metric` integer,`enabled` numeric,`groups` text,`access_control_groups` text,PRIMARY KEY (`id`),CONSTRAINT `fk_accounts_routes_g` FOREIGN KEY (`account_id`) REFERENCES `accounts`(`id`));
@@ -24,6 +25,7 @@ CREATE INDEX `idx_peers_account_id_ip` ON `peers`(`account_id`,`ip`);
CREATE INDEX `idx_users_account_id` ON `users`(`account_id`);
CREATE INDEX `idx_personal_access_tokens_user_id` ON `personal_access_tokens`(`user_id`);
CREATE INDEX `idx_groups_account_id` ON `groups`(`account_id`);
CREATE INDEX `idx_group_peers_account_id` ON `group_peers`(`account_id`);
CREATE INDEX `idx_policies_account_id` ON `policies`(`account_id`);
CREATE INDEX `idx_policy_rules_policy_id` ON `policy_rules`(`policy_id`);
CREATE INDEX `idx_routes_account_id` ON `routes`(`account_id`);
@@ -40,7 +42,8 @@ CREATE INDEX `idx_networks_account_id` ON `networks`(`account_id`);
INSERT INTO accounts VALUES('bf1c8084-ba50-4ce7-9439-34653001fc3b','edafee4e-63fb-11ec-90d6-0242ac120003','2024-10-02 16:03:06.778746+02:00','test.com','private',1,'af1c8024-ha40-4ce2-9418-34653101fc3c','{"IP":"100.64.0.0","Mask":"//8AAA=="}','',0,'[]',0,86400000000000,0,0,0,'',NULL,NULL,NULL);
INSERT INTO accounts VALUES('9439-34653001fc3b-bf1c8084-ba50-4ce7','90d6-0242ac120003-edafee4e-63fb-11ec','2024-10-02 16:01:38.210000+02:00','test2.com','private',1,'af1c8024-ha40-4ce2-9418-34653101fc3c','{"IP":"100.64.0.0","Mask":"//8AAA=="}','',0,'[]',0,86400000000000,0,0,0,'',NULL,NULL,NULL);
INSERT INTO account_onboardings VALUES('9439-34653001fc3b-bf1c8084-ba50-4ce7','2024-10-02 16:01:38.210000+02:00','2021-08-19 20:46:20.005936822+02:00',1,0);INSERT INTO "groups" VALUES('cs1tnh0hhcjnqoiuebeg','bf1c8084-ba50-4ce7-9439-34653001fc3b','All','api','[]',0,'');
INSERT INTO account_onboardings VALUES('9439-34653001fc3b-bf1c8084-ba50-4ce7','2024-10-02 16:01:38.210000+02:00','2021-08-19 20:46:20.005936822+02:00',1,0);
INSERT INTO "groups" VALUES('cs1tnh0hhcjnqoiuebeg','bf1c8084-ba50-4ce7-9439-34653001fc3b','All','api','[]',0,'');
INSERT INTO setup_keys VALUES('','bf1c8084-ba50-4ce7-9439-34653001fc3b','A2C8E62B-38F5-4553-B31E-DD66C696CEBB','Default key','reusable','2021-08-19 20:46:20.005936822+02:00','2321-09-18 20:46:20.005936822+02:00','2021-08-19 20:46:20.005936822+02:00',0,0,NULL,'["cs1tnh0hhcjnqoiuebeg"]',0,0);
INSERT INTO users VALUES('a23efe53-63fb-11ec-90d6-0242ac120003','bf1c8084-ba50-4ce7-9439-34653001fc3b','owner',0,0,'','[]',0,NULL,'2024-10-02 16:03:06.779156+02:00','api',0,'');
INSERT INTO users VALUES('edafee4e-63fb-11ec-90d6-0242ac120003','bf1c8084-ba50-4ce7-9439-34653001fc3b','admin',0,0,'','[]',0,NULL,'2024-10-02 16:03:06.779156+02:00','api',0,'');

View File

@@ -66,19 +66,19 @@ type Account struct {
DomainCategory string
IsDomainPrimaryAccount bool
SetupKeys map[string]*SetupKey `gorm:"-"`
SetupKeysG []SetupKey `json:"-" gorm:"foreignKey:AccountID;references:id"`
SetupKeysG []SetupKey `gorm:"foreignKey:AccountID;references:id"`
Network *Network `gorm:"embedded;embeddedPrefix:network_"`
Peers map[string]*nbpeer.Peer `gorm:"-"`
PeersG []nbpeer.Peer `json:"-" gorm:"foreignKey:AccountID;references:id"`
PeersG []nbpeer.Peer `gorm:"foreignKey:AccountID;references:id"`
Users map[string]*User `gorm:"-"`
UsersG []User `json:"-" gorm:"foreignKey:AccountID;references:id"`
UsersG []User `gorm:"foreignKey:AccountID;references:id"`
Groups map[string]*Group `gorm:"-"`
GroupsG []*Group `json:"-" gorm:"foreignKey:AccountID;references:id"`
GroupsG []*Group `gorm:"foreignKey:AccountID;references:id"`
Policies []*Policy `gorm:"foreignKey:AccountID;references:id"`
Routes map[route.ID]*route.Route `gorm:"-"`
RoutesG []route.Route `json:"-" gorm:"foreignKey:AccountID;references:id"`
RoutesG []route.Route `gorm:"foreignKey:AccountID;references:id"`
NameServerGroups map[string]*nbdns.NameServerGroup `gorm:"-"`
NameServerGroupsG []nbdns.NameServerGroup `json:"-" gorm:"foreignKey:AccountID;references:id"`
NameServerGroupsG []nbdns.NameServerGroup `gorm:"foreignKey:AccountID;references:id"`
DNSSettings DNSSettings `gorm:"embedded;embeddedPrefix:dns_settings_"`
PostureChecks []*posture.Checks `gorm:"foreignKey:AccountID;references:id"`
// Settings is a dictionary of Account settings

View File

@@ -17,7 +17,7 @@ type Group struct {
ID string `gorm:"primaryKey"`
// AccountID is a reference to Account that this object belongs
AccountID string `json:"-" gorm:"index"`
AccountID string `gorm:"index"`
// Name visible in the UI
Name string

View File

@@ -55,7 +55,7 @@ type Policy struct {
ID string `gorm:"primaryKey"`
// AccountID is a reference to Account that this object belongs
AccountID string `json:"-" gorm:"index"`
AccountID string `gorm:"index"`
// Name of the Policy
Name string

View File

@@ -43,7 +43,7 @@ type PolicyRule struct {
ID string `gorm:"primaryKey"`
// PolicyID is a reference to Policy that this object belongs
PolicyID string `json:"-" gorm:"index"`
PolicyID string `gorm:"index"`
// Name of the rule visible in the UI
Name string

View File

@@ -33,7 +33,7 @@ type SetupKeyType string
type SetupKey struct {
Id string
// AccountID is a reference to Account that this object belongs
AccountID string `json:"-" gorm:"index"`
AccountID string `gorm:"index"`
Key string
KeySecret string `gorm:"index"`
Name string

View File

@@ -72,7 +72,7 @@ type UserInfo struct {
type User struct {
Id string `gorm:"primaryKey"`
// AccountID is a reference to Account that this object belongs
AccountID string `json:"-" gorm:"index"`
AccountID string `gorm:"index"`
Role UserRole
IsServiceUser bool
// NonDeletable indicates whether the service user can be deleted
@@ -82,7 +82,7 @@ type User struct {
// AutoGroups is a list of Group IDs to auto-assign to peers registered by this user
AutoGroups []string `gorm:"serializer:json"`
PATs map[string]*PersonalAccessToken `gorm:"-"`
PATsG []PersonalAccessToken `json:"-" gorm:"foreignKey:UserID;references:id;constraint:OnDelete:CASCADE;"`
PATsG []PersonalAccessToken `gorm:"foreignKey:UserID;references:id;constraint:OnDelete:CASCADE;"`
// Blocked indicates whether the user is blocked. Blocked users can't use the system.
Blocked bool
// PendingApproval indicates whether the user requires approval before being activated