commit c2c907852d (parent 3123f858bb)
Author: Owen
Date: 2025-10-04 18:36:44 -07:00

320 changed files with 35785 additions and 2984 deletions

server/db/countries.ts (new file, 1014 lines)

File diff suppressed because it is too large.

server/db/maxmind.ts (new file, 13 lines)

@@ -0,0 +1,13 @@
import maxmind, { CountryResponse, Reader } from "maxmind";
import config from "@server/lib/config";
let maxmindLookup: Reader<CountryResponse> | null;
if (config.getRawConfig().server.maxmind_db_path) {
maxmindLookup = await maxmind.open<CountryResponse>(
config.getRawConfig().server.maxmind_db_path!
);
} else {
maxmindLookup = null;
}
export { maxmindLookup };
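
For orientation (not part of the diff): the exported lookup is nullable, so call sites have to guard before querying. A minimal usage sketch, with the helper name being illustrative:

import { maxmindLookup } from "@server/db/maxmind";

// Resolve an ISO country code for a client IP, or null when the
// MaxMind database is not configured or the IP is unknown.
export function lookupCountryCode(ip: string): string | null {
    if (!maxmindLookup) {
        return null; // maxmind_db_path not set in config
    }
    const result = maxmindLookup.get(ip); // CountryResponse | null
    return result?.country?.iso_code ?? null;
}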


@@ -39,7 +39,7 @@ function createDb() {
connectionString,
max: 20,
idleTimeoutMillis: 30000,
-connectionTimeoutMillis: 2000,
+connectionTimeoutMillis: 5000,
});
const replicas = [];
@@ -52,7 +52,7 @@ function createDb() {
connectionString: conn.connection_string,
max: 10,
idleTimeoutMillis: 30000,
-connectionTimeoutMillis: 2000,
+connectionTimeoutMillis: 5000,
});
replicas.push(DrizzlePostgres(replicaPool));
}


@@ -1,2 +1,3 @@
export * from "./driver";
-export * from "./schema";
+export * from "./schema";
+export * from "./privateSchema";


@@ -0,0 +1,245 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import {
pgTable,
serial,
varchar,
boolean,
integer,
bigint,
real,
text
} from "drizzle-orm/pg-core";
import { InferSelectModel } from "drizzle-orm";
import { domains, orgs, targets, users, exitNodes, sessions } from "./schema";
export const certificates = pgTable("certificates", {
certId: serial("certId").primaryKey(),
domain: varchar("domain", { length: 255 }).notNull().unique(),
domainId: varchar("domainId").references(() => domains.domainId, {
onDelete: "cascade"
}),
wildcard: boolean("wildcard").default(false),
status: varchar("status", { length: 50 }).notNull().default("pending"), // pending, requested, valid, expired, failed
expiresAt: bigint("expiresAt", { mode: "number" }),
lastRenewalAttempt: bigint("lastRenewalAttempt", { mode: "number" }),
createdAt: bigint("createdAt", { mode: "number" }).notNull(),
updatedAt: bigint("updatedAt", { mode: "number" }).notNull(),
orderId: varchar("orderId", { length: 500 }),
errorMessage: text("errorMessage"),
renewalCount: integer("renewalCount").default(0),
certFile: text("certFile"),
keyFile: text("keyFile")
});
export const dnsChallenge = pgTable("dnsChallenges", {
dnsChallengeId: serial("dnsChallengeId").primaryKey(),
domain: varchar("domain", { length: 255 }).notNull(),
token: varchar("token", { length: 255 }).notNull(),
keyAuthorization: varchar("keyAuthorization", { length: 1000 }).notNull(),
createdAt: bigint("createdAt", { mode: "number" }).notNull(),
expiresAt: bigint("expiresAt", { mode: "number" }).notNull(),
completed: boolean("completed").default(false)
});
export const account = pgTable("account", {
accountId: serial("accountId").primaryKey(),
userId: varchar("userId")
.notNull()
.references(() => users.userId, { onDelete: "cascade" })
});
export const customers = pgTable("customers", {
customerId: varchar("customerId", { length: 255 }).primaryKey().notNull(),
orgId: varchar("orgId", { length: 255 })
.notNull()
.references(() => orgs.orgId, { onDelete: "cascade" }),
// accountId: integer("accountId")
// .references(() => account.accountId, { onDelete: "cascade" }), // Optional, if using accounts
email: varchar("email", { length: 255 }),
name: varchar("name", { length: 255 }),
phone: varchar("phone", { length: 50 }),
address: text("address"),
createdAt: bigint("createdAt", { mode: "number" }).notNull(),
updatedAt: bigint("updatedAt", { mode: "number" }).notNull()
});
export const subscriptions = pgTable("subscriptions", {
subscriptionId: varchar("subscriptionId", { length: 255 })
.primaryKey()
.notNull(),
customerId: varchar("customerId", { length: 255 })
.notNull()
.references(() => customers.customerId, { onDelete: "cascade" }),
status: varchar("status", { length: 50 }).notNull().default("active"), // active, past_due, canceled, unpaid
canceledAt: bigint("canceledAt", { mode: "number" }),
createdAt: bigint("createdAt", { mode: "number" }).notNull(),
updatedAt: bigint("updatedAt", { mode: "number" }),
billingCycleAnchor: bigint("billingCycleAnchor", { mode: "number" })
});
export const subscriptionItems = pgTable("subscriptionItems", {
subscriptionItemId: serial("subscriptionItemId").primaryKey(),
subscriptionId: varchar("subscriptionId", { length: 255 })
.notNull()
.references(() => subscriptions.subscriptionId, {
onDelete: "cascade"
}),
planId: varchar("planId", { length: 255 }).notNull(),
priceId: varchar("priceId", { length: 255 }),
meterId: varchar("meterId", { length: 255 }),
unitAmount: real("unitAmount"),
tiers: text("tiers"),
interval: varchar("interval", { length: 50 }),
currentPeriodStart: bigint("currentPeriodStart", { mode: "number" }),
currentPeriodEnd: bigint("currentPeriodEnd", { mode: "number" }),
name: varchar("name", { length: 255 })
});
export const accountDomains = pgTable("accountDomains", {
accountId: integer("accountId")
.notNull()
.references(() => account.accountId, { onDelete: "cascade" }),
domainId: varchar("domainId")
.notNull()
.references(() => domains.domainId, { onDelete: "cascade" })
});
export const usage = pgTable("usage", {
usageId: varchar("usageId", { length: 255 }).primaryKey(),
featureId: varchar("featureId", { length: 255 }).notNull(),
orgId: varchar("orgId")
.references(() => orgs.orgId, { onDelete: "cascade" })
.notNull(),
meterId: varchar("meterId", { length: 255 }),
instantaneousValue: real("instantaneousValue"),
latestValue: real("latestValue").notNull(),
previousValue: real("previousValue"),
updatedAt: bigint("updatedAt", { mode: "number" }).notNull(),
rolledOverAt: bigint("rolledOverAt", { mode: "number" }),
nextRolloverAt: bigint("nextRolloverAt", { mode: "number" })
});
export const limits = pgTable("limits", {
limitId: varchar("limitId", { length: 255 }).primaryKey(),
featureId: varchar("featureId", { length: 255 }).notNull(),
orgId: varchar("orgId")
.references(() => orgs.orgId, {
onDelete: "cascade"
})
.notNull(),
value: real("value"),
description: text("description")
});
export const usageNotifications = pgTable("usageNotifications", {
notificationId: serial("notificationId").primaryKey(),
orgId: varchar("orgId")
.notNull()
.references(() => orgs.orgId, { onDelete: "cascade" }),
featureId: varchar("featureId", { length: 255 }).notNull(),
limitId: varchar("limitId", { length: 255 }).notNull(),
notificationType: varchar("notificationType", { length: 50 }).notNull(),
sentAt: bigint("sentAt", { mode: "number" }).notNull()
});
export const domainNamespaces = pgTable("domainNamespaces", {
domainNamespaceId: varchar("domainNamespaceId", {
length: 255
}).primaryKey(),
domainId: varchar("domainId")
.references(() => domains.domainId, {
onDelete: "set null"
})
.notNull()
});
export const exitNodeOrgs = pgTable("exitNodeOrgs", {
exitNodeId: integer("exitNodeId")
.notNull()
.references(() => exitNodes.exitNodeId, { onDelete: "cascade" }),
orgId: text("orgId")
.notNull()
.references(() => orgs.orgId, { onDelete: "cascade" })
});
export const remoteExitNodes = pgTable("remoteExitNode", {
remoteExitNodeId: varchar("id").primaryKey(),
secretHash: varchar("secretHash").notNull(),
dateCreated: varchar("dateCreated").notNull(),
version: varchar("version"),
exitNodeId: integer("exitNodeId").references(() => exitNodes.exitNodeId, {
onDelete: "cascade"
})
});
export const remoteExitNodeSessions = pgTable("remoteExitNodeSession", {
sessionId: varchar("id").primaryKey(),
remoteExitNodeId: varchar("remoteExitNodeId")
.notNull()
.references(() => remoteExitNodes.remoteExitNodeId, {
onDelete: "cascade"
}),
expiresAt: bigint("expiresAt", { mode: "number" }).notNull()
});
export const loginPage = pgTable("loginPage", {
loginPageId: serial("loginPageId").primaryKey(),
subdomain: varchar("subdomain"),
fullDomain: varchar("fullDomain"),
exitNodeId: integer("exitNodeId").references(() => exitNodes.exitNodeId, {
onDelete: "set null"
}),
domainId: varchar("domainId").references(() => domains.domainId, {
onDelete: "set null"
})
});
export const loginPageOrg = pgTable("loginPageOrg", {
loginPageId: integer("loginPageId")
.notNull()
.references(() => loginPage.loginPageId, { onDelete: "cascade" }),
orgId: varchar("orgId")
.notNull()
.references(() => orgs.orgId, { onDelete: "cascade" })
});
export const sessionTransferToken = pgTable("sessionTransferToken", {
token: varchar("token").primaryKey(),
sessionId: varchar("sessionId")
.notNull()
.references(() => sessions.sessionId, {
onDelete: "cascade"
}),
encryptedSession: text("encryptedSession").notNull(),
expiresAt: bigint("expiresAt", { mode: "number" }).notNull()
});
export type Limit = InferSelectModel<typeof limits>;
export type Account = InferSelectModel<typeof account>;
export type Certificate = InferSelectModel<typeof certificates>;
export type DnsChallenge = InferSelectModel<typeof dnsChallenge>;
export type Customer = InferSelectModel<typeof customers>;
export type Subscription = InferSelectModel<typeof subscriptions>;
export type SubscriptionItem = InferSelectModel<typeof subscriptionItems>;
export type Usage = InferSelectModel<typeof usage>;
export type UsageLimit = InferSelectModel<typeof limits>;
export type AccountDomain = InferSelectModel<typeof accountDomains>;
export type UsageNotification = InferSelectModel<typeof usageNotifications>;
export type RemoteExitNode = InferSelectModel<typeof remoteExitNodes>;
export type RemoteExitNodeSession = InferSelectModel<
typeof remoteExitNodeSessions
>;
export type ExitNodeOrg = InferSelectModel<typeof exitNodeOrgs>;
export type LoginPage = InferSelectModel<typeof loginPage>;
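
As a usage sketch (not part of this commit), the new tables can be queried through drizzle like any other schema; `eq` and `db` are the standard drizzle helpers, and the import path assumes the barrel re-export added above:

import { eq } from "drizzle-orm";
import { db, certificates, type Certificate } from "@server/db";

// Fetch the certificate row for a domain, if one exists and is valid.
async function getValidCertificate(domain: string): Promise<Certificate | null> {
    const rows = await db
        .select()
        .from(certificates)
        .where(eq(certificates.domain, domain))
        .limit(1);
    return rows.length > 0 && rows[0].status === "valid" ? rows[0] : null;
}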


@@ -128,6 +128,27 @@ export const targets = pgTable("targets", {
rewritePathType: text("rewritePathType") // exact, prefix, regex, stripPrefix
});
export const targetHealthCheck = pgTable("targetHealthCheck", {
targetHealthCheckId: serial("targetHealthCheckId").primaryKey(),
targetId: integer("targetId")
.notNull()
.references(() => targets.targetId, { onDelete: "cascade" }),
hcEnabled: boolean("hcEnabled").notNull().default(false),
hcPath: varchar("hcPath"),
hcScheme: varchar("hcScheme"),
hcMode: varchar("hcMode").default("http"),
hcHostname: varchar("hcHostname"),
hcPort: integer("hcPort"),
hcInterval: integer("hcInterval").default(30), // in seconds
hcUnhealthyInterval: integer("hcUnhealthyInterval").default(30), // in seconds
hcTimeout: integer("hcTimeout").default(5), // in seconds
hcHeaders: varchar("hcHeaders"),
hcFollowRedirects: boolean("hcFollowRedirects").default(true),
hcMethod: varchar("hcMethod").default("GET"),
hcStatus: integer("hcStatus"), // http code
hcHealth: text("hcHealth").default("unknown") // "unknown", "healthy", "unhealthy"
});
export const exitNodes = pgTable("exitNodes", {
exitNodeId: serial("exitNodeId").primaryKey(),
name: varchar("name").notNull(),
@@ -689,3 +710,4 @@ export type OrgDomains = InferSelectModel<typeof orgDomains>;
export type SiteResource = InferSelectModel<typeof siteResources>;
export type SetupToken = InferSelectModel<typeof setupTokens>;
export type HostMeta = InferSelectModel<typeof hostMeta>;
export type TargetHealthCheck = InferSelectModel<typeof targetHealthCheck>;
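
A hedged sketch of how the new table might be written to (the helper is illustrative; the column defaults come from the schema above):

import { db, targetHealthCheck } from "@server/db";

// Enable an HTTP health check for an existing target, leaning on the
// schema defaults: GET, 30s interval, 5s timeout, follow redirects.
async function enableHttpHealthCheck(targetId: number, path = "/healthz") {
    await db.insert(targetHealthCheck).values({
        targetId,
        hcEnabled: true,
        hcMode: "http",
        hcPath: path
    });
}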


@@ -0,0 +1,202 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
// Simple test file for the rate limit service with Redis
// Run with: npx ts-node rateLimitService.test.ts
import { RateLimitService } from './rateLimit';
function generateClientId() {
return 'client-' + Math.random().toString(36).substring(2, 15);
}
async function runTests() {
console.log('Starting Rate Limit Service Tests...\n');
const rateLimitService = new RateLimitService();
let testsPassed = 0;
let testsTotal = 0;
// Helper function to run a test
async function test(name: string, testFn: () => Promise<void>) {
testsTotal++;
try {
await testFn();
console.log(`✅ ${name}`);
testsPassed++;
} catch (error) {
console.log(`❌ ${name}: ${error}`);
}
}
// Helper function for assertions
function assert(condition: boolean, message: string) {
if (!condition) {
throw new Error(message);
}
}
// Test 1: Basic rate limiting
await test('Should allow requests under the limit', async () => {
const clientId = generateClientId();
const maxRequests = 5;
for (let i = 0; i < maxRequests - 1; i++) {
const result = await rateLimitService.checkRateLimit(clientId, undefined, maxRequests);
assert(!result.isLimited, `Request ${i + 1} should be allowed`);
assert(result.totalHits === i + 1, `Expected ${i + 1} hits, got ${result.totalHits}`);
}
});
// Test 2: Rate limit blocking
await test('Should block requests over the limit', async () => {
const clientId = generateClientId();
const maxRequests = 30;
// Use up all allowed requests
for (let i = 0; i < maxRequests - 1; i++) {
const result = await rateLimitService.checkRateLimit(clientId, undefined, maxRequests);
assert(!result.isLimited, `Request ${i + 1} should be allowed`);
}
// Next request should be blocked
const blockedResult = await rateLimitService.checkRateLimit(clientId, undefined, maxRequests);
assert(blockedResult.isLimited, 'Request should be blocked');
assert(blockedResult.reason === 'global', 'Should be blocked for global reason');
});
// Test 3: Message type limits
await test('Should handle message type limits', async () => {
const clientId = generateClientId();
const globalMax = 10;
const messageTypeMax = 2;
// Send messages of type 'ping' up to the limit
for (let i = 0; i < messageTypeMax - 1; i++) {
const result = await rateLimitService.checkRateLimit(
clientId,
'ping',
globalMax,
messageTypeMax
);
assert(!result.isLimited, `Ping message ${i + 1} should be allowed`);
}
// Next 'ping' should be blocked
const blockedResult = await rateLimitService.checkRateLimit(
clientId,
'ping',
globalMax,
messageTypeMax
);
assert(blockedResult.isLimited, 'Ping message should be blocked');
assert(blockedResult.reason === 'message_type:ping', 'Should be blocked for message type');
// Other message types should still work
const otherResult = await rateLimitService.checkRateLimit(
clientId,
'pong',
globalMax,
messageTypeMax
);
assert(!otherResult.isLimited, 'Pong message should be allowed');
});
// Test 4: Reset functionality
await test('Should reset client correctly', async () => {
const clientId = generateClientId();
const maxRequests = 3;
// Use up some requests
await rateLimitService.checkRateLimit(clientId, undefined, maxRequests);
await rateLimitService.checkRateLimit(clientId, 'test', maxRequests);
// Reset the client
await rateLimitService.resetKey(clientId);
// Should be able to make fresh requests
const result = await rateLimitService.checkRateLimit(clientId, undefined, maxRequests);
assert(!result.isLimited, 'Request after reset should be allowed');
assert(result.totalHits === 1, 'Should have 1 hit after reset');
});
// Test 5: Different clients are independent
await test('Should handle different clients independently', async () => {
const client1 = generateClientId();
const client2 = generateClientId();
const maxRequests = 2;
// Client 1 uses up their limit
await rateLimitService.checkRateLimit(client1, undefined, maxRequests);
await rateLimitService.checkRateLimit(client1, undefined, maxRequests);
const client1Blocked = await rateLimitService.checkRateLimit(client1, undefined, maxRequests);
assert(client1Blocked.isLimited, 'Client 1 should be blocked');
// Client 2 should still be able to make requests
const client2Result = await rateLimitService.checkRateLimit(client2, undefined, maxRequests);
assert(!client2Result.isLimited, 'Client 2 should not be blocked');
assert(client2Result.totalHits === 1, 'Client 2 should have 1 hit');
});
// Test 6: Decrement functionality
await test('Should decrement correctly', async () => {
const clientId = generateClientId();
const maxRequests = 5;
// Make some requests
await rateLimitService.checkRateLimit(clientId, undefined, maxRequests);
await rateLimitService.checkRateLimit(clientId, undefined, maxRequests);
let result = await rateLimitService.checkRateLimit(clientId, undefined, maxRequests);
assert(result.totalHits === 3, 'Should have 3 hits before decrement');
// Decrement
await rateLimitService.decrementRateLimit(clientId);
// Next request should reflect the decrement
result = await rateLimitService.checkRateLimit(clientId, undefined, maxRequests);
assert(result.totalHits === 3, 'Should have 3 hits after decrement + increment');
});
// Wait a moment for any pending Redis operations
console.log('\nWaiting for Redis sync...');
await new Promise(resolve => setTimeout(resolve, 1000));
// Force sync to test Redis integration
await test('Should sync to Redis', async () => {
await rateLimitService.forceSyncAllPendingData();
// If this doesn't throw, Redis sync is working
assert(true, 'Redis sync completed');
});
// Cleanup
await rateLimitService.cleanup();
// Results
console.log(`\n--- Test Results ---`);
console.log(`✅ Passed: ${testsPassed}/${testsTotal}`);
console.log(`❌ Failed: ${testsTotal - testsPassed}/${testsTotal}`);
if (testsPassed === testsTotal) {
console.log('\n🎉 All tests passed!');
process.exit(0);
} else {
console.log('\n💥 Some tests failed!');
process.exit(1);
}
}
// Run the tests
runTests().catch(error => {
console.error('Test runner error:', error);
process.exit(1);
});


@@ -0,0 +1,458 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import logger from "@server/logger";
import redisManager from "@server/db/private/redis";
import { build } from "@server/build";
// Rate limiting configuration
export const RATE_LIMIT_WINDOW = 60; // 1 minute in seconds
export const RATE_LIMIT_MAX_REQUESTS = 100;
export const RATE_LIMIT_PER_MESSAGE_TYPE = 20; // Per message type limit within the window
// Configuration for batched Redis sync
export const REDIS_SYNC_THRESHOLD = 15; // Sync to Redis every N messages
export const REDIS_SYNC_FORCE_INTERVAL = 30000; // Force sync every 30 seconds as backup
interface RateLimitTracker {
count: number;
windowStart: number;
pendingCount: number;
lastSyncedCount: number;
}
interface RateLimitResult {
isLimited: boolean;
reason?: string;
totalHits?: number;
resetTime?: Date;
}
export class RateLimitService {
private localRateLimitTracker: Map<string, RateLimitTracker> = new Map();
private localMessageTypeRateLimitTracker: Map<string, RateLimitTracker> = new Map();
private cleanupInterval: NodeJS.Timeout | null = null;
private forceSyncInterval: NodeJS.Timeout | null = null;
constructor() {
if (build == "oss") {
return;
}
// Start cleanup and sync intervals
this.cleanupInterval = setInterval(() => {
this.cleanupLocalRateLimit().catch((error) => {
logger.error("Error during rate limit cleanup:", error);
});
}, 60000); // Run cleanup every minute
this.forceSyncInterval = setInterval(() => {
this.forceSyncAllPendingData().catch((error) => {
logger.error("Error during force sync:", error);
});
}, REDIS_SYNC_FORCE_INTERVAL);
}
// Redis keys
private getRateLimitKey(clientId: string): string {
return `ratelimit:${clientId}`;
}
private getMessageTypeRateLimitKey(clientId: string, messageType: string): string {
return `ratelimit:${clientId}:${messageType}`;
}
// Helper function to sync local rate limit data to Redis
private async syncRateLimitToRedis(
clientId: string,
tracker: RateLimitTracker
): Promise<void> {
if (!redisManager.isRedisEnabled() || tracker.pendingCount === 0) return;
try {
const currentTime = Math.floor(Date.now() / 1000);
const globalKey = this.getRateLimitKey(clientId);
// Get current value and add pending count
const currentValue = await redisManager.hget(
globalKey,
currentTime.toString()
);
const newValue = (
parseInt(currentValue || "0") + tracker.pendingCount
).toString();
await redisManager.hset(globalKey, currentTime.toString(), newValue);
// Set TTL using the client directly
if (redisManager.getClient()) {
await redisManager
.getClient()
.expire(globalKey, RATE_LIMIT_WINDOW + 10);
}
// Update tracking
tracker.lastSyncedCount = tracker.count;
tracker.pendingCount = 0;
logger.debug(`Synced global rate limit to Redis for client ${clientId}`);
} catch (error) {
logger.error("Failed to sync global rate limit to Redis:", error);
}
}
private async syncMessageTypeRateLimitToRedis(
clientId: string,
messageType: string,
tracker: RateLimitTracker
): Promise<void> {
if (!redisManager.isRedisEnabled() || tracker.pendingCount === 0) return;
try {
const currentTime = Math.floor(Date.now() / 1000);
const messageTypeKey = this.getMessageTypeRateLimitKey(clientId, messageType);
// Get current value and add pending count
const currentValue = await redisManager.hget(
messageTypeKey,
currentTime.toString()
);
const newValue = (
parseInt(currentValue || "0") + tracker.pendingCount
).toString();
await redisManager.hset(
messageTypeKey,
currentTime.toString(),
newValue
);
// Set TTL using the client directly
if (redisManager.getClient()) {
await redisManager
.getClient()
.expire(messageTypeKey, RATE_LIMIT_WINDOW + 10);
}
// Update tracking
tracker.lastSyncedCount = tracker.count;
tracker.pendingCount = 0;
logger.debug(
`Synced message type rate limit to Redis for client ${clientId}, type ${messageType}`
);
} catch (error) {
logger.error("Failed to sync message type rate limit to Redis:", error);
}
}
// Initialize local tracker from Redis data
private async initializeLocalTracker(clientId: string): Promise<RateLimitTracker> {
const currentTime = Math.floor(Date.now() / 1000);
const windowStart = currentTime - RATE_LIMIT_WINDOW;
if (!redisManager.isRedisEnabled()) {
return {
count: 0,
windowStart: currentTime,
pendingCount: 0,
lastSyncedCount: 0
};
}
try {
const globalKey = this.getRateLimitKey(clientId);
const globalRateLimitData = await redisManager.hgetall(globalKey);
let count = 0;
for (const [timestamp, countStr] of Object.entries(globalRateLimitData)) {
const time = parseInt(timestamp);
if (time >= windowStart) {
count += parseInt(countStr);
}
}
return {
count,
windowStart: currentTime,
pendingCount: 0,
lastSyncedCount: count
};
} catch (error) {
logger.error("Failed to initialize global tracker from Redis:", error);
return {
count: 0,
windowStart: currentTime,
pendingCount: 0,
lastSyncedCount: 0
};
}
}
private async initializeMessageTypeTracker(
clientId: string,
messageType: string
): Promise<RateLimitTracker> {
const currentTime = Math.floor(Date.now() / 1000);
const windowStart = currentTime - RATE_LIMIT_WINDOW;
if (!redisManager.isRedisEnabled()) {
return {
count: 0,
windowStart: currentTime,
pendingCount: 0,
lastSyncedCount: 0
};
}
try {
const messageTypeKey = this.getMessageTypeRateLimitKey(clientId, messageType);
const messageTypeRateLimitData = await redisManager.hgetall(messageTypeKey);
let count = 0;
for (const [timestamp, countStr] of Object.entries(messageTypeRateLimitData)) {
const time = parseInt(timestamp);
if (time >= windowStart) {
count += parseInt(countStr);
}
}
return {
count,
windowStart: currentTime,
pendingCount: 0,
lastSyncedCount: count
};
} catch (error) {
logger.error("Failed to initialize message type tracker from Redis:", error);
return {
count: 0,
windowStart: currentTime,
pendingCount: 0,
lastSyncedCount: 0
};
}
}
// Main rate limiting function
async checkRateLimit(
clientId: string,
messageType?: string,
maxRequests: number = RATE_LIMIT_MAX_REQUESTS,
messageTypeLimit: number = RATE_LIMIT_PER_MESSAGE_TYPE,
windowMs: number = RATE_LIMIT_WINDOW * 1000
): Promise<RateLimitResult> {
const currentTime = Math.floor(Date.now() / 1000);
const windowStart = currentTime - Math.floor(windowMs / 1000);
// Check global rate limit
let globalTracker = this.localRateLimitTracker.get(clientId);
if (!globalTracker || globalTracker.windowStart < windowStart) {
// New window or first request - initialize from Redis if available
globalTracker = await this.initializeLocalTracker(clientId);
globalTracker.windowStart = currentTime;
this.localRateLimitTracker.set(clientId, globalTracker);
}
// Increment global counters
globalTracker.count++;
globalTracker.pendingCount++;
this.localRateLimitTracker.set(clientId, globalTracker);
// Check if global limit would be exceeded
if (globalTracker.count >= maxRequests) {
return {
isLimited: true,
reason: "global",
totalHits: globalTracker.count,
resetTime: new Date((globalTracker.windowStart + Math.floor(windowMs / 1000)) * 1000)
};
}
// Sync to Redis if threshold reached
if (globalTracker.pendingCount >= REDIS_SYNC_THRESHOLD) {
this.syncRateLimitToRedis(clientId, globalTracker);
}
// Check message type specific rate limit if messageType is provided
if (messageType) {
const messageTypeKey = `${clientId}:${messageType}`;
let messageTypeTracker = this.localMessageTypeRateLimitTracker.get(messageTypeKey);
if (!messageTypeTracker || messageTypeTracker.windowStart < windowStart) {
// New window or first request for this message type - initialize from Redis if available
messageTypeTracker = await this.initializeMessageTypeTracker(clientId, messageType);
messageTypeTracker.windowStart = currentTime;
this.localMessageTypeRateLimitTracker.set(messageTypeKey, messageTypeTracker);
}
// Increment message type counters
messageTypeTracker.count++;
messageTypeTracker.pendingCount++;
this.localMessageTypeRateLimitTracker.set(messageTypeKey, messageTypeTracker);
// Check if message type limit would be exceeded
if (messageTypeTracker.count >= messageTypeLimit) {
return {
isLimited: true,
reason: `message_type:${messageType}`,
totalHits: messageTypeTracker.count,
resetTime: new Date((messageTypeTracker.windowStart + Math.floor(windowMs / 1000)) * 1000)
};
}
// Sync to Redis if threshold reached
if (messageTypeTracker.pendingCount >= REDIS_SYNC_THRESHOLD) {
this.syncMessageTypeRateLimitToRedis(clientId, messageType, messageTypeTracker);
}
}
return {
isLimited: false,
totalHits: globalTracker.count,
resetTime: new Date((globalTracker.windowStart + Math.floor(windowMs / 1000)) * 1000)
};
}
// Decrement function for skipSuccessfulRequests/skipFailedRequests functionality
async decrementRateLimit(clientId: string, messageType?: string): Promise<void> {
// Decrement global counter
const globalTracker = this.localRateLimitTracker.get(clientId);
if (globalTracker && globalTracker.count > 0) {
globalTracker.count--;
// We need to account for this in pending count to sync correctly
globalTracker.pendingCount--;
}
// Decrement message type counter if provided
if (messageType) {
const messageTypeKey = `${clientId}:${messageType}`;
const messageTypeTracker = this.localMessageTypeRateLimitTracker.get(messageTypeKey);
if (messageTypeTracker && messageTypeTracker.count > 0) {
messageTypeTracker.count--;
messageTypeTracker.pendingCount--;
}
}
}
// Reset key function
async resetKey(clientId: string): Promise<void> {
// Remove from local tracking
this.localRateLimitTracker.delete(clientId);
// Remove all message type entries for this client
for (const [key] of this.localMessageTypeRateLimitTracker) {
if (key.startsWith(`${clientId}:`)) {
this.localMessageTypeRateLimitTracker.delete(key);
}
}
// Remove from Redis if enabled
if (redisManager.isRedisEnabled()) {
const globalKey = this.getRateLimitKey(clientId);
await redisManager.del(globalKey);
// Get all message type keys for this client and delete them
const client = redisManager.getClient();
if (client) {
const messageTypeKeys = await client.keys(`ratelimit:${clientId}:*`);
if (messageTypeKeys.length > 0) {
await Promise.all(messageTypeKeys.map(key => redisManager.del(key)));
}
}
}
}
// Cleanup old local rate limit entries and force sync pending data
private async cleanupLocalRateLimit(): Promise<void> {
const currentTime = Math.floor(Date.now() / 1000);
const windowStart = currentTime - RATE_LIMIT_WINDOW;
// Clean up global rate limit tracking and sync pending data
for (const [clientId, tracker] of this.localRateLimitTracker.entries()) {
if (tracker.windowStart < windowStart) {
// Sync any pending data before cleanup
if (tracker.pendingCount > 0) {
await this.syncRateLimitToRedis(clientId, tracker);
}
this.localRateLimitTracker.delete(clientId);
}
}
// Clean up message type rate limit tracking and sync pending data
for (const [key, tracker] of this.localMessageTypeRateLimitTracker.entries()) {
if (tracker.windowStart < windowStart) {
// Sync any pending data before cleanup
if (tracker.pendingCount > 0) {
const [clientId, messageType] = key.split(":", 2);
await this.syncMessageTypeRateLimitToRedis(clientId, messageType, tracker);
}
this.localMessageTypeRateLimitTracker.delete(key);
}
}
}
// Force sync all pending rate limit data to Redis
async forceSyncAllPendingData(): Promise<void> {
if (!redisManager.isRedisEnabled()) return;
logger.debug("Force syncing all pending rate limit data to Redis...");
// Sync all pending global rate limits
for (const [clientId, tracker] of this.localRateLimitTracker.entries()) {
if (tracker.pendingCount > 0) {
await this.syncRateLimitToRedis(clientId, tracker);
}
}
// Sync all pending message type rate limits
for (const [key, tracker] of this.localMessageTypeRateLimitTracker.entries()) {
if (tracker.pendingCount > 0) {
const [clientId, messageType] = key.split(":", 2);
await this.syncMessageTypeRateLimitToRedis(clientId, messageType, tracker);
}
}
logger.debug("Completed force sync of pending rate limit data");
}
// Cleanup function for graceful shutdown
async cleanup(): Promise<void> {
if (build == "oss") {
return;
}
// Clear intervals
if (this.cleanupInterval) {
clearInterval(this.cleanupInterval);
}
if (this.forceSyncInterval) {
clearInterval(this.forceSyncInterval);
}
// Force sync all pending data
await this.forceSyncAllPendingData();
// Clear local data
this.localRateLimitTracker.clear();
this.localMessageTypeRateLimitTracker.clear();
logger.info("Rate limit service cleanup completed");
}
}
// Export singleton instance
export const rateLimitService = new RateLimitService();
// Handle process termination
process.on("SIGTERM", () => rateLimitService.cleanup());
process.on("SIGINT", () => rateLimitService.cleanup());

server/db/private/redis.ts (new file, 782 lines)

@@ -0,0 +1,782 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import Redis, { RedisOptions } from "ioredis";
import logger from "@server/logger";
import config from "@server/lib/config";
import { build } from "@server/build";
class RedisManager {
public client: Redis | null = null;
private writeClient: Redis | null = null; // Master for writes
private readClient: Redis | null = null; // Replica for reads
private subscriber: Redis | null = null;
private publisher: Redis | null = null;
private isEnabled: boolean = false;
private isHealthy: boolean = true;
private isWriteHealthy: boolean = true;
private isReadHealthy: boolean = true;
private lastHealthCheck: number = 0;
private healthCheckInterval: number = 30000; // 30 seconds
private connectionTimeout: number = 15000; // 15 seconds
private commandTimeout: number = 15000; // 15 seconds
private hasReplicas: boolean = false;
private maxRetries: number = 3;
private baseRetryDelay: number = 100; // 100ms
private maxRetryDelay: number = 2000; // 2 seconds
private backoffMultiplier: number = 2;
private subscribers: Map<
string,
Set<(channel: string, message: string) => void>
> = new Map();
private reconnectionCallbacks: Set<() => Promise<void>> = new Set();
constructor() {
if (build == "oss") {
this.isEnabled = false;
return;
}
this.isEnabled = config.getRawPrivateConfig().flags?.enable_redis || false;
if (this.isEnabled) {
this.initializeClients();
}
}
// Register callback to be called when Redis reconnects
public onReconnection(callback: () => Promise<void>): void {
this.reconnectionCallbacks.add(callback);
}
// Unregister reconnection callback
public offReconnection(callback: () => Promise<void>): void {
this.reconnectionCallbacks.delete(callback);
}
private async triggerReconnectionCallbacks(): Promise<void> {
logger.info(`Triggering ${this.reconnectionCallbacks.size} reconnection callbacks`);
const promises = Array.from(this.reconnectionCallbacks).map(async (callback) => {
try {
await callback();
} catch (error) {
logger.error("Error in reconnection callback:", error);
}
});
await Promise.allSettled(promises);
}
private async resubscribeToChannels(): Promise<void> {
if (!this.subscriber || this.subscribers.size === 0) return;
logger.info(`Re-subscribing to ${this.subscribers.size} channels after Redis reconnection`);
try {
const channels = Array.from(this.subscribers.keys());
if (channels.length > 0) {
await this.subscriber.subscribe(...channels);
logger.info(`Successfully re-subscribed to channels: ${channels.join(', ')}`);
}
} catch (error) {
logger.error("Failed to re-subscribe to channels:", error);
}
}
private getRedisConfig(): RedisOptions {
const redisConfig = config.getRawPrivateConfig().redis!;
const opts: RedisOptions = {
host: redisConfig.host!,
port: redisConfig.port!,
password: redisConfig.password,
db: redisConfig.db,
// tls: {
// rejectUnauthorized:
// redisConfig.tls?.reject_unauthorized || false
// }
};
return opts;
}
private getReplicaRedisConfig(): RedisOptions | null {
const redisConfig = config.getRawPrivateConfig().redis!;
if (!redisConfig.replicas || redisConfig.replicas.length === 0) {
return null;
}
// Use the first replica for simplicity
// In production, you might want to implement load balancing across replicas
const replica = redisConfig.replicas[0];
const opts: RedisOptions = {
host: replica.host!,
port: replica.port!,
password: replica.password,
db: replica.db || redisConfig.db,
// tls: {
// rejectUnauthorized:
// replica.tls?.reject_unauthorized || false
// }
};
return opts;
}
// Add reconnection logic in initializeClients
private initializeClients(): void {
const masterConfig = this.getRedisConfig();
const replicaConfig = this.getReplicaRedisConfig();
this.hasReplicas = replicaConfig !== null;
try {
// Initialize master connection for writes
this.writeClient = new Redis({
...masterConfig,
enableReadyCheck: false,
maxRetriesPerRequest: 3,
keepAlive: 30000,
connectTimeout: this.connectionTimeout,
commandTimeout: this.commandTimeout,
});
// Initialize replica connection for reads (if available)
if (this.hasReplicas) {
this.readClient = new Redis({
...replicaConfig!,
enableReadyCheck: false,
maxRetriesPerRequest: 3,
keepAlive: 30000,
connectTimeout: this.connectionTimeout,
commandTimeout: this.commandTimeout,
});
} else {
// Fallback to master for reads if no replicas
this.readClient = this.writeClient;
}
// Backward compatibility - point to write client
this.client = this.writeClient;
// Publisher uses master (writes)
this.publisher = new Redis({
...masterConfig,
enableReadyCheck: false,
maxRetriesPerRequest: 3,
keepAlive: 30000,
connectTimeout: this.connectionTimeout,
commandTimeout: this.commandTimeout,
});
// Subscriber uses replica if available (reads)
this.subscriber = new Redis({
...(this.hasReplicas ? replicaConfig! : masterConfig),
enableReadyCheck: false,
maxRetriesPerRequest: 3,
keepAlive: 30000,
connectTimeout: this.connectionTimeout,
commandTimeout: this.commandTimeout,
});
// Add reconnection handlers for write client
this.writeClient.on("error", (err) => {
logger.error("Redis write client error:", err);
this.isWriteHealthy = false;
this.isHealthy = false;
});
this.writeClient.on("reconnecting", () => {
logger.info("Redis write client reconnecting...");
this.isWriteHealthy = false;
this.isHealthy = false;
});
this.writeClient.on("ready", () => {
logger.info("Redis write client ready");
this.isWriteHealthy = true;
this.updateOverallHealth();
// Trigger reconnection callbacks when Redis comes back online
if (this.isHealthy) {
this.triggerReconnectionCallbacks().catch(error => {
logger.error("Error triggering reconnection callbacks:", error);
});
}
});
this.writeClient.on("connect", () => {
logger.info("Redis write client connected");
});
// Add reconnection handlers for read client (if different from write)
if (this.hasReplicas && this.readClient !== this.writeClient) {
this.readClient.on("error", (err) => {
logger.error("Redis read client error:", err);
this.isReadHealthy = false;
this.updateOverallHealth();
});
this.readClient.on("reconnecting", () => {
logger.info("Redis read client reconnecting...");
this.isReadHealthy = false;
this.updateOverallHealth();
});
this.readClient.on("ready", () => {
logger.info("Redis read client ready");
this.isReadHealthy = true;
this.updateOverallHealth();
// Trigger reconnection callbacks when Redis comes back online
if (this.isHealthy) {
this.triggerReconnectionCallbacks().catch(error => {
logger.error("Error triggering reconnection callbacks:", error);
});
}
});
this.readClient.on("connect", () => {
logger.info("Redis read client connected");
});
} else {
// If using same client for reads and writes
this.isReadHealthy = this.isWriteHealthy;
}
this.publisher.on("error", (err) => {
logger.error("Redis publisher error:", err);
});
this.publisher.on("ready", () => {
logger.info("Redis publisher ready");
});
this.publisher.on("connect", () => {
logger.info("Redis publisher connected");
});
this.subscriber.on("error", (err) => {
logger.error("Redis subscriber error:", err);
});
this.subscriber.on("ready", () => {
logger.info("Redis subscriber ready");
// Re-subscribe to all channels after reconnection
this.resubscribeToChannels().catch((error: any) => {
logger.error("Error re-subscribing to channels:", error);
});
});
this.subscriber.on("connect", () => {
logger.info("Redis subscriber connected");
});
// Set up message handler for subscriber
this.subscriber.on(
"message",
(channel: string, message: string) => {
const channelSubscribers = this.subscribers.get(channel);
if (channelSubscribers) {
channelSubscribers.forEach((callback) => {
try {
callback(channel, message);
} catch (error) {
logger.error(
`Error in subscriber callback for channel ${channel}:`,
error
);
}
});
}
}
);
const setupMessage = this.hasReplicas
? "Redis clients initialized successfully with replica support"
: "Redis clients initialized successfully (single instance)";
logger.info(setupMessage);
// Start periodic health monitoring
this.startHealthMonitoring();
} catch (error) {
logger.error("Failed to initialize Redis clients:", error);
this.isEnabled = false;
}
}
private updateOverallHealth(): void {
// Overall health is true if write is healthy and (read is healthy OR we don't have replicas)
this.isHealthy = this.isWriteHealthy && (this.isReadHealthy || !this.hasReplicas);
}
private async executeWithRetry<T>(
operation: () => Promise<T>,
operationName: string,
fallbackOperation?: () => Promise<T>
): Promise<T> {
let lastError: Error | null = null;
for (let attempt = 0; attempt <= this.maxRetries; attempt++) {
try {
return await operation();
} catch (error) {
lastError = error as Error;
// If this is the last attempt, try fallback if available
if (attempt === this.maxRetries && fallbackOperation) {
try {
logger.warn(`${operationName} primary operation failed, trying fallback`);
return await fallbackOperation();
} catch (fallbackError) {
logger.error(`${operationName} fallback also failed:`, fallbackError);
throw lastError;
}
}
// Don't retry on the last attempt
if (attempt === this.maxRetries) {
break;
}
// Calculate delay with exponential backoff
const delay = Math.min(
this.baseRetryDelay * Math.pow(this.backoffMultiplier, attempt),
this.maxRetryDelay
);
logger.warn(`${operationName} failed (attempt ${attempt + 1}/${this.maxRetries + 1}), retrying in ${delay}ms:`, error);
// Wait before retrying
await new Promise(resolve => setTimeout(resolve, delay));
}
}
logger.error(`${operationName} failed after ${this.maxRetries + 1} attempts:`, lastError);
throw lastError;
}
private startHealthMonitoring(): void {
if (!this.isEnabled) return;
// Check health every 30 seconds
setInterval(async () => {
try {
await this.checkRedisHealth();
} catch (error) {
logger.error("Error during Redis health monitoring:", error);
}
}, this.healthCheckInterval);
}
public isRedisEnabled(): boolean {
return this.isEnabled && this.client !== null && this.isHealthy;
}
private async checkRedisHealth(): Promise<boolean> {
const now = Date.now();
// Only check health every 30 seconds
if (now - this.lastHealthCheck < this.healthCheckInterval) {
return this.isHealthy;
}
this.lastHealthCheck = now;
if (!this.writeClient) {
this.isHealthy = false;
this.isWriteHealthy = false;
this.isReadHealthy = false;
return false;
}
try {
// Check write client (master) health
await Promise.race([
this.writeClient.ping(),
new Promise((_, reject) =>
setTimeout(() => reject(new Error('Write client health check timeout')), 2000)
)
]);
this.isWriteHealthy = true;
// Check read client health if it's different from write client
if (this.hasReplicas && this.readClient && this.readClient !== this.writeClient) {
try {
await Promise.race([
this.readClient.ping(),
new Promise((_, reject) =>
setTimeout(() => reject(new Error('Read client health check timeout')), 2000)
)
]);
this.isReadHealthy = true;
} catch (error) {
logger.error("Redis read client health check failed:", error);
this.isReadHealthy = false;
}
} else {
this.isReadHealthy = this.isWriteHealthy;
}
this.updateOverallHealth();
return this.isHealthy;
} catch (error) {
logger.error("Redis write client health check failed:", error);
this.isWriteHealthy = false;
this.isReadHealthy = false; // If write fails, consider read as failed too for safety
this.isHealthy = false;
return false;
}
}
public getClient(): Redis {
return this.client!;
}
public getWriteClient(): Redis | null {
return this.writeClient;
}
public getReadClient(): Redis | null {
return this.readClient;
}
public hasReplicaSupport(): boolean {
return this.hasReplicas;
}
public getHealthStatus(): {
isEnabled: boolean;
isHealthy: boolean;
isWriteHealthy: boolean;
isReadHealthy: boolean;
hasReplicas: boolean;
} {
return {
isEnabled: this.isEnabled,
isHealthy: this.isHealthy,
isWriteHealthy: this.isWriteHealthy,
isReadHealthy: this.isReadHealthy,
hasReplicas: this.hasReplicas
};
}
public async set(
key: string,
value: string,
ttl?: number
): Promise<boolean> {
if (!this.isRedisEnabled() || !this.writeClient) return false;
try {
await this.executeWithRetry(
async () => {
if (ttl) {
await this.writeClient!.setex(key, ttl, value);
} else {
await this.writeClient!.set(key, value);
}
},
"Redis SET"
);
return true;
} catch (error) {
logger.error("Redis SET error:", error);
return false;
}
}
public async get(key: string): Promise<string | null> {
if (!this.isRedisEnabled() || !this.readClient) return null;
try {
const fallbackOperation = (this.hasReplicas && this.writeClient && this.isWriteHealthy)
? () => this.writeClient!.get(key)
: undefined;
return await this.executeWithRetry(
() => this.readClient!.get(key),
"Redis GET",
fallbackOperation
);
} catch (error) {
logger.error("Redis GET error:", error);
return null;
}
}
public async del(key: string): Promise<boolean> {
if (!this.isRedisEnabled() || !this.writeClient) return false;
try {
await this.executeWithRetry(
() => this.writeClient!.del(key),
"Redis DEL"
);
return true;
} catch (error) {
logger.error("Redis DEL error:", error);
return false;
}
}
public async sadd(key: string, member: string): Promise<boolean> {
if (!this.isRedisEnabled() || !this.writeClient) return false;
try {
await this.executeWithRetry(
() => this.writeClient!.sadd(key, member),
"Redis SADD"
);
return true;
} catch (error) {
logger.error("Redis SADD error:", error);
return false;
}
}
public async srem(key: string, member: string): Promise<boolean> {
if (!this.isRedisEnabled() || !this.writeClient) return false;
try {
await this.executeWithRetry(
() => this.writeClient!.srem(key, member),
"Redis SREM"
);
return true;
} catch (error) {
logger.error("Redis SREM error:", error);
return false;
}
}
public async smembers(key: string): Promise<string[]> {
if (!this.isRedisEnabled() || !this.readClient) return [];
try {
const fallbackOperation = (this.hasReplicas && this.writeClient && this.isWriteHealthy)
? () => this.writeClient!.smembers(key)
: undefined;
return await this.executeWithRetry(
() => this.readClient!.smembers(key),
"Redis SMEMBERS",
fallbackOperation
);
} catch (error) {
logger.error("Redis SMEMBERS error:", error);
return [];
}
}
public async hset(
key: string,
field: string,
value: string
): Promise<boolean> {
if (!this.isRedisEnabled() || !this.writeClient) return false;
try {
await this.executeWithRetry(
() => this.writeClient!.hset(key, field, value),
"Redis HSET"
);
return true;
} catch (error) {
logger.error("Redis HSET error:", error);
return false;
}
}
public async hget(key: string, field: string): Promise<string | null> {
if (!this.isRedisEnabled() || !this.readClient) return null;
try {
const fallbackOperation = (this.hasReplicas && this.writeClient && this.isWriteHealthy)
? () => this.writeClient!.hget(key, field)
: undefined;
return await this.executeWithRetry(
() => this.readClient!.hget(key, field),
"Redis HGET",
fallbackOperation
);
} catch (error) {
logger.error("Redis HGET error:", error);
return null;
}
}
public async hdel(key: string, field: string): Promise<boolean> {
if (!this.isRedisEnabled() || !this.writeClient) return false;
try {
await this.executeWithRetry(
() => this.writeClient!.hdel(key, field),
"Redis HDEL"
);
return true;
} catch (error) {
logger.error("Redis HDEL error:", error);
return false;
}
}
public async hgetall(key: string): Promise<Record<string, string>> {
if (!this.isRedisEnabled() || !this.readClient) return {};
try {
const fallbackOperation = (this.hasReplicas && this.writeClient && this.isWriteHealthy)
? () => this.writeClient!.hgetall(key)
: undefined;
return await this.executeWithRetry(
() => this.readClient!.hgetall(key),
"Redis HGETALL",
fallbackOperation
);
} catch (error) {
logger.error("Redis HGETALL error:", error);
return {};
}
}
public async publish(channel: string, message: string): Promise<boolean> {
if (!this.isRedisEnabled() || !this.publisher) return false;
// Quick health check before attempting to publish
const isHealthy = await this.checkRedisHealth();
if (!isHealthy) {
logger.warn("Skipping Redis publish due to unhealthy connection");
return false;
}
try {
await this.executeWithRetry(
async () => {
// Add timeout to prevent hanging
return Promise.race([
this.publisher!.publish(channel, message),
new Promise((_, reject) =>
setTimeout(() => reject(new Error('Redis publish timeout')), 3000)
)
]);
},
"Redis PUBLISH"
);
return true;
} catch (error) {
logger.error("Redis PUBLISH error:", error);
this.isHealthy = false; // Mark as unhealthy on error
return false;
}
}
public async subscribe(
channel: string,
callback: (channel: string, message: string) => void
): Promise<boolean> {
if (!this.isRedisEnabled() || !this.subscriber) return false;
try {
// Add callback to subscribers map
if (!this.subscribers.has(channel)) {
this.subscribers.set(channel, new Set());
// Only subscribe to the channel if it's the first subscriber
await this.executeWithRetry(
async () => {
return Promise.race([
this.subscriber!.subscribe(channel),
new Promise((_, reject) =>
setTimeout(() => reject(new Error('Redis subscribe timeout')), 5000)
)
]);
},
"Redis SUBSCRIBE"
);
}
this.subscribers.get(channel)!.add(callback);
return true;
} catch (error) {
logger.error("Redis SUBSCRIBE error:", error);
this.isHealthy = false;
return false;
}
}
public async unsubscribe(
channel: string,
callback?: (channel: string, message: string) => void
): Promise<boolean> {
if (!this.isRedisEnabled() || !this.subscriber) return false;
try {
const channelSubscribers = this.subscribers.get(channel);
if (!channelSubscribers) return true;
if (callback) {
// Remove specific callback
channelSubscribers.delete(callback);
if (channelSubscribers.size === 0) {
this.subscribers.delete(channel);
await this.executeWithRetry(
() => this.subscriber!.unsubscribe(channel),
"Redis UNSUBSCRIBE"
);
}
} else {
// Remove all callbacks for this channel
this.subscribers.delete(channel);
await this.executeWithRetry(
() => this.subscriber!.unsubscribe(channel),
"Redis UNSUBSCRIBE"
);
}
return true;
} catch (error) {
logger.error("Redis UNSUBSCRIBE error:", error);
return false;
}
}
public async disconnect(): Promise<void> {
try {
if (this.client) {
await this.client.quit();
this.client = null;
}
if (this.writeClient) {
await this.writeClient.quit();
this.writeClient = null;
}
if (this.readClient && this.readClient !== this.writeClient) {
await this.readClient.quit();
this.readClient = null;
}
if (this.publisher) {
await this.publisher.quit();
this.publisher = null;
}
if (this.subscriber) {
await this.subscriber.quit();
this.subscriber = null;
}
this.subscribers.clear();
logger.info("Redis clients disconnected");
} catch (error) {
logger.error("Error disconnecting Redis clients:", error);
}
}
}
export const redisManager = new RedisManager();
export const redis = redisManager.getClient();
export default redisManager;
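
A short pub/sub sketch under the assumption that the caller runs in an ESM context with top-level await (channel name and payload are illustrative):

import redisManager from "@server/db/private/redis";

// Subscribe once at startup; the manager re-subscribes automatically
// after a reconnect via resubscribeToChannels().
await redisManager.subscribe("config:changed", (_channel, orgId) => {
    console.log(`config changed for org ${orgId}`);
});

// publish() returns false instead of throwing when Redis is disabled or
// unhealthy, so call sites can degrade gracefully.
const delivered = await redisManager.publish("config:changed", "org-123");
if (!delivered) {
    // fall back to local-only invalidation
}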


@@ -0,0 +1,223 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import { Store, Options, IncrementResponse } from 'express-rate-limit';
import { rateLimitService } from './rateLimit';
import logger from '@server/logger';
/**
* A Redis-backed rate limiting store for express-rate-limit that optimizes
* for local read performance and batched writes to Redis.
*
* This store uses the same optimized rate limiting logic as the WebSocket
* implementation, providing:
* - Local caching for fast reads
* - Batched writes to Redis to reduce load
* - Automatic cleanup of expired entries
* - Graceful fallback when Redis is unavailable
*/
export default class RedisStore implements Store {
/**
* The duration of time before which all hit counts are reset (in milliseconds).
*/
windowMs!: number;
/**
* Maximum number of requests allowed within the window.
*/
max!: number;
/**
* Optional prefix for Redis keys to avoid collisions.
*/
prefix: string;
/**
* Whether to skip incrementing on failed requests.
*/
skipFailedRequests: boolean;
/**
* Whether to skip incrementing on successful requests.
*/
skipSuccessfulRequests: boolean;
/**
* @constructor for RedisStore.
*
* @param options - Configuration options for the store.
*/
constructor(options: {
prefix?: string;
skipFailedRequests?: boolean;
skipSuccessfulRequests?: boolean;
} = {}) {
this.prefix = options.prefix || 'express-rate-limit';
this.skipFailedRequests = options.skipFailedRequests || false;
this.skipSuccessfulRequests = options.skipSuccessfulRequests || false;
}
/**
* Method that actually initializes the store. Must be synchronous.
*
* @param options - The options used to setup express-rate-limit.
*/
init(options: Options): void {
this.windowMs = options.windowMs;
this.max = options.max as number;
// logger.debug(`RedisStore initialized with windowMs: ${this.windowMs}, max: ${this.max}, prefix: ${this.prefix}`);
}
/**
* Method to increment a client's hit counter.
*
* @param key - The identifier for a client (usually IP address).
* @returns Promise resolving to the number of hits and reset time for that client.
*/
async increment(key: string): Promise<IncrementResponse> {
try {
const clientId = `${this.prefix}:${key}`;
const result = await rateLimitService.checkRateLimit(
clientId,
undefined, // No message type for HTTP requests
this.max,
undefined, // No message type limit
this.windowMs
);
// logger.debug(`Incremented rate limit for key: ${key} with max: ${this.max}, totalHits: ${result.totalHits}`);
return {
totalHits: result.totalHits || 1,
resetTime: result.resetTime || new Date(Date.now() + this.windowMs)
};
} catch (error) {
logger.error(`RedisStore increment error for key ${key}:`, error);
// Return safe defaults on error to prevent blocking requests
return {
totalHits: 1,
resetTime: new Date(Date.now() + this.windowMs)
};
}
}
/**
* Method to decrement a client's hit counter.
* Used when skipSuccessfulRequests or skipFailedRequests is enabled.
*
* @param key - The identifier for a client.
*/
async decrement(key: string): Promise<void> {
try {
const clientId = `${this.prefix}:${key}`;
await rateLimitService.decrementRateLimit(clientId);
// logger.debug(`Decremented rate limit for key: ${key}`);
} catch (error) {
logger.error(`RedisStore decrement error for key ${key}:`, error);
// Don't throw - decrement failures shouldn't block requests
}
}
/**
* Method to reset a client's hit counter.
*
* @param key - The identifier for a client.
*/
async resetKey(key: string): Promise<void> {
try {
const clientId = `${this.prefix}:${key}`;
await rateLimitService.resetKey(clientId);
// logger.debug(`Reset rate limit for key: ${key}`);
} catch (error) {
logger.error(`RedisStore resetKey error for key ${key}:`, error);
// Don't throw - reset failures shouldn't block requests
}
}
/**
* Method to reset everyone's hit counter.
*
* This method is optional and is never called by express-rate-limit.
* We implement it for completeness but it's not recommended for production use
* as it could be expensive with large datasets.
*/
async resetAll(): Promise<void> {
try {
logger.warn('RedisStore resetAll called - this operation can be expensive');
// Force sync all pending data first
await rateLimitService.forceSyncAllPendingData();
// Note: We don't actually implement full reset as it would require
// scanning all Redis keys with our prefix, which could be expensive.
// In production, it's better to let entries expire naturally.
logger.info('RedisStore resetAll completed (pending data synced)');
} catch (error) {
logger.error('RedisStore resetAll error:', error);
// Don't throw - this is an optional method
}
}
/**
* Get current hit count for a key without incrementing.
* This is a custom method not part of the Store interface.
*
* @param key - The identifier for a client.
* @returns Current hit count and reset time, or null if no data exists.
*/
async getHits(key: string): Promise<{ totalHits: number; resetTime: Date } | null> {
try {
const clientId = `${this.prefix}:${key}`;
// Call checkRateLimit with an artificially high limit so this check
// never reports the client as limited, then decrement below to undo
// the increment the check performs
const result = await rateLimitService.checkRateLimit(
clientId,
undefined,
this.max + 1000, // Set artificially high to avoid triggering limit
undefined,
this.windowMs
);
// Decrement since we don't actually want to count this check
await rateLimitService.decrementRateLimit(clientId);
return {
totalHits: Math.max(0, (result.totalHits || 0) - 1), // Adjust for the decrement
resetTime: result.resetTime || new Date(Date.now() + this.windowMs)
};
} catch (error) {
logger.error(`RedisStore getHits error for key ${key}:`, error);
return null;
}
}
/**
* Cleanup method for graceful shutdown.
* This is not part of the Store interface but is useful for cleanup.
*/
async shutdown(): Promise<void> {
try {
// The rateLimitService handles its own cleanup
logger.info('RedisStore shutdown completed');
} catch (error) {
logger.error('RedisStore shutdown error:', error);
}
}
}
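
Wiring this into express-rate-limit would look roughly like the sketch below (the route and numbers are illustrative; 60s/100 mirror the defaults in rateLimit.ts):

import rateLimit from "express-rate-limit";
import RedisStore from "./redisStore"; // path assumed

const apiLimiter = rateLimit({
    windowMs: 60 * 1000, // RATE_LIMIT_WINDOW
    max: 100,            // RATE_LIMIT_MAX_REQUESTS
    store: new RedisStore({ prefix: "api" })
});

// app.use("/api", apiLimiter);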


@@ -1,4 +1,4 @@
-import { db } from "@server/db";
+import { db, loginPage, LoginPage, loginPageOrg } from "@server/db";
import {
Resource,
ResourcePassword,
@@ -39,7 +39,10 @@ export async function getResourceByDomain(
): Promise<ResourceWithAuth | null> {
if (config.isManagedMode()) {
try {
-const response = await axios.get(`${config.getRawConfig().managed?.endpoint}/api/v1/hybrid/resource/domain/${domain}`, await tokenManager.getAuthHeader());
+const response = await axios.get(
+    `${config.getRawConfig().managed?.endpoint}/api/v1/hybrid/resource/domain/${domain}`,
+    await tokenManager.getAuthHeader()
+);
return response.data.data;
} catch (error) {
if (axios.isAxiosError(error)) {
@@ -91,7 +94,10 @@ export async function getUserSessionWithUser(
): Promise<UserSessionWithUser | null> {
if (config.isManagedMode()) {
try {
const response = await axios.get(`${config.getRawConfig().managed?.endpoint}/api/v1/hybrid/session/${userSessionId}`, await tokenManager.getAuthHeader());
const response = await axios.get(
`${config.getRawConfig().managed?.endpoint}/api/v1/hybrid/session/${userSessionId}`,
await tokenManager.getAuthHeader()
);
return response.data.data;
} catch (error) {
if (axios.isAxiosError(error)) {
@@ -132,7 +138,10 @@ export async function getUserSessionWithUser(
export async function getUserOrgRole(userId: string, orgId: string) {
if (config.isManagedMode()) {
try {
const response = await axios.get(`${config.getRawConfig().managed?.endpoint}/api/v1/hybrid/user/${userId}/org/${orgId}/role`, await tokenManager.getAuthHeader());
const response = await axios.get(
`${config.getRawConfig().managed?.endpoint}/api/v1/hybrid/user/${userId}/org/${orgId}/role`,
await tokenManager.getAuthHeader()
);
return response.data.data;
} catch (error) {
if (axios.isAxiosError(error)) {
@@ -154,12 +163,7 @@ export async function getUserOrgRole(userId: string, orgId: string) {
const userOrgRole = await db
.select()
.from(userOrgs)
.where(
and(
eq(userOrgs.userId, userId),
eq(userOrgs.orgId, orgId)
)
)
.where(and(eq(userOrgs.userId, userId), eq(userOrgs.orgId, orgId)))
.limit(1);
return userOrgRole.length > 0 ? userOrgRole[0] : null;
@@ -168,10 +172,16 @@ export async function getUserOrgRole(userId: string, orgId: string) {
/**
* Check if role has access to resource
*/
export async function getRoleResourceAccess(resourceId: number, roleId: number) {
export async function getRoleResourceAccess(
resourceId: number,
roleId: number
) {
if (config.isManagedMode()) {
try {
const response = await axios.get(`${config.getRawConfig().managed?.endpoint}/api/v1/hybrid/role/${roleId}/resource/${resourceId}/access`, await tokenManager.getAuthHeader());
const response = await axios.get(
`${config.getRawConfig().managed?.endpoint}/api/v1/hybrid/role/${roleId}/resource/${resourceId}/access`,
await tokenManager.getAuthHeader()
);
return response.data.data;
} catch (error) {
if (axios.isAxiosError(error)) {
@@ -207,10 +217,16 @@ export async function getRoleResourceAccess(resourceId: number, roleId: number)
/**
* Check if user has direct access to resource
*/
export async function getUserResourceAccess(userId: string, resourceId: number) {
export async function getUserResourceAccess(
userId: string,
resourceId: number
) {
if (config.isManagedMode()) {
try {
const response = await axios.get(`${config.getRawConfig().managed?.endpoint}/api/v1/hybrid/user/${userId}/resource/${resourceId}/access`, await tokenManager.getAuthHeader());
const response = await axios.get(
`${config.getRawConfig().managed?.endpoint}/api/v1/hybrid/user/${userId}/resource/${resourceId}/access`,
await tokenManager.getAuthHeader()
);
return response.data.data;
} catch (error) {
if (axios.isAxiosError(error)) {
@@ -246,10 +262,15 @@ export async function getUserResourceAccess(userId: string, resourceId: number)
/**
* Get resource rules for a given resource
*/
export async function getResourceRules(resourceId: number): Promise<ResourceRule[]> {
export async function getResourceRules(
resourceId: number
): Promise<ResourceRule[]> {
if (config.isManagedMode()) {
try {
const response = await axios.get(`${config.getRawConfig().managed?.endpoint}/api/v1/hybrid/resource/${resourceId}/rules`, await tokenManager.getAuthHeader());
const response = await axios.get(
`${config.getRawConfig().managed?.endpoint}/api/v1/hybrid/resource/${resourceId}/rules`,
await tokenManager.getAuthHeader()
);
return response.data.data;
} catch (error) {
if (axios.isAxiosError(error)) {
@@ -275,3 +296,50 @@ export async function getResourceRules(resourceId: number): Promise<ResourceRule
return rules;
}
/**
* Get organization login page
*/
export async function getOrgLoginPage(
orgId: string
): Promise<LoginPage | null> {
if (config.isManagedMode()) {
try {
const response = await axios.get(
`${config.getRawConfig().managed?.endpoint}/api/v1/hybrid/org/${orgId}/login-page`,
await tokenManager.getAuthHeader()
);
return response.data.data;
} catch (error) {
if (axios.isAxiosError(error)) {
logger.error("Error fetching config in verify session:", {
message: error.message,
code: error.code,
status: error.response?.status,
statusText: error.response?.statusText,
url: error.config?.url,
method: error.config?.method
});
} else {
logger.error("Error fetching config in verify session:", error);
}
return null;
}
}
const [result] = await db
.select()
.from(loginPageOrg)
.where(eq(loginPageOrg.orgId, orgId))
.innerJoin(
loginPage,
eq(loginPageOrg.loginPageId, loginPage.loginPageId)
)
.limit(1);
if (!result) {
return null;
}
return result.loginPage;
}
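Each helper above follows the same managed-mode fallthrough: proxy the lookup to the hybrid API, return null on any failure, and only hit the local database in self-hosted mode. A generic sketch of that shape (hybridLookup, path, and fetchLocal are illustrative names, not part of this module):

async function hybridLookup<T>(
    path: string,
    fetchLocal: () => Promise<T | null>
): Promise<T | null> {
    if (config.isManagedMode()) {
        try {
            const response = await axios.get(
                `${config.getRawConfig().managed?.endpoint}${path}`,
                await tokenManager.getAuthHeader()
            );
            return response.data.data;
        } catch (error) {
            // Mirror the handlers above: log and fall back to null.
            logger.error("Hybrid lookup failed:", error);
            return null;
        }
    }
    return fetchLocal();
}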

View File

@@ -1,2 +1,3 @@
export * from "./driver";
export * from "./schema";
export * from "./privateSchema";

View File

@@ -0,0 +1,239 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import {
sqliteTable,
integer,
text,
real
} from "drizzle-orm/sqlite-core";
import { InferSelectModel } from "drizzle-orm";
import { domains, orgs, targets, users, exitNodes, sessions } from "./schema";
export const certificates = sqliteTable("certificates", {
certId: integer("certId").primaryKey({ autoIncrement: true }),
domain: text("domain").notNull().unique(),
domainId: text("domainId").references(() => domains.domainId, {
onDelete: "cascade"
}),
wildcard: integer("wildcard", { mode: "boolean" }).default(false),
status: text("status").notNull().default("pending"), // pending, requested, valid, expired, failed
expiresAt: integer("expiresAt"),
lastRenewalAttempt: integer("lastRenewalAttempt"),
createdAt: integer("createdAt").notNull(),
updatedAt: integer("updatedAt").notNull(),
orderId: text("orderId"),
errorMessage: text("errorMessage"),
renewalCount: integer("renewalCount").default(0),
certFile: text("certFile"),
keyFile: text("keyFile")
});
export const dnsChallenge = sqliteTable("dnsChallenges", {
dnsChallengeId: integer("dnsChallengeId").primaryKey({ autoIncrement: true }),
domain: text("domain").notNull(),
token: text("token").notNull(),
keyAuthorization: text("keyAuthorization").notNull(),
createdAt: integer("createdAt").notNull(),
expiresAt: integer("expiresAt").notNull(),
completed: integer("completed", { mode: "boolean" }).default(false)
});
export const account = sqliteTable("account", {
accountId: integer("accountId").primaryKey({ autoIncrement: true }),
userId: text("userId")
.notNull()
.references(() => users.userId, { onDelete: "cascade" })
});
export const customers = sqliteTable("customers", {
customerId: text("customerId").primaryKey().notNull(),
orgId: text("orgId")
.notNull()
.references(() => orgs.orgId, { onDelete: "cascade" }),
// accountId: integer("accountId")
// .references(() => account.accountId, { onDelete: "cascade" }), // Optional, if using accounts
email: text("email"),
name: text("name"),
phone: text("phone"),
address: text("address"),
createdAt: integer("createdAt").notNull(),
updatedAt: integer("updatedAt").notNull()
});
export const subscriptions = sqliteTable("subscriptions", {
subscriptionId: text("subscriptionId")
.primaryKey()
.notNull(),
customerId: text("customerId")
.notNull()
.references(() => customers.customerId, { onDelete: "cascade" }),
status: text("status").notNull().default("active"), // active, past_due, canceled, unpaid
canceledAt: integer("canceledAt"),
createdAt: integer("createdAt").notNull(),
updatedAt: integer("updatedAt"),
billingCycleAnchor: integer("billingCycleAnchor")
});
export const subscriptionItems = sqliteTable("subscriptionItems", {
subscriptionItemId: integer("subscriptionItemId").primaryKey({ autoIncrement: true }),
subscriptionId: text("subscriptionId")
.notNull()
.references(() => subscriptions.subscriptionId, {
onDelete: "cascade"
}),
planId: text("planId").notNull(),
priceId: text("priceId"),
meterId: text("meterId"),
unitAmount: real("unitAmount"),
tiers: text("tiers"),
interval: text("interval"),
currentPeriodStart: integer("currentPeriodStart"),
currentPeriodEnd: integer("currentPeriodEnd"),
name: text("name")
});
export const accountDomains = sqliteTable("accountDomains", {
accountId: integer("accountId")
.notNull()
.references(() => account.accountId, { onDelete: "cascade" }),
domainId: text("domainId")
.notNull()
.references(() => domains.domainId, { onDelete: "cascade" })
});
export const usage = sqliteTable("usage", {
usageId: text("usageId").primaryKey(),
featureId: text("featureId").notNull(),
orgId: text("orgId")
.references(() => orgs.orgId, { onDelete: "cascade" })
.notNull(),
meterId: text("meterId"),
instantaneousValue: real("instantaneousValue"),
latestValue: real("latestValue").notNull(),
previousValue: real("previousValue"),
updatedAt: integer("updatedAt").notNull(),
rolledOverAt: integer("rolledOverAt"),
nextRolloverAt: integer("nextRolloverAt")
});
export const limits = sqliteTable("limits", {
limitId: text("limitId").primaryKey(),
featureId: text("featureId").notNull(),
orgId: text("orgId")
.references(() => orgs.orgId, {
onDelete: "cascade"
})
.notNull(),
value: real("value"),
description: text("description")
});
export const usageNotifications = sqliteTable("usageNotifications", {
notificationId: integer("notificationId").primaryKey({ autoIncrement: true }),
orgId: text("orgId")
.notNull()
.references(() => orgs.orgId, { onDelete: "cascade" }),
featureId: text("featureId").notNull(),
limitId: text("limitId").notNull(),
notificationType: text("notificationType").notNull(),
sentAt: integer("sentAt").notNull()
});
export const domainNamespaces = sqliteTable("domainNamespaces", {
domainNamespaceId: text("domainNamespaceId").primaryKey(),
domainId: text("domainId")
.references(() => domains.domainId, {
onDelete: "set null"
})
.notNull()
});
export const exitNodeOrgs = sqliteTable("exitNodeOrgs", {
exitNodeId: integer("exitNodeId")
.notNull()
.references(() => exitNodes.exitNodeId, { onDelete: "cascade" }),
orgId: text("orgId")
.notNull()
.references(() => orgs.orgId, { onDelete: "cascade" })
});
export const remoteExitNodes = sqliteTable("remoteExitNode", {
remoteExitNodeId: text("id").primaryKey(),
secretHash: text("secretHash").notNull(),
dateCreated: text("dateCreated").notNull(),
version: text("version"),
exitNodeId: integer("exitNodeId").references(() => exitNodes.exitNodeId, {
onDelete: "cascade"
})
});
export const remoteExitNodeSessions = sqliteTable("remoteExitNodeSession", {
sessionId: text("id").primaryKey(),
remoteExitNodeId: text("remoteExitNodeId")
.notNull()
.references(() => remoteExitNodes.remoteExitNodeId, {
onDelete: "cascade"
}),
expiresAt: integer("expiresAt").notNull()
});
export const loginPage = sqliteTable("loginPage", {
loginPageId: integer("loginPageId").primaryKey({ autoIncrement: true }),
subdomain: text("subdomain"),
fullDomain: text("fullDomain"),
exitNodeId: integer("exitNodeId").references(() => exitNodes.exitNodeId, {
onDelete: "set null"
}),
domainId: text("domainId").references(() => domains.domainId, {
onDelete: "set null"
})
});
export const loginPageOrg = sqliteTable("loginPageOrg", {
loginPageId: integer("loginPageId")
.notNull()
.references(() => loginPage.loginPageId, { onDelete: "cascade" }),
orgId: text("orgId")
.notNull()
.references(() => orgs.orgId, { onDelete: "cascade" })
});
export const sessionTransferToken = sqliteTable("sessionTransferToken", {
token: text("token").primaryKey(),
sessionId: text("sessionId")
.notNull()
.references(() => sessions.sessionId, {
onDelete: "cascade"
}),
encryptedSession: text("encryptedSession").notNull(),
expiresAt: integer("expiresAt").notNull()
});
export type Limit = InferSelectModel<typeof limits>;
export type Account = InferSelectModel<typeof account>;
export type Certificate = InferSelectModel<typeof certificates>;
export type DnsChallenge = InferSelectModel<typeof dnsChallenge>;
export type Customer = InferSelectModel<typeof customers>;
export type Subscription = InferSelectModel<typeof subscriptions>;
export type SubscriptionItem = InferSelectModel<typeof subscriptionItems>;
export type Usage = InferSelectModel<typeof usage>;
export type UsageLimit = InferSelectModel<typeof limits>;
export type AccountDomain = InferSelectModel<typeof accountDomains>;
export type UsageNotification = InferSelectModel<typeof usageNotifications>;
export type RemoteExitNode = InferSelectModel<typeof remoteExitNodes>;
export type RemoteExitNodeSession = InferSelectModel<
typeof remoteExitNodeSessions
>;
export type ExitNodeOrg = InferSelectModel<typeof exitNodeOrgs>;
export type LoginPage = InferSelectModel<typeof loginPage>;
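To illustrate how these tables are consumed, a minimal drizzle lookup against certificates; the db and schema names mirror the barrel exports above, but the query itself is an assumption for illustration:

import { eq } from "drizzle-orm";
import { db, certificates } from "@server/db";

// Fetch the certificate row for an exact domain match, if one exists.
async function getCertificateForDomain(domain: string) {
    const [cert] = await db
        .select()
        .from(certificates)
        .where(eq(certificates.domain, domain))
        .limit(1);
    return cert ?? null;
}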

View File

@@ -140,6 +140,27 @@ export const targets = sqliteTable("targets", {
rewritePathType: text("rewritePathType") // exact, prefix, regex, stripPrefix
});
export const targetHealthCheck = sqliteTable("targetHealthCheck", {
targetHealthCheckId: integer("targetHealthCheckId").primaryKey({ autoIncrement: true }),
targetId: integer("targetId")
.notNull()
.references(() => targets.targetId, { onDelete: "cascade" }),
hcEnabled: integer("hcEnabled", { mode: "boolean" }).notNull().default(false),
hcPath: text("hcPath"),
hcScheme: text("hcScheme"),
hcMode: text("hcMode").default("http"),
hcHostname: text("hcHostname"),
hcPort: integer("hcPort"),
hcInterval: integer("hcInterval").default(30), // in seconds
hcUnhealthyInterval: integer("hcUnhealthyInterval").default(30), // in seconds
hcTimeout: integer("hcTimeout").default(5), // in seconds
hcHeaders: text("hcHeaders"),
hcFollowRedirects: integer("hcFollowRedirects", { mode: "boolean" }).default(true),
hcMethod: text("hcMethod").default("GET"),
hcStatus: integer("hcStatus"), // http code
hcHealth: text("hcHealth").default("unknown") // "unknown", "healthy", "unhealthy"
});
export const exitNodes = sqliteTable("exitNodes", {
exitNodeId: integer("exitNodeId").primaryKey({ autoIncrement: true }),
name: text("name").notNull(),
@@ -458,18 +479,6 @@ export const userResources = sqliteTable("userResources", {
.references(() => resources.resourceId, { onDelete: "cascade" })
});
export const limitsTable = sqliteTable("limits", {
limitId: integer("limitId").primaryKey({ autoIncrement: true }),
orgId: text("orgId")
.references(() => orgs.orgId, {
onDelete: "cascade"
})
.notNull(),
name: text("name").notNull(),
value: integer("value").notNull(),
description: text("description")
});
export const userInvites = sqliteTable("userInvites", {
inviteId: text("inviteId").primaryKey(),
orgId: text("orgId")
@@ -714,7 +723,6 @@ export type RoleSite = InferSelectModel<typeof roleSites>;
export type UserSite = InferSelectModel<typeof userSites>;
export type RoleResource = InferSelectModel<typeof roleResources>;
export type UserResource = InferSelectModel<typeof userResources>;
export type Limit = InferSelectModel<typeof limitsTable>;
export type UserInvite = InferSelectModel<typeof userInvites>;
export type UserOrg = InferSelectModel<typeof userOrgs>;
export type ResourceSession = InferSelectModel<typeof resourceSessions>;
@@ -739,3 +747,4 @@ export type SiteResource = InferSelectModel<typeof siteResources>;
export type OrgDomains = InferSelectModel<typeof orgDomains>;
export type SetupToken = InferSelectModel<typeof setupTokens>;
export type HostMeta = InferSelectModel<typeof hostMeta>;
export type TargetHealthCheck = InferSelectModel<typeof targetHealthCheck>;
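For completeness, a sketch of enabling a health check for a target while leaning on the column defaults above (hcMode "http", 30-second intervals, 5-second timeout, hcHealth starting at "unknown"); the target id and path values are hypothetical:

import { db, targetHealthCheck } from "@server/db";

// Only overrides what differs from the schema defaults.
await db.insert(targetHealthCheck).values({
    targetId: 42, // hypothetical target
    hcEnabled: true,
    hcPath: "/healthz",
    hcPort: 8080
});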