mirror of https://github.com/fosrl/pangolin.git

Merge dev into fix/log-analytics-adjustments
@@ -12,4 +12,4 @@
*/

export * from "./getOrgTierData";
export * from "./createCustomer";
export * from "./createCustomer";
@@ -55,7 +55,6 @@ export async function getValidCertificatesForDomains(
domains: Set<string>,
useCache: boolean = true
): Promise<Array<CertificateResult>> {

loadEncryptData(); // Ensure encryption key is loaded

const finalResults: CertificateResult[] = [];
@@ -12,14 +12,7 @@
*/

import { build } from "@server/build";
import {
db,
Org,
orgs,
ResourceSession,
sessions,
users
} from "@server/db";
import { db, Org, orgs, ResourceSession, sessions, users } from "@server/db";
import { getOrgTierData } from "#private/lib/billing";
import { TierId } from "@server/lib/billing/tiers";
import license from "#private/license/license";
@@ -66,7 +66,9 @@ export async function sendToExitNode(
// logger.debug(`Configured local exit node name: ${config.getRawConfig().gerbil.exit_node_name}`);

if (exitNode.name == config.getRawConfig().gerbil.exit_node_name) {
hostname = privateConfig.getRawPrivateConfig().gerbil.local_exit_node_reachable_at;
hostname =
privateConfig.getRawPrivateConfig().gerbil
.local_exit_node_reachable_at;
}

if (!hostname) {
@@ -44,43 +44,53 @@ async function checkExitNodeOnlineStatus(
const delayBetweenAttempts = 100; // 100ms delay between starting each attempt

// Create promises for all attempts with staggered delays
const attemptPromises = Array.from({ length: maxAttempts }, async (_, index) => {
const attemptNumber = index + 1;

// Add delay before each attempt (except the first)
if (index > 0) {
await new Promise((resolve) => setTimeout(resolve, delayBetweenAttempts * index));
}
const attemptPromises = Array.from(
{ length: maxAttempts },
async (_, index) => {
const attemptNumber = index + 1;

try {
const response = await axios.get(`http://${endpoint}/ping`, {
timeout: timeoutMs,
validateStatus: (status) => status === 200
});

if (response.status === 200) {
logger.debug(
`Exit node ${endpoint} is online (attempt ${attemptNumber}/${maxAttempts})`
// Add delay before each attempt (except the first)
if (index > 0) {
await new Promise((resolve) =>
setTimeout(resolve, delayBetweenAttempts * index)
);
return { success: true, attemptNumber };
}
return { success: false, attemptNumber, error: 'Non-200 status' };
} catch (error) {
const errorMessage = error instanceof Error ? error.message : "Unknown error";
logger.debug(
`Exit node ${endpoint} ping failed (attempt ${attemptNumber}/${maxAttempts}): ${errorMessage}`
);
return { success: false, attemptNumber, error: errorMessage };

try {
const response = await axios.get(`http://${endpoint}/ping`, {
timeout: timeoutMs,
validateStatus: (status) => status === 200
});

if (response.status === 200) {
logger.debug(
`Exit node ${endpoint} is online (attempt ${attemptNumber}/${maxAttempts})`
);
return { success: true, attemptNumber };
}
return {
success: false,
attemptNumber,
error: "Non-200 status"
};
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : "Unknown error";
logger.debug(
`Exit node ${endpoint} ping failed (attempt ${attemptNumber}/${maxAttempts}): ${errorMessage}`
);
return { success: false, attemptNumber, error: errorMessage };
}
}
});
);

try {
// Wait for the first successful response or all to fail
const results = await Promise.allSettled(attemptPromises);

// Check if any attempt succeeded
for (const result of results) {
if (result.status === 'fulfilled' && result.value.success) {
if (result.status === "fulfilled" && result.value.success) {
return true;
}
}
@@ -137,7 +147,11 @@ export async function verifyExitNodeOrgAccess(
return { hasAccess: false, exitNode };
}

export async function listExitNodes(orgId: string, filterOnline = false, noCloud = false) {
export async function listExitNodes(
orgId: string,
filterOnline = false,
noCloud = false
) {
const allExitNodes = await db
.select({
exitNodeId: exitNodes.exitNodeId,
@@ -166,7 +180,10 @@ export async function listExitNodes(orgId: string, filterOnline = false, noCloud
eq(exitNodes.type, "gerbil"),
or(
// only choose nodes that are in the same region
eq(exitNodes.region, config.getRawPrivateConfig().app.region),
eq(
exitNodes.region,
config.getRawPrivateConfig().app.region
),
isNull(exitNodes.region) // or for enterprise where region is not set
)
),
@@ -191,7 +208,7 @@ export async function listExitNodes(orgId: string, filterOnline = false, noCloud
// let online: boolean;
// if (filterOnline && node.type == "remoteExitNode") {
// try {
// const isActuallyOnline = await checkExitNodeOnlineStatus(
// const isActuallyOnline = await checkExitNodeOnlineStatus(
// node.endpoint
// );

@@ -225,7 +242,8 @@ export async function listExitNodes(orgId: string, filterOnline = false, noCloud
node.type === "remoteExitNode" && (!filterOnline || node.online)
);
const gerbilExitNodes = allExitNodes.filter(
(node) => node.type === "gerbil" && (!filterOnline || node.online) && !noCloud
(node) =>
node.type === "gerbil" && (!filterOnline || node.online) && !noCloud
);

// THIS PROVIDES THE FALL
@@ -334,7 +352,11 @@ export function selectBestExitNode(
return fallbackNode;
}

export async function checkExitNodeOrg(exitNodeId: number, orgId: string, trx: Transaction | typeof db = db) {
export async function checkExitNodeOrg(
exitNodeId: number,
orgId: string,
trx: Transaction | typeof db = db
) {
const [exitNodeOrg] = await trx
.select()
.from(exitNodeOrgs)

@@ -12,4 +12,4 @@
*/

export * from "./exitNodeComms";
export * from "./exitNodes";
export * from "./exitNodes";
@@ -177,7 +177,9 @@ export class LockManager {
const exists = value !== null;
const ownedByMe =
exists &&
value!.startsWith(`${config.getRawConfig().gerbil.exit_node_name}:`);
value!.startsWith(
`${config.getRawConfig().gerbil.exit_node_name}:`
);
const owner = exists ? value!.split(":")[0] : undefined;

return {
@@ -14,15 +14,15 @@
// Simple test file for the rate limit service with Redis
// Run with: npx ts-node rateLimitService.test.ts

import { RateLimitService } from './rateLimit';
import { RateLimitService } from "./rateLimit";

function generateClientId() {
return 'client-' + Math.random().toString(36).substring(2, 15);
return "client-" + Math.random().toString(36).substring(2, 15);
}

async function runTests() {
console.log('Starting Rate Limit Service Tests...\n');

console.log("Starting Rate Limit Service Tests...\n");

const rateLimitService = new RateLimitService();
let testsPassed = 0;
let testsTotal = 0;
@@ -47,36 +47,54 @@ async function runTests() {
}

// Test 1: Basic rate limiting
await test('Should allow requests under the limit', async () => {
await test("Should allow requests under the limit", async () => {
const clientId = generateClientId();
const maxRequests = 5;

for (let i = 0; i < maxRequests - 1; i++) {
const result = await rateLimitService.checkRateLimit(clientId, undefined, maxRequests);
const result = await rateLimitService.checkRateLimit(
clientId,
undefined,
maxRequests
);
assert(!result.isLimited, `Request ${i + 1} should be allowed`);
assert(result.totalHits === i + 1, `Expected ${i + 1} hits, got ${result.totalHits}`);
assert(
result.totalHits === i + 1,
`Expected ${i + 1} hits, got ${result.totalHits}`
);
}
});

// Test 2: Rate limit blocking
await test('Should block requests over the limit', async () => {
await test("Should block requests over the limit", async () => {
const clientId = generateClientId();
const maxRequests = 30;

// Use up all allowed requests
for (let i = 0; i < maxRequests - 1; i++) {
const result = await rateLimitService.checkRateLimit(clientId, undefined, maxRequests);
const result = await rateLimitService.checkRateLimit(
clientId,
undefined,
maxRequests
);
assert(!result.isLimited, `Request ${i + 1} should be allowed`);
}

// Next request should be blocked
const blockedResult = await rateLimitService.checkRateLimit(clientId, undefined, maxRequests);
assert(blockedResult.isLimited, 'Request should be blocked');
assert(blockedResult.reason === 'global', 'Should be blocked for global reason');
const blockedResult = await rateLimitService.checkRateLimit(
clientId,
undefined,
maxRequests
);
assert(blockedResult.isLimited, "Request should be blocked");
assert(
blockedResult.reason === "global",
"Should be blocked for global reason"
);
});

// Test 3: Message type limits
await test('Should handle message type limits', async () => {
await test("Should handle message type limits", async () => {
const clientId = generateClientId();
const globalMax = 10;
const messageTypeMax = 2;
@@ -84,54 +102,64 @@ async function runTests() {
// Send messages of type 'ping' up to the limit
for (let i = 0; i < messageTypeMax - 1; i++) {
const result = await rateLimitService.checkRateLimit(
clientId,
'ping',
globalMax,
clientId,
"ping",
globalMax,
messageTypeMax
);
assert(!result.isLimited, `Ping message ${i + 1} should be allowed`);
assert(
!result.isLimited,
`Ping message ${i + 1} should be allowed`
);
}

// Next 'ping' should be blocked
const blockedResult = await rateLimitService.checkRateLimit(
clientId,
'ping',
globalMax,
clientId,
"ping",
globalMax,
messageTypeMax
);
assert(blockedResult.isLimited, 'Ping message should be blocked');
assert(blockedResult.reason === 'message_type:ping', 'Should be blocked for message type');
assert(blockedResult.isLimited, "Ping message should be blocked");
assert(
blockedResult.reason === "message_type:ping",
"Should be blocked for message type"
);

// Other message types should still work
const otherResult = await rateLimitService.checkRateLimit(
clientId,
'pong',
globalMax,
clientId,
"pong",
globalMax,
messageTypeMax
);
assert(!otherResult.isLimited, 'Pong message should be allowed');
assert(!otherResult.isLimited, "Pong message should be allowed");
});

// Test 4: Reset functionality
await test('Should reset client correctly', async () => {
await test("Should reset client correctly", async () => {
const clientId = generateClientId();
const maxRequests = 3;

// Use up some requests
await rateLimitService.checkRateLimit(clientId, undefined, maxRequests);
await rateLimitService.checkRateLimit(clientId, 'test', maxRequests);
await rateLimitService.checkRateLimit(clientId, "test", maxRequests);

// Reset the client
await rateLimitService.resetKey(clientId);

// Should be able to make fresh requests
const result = await rateLimitService.checkRateLimit(clientId, undefined, maxRequests);
assert(!result.isLimited, 'Request after reset should be allowed');
assert(result.totalHits === 1, 'Should have 1 hit after reset');
const result = await rateLimitService.checkRateLimit(
clientId,
undefined,
maxRequests
);
assert(!result.isLimited, "Request after reset should be allowed");
assert(result.totalHits === 1, "Should have 1 hit after reset");
});

// Test 5: Different clients are independent
await test('Should handle different clients independently', async () => {
await test("Should handle different clients independently", async () => {
const client1 = generateClientId();
const client2 = generateClientId();
const maxRequests = 2;
@@ -139,43 +167,62 @@ async function runTests() {
// Client 1 uses up their limit
await rateLimitService.checkRateLimit(client1, undefined, maxRequests);
await rateLimitService.checkRateLimit(client1, undefined, maxRequests);
const client1Blocked = await rateLimitService.checkRateLimit(client1, undefined, maxRequests);
assert(client1Blocked.isLimited, 'Client 1 should be blocked');
const client1Blocked = await rateLimitService.checkRateLimit(
client1,
undefined,
maxRequests
);
assert(client1Blocked.isLimited, "Client 1 should be blocked");

// Client 2 should still be able to make requests
const client2Result = await rateLimitService.checkRateLimit(client2, undefined, maxRequests);
assert(!client2Result.isLimited, 'Client 2 should not be blocked');
assert(client2Result.totalHits === 1, 'Client 2 should have 1 hit');
const client2Result = await rateLimitService.checkRateLimit(
client2,
undefined,
maxRequests
);
assert(!client2Result.isLimited, "Client 2 should not be blocked");
assert(client2Result.totalHits === 1, "Client 2 should have 1 hit");
});

// Test 6: Decrement functionality
await test('Should decrement correctly', async () => {
await test("Should decrement correctly", async () => {
const clientId = generateClientId();
const maxRequests = 5;

// Make some requests
await rateLimitService.checkRateLimit(clientId, undefined, maxRequests);
await rateLimitService.checkRateLimit(clientId, undefined, maxRequests);
let result = await rateLimitService.checkRateLimit(clientId, undefined, maxRequests);
assert(result.totalHits === 3, 'Should have 3 hits before decrement');
let result = await rateLimitService.checkRateLimit(
clientId,
undefined,
maxRequests
);
assert(result.totalHits === 3, "Should have 3 hits before decrement");

// Decrement
await rateLimitService.decrementRateLimit(clientId);

// Next request should reflect the decrement
result = await rateLimitService.checkRateLimit(clientId, undefined, maxRequests);
assert(result.totalHits === 3, 'Should have 3 hits after decrement + increment');
result = await rateLimitService.checkRateLimit(
clientId,
undefined,
maxRequests
);
assert(
result.totalHits === 3,
"Should have 3 hits after decrement + increment"
);
});

// Wait a moment for any pending Redis operations
console.log('\nWaiting for Redis sync...');
await new Promise(resolve => setTimeout(resolve, 1000));
console.log("\nWaiting for Redis sync...");
await new Promise((resolve) => setTimeout(resolve, 1000));

// Force sync to test Redis integration
await test('Should sync to Redis', async () => {
await test("Should sync to Redis", async () => {
await rateLimitService.forceSyncAllPendingData();
// If this doesn't throw, Redis sync is working
assert(true, 'Redis sync completed');
assert(true, "Redis sync completed");
});

// Cleanup
@@ -185,18 +232,18 @@ async function runTests() {
console.log(`\n--- Test Results ---`);
console.log(`✅ Passed: ${testsPassed}/${testsTotal}`);
console.log(`❌ Failed: ${testsTotal - testsPassed}/${testsTotal}`);

if (testsPassed === testsTotal) {
console.log('\n🎉 All tests passed!');
console.log("\n🎉 All tests passed!");
process.exit(0);
} else {
console.log('\n💥 Some tests failed!');
console.log("\n💥 Some tests failed!");
process.exit(1);
}
}

// Run the tests
runTests().catch(error => {
console.error('Test runner error:', error);
runTests().catch((error) => {
console.error("Test runner error:", error);
process.exit(1);
});
});
@@ -40,7 +40,8 @@ interface RateLimitResult {

export class RateLimitService {
private localRateLimitTracker: Map<string, RateLimitTracker> = new Map();
private localMessageTypeRateLimitTracker: Map<string, RateLimitTracker> = new Map();
private localMessageTypeRateLimitTracker: Map<string, RateLimitTracker> =
new Map();
private cleanupInterval: NodeJS.Timeout | null = null;
private forceSyncInterval: NodeJS.Timeout | null = null;

@@ -68,12 +69,18 @@ export class RateLimitService {
return `ratelimit:${clientId}`;
}

private getMessageTypeRateLimitKey(clientId: string, messageType: string): string {
private getMessageTypeRateLimitKey(
clientId: string,
messageType: string
): string {
return `ratelimit:${clientId}:${messageType}`;
}

// Helper function to clean up old timestamp fields from a Redis hash
private async cleanupOldTimestamps(key: string, windowStart: number): Promise<void> {
private async cleanupOldTimestamps(
key: string,
windowStart: number
): Promise<void> {
if (!redisManager.isRedisEnabled()) return;

try {
@@ -101,10 +108,15 @@ export class RateLimitService {
const batch = fieldsToDelete.slice(i, i + batchSize);
await client.hdel(key, ...batch);
}
logger.debug(`Cleaned up ${fieldsToDelete.length} old timestamp fields from ${key}`);
logger.debug(
`Cleaned up ${fieldsToDelete.length} old timestamp fields from ${key}`
);
}
} catch (error) {
logger.error(`Failed to cleanup old timestamps for key ${key}:`, error);
logger.error(
`Failed to cleanup old timestamps for key ${key}:`,
error
);
// Don't throw - cleanup failures shouldn't block rate limiting
}
}
@@ -114,7 +126,8 @@ export class RateLimitService {
clientId: string,
tracker: RateLimitTracker
): Promise<void> {
if (!redisManager.isRedisEnabled() || tracker.pendingCount === 0) return;
if (!redisManager.isRedisEnabled() || tracker.pendingCount === 0)
return;

try {
const currentTime = Math.floor(Date.now() / 1000);
@@ -132,7 +145,11 @@ export class RateLimitService {
const newValue = (
parseInt(currentValue || "0") + tracker.pendingCount
).toString();
await redisManager.hset(globalKey, currentTime.toString(), newValue);
await redisManager.hset(
globalKey,
currentTime.toString(),
newValue
);

// Set TTL using the client directly - this prevents the key from persisting forever
if (redisManager.getClient()) {
@@ -145,7 +162,9 @@ export class RateLimitService {
tracker.lastSyncedCount = tracker.count;
tracker.pendingCount = 0;

logger.debug(`Synced global rate limit to Redis for client ${clientId}`);
logger.debug(
`Synced global rate limit to Redis for client ${clientId}`
);
} catch (error) {
logger.error("Failed to sync global rate limit to Redis:", error);
}
@@ -156,12 +175,16 @@ export class RateLimitService {
messageType: string,
tracker: RateLimitTracker
): Promise<void> {
if (!redisManager.isRedisEnabled() || tracker.pendingCount === 0) return;
if (!redisManager.isRedisEnabled() || tracker.pendingCount === 0)
return;

try {
const currentTime = Math.floor(Date.now() / 1000);
const windowStart = currentTime - RATE_LIMIT_WINDOW;
const messageTypeKey = this.getMessageTypeRateLimitKey(clientId, messageType);
const messageTypeKey = this.getMessageTypeRateLimitKey(
clientId,
messageType
);

// Clean up old timestamp fields before writing
await this.cleanupOldTimestamps(messageTypeKey, windowStart);
@@ -195,12 +218,17 @@ export class RateLimitService {
`Synced message type rate limit to Redis for client ${clientId}, type ${messageType}`
);
} catch (error) {
logger.error("Failed to sync message type rate limit to Redis:", error);
logger.error(
"Failed to sync message type rate limit to Redis:",
error
);
}
}

// Initialize local tracker from Redis data
private async initializeLocalTracker(clientId: string): Promise<RateLimitTracker> {
private async initializeLocalTracker(
clientId: string
): Promise<RateLimitTracker> {
const currentTime = Math.floor(Date.now() / 1000);
const windowStart = currentTime - RATE_LIMIT_WINDOW;

@@ -215,14 +243,16 @@ export class RateLimitService {

try {
const globalKey = this.getRateLimitKey(clientId);

// Clean up old timestamp fields before reading
await this.cleanupOldTimestamps(globalKey, windowStart);

const globalRateLimitData = await redisManager.hgetall(globalKey);

let count = 0;
for (const [timestamp, countStr] of Object.entries(globalRateLimitData)) {
for (const [timestamp, countStr] of Object.entries(
globalRateLimitData
)) {
const time = parseInt(timestamp);
if (time >= windowStart) {
count += parseInt(countStr);
@@ -236,7 +266,10 @@ export class RateLimitService {
lastSyncedCount: count
};
} catch (error) {
logger.error("Failed to initialize global tracker from Redis:", error);
logger.error(
"Failed to initialize global tracker from Redis:",
error
);
return {
count: 0,
windowStart: currentTime,
@@ -263,15 +296,21 @@ export class RateLimitService {
}

try {
const messageTypeKey = this.getMessageTypeRateLimitKey(clientId, messageType);

const messageTypeKey = this.getMessageTypeRateLimitKey(
clientId,
messageType
);

// Clean up old timestamp fields before reading
await this.cleanupOldTimestamps(messageTypeKey, windowStart);

const messageTypeRateLimitData = await redisManager.hgetall(messageTypeKey);

const messageTypeRateLimitData =
await redisManager.hgetall(messageTypeKey);

let count = 0;
for (const [timestamp, countStr] of Object.entries(messageTypeRateLimitData)) {
for (const [timestamp, countStr] of Object.entries(
messageTypeRateLimitData
)) {
const time = parseInt(timestamp);
if (time >= windowStart) {
count += parseInt(countStr);
@@ -285,7 +324,10 @@ export class RateLimitService {
lastSyncedCount: count
};
} catch (error) {
logger.error("Failed to initialize message type tracker from Redis:", error);
logger.error(
"Failed to initialize message type tracker from Redis:",
error
);
return {
count: 0,
windowStart: currentTime,
@@ -327,7 +369,10 @@ export class RateLimitService {
isLimited: true,
reason: "global",
totalHits: globalTracker.count,
resetTime: new Date((globalTracker.windowStart + Math.floor(windowMs / 1000)) * 1000)
resetTime: new Date(
(globalTracker.windowStart + Math.floor(windowMs / 1000)) *
1000
)
};
}

@@ -339,19 +384,32 @@ export class RateLimitService {
// Check message type specific rate limit if messageType is provided
if (messageType) {
const messageTypeKey = `${clientId}:${messageType}`;
let messageTypeTracker = this.localMessageTypeRateLimitTracker.get(messageTypeKey);
let messageTypeTracker =
this.localMessageTypeRateLimitTracker.get(messageTypeKey);

if (!messageTypeTracker || messageTypeTracker.windowStart < windowStart) {
if (
!messageTypeTracker ||
messageTypeTracker.windowStart < windowStart
) {
// New window or first request for this message type - initialize from Redis if available
messageTypeTracker = await this.initializeMessageTypeTracker(clientId, messageType);
messageTypeTracker = await this.initializeMessageTypeTracker(
clientId,
messageType
);
messageTypeTracker.windowStart = currentTime;
this.localMessageTypeRateLimitTracker.set(messageTypeKey, messageTypeTracker);
this.localMessageTypeRateLimitTracker.set(
messageTypeKey,
messageTypeTracker
);
}

// Increment message type counters
messageTypeTracker.count++;
messageTypeTracker.pendingCount++;
this.localMessageTypeRateLimitTracker.set(messageTypeKey, messageTypeTracker);
this.localMessageTypeRateLimitTracker.set(
messageTypeKey,
messageTypeTracker
);

// Check if message type limit would be exceeded
if (messageTypeTracker.count >= messageTypeLimit) {
@@ -359,25 +417,38 @@ export class RateLimitService {
isLimited: true,
reason: `message_type:${messageType}`,
totalHits: messageTypeTracker.count,
resetTime: new Date((messageTypeTracker.windowStart + Math.floor(windowMs / 1000)) * 1000)
resetTime: new Date(
(messageTypeTracker.windowStart +
Math.floor(windowMs / 1000)) *
1000
)
};
}

// Sync to Redis if threshold reached
if (messageTypeTracker.pendingCount >= REDIS_SYNC_THRESHOLD) {
this.syncMessageTypeRateLimitToRedis(clientId, messageType, messageTypeTracker);
this.syncMessageTypeRateLimitToRedis(
clientId,
messageType,
messageTypeTracker
);
}
}

return {
isLimited: false,
totalHits: globalTracker.count,
resetTime: new Date((globalTracker.windowStart + Math.floor(windowMs / 1000)) * 1000)
resetTime: new Date(
(globalTracker.windowStart + Math.floor(windowMs / 1000)) * 1000
)
};
}

// Decrement function for skipSuccessfulRequests/skipFailedRequests functionality
async decrementRateLimit(clientId: string, messageType?: string): Promise<void> {
async decrementRateLimit(
clientId: string,
messageType?: string
): Promise<void> {
// Decrement global counter
const globalTracker = this.localRateLimitTracker.get(clientId);
if (globalTracker && globalTracker.count > 0) {
@@ -389,7 +460,8 @@ export class RateLimitService {
// Decrement message type counter if provided
if (messageType) {
const messageTypeKey = `${clientId}:${messageType}`;
const messageTypeTracker = this.localMessageTypeRateLimitTracker.get(messageTypeKey);
const messageTypeTracker =
this.localMessageTypeRateLimitTracker.get(messageTypeKey);
if (messageTypeTracker && messageTypeTracker.count > 0) {
messageTypeTracker.count--;
messageTypeTracker.pendingCount--;
@@ -401,7 +473,7 @@ export class RateLimitService {
async resetKey(clientId: string): Promise<void> {
// Remove from local tracking
this.localRateLimitTracker.delete(clientId);

// Remove all message type entries for this client
for (const [key] of this.localMessageTypeRateLimitTracker) {
if (key.startsWith(`${clientId}:`)) {
@@ -417,9 +489,13 @@ export class RateLimitService {
// Get all message type keys for this client and delete them
const client = redisManager.getClient();
if (client) {
const messageTypeKeys = await client.keys(`ratelimit:${clientId}:*`);
const messageTypeKeys = await client.keys(
`ratelimit:${clientId}:*`
);
if (messageTypeKeys.length > 0) {
await Promise.all(messageTypeKeys.map(key => redisManager.del(key)));
await Promise.all(
messageTypeKeys.map((key) => redisManager.del(key))
);
}
}
}
@@ -431,7 +507,10 @@ export class RateLimitService {
const windowStart = currentTime - RATE_LIMIT_WINDOW;

// Clean up global rate limit tracking and sync pending data
for (const [clientId, tracker] of this.localRateLimitTracker.entries()) {
for (const [
clientId,
tracker
] of this.localRateLimitTracker.entries()) {
if (tracker.windowStart < windowStart) {
// Sync any pending data before cleanup
if (tracker.pendingCount > 0) {
@@ -442,12 +521,19 @@ export class RateLimitService {
}

// Clean up message type rate limit tracking and sync pending data
for (const [key, tracker] of this.localMessageTypeRateLimitTracker.entries()) {
for (const [
key,
tracker
] of this.localMessageTypeRateLimitTracker.entries()) {
if (tracker.windowStart < windowStart) {
// Sync any pending data before cleanup
if (tracker.pendingCount > 0) {
const [clientId, messageType] = key.split(":", 2);
await this.syncMessageTypeRateLimitToRedis(clientId, messageType, tracker);
await this.syncMessageTypeRateLimitToRedis(
clientId,
messageType,
tracker
);
}
this.localMessageTypeRateLimitTracker.delete(key);
}
@@ -461,17 +547,27 @@ export class RateLimitService {
logger.debug("Force syncing all pending rate limit data to Redis...");

// Sync all pending global rate limits
for (const [clientId, tracker] of this.localRateLimitTracker.entries()) {
for (const [
clientId,
tracker
] of this.localRateLimitTracker.entries()) {
if (tracker.pendingCount > 0) {
await this.syncRateLimitToRedis(clientId, tracker);
}
}

// Sync all pending message type rate limits
for (const [key, tracker] of this.localMessageTypeRateLimitTracker.entries()) {
for (const [
key,
tracker
] of this.localMessageTypeRateLimitTracker.entries()) {
if (tracker.pendingCount > 0) {
const [clientId, messageType] = key.split(":", 2);
await this.syncMessageTypeRateLimitToRedis(clientId, messageType, tracker);
await this.syncMessageTypeRateLimitToRedis(
clientId,
messageType,
tracker
);
}
}

@@ -504,4 +600,4 @@ export class RateLimitService {
}

// Export singleton instance
export const rateLimitService = new RateLimitService();
export const rateLimitService = new RateLimitService();
@@ -17,7 +17,10 @@ import { MemoryStore, Store } from "express-rate-limit";
import RedisStore from "#private/lib/redisStore";

export function createStore(): Store {
if (build != "oss" && privateConfig.getRawPrivateConfig().flags.enable_redis) {
if (
build != "oss" &&
privateConfig.getRawPrivateConfig().flags.enable_redis
) {
const rateLimitStore: Store = new RedisStore({
prefix: "api-rate-limit", // Optional: customize Redis key prefix
skipFailedRequests: true, // Don't count failed requests

@@ -19,7 +19,7 @@ import { build } from "@server/build";
class RedisManager {
public client: Redis | null = null;
private writeClient: Redis | null = null; // Master for writes
private readClient: Redis | null = null; // Replica for reads
private readClient: Redis | null = null; // Replica for reads
private subscriber: Redis | null = null;
private publisher: Redis | null = null;
private isEnabled: boolean = false;
@@ -46,7 +46,8 @@ class RedisManager {
this.isEnabled = false;
return;
}
this.isEnabled = privateConfig.getRawPrivateConfig().flags.enable_redis || false;
this.isEnabled =
privateConfig.getRawPrivateConfig().flags.enable_redis || false;
if (this.isEnabled) {
this.initializeClients();
}
@@ -63,15 +64,19 @@ class RedisManager {
}

private async triggerReconnectionCallbacks(): Promise<void> {
logger.info(`Triggering ${this.reconnectionCallbacks.size} reconnection callbacks`);

const promises = Array.from(this.reconnectionCallbacks).map(async (callback) => {
try {
await callback();
} catch (error) {
logger.error("Error in reconnection callback:", error);
logger.info(
`Triggering ${this.reconnectionCallbacks.size} reconnection callbacks`
);

const promises = Array.from(this.reconnectionCallbacks).map(
async (callback) => {
try {
await callback();
} catch (error) {
logger.error("Error in reconnection callback:", error);
}
}
});
);

await Promise.allSettled(promises);
}
@@ -79,13 +84,17 @@ class RedisManager {
private async resubscribeToChannels(): Promise<void> {
if (!this.subscriber || this.subscribers.size === 0) return;

logger.info(`Re-subscribing to ${this.subscribers.size} channels after Redis reconnection`);

logger.info(
`Re-subscribing to ${this.subscribers.size} channels after Redis reconnection`
);

try {
const channels = Array.from(this.subscribers.keys());
if (channels.length > 0) {
await this.subscriber.subscribe(...channels);
logger.info(`Successfully re-subscribed to channels: ${channels.join(', ')}`);
logger.info(
`Successfully re-subscribed to channels: ${channels.join(", ")}`
);
}
} catch (error) {
logger.error("Failed to re-subscribe to channels:", error);
@@ -98,7 +107,7 @@ class RedisManager {
host: redisConfig.host!,
port: redisConfig.port!,
password: redisConfig.password,
db: redisConfig.db,
db: redisConfig.db
// tls: {
// rejectUnauthorized:
// redisConfig.tls?.reject_unauthorized || false
@@ -112,7 +121,7 @@ class RedisManager {
if (!redisConfig.replicas || redisConfig.replicas.length === 0) {
return null;
}

// Use the first replica for simplicity
// In production, you might want to implement load balancing across replicas
const replica = redisConfig.replicas[0];
@@ -120,7 +129,7 @@ class RedisManager {
host: replica.host!,
port: replica.port!,
password: replica.password,
db: replica.db || redisConfig.db,
db: replica.db || redisConfig.db
// tls: {
// rejectUnauthorized:
// replica.tls?.reject_unauthorized || false
@@ -133,7 +142,7 @@ class RedisManager {
private initializeClients(): void {
const masterConfig = this.getRedisConfig();
const replicaConfig = this.getReplicaRedisConfig();

this.hasReplicas = replicaConfig !== null;

try {
@@ -144,7 +153,7 @@ class RedisManager {
maxRetriesPerRequest: 3,
keepAlive: 30000,
connectTimeout: this.connectionTimeout,
commandTimeout: this.commandTimeout,
commandTimeout: this.commandTimeout
});

// Initialize replica connection for reads (if available)
@@ -155,7 +164,7 @@ class RedisManager {
maxRetriesPerRequest: 3,
keepAlive: 30000,
connectTimeout: this.connectionTimeout,
commandTimeout: this.commandTimeout,
commandTimeout: this.commandTimeout
});
} else {
// Fallback to master for reads if no replicas
@@ -172,7 +181,7 @@ class RedisManager {
maxRetriesPerRequest: 3,
keepAlive: 30000,
connectTimeout: this.connectionTimeout,
commandTimeout: this.commandTimeout,
commandTimeout: this.commandTimeout
});

// Subscriber uses replica if available (reads)
@@ -182,7 +191,7 @@ class RedisManager {
maxRetriesPerRequest: 3,
keepAlive: 30000,
connectTimeout: this.connectionTimeout,
commandTimeout: this.commandTimeout,
commandTimeout: this.commandTimeout
});

// Add reconnection handlers for write client
@@ -202,11 +211,14 @@ class RedisManager {
logger.info("Redis write client ready");
this.isWriteHealthy = true;
this.updateOverallHealth();

// Trigger reconnection callbacks when Redis comes back online
if (this.isHealthy) {
this.triggerReconnectionCallbacks().catch(error => {
logger.error("Error triggering reconnection callbacks:", error);
this.triggerReconnectionCallbacks().catch((error) => {
logger.error(
"Error triggering reconnection callbacks:",
error
);
});
}
});
@@ -233,11 +245,14 @@ class RedisManager {
logger.info("Redis read client ready");
this.isReadHealthy = true;
this.updateOverallHealth();

// Trigger reconnection callbacks when Redis comes back online
if (this.isHealthy) {
this.triggerReconnectionCallbacks().catch(error => {
logger.error("Error triggering reconnection callbacks:", error);
this.triggerReconnectionCallbacks().catch((error) => {
logger.error(
"Error triggering reconnection callbacks:",
error
);
});
}
});
@@ -298,8 +313,8 @@ class RedisManager {
}
);

const setupMessage = this.hasReplicas
? "Redis clients initialized successfully with replica support"
const setupMessage = this.hasReplicas
? "Redis clients initialized successfully with replica support"
: "Redis clients initialized successfully (single instance)";
logger.info(setupMessage);

@@ -313,7 +328,8 @@ class RedisManager {

private updateOverallHealth(): void {
// Overall health is true if write is healthy and (read is healthy OR we don't have replicas)
this.isHealthy = this.isWriteHealthy && (this.isReadHealthy || !this.hasReplicas);
this.isHealthy =
this.isWriteHealthy && (this.isReadHealthy || !this.hasReplicas);
}

private async executeWithRetry<T>(
@@ -322,49 +338,61 @@ class RedisManager {
fallbackOperation?: () => Promise<T>
): Promise<T> {
let lastError: Error | null = null;

for (let attempt = 0; attempt <= this.maxRetries; attempt++) {
try {
return await operation();
} catch (error) {
lastError = error as Error;

// If this is the last attempt, try fallback if available
if (attempt === this.maxRetries && fallbackOperation) {
try {
logger.warn(`${operationName} primary operation failed, trying fallback`);
logger.warn(
`${operationName} primary operation failed, trying fallback`
);
return await fallbackOperation();
} catch (fallbackError) {
logger.error(`${operationName} fallback also failed:`, fallbackError);
logger.error(
`${operationName} fallback also failed:`,
fallbackError
);
throw lastError;
}
}

// Don't retry on the last attempt
if (attempt === this.maxRetries) {
break;
}

// Calculate delay with exponential backoff
const delay = Math.min(
this.baseRetryDelay * Math.pow(this.backoffMultiplier, attempt),
this.baseRetryDelay *
Math.pow(this.backoffMultiplier, attempt),
this.maxRetryDelay
);

logger.warn(`${operationName} failed (attempt ${attempt + 1}/${this.maxRetries + 1}), retrying in ${delay}ms:`, error);

logger.warn(
`${operationName} failed (attempt ${attempt + 1}/${this.maxRetries + 1}), retrying in ${delay}ms:`,
error
);

// Wait before retrying
await new Promise(resolve => setTimeout(resolve, delay));
await new Promise((resolve) => setTimeout(resolve, delay));
}
}

logger.error(`${operationName} failed after ${this.maxRetries + 1} attempts:`, lastError);

logger.error(
`${operationName} failed after ${this.maxRetries + 1} attempts:`,
lastError
);
throw lastError;
}

private startHealthMonitoring(): void {
if (!this.isEnabled) return;

// Check health every 30 seconds
setInterval(async () => {
try {
@@ -381,7 +409,7 @@ class RedisManager {

private async checkRedisHealth(): Promise<boolean> {
const now = Date.now();

// Only check health every 30 seconds
if (now - this.lastHealthCheck < this.healthCheckInterval) {
return this.isHealthy;
@@ -400,24 +428,45 @@ class RedisManager {
// Check write client (master) health
await Promise.race([
this.writeClient.ping(),
new Promise((_, reject) =>
setTimeout(() => reject(new Error('Write client health check timeout')), 2000)
new Promise((_, reject) =>
setTimeout(
() =>
reject(
new Error("Write client health check timeout")
),
2000
)
)
]);
this.isWriteHealthy = true;

// Check read client health if it's different from write client
if (this.hasReplicas && this.readClient && this.readClient !== this.writeClient) {
if (
this.hasReplicas &&
this.readClient &&
this.readClient !== this.writeClient
) {
try {
await Promise.race([
this.readClient.ping(),
new Promise((_, reject) =>
setTimeout(() => reject(new Error('Read client health check timeout')), 2000)
new Promise((_, reject) =>
setTimeout(
() =>
reject(
new Error(
"Read client health check timeout"
)
),
2000
)
)
]);
this.isReadHealthy = true;
} catch (error) {
logger.error("Redis read client health check failed:", error);
logger.error(
"Redis read client health check failed:",
error
);
this.isReadHealthy = false;
}
} else {
@@ -475,16 +524,13 @@ class RedisManager {
if (!this.isRedisEnabled() || !this.writeClient) return false;

try {
await this.executeWithRetry(
async () => {
if (ttl) {
await this.writeClient!.setex(key, ttl, value);
} else {
await this.writeClient!.set(key, value);
}
},
"Redis SET"
);
await this.executeWithRetry(async () => {
if (ttl) {
await this.writeClient!.setex(key, ttl, value);
} else {
await this.writeClient!.set(key, value);
}
}, "Redis SET");
return true;
} catch (error) {
logger.error("Redis SET error:", error);
@@ -496,9 +542,10 @@ class RedisManager {
if (!this.isRedisEnabled() || !this.readClient) return null;

try {
const fallbackOperation = (this.hasReplicas && this.writeClient && this.isWriteHealthy)
? () => this.writeClient!.get(key)
: undefined;
const fallbackOperation =
this.hasReplicas && this.writeClient && this.isWriteHealthy
? () => this.writeClient!.get(key)
: undefined;

return await this.executeWithRetry(
() => this.readClient!.get(key),
@@ -560,9 +607,10 @@ class RedisManager {
if (!this.isRedisEnabled() || !this.readClient) return [];

try {
const fallbackOperation = (this.hasReplicas && this.writeClient && this.isWriteHealthy)
? () => this.writeClient!.smembers(key)
: undefined;
const fallbackOperation =
this.hasReplicas && this.writeClient && this.isWriteHealthy
? () => this.writeClient!.smembers(key)
: undefined;

return await this.executeWithRetry(
() => this.readClient!.smembers(key),
@@ -598,9 +646,10 @@ class RedisManager {
if (!this.isRedisEnabled() || !this.readClient) return null;

try {
const fallbackOperation = (this.hasReplicas && this.writeClient && this.isWriteHealthy)
? () => this.writeClient!.hget(key, field)
: undefined;
const fallbackOperation =
this.hasReplicas && this.writeClient && this.isWriteHealthy
? () => this.writeClient!.hget(key, field)
: undefined;

return await this.executeWithRetry(
() => this.readClient!.hget(key, field),
@@ -632,9 +681,10 @@ class RedisManager {
if (!this.isRedisEnabled() || !this.readClient) return {};

try {
const fallbackOperation = (this.hasReplicas && this.writeClient && this.isWriteHealthy)
? () => this.writeClient!.hgetall(key)
: undefined;
const fallbackOperation =
this.hasReplicas && this.writeClient && this.isWriteHealthy
? () => this.writeClient!.hgetall(key)
: undefined;

return await this.executeWithRetry(
() => this.readClient!.hgetall(key),
@@ -658,18 +708,18 @@ class RedisManager {
}

try {
await this.executeWithRetry(
async () => {
// Add timeout to prevent hanging
return Promise.race([
this.publisher!.publish(channel, message),
new Promise((_, reject) =>
setTimeout(() => reject(new Error('Redis publish timeout')), 3000)
await this.executeWithRetry(async () => {
// Add timeout to prevent hanging
return Promise.race([
this.publisher!.publish(channel, message),
new Promise((_, reject) =>
setTimeout(
() => reject(new Error("Redis publish timeout")),
3000
)
]);
},
"Redis PUBLISH"
);
)
]);
}, "Redis PUBLISH");
return true;
} catch (error) {
logger.error("Redis PUBLISH error:", error);
@@ -689,17 +739,20 @@ class RedisManager {
if (!this.subscribers.has(channel)) {
this.subscribers.set(channel, new Set());
// Only subscribe to the channel if it's the first subscriber
await this.executeWithRetry(
async () => {
return Promise.race([
this.subscriber!.subscribe(channel),
new Promise((_, reject) =>
setTimeout(() => reject(new Error('Redis subscribe timeout')), 5000)
await this.executeWithRetry(async () => {
return Promise.race([
this.subscriber!.subscribe(channel),
new Promise((_, reject) =>
setTimeout(
() =>
reject(
new Error("Redis subscribe timeout")
),
5000
)
]);
},
"Redis SUBSCRIBE"
);
)
]);
}, "Redis SUBSCRIBE");
}

this.subscribers.get(channel)!.add(callback);
@@ -11,9 +11,9 @@
* This file is not licensed under the AGPLv3.
*/

import { Store, Options, IncrementResponse } from 'express-rate-limit';
import { rateLimitService } from './rateLimit';
import logger from '@server/logger';
import { Store, Options, IncrementResponse } from "express-rate-limit";
import { rateLimitService } from "./rateLimit";
import logger from "@server/logger";

/**
* A Redis-backed rate limiting store for express-rate-limit that optimizes
@@ -57,12 +57,14 @@ export default class RedisStore implements Store {
*
* @param options - Configuration options for the store.
*/
constructor(options: {
prefix?: string;
skipFailedRequests?: boolean;
skipSuccessfulRequests?: boolean;
} = {}) {
this.prefix = options.prefix || 'express-rate-limit';
constructor(
options: {
prefix?: string;
skipFailedRequests?: boolean;
skipSuccessfulRequests?: boolean;
} = {}
) {
this.prefix = options.prefix || "express-rate-limit";
this.skipFailedRequests = options.skipFailedRequests || false;
this.skipSuccessfulRequests = options.skipSuccessfulRequests || false;
}
@@ -101,7 +103,8 @@ export default class RedisStore implements Store {

return {
totalHits: result.totalHits || 1,
resetTime: result.resetTime || new Date(Date.now() + this.windowMs)
resetTime:
result.resetTime || new Date(Date.now() + this.windowMs)
};
} catch (error) {
logger.error(`RedisStore increment error for key ${key}:`, error);
@@ -158,7 +161,9 @@ export default class RedisStore implements Store {
*/
async resetAll(): Promise<void> {
try {
logger.warn('RedisStore resetAll called - this operation can be expensive');
logger.warn(
"RedisStore resetAll called - this operation can be expensive"
);

// Force sync all pending data first
await rateLimitService.forceSyncAllPendingData();
@@ -167,9 +172,9 @@ export default class RedisStore implements Store {
// scanning all Redis keys with our prefix, which could be expensive.
// In production, it's better to let entries expire naturally.

logger.info('RedisStore resetAll completed (pending data synced)');
logger.info("RedisStore resetAll completed (pending data synced)");
} catch (error) {
logger.error('RedisStore resetAll error:', error);
logger.error("RedisStore resetAll error:", error);
// Don't throw - this is an optional method
}
}
@@ -181,7 +186,9 @@ export default class RedisStore implements Store {
* @param key - The identifier for a client.
* @returns Current hit count and reset time, or null if no data exists.
*/
async getHits(key: string): Promise<{ totalHits: number; resetTime: Date } | null> {
async getHits(
key: string
): Promise<{ totalHits: number; resetTime: Date } | null> {
try {
const clientId = `${this.prefix}:${key}`;
@@ -200,7 +207,8 @@ export default class RedisStore implements Store {

return {
totalHits: Math.max(0, (result.totalHits || 0) - 1), // Adjust for the decrement
resetTime: result.resetTime || new Date(Date.now() + this.windowMs)
resetTime:
result.resetTime || new Date(Date.now() + this.windowMs)
};
} catch (error) {
logger.error(`RedisStore getHits error for key ${key}:`, error);
@@ -215,9 +223,9 @@ export default class RedisStore implements Store {
async shutdown(): Promise<void> {
try {
// The rateLimitService handles its own cleanup
logger.info('RedisStore shutdown completed');
logger.info("RedisStore shutdown completed");
} catch (error) {
logger.error('RedisStore shutdown error:', error);
logger.error("RedisStore shutdown error:", error);
}
}
}

@@ -16,10 +16,10 @@ import privateConfig from "#private/lib/config";
import logger from "@server/logger";

export enum AudienceIds {
SignUps = "6c4e77b2-0851-4bd6-bac8-f51f91360f1a",
Subscribed = "870b43fd-387f-44de-8fc1-707335f30b20",
Churned = "f3ae92bd-2fdb-4d77-8746-2118afd62549",
Newsletter = "5500c431-191c-42f0-a5d4-8b6d445b4ea0"
SignUps = "6c4e77b2-0851-4bd6-bac8-f51f91360f1a",
Subscribed = "870b43fd-387f-44de-8fc1-707335f30b20",
Churned = "f3ae92bd-2fdb-4d77-8746-2118afd62549",
Newsletter = "5500c431-191c-42f0-a5d4-8b6d445b4ea0"
}

const resend = new Resend(
@@ -33,7 +33,9 @@ export async function moveEmailToAudience(
audienceId: AudienceIds
) {
if (process.env.ENVIRONMENT !== "prod") {
logger.debug(`Skipping moving email ${email} to audience ${audienceId} in non-prod environment`);
logger.debug(
`Skipping moving email ${email} to audience ${audienceId} in non-prod environment`
);
return;
}
const { error, data } = await retryWithBackoff(async () => {

@@ -189,7 +189,7 @@ export async function getTraefikConfig(
);

if (!validation.isValid) {
logger.error(
logger.debug(
`Invalid path rewrite configuration for resource ${resourceId}: ${validation.error}`
);
return;

@@ -11,4 +11,4 @@
* This file is not licensed under the AGPLv3.
*/

export * from "./getTraefikConfig";
export * from "./getTraefikConfig";