mirror of https://github.com/fosrl/pangolin.git (synced 2026-02-08 05:56:38 +00:00)
@@ -49,27 +49,43 @@ const auditLogBuffer: Array<{
 const BATCH_SIZE = 100; // Write to DB every 100 logs
 const BATCH_INTERVAL_MS = 5000; // Or every 5 seconds, whichever comes first
+const MAX_BUFFER_SIZE = 10000; // Prevent unbounded memory growth
 let flushTimer: NodeJS.Timeout | null = null;
+let isFlushInProgress = false;

 /**
  * Flush buffered logs to database
  */
 async function flushAuditLogs() {
-    if (auditLogBuffer.length === 0) {
+    if (auditLogBuffer.length === 0 || isFlushInProgress) {
         return;
     }

+    isFlushInProgress = true;
+
     // Take all current logs and clear buffer
     const logsToWrite = auditLogBuffer.splice(0, auditLogBuffer.length);

     try {
-        // Batch insert all logs at once
-        await db.insert(requestAuditLog).values(logsToWrite);
+        // Batch insert logs in groups of 25 to avoid overwhelming the database
+        const BATCH_DB_SIZE = 25;
+        for (let i = 0; i < logsToWrite.length; i += BATCH_DB_SIZE) {
+            const batch = logsToWrite.slice(i, i + BATCH_DB_SIZE);
+            await db.insert(requestAuditLog).values(batch);
+        }
         logger.debug(`Flushed ${logsToWrite.length} audit logs to database`);
     } catch (error) {
         logger.error("Error flushing audit logs:", error);
         // On error, we lose these logs - consider a fallback strategy if needed
         // (e.g., write to file, or put back in buffer with retry limit)
+    } finally {
+        isFlushInProgress = false;
+        // If buffer filled up while we were flushing, flush again
+        if (auditLogBuffer.length >= BATCH_SIZE) {
+            flushAuditLogs().catch((err) =>
+                logger.error("Error in follow-up flush:", err)
+            );
+        }
     }
 }
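The code that actually triggers flushAuditLogs() is outside this hunk. A minimal sketch of how the two triggers implied by the constants above (BATCH_SIZE and BATCH_INTERVAL_MS) could be wired, using a hypothetical scheduleFlush() helper; the repository's real scheduling code may differ:

// Hypothetical helper (not part of this diff): flush immediately once
// BATCH_SIZE logs are buffered, otherwise arm a one-shot timer so logs
// never sit in memory longer than BATCH_INTERVAL_MS.
function scheduleFlush() {
    if (auditLogBuffer.length >= BATCH_SIZE) {
        flushAuditLogs().catch((err) =>
            logger.error("Error in scheduled flush:", err)
        );
    } else if (flushTimer === null) {
        flushTimer = setTimeout(() => {
            flushTimer = null;
            flushAuditLogs().catch((err) =>
                logger.error("Error in timed flush:", err)
            );
        }, BATCH_INTERVAL_MS);
    }
}

Chunking the insert into groups of 25 also keeps the number of bound parameters per statement small, which matters for drivers and databases that cap bind variables per statement (SQLite, for example).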
@@ -95,6 +111,10 @@ export async function shutdownAuditLogger() {
         clearTimeout(flushTimer);
         flushTimer = null;
     }
+    // Force flush even if one is in progress by waiting and retrying
+    while (isFlushInProgress) {
+        await new Promise((resolve) => setTimeout(resolve, 100));
+    }
     await flushAuditLogs();
 }
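Where shutdownAuditLogger() gets called is not shown in this diff. One plausible wiring, sketched here as an assumption rather than the project's actual code, is a process signal handler that drains the buffer before exit:

// Sketch only: hook shutdown into SIGTERM so buffered logs are written
// before the process exits. Pangolin's real call site is not shown here.
process.on("SIGTERM", async () => {
    await shutdownAuditLogger(); // waits out any in-flight flush, then drains
    process.exit(0);
});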
@@ -212,6 +232,14 @@ export async function logRequestAudit(
         ? stripPortFromHost(body.requestIp)
         : undefined;

+    // Prevent unbounded buffer growth - drop oldest entries if buffer is too large
+    if (auditLogBuffer.length >= MAX_BUFFER_SIZE) {
+        const dropped = auditLogBuffer.splice(0, BATCH_SIZE);
+        logger.warn(
+            `Audit log buffer exceeded max size (${MAX_BUFFER_SIZE}), dropped ${dropped.length} oldest entries`
+        );
+    }
+
     // Add to buffer instead of writing directly to DB
     auditLogBuffer.push({
         timestamp,
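For clarity, a standalone sketch of the drop-oldest semantics above; the values here are made up, while the real code uses MAX_BUFFER_SIZE = 10000 and BATCH_SIZE = 100:

// splice(0, n) removes from the front of the array, i.e. the oldest
// buffered entries, so the newest audit logs are the ones kept.
const buf = ["a", "b", "c", "d", "e"];
const MAX = 5; // stand-in for MAX_BUFFER_SIZE
const DROP = 2; // stand-in for BATCH_SIZE
if (buf.length >= MAX) {
    const dropped = buf.splice(0, DROP);
    console.log(dropped); // ["a", "b"]
}
console.log(buf); // ["c", "d", "e"] - newest entries survive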