Merge branch 'dev' into private-site-ha
@@ -12,15 +12,21 @@
 */

import { rateLimitService } from "#private/lib/rateLimit";
import { logStreamingManager } from "#private/lib/logStreaming";
import { cleanup as wsCleanup } from "#private/routers/ws";
import { flushBandwidthToDb } from "@server/routers/newt/handleReceiveBandwidthMessage";
import { flushConnectionLogToDb } from "#private/routers/newt";
import { flushSiteBandwidthToDb } from "@server/routers/gerbil/receiveBandwidth";
import { stopPingAccumulator } from "@server/routers/newt/pingAccumulator";

async function cleanup() {
    await stopPingAccumulator();
    await flushBandwidthToDb();
    await flushConnectionLogToDb();
    await flushSiteBandwidthToDb();
    await rateLimitService.cleanup();
    await wsCleanup();
    await logStreamingManager.shutdown();

    process.exit(0);
}

@@ -29,4 +35,4 @@ export async function initCleanup() {
        // Handle process termination
        process.on("SIGTERM", () => cleanup());
        process.on("SIGINT", () => cleanup());
    }
}
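For context, a minimal sketch of how this shutdown path is typically wired at boot. The entrypoint shape and import path here are assumptions; only `initCleanup()` and `cleanup()` come from the hunk above:

import { initCleanup } from "#private/setup/cleanup"; // hypothetical import path

async function main() {
    // Register the SIGTERM/SIGINT handlers before any servers start, so a
    // container stop always runs cleanup() and drains the bandwidth,
    // connection-log, and rate-limit state before process.exit(0).
    await initCleanup();
    // ... start the HTTP server, websocket router, pollers, etc.
}

void main();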
@@ -1,3 +1,16 @@
+/*
+ * This file is part of a proprietary work.
+ *
+ * Copyright (c) 2025 Fossorial, Inc.
+ * All rights reserved.
+ *
+ * This file is licensed under the Fossorial Commercial License.
+ * You may not use this file except in compliance with the License.
+ * Unauthorized use, copying, modification, or distribution is strictly prohibited.
+ *
+ * This file is not licensed under the AGPLv3.
+ */
+
 import NodeCache from "node-cache";
 import logger from "@server/logger";
 import { redisManager } from "@server/private/lib/redis";
@@ -24,23 +37,31 @@ setInterval(() => {
  */
 class AdaptiveCache {
     private useRedis(): boolean {
-        return redisManager.isRedisEnabled() && redisManager.getHealthStatus().isHealthy;
+        return (
+            redisManager.isRedisEnabled() &&
+            redisManager.getHealthStatus().isHealthy
+        );
     }

     /**
      * Set a value in the cache
      * @param key - Cache key
      * @param value - Value to cache (will be JSON stringified for Redis)
-     * @param ttl - Time to live in seconds (0 = no expiration)
+     * @param ttl - Time to live in seconds (0 = no expiration; omit = 3600s for Redis)
      * @returns boolean indicating success
      */
     async set(key: string, value: any, ttl?: number): Promise<boolean> {
-        const effectiveTtl = ttl === 0 ? undefined : ttl;
+        const redisTtl = ttl === 0 ? undefined : (ttl ?? 3600);

         if (this.useRedis()) {
             try {
                 const serialized = JSON.stringify(value);
-                const success = await redisManager.set(key, serialized, effectiveTtl);
+                const success = await redisManager.set(
+                    key,
+                    serialized,
+                    redisTtl
+                );

                 if (success) {
                     logger.debug(`Set key in Redis: ${key}`);
@@ -48,7 +69,9 @@ class AdaptiveCache {
                 }

                 // Redis failed, fall through to local cache
-                logger.debug(`Redis set failed for key ${key}, falling back to local cache`);
+                logger.debug(
+                    `Redis set failed for key ${key}, falling back to local cache`
+                );
             } catch (error) {
                 logger.error(`Redis set error for key ${key}:`, error);
                 // Fall through to local cache
@@ -120,9 +143,14 @@ class AdaptiveCache {
                }

                // Some Redis deletes failed, fall through to local cache
-                logger.debug(`Some Redis deletes failed, falling back to local cache`);
+                logger.debug(
+                    `Some Redis deletes failed, falling back to local cache`
+                );
            } catch (error) {
-                logger.error(`Redis del error for keys ${keys.join(", ")}:`, error);
+                logger.error(
+                    `Redis del error for keys ${keys.join(", ")}:`,
+                    error
+                );
                // Fall through to local cache
                deletedCount = 0;
            }
@@ -195,7 +223,9 @@ class AdaptiveCache {
     */
    async flushAll(): Promise<void> {
        if (this.useRedis()) {
-            logger.warn("Adaptive cache flushAll called - Redis flush not implemented, only local cache will be flushed");
+            logger.warn(
+                "Adaptive cache flushAll called - Redis flush not implemented, only local cache will be flushed"
+            );
        }

        localCache.flushAll();
@@ -239,7 +269,9 @@ class AdaptiveCache {
    getTtl(key: string): number {
        // Note: This only works for local cache, Redis TTL is not supported
        if (this.useRedis()) {
-            logger.warn(`getTtl called for key ${key} but Redis TTL lookup is not implemented`);
+            logger.warn(
+                `getTtl called for key ${key} but Redis TTL lookup is not implemented`
+            );
        }

        const ttl = localCache.getTtl(key);
@@ -255,7 +287,9 @@ class AdaptiveCache {
     */
    keys(): string[] {
        if (this.useRedis()) {
-            logger.warn("keys() called but Redis keys are not included, only local cache keys returned");
+            logger.warn(
+                "keys() called but Redis keys are not included, only local cache keys returned"
+            );
        }
        return localCache.keys();
    }
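A short usage sketch of the TTL semantics changed above. The instance construction is illustrative, since this hunk only shows the class body (the module likely exports a singleton):

const cache = new AdaptiveCache();

// Omitted TTL: the Redis side now falls back to 3600 s instead of never expiring.
await cache.set("session:abc", { userId: "u1" });
// ttl = 0: no expiration on either backend (redisTtl becomes undefined).
await cache.set("feature:beta", true, 0);
// Explicit TTL in seconds.
await cache.set("otp:u1", "123456", 300);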
@@ -74,6 +74,7 @@ export async function logAccessAudit(data: {
     type: string;
     orgId: string;
     resourceId?: number;
+    siteResourceId?: number;
     user?: { username: string; userId: string };
     apiKey?: { name: string | null; apiKeyId: string };
     metadata?: any;
@@ -134,6 +135,7 @@ export async function logAccessAudit(data: {
         type: data.type,
         metadata,
         resourceId: data.resourceId,
+        siteResourceId: data.siteResourceId,
         userAgent: data.userAgent,
         ip: clientIp,
         location: countryCode
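A sketch of a call site exercising the new `siteResourceId` field. The event type and other values are illustrative, since the rest of the function signature sits outside these hunks:

await logAccessAudit({
    type: "site-resource.accessed", // illustrative event type
    orgId: "org_123",
    siteResourceId: 42, // the field added in this hunk
    user: { username: "alice", userId: "u_1" },
    metadata: { via: "example" }
});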
234 server/private/lib/logConnectionAudit.ts Normal file
@@ -0,0 +1,234 @@
/*
 * This file is part of a proprietary work.
 *
 * Copyright (c) 2025 Fossorial, Inc.
 * All rights reserved.
 *
 * This file is licensed under the Fossorial Commercial License.
 * You may not use this file except in compliance with the License.
 * Unauthorized use, copying, modification, or distribution is strictly prohibited.
 *
 * This file is not licensed under the AGPLv3.
 */

import { logsDb, connectionAuditLog } from "@server/db";
import logger from "@server/logger";
import { and, eq, lt } from "drizzle-orm";
import { calculateCutoffTimestamp } from "@server/lib/cleanupLogs";

// ---------------------------------------------------------------------------
// Retry configuration for deadlock handling
// ---------------------------------------------------------------------------

const MAX_RETRIES = 3;
const BASE_DELAY_MS = 50;

// ---------------------------------------------------------------------------
// Buffer / flush configuration
// ---------------------------------------------------------------------------

/** How often to flush accumulated connection log data to the database. */
const FLUSH_INTERVAL_MS = 30_000; // 30 seconds

/** Maximum number of records to buffer before forcing a flush. */
const MAX_BUFFERED_RECORDS = 500;

/** Maximum number of records to insert in a single database batch. */
const INSERT_BATCH_SIZE = 100;

// ---------------------------------------------------------------------------
// Types
// ---------------------------------------------------------------------------

export interface ConnectionLogRecord {
    sessionId: string;
    siteResourceId: number;
    orgId: string;
    siteId: number;
    clientId: number | null;
    userId: string | null;
    sourceAddr: string;
    destAddr: string;
    protocol: string;
    startedAt: number; // epoch seconds
    endedAt: number | null;
    bytesTx: number | null;
    bytesRx: number | null;
}

// ---------------------------------------------------------------------------
// In-memory buffer
// ---------------------------------------------------------------------------

let buffer: ConnectionLogRecord[] = [];

// ---------------------------------------------------------------------------
// Deadlock helpers
// ---------------------------------------------------------------------------

function isDeadlockError(error: any): boolean {
    return (
        error?.code === "40P01" ||
        error?.cause?.code === "40P01" ||
        (error?.message && error.message.includes("deadlock"))
    );
}

async function withDeadlockRetry<T>(
    operation: () => Promise<T>,
    context: string
): Promise<T> {
    let attempt = 0;
    while (true) {
        try {
            return await operation();
        } catch (error: any) {
            if (isDeadlockError(error) && attempt < MAX_RETRIES) {
                attempt++;
                const baseDelay = Math.pow(2, attempt - 1) * BASE_DELAY_MS;
                const jitter = Math.random() * baseDelay;
                const delay = baseDelay + jitter;
                logger.warn(
                    `Deadlock detected in ${context}, retrying attempt ${attempt}/${MAX_RETRIES} after ${delay.toFixed(0)}ms`
                );
                await new Promise((resolve) => setTimeout(resolve, delay));
                continue;
            }
            throw error;
        }
    }
}
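// Editor's usage sketch (not part of the diff): wrap any single DB write in
// withDeadlockRetry so Postgres deadlocks (SQLSTATE 40P01) are retried with
// exponential backoff plus jitter before the error is surfaced:
//
//     await withDeadlockRetry(async () => {
//         await logsDb.insert(connectionAuditLog).values([someRecord]);
//     }, "example single-record insert");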
// ---------------------------------------------------------------------------
// Flush
// ---------------------------------------------------------------------------

/**
 * Flush all buffered connection log records to the database.
 *
 * Swaps out the buffer before writing so that any records added during the
 * flush are captured in the new buffer rather than being lost. Entries that
 * fail to write are re-queued back into the buffer so they will be retried
 * on the next flush.
 *
 * This function is exported so that the application's graceful-shutdown
 * cleanup handler can call it before the process exits.
 */
export async function flushConnectionLogToDb(): Promise<void> {
    if (buffer.length === 0) {
        return;
    }

    // Atomically swap out the buffer so new data keeps flowing in
    const snapshot = buffer;
    buffer = [];

    logger.debug(
        `Flushing ${snapshot.length} connection log record(s) to the database`
    );

    for (let i = 0; i < snapshot.length; i += INSERT_BATCH_SIZE) {
        const batch = snapshot.slice(i, i + INSERT_BATCH_SIZE);

        try {
            await withDeadlockRetry(async () => {
                await logsDb.insert(connectionAuditLog).values(batch);
            }, `flush connection log batch (${batch.length} records)`);
        } catch (error) {
            logger.error(
                `Failed to flush connection log batch of ${batch.length} records:`,
                error
            );

            // Re-queue the failed batch so it is retried on the next flush
            buffer = [...batch, ...buffer];

            // Cap buffer to prevent unbounded growth if the DB is unreachable
            const hardLimit = MAX_BUFFERED_RECORDS * 5;
            if (buffer.length > hardLimit) {
                const dropped = buffer.length - hardLimit;
                buffer = buffer.slice(0, hardLimit);
                logger.warn(
                    `Connection log buffer overflow, dropped ${dropped} oldest records`
                );
            }

            // Stop processing further batches from this snapshot — they will
            // be picked up via the re-queued records on the next flush.
            const remaining = snapshot.slice(i + INSERT_BATCH_SIZE);
            if (remaining.length > 0) {
                buffer = [...remaining, ...buffer];
            }
            break;
        }
    }
}

// ---------------------------------------------------------------------------
// Periodic flush timer
// ---------------------------------------------------------------------------

const flushTimer = setInterval(async () => {
    try {
        await flushConnectionLogToDb();
    } catch (error) {
        logger.error(
            "Unexpected error during periodic connection log flush:",
            error
        );
    }
}, FLUSH_INTERVAL_MS);

// Calling unref() means this timer will not keep the Node.js event loop alive
// on its own — the process can still exit normally when there is no other work
// left. The graceful-shutdown path will call flushConnectionLogToDb() explicitly
// before process.exit(), so no data is lost.
flushTimer.unref();

// ---------------------------------------------------------------------------
// Cleanup
// ---------------------------------------------------------------------------

export async function cleanUpOldLogs(
    orgId: string,
    retentionDays: number
): Promise<void> {
    const cutoffTimestamp = calculateCutoffTimestamp(retentionDays);

    try {
        await logsDb
            .delete(connectionAuditLog)
            .where(
                and(
                    lt(connectionAuditLog.startedAt, cutoffTimestamp),
                    eq(connectionAuditLog.orgId, orgId)
                )
            );
    } catch (error) {
        logger.error("Error cleaning up old connection audit logs:", error);
    }
}

// ---------------------------------------------------------------------------
// Public logging entry-point
// ---------------------------------------------------------------------------

/**
 * Buffer a single connection log record for eventual persistence.
 *
 * Records are written to the database in batches either when the buffer
 * reaches MAX_BUFFERED_RECORDS or when the periodic flush timer fires.
 */
export function logConnectionAudit(record: ConnectionLogRecord): void {
    buffer.push(record);

    if (buffer.length >= MAX_BUFFERED_RECORDS) {
        // Fire and forget — errors are handled inside flushConnectionLogToDb
        flushConnectionLogToDb().catch((error) => {
            logger.error(
                "Unexpected error during size-triggered connection log flush:",
                error
            );
        });
    }
}
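A sketch of a producer call site for the buffer above. The import alias is assumed from the diff's other `#private` imports, and the field values are illustrative:

import { logConnectionAudit } from "#private/lib/logConnectionAudit";

logConnectionAudit({
    sessionId: "sess_01",
    siteResourceId: 42,
    orgId: "org_123",
    siteId: 7,
    clientId: null,
    userId: "u_1",
    sourceAddr: "10.0.0.5:53211",
    destAddr: "10.1.0.8:5432",
    protocol: "tcp",
    startedAt: Math.floor(Date.now() / 1000), // epoch seconds, per the interface
    endedAt: null,
    bytesTx: null,
    bytesRx: null
});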
776 server/private/lib/logStreaming/LogStreamingManager.ts Normal file
@@ -0,0 +1,776 @@
/*
 * This file is part of a proprietary work.
 *
 * Copyright (c) 2025 Fossorial, Inc.
 * All rights reserved.
 *
 * This file is licensed under the Fossorial Commercial License.
 * You may not use this file except in compliance with the License.
 * Unauthorized use, copying, modification, or distribution is strictly prohibited.
 *
 * This file is not licensed under the AGPLv3.
 */

import {
    db,
    logsDb,
    eventStreamingDestinations,
    eventStreamingCursors,
    requestAuditLog,
    actionAuditLog,
    accessAuditLog,
    connectionAuditLog
} from "@server/db";
import logger from "@server/logger";
import { and, eq, gt, desc, max, sql } from "drizzle-orm";
import { decrypt } from "@server/lib/crypto";
import config from "@server/lib/config";
import {
    LogType,
    LOG_TYPES,
    LogEvent,
    DestinationFailureState,
    HttpConfig
} from "./types";
import { LogDestinationProvider } from "./providers/LogDestinationProvider";
import { HttpLogDestination } from "./providers/HttpLogDestination";
import type { EventStreamingDestination } from "@server/db";

// ---------------------------------------------------------------------------
// Configuration
// ---------------------------------------------------------------------------

/**
 * How often (ms) the manager polls all destinations for new log records.
 * Destinations that were behind (full batch returned) will be re-polled
 * immediately without waiting for this interval.
 */
const POLL_INTERVAL_MS = 30_000;

/**
 * Maximum number of log records fetched from the DB in a single query.
 * This also controls the maximum size of one HTTP POST body.
 */
const BATCH_SIZE = 250;

/**
 * Minimum delay (ms) between consecutive HTTP requests to the same destination
 * during a catch-up run. Prevents bursting thousands of requests back-to-back
 * when a destination has fallen behind.
 */
const INTER_BATCH_DELAY_MS = 100;

/**
 * Maximum number of consecutive back-to-back batches to process for a single
 * destination per poll cycle. After this limit the destination will wait for
 * the next scheduled poll before continuing, giving other destinations a turn.
 */
const MAX_CATCHUP_BATCHES = 20;

/**
 * Back-off schedule (ms) indexed by consecutive failure count.
 * After the last entry the max value is re-used.
 */
const BACKOFF_SCHEDULE_MS = [
    60_000, // 1 min (failure 1)
    2 * 60_000, // 2 min (failure 2)
    5 * 60_000, // 5 min (failure 3)
    10 * 60_000, // 10 min (failure 4)
    30 * 60_000 // 30 min (failure 5+)
];

/**
 * If a destination has been continuously unreachable for this long, its
 * cursors are advanced to the current max row id and the backlog is silently
 * discarded. This prevents unbounded queue growth when a webhook endpoint is
 * down for an extended period. A prominent warning is logged so operators are
 * aware logs were dropped.
 *
 * Default: 24 hours.
 */
const MAX_BACKLOG_DURATION_MS = 24 * 60 * 60_000;

// ---------------------------------------------------------------------------
// LogStreamingManager
// ---------------------------------------------------------------------------

/**
 * Orchestrates periodic polling of the four audit-log tables and forwards new
 * records to every enabled event-streaming destination.
 *
 * ### Design
 * - **Interval-based**: a timer fires every `POLL_INTERVAL_MS`. On each tick
 *   every enabled destination is processed in sequence.
 * - **Cursor-based**: the last successfully forwarded row `id` is persisted in
 *   the `eventStreamingCursors` table so state survives restarts.
 * - **Catch-up**: if a full batch is returned the destination is immediately
 *   re-queried (up to `MAX_CATCHUP_BATCHES` times) before yielding.
 * - **Smoothing**: `INTER_BATCH_DELAY_MS` is inserted between consecutive
 *   catch-up batches to avoid hammering the remote endpoint.
 * - **Back-off**: consecutive send failures trigger exponential back-off
 *   (tracked in-memory per destination). Successful sends reset the counter.
 * - **Backlog abandonment**: if a destination remains unreachable for longer
 *   than `MAX_BACKLOG_DURATION_MS`, all cursors for that destination are
 *   advanced to the current max id so the backlog is discarded and streaming
 *   resumes from the present moment on recovery.
 */
export class LogStreamingManager {
    private pollTimer: ReturnType<typeof setTimeout> | null = null;
    private isRunning = false;
    private isPolling = false;

    /** In-memory back-off state keyed by destinationId. */
    private readonly failures = new Map<number, DestinationFailureState>();

    // -------------------------------------------------------------------------
    // Lifecycle
    // -------------------------------------------------------------------------

    start(): void {
        if (this.isRunning) return;
        this.isRunning = true;
        logger.debug("LogStreamingManager: started");
        this.schedulePoll(POLL_INTERVAL_MS);
    }

    // -------------------------------------------------------------------------
    // Cursor initialisation (call this when a destination is first created)
    // -------------------------------------------------------------------------

    /**
     * Eagerly seed cursors for every log type at the **current** max row id of
     * each table, scoped to the destination's org.
     *
     * Call this immediately after inserting a new row into
     * `eventStreamingDestinations` so the destination only receives events
     * that were written *after* it was created. If a cursor row already exists
     * (e.g. the method is called twice) it is left untouched.
     *
     * The manager also has a lazy fallback inside `getOrCreateCursor` for
     * destinations that existed before this method was introduced.
     */
    async initializeCursorsForDestination(
        destinationId: number,
        orgId: string
    ): Promise<void> {
        for (const logType of LOG_TYPES) {
            const currentMaxId = await this.getCurrentMaxId(logType, orgId);
            try {
                await db
                    .insert(eventStreamingCursors)
                    .values({
                        destinationId,
                        logType,
                        lastSentId: currentMaxId,
                        lastSentAt: null
                    })
                    .onConflictDoNothing();
            } catch (err) {
                logger.warn(
                    `LogStreamingManager: could not initialise cursor for ` +
                        `destination ${destinationId} logType="${logType}"`,
                    err
                );
            }
        }

        logger.debug(
            `LogStreamingManager: cursors initialised for destination ${destinationId} ` +
                `(org=${orgId})`
        );
    }

    async shutdown(): Promise<void> {
        this.isRunning = false;
        if (this.pollTimer !== null) {
            clearTimeout(this.pollTimer);
            this.pollTimer = null;
        }
        // Wait for any in-progress poll to finish before returning so that
        // callers (graceful-shutdown handlers) can safely exit afterward.
        const deadline = Date.now() + 15_000;
        while (this.isPolling && Date.now() < deadline) {
            await sleep(100);
        }
        logger.info("LogStreamingManager: stopped");
    }

    // -------------------------------------------------------------------------
    // Scheduling
    // -------------------------------------------------------------------------

    private schedulePoll(delayMs: number): void {
        this.pollTimer = setTimeout(() => {
            this.pollTimer = null;
            this.runPoll()
                .catch((err) =>
                    logger.error("LogStreamingManager: unexpected poll error", err)
                )
                .finally(() => {
                    if (this.isRunning) {
                        this.schedulePoll(POLL_INTERVAL_MS);
                    }
                });
        }, delayMs);

        // Do not keep the event loop alive just for the poll timer – the
        // graceful-shutdown path calls shutdown() explicitly.
        this.pollTimer.unref?.();
    }

    // -------------------------------------------------------------------------
    // Poll cycle
    // -------------------------------------------------------------------------

    private async runPoll(): Promise<void> {
        if (this.isPolling) return; // previous poll still running – skip
        this.isPolling = true;

        try {
            const destinations = await this.loadEnabledDestinations();
            if (destinations.length === 0) return;

            for (const dest of destinations) {
                if (!this.isRunning) break;
                await this.processDestination(dest).catch((err) => {
                    // Individual destination errors must never abort the whole cycle
                    logger.error(
                        `LogStreamingManager: unhandled error for destination ${dest.destinationId}`,
                        err
                    );
                });
            }
        } finally {
            this.isPolling = false;
        }
    }

    // -------------------------------------------------------------------------
    // Per-destination processing
    // -------------------------------------------------------------------------

    private async processDestination(
        dest: EventStreamingDestination
    ): Promise<void> {
        const failState = this.failures.get(dest.destinationId);

        // Check whether this destination has been unreachable long enough that
        // we should give up on the accumulated backlog.
        if (failState) {
            const failingForMs = Date.now() - failState.firstFailedAt;
            if (failingForMs >= MAX_BACKLOG_DURATION_MS) {
                await this.abandonBacklog(dest, failState);
                this.failures.delete(dest.destinationId);
                // Cursors now point to the current head – retry on next poll.
                return;
            }
        }

        // Check regular exponential back-off window
        if (failState && Date.now() < failState.nextRetryAt) {
            logger.debug(
                `LogStreamingManager: destination ${dest.destinationId} in back-off, skipping`
            );
            return;
        }

        // Decrypt and parse config – skip destination if either step fails
        let configFromDb: HttpConfig;
        try {
            const decryptedConfig = decrypt(
                dest.config,
                config.getRawConfig().server.secret!
            );
            configFromDb = JSON.parse(decryptedConfig) as HttpConfig;
        } catch (err) {
            logger.error(
                `LogStreamingManager: destination ${dest.destinationId} has invalid or undecryptable config`,
                err
            );
            return;
        }

        const provider = this.createProvider(dest.type, configFromDb);
        if (!provider) {
            logger.warn(
                `LogStreamingManager: unsupported destination type "${dest.type}" ` +
                    `for destination ${dest.destinationId} – skipping`
            );
            return;
        }

        const enabledTypes: LogType[] = [];
        if (dest.sendRequestLogs) enabledTypes.push("request");
        if (dest.sendActionLogs) enabledTypes.push("action");
        if (dest.sendAccessLogs) enabledTypes.push("access");
        if (dest.sendConnectionLogs) enabledTypes.push("connection");

        if (enabledTypes.length === 0) return;

        let anyFailure = false;

        for (const logType of enabledTypes) {
            if (!this.isRunning) break;
            try {
                await this.processLogType(dest, provider, logType);
            } catch (err) {
                anyFailure = true;
                logger.error(
                    `LogStreamingManager: failed to process "${logType}" logs ` +
                        `for destination ${dest.destinationId}`,
                    err
                );
            }
        }

        if (anyFailure) {
            this.recordFailure(dest.destinationId);
        } else {
            // Any success resets the failure/back-off state
            if (this.failures.has(dest.destinationId)) {
                this.failures.delete(dest.destinationId);
                logger.info(
                    `LogStreamingManager: destination ${dest.destinationId} recovered`
                );
            }
        }
    }

    /**
     * Advance every cursor for the destination to the current max row id,
     * effectively discarding the accumulated backlog. Called when the
     * destination has been unreachable for longer than MAX_BACKLOG_DURATION_MS.
     */
    private async abandonBacklog(
        dest: EventStreamingDestination,
        failState: DestinationFailureState
    ): Promise<void> {
        const failingForHours = (
            (Date.now() - failState.firstFailedAt) /
            3_600_000
        ).toFixed(1);

        let totalDropped = 0;

        for (const logType of LOG_TYPES) {
            try {
                const currentMaxId = await this.getCurrentMaxId(
                    logType,
                    dest.orgId
                );

                // Find out how many rows are being skipped for this type
                const cursor = await db
                    .select({ lastSentId: eventStreamingCursors.lastSentId })
                    .from(eventStreamingCursors)
                    .where(
                        and(
                            eq(eventStreamingCursors.destinationId, dest.destinationId),
                            eq(eventStreamingCursors.logType, logType)
                        )
                    )
                    .limit(1);

                const prevId = cursor[0]?.lastSentId ?? currentMaxId;
                totalDropped += Math.max(0, currentMaxId - prevId);

                await this.updateCursor(
                    dest.destinationId,
                    logType,
                    currentMaxId
                );
            } catch (err) {
                logger.error(
                    `LogStreamingManager: failed to advance cursor for ` +
                        `destination ${dest.destinationId} logType="${logType}" ` +
                        `during backlog abandonment`,
                    err
                );
            }
        }

        logger.warn(
            `LogStreamingManager: destination ${dest.destinationId} has been ` +
                `unreachable for ${failingForHours}h ` +
                `(${failState.consecutiveFailures} consecutive failures). ` +
                `Discarding backlog of ~${totalDropped} log event(s) and ` +
                `resuming from the current position. ` +
                `Verify the destination URL and credentials.`
        );
    }

    /**
     * Forward all pending log records of a specific type for a destination.
     *
     * Fetches up to `BATCH_SIZE` records at a time. If the batch is full
     * (indicating more records may exist) it loops immediately, inserting a
     * short delay between consecutive requests to the remote endpoint.
     * The loop is capped at `MAX_CATCHUP_BATCHES` to keep the poll cycle
     * bounded.
     */
    private async processLogType(
        dest: EventStreamingDestination,
        provider: LogDestinationProvider,
        logType: LogType
    ): Promise<void> {
        // Ensure a cursor row exists (creates one pointing at the current max
        // id so we do not replay historical logs on first run)
        const cursor = await this.getOrCreateCursor(
            dest.destinationId,
            logType,
            dest.orgId
        );

        let lastSentId = cursor.lastSentId;
        let batchCount = 0;

        while (batchCount < MAX_CATCHUP_BATCHES) {
            const rows = await this.fetchLogs(
                logType,
                dest.orgId,
                lastSentId,
                BATCH_SIZE
            );

            if (rows.length === 0) break;

            const events = rows.map((row) =>
                this.rowToLogEvent(logType, row)
            );

            // Throws on failure – caught by the caller which applies back-off
            await provider.send(events);

            lastSentId = rows[rows.length - 1].id;
            await this.updateCursor(dest.destinationId, logType, lastSentId);

            batchCount++;

            if (rows.length < BATCH_SIZE) {
                // Partial batch means we have caught up
                break;
            }

            // Full batch – there are likely more records; pause briefly before
            // fetching the next batch to smooth out the HTTP request rate
            if (batchCount < MAX_CATCHUP_BATCHES) {
                await sleep(INTER_BATCH_DELAY_MS);
            }
        }
    }

    // -------------------------------------------------------------------------
    // Cursor management
    // -------------------------------------------------------------------------

    private async getOrCreateCursor(
        destinationId: number,
        logType: LogType,
        orgId: string
    ): Promise<{ lastSentId: number }> {
        // Try to read an existing cursor
        const existing = await db
            .select({
                lastSentId: eventStreamingCursors.lastSentId
            })
            .from(eventStreamingCursors)
            .where(
                and(
                    eq(eventStreamingCursors.destinationId, destinationId),
                    eq(eventStreamingCursors.logType, logType)
                )
            )
            .limit(1);

        if (existing.length > 0) {
            return { lastSentId: existing[0].lastSentId };
        }

        // No cursor yet – this destination pre-dates the eager initialisation
        // path (initializeCursorsForDestination). Seed at the current max id
        // so we do not replay historical logs.
        const initialId = await this.getCurrentMaxId(logType, orgId);

        // Use onConflictDoNothing in case of a rare race between two poll
        // cycles both hitting this branch simultaneously.
        await db
            .insert(eventStreamingCursors)
            .values({
                destinationId,
                logType,
                lastSentId: initialId,
                lastSentAt: null
            })
            .onConflictDoNothing();

        logger.debug(
            `LogStreamingManager: lazily initialised cursor for destination ${destinationId} ` +
                `logType="${logType}" at id=${initialId} ` +
                `(prefer initializeCursorsForDestination at creation time)`
        );

        return { lastSentId: initialId };
    }

    private async updateCursor(
        destinationId: number,
        logType: LogType,
        lastSentId: number
    ): Promise<void> {
        await db
            .update(eventStreamingCursors)
            .set({
                lastSentId,
                lastSentAt: Date.now()
            })
            .where(
                and(
                    eq(eventStreamingCursors.destinationId, destinationId),
                    eq(eventStreamingCursors.logType, logType)
                )
            );
    }

    /**
     * Returns the current maximum `id` in the given log table for the org.
     * Returns 0 when the table is empty.
     */
    private async getCurrentMaxId(
        logType: LogType,
        orgId: string
    ): Promise<number> {
        try {
            switch (logType) {
                case "request": {
                    const [row] = await logsDb
                        .select({ maxId: max(requestAuditLog.id) })
                        .from(requestAuditLog)
                        .where(eq(requestAuditLog.orgId, orgId));
                    return row?.maxId ?? 0;
                }
                case "action": {
                    const [row] = await logsDb
                        .select({ maxId: max(actionAuditLog.id) })
                        .from(actionAuditLog)
                        .where(eq(actionAuditLog.orgId, orgId));
                    return row?.maxId ?? 0;
                }
                case "access": {
                    const [row] = await logsDb
                        .select({ maxId: max(accessAuditLog.id) })
                        .from(accessAuditLog)
                        .where(eq(accessAuditLog.orgId, orgId));
                    return row?.maxId ?? 0;
                }
                case "connection": {
                    const [row] = await logsDb
                        .select({ maxId: max(connectionAuditLog.id) })
                        .from(connectionAuditLog)
                        .where(eq(connectionAuditLog.orgId, orgId));
                    return row?.maxId ?? 0;
                }
            }
        } catch (err) {
            logger.warn(
                `LogStreamingManager: could not determine current max id for ` +
                    `logType="${logType}", defaulting to 0`,
                err
            );
            return 0;
        }
    }

    // -------------------------------------------------------------------------
    // Log fetching
    // -------------------------------------------------------------------------

    /**
     * Fetch up to `limit` log rows with `id > afterId`, ordered by id ASC,
     * filtered to the given organisation.
     */
    private async fetchLogs(
        logType: LogType,
        orgId: string,
        afterId: number,
        limit: number
    ): Promise<Array<Record<string, unknown> & { id: number }>> {
        switch (logType) {
            case "request":
                return (await logsDb
                    .select()
                    .from(requestAuditLog)
                    .where(
                        and(
                            eq(requestAuditLog.orgId, orgId),
                            gt(requestAuditLog.id, afterId)
                        )
                    )
                    .orderBy(requestAuditLog.id)
                    .limit(limit)) as Array<
                    Record<string, unknown> & { id: number }
                >;

            case "action":
                return (await logsDb
                    .select()
                    .from(actionAuditLog)
                    .where(
                        and(
                            eq(actionAuditLog.orgId, orgId),
                            gt(actionAuditLog.id, afterId)
                        )
                    )
                    .orderBy(actionAuditLog.id)
                    .limit(limit)) as Array<
                    Record<string, unknown> & { id: number }
                >;

            case "access":
                return (await logsDb
                    .select()
                    .from(accessAuditLog)
                    .where(
                        and(
                            eq(accessAuditLog.orgId, orgId),
                            gt(accessAuditLog.id, afterId)
                        )
                    )
                    .orderBy(accessAuditLog.id)
                    .limit(limit)) as Array<
                    Record<string, unknown> & { id: number }
                >;

            case "connection":
                return (await logsDb
                    .select()
                    .from(connectionAuditLog)
                    .where(
                        and(
                            eq(connectionAuditLog.orgId, orgId),
                            gt(connectionAuditLog.id, afterId)
                        )
                    )
                    .orderBy(connectionAuditLog.id)
                    .limit(limit)) as Array<
                    Record<string, unknown> & { id: number }
                >;
        }
    }

    // -------------------------------------------------------------------------
    // Row → LogEvent conversion
    // -------------------------------------------------------------------------

    private rowToLogEvent(
        logType: LogType,
        row: Record<string, unknown> & { id: number }
    ): LogEvent {
        // Determine the epoch-seconds timestamp for this row type
        let timestamp: number;
        switch (logType) {
            case "request":
            case "action":
            case "access":
                timestamp =
                    typeof row.timestamp === "number" ? row.timestamp : 0;
                break;
            case "connection":
                timestamp =
                    typeof row.startedAt === "number" ? row.startedAt : 0;
                break;
        }

        const orgId =
            typeof row.orgId === "string" ? row.orgId : "";

        return {
            id: row.id,
            logType,
            orgId,
            timestamp,
            data: row as Record<string, unknown>
        };
    }

    // -------------------------------------------------------------------------
    // Provider factory
    // -------------------------------------------------------------------------

    /**
     * Instantiate the correct LogDestinationProvider for the given destination
     * type string. Returns `null` for unknown types.
     *
     * To add a new provider:
     *  1. Implement `LogDestinationProvider` in a new file under `providers/`
     *  2. Add a `case` here
     */
    private createProvider(
        type: string,
        config: unknown
    ): LogDestinationProvider | null {
        switch (type) {
            case "http":
                return new HttpLogDestination(config as HttpConfig);
            // Future providers:
            // case "datadog": return new DatadogLogDestination(config as DatadogConfig);
            default:
                return null;
        }
    }

    // -------------------------------------------------------------------------
    // Back-off tracking
    // -------------------------------------------------------------------------

    private recordFailure(destinationId: number): void {
        const current = this.failures.get(destinationId) ?? {
            consecutiveFailures: 0,
            nextRetryAt: 0,
            // Stamp the very first failure so we can measure total outage duration
            firstFailedAt: Date.now()
        };

        current.consecutiveFailures += 1;

        const scheduleIdx = Math.min(
            current.consecutiveFailures - 1,
            BACKOFF_SCHEDULE_MS.length - 1
        );
        const backoffMs = BACKOFF_SCHEDULE_MS[scheduleIdx];
        current.nextRetryAt = Date.now() + backoffMs;

        this.failures.set(destinationId, current);

        logger.warn(
            `LogStreamingManager: destination ${destinationId} failed ` +
                `(consecutive #${current.consecutiveFailures}), ` +
                `backing off for ${backoffMs / 1000}s`
        );
    }

    // -------------------------------------------------------------------------
    // DB helpers
    // -------------------------------------------------------------------------

    private async loadEnabledDestinations(): Promise<
        EventStreamingDestination[]
    > {
        try {
            return await db
                .select()
                .from(eventStreamingDestinations)
                .where(eq(eventStreamingDestinations.enabled, true));
        } catch (err) {
            logger.error(
                "LogStreamingManager: failed to load destinations",
                err
            );
            return [];
        }
    }
}

// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------

function sleep(ms: number): Promise<void> {
    return new Promise((resolve) => setTimeout(resolve, ms));
}
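For orientation, a sketch of the creation-time handshake the comments above describe: insert the destination row, then eagerly seed its cursors so only future events stream. The insert call and row shape are assumptions; only `initializeCursorsForDestination(destinationId, orgId)` is confirmed by this diff:

// Hypothetical route-handler fragment:
const [dest] = await db
    .insert(eventStreamingDestinations)
    .values({ orgId, type: "http", enabled: true, config: encryptedConfig }) // row shape assumed
    .returning();

// Seed cursors at the current max ids so the new destination does not
// replay historical logs on its first poll.
await logStreamingManager.initializeCursorsForDestination(
    dest.destinationId,
    orgId
);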
34 server/private/lib/logStreaming/index.ts Normal file
@@ -0,0 +1,34 @@
/*
 * This file is part of a proprietary work.
 *
 * Copyright (c) 2025 Fossorial, Inc.
 * All rights reserved.
 *
 * This file is licensed under the Fossorial Commercial License.
 * You may not use this file except in compliance with the License.
 * Unauthorized use, copying, modification, or distribution is strictly prohibited.
 *
 * This file is not licensed under the AGPLv3.
 */

import { build } from "@server/build";
import { LogStreamingManager } from "./LogStreamingManager";

/**
 * Module-level singleton. Importing this module is sufficient to start the
 * streaming manager – no explicit init call required by the caller.
 *
 * The manager registers a non-blocking timer (unref'd) so it will not keep
 * the Node.js event loop alive on its own. Call `logStreamingManager.shutdown()`
 * during graceful shutdown to drain any in-progress poll and release resources.
 */
export const logStreamingManager = new LogStreamingManager();

// This is handled separately in the saas build, so we don't want to start it here.
if (build != "saas") {
    logStreamingManager.start();
}

export { LogStreamingManager } from "./LogStreamingManager";
export type { LogDestinationProvider } from "./providers/LogDestinationProvider";
export { HttpLogDestination } from "./providers/HttpLogDestination";
export * from "./types";
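A consumer only needs the side-effectful import; a two-line sketch tying this module to the `cleanup()` handler shown at the top of this commit:

import { logStreamingManager } from "#private/lib/logStreaming"; // starts polling on import (non-saas builds)

// Later, during graceful shutdown:
await logStreamingManager.shutdown();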
322 server/private/lib/logStreaming/providers/HttpLogDestination.ts Normal file
@@ -0,0 +1,322 @@
/*
 * This file is part of a proprietary work.
 *
 * Copyright (c) 2025 Fossorial, Inc.
 * All rights reserved.
 *
 * This file is licensed under the Fossorial Commercial License.
 * You may not use this file except in compliance with the License.
 * Unauthorized use, copying, modification, or distribution is strictly prohibited.
 *
 * This file is not licensed under the AGPLv3.
 */

import logger from "@server/logger";
import { LogEvent, HttpConfig, PayloadFormat } from "../types";
import { LogDestinationProvider } from "./LogDestinationProvider";

// ---------------------------------------------------------------------------
// Constants
// ---------------------------------------------------------------------------

/** Maximum time (ms) to wait for a single HTTP response. */
const REQUEST_TIMEOUT_MS = 30_000;

/** Default payload format when none is specified in the config. */
const DEFAULT_FORMAT: PayloadFormat = "json_array";

// ---------------------------------------------------------------------------
// HttpLogDestination
// ---------------------------------------------------------------------------

/**
 * Forwards a batch of log events to an arbitrary HTTP endpoint via a single
 * POST request per batch.
 *
 * **Payload formats** (controlled by `config.format`):
 *
 * - `json_array` (default) — one POST per batch, body is a JSON array:
 *   ```json
 *   [
 *     { "event": "request", "timestamp": "2024-01-01T00:00:00.000Z", "data": { … } },
 *     …
 *   ]
 *   ```
 *   `Content-Type: application/json`
 *
 * - `ndjson` — one POST per batch, body is newline-delimited JSON (one object
 *   per line, no outer array). Required by Splunk HEC, Elastic/OpenSearch,
 *   and Grafana Loki:
 *   ```
 *   {"event":"request","timestamp":"…","data":{…}}
 *   {"event":"action","timestamp":"…","data":{…}}
 *   ```
 *   `Content-Type: application/x-ndjson`
 *
 * - `json_single` — one POST **per event**, body is a plain JSON object.
 *   Use only for endpoints that cannot handle batches at all.
 *
 * With a body template each event is rendered through the template before
 * serialisation. Template placeholders:
 * - `{{event}}` → the LogType string ("request", "action", etc.)
 * - `{{timestamp}}` → ISO-8601 UTC datetime string
 * - `{{data}}` → raw inline JSON object (**no surrounding quotes**)
 *
 * Example template:
 * ```
 * { "event": "{{event}}", "ts": "{{timestamp}}", "payload": {{data}} }
 * ```
 */
export class HttpLogDestination implements LogDestinationProvider {
    readonly type = "http";

    private readonly config: HttpConfig;

    constructor(config: HttpConfig) {
        this.config = config;
    }

    // -----------------------------------------------------------------------
    // LogDestinationProvider implementation
    // -----------------------------------------------------------------------

    async send(events: LogEvent[]): Promise<void> {
        if (events.length === 0) return;

        const format = this.config.format ?? DEFAULT_FORMAT;

        if (format === "json_single") {
            // One HTTP POST per event – send sequentially so a failure on one
            // event throws and lets the manager retry the whole batch from the
            // same cursor position.
            for (const event of events) {
                await this.postRequest(
                    this.buildSingleBody(event),
                    "application/json"
                );
            }
            return;
        }

        if (format === "ndjson") {
            const body = this.buildNdjsonBody(events);
            await this.postRequest(body, "application/x-ndjson");
            return;
        }

        // json_array (default)
        const body = JSON.stringify(this.buildArrayPayload(events));
        await this.postRequest(body, "application/json");
    }

    // -----------------------------------------------------------------------
    // Internal HTTP sender
    // -----------------------------------------------------------------------

    private async postRequest(
        body: string,
        contentType: string
    ): Promise<void> {
        const headers = this.buildHeaders(contentType);

        const controller = new AbortController();
        const timeoutHandle = setTimeout(
            () => controller.abort(),
            REQUEST_TIMEOUT_MS
        );

        let response: Response;
        try {
            response = await fetch(this.config.url, {
                method: "POST",
                headers,
                body,
                signal: controller.signal
            });
        } catch (err: unknown) {
            const isAbort =
                err instanceof Error && err.name === "AbortError";
            if (isAbort) {
                throw new Error(
                    `HttpLogDestination: request to "${this.config.url}" timed out after ${REQUEST_TIMEOUT_MS} ms`
                );
            }
            const msg = err instanceof Error ? err.message : String(err);
            throw new Error(
                `HttpLogDestination: request to "${this.config.url}" failed – ${msg}`
            );
        } finally {
            clearTimeout(timeoutHandle);
        }

        if (!response.ok) {
            // Try to include a snippet of the response body in the error so
            // operators can diagnose auth or schema rejections.
            let responseSnippet = "";
            try {
                const text = await response.text();
                responseSnippet = text.slice(0, 300);
            } catch {
                // ignore – best effort
            }

            throw new Error(
                `HttpLogDestination: server at "${this.config.url}" returned ` +
                    `HTTP ${response.status} ${response.statusText}` +
                    (responseSnippet ? ` – ${responseSnippet}` : "")
            );
        }
    }

    // -----------------------------------------------------------------------
    // Header construction
    // -----------------------------------------------------------------------

    private buildHeaders(contentType: string): Record<string, string> {
        const headers: Record<string, string> = {
            "Content-Type": contentType
        };

        // Authentication
        switch (this.config.authType) {
            case "bearer": {
                const token = this.config.bearerToken?.trim();
                if (token) {
                    headers["Authorization"] = `Bearer ${token}`;
                }
                break;
            }
            case "basic": {
                const creds = this.config.basicCredentials?.trim();
                if (creds) {
                    const encoded = Buffer.from(creds).toString("base64");
                    headers["Authorization"] = `Basic ${encoded}`;
                }
                break;
            }
            case "custom": {
                const name = this.config.customHeaderName?.trim();
                const value = this.config.customHeaderValue ?? "";
                if (name) {
                    headers[name] = value;
                }
                break;
            }
            case "none":
            default:
                // No Authorization header
                break;
        }

        // Additional static headers (user-defined; may override Content-Type
        // if the operator explicitly sets it, which is intentional).
        for (const { key, value } of this.config.headers ?? []) {
            const trimmedKey = key?.trim();
            if (trimmedKey) {
                headers[trimmedKey] = value ?? "";
            }
        }

        return headers;
    }

    // -----------------------------------------------------------------------
    // Payload construction
    // -----------------------------------------------------------------------

    /** Single default event object (no surrounding array). */
    private buildEventObject(event: LogEvent): unknown {
        if (this.config.useBodyTemplate && this.config.bodyTemplate?.trim()) {
            return this.renderTemplate(this.config.bodyTemplate!, event);
        }
        return {
            event: event.logType,
            timestamp: epochSecondsToIso(event.timestamp),
            data: event.data
        };
    }

    /** JSON array payload – used for `json_array` format. */
    private buildArrayPayload(events: LogEvent[]): unknown[] {
        return events.map((e) => this.buildEventObject(e));
    }

    /**
     * NDJSON payload – one JSON object per line, no outer array.
     * Each line must be a complete, valid JSON object.
     */
    private buildNdjsonBody(events: LogEvent[]): string {
        return events
            .map((e) => JSON.stringify(this.buildEventObject(e)))
            .join("\n");
    }

    /** Single-event body – used for `json_single` format. */
    private buildSingleBody(event: LogEvent): string {
        return JSON.stringify(this.buildEventObject(event));
    }

    /**
     * Render a single event through the body template.
     *
     * The three placeholder tokens are replaced in a specific order to avoid
     * accidental double-replacement:
     *
     * 1. `{{data}}` → raw JSON (may contain `{{` characters in values)
     * 2. `{{event}}` → safe string
     * 3. `{{timestamp}}` → safe ISO string
     *
     * If the rendered string is not valid JSON we fall back to returning it as
     * a plain string so the batch still makes it out and the operator can
     * inspect the template.
     */
    private renderTemplate(template: string, event: LogEvent): unknown {
        const isoTimestamp = epochSecondsToIso(event.timestamp);
        const dataJson = JSON.stringify(event.data);

        // Replace {{data}} first because its JSON value might legitimately
        // contain the substrings "{{event}}" or "{{timestamp}}" inside string
        // fields – those should NOT be re-expanded.
        const rendered = template
            .replace(/\{\{data\}\}/g, dataJson)
            .replace(/\{\{event\}\}/g, escapeJsonString(event.logType))
            .replace(
                /\{\{timestamp\}\}/g,
                escapeJsonString(isoTimestamp)
            );

        try {
            return JSON.parse(rendered);
        } catch {
            logger.warn(
                `HttpLogDestination: body template produced invalid JSON for ` +
                    `event type "${event.logType}" destined for "${this.config.url}". ` +
                    `Sending rendered template as a raw string. ` +
                    `Check your template syntax – specifically that {{data}} is ` +
                    `NOT wrapped in quotes.`
            );
            return rendered;
        }
    }
}

// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------

function epochSecondsToIso(epochSeconds: number): string {
    return new Date(epochSeconds * 1000).toISOString();
}

/**
 * Escape a string value so it can be safely substituted into the interior of
 * a JSON string literal (i.e. between existing `"` quotes in the template).
 * This prevents a crafted logType or timestamp from breaking out of its
 * string context in the rendered template.
 */
function escapeJsonString(value: string): string {
    // JSON.stringify produces `"<escaped>"` – strip the outer quotes.
    return JSON.stringify(value).slice(1, -1);
}
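To make the placeholder semantics concrete, a worked example of what one event renders to under the example template from the class comment; the event values are illustrative:

const event: LogEvent = {
    id: 17,
    logType: "request",
    orgId: "org_123",
    timestamp: 1735689600, // epoch seconds for 2025-01-01T00:00:00.000Z
    data: { path: "/api/v1/health", status: 200 }
};

// Template:  { "event": "{{event}}", "ts": "{{timestamp}}", "payload": {{data}} }
// Rendered:  { "event": "request", "ts": "2025-01-01T00:00:00.000Z", "payload": {"path":"/api/v1/health","status":200} }
// renderTemplate() then JSON.parse()s the rendered string back into an object.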
44 server/private/lib/logStreaming/providers/LogDestinationProvider.ts Normal file
@@ -0,0 +1,44 @@
/*
 * This file is part of a proprietary work.
 *
 * Copyright (c) 2025 Fossorial, Inc.
 * All rights reserved.
 *
 * This file is licensed under the Fossorial Commercial License.
 * You may not use this file except in compliance with the License.
 * Unauthorized use, copying, modification, or distribution is strictly prohibited.
 *
 * This file is not licensed under the AGPLv3.
 */

import { LogEvent } from "../types";

/**
 * Common interface that every log-forwarding backend must implement.
 *
 * Adding a new destination type (e.g. Datadog, Splunk, Kafka) is as simple as
 * creating a class that satisfies this interface and registering it inside
 * LogStreamingManager.createProvider().
 */
export interface LogDestinationProvider {
    /**
     * The string identifier that matches the `type` column in the
     * `eventStreamingDestinations` table (e.g. "http", "datadog").
     */
    readonly type: string;

    /**
     * Forward a batch of log events to the destination.
     *
     * Implementations should:
     * - Treat the call as atomic: either all events are accepted or an error
     *   is thrown so the caller can retry / back off.
     * - Respect the timeout contract expected by the manager (default 30 s).
     * - NOT swallow errors – the manager relies on thrown exceptions to track
     *   failure state and apply exponential back-off.
     *
     * @param events A non-empty array of normalised log events to forward.
     * @throws Any network, authentication, or serialisation error.
     */
    send(events: LogEvent[]): Promise<void>;
}
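As the interface comment suggests, a new backend is one small class plus a `case` in `createProvider()`. A hedged skeleton follows; the `DatadogConfig` shape is invented for illustration, though the intake URL and `DD-API-KEY` header match Datadog's public v2 logs API:

import { LogEvent } from "../types";
import { LogDestinationProvider } from "./LogDestinationProvider";

// Hypothetical config shape, not part of this diff.
interface DatadogConfig {
    apiKey: string;
    site: string; // e.g. "datadoghq.com"
}

export class DatadogLogDestination implements LogDestinationProvider {
    readonly type = "datadog";

    constructor(private readonly config: DatadogConfig) {}

    async send(events: LogEvent[]): Promise<void> {
        const res = await fetch(
            `https://http-intake.logs.${this.config.site}/api/v2/logs`,
            {
                method: "POST",
                headers: {
                    "Content-Type": "application/json",
                    "DD-API-KEY": this.config.apiKey
                },
                body: JSON.stringify(events)
            }
        );
        // Throw on failure so the manager records it and applies back-off.
        if (!res.ok) {
            throw new Error(`Datadog intake returned HTTP ${res.status}`);
        }
    }
}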
134 server/private/lib/logStreaming/types.ts Normal file
@@ -0,0 +1,134 @@
|
||||
/*
 * This file is part of a proprietary work.
 *
 * Copyright (c) 2025 Fossorial, Inc.
 * All rights reserved.
 *
 * This file is licensed under the Fossorial Commercial License.
 * You may not use this file except in compliance with the License.
 * Unauthorized use, copying, modification, or distribution is strictly prohibited.
 *
 * This file is not licensed under the AGPLv3.
 */

// ---------------------------------------------------------------------------
// Log type identifiers
// ---------------------------------------------------------------------------

export type LogType = "request" | "action" | "access" | "connection";

export const LOG_TYPES: LogType[] = [
    "request",
    "action",
    "access",
    "connection"
];

// ---------------------------------------------------------------------------
// A normalised event ready to be forwarded to a destination
// ---------------------------------------------------------------------------

export interface LogEvent {
    /** The auto-increment primary key from the source table */
    id: number;
    /** Which log table this event came from */
    logType: LogType;
    /** The organisation that owns this event */
    orgId: string;
    /** Unix epoch seconds – taken from the record's own timestamp field */
    timestamp: number;
    /** Full row data from the source table, serialised as a plain object */
    data: Record<string, unknown>;
}

// ---------------------------------------------------------------------------
// A batch of events destined for a single streaming target
// ---------------------------------------------------------------------------

export interface LogBatch {
    destinationId: number;
    logType: LogType;
    events: LogEvent[];
}

// ---------------------------------------------------------------------------
// HTTP destination configuration (mirrors HttpConfig in the UI component)
// ---------------------------------------------------------------------------

export type AuthType = "none" | "bearer" | "basic" | "custom";

/**
 * Controls how the batch of events is serialised into the HTTP request body.
 *
 * - `json_array` – `[{…}, {…}]` — default; one POST per batch wrapped in a
 *   JSON array. Works with most generic webhooks and Datadog.
 * - `ndjson` – `{…}\n{…}` — newline-delimited JSON, one object per
 *   line. Required by Splunk HEC, Elastic/OpenSearch, Loki.
 * - `json_single` – one HTTP POST per event, body is a plain JSON object.
 *   Use only for endpoints that cannot handle batches at all.
 */
export type PayloadFormat = "json_array" | "ndjson" | "json_single";

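To make the three formats concrete, here is an editorial sketch of the request bodies each one yields for the same batch (not code from the commit):

// Editorial sketch: returns one string per HTTP request body.
function serializeBatch(
    events: LogEvent[],
    format: PayloadFormat = "json_array"
): string[] {
    switch (format) {
        case "json_array":
            // One POST containing the whole batch: [{...},{...}]
            return [JSON.stringify(events)];
        case "ndjson":
            // One POST, one JSON object per line: {...}\n{...}
            return [events.map((e) => JSON.stringify(e)).join("\n")];
        case "json_single":
            // One POST per event, each body a bare object: {...}
            return events.map((e) => JSON.stringify(e));
    }
}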
export interface HttpConfig {
    /** Human-readable label for the destination */
    name: string;
    /** Target URL that will receive POST requests */
    url: string;
    /** Authentication strategy to use */
    authType: AuthType;
    /** Used when authType === "bearer" */
    bearerToken?: string;
    /** Used when authType === "basic" – must be "username:password" */
    basicCredentials?: string;
    /** Used when authType === "custom" – header name */
    customHeaderName?: string;
    /** Used when authType === "custom" – header value */
    customHeaderValue?: string;
    /** Additional static headers appended to every request */
    headers: Array<{ key: string; value: string }>;
    /**
     * How events are serialised into the request body.
     * Defaults to `"json_array"` when absent.
     */
    format?: PayloadFormat;
    /** Whether to render a custom body template instead of the default shape */
    useBodyTemplate: boolean;
    /**
     * Handlebars-style template for the JSON body of each event.
     *
     * Supported placeholders:
     *   {{event}}     – the LogType string ("request", "action", etc.)
     *   {{timestamp}} – ISO-8601 UTC string derived from the event's timestamp
     *   {{data}}      – raw JSON object (no surrounding quotes) of the full row
     *
     * Example:
     *   { "event": "{{event}}", "ts": "{{timestamp}}", "payload": {{data}} }
     */
    bodyTemplate?: string;
}
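An editorial sketch of how those placeholders could be substituted for one event, using the helpers shown earlier (the real rendering lives in HttpLogDestination and may differ):

// Editorial sketch, assuming escapeJsonString/epochSecondsToIso from above.
function renderBodyTemplate(template: string, event: LogEvent): string {
    return template
        .replaceAll("{{event}}", escapeJsonString(event.logType))
        .replaceAll(
            "{{timestamp}}",
            escapeJsonString(epochSecondsToIso(event.timestamp))
        )
        // {{data}} is a raw object, so it must NOT sit inside quotes.
        .replaceAll("{{data}}", JSON.stringify(event.data));
}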

// ---------------------------------------------------------------------------
// Per-destination per-log-type cursor (reflects the DB table)
// ---------------------------------------------------------------------------

export interface StreamingCursor {
    destinationId: number;
    logType: LogType;
    /** The `id` of the last row that was successfully forwarded */
    lastSentId: number;
    /** Epoch milliseconds of the last successful send (or null if never sent) */
    lastSentAt: number | null;
}

// ---------------------------------------------------------------------------
// In-memory failure / back-off state tracked per destination
// ---------------------------------------------------------------------------

export interface DestinationFailureState {
    /** How many consecutive send failures have occurred */
    consecutiveFailures: number;
    /** Date.now() value after which the destination may be retried */
    nextRetryAt: number;
    /** Date.now() value of the very first failure in the current streak */
    firstFailedAt: number;
}
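The failure state above is enough to drive a classic exponential back-off; a minimal editorial sketch (the 30 s base and 1 h cap are assumed values, not taken from the commit):

// Editorial sketch: exponential back-off driven by DestinationFailureState.
function recordSendFailure(
    state: DestinationFailureState,
    now: number = Date.now()
): void {
    if (state.consecutiveFailures === 0) {
        state.firstFailedAt = now;
    }
    state.consecutiveFailures += 1;
    // 30 s, 60 s, 120 s, ... capped at one hour (assumed constants).
    const delayMs = Math.min(
        30_000 * 2 ** (state.consecutiveFailures - 1),
        3_600_000
    );
    state.nextRetryAt = now + delayMs;
}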
@@ -57,7 +57,10 @@ export const privateConfigSchema = z.object({
        .object({
            host: z.string(),
            port: portSchema,
            password: z.string().optional(),
            password: z
                .string()
                .optional()
                .transform(getEnvOrYaml("REDIS_PASSWORD")),
            db: z.int().nonnegative().optional().default(0),
            replicas: z
                .array(

77 server/private/lib/tokenCache.ts Normal file
@@ -0,0 +1,77 @@
/*
 * This file is part of a proprietary work.
 *
 * Copyright (c) 2025 Fossorial, Inc.
 * All rights reserved.
 *
 * This file is licensed under the Fossorial Commercial License.
 * You may not use this file except in compliance with the License.
 * Unauthorized use, copying, modification, or distribution is strictly prohibited.
 *
 * This file is not licensed under the AGPLv3.
 */

import redisManager from "#private/lib/redis";
import { encrypt, decrypt } from "@server/lib/crypto";
import logger from "@server/logger";

/**
 * Returns a cached plaintext token from Redis if one exists and decrypts
 * cleanly, otherwise calls `createSession` to mint a fresh token, stores the
 * encrypted value in Redis with the given TTL, and returns it.
 *
 * Failures at the Redis layer are non-fatal – the function always falls
 * through to session creation so the caller is never blocked by a Redis outage.
 *
 * @param cacheKey      Unique Redis key, e.g. `"newt:token_cache:abc123"`
 * @param secret        Server secret used for AES encryption/decryption
 * @param ttlSeconds    Cache TTL in seconds (should match session expiry)
 * @param createSession Factory that mints a new session and returns its raw token
 */
export async function getOrCreateCachedToken(
    cacheKey: string,
    secret: string,
    ttlSeconds: number,
    createSession: () => Promise<string>
): Promise<string> {
    if (redisManager.isRedisEnabled()) {
        try {
            const cached = await redisManager.get(cacheKey);
            if (cached) {
                const token = decrypt(cached, secret);
                if (token) {
                    logger.debug(`Token cache hit for key: ${cacheKey}`);
                    return token;
                }
                // Decryption produced an empty string – treat as a miss
                logger.warn(
                    `Token cache decryption returned empty string for key: ${cacheKey}, treating as miss`
                );
            }
        } catch (e) {
            logger.warn(
                `Token cache read/decrypt failed for key ${cacheKey}, falling through to session creation:`,
                e
            );
        }
    }

    const token = await createSession();

    if (redisManager.isRedisEnabled()) {
        try {
            const encrypted = encrypt(token, secret);
            await redisManager.set(cacheKey, encrypted, ttlSeconds);
            logger.debug(
                `Token cached in Redis for key: ${cacheKey} (TTL ${ttlSeconds}s)`
            );
        } catch (e) {
            logger.warn(
                `Token cache write failed for key ${cacheKey} (session was still created):`,
                e
            );
        }
    }

    return token;
}
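A hedged usage sketch; the key shape, secret source, and session factory below are illustrative assumptions, not code from the commit:

// Editorial sketch of a call site.
async function getNewtToken(newtId: string, secret: string): Promise<string> {
    return getOrCreateCachedToken(
        `newt:token_cache:${newtId}`, // assumed key shape (see doc comment)
        secret,
        3600, // one hour; should match the session expiry
        async () => mintNewtSession(newtId) // hypothetical session factory
    );
}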
@@ -671,10 +671,7 @@ export async function getTraefikConfig(

        // TODO: HOW TO HANDLE ^^^^^^ BETTER
        const anySitesOnline = targets.some(
            (target) =>
                target.site.online ||
                target.site.type === "local" ||
                target.site.type === "wireguard"
            (target) => target.site.online
        );

        return (
@@ -802,10 +799,7 @@ export async function getTraefikConfig(
            servers: (() => {
                // Check if any sites are online
                const anySitesOnline = targets.some(
                    (target) =>
                        target.site.online ||
                        target.site.type === "local" ||
                        target.site.type === "wireguard"
                    (target) => target.site.online
                );

                return targets

@@ -13,9 +13,10 @@

import { Request, Response, NextFunction } from "express";
import { userOrgs, db, idp, idpOrg } from "@server/db";
import { and, eq, or } from "drizzle-orm";
import { and, eq } from "drizzle-orm";
import createHttpError from "http-errors";
import HttpCode from "@server/types/HttpCode";
import { getUserOrgRoleIds } from "@server/lib/userOrgRoles";

export async function verifyIdpAccess(
    req: Request,
@@ -84,8 +85,10 @@ export async function verifyIdpAccess(
            );
        }

        const userOrgRoleId = req.userOrg.roleId;
        req.userOrgRoleId = userOrgRoleId;
        req.userOrgRoleIds = await getUserOrgRoleIds(
            req.userOrg.userId,
            idpRes.idpOrg.orgId
        );

        return next();
    } catch (error) {

@@ -12,11 +12,12 @@
 */

import { Request, Response, NextFunction } from "express";
import { db, exitNodeOrgs, exitNodes, remoteExitNodes } from "@server/db";
import { sites, userOrgs, userSites, roleSites, roles } from "@server/db";
import { and, eq, or } from "drizzle-orm";
import { db, exitNodeOrgs, remoteExitNodes } from "@server/db";
import { userOrgs } from "@server/db";
import { and, eq } from "drizzle-orm";
import createHttpError from "http-errors";
import HttpCode from "@server/types/HttpCode";
import { getUserOrgRoleIds } from "@server/lib/userOrgRoles";

export async function verifyRemoteExitNodeAccess(
    req: Request,
@@ -103,8 +104,10 @@ export async function verifyRemoteExitNodeAccess(
            );
        }

        const userOrgRoleId = req.userOrg.roleId;
        req.userOrgRoleId = userOrgRoleId;
        req.userOrgRoleIds = await getUserOrgRoleIds(
            req.userOrg.userId,
            exitNodeOrg.orgId
        );

        return next();
    } catch (error) {

99 server/private/routers/auditLogs/exportConnectionAuditLog.ts Normal file
@@ -0,0 +1,99 @@
/*
 * This file is part of a proprietary work.
 *
 * Copyright (c) 2025 Fossorial, Inc.
 * All rights reserved.
 *
 * This file is licensed under the Fossorial Commercial License.
 * You may not use this file except in compliance with the License.
 * Unauthorized use, copying, modification, or distribution is strictly prohibited.
 *
 * This file is not licensed under the AGPLv3.
 */

import { registry } from "@server/openApi";
import { NextFunction } from "express";
import { Request, Response } from "express";
import { OpenAPITags } from "@server/openApi";
import createHttpError from "http-errors";
import HttpCode from "@server/types/HttpCode";
import { fromError } from "zod-validation-error";
import logger from "@server/logger";
import {
    queryConnectionAuditLogsParams,
    queryConnectionAuditLogsQuery,
    queryConnection,
    countConnectionQuery
} from "./queryConnectionAuditLog";
import { generateCSV } from "@server/routers/auditLogs/generateCSV";
import { MAX_EXPORT_LIMIT } from "@server/routers/auditLogs";

registry.registerPath({
    method: "get",
    path: "/org/{orgId}/logs/connection/export",
    description: "Export the connection audit log for an organization as CSV",
    tags: [OpenAPITags.Logs],
    request: {
        query: queryConnectionAuditLogsQuery,
        params: queryConnectionAuditLogsParams
    },
    responses: {}
});

export async function exportConnectionAuditLogs(
    req: Request,
    res: Response,
    next: NextFunction
): Promise<any> {
    try {
        const parsedQuery = queryConnectionAuditLogsQuery.safeParse(req.query);
        if (!parsedQuery.success) {
            return next(
                createHttpError(
                    HttpCode.BAD_REQUEST,
                    fromError(parsedQuery.error)
                )
            );
        }

        const parsedParams = queryConnectionAuditLogsParams.safeParse(req.params);
        if (!parsedParams.success) {
            return next(
                createHttpError(
                    HttpCode.BAD_REQUEST,
                    fromError(parsedParams.error)
                )
            );
        }

        const data = { ...parsedQuery.data, ...parsedParams.data };
        const [{ count }] = await countConnectionQuery(data);
        if (count > MAX_EXPORT_LIMIT) {
            return next(
                createHttpError(
                    HttpCode.BAD_REQUEST,
                    `Export limit exceeded. Your selection contains ${count} rows, but the maximum is ${MAX_EXPORT_LIMIT} rows. Please select a shorter time range to reduce the data.`
                )
            );
        }

        const baseQuery = queryConnection(data);

        const log = await baseQuery.limit(data.limit).offset(data.offset);

        const csvData = generateCSV(log);

        res.setHeader("Content-Type", "text/csv");
        res.setHeader(
            "Content-Disposition",
            `attachment; filename="connection-audit-logs-${data.orgId}-${Date.now()}.csv"`
        );

        return res.send(csvData);
    } catch (error) {
        logger.error(error);
        return next(
            createHttpError(HttpCode.INTERNAL_SERVER_ERROR, "An error occurred")
        );
    }
}
@@ -15,3 +15,5 @@ export * from "./queryActionAuditLog";
export * from "./exportActionAuditLog";
export * from "./queryAccessAuditLog";
export * from "./exportAccessAuditLog";
export * from "./queryConnectionAuditLog";
export * from "./exportConnectionAuditLog";

@@ -11,11 +11,11 @@
 * This file is not licensed under the AGPLv3.
 */

import { accessAuditLog, logsDb, resources, db, primaryDb } from "@server/db";
import { accessAuditLog, logsDb, resources, siteResources, db, primaryDb } from "@server/db";
import { registry } from "@server/openApi";
import { NextFunction } from "express";
import { Request, Response } from "express";
import { eq, gt, lt, and, count, desc, inArray } from "drizzle-orm";
import { eq, gt, lt, and, count, desc, inArray, isNull } from "drizzle-orm";
import { OpenAPITags } from "@server/openApi";
import { z } from "zod";
import createHttpError from "http-errors";
@@ -122,6 +122,7 @@ export function queryAccess(data: Q) {
            actorType: accessAuditLog.actorType,
            actorId: accessAuditLog.actorId,
            resourceId: accessAuditLog.resourceId,
            siteResourceId: accessAuditLog.siteResourceId,
            ip: accessAuditLog.ip,
            location: accessAuditLog.location,
            userAgent: accessAuditLog.userAgent,
@@ -136,37 +137,73 @@ export function queryAccess(data: Q) {
}

async function enrichWithResourceDetails(logs: Awaited<ReturnType<typeof queryAccess>>) {
    // If the logs database is the same as the main database, we can do a join.
    // Otherwise, we need to fetch resource details separately.
    const resourceIds = logs
        .map(log => log.resourceId)
        .filter((id): id is number => id !== null && id !== undefined);

    if (resourceIds.length === 0) {
    const siteResourceIds = logs
        .filter(log => log.resourceId == null && log.siteResourceId != null)
        .map(log => log.siteResourceId)
        .filter((id): id is number => id !== null && id !== undefined);

    if (resourceIds.length === 0 && siteResourceIds.length === 0) {
        return logs.map(log => ({ ...log, resourceName: null, resourceNiceId: null }));
    }

    // Fetch resource details from main database
    const resourceDetails = await primaryDb
        .select({
            resourceId: resources.resourceId,
            name: resources.name,
            niceId: resources.niceId
        })
        .from(resources)
        .where(inArray(resources.resourceId, resourceIds));
    const resourceMap = new Map<number, { name: string | null; niceId: string | null }>();

    // Create a map for quick lookup
    const resourceMap = new Map(
        resourceDetails.map(r => [r.resourceId, { name: r.name, niceId: r.niceId }])
    );
    if (resourceIds.length > 0) {
        const resourceDetails = await primaryDb
            .select({
                resourceId: resources.resourceId,
                name: resources.name,
                niceId: resources.niceId
            })
            .from(resources)
            .where(inArray(resources.resourceId, resourceIds));

        for (const r of resourceDetails) {
            resourceMap.set(r.resourceId, { name: r.name, niceId: r.niceId });
        }
    }

    const siteResourceMap = new Map<number, { name: string | null; niceId: string | null }>();

    if (siteResourceIds.length > 0) {
        const siteResourceDetails = await primaryDb
            .select({
                siteResourceId: siteResources.siteResourceId,
                name: siteResources.name,
                niceId: siteResources.niceId
            })
            .from(siteResources)
            .where(inArray(siteResources.siteResourceId, siteResourceIds));

        for (const r of siteResourceDetails) {
            siteResourceMap.set(r.siteResourceId, { name: r.name, niceId: r.niceId });
        }
    }

    // Enrich logs with resource details
    return logs.map(log => ({
        ...log,
        resourceName: log.resourceId ? resourceMap.get(log.resourceId)?.name ?? null : null,
        resourceNiceId: log.resourceId ? resourceMap.get(log.resourceId)?.niceId ?? null : null
    }));
    return logs.map(log => {
        if (log.resourceId != null) {
            const details = resourceMap.get(log.resourceId);
            return {
                ...log,
                resourceName: details?.name ?? null,
                resourceNiceId: details?.niceId ?? null
            };
        } else if (log.siteResourceId != null) {
            const details = siteResourceMap.get(log.siteResourceId);
            return {
                ...log,
                resourceId: log.siteResourceId,
                resourceName: details?.name ?? null,
                resourceNiceId: details?.niceId ?? null
            };
        }
        return { ...log, resourceName: null, resourceNiceId: null };
    });
}

export function countAccessQuery(data: Q) {
@@ -212,11 +249,23 @@ async function queryUniqueFilterAttributes(
        .from(accessAuditLog)
        .where(baseConditions);

    // Get unique siteResources (only for logs where resourceId is null)
    const uniqueSiteResources = await logsDb
        .selectDistinct({
            id: accessAuditLog.siteResourceId
        })
        .from(accessAuditLog)
        .where(and(baseConditions, isNull(accessAuditLog.resourceId)));

    // Fetch resource names from main database for the unique resource IDs
    const resourceIds = uniqueResources
        .map(row => row.id)
        .filter((id): id is number => id !== null);

    const siteResourceIds = uniqueSiteResources
        .map(row => row.id)
        .filter((id): id is number => id !== null);

    let resourcesWithNames: Array<{ id: number; name: string | null }> = [];

    if (resourceIds.length > 0) {
@@ -228,10 +277,31 @@ async function queryUniqueFilterAttributes(
            .from(resources)
            .where(inArray(resources.resourceId, resourceIds));

        resourcesWithNames = resourceDetails.map(r => ({
            id: r.resourceId,
            name: r.name
        }));
        resourcesWithNames = [
            ...resourcesWithNames,
            ...resourceDetails.map(r => ({
                id: r.resourceId,
                name: r.name
            }))
        ];
    }

    if (siteResourceIds.length > 0) {
        const siteResourceDetails = await primaryDb
            .select({
                siteResourceId: siteResources.siteResourceId,
                name: siteResources.name
            })
            .from(siteResources)
            .where(inArray(siteResources.siteResourceId, siteResourceIds));

        resourcesWithNames = [
            ...resourcesWithNames,
            ...siteResourceDetails.map(r => ({
                id: r.siteResourceId,
                name: r.name
            }))
        ];
    }

    return {

524 server/private/routers/auditLogs/queryConnectionAuditLog.ts Normal file
@@ -0,0 +1,524 @@
/*
 * This file is part of a proprietary work.
 *
 * Copyright (c) 2025 Fossorial, Inc.
 * All rights reserved.
 *
 * This file is licensed under the Fossorial Commercial License.
 * You may not use this file except in compliance with the License.
 * Unauthorized use, copying, modification, or distribution is strictly prohibited.
 *
 * This file is not licensed under the AGPLv3.
 */

import {
    connectionAuditLog,
    logsDb,
    siteResources,
    sites,
    clients,
    users,
    primaryDb
} from "@server/db";
import { registry } from "@server/openApi";
import { NextFunction } from "express";
import { Request, Response } from "express";
import { eq, gt, lt, and, count, desc, inArray } from "drizzle-orm";
import { OpenAPITags } from "@server/openApi";
import { z } from "zod";
import createHttpError from "http-errors";
import HttpCode from "@server/types/HttpCode";
import { fromError } from "zod-validation-error";
import { QueryConnectionAuditLogResponse } from "@server/routers/auditLogs/types";
import response from "@server/lib/response";
import logger from "@server/logger";
import { getSevenDaysAgo } from "@app/lib/getSevenDaysAgo";

export const queryConnectionAuditLogsQuery = z.object({
    // ISO string – just validate that it is a parseable date
    timeStart: z
        .string()
        .refine((val) => !isNaN(Date.parse(val)), {
            error: "timeStart must be a valid ISO date string"
        })
        .transform((val) => Math.floor(new Date(val).getTime() / 1000))
        .prefault(() => getSevenDaysAgo().toISOString())
        .openapi({
            type: "string",
            format: "date-time",
            description:
                "Start time as ISO date string (defaults to 7 days ago)"
        }),
    timeEnd: z
        .string()
        .refine((val) => !isNaN(Date.parse(val)), {
            error: "timeEnd must be a valid ISO date string"
        })
        .transform((val) => Math.floor(new Date(val).getTime() / 1000))
        .optional()
        .prefault(() => new Date().toISOString())
        .openapi({
            type: "string",
            format: "date-time",
            description:
                "End time as ISO date string (defaults to current time)"
        }),
    protocol: z.string().optional(),
    sourceAddr: z.string().optional(),
    destAddr: z.string().optional(),
    clientId: z
        .string()
        .optional()
        .transform(Number)
        .pipe(z.int().positive())
        .optional(),
    siteId: z
        .string()
        .optional()
        .transform(Number)
        .pipe(z.int().positive())
        .optional(),
    siteResourceId: z
        .string()
        .optional()
        .transform(Number)
        .pipe(z.int().positive())
        .optional(),
    userId: z.string().optional(),
    limit: z
        .string()
        .optional()
        .default("1000")
        .transform(Number)
        .pipe(z.int().positive()),
    offset: z
        .string()
        .optional()
        .default("0")
        .transform(Number)
        .pipe(z.int().nonnegative())
});

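Since Express delivers every query value as a string, the schema above funnels numeric filters through a transform/pipe chain; a standalone editorial illustration of that pattern (not part of the file):

// Editorial example: "42" -> 42; absent stays undefined; "abc" or "0" is rejected.
const positiveIntParam = z
    .string()
    .optional()
    .transform(Number)
    .pipe(z.int().positive())
    .optional();

positiveIntParam.parse("42");      // 42
positiveIntParam.parse(undefined); // undefined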
export const queryConnectionAuditLogsParams = z.object({
    orgId: z.string()
});

export const queryConnectionAuditLogsCombined =
    queryConnectionAuditLogsQuery.merge(queryConnectionAuditLogsParams);
type Q = z.infer<typeof queryConnectionAuditLogsCombined>;

function getWhere(data: Q) {
    return and(
        gt(connectionAuditLog.startedAt, data.timeStart),
        lt(connectionAuditLog.startedAt, data.timeEnd),
        eq(connectionAuditLog.orgId, data.orgId),
        data.protocol
            ? eq(connectionAuditLog.protocol, data.protocol)
            : undefined,
        data.sourceAddr
            ? eq(connectionAuditLog.sourceAddr, data.sourceAddr)
            : undefined,
        data.destAddr
            ? eq(connectionAuditLog.destAddr, data.destAddr)
            : undefined,
        data.clientId
            ? eq(connectionAuditLog.clientId, data.clientId)
            : undefined,
        data.siteId
            ? eq(connectionAuditLog.siteId, data.siteId)
            : undefined,
        data.siteResourceId
            ? eq(connectionAuditLog.siteResourceId, data.siteResourceId)
            : undefined,
        data.userId
            ? eq(connectionAuditLog.userId, data.userId)
            : undefined
    );
}

export function queryConnection(data: Q) {
    return logsDb
        .select({
            sessionId: connectionAuditLog.sessionId,
            siteResourceId: connectionAuditLog.siteResourceId,
            orgId: connectionAuditLog.orgId,
            siteId: connectionAuditLog.siteId,
            clientId: connectionAuditLog.clientId,
            userId: connectionAuditLog.userId,
            sourceAddr: connectionAuditLog.sourceAddr,
            destAddr: connectionAuditLog.destAddr,
            protocol: connectionAuditLog.protocol,
            startedAt: connectionAuditLog.startedAt,
            endedAt: connectionAuditLog.endedAt,
            bytesTx: connectionAuditLog.bytesTx,
            bytesRx: connectionAuditLog.bytesRx
        })
        .from(connectionAuditLog)
        .where(getWhere(data))
        .orderBy(
            desc(connectionAuditLog.startedAt),
            desc(connectionAuditLog.id)
        );
}

export function countConnectionQuery(data: Q) {
    const countQuery = logsDb
        .select({ count: count() })
        .from(connectionAuditLog)
        .where(getWhere(data));
    return countQuery;
}

async function enrichWithDetails(
    logs: Awaited<ReturnType<typeof queryConnection>>
) {
    // Collect unique IDs from logs
    const siteResourceIds = [
        ...new Set(
            logs
                .map((log) => log.siteResourceId)
                .filter((id): id is number => id !== null && id !== undefined)
        )
    ];
    const siteIds = [
        ...new Set(
            logs
                .map((log) => log.siteId)
                .filter((id): id is number => id !== null && id !== undefined)
        )
    ];
    const clientIds = [
        ...new Set(
            logs
                .map((log) => log.clientId)
                .filter((id): id is number => id !== null && id !== undefined)
        )
    ];
    const userIds = [
        ...new Set(
            logs
                .map((log) => log.userId)
                .filter((id): id is string => id !== null && id !== undefined)
        )
    ];

    // Fetch resource details from main database
    const resourceMap = new Map<
        number,
        { name: string; niceId: string }
    >();
    if (siteResourceIds.length > 0) {
        const resourceDetails = await primaryDb
            .select({
                siteResourceId: siteResources.siteResourceId,
                name: siteResources.name,
                niceId: siteResources.niceId
            })
            .from(siteResources)
            .where(inArray(siteResources.siteResourceId, siteResourceIds));

        for (const r of resourceDetails) {
            resourceMap.set(r.siteResourceId, {
                name: r.name,
                niceId: r.niceId
            });
        }
    }

    // Fetch site details from main database
    const siteMap = new Map<number, { name: string; niceId: string }>();
    if (siteIds.length > 0) {
        const siteDetails = await primaryDb
            .select({
                siteId: sites.siteId,
                name: sites.name,
                niceId: sites.niceId
            })
            .from(sites)
            .where(inArray(sites.siteId, siteIds));

        for (const s of siteDetails) {
            siteMap.set(s.siteId, { name: s.name, niceId: s.niceId });
        }
    }

    // Fetch client details from main database
    const clientMap = new Map<
        number,
        { name: string; niceId: string; type: string }
    >();
    if (clientIds.length > 0) {
        const clientDetails = await primaryDb
            .select({
                clientId: clients.clientId,
                name: clients.name,
                niceId: clients.niceId,
                type: clients.type
            })
            .from(clients)
            .where(inArray(clients.clientId, clientIds));

        for (const c of clientDetails) {
            clientMap.set(c.clientId, {
                name: c.name,
                niceId: c.niceId,
                type: c.type
            });
        }
    }

    // Fetch user details from main database
    const userMap = new Map<
        string,
        { email: string | null }
    >();
    if (userIds.length > 0) {
        const userDetails = await primaryDb
            .select({
                userId: users.userId,
                email: users.email
            })
            .from(users)
            .where(inArray(users.userId, userIds));

        for (const u of userDetails) {
            userMap.set(u.userId, { email: u.email });
        }
    }

    // Enrich logs with details
    return logs.map((log) => ({
        ...log,
        resourceName: log.siteResourceId
            ? resourceMap.get(log.siteResourceId)?.name ?? null
            : null,
        resourceNiceId: log.siteResourceId
            ? resourceMap.get(log.siteResourceId)?.niceId ?? null
            : null,
        siteName: log.siteId
            ? siteMap.get(log.siteId)?.name ?? null
            : null,
        siteNiceId: log.siteId
            ? siteMap.get(log.siteId)?.niceId ?? null
            : null,
        clientName: log.clientId
            ? clientMap.get(log.clientId)?.name ?? null
            : null,
        clientNiceId: log.clientId
            ? clientMap.get(log.clientId)?.niceId ?? null
            : null,
        clientType: log.clientId
            ? clientMap.get(log.clientId)?.type ?? null
            : null,
        userEmail: log.userId
            ? userMap.get(log.userId)?.email ?? null
            : null
    }));
}

async function queryUniqueFilterAttributes(
    timeStart: number,
    timeEnd: number,
    orgId: string
) {
    const baseConditions = and(
        gt(connectionAuditLog.startedAt, timeStart),
        lt(connectionAuditLog.startedAt, timeEnd),
        eq(connectionAuditLog.orgId, orgId)
    );

    // Get unique protocols
    const uniqueProtocols = await logsDb
        .selectDistinct({
            protocol: connectionAuditLog.protocol
        })
        .from(connectionAuditLog)
        .where(baseConditions);

    // Get unique destination addresses
    const uniqueDestAddrs = await logsDb
        .selectDistinct({
            destAddr: connectionAuditLog.destAddr
        })
        .from(connectionAuditLog)
        .where(baseConditions);

    // Get unique client IDs
    const uniqueClients = await logsDb
        .selectDistinct({
            clientId: connectionAuditLog.clientId
        })
        .from(connectionAuditLog)
        .where(baseConditions);

    // Get unique resource IDs
    const uniqueResources = await logsDb
        .selectDistinct({
            siteResourceId: connectionAuditLog.siteResourceId
        })
        .from(connectionAuditLog)
        .where(baseConditions);

    // Get unique user IDs
    const uniqueUsers = await logsDb
        .selectDistinct({
            userId: connectionAuditLog.userId
        })
        .from(connectionAuditLog)
        .where(baseConditions);

    // Enrich client IDs with names from main database
    const clientIds = uniqueClients
        .map((row) => row.clientId)
        .filter((id): id is number => id !== null);

    let clientsWithNames: Array<{ id: number; name: string }> = [];
    if (clientIds.length > 0) {
        const clientDetails = await primaryDb
            .select({
                clientId: clients.clientId,
                name: clients.name
            })
            .from(clients)
            .where(inArray(clients.clientId, clientIds));

        clientsWithNames = clientDetails.map((c) => ({
            id: c.clientId,
            name: c.name
        }));
    }

    // Enrich resource IDs with names from main database
    const resourceIds = uniqueResources
        .map((row) => row.siteResourceId)
        .filter((id): id is number => id !== null);

    let resourcesWithNames: Array<{ id: number; name: string | null }> = [];
    if (resourceIds.length > 0) {
        const resourceDetails = await primaryDb
            .select({
                siteResourceId: siteResources.siteResourceId,
                name: siteResources.name
            })
            .from(siteResources)
            .where(inArray(siteResources.siteResourceId, resourceIds));

        resourcesWithNames = resourceDetails.map((r) => ({
            id: r.siteResourceId,
            name: r.name
        }));
    }

    // Enrich user IDs with emails from main database
    const userIdsList = uniqueUsers
        .map((row) => row.userId)
        .filter((id): id is string => id !== null);

    let usersWithEmails: Array<{ id: string; email: string | null }> = [];
    if (userIdsList.length > 0) {
        const userDetails = await primaryDb
            .select({
                userId: users.userId,
                email: users.email
            })
            .from(users)
            .where(inArray(users.userId, userIdsList));

        usersWithEmails = userDetails.map((u) => ({
            id: u.userId,
            email: u.email
        }));
    }

    return {
        protocols: uniqueProtocols
            .map((row) => row.protocol)
            .filter((protocol): protocol is string => protocol !== null),
        destAddrs: uniqueDestAddrs
            .map((row) => row.destAddr)
            .filter((addr): addr is string => addr !== null),
        clients: clientsWithNames,
        resources: resourcesWithNames,
        users: usersWithEmails
    };
}

registry.registerPath({
    method: "get",
    path: "/org/{orgId}/logs/connection",
    description: "Query the connection audit log for an organization",
    tags: [OpenAPITags.Logs],
    request: {
        query: queryConnectionAuditLogsQuery,
        params: queryConnectionAuditLogsParams
    },
    responses: {}
});

export async function queryConnectionAuditLogs(
    req: Request,
    res: Response,
    next: NextFunction
): Promise<any> {
    try {
        const parsedQuery = queryConnectionAuditLogsQuery.safeParse(req.query);
        if (!parsedQuery.success) {
            return next(
                createHttpError(
                    HttpCode.BAD_REQUEST,
                    fromError(parsedQuery.error)
                )
            );
        }
        const parsedParams = queryConnectionAuditLogsParams.safeParse(
            req.params
        );
        if (!parsedParams.success) {
            return next(
                createHttpError(
                    HttpCode.BAD_REQUEST,
                    fromError(parsedParams.error)
                )
            );
        }

        const data = { ...parsedQuery.data, ...parsedParams.data };

        const baseQuery = queryConnection(data);

        const logsRaw = await baseQuery.limit(data.limit).offset(data.offset);

        // Enrich with resource, site, client, and user details
        const log = await enrichWithDetails(logsRaw);

        const totalCountResult = await countConnectionQuery(data);
        const totalCount = totalCountResult[0].count;

        const filterAttributes = await queryUniqueFilterAttributes(
            data.timeStart,
            data.timeEnd,
            data.orgId
        );

        return response<QueryConnectionAuditLogResponse>(res, {
            data: {
                log: log,
                pagination: {
                    total: totalCount,
                    limit: data.limit,
                    offset: data.offset
                },
                filterAttributes
            },
            success: true,
            error: false,
            message: "Connection audit logs retrieved successfully",
            status: HttpCode.OK
        });
    } catch (error) {
        logger.error(error);
        return next(
            createHttpError(HttpCode.INTERNAL_SERVER_ERROR, "An error occurred")
        );
    }
}
@@ -26,9 +26,12 @@ import {
    orgs,
    resources,
    roles,
    siteResources
    siteResources,
    userOrgRoles,
    siteProvisioningKeyOrg,
    siteProvisioningKeys,
} from "@server/db";
import { eq } from "drizzle-orm";
import { and, eq } from "drizzle-orm";

/**
 * Get the maximum allowed retention days for a given tier
@@ -117,6 +120,18 @@ async function capRetentionDays(
        );
    }

    // Cap connection log retention if it exceeds the limit
    if (
        org.settingsLogRetentionDaysConnection !== null &&
        org.settingsLogRetentionDaysConnection > maxRetentionDays
    ) {
        updates.settingsLogRetentionDaysConnection = maxRetentionDays;
        needsUpdate = true;
        logger.info(
            `Capping connection log retention from ${org.settingsLogRetentionDaysConnection} to ${maxRetentionDays} days for org ${orgId}`
        );
    }

    // Apply updates if needed
    if (needsUpdate) {
        await db.update(orgs).set(updates).where(eq(orgs.orgId, orgId));
@@ -259,6 +274,10 @@ async function disableFeature(
            await disableActionLogs(orgId);
            break;

        case TierFeature.ConnectionLogs:
            await disableConnectionLogs(orgId);
            break;

        case TierFeature.RotateCredentials:
            await disableRotateCredentials(orgId);
            break;
@@ -291,6 +310,14 @@ async function disableFeature(
            await disableSshPam(orgId);
            break;

        case TierFeature.FullRbac:
            await disableFullRbac(orgId);
            break;

        case TierFeature.SiteProvisioningKeys:
            await disableSiteProvisioningKeys(orgId);
            break;

        default:
            logger.warn(
                `Unknown feature ${feature} for org ${orgId}, skipping`
@@ -326,6 +353,61 @@ async function disableSshPam(orgId: string): Promise<void> {
    );
}

async function disableFullRbac(orgId: string): Promise<void> {
    logger.info(`Disabled full RBAC for org ${orgId}`);
}

async function disableSiteProvisioningKeys(orgId: string): Promise<void> {
    const rows = await db
        .select({
            siteProvisioningKeyId:
                siteProvisioningKeyOrg.siteProvisioningKeyId
        })
        .from(siteProvisioningKeyOrg)
        .where(eq(siteProvisioningKeyOrg.orgId, orgId));

    for (const { siteProvisioningKeyId } of rows) {
        await db.transaction(async (trx) => {
            await trx
                .delete(siteProvisioningKeyOrg)
                .where(
                    and(
                        eq(
                            siteProvisioningKeyOrg.siteProvisioningKeyId,
                            siteProvisioningKeyId
                        ),
                        eq(siteProvisioningKeyOrg.orgId, orgId)
                    )
                );

            const remaining = await trx
                .select()
                .from(siteProvisioningKeyOrg)
                .where(
                    eq(
                        siteProvisioningKeyOrg.siteProvisioningKeyId,
                        siteProvisioningKeyId
                    )
                );

            if (remaining.length === 0) {
                await trx
                    .delete(siteProvisioningKeys)
                    .where(
                        eq(
                            siteProvisioningKeys.siteProvisioningKeyId,
                            siteProvisioningKeyId
                        )
                    );
            }
        });
    }

    logger.info(
        `Removed site provisioning keys for org ${orgId} after tier downgrade`
    );
}

async function disableLoginPageBranding(orgId: string): Promise<void> {
    const [existingBranding] = await db
        .select()
@@ -392,6 +474,15 @@ async function disableActionLogs(orgId: string): Promise<void> {
    logger.info(`Disabled action logs for org ${orgId}`);
}

async function disableConnectionLogs(orgId: string): Promise<void> {
    await db
        .update(orgs)
        .set({ settingsLogRetentionDaysConnection: 0 })
        .where(eq(orgs.orgId, orgId));

    logger.info(`Disabled connection logs for org ${orgId}`);
}

async function disableRotateCredentials(orgId: string): Promise<void> {}

async function disableMaintencePage(orgId: string): Promise<void> {

@@ -0,0 +1,143 @@
/*
 * This file is part of a proprietary work.
 *
 * Copyright (c) 2025 Fossorial, Inc.
 * All rights reserved.
 *
 * This file is licensed under the Fossorial Commercial License.
 * You may not use this file except in compliance with the License.
 * Unauthorized use, copying, modification, or distribution is strictly prohibited.
 *
 * This file is not licensed under the AGPLv3.
 */

import { Request, Response, NextFunction } from "express";
import { z } from "zod";
import { db } from "@server/db";
import { eventStreamingDestinations } from "@server/db";
import { logStreamingManager } from "#private/lib/logStreaming";
import response from "@server/lib/response";
import HttpCode from "@server/types/HttpCode";
import createHttpError from "http-errors";
import logger from "@server/logger";
import { fromError } from "zod-validation-error";
import { OpenAPITags, registry } from "@server/openApi";
import { encrypt } from "@server/lib/crypto";
import config from "@server/lib/config";

const paramsSchema = z.strictObject({
    orgId: z.string().nonempty()
});

const bodySchema = z.strictObject({
    type: z.string().nonempty(),
    config: z.string().nonempty(),
    enabled: z.boolean().optional().default(true),
    sendConnectionLogs: z.boolean().optional().default(false),
    sendRequestLogs: z.boolean().optional().default(false),
    sendActionLogs: z.boolean().optional().default(false),
    sendAccessLogs: z.boolean().optional().default(false)
});

export type CreateEventStreamingDestinationResponse = {
    destinationId: number;
};

registry.registerPath({
    method: "put",
    path: "/org/{orgId}/event-streaming-destination",
    description: "Create an event streaming destination for a specific organization.",
    tags: [OpenAPITags.Org],
    request: {
        params: paramsSchema,
        body: {
            content: {
                "application/json": {
                    schema: bodySchema
                }
            }
        }
    },
    responses: {}
});

export async function createEventStreamingDestination(
    req: Request,
    res: Response,
    next: NextFunction
): Promise<any> {
    try {
        const parsedParams = paramsSchema.safeParse(req.params);
        if (!parsedParams.success) {
            return next(
                createHttpError(
                    HttpCode.BAD_REQUEST,
                    fromError(parsedParams.error).toString()
                )
            );
        }

        const { orgId } = parsedParams.data;

        const parsedBody = bodySchema.safeParse(req.body);
        if (!parsedBody.success) {
            return next(
                createHttpError(
                    HttpCode.BAD_REQUEST,
                    fromError(parsedBody.error).toString()
                )
            );
        }

        const { type, config: configToSet, enabled } = parsedBody.data;

        const key = config.getRawConfig().server.secret!;
        const encryptedConfig = encrypt(configToSet, key);

        const now = Date.now();

        const [destination] = await db
            .insert(eventStreamingDestinations)
            .values({
                orgId,
                type,
                config: encryptedConfig,
                enabled,
                createdAt: now,
                updatedAt: now,
                sendAccessLogs: parsedBody.data.sendAccessLogs,
                sendActionLogs: parsedBody.data.sendActionLogs,
                sendConnectionLogs: parsedBody.data.sendConnectionLogs,
                sendRequestLogs: parsedBody.data.sendRequestLogs
            })
            .returning();

        // Seed cursors at the current max row id for every log type so this
        // destination only receives events written *after* it was created.
        // Fire-and-forget: a failure here is non-fatal; the manager has a lazy
        // fallback that will seed at the next poll if these rows are missing.
        logStreamingManager
            .initializeCursorsForDestination(destination.destinationId, orgId)
            .catch((err) =>
                logger.error(
                    "createEventStreamingDestination: failed to initialise streaming cursors",
                    err
                )
            );

        return response<CreateEventStreamingDestinationResponse>(res, {
            data: {
                destinationId: destination.destinationId
            },
            success: true,
            error: false,
            message: "Event streaming destination created successfully",
            status: HttpCode.CREATED
        });
    } catch (error) {
        logger.error(error);
        return next(
            createHttpError(HttpCode.INTERNAL_SERVER_ERROR, "An error occurred")
        );
    }
}
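For orientation, a request against this route could look like the following editorial sketch (host, API prefix, and token are placeholders; the config field is a stringified HttpConfig as defined in types.ts):

// Editorial sketch; field shapes follow paramsSchema/bodySchema above.
await fetch(
    "https://pangolin.example.com/api/v1/org/my-org/event-streaming-destination",
    {
        method: "PUT",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({
            type: "http",
            config: JSON.stringify({
                name: "SIEM webhook",
                url: "https://siem.example.com/ingest",
                authType: "bearer",
                bearerToken: "<token>",
                headers: [],
                useBodyTemplate: false
            }),
            sendRequestLogs: true,
            sendAccessLogs: true
        })
    }
);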
@@ -0,0 +1,103 @@
/*
 * This file is part of a proprietary work.
 *
 * Copyright (c) 2025 Fossorial, Inc.
 * All rights reserved.
 *
 * This file is licensed under the Fossorial Commercial License.
 * You may not use this file except in compliance with the License.
 * Unauthorized use, copying, modification, or distribution is strictly prohibited.
 *
 * This file is not licensed under the AGPLv3.
 */

import { Request, Response, NextFunction } from "express";
import { z } from "zod";
import { db } from "@server/db";
import { eventStreamingDestinations } from "@server/db";
import response from "@server/lib/response";
import HttpCode from "@server/types/HttpCode";
import createHttpError from "http-errors";
import logger from "@server/logger";
import { fromError } from "zod-validation-error";
import { OpenAPITags, registry } from "@server/openApi";
import { and, eq } from "drizzle-orm";

const paramsSchema = z
    .object({
        orgId: z.string().nonempty(),
        destinationId: z.coerce.number<number>()
    })
    .strict();

registry.registerPath({
    method: "delete",
    path: "/org/{orgId}/event-streaming-destination/{destinationId}",
    description: "Delete an event streaming destination for a specific organization.",
    tags: [OpenAPITags.Org],
    request: {
        params: paramsSchema
    },
    responses: {}
});

export async function deleteEventStreamingDestination(
    req: Request,
    res: Response,
    next: NextFunction
): Promise<any> {
    try {
        const parsedParams = paramsSchema.safeParse(req.params);
        if (!parsedParams.success) {
            return next(
                createHttpError(
                    HttpCode.BAD_REQUEST,
                    fromError(parsedParams.error).toString()
                )
            );
        }

        const { orgId, destinationId } = parsedParams.data;

        const [existing] = await db
            .select()
            .from(eventStreamingDestinations)
            .where(
                and(
                    eq(eventStreamingDestinations.destinationId, destinationId),
                    eq(eventStreamingDestinations.orgId, orgId)
                )
            );

        if (!existing) {
            return next(
                createHttpError(
                    HttpCode.NOT_FOUND,
                    "Event streaming destination not found"
                )
            );
        }

        await db
            .delete(eventStreamingDestinations)
            .where(
                and(
                    eq(eventStreamingDestinations.destinationId, destinationId),
                    eq(eventStreamingDestinations.orgId, orgId)
                )
            );

        return response<null>(res, {
            data: null,
            success: true,
            error: false,
            message: "Event streaming destination deleted successfully",
            status: HttpCode.OK
        });
    } catch (error) {
        logger.error(error);
        return next(
            createHttpError(HttpCode.INTERNAL_SERVER_ERROR, "An error occurred")
        );
    }
}
17 server/private/routers/eventStreamingDestination/index.ts Normal file
@@ -0,0 +1,17 @@
/*
 * This file is part of a proprietary work.
 *
 * Copyright (c) 2025 Fossorial, Inc.
 * All rights reserved.
 *
 * This file is licensed under the Fossorial Commercial License.
 * You may not use this file except in compliance with the License.
 * Unauthorized use, copying, modification, or distribution is strictly prohibited.
 *
 * This file is not licensed under the AGPLv3.
 */

export * from "./createEventStreamingDestination";
export * from "./updateEventStreamingDestination";
export * from "./deleteEventStreamingDestination";
export * from "./listEventStreamingDestinations";
@@ -0,0 +1,159 @@
/*
 * This file is part of a proprietary work.
 *
 * Copyright (c) 2025 Fossorial, Inc.
 * All rights reserved.
 *
 * This file is licensed under the Fossorial Commercial License.
 * You may not use this file except in compliance with the License.
 * Unauthorized use, copying, modification, or distribution is strictly prohibited.
 *
 * This file is not licensed under the AGPLv3.
 */

import { Request, Response, NextFunction } from "express";
import { z } from "zod";
import { db } from "@server/db";
import { eventStreamingDestinations } from "@server/db";
import response from "@server/lib/response";
import HttpCode from "@server/types/HttpCode";
import createHttpError from "http-errors";
import logger from "@server/logger";
import { fromError } from "zod-validation-error";
import { OpenAPITags, registry } from "@server/openApi";
import { eq, sql } from "drizzle-orm";
import { decrypt } from "@server/lib/crypto";
import config from "@server/lib/config";

const paramsSchema = z.strictObject({
    orgId: z.string().nonempty()
});

const querySchema = z.strictObject({
    limit: z
        .string()
        .optional()
        .default("1000")
        .transform(Number)
        .pipe(z.int().nonnegative()),
    offset: z
        .string()
        .optional()
        .default("0")
        .transform(Number)
        .pipe(z.int().nonnegative())
});

export type ListEventStreamingDestinationsResponse = {
    destinations: {
        destinationId: number;
        orgId: string;
        type: string;
        config: string;
        enabled: boolean;
        createdAt: number;
        updatedAt: number;
        sendConnectionLogs: boolean;
        sendRequestLogs: boolean;
        sendActionLogs: boolean;
        sendAccessLogs: boolean;
    }[];
    pagination: {
        total: number;
        limit: number;
        offset: number;
    };
};

async function query(orgId: string, limit: number, offset: number) {
    const res = await db
        .select()
        .from(eventStreamingDestinations)
        .where(eq(eventStreamingDestinations.orgId, orgId))
        .orderBy(sql`${eventStreamingDestinations.createdAt} DESC`)
        .limit(limit)
        .offset(offset);
    return res;
}

registry.registerPath({
    method: "get",
    path: "/org/{orgId}/event-streaming-destination",
    description: "List all event streaming destinations for a specific organization.",
    tags: [OpenAPITags.Org],
    request: {
        query: querySchema,
        params: paramsSchema
    },
    responses: {}
});

export async function listEventStreamingDestinations(
    req: Request,
    res: Response,
    next: NextFunction
): Promise<any> {
    try {
        const parsedParams = paramsSchema.safeParse(req.params);
        if (!parsedParams.success) {
            return next(
                createHttpError(
                    HttpCode.BAD_REQUEST,
                    fromError(parsedParams.error).toString()
                )
            );
        }
        const { orgId } = parsedParams.data;

        const parsedQuery = querySchema.safeParse(req.query);
        if (!parsedQuery.success) {
            return next(
                createHttpError(
                    HttpCode.BAD_REQUEST,
                    fromError(parsedQuery.error).toString()
                )
            );
        }
        const { limit, offset } = parsedQuery.data;

        const list = await query(orgId, limit, offset);

        const [{ count }] = await db
            .select({ count: sql<number>`count(*)` })
            .from(eventStreamingDestinations)
            .where(eq(eventStreamingDestinations.orgId, orgId));

        const key = config.getRawConfig().server.secret!;
        const decryptedList = list.map((dest) => {
            try {
                return { ...dest, config: decrypt(dest.config, key) };
            } catch (err) {
                logger.error(
                    `listEventStreamingDestinations: failed to decrypt config for destination ${dest.destinationId}`,
                    err
                );
                return { ...dest, config: "" };
            }
        });

        return response<ListEventStreamingDestinationsResponse>(res, {
            data: {
                destinations: decryptedList,
                pagination: {
                    total: count,
                    limit,
                    offset
                }
            },
            success: true,
            error: false,
            message: "Event streaming destinations retrieved successfully",
            status: HttpCode.OK
        });
    } catch (error) {
        logger.error(error);
        return next(
            createHttpError(HttpCode.INTERNAL_SERVER_ERROR, "An error occurred")
        );
    }
}
@@ -0,0 +1,157 @@
|
||||
/*
|
||||
* This file is part of a proprietary work.
|
||||
*
|
||||
* Copyright (c) 2025 Fossorial, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This file is licensed under the Fossorial Commercial License.
|
||||
* You may not use this file except in compliance with the License.
|
||||
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
|
||||
*
|
||||
* This file is not licensed under the AGPLv3.
|
||||
*/
|
||||
|
||||
import { Request, Response, NextFunction } from "express";
|
||||
import { z } from "zod";
|
||||
import { db } from "@server/db";
|
||||
import { eventStreamingDestinations } from "@server/db";
|
||||
import response from "@server/lib/response";
|
||||
import HttpCode from "@server/types/HttpCode";
|
||||
import createHttpError from "http-errors";
|
||||
import logger from "@server/logger";
|
||||
import { fromError } from "zod-validation-error";
|
||||
import { OpenAPITags, registry } from "@server/openApi";
|
||||
import { and, eq } from "drizzle-orm";
|
||||
import { encrypt } from "@server/lib/crypto";
|
||||
import config from "@server/lib/config";
|
||||
|
||||
const paramsSchema = z
|
||||
.object({
|
||||
orgId: z.string().nonempty(),
|
||||
destinationId: z.coerce.number<number>()
|
||||
})
|
||||
.strict();
|
||||
|
||||
const bodySchema = z.strictObject({
|
||||
type: z.string().optional(),
|
||||
config: z.string().optional(),
|
||||
enabled: z.boolean().optional(),
|
||||
sendConnectionLogs: z.boolean().optional(),
|
||||
sendRequestLogs: z.boolean().optional(),
|
||||
sendActionLogs: z.boolean().optional(),
|
||||
sendAccessLogs: z.boolean().optional()
|
||||
});
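
Editor's note: every field in `bodySchema` is optional, so the endpoint accepts sparse PATCH-style payloads, while `strictObject` still rejects unknown keys. A quick sketch of that behavior, assuming zod is available:

    import { z } from "zod";

    // Mirror of the update body: all fields optional, unknown keys rejected.
    const sketchBody = z.strictObject({
        type: z.string().optional(),
        enabled: z.boolean().optional()
    });

    console.log(sketchBody.safeParse({ enabled: true }).success); // true
    console.log(sketchBody.safeParse({}).success);                // true (no-op update)
    console.log(sketchBody.safeParse({ bogus: 1 }).success);      // false (strict)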

export type UpdateEventStreamingDestinationResponse = {
    destinationId: number;
};

registry.registerPath({
    method: "post",
    path: "/org/{orgId}/event-streaming-destination/{destinationId}",
    description:
        "Update an event streaming destination for a specific organization.",
    tags: [OpenAPITags.Org],
    request: {
        params: paramsSchema,
        body: {
            content: {
                "application/json": {
                    schema: bodySchema
                }
            }
        }
    },
    responses: {}
});

export async function updateEventStreamingDestination(
    req: Request,
    res: Response,
    next: NextFunction
): Promise<any> {
    try {
        const parsedParams = paramsSchema.safeParse(req.params);
        if (!parsedParams.success) {
            return next(
                createHttpError(
                    HttpCode.BAD_REQUEST,
                    fromError(parsedParams.error).toString()
                )
            );
        }

        const { orgId, destinationId } = parsedParams.data;

        const parsedBody = bodySchema.safeParse(req.body);
        if (!parsedBody.success) {
            return next(
                createHttpError(
                    HttpCode.BAD_REQUEST,
                    fromError(parsedBody.error).toString()
                )
            );
        }

        const [existing] = await db
            .select()
            .from(eventStreamingDestinations)
            .where(
                and(
                    eq(eventStreamingDestinations.destinationId, destinationId),
                    eq(eventStreamingDestinations.orgId, orgId)
                )
            );

        if (!existing) {
            return next(
                createHttpError(
                    HttpCode.NOT_FOUND,
                    "Event streaming destination not found"
                )
            );
        }

        const {
            type,
            config: configToUpdate,
            enabled,
            sendAccessLogs,
            sendActionLogs,
            sendConnectionLogs,
            sendRequestLogs
        } = parsedBody.data;

        const updateData: Record<string, unknown> = {
            updatedAt: Date.now()
        };

        if (type !== undefined) updateData.type = type;
        if (configToUpdate !== undefined) {
            const key = config.getRawConfig().server.secret!;
            updateData.config = encrypt(configToUpdate, key);
        }
        if (enabled !== undefined) updateData.enabled = enabled;
        if (sendAccessLogs !== undefined) updateData.sendAccessLogs = sendAccessLogs;
        if (sendActionLogs !== undefined) updateData.sendActionLogs = sendActionLogs;
        if (sendConnectionLogs !== undefined) updateData.sendConnectionLogs = sendConnectionLogs;
        if (sendRequestLogs !== undefined) updateData.sendRequestLogs = sendRequestLogs;

        await db
            .update(eventStreamingDestinations)
            .set(updateData)
            .where(
                and(
                    eq(eventStreamingDestinations.destinationId, destinationId),
                    eq(eventStreamingDestinations.orgId, orgId)
                )
            );


        return response<UpdateEventStreamingDestinationResponse>(res, {
            data: {
                destinationId
            },
            success: true,
            error: false,
            message: "Event streaming destination updated successfully",
            status: HttpCode.OK
        });
    } catch (error) {
        logger.error(error);
        return next(
            createHttpError(HttpCode.INTERNAL_SERVER_ERROR, "An error occurred")
        );
    }
}
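
Editor's note: the handler builds `updateData` incrementally so only fields present in the request are written, and the secret `config` value is encrypted before it touches the database. A compact sketch of the same pattern, with a placeholder `encrypt` (hypothetical, not the real @server/lib/crypto helper):

    // Sketch: fold optional PATCH fields into a single update object.
    const encrypt = (plaintext: string, key: string) =>
        `enc(${key.length}):${plaintext}`; // placeholder, not real crypto

    interface UpdateBody {
        type?: string;
        config?: string;
        enabled?: boolean;
    }

    function buildUpdate(body: UpdateBody, key: string): Record<string, unknown> {
        const updateData: Record<string, unknown> = { updatedAt: Date.now() };
        if (body.type !== undefined) updateData.type = body.type;
        if (body.config !== undefined) updateData.config = encrypt(body.config, key);
        if (body.enabled !== undefined) updateData.enabled = body.enabled;
        return updateData;
    }

    // buildUpdate({ enabled: false }, "k") → { updatedAt: ..., enabled: false }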
@@ -26,6 +26,9 @@ import * as misc from "#private/routers/misc";
import * as reKey from "#private/routers/re-key";
import * as approval from "#private/routers/approvals";
import * as ssh from "#private/routers/ssh";
import * as user from "#private/routers/user";
import * as siteProvisioning from "#private/routers/siteProvisioning";
import * as eventStreamingDestination from "#private/routers/eventStreamingDestination";

import {
    verifyOrgAccess,
@@ -33,7 +36,11 @@ import {
    verifyUserIsServerAdmin,
    verifySiteAccess,
    verifyClientAccess,
    verifyLimits
    verifyLimits,
    verifyRoleAccess,
    verifyUserAccess,
    verifyUserCanSetUserOrgRoles,
    verifySiteProvisioningKeyAccess
} from "@server/middlewares";
import { ActionsEnum } from "@server/auth/actions";
import {
@@ -478,6 +485,25 @@ authenticated.get(
    logs.exportAccessAuditLogs
);

authenticated.get(
    "/org/:orgId/logs/connection",
    verifyValidLicense,
    verifyValidSubscription(tierMatrix.connectionLogs),
    verifyOrgAccess,
    verifyUserHasAction(ActionsEnum.exportLogs),
    logs.queryConnectionAuditLogs
);

authenticated.get(
    "/org/:orgId/logs/connection/export",
    verifyValidLicense,
    verifyValidSubscription(tierMatrix.logExport),
    verifyOrgAccess,
    verifyUserHasAction(ActionsEnum.exportLogs),
    logActionAudit(ActionsEnum.exportLogs),
    logs.exportConnectionAuditLogs
);

authenticated.post(
    "/re-key/:clientId/regenerate-client-secret",
    verifyClientAccess, // this is first to set the org id
@@ -518,3 +544,111 @@ authenticated.post(
    // logActionAudit(ActionsEnum.signSshKey), // it is handled inside of the function below so we can include more metadata
    ssh.signSshKey
);

authenticated.post(
    "/user/:userId/add-role/:roleId",
    verifyRoleAccess,
    verifyUserAccess,
    verifyLimits,
    verifyUserHasAction(ActionsEnum.addUserRole),
    logActionAudit(ActionsEnum.addUserRole),
    user.addUserRole
);

authenticated.delete(
    "/user/:userId/remove-role/:roleId",
    verifyRoleAccess,
    verifyUserAccess,
    verifyLimits,
    verifyUserHasAction(ActionsEnum.removeUserRole),
    logActionAudit(ActionsEnum.removeUserRole),
    user.removeUserRole
);

authenticated.post(
    "/user/:userId/org/:orgId/roles",
    verifyOrgAccess,
    verifyUserAccess,
    verifyLimits,
    verifyUserCanSetUserOrgRoles(),
    logActionAudit(ActionsEnum.setUserOrgRoles),
    user.setUserOrgRoles
);

authenticated.put(
    "/org/:orgId/site-provisioning-key",
    verifyValidLicense,
    verifyValidSubscription(tierMatrix.siteProvisioningKeys),
    verifyOrgAccess,
    verifyLimits,
    verifyUserHasAction(ActionsEnum.createSiteProvisioningKey),
    logActionAudit(ActionsEnum.createSiteProvisioningKey),
    siteProvisioning.createSiteProvisioningKey
);

authenticated.get(
    "/org/:orgId/site-provisioning-keys",
    verifyValidLicense,
    verifyValidSubscription(tierMatrix.siteProvisioningKeys),
    verifyOrgAccess,
    verifyUserHasAction(ActionsEnum.listSiteProvisioningKeys),
    siteProvisioning.listSiteProvisioningKeys
);

authenticated.delete(
    "/org/:orgId/site-provisioning-key/:siteProvisioningKeyId",
    verifyValidLicense,
    verifyValidSubscription(tierMatrix.siteProvisioningKeys),
    verifyOrgAccess,
    verifySiteProvisioningKeyAccess,
    verifyUserHasAction(ActionsEnum.deleteSiteProvisioningKey),
    logActionAudit(ActionsEnum.deleteSiteProvisioningKey),
    siteProvisioning.deleteSiteProvisioningKey
);

authenticated.patch(
    "/org/:orgId/site-provisioning-key/:siteProvisioningKeyId",
    verifyValidLicense,
    verifyValidSubscription(tierMatrix.siteProvisioningKeys),
    verifyOrgAccess,
    verifySiteProvisioningKeyAccess,
    verifyUserHasAction(ActionsEnum.updateSiteProvisioningKey),
    logActionAudit(ActionsEnum.updateSiteProvisioningKey),
    siteProvisioning.updateSiteProvisioningKey
);

authenticated.put(
    "/org/:orgId/event-streaming-destination",
    verifyValidLicense,
    verifyOrgAccess,
    verifyLimits,
    verifyUserHasAction(ActionsEnum.createEventStreamingDestination),
    logActionAudit(ActionsEnum.createEventStreamingDestination),
    eventStreamingDestination.createEventStreamingDestination
);

authenticated.post(
    "/org/:orgId/event-streaming-destination/:destinationId",
    verifyValidLicense,
    verifyOrgAccess,
    verifyLimits,
    verifyUserHasAction(ActionsEnum.updateEventStreamingDestination),
    logActionAudit(ActionsEnum.updateEventStreamingDestination),
    eventStreamingDestination.updateEventStreamingDestination
);

authenticated.delete(
    "/org/:orgId/event-streaming-destination/:destinationId",
    verifyValidLicense,
    verifyOrgAccess,
    verifyUserHasAction(ActionsEnum.deleteEventStreamingDestination),
    logActionAudit(ActionsEnum.deleteEventStreamingDestination),
    eventStreamingDestination.deleteEventStreamingDestination
);

authenticated.get(
    "/org/:orgId/event-streaming-destinations",
    verifyOrgAccess,
    verifyUserHasAction(ActionsEnum.listEventStreamingDestinations),
    eventStreamingDestination.listEventStreamingDestinations
);
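
Editor's note: each route stacks the same guard order: license/subscription checks, then org-level access, then limits, then the fine-grained action check, then audit logging, and finally the handler. A minimal Express sketch of that layering; the middleware names below are illustrative stand-ins, not the project's real implementations:

    import express, { Request, Response, NextFunction } from "express";

    // Illustrative guards; each either calls next() or short-circuits.
    const requireLicense = (_req: Request, _res: Response, next: NextFunction) => next();
    const requireOrgAccess = (_req: Request, _res: Response, next: NextFunction) => next();
    const requireAction = (action: string) =>
        (_req: Request, res: Response, next: NextFunction) =>
            action ? next() : res.status(403).end();
    const auditAction = (action: string) =>
        (_req: Request, _res: Response, next: NextFunction) => {
            console.log(`audit: ${action}`);
            next();
        };

    const router = express.Router();

    // Same shape as the routes above: guards run left to right, handler last.
    router.get(
        "/org/:orgId/example",
        requireLicense,
        requireOrgAccess,
        requireAction("listExample"),
        auditAction("listExample"),
        (req: Request, res: Response) => res.json({ orgId: req.params.orgId })
    );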

@@ -15,6 +15,7 @@ import { verifySessionRemoteExitNodeMiddleware } from "#private/middlewares/veri
import { Router } from "express";
import {
    db,
    logsDb,
    exitNodes,
    Resource,
    ResourcePassword,
@@ -51,7 +52,9 @@ import {
    userOrgs,
    roleResources,
    userResources,
    resourceRules
    resourceRules,
    userOrgRoles,
    roles
} from "@server/db";
import { eq, and, inArray, isNotNull, ne } from "drizzle-orm";
import { response } from "@server/lib/response";
@@ -81,6 +84,7 @@ import { verifyResourceAccessToken } from "@server/auth/verifyResourceAccessToke
import semver from "semver";
import { maxmindAsnLookup } from "@server/db/maxmindAsn";
import { checkOrgAccessPolicy } from "@server/lib/checkOrgAccessPolicy";
import { sanitizeString } from "@server/lib/sanitize";

// Zod schemas for request validation
const getResourceByDomainParamsSchema = z.strictObject({
@@ -102,6 +106,13 @@ const getUserOrgSessionVerifySchema = z.strictObject({
    sessionId: z.string().min(1, "Session ID is required")
});

const getRoleNameParamsSchema = z.strictObject({
    roleId: z
        .string()
        .transform(Number)
        .pipe(z.int().positive("Role ID must be a positive integer"))
});

const getRoleResourceAccessParamsSchema = z.strictObject({
    roleId: z
        .string()
@@ -113,6 +124,23 @@ const getRoleResourceAccessParamsSchema = z.strictObject({
        .pipe(z.int().positive("Resource ID must be a positive integer"))
});

const getResourceAccessParamsSchema = z.strictObject({
    resourceId: z
        .string()
        .transform(Number)
        .pipe(z.int().positive("Resource ID must be a positive integer"))
});

const getResourceAccessQuerySchema = z.strictObject({
    roleIds: z
        .union([z.array(z.string()), z.string()])
        .transform((val) =>
            (Array.isArray(val) ? val : [val])
                .map(Number)
                .filter((n) => !isNaN(n))
        )
});
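
Editor's note: `getResourceAccessQuerySchema` accepts `roleIds` either as a single string or as an array (Express parses repeated query keys into arrays), normalizes both into `number[]`, and silently drops non-numeric entries. A quick sketch of that coercion:

    import { z } from "zod";

    // Same normalization as roleIds above: string | string[] → number[].
    const roleIdsSchema = z
        .union([z.array(z.string()), z.string()])
        .transform((val) =>
            (Array.isArray(val) ? val : [val])
                .map(Number)
                .filter((n) => !isNaN(n))
        );

    console.log(roleIdsSchema.parse("3"));          // [3]    (?roleIds=3)
    console.log(roleIdsSchema.parse(["1", "2"]));   // [1, 2] (?roleIds=1&roleIds=2)
    console.log(roleIdsSchema.parse(["1", "abc"])); // [1]    (junk dropped)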

const getUserResourceAccessParamsSchema = z.strictObject({
    userId: z.string().min(1, "User ID is required"),
    resourceId: z
@@ -758,7 +786,7 @@ hybridRouter.get(

// Get user organization role
hybridRouter.get(
    "/user/:userId/org/:orgId/role",
    "/user/:userId/org/:orgId/roles",
    async (req: Request, res: Response, next: NextFunction) => {
        try {
            const parsedParams = getUserOrgRoleParamsSchema.safeParse(
@@ -794,23 +822,129 @@ hybridRouter.get(
                );
            }

            const userOrgRole = await db
                .select()
                .from(userOrgs)
            const userOrgRoleRows = await db
                .select({ roleId: userOrgRoles.roleId, roleName: roles.name })
                .from(userOrgRoles)
                .innerJoin(roles, eq(roles.roleId, userOrgRoles.roleId))
                .where(
                    and(eq(userOrgs.userId, userId), eq(userOrgs.orgId, orgId))
                )
                .limit(1);
                    and(
                        eq(userOrgRoles.userId, userId),
                        eq(userOrgRoles.orgId, orgId)
                    )
                );

            const result = userOrgRole.length > 0 ? userOrgRole[0] : null;
            logger.debug(`User ${userId} has roles in org ${orgId}:`, userOrgRoleRows);

            return response<typeof userOrgs.$inferSelect | null>(res, {
                data: result,
            return response<{ roleId: number, roleName: string }[]>(res, {
                data: userOrgRoleRows,
                success: true,
                error: false,
                message: result
                    ? "User org role retrieved successfully"
                    : "User org role not found",
                message:
                    userOrgRoleRows.length > 0
                        ? "User org roles retrieved successfully"
                        : "User has no roles in this organization",
                status: HttpCode.OK
            });
        } catch (error) {
            logger.error(error);
            return next(
                createHttpError(
                    HttpCode.INTERNAL_SERVER_ERROR,
                    "Failed to get user org role"
                )
            );
        }
    }
);

// DEPRECATED: Get user organization role
// used for backward compatibility with old remote nodes
hybridRouter.get(
    "/user/:userId/org/:orgId/role", // <- note the missing s
    async (req: Request, res: Response, next: NextFunction) => {
        try {
            const parsedParams = getUserOrgRoleParamsSchema.safeParse(
                req.params
            );
            if (!parsedParams.success) {
                return next(
                    createHttpError(
                        HttpCode.BAD_REQUEST,
                        fromError(parsedParams.error).toString()
                    )
                );
            }

            const { userId, orgId } = parsedParams.data;
            const remoteExitNode = req.remoteExitNode;

            if (!remoteExitNode || !remoteExitNode.exitNodeId) {
                return next(
                    createHttpError(
                        HttpCode.BAD_REQUEST,
                        "Remote exit node not found"
                    )
                );
            }

            if (await checkExitNodeOrg(remoteExitNode.exitNodeId, orgId)) {
                return next(
                    createHttpError(
                        HttpCode.UNAUTHORIZED,
                        "User is not authorized to access this organization"
                    )
                );
            }

            // get the roles on the user

            const userOrgRoleRows = await db
                .select({ roleId: userOrgRoles.roleId })
                .from(userOrgRoles)
                .where(
                    and(
                        eq(userOrgRoles.userId, userId),
                        eq(userOrgRoles.orgId, orgId)
                    )
                );

            const roleIds = userOrgRoleRows.map((r) => r.roleId);

            let roleId: number | null = null;

            if (userOrgRoleRows.length === 0) {
                // User has no roles in this organization
                roleId = null;
            } else if (userOrgRoleRows.length === 1) {
                // User has exactly one role, return it
                roleId = userOrgRoleRows[0].roleId;
            } else {
                // User has multiple roles
                // Check if any of these roles are also assigned to a resource
                // If we find a match, prefer that role; otherwise return the first role
                // Get all resources that have any of these roles assigned
                const roleResourceMatches = await db
                    .select({ roleId: roleResources.roleId })
                    .from(roleResources)
                    .where(inArray(roleResources.roleId, roleIds))
                    .limit(1);
                if (roleResourceMatches.length > 0) {
                    // Return the first role that's also on a resource
                    roleId = roleResourceMatches[0].roleId;
                } else {
                    // No resource match found, return the first role
                    roleId = userOrgRoleRows[0].roleId;
                }
            }

            return response<{ roleId: number | null }>(res, {
                data: { roleId },
                success: true,
                error: false,
                message:
                    roleIds.length > 0
                        ? "User org roles retrieved successfully"
                        : "User has no roles in this organization",
                status: HttpCode.OK
            });
        } catch (error) {
@@ -888,6 +1022,60 @@ hybridRouter.get(
    }
);
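
Editor's note: the deprecated single-role endpoint has to collapse a set of roles into one `roleId` for old remote nodes; it prefers a role that is also attached to some resource, otherwise falls back to the first role. A pure-function sketch of that selection, with `resourceRoleIds` standing in for the `roleResources` lookup (no database):

    // Sketch: collapse a user's role set to one roleId for legacy clients.
    // Prefers a role that also appears in resourceRoleIds, else the first role.
    function pickLegacyRoleId(
        userRoleIds: number[],
        resourceRoleIds: Set<number>
    ): number | null {
        if (userRoleIds.length === 0) return null;
        if (userRoleIds.length === 1) return userRoleIds[0];
        const onResource = userRoleIds.find((id) => resourceRoleIds.has(id));
        return onResource ?? userRoleIds[0];
    }

    console.log(pickLegacyRoleId([], new Set()));         // null
    console.log(pickLegacyRoleId([7], new Set()));        // 7
    console.log(pickLegacyRoleId([7, 9], new Set([9])));  // 9 (resource-backed wins)
    console.log(pickLegacyRoleId([7, 9], new Set([42]))); // 7 (fallback to first)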

// Get role name by ID
hybridRouter.get(
    "/role/:roleId/name",
    async (req: Request, res: Response, next: NextFunction) => {
        try {
            const parsedParams = getRoleNameParamsSchema.safeParse(req.params);
            if (!parsedParams.success) {
                return next(
                    createHttpError(
                        HttpCode.BAD_REQUEST,
                        fromError(parsedParams.error).toString()
                    )
                );
            }

            const { roleId } = parsedParams.data;
            const remoteExitNode = req.remoteExitNode;

            if (!remoteExitNode?.exitNodeId) {
                return next(
                    createHttpError(
                        HttpCode.BAD_REQUEST,
                        "Remote exit node not found"
                    )
                );
            }

            const [role] = await db
                .select({ name: roles.name })
                .from(roles)
                .where(eq(roles.roleId, roleId))
                .limit(1);

            return response<string | null>(res, {
                data: role?.name ?? null,
                success: true,
                error: false,
                message: role
                    ? "Role name retrieved successfully"
                    : "Role not found",
                status: HttpCode.OK
            });
        } catch (error) {
            logger.error(error);
            return next(
                createHttpError(
                    HttpCode.INTERNAL_SERVER_ERROR,
                    "Failed to get role name"
                )
            );
        }
    }
);

// Check if role has access to resource
hybridRouter.get(
    "/role/:roleId/resource/:resourceId/access",
@@ -973,6 +1161,101 @@ hybridRouter.get(
    }
);

// Check if role has access to resource
hybridRouter.get(
    "/resource/:resourceId/access",
    async (req: Request, res: Response, next: NextFunction) => {
        try {
            const parsedParams = getResourceAccessParamsSchema.safeParse(
                req.params
            );
            if (!parsedParams.success) {
                return next(
                    createHttpError(
                        HttpCode.BAD_REQUEST,
                        fromError(parsedParams.error).toString()
                    )
                );
            }

            const { resourceId } = parsedParams.data;
            const parsedQuery = getResourceAccessQuerySchema.safeParse(
                req.query
            );
            const roleIds = parsedQuery.success ? parsedQuery.data.roleIds : [];

            const remoteExitNode = req.remoteExitNode;

            if (!remoteExitNode?.exitNodeId) {
                return next(
                    createHttpError(
                        HttpCode.BAD_REQUEST,
                        "Remote exit node not found"
                    )
                );
            }

            const [resource] = await db
                .select()
                .from(resources)
                .where(eq(resources.resourceId, resourceId))
                .limit(1);

            if (
                await checkExitNodeOrg(
                    remoteExitNode.exitNodeId,
                    resource.orgId
                )
            ) {
                // If the exit node is not allowed for the org, return an error
                return next(
                    createHttpError(
                        HttpCode.FORBIDDEN,
                        "Exit node not allowed for this organization"
                    )
                );
            }

            const roleResourceAccess = await db
                .select({
                    resourceId: roleResources.resourceId,
                    roleId: roleResources.roleId
                })
                .from(roleResources)
                .where(
                    and(
                        eq(roleResources.resourceId, resourceId),
                        inArray(roleResources.roleId, roleIds)
                    )
                );

            const result =
                roleResourceAccess.length > 0 ? roleResourceAccess : null;

            return response<{ resourceId: number; roleId: number }[] | null>(
                res,
                {
                    data: result,
                    success: true,
                    error: false,
                    message: result
                        ? "Role resource access retrieved successfully"
                        : "Role resource access not found",
                    status: HttpCode.OK
                }
            );
        } catch (error) {
            logger.error(error);
            return next(
                createHttpError(
                    HttpCode.INTERNAL_SERVER_ERROR,
                    "Failed to get role resource access"
                )
            );
        }
    }
);

// Check if user has direct access to resource
hybridRouter.get(
    "/user/:userId/resource/:resourceId/access",
@@ -1859,24 +2142,25 @@ hybridRouter.post(
    })
    .map((logEntry) => ({
        timestamp: logEntry.timestamp,
        orgId: logEntry.orgId,
        actorType: logEntry.actorType,
        actor: logEntry.actor,
        actorId: logEntry.actorId,
        metadata: logEntry.metadata,
        orgId: sanitizeString(logEntry.orgId),
        actorType: sanitizeString(logEntry.actorType),
        actor: sanitizeString(logEntry.actor),
        actorId: sanitizeString(logEntry.actorId),
        metadata: sanitizeString(logEntry.metadata),
        action: logEntry.action,
        resourceId: logEntry.resourceId,
        reason: logEntry.reason,
        location: logEntry.location,
        location: sanitizeString(logEntry.location),
        // userAgent: data.userAgent, // TODO: add this
        // headers: data.body.headers,
        // query: data.body.query,
        originalRequestURL: logEntry.originalRequestURL,
        scheme: logEntry.scheme,
        host: logEntry.host,
        path: logEntry.path,
        method: logEntry.method,
        ip: logEntry.ip,
        originalRequestURL:
            sanitizeString(logEntry.originalRequestURL) ?? "",
        scheme: sanitizeString(logEntry.scheme) ?? "",
        host: sanitizeString(logEntry.host) ?? "",
        path: sanitizeString(logEntry.path) ?? "",
        method: sanitizeString(logEntry.method) ?? "",
        ip: sanitizeString(logEntry.ip),
        tls: logEntry.tls
    }));
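
Editor's note: the hunk above routes every free-form string field through `sanitizeString` before it is persisted, with `?? ""` on fields whose columns presumably require a non-null value. The real behavior lives in @server/lib/sanitize; the stand-in below is an assumption, shown only to illustrate the shape of the call sites:

    // Hypothetical stand-in for sanitizeString: strips control characters and
    // caps length; returns null for null/undefined input so callers can `?? ""`.
    function sanitizeString(
        value: string | null | undefined,
        maxLen = 2048
    ): string | null {
        if (value == null) return null;
        return value.replace(/[\u0000-\u001f\u007f]/g, "").slice(0, maxLen);
    }

    const entry = { host: "example.com\u0000", path: undefined as string | undefined };
    const row = {
        host: sanitizeString(entry.host) ?? "", // "example.com"
        path: sanitizeString(entry.path) ?? ""  // "" (column kept non-null)
    };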

@@ -1884,7 +2168,7 @@ hybridRouter.post(
    const batchSize = 100;
    for (let i = 0; i < logEntries.length; i += batchSize) {
        const batch = logEntries.slice(i, i + batchSize);
        await db.insert(requestAuditLog).values(batch);
        await logsDb.insert(requestAuditLog).values(batch);
    }

    return response(res, {

@@ -20,8 +20,11 @@ import {
    verifyApiKeyIsRoot,
    verifyApiKeyOrgAccess,
    verifyApiKeyIdpAccess,
    verifyApiKeyRoleAccess,
    verifyApiKeyUserAccess,
    verifyLimits
} from "@server/middlewares";
import * as user from "#private/routers/user";
import {
    verifyValidSubscription,
    verifyValidLicense
@@ -91,6 +94,25 @@ authenticated.get(
    logs.exportAccessAuditLogs
);

authenticated.get(
    "/org/:orgId/logs/connection",
    verifyValidLicense,
    verifyValidSubscription(tierMatrix.connectionLogs),
    verifyApiKeyOrgAccess,
    verifyApiKeyHasAction(ActionsEnum.exportLogs),
    logs.queryConnectionAuditLogs
);

authenticated.get(
    "/org/:orgId/logs/connection/export",
    verifyValidLicense,
    verifyValidSubscription(tierMatrix.logExport),
    verifyApiKeyOrgAccess,
    verifyApiKeyHasAction(ActionsEnum.exportLogs),
    logActionAudit(ActionsEnum.exportLogs),
    logs.exportConnectionAuditLogs
);

authenticated.put(
    "/org/:orgId/idp/oidc",
    verifyValidLicense,
@@ -140,3 +162,23 @@ authenticated.get(
    verifyApiKeyHasAction(ActionsEnum.listIdps),
    orgIdp.listOrgIdps
);

authenticated.post(
    "/user/:userId/add-role/:roleId",
    verifyApiKeyRoleAccess,
    verifyApiKeyUserAccess,
    verifyLimits,
    verifyApiKeyHasAction(ActionsEnum.addUserRole),
    logActionAudit(ActionsEnum.addUserRole),
    user.addUserRole
);

authenticated.delete(
    "/user/:userId/remove-role/:roleId",
    verifyApiKeyRoleAccess,
    verifyApiKeyUserAccess,
    verifyLimits,
    verifyApiKeyHasAction(ActionsEnum.removeUserRole),
    logActionAudit(ActionsEnum.removeUserRole),
    user.removeUserRole
);

server/private/routers/newt/handleConnectionLogMessage.ts (new file, 239 lines)
@@ -0,0 +1,239 @@
/*
 * This file is part of a proprietary work.
 *
 * Copyright (c) 2025 Fossorial, Inc.
 * All rights reserved.
 *
 * This file is licensed under the Fossorial Commercial License.
 * You may not use this file except in compliance with the License.
 * Unauthorized use, copying, modification, or distribution is strictly prohibited.
 *
 * This file is not licensed under the AGPLv3.
 */

import { db } from "@server/db";
import { MessageHandler } from "@server/routers/ws";
import { sites, Newt, clients, orgs } from "@server/db";
import { and, eq, inArray } from "drizzle-orm";
import logger from "@server/logger";
import { inflate } from "zlib";
import { promisify } from "util";
import {
    logConnectionAudit,
    flushConnectionLogToDb,
    cleanUpOldLogs
} from "#private/lib/logConnectionAudit";

export { flushConnectionLogToDb, cleanUpOldLogs };

const zlibInflate = promisify(inflate);

interface ConnectionSessionData {
    sessionId: string;
    resourceId: number;
    sourceAddr: string;
    destAddr: string;
    protocol: string;
    startedAt: string; // ISO 8601 timestamp
    endedAt?: string; // ISO 8601 timestamp
    bytesTx?: number;
    bytesRx?: number;
}

/**
 * Decompress a base64-encoded zlib-compressed string into parsed JSON.
 */
async function decompressConnectionLog(
    compressed: string
): Promise<ConnectionSessionData[]> {
    const compressedBuffer = Buffer.from(compressed, "base64");
    const decompressed = await zlibInflate(compressedBuffer);
    const jsonString = decompressed.toString("utf-8");
    const parsed = JSON.parse(jsonString);

    if (!Array.isArray(parsed)) {
        throw new Error("Decompressed connection log data is not an array");
    }

    return parsed;
}

/**
 * Convert an ISO 8601 timestamp string to epoch seconds.
 * Returns null if the input is falsy.
 */
function toEpochSeconds(isoString: string | undefined | null): number | null {
    if (!isoString) {
        return null;
    }
    const ms = new Date(isoString).getTime();
    if (isNaN(ms)) {
        return null;
    }
    return Math.floor(ms / 1000);
}
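
Editor's note: `toEpochSeconds` is deliberately forgiving: falsy and unparseable inputs both yield `null` rather than throwing, so a single bad timestamp skips one session instead of failing the whole batch. The sketch below illustrates the edge cases, plus the encoding a sender would need to satisfy `decompressConnectionLog` (zlib deflate + base64); the session payload is an invented example:

    import { deflateSync } from "zlib";

    // Edge cases mirrored from toEpochSeconds:
    // toEpochSeconds("2025-01-01T00:00:00Z") → 1735689600
    // toEpochSeconds("not-a-date")           → null (NaN path)
    // toEpochSeconds(undefined)              → null (falsy path)

    // Matching encoder: JSON → zlib deflate → base64, the inverse of
    // decompressConnectionLog above.
    const sessions = [
        {
            sessionId: "abc",
            resourceId: 1,
            sourceAddr: "100.90.128.5:38004",
            destAddr: "10.0.0.2:22",
            protocol: "tcp",
            startedAt: new Date().toISOString()
        }
    ];
    const compressed = deflateSync(JSON.stringify(sessions)).toString("base64");
    // `compressed` is what would arrive in message.data.compressed.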

export const handleConnectionLogMessage: MessageHandler = async (context) => {
    const { message, client } = context;
    const newt = client as Newt;

    if (!newt) {
        logger.warn("Connection log received but no newt client in context");
        return;
    }

    if (!newt.siteId) {
        logger.warn("Connection log received but newt has no siteId");
        return;
    }

    if (!message.data?.compressed) {
        logger.warn("Connection log message missing compressed data");
        return;
    }

    // Look up the org for this site
    const [site] = await db
        .select({ orgId: sites.orgId, orgSubnet: orgs.subnet })
        .from(sites)
        .innerJoin(orgs, eq(sites.orgId, orgs.orgId))
        .where(eq(sites.siteId, newt.siteId));

    if (!site) {
        logger.warn(
            `Connection log received but site ${newt.siteId} not found in database`
        );
        return;
    }

    const orgId = site.orgId;

    // Extract the CIDR suffix (e.g. "/16") from the org subnet so we can
    // reconstruct the exact subnet string stored on each client record.
    const cidrSuffix = site.orgSubnet?.includes("/")
        ? site.orgSubnet.substring(site.orgSubnet.indexOf("/"))
        : null;

    let sessions: ConnectionSessionData[];
    try {
        sessions = await decompressConnectionLog(message.data.compressed);
    } catch (error) {
        logger.error("Failed to decompress connection log data:", error);
        return;
    }

    if (sessions.length === 0) {
        return;
    }

    logger.debug(`Sessions: ${JSON.stringify(sessions)}`);

    // Build a map from sourceAddr → { clientId, userId } by querying clients
    // whose subnet field matches exactly. Client subnets are stored with the
    // org's CIDR suffix (e.g. "100.90.128.5/16"), so we reconstruct that from
    // each unique sourceAddr + the org's CIDR suffix and do a targeted IN query.
    const ipToClient = new Map<
        string,
        { clientId: number; userId: string | null }
    >();

    if (cidrSuffix) {
        // Collect unique source addresses so we only query for what we need
        const uniqueSourceAddrs = new Set<string>();
        for (const session of sessions) {
            if (session.sourceAddr) {
                uniqueSourceAddrs.add(session.sourceAddr);
            }
        }

        if (uniqueSourceAddrs.size > 0) {
            // Construct the exact subnet strings as stored in the DB
            const subnetQueries = Array.from(uniqueSourceAddrs).map((addr) => {
                // Strip port if present (e.g. "100.90.128.1:38004" → "100.90.128.1")
                const ip = addr.includes(":") ? addr.split(":")[0] : addr;
                return `${ip}${cidrSuffix}`;
            });

            logger.debug(`Subnet queries: ${JSON.stringify(subnetQueries)}`);

            const matchedClients = await db
                .select({
                    clientId: clients.clientId,
                    userId: clients.userId,
                    subnet: clients.subnet
                })
                .from(clients)
                .where(
                    and(
                        eq(clients.orgId, orgId),
                        inArray(clients.subnet, subnetQueries)
                    )
                );

            for (const c of matchedClients) {
                const ip = c.subnet.split("/")[0];
                logger.debug(
                    `Client ${c.clientId} subnet ${c.subnet} matches ${ip}`
                );
                ipToClient.set(ip, {
                    clientId: c.clientId,
                    userId: c.userId
                });
            }
        }
    }

    // Convert to DB records and hand off to the audit logger
    for (const session of sessions) {
        // Validate required fields
        if (
            !session.sessionId ||
            !session.resourceId ||
            !session.sourceAddr ||
            !session.destAddr ||
            !session.protocol
        ) {
            logger.debug(
                `Skipping connection log session with missing required fields: ${JSON.stringify(session)}`
            );
            continue;
        }

        const startedAt = toEpochSeconds(session.startedAt);
        if (startedAt === null) {
            logger.debug(
                `Skipping connection log session with invalid startedAt: ${session.startedAt}`
            );
            continue;
        }

        // Match the source address to a client. The sourceAddr is the
        // client's IP on the WireGuard network, which corresponds to the IP
        // portion of the client's subnet CIDR (e.g. "100.90.128.5/24").
        // Strip port if present (e.g. "100.90.128.1:38004" → "100.90.128.1")
        const sourceIp = session.sourceAddr.includes(":")
            ? session.sourceAddr.split(":")[0]
            : session.sourceAddr;
        const clientInfo = ipToClient.get(sourceIp) ?? null;

        logConnectionAudit({
            sessionId: session.sessionId,
            siteResourceId: session.resourceId,
            orgId,
            siteId: newt.siteId,
            clientId: clientInfo?.clientId ?? null,
            userId: clientInfo?.userId ?? null,
            sourceAddr: session.sourceAddr,
            destAddr: session.destAddr,
            protocol: session.protocol,
            startedAt,
            endedAt: toEpochSeconds(session.endedAt),
            bytesTx: session.bytesTx ?? null,
            bytesRx: session.bytesRx ?? null
        });
    }

    logger.debug(
        `Buffered ${sessions.length} connection log session(s) from newt ${newt.newtId} (site ${newt.siteId})`
    );
};
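
Editor's note: the client-matching trick above avoids a LIKE scan: it rebuilds each candidate's exact stored subnet string (IP plus the org's CIDR suffix) and runs a single IN query. The core string manipulation in isolation:

    // Sketch of the sourceAddr → stored-subnet reconstruction used above.
    function toSubnetKey(sourceAddr: string, cidrSuffix: string): string {
        // Strip a trailing :port if present ("100.90.128.1:38004" → "100.90.128.1").
        const ip = sourceAddr.includes(":") ? sourceAddr.split(":")[0] : sourceAddr;
        return `${ip}${cidrSuffix}`;
    }

    console.log(toSubnetKey("100.90.128.5:38004", "/16")); // "100.90.128.5/16"
    console.log(toSubnetKey("100.90.128.5", "/16"));       // "100.90.128.5/16"

    // Caveat, inherited from the original: a bare IPv6 sourceAddr contains ":"
    // and would be truncated at the first colon; the scheme assumes IPv4
    // WireGuard addresses.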
server/private/routers/newt/index.ts (new file, 14 lines)
@@ -0,0 +1,14 @@
/*
 * This file is part of a proprietary work.
 *
 * Copyright (c) 2025 Fossorial, Inc.
 * All rights reserved.
 *
 * This file is licensed under the Fossorial Commercial License.
 * You may not use this file except in compliance with the License.
 * Unauthorized use, copying, modification, or distribution is strictly prohibited.
 *
 * This file is not licensed under the AGPLv3.
 */

export * from "./handleConnectionLogMessage";
@@ -14,7 +14,7 @@
import { Request, Response, NextFunction } from "express";
import { z } from "zod";
import { db } from "@server/db";
import { userOrgs, users, roles, orgs } from "@server/db";
import { userOrgs, userOrgRoles, users, roles, orgs } from "@server/db";
import { eq, and, or } from "drizzle-orm";
import response from "@server/lib/response";
import HttpCode from "@server/types/HttpCode";
@@ -95,7 +95,14 @@ async function getOrgAdmins(orgId: string) {
    })
    .from(userOrgs)
    .innerJoin(users, eq(userOrgs.userId, users.userId))
    .leftJoin(roles, eq(userOrgs.roleId, roles.roleId))
    .leftJoin(
        userOrgRoles,
        and(
            eq(userOrgs.userId, userOrgRoles.userId),
            eq(userOrgs.orgId, userOrgRoles.orgId)
        )
    )
    .leftJoin(roles, eq(userOrgRoles.roleId, roles.roleId))
    .where(
        and(
            eq(userOrgs.orgId, orgId),
@@ -103,8 +110,11 @@ async function getOrgAdmins(orgId: string) {
        )
    );

    // Filter to only include users with verified emails
    const orgAdmins = admins.filter(
    // Dedupe by userId (user may have multiple roles)
    const byUserId = new Map(
        admins.map((a) => [a.userId, a])
    );
    const orgAdmins = Array.from(byUserId.values()).filter(
        (admin) => admin.email && admin.email.length > 0
    );


@@ -79,7 +79,7 @@ export async function createRemoteExitNode(

    const { remoteExitNodeId, secret } = parsedBody.data;

    if (req.user && !req.userOrgRoleId) {
    if (req.user && (!req.userOrgRoleIds || req.userOrgRoleIds.length === 0)) {
        return next(
            createHttpError(HttpCode.FORBIDDEN, "User does not have a role")
        );

@@ -23,8 +23,10 @@ import { z } from "zod";
import { fromError } from "zod-validation-error";
import {
    createRemoteExitNodeSession,
    validateRemoteExitNodeSessionToken
    validateRemoteExitNodeSessionToken,
    EXPIRES
} from "#private/auth/sessions/remoteExitNode";
import { getOrCreateCachedToken } from "@server/private/lib/tokenCache";
import { verifyPassword } from "@server/auth/password";
import logger from "@server/logger";
import config from "@server/lib/config";
@@ -103,14 +105,23 @@ export async function getRemoteExitNodeToken(
    );
}

    const resToken = generateSessionToken();
    await createRemoteExitNodeSession(
        resToken,
        existingRemoteExitNode.remoteExitNodeId
    // Return a cached token if one exists to prevent thundering herd on
    // simultaneous restarts; falls back to creating a fresh session when
    // Redis is unavailable or the cache has expired.
    const resToken = await getOrCreateCachedToken(
        `remote_exit_node:token_cache:${existingRemoteExitNode.remoteExitNodeId}`,
        config.getRawConfig().server.secret!,
        Math.floor(EXPIRES / 1000),
        async () => {
            const token = generateSessionToken();
            await createRemoteExitNodeSession(
                token,
                existingRemoteExitNode.remoteExitNodeId
            );
            return token;
        }
    );
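
Editor's note: `getOrCreateCachedToken` is not shown in this diff; from the call site it takes a cache key, an encryption secret, a TTL in seconds, and an async factory, returning the cached token when present and otherwise minting and caching a new one. A plausible in-memory sketch of that contract; the real helper in @server/private/lib/tokenCache is Redis-backed and presumably encrypts the stored value, so everything below is an assumption:

    // Hypothetical in-memory stand-in for getOrCreateCachedToken.
    const tokenCache = new Map<string, { value: string; expiresAt: number }>();

    async function getOrCreateCachedTokenSketch(
        key: string,
        _secret: string, // real version uses this to encrypt the cached token
        ttlSeconds: number,
        create: () => Promise<string>
    ): Promise<string> {
        const hit = tokenCache.get(key);
        if (hit && hit.expiresAt > Date.now()) {
            return hit.value; // concurrent restarts share one session token
        }
        const token = await create();
        tokenCache.set(key, {
            value: token,
            expiresAt: Date.now() + ttlSeconds * 1000
        });
        return token;
    }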

    // logger.debug(`Created RemoteExitNode token response: ${JSON.stringify(resToken)}`);

    return response<{ token: string }>(res, {
        data: {
            token: resToken

@@ -38,7 +38,7 @@ export const startRemoteExitNodeOfflineChecker = (): void => {
    );

    // Find clients that haven't pinged in the last 2 minutes and mark them as offline
    const newlyOfflineNodes = await db
    const offlineNodes = await db
        .update(exitNodes)
        .set({ online: false })
        .where(
@@ -53,32 +53,15 @@ export const startRemoteExitNodeOfflineChecker = (): void => {
        )
        .returning();

    // Update the sites to offline if they have not pinged either
    const exitNodeIds = newlyOfflineNodes.map(
        (node) => node.exitNodeId
    );

    const sitesOnNode = await db
        .select()
        .from(sites)
        .where(
            and(
                eq(sites.online, true),
                inArray(sites.exitNodeId, exitNodeIds)
            )
    if (offlineNodes.length > 0) {
        logger.info(
            `checkRemoteExitNodeOffline: Marked ${offlineNodes.length} remoteExitNode client(s) offline due to inactivity`
        );

    // loop through the sites and process their lastBandwidthUpdate as an iso string and if its more than 1 minute old then mark the site offline
    for (const site of sitesOnNode) {
        if (!site.lastBandwidthUpdate) {
            continue;
        }
        const lastBandwidthUpdate = new Date(site.lastBandwidthUpdate);
        if (Date.now() - lastBandwidthUpdate.getTime() > 60 * 1000) {
            await db
                .update(sites)
                .set({ online: false })
                .where(eq(sites.siteId, site.siteId));
        for (const offlineClient of offlineNodes) {
            logger.debug(
                `checkRemoteExitNodeOffline: Client ${offlineClient.exitNodeId} marked offline (lastPing: ${offlineClient.lastPing})`
            );
        }
    }
} catch (error) {

@@ -0,0 +1,149 @@
/*
 * This file is part of a proprietary work.
 *
 * Copyright (c) 2025 Fossorial, Inc.
 * All rights reserved.
 *
 * This file is licensed under the Fossorial Commercial License.
 * You may not use this file except in compliance with the License.
 * Unauthorized use, copying, modification, or distribution is strictly prohibited.
 *
 * This file is not licensed under the AGPLv3.
 */

import { NextFunction, Request, Response } from "express";
import { db, siteProvisioningKeyOrg, siteProvisioningKeys } from "@server/db";
import HttpCode from "@server/types/HttpCode";
import { z } from "zod";
import { fromError } from "zod-validation-error";
import createHttpError from "http-errors";
import response from "@server/lib/response";
import moment from "moment";
import {
    generateId,
    generateIdFromEntropySize
} from "@server/auth/sessions/app";
import logger from "@server/logger";
import { hashPassword } from "@server/auth/password";
import type { CreateSiteProvisioningKeyResponse } from "@server/routers/siteProvisioning/types";

const paramsSchema = z.object({
    orgId: z.string().nonempty()
});

const bodySchema = z
    .strictObject({
        name: z.string().min(1).max(255),
        maxBatchSize: z.union([
            z.null(),
            z.coerce.number().int().positive().max(1_000_000)
        ]),
        validUntil: z.string().max(255).optional(),
        approveNewSites: z.boolean().optional().default(true)
    })
    .superRefine((data, ctx) => {
        const v = data.validUntil;
        if (v == null || v.trim() === "") {
            return;
        }
        if (Number.isNaN(Date.parse(v))) {
            ctx.addIssue({
                code: "custom",
                message: "Invalid validUntil",
                path: ["validUntil"]
            });
        }
    });

export type CreateSiteProvisioningKeyBody = z.infer<typeof bodySchema>;

export async function createSiteProvisioningKey(
    req: Request,
    res: Response,
    next: NextFunction
): Promise<any> {
    const parsedParams = paramsSchema.safeParse(req.params);
    if (!parsedParams.success) {
        return next(
            createHttpError(
                HttpCode.BAD_REQUEST,
                fromError(parsedParams.error).toString()
            )
        );
    }

    const parsedBody = bodySchema.safeParse(req.body);
    if (!parsedBody.success) {
        return next(
            createHttpError(
                HttpCode.BAD_REQUEST,
                fromError(parsedBody.error).toString()
            )
        );
    }

    const { orgId } = parsedParams.data;
    const { name, maxBatchSize, approveNewSites } = parsedBody.data;
    const vuRaw = parsedBody.data.validUntil;
    const validUntil =
        vuRaw == null || vuRaw.trim() === ""
            ? null
            : new Date(Date.parse(vuRaw)).toISOString();

    const siteProvisioningKeyId = `spk-${generateId(15)}`;
    const siteProvisioningKey = generateIdFromEntropySize(25);
    const siteProvisioningKeyHash = await hashPassword(siteProvisioningKey);
    const lastChars = siteProvisioningKey.slice(-4);
    const createdAt = moment().toISOString();
    const provisioningKey = `${siteProvisioningKeyId}.${siteProvisioningKey}`;

    await db.transaction(async (trx) => {
        await trx.insert(siteProvisioningKeys).values({
            siteProvisioningKeyId,
            name,
            siteProvisioningKeyHash,
            createdAt,
            lastChars,
            lastUsed: null,
            maxBatchSize,
            numUsed: 0,
            validUntil,
            approveNewSites
        });

        await trx.insert(siteProvisioningKeyOrg).values({
            siteProvisioningKeyId,
            orgId
        });
    });

    try {
        return response<CreateSiteProvisioningKeyResponse>(res, {
            data: {
                siteProvisioningKeyId,
                orgId,
                name,
                siteProvisioningKey: provisioningKey,
                lastChars,
                createdAt,
                lastUsed: null,
                maxBatchSize,
                numUsed: 0,
                validUntil,
                approveNewSites
            },
            success: true,
            error: false,
            message: "Site provisioning key created",
            status: HttpCode.CREATED
        });
    } catch (e) {
        logger.error(e);
        return next(
            createHttpError(
                HttpCode.INTERNAL_SERVER_ERROR,
                "Failed to create site provisioning key"
            )
        );
    }
}
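
Editor's note: the provisioning key handed back to the caller is "<keyId>.<secret>", where only a hash of the secret and its last four characters are persisted; the plaintext secret never touches the database. A small sketch of the matching verification path; the hash helpers below are stand-ins for the real @server/auth/password functions, which use a proper password hash rather than SHA-256:

    import { createHash } from "crypto";

    // Stand-ins for hashPassword/verifyPassword (real ones are slow hashes).
    const hashPassword = async (s: string) =>
        createHash("sha256").update(s).digest("hex");
    const verifyPassword = async (s: string, hash: string) =>
        (await hashPassword(s)) === hash;

    function splitProvisioningKey(
        presented: string
    ): { keyId: string; secret: string } | null {
        const dot = presented.indexOf(".");
        if (dot <= 0 || dot === presented.length - 1) return null;
        return { keyId: presented.slice(0, dot), secret: presented.slice(dot + 1) };
    }

    async function checkKey(
        presented: string,
        storedHashByKeyId: Map<string, string>
    ): Promise<boolean> {
        const parts = splitProvisioningKey(presented);
        if (!parts) return false;
        const storedHash = storedHashByKeyId.get(parts.keyId);
        return storedHash ? verifyPassword(parts.secret, storedHash) : false;
    }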
|
||||
@@ -0,0 +1,129 @@
|
||||
/*
|
||||
* This file is part of a proprietary work.
|
||||
*
|
||||
* Copyright (c) 2025 Fossorial, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This file is licensed under the Fossorial Commercial License.
|
||||
* You may not use this file except in compliance with the License.
|
||||
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
|
||||
*
|
||||
* This file is not licensed under the AGPLv3.
|
||||
*/
|
||||
|
||||
import { Request, Response, NextFunction } from "express";
|
||||
import { z } from "zod";
|
||||
import {
|
||||
db,
|
||||
siteProvisioningKeyOrg,
|
||||
siteProvisioningKeys
|
||||
} from "@server/db";
|
||||
import { and, eq } from "drizzle-orm";
|
||||
import response from "@server/lib/response";
|
||||
import HttpCode from "@server/types/HttpCode";
|
||||
import createHttpError from "http-errors";
|
||||
import logger from "@server/logger";
|
||||
import { fromError } from "zod-validation-error";
|
||||
|
||||
const paramsSchema = z.object({
|
||||
siteProvisioningKeyId: z.string().nonempty(),
|
||||
orgId: z.string().nonempty()
|
||||
});
|
||||
|
||||
export async function deleteSiteProvisioningKey(
|
||||
req: Request,
|
||||
res: Response,
|
||||
next: NextFunction
|
||||
): Promise<any> {
|
||||
try {
|
||||
const parsedParams = paramsSchema.safeParse(req.params);
|
||||
if (!parsedParams.success) {
|
||||
return next(
|
||||
createHttpError(
|
||||
HttpCode.BAD_REQUEST,
|
||||
fromError(parsedParams.error).toString()
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
const { siteProvisioningKeyId, orgId } = parsedParams.data;
|
||||
|
||||
const [row] = await db
|
||||
.select()
|
||||
.from(siteProvisioningKeys)
|
||||
.where(
|
||||
eq(
|
||||
siteProvisioningKeys.siteProvisioningKeyId,
|
||||
siteProvisioningKeyId
|
||||
)
|
||||
)
|
||||
.innerJoin(
|
||||
siteProvisioningKeyOrg,
|
||||
and(
|
||||
eq(
|
||||
siteProvisioningKeys.siteProvisioningKeyId,
|
||||
siteProvisioningKeyOrg.siteProvisioningKeyId
|
||||
),
|
||||
eq(siteProvisioningKeyOrg.orgId, orgId)
|
||||
)
|
||||
)
|
||||
.limit(1);
|
||||
|
||||
if (!row) {
|
||||
return next(
|
||||
createHttpError(
|
||||
HttpCode.NOT_FOUND,
|
||||
`Site provisioning key with ID ${siteProvisioningKeyId} not found`
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
await db.transaction(async (trx) => {
|
||||
await trx
|
||||
.delete(siteProvisioningKeyOrg)
|
||||
.where(
|
||||
and(
|
||||
eq(
|
||||
siteProvisioningKeyOrg.siteProvisioningKeyId,
|
||||
siteProvisioningKeyId
|
||||
),
|
||||
eq(siteProvisioningKeyOrg.orgId, orgId)
|
||||
)
|
||||
);
|
||||
|
||||
const siteProvisioningKeyOrgs = await trx
|
||||
.select()
|
||||
.from(siteProvisioningKeyOrg)
|
||||
.where(
|
||||
eq(
|
||||
siteProvisioningKeyOrg.siteProvisioningKeyId,
|
||||
siteProvisioningKeyId
|
||||
)
|
||||
);
|
||||
|
||||
if (siteProvisioningKeyOrgs.length === 0) {
|
||||
await trx
|
||||
.delete(siteProvisioningKeys)
|
||||
.where(
|
||||
eq(
|
||||
siteProvisioningKeys.siteProvisioningKeyId,
|
||||
siteProvisioningKeyId
|
||||
)
|
||||
);
|
||||
}
|
||||
});
|
||||
|
||||
return response(res, {
|
||||
data: null,
|
||||
success: true,
|
||||
error: false,
|
||||
message: "Site provisioning key deleted successfully",
|
||||
status: HttpCode.OK
|
||||
});
|
||||
} catch (error) {
|
||||
logger.error(error);
|
||||
return next(
|
||||
createHttpError(HttpCode.INTERNAL_SERVER_ERROR, "An error occurred")
|
||||
);
|
||||
}
|
||||
}
|
||||
17
server/private/routers/siteProvisioning/index.ts
Normal file
17
server/private/routers/siteProvisioning/index.ts
Normal file
@@ -0,0 +1,17 @@
|
||||
/*
|
||||
* This file is part of a proprietary work.
|
||||
*
|
||||
* Copyright (c) 2025 Fossorial, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This file is licensed under the Fossorial Commercial License.
|
||||
* You may not use this file except in compliance with the License.
|
||||
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
|
||||
*
|
||||
* This file is not licensed under the AGPLv3.
|
||||
*/
|
||||
|
||||
export * from "./createSiteProvisioningKey";
|
||||
export * from "./listSiteProvisioningKeys";
|
||||
export * from "./deleteSiteProvisioningKey";
|
||||
export * from "./updateSiteProvisioningKey";
|
||||
@@ -0,0 +1,127 @@
|
||||
/*
|
||||
* This file is part of a proprietary work.
|
||||
*
|
||||
* Copyright (c) 2025 Fossorial, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This file is licensed under the Fossorial Commercial License.
|
||||
* You may not use this file except in compliance with the License.
|
||||
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
|
||||
*
|
||||
* This file is not licensed under the AGPLv3.
|
||||
*/
|
||||
|
||||
import {
|
||||
db,
|
||||
siteProvisioningKeyOrg,
|
||||
siteProvisioningKeys
|
||||
} from "@server/db";
|
||||
import logger from "@server/logger";
|
||||
import HttpCode from "@server/types/HttpCode";
|
||||
import response from "@server/lib/response";
|
||||
import { NextFunction, Request, Response } from "express";
|
||||
import createHttpError from "http-errors";
|
||||
import { z } from "zod";
|
||||
import { fromError } from "zod-validation-error";
|
||||
import { eq } from "drizzle-orm";
|
||||
import type { ListSiteProvisioningKeysResponse } from "@server/routers/siteProvisioning/types";
|
||||
|
||||
const paramsSchema = z.object({
|
||||
orgId: z.string().nonempty()
|
||||
});
|
||||
|
||||
const querySchema = z.object({
|
||||
limit: z
|
||||
.string()
|
||||
.optional()
|
||||
.default("1000")
|
||||
.transform(Number)
|
||||
.pipe(z.int().positive()),
|
||||
offset: z
|
||||
.string()
|
||||
.optional()
|
||||
.default("0")
|
||||
.transform(Number)
|
||||
.pipe(z.int().nonnegative())
|
||||
});
|
||||
|
||||
function querySiteProvisioningKeys(orgId: string) {
|
||||
return db
|
||||
.select({
|
||||
siteProvisioningKeyId:
|
||||
siteProvisioningKeys.siteProvisioningKeyId,
|
||||
orgId: siteProvisioningKeyOrg.orgId,
|
||||
lastChars: siteProvisioningKeys.lastChars,
|
||||
createdAt: siteProvisioningKeys.createdAt,
|
||||
name: siteProvisioningKeys.name,
|
||||
lastUsed: siteProvisioningKeys.lastUsed,
|
||||
maxBatchSize: siteProvisioningKeys.maxBatchSize,
|
||||
numUsed: siteProvisioningKeys.numUsed,
|
||||
validUntil: siteProvisioningKeys.validUntil,
|
||||
approveNewSites: siteProvisioningKeys.approveNewSites
|
||||
})
|
||||
.from(siteProvisioningKeyOrg)
|
||||
.innerJoin(
|
||||
siteProvisioningKeys,
|
||||
eq(
|
||||
siteProvisioningKeys.siteProvisioningKeyId,
|
||||
siteProvisioningKeyOrg.siteProvisioningKeyId
|
||||
)
|
||||
)
|
||||
.where(eq(siteProvisioningKeyOrg.orgId, orgId));
|
||||
}
|
||||
|
||||
export async function listSiteProvisioningKeys(
|
||||
req: Request,
|
||||
res: Response,
|
||||
next: NextFunction
|
||||
): Promise<any> {
|
||||
try {
|
||||
const parsedParams = paramsSchema.safeParse(req.params);
|
||||
if (!parsedParams.success) {
|
||||
return next(
|
||||
createHttpError(
|
||||
HttpCode.BAD_REQUEST,
|
||||
fromError(parsedParams.error)
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
const parsedQuery = querySchema.safeParse(req.query);
|
||||
if (!parsedQuery.success) {
|
||||
return next(
|
||||
createHttpError(
|
||||
HttpCode.BAD_REQUEST,
|
||||
fromError(parsedQuery.error)
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
const { orgId } = parsedParams.data;
|
||||
const { limit, offset } = parsedQuery.data;
|
||||
|
||||
const siteProvisioningKeysList = await querySiteProvisioningKeys(orgId)
|
||||
.limit(limit)
|
||||
.offset(offset);
|
||||
|
||||
return response<ListSiteProvisioningKeysResponse>(res, {
|
||||
data: {
|
||||
siteProvisioningKeys: siteProvisioningKeysList,
|
||||
pagination: {
|
||||
total: siteProvisioningKeysList.length,
|
||||
limit,
|
||||
offset
|
||||
}
|
||||
},
|
||||
success: true,
|
||||
error: false,
|
||||
message: "Site provisioning keys retrieved successfully",
|
||||
status: HttpCode.OK
|
||||
});
|
||||
} catch (error) {
|
||||
logger.error(error);
|
||||
return next(
|
||||
createHttpError(HttpCode.INTERNAL_SERVER_ERROR, "An error occurred")
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,206 @@
|
||||
/*
|
||||
* This file is part of a proprietary work.
|
||||
*
|
||||
* Copyright (c) 2025 Fossorial, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This file is licensed under the Fossorial Commercial License.
|
||||
* You may not use this file except in compliance with the License.
|
||||
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
|
||||
*
|
||||
* This file is not licensed under the AGPLv3.
|
||||
*/
|
||||
|
||||
import { Request, Response, NextFunction } from "express";
|
||||
import { z } from "zod";
|
||||
import {
|
||||
db,
|
||||
siteProvisioningKeyOrg,
|
||||
siteProvisioningKeys
|
||||
} from "@server/db";
|
||||
import { and, eq } from "drizzle-orm";
|
||||
import response from "@server/lib/response";
|
||||
import HttpCode from "@server/types/HttpCode";
|
||||
import createHttpError from "http-errors";
|
||||
import logger from "@server/logger";
|
||||
import { fromError } from "zod-validation-error";
|
||||
import type { UpdateSiteProvisioningKeyResponse } from "@server/routers/siteProvisioning/types";
|
||||
|
||||
const paramsSchema = z.object({
|
||||
siteProvisioningKeyId: z.string().nonempty(),
|
||||
orgId: z.string().nonempty()
|
||||
});
|
||||
|
||||
const bodySchema = z
|
||||
.strictObject({
|
||||
maxBatchSize: z
|
||||
.union([
|
||||
z.null(),
|
||||
z.coerce.number().int().positive().max(1_000_000)
|
||||
])
|
||||
.optional(),
|
||||
validUntil: z.string().max(255).optional(),
|
||||
approveNewSites: z.boolean().optional()
|
||||
})
|
||||
.superRefine((data, ctx) => {
|
||||
if (
|
||||
data.maxBatchSize === undefined &&
|
||||
data.validUntil === undefined &&
|
||||
data.approveNewSites === undefined
|
||||
) {
|
||||
ctx.addIssue({
|
||||
code: "custom",
|
||||
message: "Provide maxBatchSize and/or validUntil and/or approveNewSites",
|
||||
path: ["maxBatchSize"]
|
||||
});
|
||||
}
|
||||
const v = data.validUntil;
|
||||
if (v == null || v.trim() === "") {
|
||||
return;
|
||||
}
|
||||
if (Number.isNaN(Date.parse(v))) {
|
||||
ctx.addIssue({
|
||||
code: "custom",
|
||||
message: "Invalid validUntil",
|
||||
path: ["validUntil"]
|
||||
});
|
||||
    }
});

export type UpdateSiteProvisioningKeyBody = z.infer<typeof bodySchema>;

export async function updateSiteProvisioningKey(
    req: Request,
    res: Response,
    next: NextFunction
): Promise<any> {
    try {
        const parsedParams = paramsSchema.safeParse(req.params);
        if (!parsedParams.success) {
            return next(
                createHttpError(
                    HttpCode.BAD_REQUEST,
                    fromError(parsedParams.error).toString()
                )
            );
        }

        const parsedBody = bodySchema.safeParse(req.body);
        if (!parsedBody.success) {
            return next(
                createHttpError(
                    HttpCode.BAD_REQUEST,
                    fromError(parsedBody.error).toString()
                )
            );
        }

        const { siteProvisioningKeyId, orgId } = parsedParams.data;
        const body = parsedBody.data;

        const [row] = await db
            .select()
            .from(siteProvisioningKeys)
            .where(
                eq(
                    siteProvisioningKeys.siteProvisioningKeyId,
                    siteProvisioningKeyId
                )
            )
            .innerJoin(
                siteProvisioningKeyOrg,
                and(
                    eq(
                        siteProvisioningKeys.siteProvisioningKeyId,
                        siteProvisioningKeyOrg.siteProvisioningKeyId
                    ),
                    eq(siteProvisioningKeyOrg.orgId, orgId)
                )
            )
            .limit(1);

        if (!row) {
            return next(
                createHttpError(
                    HttpCode.NOT_FOUND,
                    `Site provisioning key with ID ${siteProvisioningKeyId} not found`
                )
            );
        }

        const setValues: {
            maxBatchSize?: number | null;
            validUntil?: string | null;
            approveNewSites?: boolean;
        } = {};
        if (body.maxBatchSize !== undefined) {
            setValues.maxBatchSize = body.maxBatchSize;
        }
        if (body.validUntil !== undefined) {
            setValues.validUntil =
                body.validUntil.trim() === ""
                    ? null
                    : new Date(Date.parse(body.validUntil)).toISOString();
        }
        if (body.approveNewSites !== undefined) {
            setValues.approveNewSites = body.approveNewSites;
        }

        await db
            .update(siteProvisioningKeys)
            .set(setValues)
            .where(
                eq(
                    siteProvisioningKeys.siteProvisioningKeyId,
                    siteProvisioningKeyId
                )
            );

        const [updated] = await db
            .select({
                siteProvisioningKeyId:
                    siteProvisioningKeys.siteProvisioningKeyId,
                name: siteProvisioningKeys.name,
                lastChars: siteProvisioningKeys.lastChars,
                createdAt: siteProvisioningKeys.createdAt,
                lastUsed: siteProvisioningKeys.lastUsed,
                maxBatchSize: siteProvisioningKeys.maxBatchSize,
                numUsed: siteProvisioningKeys.numUsed,
                validUntil: siteProvisioningKeys.validUntil,
                approveNewSites: siteProvisioningKeys.approveNewSites
            })
            .from(siteProvisioningKeys)
            .where(
                eq(
                    siteProvisioningKeys.siteProvisioningKeyId,
                    siteProvisioningKeyId
                )
            )
            .limit(1);

        if (!updated) {
            return next(
                createHttpError(
                    HttpCode.INTERNAL_SERVER_ERROR,
                    "Failed to load updated site provisioning key"
                )
            );
        }

        return response<UpdateSiteProvisioningKeyResponse>(res, {
            data: {
                ...updated,
                orgId
            },
            success: true,
            error: false,
            message: "Site provisioning key updated successfully",
            status: HttpCode.OK
        });
    } catch (error) {
        logger.error(error);
        return next(
            createHttpError(HttpCode.INTERNAL_SERVER_ERROR, "An error occurred")
        );
    }
}
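The setValues block above implements a tri-state partial update: a field omitted from the body is left untouched, an empty validUntil string clears the stored expiry, and any other value overwrites it. Note that an unparseable date makes Date.parse return NaN, so toISOString throws and the request surfaces as a 500 via the outer catch. A minimal standalone sketch of the same normalization (the helper name is illustrative, not from this codebase):

// Hypothetical helper showing the undefined / empty / value semantics.
function normalizeValidUntil(
    input: string | undefined
): string | null | undefined {
    if (input === undefined) return undefined; // leave the column unchanged
    if (input.trim() === "") return null; // clear the expiry
    return new Date(input).toISOString(); // set a new expiry (throws on bad input)
}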
@@ -24,6 +24,7 @@ import {
    siteNetworks,
    userOrgs
} from "@server/db";
import { logAccessAudit } from "#private/lib/logAccessAudit";
import { isLicensedOrSubscribed } from "#private/lib/isLicencedOrSubscribed";
import { tierMatrix } from "@server/lib/billing/tierMatrix";
import response from "@server/lib/response";
@@ -31,7 +32,7 @@ import HttpCode from "@server/types/HttpCode";
import createHttpError from "http-errors";
import logger from "@server/logger";
import { fromError } from "zod-validation-error";
import { eq, or, and } from "drizzle-orm";
import { and, eq, inArray, or } from "drizzle-orm";
import { canUserAccessSiteResource } from "@server/auth/canUserAccessSiteResource";
import { signPublicKey, getOrgCAKeys } from "@server/lib/sshCA";
import config from "@server/lib/config";
@@ -127,7 +128,7 @@ export async function signSshKey(
            resource: resourceQueryString
        } = parsedBody.data;
        const userId = req.user?.userId;
        const roleId = req.userOrgRoleId!;
        const roleIds = req.userOrgRoleIds ?? [];

        if (!userId) {
            return next(
@@ -135,6 +136,15 @@ export async function signSshKey(
            );
        }

        if (roleIds.length === 0) {
            return next(
                createHttpError(
                    HttpCode.FORBIDDEN,
                    "User has no role in organization"
                )
            );
        }

        const [userOrg] = await db
            .select()
            .from(userOrgs)
@@ -338,7 +348,7 @@ export async function signSshKey(
        const hasAccess = await canUserAccessSiteResource({
            userId: userId,
            resourceId: resource.siteResourceId,
            roleId: roleId
            roleIds
        });

        if (!hasAccess) {
@@ -350,28 +360,39 @@ export async function signSshKey(
            );
        }

        const [roleRow] = await db
        const roleRows = await db
            .select()
            .from(roles)
            .where(eq(roles.roleId, roleId))
            .limit(1);
            .where(inArray(roles.roleId, roleIds));

        let parsedSudoCommands: string[] = [];
        let parsedGroups: string[] = [];
        try {
            parsedSudoCommands = JSON.parse(roleRow?.sshSudoCommands ?? "[]");
            if (!Array.isArray(parsedSudoCommands)) parsedSudoCommands = [];
        } catch {
            parsedSudoCommands = [];
        const parsedSudoCommands: string[] = [];
        const parsedGroupsSet = new Set<string>();
        let homedir: boolean | null = null;
        const sudoModeOrder = { none: 0, commands: 1, all: 2 };
        let sudoMode: "none" | "commands" | "all" = "none";
        for (const roleRow of roleRows) {
            try {
                const cmds = JSON.parse(roleRow?.sshSudoCommands ?? "[]");
                if (Array.isArray(cmds)) parsedSudoCommands.push(...cmds);
            } catch {
                // skip
            }
            try {
                const grps = JSON.parse(roleRow?.sshUnixGroups ?? "[]");
                if (Array.isArray(grps)) grps.forEach((g: string) => parsedGroupsSet.add(g));
            } catch {
                // skip
            }
            if (roleRow?.sshCreateHomeDir === true) homedir = true;
            const m = roleRow?.sshSudoMode ?? "none";
            if (sudoModeOrder[m as keyof typeof sudoModeOrder] > sudoModeOrder[sudoMode]) {
                sudoMode = m as "none" | "commands" | "all";
            }
        }
        try {
            parsedGroups = JSON.parse(roleRow?.sshUnixGroups ?? "[]");
            if (!Array.isArray(parsedGroups)) parsedGroups = [];
        } catch {
            parsedGroups = [];
        const parsedGroups = Array.from(parsedGroupsSet);
        if (homedir === null && roleRows.length > 0) {
            homedir = roleRows[0].sshCreateHomeDir ?? null;
        }
        const homedir = roleRow?.sshCreateHomeDir ?? null;
        const sudoMode = roleRow?.sshSudoMode ?? "none";

        const sites = await db
            .select({ siteId: siteNetworks.siteId })
@@ -474,6 +495,24 @@ export async function signSshKey(
            })
        });

        await logAccessAudit({
            action: true,
            type: "ssh",
            orgId: orgId,
            siteResourceId: resource.siteResourceId,
            user: req.user
                ? { username: req.user.username ?? "", userId: req.user.userId }
                : undefined,
            metadata: {
                resourceName: resource.name,
                siteId: resource.siteId,
                sshUsername: usernameToUse,
                sshHost: sshHost
            },
            userAgent: req.headers["user-agent"],
            requestIp: req.ip
        });

        return response<SignSshKeyResponse>(res, {
            data: {
                certificate: cert.certificate,
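The hunk above changes SSH certificate signing from a single role to the union of all of the user's roles: sudo commands and Unix groups are merged across roles, and the effective sudo mode is the most permissive one under the ordering none < commands < all. A hedged sketch of that merge as a pure function (field names follow the diff; the row type is abbreviated, and homedir is simplified to "true if any role grants it", whereas the diff also falls back to the first row when no role sets it):

type SudoMode = "none" | "commands" | "all";

interface RoleSshPerms {
    sshSudoCommands?: string | null; // JSON-encoded string[]
    sshUnixGroups?: string | null;   // JSON-encoded string[]
    sshCreateHomeDir?: boolean | null;
    sshSudoMode?: SudoMode | null;
}

// Condensed version of the merge the hunk performs inline.
function mergeSshPerms(rows: RoleSshPerms[]) {
    const order: Record<SudoMode, number> = { none: 0, commands: 1, all: 2 };
    const sudoCommands: string[] = [];
    const groups = new Set<string>();
    let homedir = false;
    let sudoMode: SudoMode = "none";
    for (const row of rows) {
        // Tolerate malformed JSON in either column by skipping it.
        try {
            const cmds = JSON.parse(row.sshSudoCommands ?? "[]");
            if (Array.isArray(cmds)) sudoCommands.push(...cmds);
        } catch {}
        try {
            const grps = JSON.parse(row.sshUnixGroups ?? "[]");
            if (Array.isArray(grps)) for (const g of grps) groups.add(g);
        } catch {}
        if (row.sshCreateHomeDir === true) homedir = true;
        const m: SudoMode = row.sshSudoMode ?? "none";
        if (order[m] > order[sudoMode]) sudoMode = m; // most permissive wins
    }
    return { sudoCommands, unixGroups: [...groups], homedir, sudoMode };
}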
170  server/private/routers/user/addUserRole.ts  Normal file
@@ -0,0 +1,170 @@
/*
 * This file is part of a proprietary work.
 *
 * Copyright (c) 2025 Fossorial, Inc.
 * All rights reserved.
 *
 * This file is licensed under the Fossorial Commercial License.
 * You may not use this file except in compliance with the License.
 * Unauthorized use, copying, modification, or distribution is strictly prohibited.
 *
 * This file is not licensed under the AGPLv3.
 */

import { Request, Response, NextFunction } from "express";
import { z } from "zod";
import stoi from "@server/lib/stoi";
import { clients, db } from "@server/db";
import { userOrgRoles, userOrgs, roles } from "@server/db";
import { eq, and } from "drizzle-orm";
import response from "@server/lib/response";
import HttpCode from "@server/types/HttpCode";
import createHttpError from "http-errors";
import logger from "@server/logger";
import { fromError } from "zod-validation-error";
import { OpenAPITags, registry } from "@server/openApi";
import { rebuildClientAssociationsFromClient } from "@server/lib/rebuildClientAssociations";

const addUserRoleParamsSchema = z.strictObject({
    userId: z.string(),
    roleId: z.string().transform(stoi).pipe(z.number())
});

registry.registerPath({
    method: "post",
    path: "/user/{userId}/add-role/{roleId}",
    description: "Add a role to a user.",
    tags: [OpenAPITags.Role, OpenAPITags.User],
    request: {
        params: addUserRoleParamsSchema
    },
    responses: {}
});

export async function addUserRole(
    req: Request,
    res: Response,
    next: NextFunction
): Promise<any> {
    try {
        const parsedParams = addUserRoleParamsSchema.safeParse(req.params);
        if (!parsedParams.success) {
            return next(
                createHttpError(
                    HttpCode.BAD_REQUEST,
                    fromError(parsedParams.error).toString()
                )
            );
        }

        const { userId, roleId } = parsedParams.data;

        if (req.user && !req.userOrg) {
            return next(
                createHttpError(
                    HttpCode.FORBIDDEN,
                    "You do not have access to this organization"
                )
            );
        }

        // get the role
        const [role] = await db
            .select()
            .from(roles)
            .where(eq(roles.roleId, roleId))
            .limit(1);

        if (!role) {
            return next(
                createHttpError(HttpCode.BAD_REQUEST, "Invalid role ID")
            );
        }

        const existingUser = await db
            .select()
            .from(userOrgs)
            .where(
                and(eq(userOrgs.userId, userId), eq(userOrgs.orgId, role.orgId))
            )
            .limit(1);

        if (existingUser.length === 0) {
            return next(
                createHttpError(
                    HttpCode.NOT_FOUND,
                    "User not found or does not belong to the specified organization"
                )
            );
        }

        if (existingUser[0].isOwner) {
            return next(
                createHttpError(
                    HttpCode.FORBIDDEN,
                    "Cannot change the role of the owner of the organization"
                )
            );
        }

        const roleExists = await db
            .select()
            .from(roles)
            .where(and(eq(roles.roleId, roleId), eq(roles.orgId, role.orgId)))
            .limit(1);

        if (roleExists.length === 0) {
            return next(
                createHttpError(
                    HttpCode.NOT_FOUND,
                    "Role not found or does not belong to the specified organization"
                )
            );
        }

        let newUserRole: { userId: string; orgId: string; roleId: number } | null =
            null;
        await db.transaction(async (trx) => {
            const inserted = await trx
                .insert(userOrgRoles)
                .values({
                    userId,
                    orgId: role.orgId,
                    roleId
                })
                .onConflictDoNothing()
                .returning();

            if (inserted.length > 0) {
                newUserRole = inserted[0];
            }

            const orgClients = await trx
                .select()
                .from(clients)
                .where(
                    and(
                        eq(clients.userId, userId),
                        eq(clients.orgId, role.orgId)
                    )
                );

            for (const orgClient of orgClients) {
                await rebuildClientAssociationsFromClient(orgClient, trx);
            }
        });

        return response(res, {
            data: newUserRole ?? { userId, orgId: role.orgId, roleId },
            success: true,
            error: false,
            message: "Role added to user successfully",
            status: HttpCode.OK
        });
    } catch (error) {
        logger.error(error);
        return next(
            createHttpError(HttpCode.INTERNAL_SERVER_ERROR, "An error occurred")
        );
    }
}
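Because the insert uses onConflictDoNothing with returning, re-adding a role the user already holds succeeds without duplicating rows, and the response falls back to the composite key when nothing was inserted, so the call is idempotent. A hypothetical client call against the registered path (the /v1 prefix and bearer auth are assumptions, not shown in this diff):

// Hypothetical usage; adjust the base path and auth to your deployment.
const res = await fetch(
    `/v1/user/${encodeURIComponent(userId)}/add-role/${roleId}`,
    { method: "POST", headers: { Authorization: `Bearer ${token}` } }
);
if (!res.ok) throw new Error(`addUserRole failed: ${res.status}`);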
16  server/private/routers/user/index.ts  Normal file
@@ -0,0 +1,16 @@
/*
 * This file is part of a proprietary work.
 *
 * Copyright (c) 2025 Fossorial, Inc.
 * All rights reserved.
 *
 * This file is licensed under the Fossorial Commercial License.
 * You may not use this file except in compliance with the License.
 * Unauthorized use, copying, modification, or distribution is strictly prohibited.
 *
 * This file is not licensed under the AGPLv3.
 */

export * from "./addUserRole";
export * from "./removeUserRole";
export * from "./setUserOrgRoles";
171  server/private/routers/user/removeUserRole.ts  Normal file
@@ -0,0 +1,171 @@
/*
 * This file is part of a proprietary work.
 *
 * Copyright (c) 2025 Fossorial, Inc.
 * All rights reserved.
 *
 * This file is licensed under the Fossorial Commercial License.
 * You may not use this file except in compliance with the License.
 * Unauthorized use, copying, modification, or distribution is strictly prohibited.
 *
 * This file is not licensed under the AGPLv3.
 */

import { Request, Response, NextFunction } from "express";
import { z } from "zod";
import stoi from "@server/lib/stoi";
import { db } from "@server/db";
import { userOrgRoles, userOrgs, roles, clients } from "@server/db";
import { eq, and } from "drizzle-orm";
import response from "@server/lib/response";
import HttpCode from "@server/types/HttpCode";
import createHttpError from "http-errors";
import logger from "@server/logger";
import { fromError } from "zod-validation-error";
import { OpenAPITags, registry } from "@server/openApi";
import { rebuildClientAssociationsFromClient } from "@server/lib/rebuildClientAssociations";

const removeUserRoleParamsSchema = z.strictObject({
    userId: z.string(),
    roleId: z.string().transform(stoi).pipe(z.number())
});

registry.registerPath({
    method: "delete",
    path: "/user/{userId}/remove-role/{roleId}",
    description:
        "Remove a role from a user. User must have at least one role left in the org.",
    tags: [OpenAPITags.Role, OpenAPITags.User],
    request: {
        params: removeUserRoleParamsSchema
    },
    responses: {}
});

export async function removeUserRole(
    req: Request,
    res: Response,
    next: NextFunction
): Promise<any> {
    try {
        const parsedParams = removeUserRoleParamsSchema.safeParse(req.params);
        if (!parsedParams.success) {
            return next(
                createHttpError(
                    HttpCode.BAD_REQUEST,
                    fromError(parsedParams.error).toString()
                )
            );
        }

        const { userId, roleId } = parsedParams.data;

        if (req.user && !req.userOrg) {
            return next(
                createHttpError(
                    HttpCode.FORBIDDEN,
                    "You do not have access to this organization"
                )
            );
        }

        const [role] = await db
            .select()
            .from(roles)
            .where(eq(roles.roleId, roleId))
            .limit(1);

        if (!role) {
            return next(
                createHttpError(HttpCode.BAD_REQUEST, "Invalid role ID")
            );
        }

        const [existingUser] = await db
            .select()
            .from(userOrgs)
            .where(
                and(eq(userOrgs.userId, userId), eq(userOrgs.orgId, role.orgId))
            )
            .limit(1);

        if (!existingUser) {
            return next(
                createHttpError(
                    HttpCode.NOT_FOUND,
                    "User not found or does not belong to the specified organization"
                )
            );
        }

        if (existingUser.isOwner) {
            return next(
                createHttpError(
                    HttpCode.FORBIDDEN,
                    "Cannot change the roles of the owner of the organization"
                )
            );
        }

        const remainingRoles = await db
            .select({ roleId: userOrgRoles.roleId })
            .from(userOrgRoles)
            .where(
                and(
                    eq(userOrgRoles.userId, userId),
                    eq(userOrgRoles.orgId, role.orgId)
                )
            );

        if (remainingRoles.length <= 1) {
            const hasThisRole = remainingRoles.some((r) => r.roleId === roleId);
            if (hasThisRole) {
                return next(
                    createHttpError(
                        HttpCode.FORBIDDEN,
"User must have at least one role in the organization. Remove the last role is not allowed."
                    )
                );
            }
        }

        await db.transaction(async (trx) => {
            await trx
                .delete(userOrgRoles)
                .where(
                    and(
                        eq(userOrgRoles.userId, userId),
                        eq(userOrgRoles.orgId, role.orgId),
                        eq(userOrgRoles.roleId, roleId)
                    )
                );

            const orgClients = await trx
                .select()
                .from(clients)
                .where(
                    and(
                        eq(clients.userId, userId),
                        eq(clients.orgId, role.orgId)
                    )
                );

            for (const orgClient of orgClients) {
                await rebuildClientAssociationsFromClient(orgClient, trx);
            }
        });

        return response(res, {
            data: { userId, orgId: role.orgId, roleId },
            success: true,
            error: false,
            message: "Role removed from user successfully",
            status: HttpCode.OK
        });
    } catch (error) {
        logger.error(error);
        return next(
            createHttpError(HttpCode.INTERNAL_SERVER_ERROR, "An error occurred")
        );
    }
}
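The last-role guard keeps every non-owner member holding at least one role. Reduced to a pure predicate, the check reads as follows (a sketch of the logic above, not code from this diff):

// Removal is refused only when the target role is the single role
// the user still holds in the org.
function canRemoveRole(remainingRoleIds: number[], targetRoleId: number): boolean {
    return !(remainingRoleIds.length <= 1 && remainingRoleIds.includes(targetRoleId));
}

// canRemoveRole([3], 3)    -> false (would strip the last role)
// canRemoveRole([3, 7], 3) -> true
// canRemoveRole([7], 3)    -> true (deleting a role the user lacks is a no-op)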
163  server/private/routers/user/setUserOrgRoles.ts  Normal file
@@ -0,0 +1,163 @@
/*
 * This file is part of a proprietary work.
 *
 * Copyright (c) 2025 Fossorial, Inc.
 * All rights reserved.
 *
 * This file is licensed under the Fossorial Commercial License.
 * You may not use this file except in compliance with the License.
 * Unauthorized use, copying, modification, or distribution is strictly prohibited.
 *
 * This file is not licensed under the AGPLv3.
 */

import { Request, Response, NextFunction } from "express";
import { z } from "zod";
import { clients, db } from "@server/db";
import { userOrgRoles, userOrgs, roles } from "@server/db";
import { eq, and, inArray } from "drizzle-orm";
import response from "@server/lib/response";
import HttpCode from "@server/types/HttpCode";
import createHttpError from "http-errors";
import logger from "@server/logger";
import { fromError } from "zod-validation-error";
import { rebuildClientAssociationsFromClient } from "@server/lib/rebuildClientAssociations";

const setUserOrgRolesParamsSchema = z.strictObject({
    orgId: z.string(),
    userId: z.string()
});

const setUserOrgRolesBodySchema = z.strictObject({
    roleIds: z.array(z.int().positive()).min(1)
});

export async function setUserOrgRoles(
    req: Request,
    res: Response,
    next: NextFunction
): Promise<any> {
    try {
        const parsedParams = setUserOrgRolesParamsSchema.safeParse(req.params);
        if (!parsedParams.success) {
            return next(
                createHttpError(
                    HttpCode.BAD_REQUEST,
                    fromError(parsedParams.error).toString()
                )
            );
        }

        const parsedBody = setUserOrgRolesBodySchema.safeParse(req.body);
        if (!parsedBody.success) {
            return next(
                createHttpError(
                    HttpCode.BAD_REQUEST,
                    fromError(parsedBody.error).toString()
                )
            );
        }

        const { orgId, userId } = parsedParams.data;
        const { roleIds } = parsedBody.data;

        if (req.user && !req.userOrg) {
            return next(
                createHttpError(
                    HttpCode.FORBIDDEN,
                    "You do not have access to this organization"
                )
            );
        }

        const uniqueRoleIds = [...new Set(roleIds)];

        const [existingUser] = await db
            .select()
            .from(userOrgs)
            .where(and(eq(userOrgs.userId, userId), eq(userOrgs.orgId, orgId)))
            .limit(1);

        if (!existingUser) {
            return next(
                createHttpError(
                    HttpCode.NOT_FOUND,
                    "User not found in this organization"
                )
            );
        }

        if (existingUser.isOwner) {
            return next(
                createHttpError(
                    HttpCode.FORBIDDEN,
                    "Cannot change the roles of the owner of the organization"
                )
            );
        }

        const orgRoles = await db
            .select({ roleId: roles.roleId })
            .from(roles)
            .where(
                and(
                    eq(roles.orgId, orgId),
                    inArray(roles.roleId, uniqueRoleIds)
                )
            );

        if (orgRoles.length !== uniqueRoleIds.length) {
            return next(
                createHttpError(
                    HttpCode.BAD_REQUEST,
                    "One or more role IDs are invalid for this organization"
                )
            );
        }

        await db.transaction(async (trx) => {
            await trx
                .delete(userOrgRoles)
                .where(
                    and(
                        eq(userOrgRoles.userId, userId),
                        eq(userOrgRoles.orgId, orgId)
                    )
                );

            if (uniqueRoleIds.length > 0) {
                await trx.insert(userOrgRoles).values(
                    uniqueRoleIds.map((roleId) => ({
                        userId,
                        orgId,
                        roleId
                    }))
                );
            }

            const orgClients = await trx
                .select()
                .from(clients)
                .where(
                    and(eq(clients.userId, userId), eq(clients.orgId, orgId))
                );

            for (const orgClient of orgClients) {
                await rebuildClientAssociationsFromClient(orgClient, trx);
            }
        });

        return response(res, {
            data: { userId, orgId, roleIds: uniqueRoleIds },
            success: true,
            error: false,
            message: "User roles set successfully",
            status: HttpCode.OK
        });
    } catch (error) {
        logger.error(error);
        return next(
            createHttpError(HttpCode.INTERNAL_SERVER_ERROR, "An error occurred")
        );
    }
}
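setUserOrgRoles replaces the user's entire role set atomically: delete every role row, re-insert the deduplicated list, then rebuild client associations, all inside one transaction, so a mid-flight failure cannot leave the user role-less. A quick sketch of what the body schema accepts (behavior noted in comments):

// Duplicates pass validation and are collapsed later via [...new Set(roleIds)].
setUserOrgRolesBodySchema.parse({ roleIds: [2, 5, 5] }); // ok
setUserOrgRolesBodySchema.parse({ roleIds: [] }); // throws: .min(1) demands at least one role
setUserOrgRolesBodySchema.parse({ roleIds: [0] }); // throws: ids must be positive integers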
@@ -18,10 +18,12 @@ import {
} from "#private/routers/remoteExitNode";
import { MessageHandler } from "@server/routers/ws";
import { build } from "@server/build";
import { handleConnectionLogMessage } from "#private/routers/newt";

export const messageHandlers: Record<string, MessageHandler> = {
    "remoteExitNode/register": handleRemoteExitNodeRegisterMessage,
    "remoteExitNode/ping": handleRemoteExitNodePingMessage
    "remoteExitNode/ping": handleRemoteExitNodePingMessage,
    "newt/access-log": handleConnectionLogMessage,
};

if (build != "saas") {
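The hunk above registers the new newt/access-log handler in the shared messageHandlers map, keyed by message type. Dispatch is presumably a plain map lookup; a hedged sketch (the envelope shape and handler signature are assumptions, not taken from this diff):

// Hypothetical dispatch over the handler map, assuming each inbound
// message carries a string `type` discriminator.
async function dispatchMessage(raw: string): Promise<void> {
    const message = JSON.parse(raw) as { type: string; data?: unknown };
    const handler = messageHandlers[message.type];
    if (!handler) {
        logger.warn(`No handler for message type: ${message.type}`);
        return;
    }
    await (handler as (msg: unknown) => Promise<unknown>)(message);
}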
@@ -19,17 +19,14 @@ import { Socket } from "net";
import {
    Newt,
    newts,
    NewtSession,
    olms,
    Olm,
    OlmSession,
    olms,
    RemoteExitNode,
    RemoteExitNodeSession,
    remoteExitNodes,
    sites
} from "@server/db";
import { eq } from "drizzle-orm";
import { db } from "@server/db";
import { recordPing } from "@server/routers/newt/pingAccumulator";
import { validateNewtSessionToken } from "@server/auth/sessions/newt";
import { validateOlmSessionToken } from "@server/auth/sessions/olm";
import logger from "@server/logger";
@@ -197,11 +194,7 @@ const connectedClients: Map<string, AuthenticatedWebSocket[]> = new Map();
// Config version tracking map (local to this node, resets on server restart)
const clientConfigVersions: Map<string, number> = new Map();

// Tracks the last Unix timestamp (seconds) at which a ping was flushed to the
// DB for a given siteId. Resets on server restart which is fine – the first
// ping after startup will always write, re-establishing the online state.
const lastPingDbWrite: Map<number, number> = new Map();
const PING_DB_WRITE_INTERVAL = 45; // seconds


// Recovery tracking
let isRedisRecoveryInProgress = false;
@@ -853,32 +846,16 @@ const setupConnection = async (
        );
    });

    // Handle WebSocket protocol-level pings from older newt clients that do
    // not send application-level "newt/ping" messages. Update the site's
    // online state and lastPing timestamp so the offline checker treats them
    // the same as modern newt clients.
    if (clientType === "newt") {
        const newtClient = client as Newt;
        ws.on("ping", async () => {
        ws.on("ping", () => {
            if (!newtClient.siteId) return;
            const now = Math.floor(Date.now() / 1000);
            const lastWrite = lastPingDbWrite.get(newtClient.siteId) ?? 0;
            if (now - lastWrite < PING_DB_WRITE_INTERVAL) return;
            lastPingDbWrite.set(newtClient.siteId, now);
            try {
                await db
                    .update(sites)
                    .set({
                        online: true,
                        lastPing: now
                    })
                    .where(eq(sites.siteId, newtClient.siteId));
            } catch (error) {
                logger.error(
                    "Error updating newt site online state on WS ping",
                    { error }
                );
            }
            // Record the ping in the accumulator instead of writing to the
            // database on every WS ping frame. The accumulator flushes all
            // pending pings in a single batched UPDATE every ~10s, which
            // prevents connection pool exhaustion under load (especially
            // with cross-region latency to the database).
            recordPing(newtClient.siteId);
        });
    }
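recordPing only touches in-memory state; the accumulator module itself is not part of this diff. Going solely by the comment above (pending pings flushed in one batched UPDATE roughly every 10 seconds), a hedged sketch of what it might look like, reusing drizzle's inArray as elsewhere in this commit:

// Hypothetical pingAccumulator sketch: the real module is not shown here.
import { db, sites } from "@server/db";
import { inArray } from "drizzle-orm";

// siteIds that pinged since the last flush.
const pending = new Set<number>();

export function recordPing(siteId: number) {
    pending.add(siteId);
}

// One batched UPDATE per interval instead of one write per ping frame.
// lastPing is approximated by the flush time, which stays within the
// ~10s window of the true ping time.
setInterval(async () => {
    if (pending.size === 0) return;
    const batch = [...pending];
    pending.clear();
    await db
        .update(sites)
        .set({ online: true, lastPing: Math.floor(Date.now() / 1000) })
        .where(inArray(sites.siteId, batch));
}, 10_000);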