Merge branch 'dev' into feat/login-page-customization
@@ -45,6 +45,11 @@ export class PrivateConfig
        this.rawPrivateConfig = parsedPrivateConfig;

+       process.env.BRANDING_HIDE_AUTH_LAYOUT_FOOTER =
+           this.rawPrivateConfig.branding?.hide_auth_layout_footer === true
+               ? "true"
+               : "false";
+
        if (this.rawPrivateConfig.branding?.colors) {
            process.env.BRANDING_COLORS = JSON.stringify(
                this.rawPrivateConfig.branding?.colors
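
Note: the env var names above match this commit; how the Next.js layer consumes them is not shown in this diff. A plausible read-side sketch (the defaulting is illustrative, not from this commit):

    // Sketch: reading the branding env vars set in PrivateConfig.
    const hideFooter = process.env.BRANDING_HIDE_AUTH_LAYOUT_FOOTER === "true";
    const colors = process.env.BRANDING_COLORS
        ? JSON.parse(process.env.BRANDING_COLORS)
        : undefined;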

@@ -197,7 +197,7 @@ export async function listExitNodes(orgId: string, filterOnline = false, noCloud

            // // set the item in the database if it is offline
            // if (isActuallyOnline != node.online) {
-           //     await db
+           //     await trx
            //         .update(exitNodes)
            //         .set({ online: isActuallyOnline })
            //         .where(eq(exitNodes.exitNodeId, node.exitNodeId));

363 server/private/lib/lock.ts Normal file
@@ -0,0 +1,363 @@
/*
 * This file is part of a proprietary work.
 *
 * Copyright (c) 2025 Fossorial, Inc.
 * All rights reserved.
 *
 * This file is licensed under the Fossorial Commercial License.
 * You may not use this file except in compliance with the License.
 * Unauthorized use, copying, modification, or distribution is strictly prohibited.
 *
 * This file is not licensed under the AGPLv3.
 */

import { config } from "@server/lib/config";
import logger from "@server/logger";
import { redis } from "#private/lib/redis";

export class LockManager {
    /**
     * Acquire a distributed lock using Redis SET with NX and PX options
     * @param lockKey - Unique identifier for the lock
     * @param ttlMs - Time to live in milliseconds
     * @returns Promise<boolean> - true if lock acquired, false otherwise
     */
    async acquireLock(
        lockKey: string,
        ttlMs: number = 30000
    ): Promise<boolean> {
        if (!redis || !redis.status || redis.status !== "ready") {
            return true;
        }

        const lockValue = `${
            config.getRawConfig().gerbil.exit_node_name
        }:${Date.now()}`;
        const redisKey = `lock:${lockKey}`;

        try {
            // Use SET with NX (only set if not exists) and PX (expire in milliseconds)
            // This is atomic and handles both setting and expiration
            const result = await redis.set(
                redisKey,
                lockValue,
                "PX",
                ttlMs,
                "NX"
            );

            if (result === "OK") {
                logger.debug(
                    `Lock acquired: ${lockKey} by ${
                        config.getRawConfig().gerbil.exit_node_name
                    }`
                );
                return true;
            }

            // Check if the existing lock is from this worker (reentrant behavior)
            const existingValue = await redis.get(redisKey);
            if (
                existingValue &&
                existingValue.startsWith(
                    `${config.getRawConfig().gerbil.exit_node_name}:`
                )
            ) {
                // Extend the lock TTL since it's the same worker
                await redis.pexpire(redisKey, ttlMs);
                logger.debug(
                    `Lock extended: ${lockKey} by ${
                        config.getRawConfig().gerbil.exit_node_name
                    }`
                );
                return true;
            }

            return false;
        } catch (error) {
            logger.error(`Failed to acquire lock ${lockKey}:`, error);
            return false;
        }
    }

    /**
     * Release a lock using Lua script to ensure atomicity
     * @param lockKey - Unique identifier for the lock
     */
    async releaseLock(lockKey: string): Promise<void> {
        if (!redis || !redis.status || redis.status !== "ready") {
            return;
        }

        const redisKey = `lock:${lockKey}`;

        // Lua script to ensure we only delete the lock if it belongs to this worker
        const luaScript = `
            local key = KEYS[1]
            local worker_prefix = ARGV[1]
            local current_value = redis.call('GET', key)

            if current_value and string.find(current_value, worker_prefix, 1, true) == 1 then
                return redis.call('DEL', key)
            else
                return 0
            end
        `;

        try {
            const result = (await redis.eval(
                luaScript,
                1,
                redisKey,
                `${config.getRawConfig().gerbil.exit_node_name}:`
            )) as number;

            if (result === 1) {
                logger.debug(
                    `Lock released: ${lockKey} by ${
                        config.getRawConfig().gerbil.exit_node_name
                    }`
                );
            } else {
                logger.warn(
                    `Lock not released - not owned by worker: ${lockKey} by ${
                        config.getRawConfig().gerbil.exit_node_name
                    }`
                );
            }
        } catch (error) {
            logger.error(`Failed to release lock ${lockKey}:`, error);
        }
    }

    /**
     * Force release a lock regardless of owner (use with caution)
     * @param lockKey - Unique identifier for the lock
     */
    async forceReleaseLock(lockKey: string): Promise<void> {
        if (!redis || !redis.status || redis.status !== "ready") {
            return;
        }

        const redisKey = `lock:${lockKey}`;

        try {
            const result = await redis.del(redisKey);
            if (result === 1) {
                logger.debug(`Lock force released: ${lockKey}`);
            }
        } catch (error) {
            logger.error(`Failed to force release lock ${lockKey}:`, error);
        }
    }

    /**
     * Check if a lock exists and get its info
     * @param lockKey - Unique identifier for the lock
     * @returns Promise<{exists: boolean, ownedByMe: boolean, ttl: number}>
     */
    async getLockInfo(lockKey: string): Promise<{
        exists: boolean;
        ownedByMe: boolean;
        ttl: number;
        owner?: string;
    }> {
        if (!redis || !redis.status || redis.status !== "ready") {
            return { exists: false, ownedByMe: true, ttl: 0 };
        }

        const redisKey = `lock:${lockKey}`;

        try {
            const [value, ttl] = await Promise.all([
                redis.get(redisKey),
                redis.pttl(redisKey)
            ]);

            const exists = value !== null;
            const ownedByMe =
                exists &&
                value!.startsWith(`${config.getRawConfig().gerbil.exit_node_name}:`);
            const owner = exists ? value!.split(":")[0] : undefined;

            return {
                exists,
                ownedByMe,
                ttl: ttl > 0 ? ttl : 0,
                owner
            };
        } catch (error) {
            logger.error(`Failed to get lock info ${lockKey}:`, error);
            return { exists: false, ownedByMe: false, ttl: 0 };
        }
    }

    /**
     * Extend the TTL of an existing lock owned by this worker
     * @param lockKey - Unique identifier for the lock
     * @param ttlMs - New TTL in milliseconds
     * @returns Promise<boolean> - true if extended successfully
     */
    async extendLock(lockKey: string, ttlMs: number): Promise<boolean> {
        if (!redis || !redis.status || redis.status !== "ready") {
            return true;
        }

        const redisKey = `lock:${lockKey}`;

        // Lua script to extend TTL only if lock is owned by this worker
        const luaScript = `
            local key = KEYS[1]
            local worker_prefix = ARGV[1]
            local ttl = tonumber(ARGV[2])
            local current_value = redis.call('GET', key)

            if current_value and string.find(current_value, worker_prefix, 1, true) == 1 then
                return redis.call('PEXPIRE', key, ttl)
            else
                return 0
            end
        `;

        try {
            const result = (await redis.eval(
                luaScript,
                1,
                redisKey,
                `${config.getRawConfig().gerbil.exit_node_name}:`,
                ttlMs.toString()
            )) as number;

            if (result === 1) {
                logger.debug(
                    `Lock extended: ${lockKey} by ${
                        config.getRawConfig().gerbil.exit_node_name
                    } for ${ttlMs}ms`
                );
                return true;
            }
            return false;
        } catch (error) {
            logger.error(`Failed to extend lock ${lockKey}:`, error);
            return false;
        }
    }

    /**
     * Attempt to acquire lock with retries and exponential backoff
     * @param lockKey - Unique identifier for the lock
     * @param ttlMs - Time to live in milliseconds
     * @param maxRetries - Maximum number of retry attempts
     * @param baseDelayMs - Base delay between retries in milliseconds
     * @returns Promise<boolean> - true if lock acquired
     */
    async acquireLockWithRetry(
        lockKey: string,
        ttlMs: number = 30000,
        maxRetries: number = 5,
        baseDelayMs: number = 100
    ): Promise<boolean> {
        if (!redis || !redis.status || redis.status !== "ready") {
            return true;
        }

        for (let attempt = 0; attempt <= maxRetries; attempt++) {
            const acquired = await this.acquireLock(lockKey, ttlMs);

            if (acquired) {
                return true;
            }

            if (attempt < maxRetries) {
                // Exponential backoff with jitter
                const delay =
                    baseDelayMs * Math.pow(2, attempt) + Math.random() * 100;
                await new Promise((resolve) => setTimeout(resolve, delay));
            }
        }

        logger.warn(
            `Failed to acquire lock ${lockKey} after ${maxRetries + 1} attempts`
        );
        return false;
    }

    /**
     * Execute a function while holding a lock
     * @param lockKey - Unique identifier for the lock
     * @param fn - Function to execute while holding the lock
     * @param ttlMs - Lock TTL in milliseconds
     * @returns Promise<T> - Result of the executed function
     */
    async withLock<T>(
        lockKey: string,
        fn: () => Promise<T>,
        ttlMs: number = 30000
    ): Promise<T> {
        if (!redis || !redis.status || redis.status !== "ready") {
            return await fn();
        }

        const acquired = await this.acquireLock(lockKey, ttlMs);

        if (!acquired) {
            throw new Error(`Failed to acquire lock: ${lockKey}`);
        }

        try {
            return await fn();
        } finally {
            await this.releaseLock(lockKey);
        }
    }

    /**
     * Clean up expired locks - Redis handles this automatically, but this method
     * can be used to get statistics about locks
     * @returns Promise<{activeLocksCount: number, locksOwnedByMe: number}>
     */
    async getLockStatistics(): Promise<{
        activeLocksCount: number;
        locksOwnedByMe: number;
    }> {
        if (!redis || !redis.status || redis.status !== "ready") {
            return { activeLocksCount: 0, locksOwnedByMe: 0 };
        }

        try {
            const keys = await redis.keys("lock:*");
            let locksOwnedByMe = 0;

            if (keys.length > 0) {
                const values = await redis.mget(...keys);
                locksOwnedByMe = values.filter(
                    (value) =>
                        value &&
                        value.startsWith(
                            `${config.getRawConfig().gerbil.exit_node_name}:`
                        )
                ).length;
            }

            return {
                activeLocksCount: keys.length,
                locksOwnedByMe
            };
        } catch (error) {
            logger.error("Failed to get lock statistics:", error);
            return { activeLocksCount: 0, locksOwnedByMe: 0 };
        }
    }

    /**
     * Close the Redis connection
     */
    async disconnect(): Promise<void> {
        if (!redis || !redis.status || redis.status !== "ready") {
            return;
        }
        await redis.quit();
    }
}

export const lockManager = new LockManager();
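
Note: a minimal usage sketch of the new lockManager (the lock key and the work inside the callback are hypothetical; the method signatures match the file above). withLock runs the callback only while holding the lock and releases it in a finally block, and every method no-ops open when Redis is not ready, so single-node deployments behave as before:

    import { lockManager } from "#private/lib/lock";

    // Hypothetical job name; any string works as a lock key.
    await lockManager.withLock(
        "exit-node-sweep",
        async () => {
            // ...critical section; must finish within the TTL...
        },
        60000 // lock TTL in ms
    );

    // Under contention, acquireLockWithRetry backs off exponentially:
    // delay = baseDelayMs * 2^attempt + jitter (~100ms, 200ms, 400ms, ...).
    if (await lockManager.acquireLockWithRetry("exit-node-sweep", 30000, 5, 100)) {
        try {
            // ...work...
        } finally {
            await lockManager.releaseLock("exit-node-sweep");
        }
    }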

@@ -124,6 +124,7 @@ export const privateConfigSchema = z.object({
                })
            )
            .optional(),
+       hide_auth_layout_footer: z.boolean().optional().default(false),
        login_page: z
            .object({
                subtitle_text: z.string().optional(),
@@ -434,9 +434,9 @@ export async function getTraefikConfig(
                routerMiddlewares.push(rewriteMiddlewareName);
            }

-           // logger.debug(
-           //     `Created path rewrite middleware ${rewriteMiddlewareName}: ${resource.pathMatchType}(${resource.path}) -> ${resource.rewritePathType}(${resource.rewritePath})`
-           // );
+           logger.debug(
+               `Created path rewrite middleware ${rewriteMiddlewareName}: ${resource.pathMatchType}(${resource.path}) -> ${resource.rewritePathType}(${resource.rewritePath})`
+           );
        } catch (error) {
            logger.error(
                `Failed to create path rewrite middleware for resource ${resource.resourceId}: ${error}`
@@ -16,4 +16,5 @@ export * from "./verifyRemoteExitNodeAccess";
export * from "./verifyIdpAccess";
export * from "./verifyLoginPageAccess";
export * from "./logActionAudit";
export * from "./verifySubscription";
+export * from "./verifyValidLicense";
@@ -31,7 +31,6 @@ import {
    verifyUserIsServerAdmin,
    verifySiteAccess,
    verifyClientAccess,
-   verifyClientsEnabled,
} from "@server/middlewares";
import { ActionsEnum } from "@server/auth/actions";
import {
@@ -437,7 +436,6 @@ authenticated.get(

authenticated.post(
    "/re-key/:clientId/regenerate-client-secret",
-   verifyClientsEnabled,
    verifyClientAccess,
    verifyUserHasAction(ActionsEnum.reGenerateSecret),
    reKey.reGenerateClientSecret
@@ -1043,7 +1043,7 @@ hybridRouter.get(
        );
    }

-   let rules = await db
+   const rules = await db
        .select()
        .from(resourceRules)
        .where(eq(resourceRules.resourceId, resourceId));
@@ -1369,7 +1369,7 @@ const updateHolePunchSchema = z.object({
    port: z.number(),
    timestamp: z.number(),
    reachableAt: z.string().optional(),
-   publicKey: z.string().optional()
+   publicKey: z.string() // this is the client public key
});
hybridRouter.post(
    "/gerbil/update-hole-punch",
@@ -1408,7 +1408,7 @@ hybridRouter.post(
        );
    }

-   const { olmId, newtId, ip, port, timestamp, token, reachableAt } =
+   const { olmId, newtId, ip, port, timestamp, token, publicKey, reachableAt } =
        parsedParams.data;

    const destinations = await updateAndGenerateEndpointDestinations(
@@ -1418,6 +1418,7 @@ hybridRouter.post(
        port,
        timestamp,
        token,
+       publicKey,
        exitNode,
        true
    );
@@ -1742,7 +1743,12 @@ hybridRouter.post(
            tls: logEntry.tls
        }));

-       await db.insert(requestAuditLog).values(logEntries);
+       // batch them into inserts of 100 to avoid exceeding parameter limits
+       const batchSize = 100;
+       for (let i = 0; i < logEntries.length; i += batchSize) {
+           const batch = logEntries.slice(i, i + batchSize);
+           await db.insert(requestAuditLog).values(batch);
+       }

        return response(res, {
            data: null,
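
Note on the parameter limit: each inserted row binds one query parameter per column, so a driver capped at, say, 999 bound variables (SQLite's historical default) overflows at roughly 100 rows of ~10 columns. A reusable helper in the same style (a sketch, not part of this commit):

    // Hypothetical generic helper illustrating the batching pattern above.
    async function insertInBatches<T>(
        insert: (rows: T[]) => Promise<void>,
        rows: T[],
        batchSize = 100
    ): Promise<void> {
        for (let i = 0; i < rows.length; i += batchSize) {
            await insert(rows.slice(i, i + batchSize));
        }
    }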

@@ -13,13 +13,17 @@

import * as orgIdp from "#private/routers/orgIdp";
import * as org from "#private/routers/org";
import * as logs from "#private/routers/auditLogs";

import { Router } from "express";
import {
    verifyApiKey,
    verifyApiKeyHasAction,
    verifyApiKeyIsRoot,
    verifyApiKeyOrgAccess,
} from "@server/middlewares";
import {
    verifyValidSubscription,
    verifyValidLicense
} from "#private/middlewares";
import { ActionsEnum } from "@server/auth/actions";

import { unauthenticated as ua, authenticated as a } from "@server/routers/integration";
@@ -42,4 +46,42 @@ authenticated.delete(
    verifyApiKeyHasAction(ActionsEnum.deleteIdp),
    logActionAudit(ActionsEnum.deleteIdp),
    orgIdp.deleteOrgIdp,
);

+authenticated.get(
+    "/org/:orgId/logs/action",
+    verifyValidLicense,
+    verifyValidSubscription,
+    verifyApiKeyOrgAccess,
+    verifyApiKeyHasAction(ActionsEnum.exportLogs),
+    logs.queryActionAuditLogs
+);
+
+authenticated.get(
+    "/org/:orgId/logs/action/export",
+    verifyValidLicense,
+    verifyValidSubscription,
+    verifyApiKeyOrgAccess,
+    verifyApiKeyHasAction(ActionsEnum.exportLogs),
+    logActionAudit(ActionsEnum.exportLogs),
+    logs.exportActionAuditLogs
+);
+
+authenticated.get(
+    "/org/:orgId/logs/access",
+    verifyValidLicense,
+    verifyValidSubscription,
+    verifyApiKeyOrgAccess,
+    verifyApiKeyHasAction(ActionsEnum.exportLogs),
+    logs.queryAccessAuditLogs
+);
+
+authenticated.get(
+    "/org/:orgId/logs/access/export",
+    verifyValidLicense,
+    verifyValidSubscription,
+    verifyApiKeyOrgAccess,
+    verifyApiKeyHasAction(ActionsEnum.exportLogs),
+    logActionAudit(ActionsEnum.exportLogs),
+    logs.exportAccessAuditLogs
+);
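
Note: these new routes expose org audit logs through the integration API behind license and subscription checks. A client-side sketch (base URL and auth header format are assumptions, not confirmed by this diff):

    // Sketch: querying the new action audit-log endpoint.
    const res = await fetch(
        "https://pangolin.example.com/v1/org/my-org/logs/action",
        { headers: { Authorization: "Bearer <api-key>" } } // header format assumed
    );
    const auditLogs = await res.json();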
@@ -164,7 +164,7 @@ export async function createLoginPage(
        .select()
        .from(exitNodes)
        .where(and(eq(exitNodes.type, "gerbil"), eq(exitNodes.online, true)))
        .limit(10);
    }

    // select a random exit node
@@ -38,6 +38,7 @@ import { rateLimitService } from "#private/lib/rateLimit";
import { messageHandlers } from "@server/routers/ws/messageHandlers";
import { messageHandlers as privateMessageHandlers } from "#private/routers/ws/messageHandlers";
import { AuthenticatedWebSocket, ClientType, WSMessage, TokenPayload, WebSocketRequest, RedisMessage } from "@server/routers/ws";
+import { validateSessionToken } from "@server/auth/sessions/app";

// Merge public and private message handlers
Object.assign(messageHandlers, privateMessageHandlers);
@@ -370,6 +371,9 @@ const sendToClientLocal = async (
            client.send(messageString);
        }
    });

+   logger.debug(`sendToClient: Message type ${message.type} sent to clientId ${clientId}`);
+
    return true;
};
@@ -478,7 +482,8 @@ const getActiveNodes = async (
// Token verification middleware
const verifyToken = async (
    token: string,
-   clientType: ClientType
+   clientType: ClientType,
+   userToken: string
): Promise<TokenPayload | null> => {
    try {
        if (clientType === "newt") {
@@ -506,6 +511,17 @@ const verifyToken = async (
            if (!existingOlm || !existingOlm[0]) {
                return null;
            }

+           if (olm.userId) { // this is a user device and we need to check the user token
+               const { session: userSession, user } = await validateSessionToken(userToken);
+               if (!userSession || !user) {
+                   return null;
+               }
+               if (user.userId !== olm.userId) {
+                   return null;
+               }
+           }
+
            return { client: existingOlm[0], session, clientType };
        } else if (clientType === "remoteExitNode") {
            const { session, remoteExitNode } =
@@ -652,6 +668,7 @@ const handleWSUpgrade = (server: HttpServer): void => {
            url.searchParams.get("token") ||
            request.headers["sec-websocket-protocol"] ||
            "";
+       const userToken = url.searchParams.get('userToken') || '';
        let clientType = url.searchParams.get(
            "clientType"
        ) as ClientType;
@@ -673,7 +690,7 @@ const handleWSUpgrade = (server: HttpServer): void => {
            return;
        }

-       const tokenPayload = await verifyToken(token, clientType);
+       const tokenPayload = await verifyToken(token, clientType, userToken);
        if (!tokenPayload) {
            logger.debug(
                "Unauthorized connection attempt: invalid token..."

@@ -792,6 +809,28 @@ if (redisManager.isRedisEnabled()) {
    );
}

+// Disconnect a specific client and force them to reconnect
+const disconnectClient = async (clientId: string): Promise<boolean> => {
+    const mapKey = getClientMapKey(clientId);
+    const clients = connectedClients.get(mapKey);
+
+    if (!clients || clients.length === 0) {
+        logger.debug(`No connections found for client ID: ${clientId}`);
+        return false;
+    }
+
+    logger.info(`Disconnecting client ID: ${clientId} (${clients.length} connection(s))`);
+
+    // Close all connections for this client
+    clients.forEach((client) => {
+        if (client.readyState === WebSocket.OPEN) {
+            client.close(1000, "Disconnected by server");
+        }
+    });
+
+    return true;
+};
+
// Cleanup function for graceful shutdown
const cleanup = async (): Promise<void> => {
    try {
@@ -829,6 +868,7 @@ export {
    connectedClients,
    hasActiveConnections,
    getActiveNodes,
+   disconnectClient,
    NODE_ID,
    cleanup
};