Merge branch 'dev' into clients-user

miloschwartz
2025-12-05 15:17:32 -05:00
27 changed files with 2882 additions and 1896 deletions

View File

@@ -176,7 +176,8 @@ export const targetHealthCheck = pgTable("targetHealthCheck", {
hcFollowRedirects: boolean("hcFollowRedirects").default(true),
hcMethod: varchar("hcMethod").default("GET"),
hcStatus: integer("hcStatus"), // http code
- hcHealth: text("hcHealth").default("unknown") // "unknown", "healthy", "unhealthy"
+ hcHealth: text("hcHealth").default("unknown"), // "unknown", "healthy", "unhealthy"
+ hcTlsServerName: text("hcTlsServerName"),
});
export const exitNodes = pgTable("exitNodes", {

View File

@@ -195,7 +195,8 @@ export const targetHealthCheck = sqliteTable("targetHealthCheck", {
}).default(true),
hcMethod: text("hcMethod").default("GET"),
hcStatus: integer("hcStatus"), // http code
- hcHealth: text("hcHealth").default("unknown") // "unknown", "healthy", "unhealthy"
+ hcHealth: text("hcHealth").default("unknown"), // "unknown", "healthy", "unhealthy"
+ hcTlsServerName: text("hcTlsServerName"),
});
export const exitNodes = sqliteTable("exitNodes", {
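
Both schema variants gain the same nullable column. For orientation (not part of the commit), writing it with drizzle looks like this sketch; the target id and hostname are invented:

import { db, targetHealthCheck } from "@server/db";
import { eq } from "drizzle-orm";

// Sketch: set an explicit TLS server name (SNI) for one target's health check.
// The id and hostname below are illustrative values, not from this commit.
await db
    .update(targetHealthCheck)
    .set({ hcTlsServerName: "app.internal.example" })
    .where(eq(targetHealthCheck.targetId, 42));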

View File

@@ -221,6 +221,7 @@ export async function updateProxyResources(
domainId: domain ? domain.domainId : null,
enabled: resourceEnabled,
sso: resourceData.auth?.["sso-enabled"] || false,
+ skipToIdpId: resourceData.auth?.["auto-login-idp"] || null,
ssl: resourceSsl,
setHostHeader: resourceData["host-header"] || null,
tlsServerName: resourceData["tls-server-name"] || null,
@@ -610,6 +611,7 @@ export async function updateProxyResources(
domainId: domain ? domain.domainId : null,
enabled: resourceEnabled,
sso: resourceData.auth?.["sso-enabled"] || false,
+ skipToIdpId: resourceData.auth?.["auto-login-idp"] || null,
setHostHeader: resourceData["host-header"] || null,
tlsServerName: resourceData["tls-server-name"] || null,
ssl: resourceSsl,

View File

@@ -59,6 +59,7 @@ export const AuthSchema = z.object({
}),
"sso-users": z.array(z.email()).optional().default([]),
"whitelist-users": z.array(z.email()).optional().default([]),
"auto-login-idp": z.int().positive().optional(),
});
export const RuleSchema = z.object({
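
The new auto-login-idp key takes a positive integer IdP id and may be omitted. Checking just this field in isolation (a sketch using the same zod v4 API as the file):

import { z } from "zod";

// Mirrors the field above in isolation; not the app schema itself.
const AutoLoginIdp = z.int().positive().optional();

AutoLoginIdp.parse(3);         // ok: a (made-up) IdP id
AutoLoginIdp.parse(undefined); // ok: auto-login not configured
// AutoLoginIdp.parse(0);      // would throw: must be positive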

View File

@@ -2,7 +2,7 @@ import path from "path";
import { fileURLToPath } from "url";
// This is a placeholder value replaced by the build process
export const APP_VERSION = "1.12.1";
export const APP_VERSION = "1.12.3";
export const __FILENAME = fileURLToPath(import.meta.url);
export const __DIRNAME = path.dirname(__FILENAME);

server/lib/lock.ts (new file, 111 additions)
View File

@@ -0,0 +1,111 @@
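// Note: in this build every method below is a no-op stub: acquires always
// report success and releases do nothing, so locking is effectively a
// pass-through. The Redis-backed implementation lives in
// server/private/lib/lock.ts and is selected via the #dynamic import alias
// (see the "#dynamic/lib/lock" import later in this commit).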
export class LockManager {
/**
* Acquire a distributed lock using Redis SET with NX and PX options
* @param lockKey - Unique identifier for the lock
* @param ttlMs - Time to live in milliseconds
* @returns Promise<boolean> - true if lock acquired, false otherwise
*/
async acquireLock(
lockKey: string,
ttlMs: number = 30000
): Promise<boolean> {
return true;
}
/**
* Release a lock using Lua script to ensure atomicity
* @param lockKey - Unique identifier for the lock
*/
async releaseLock(lockKey: string): Promise<void> {}
/**
* Force release a lock regardless of owner (use with caution)
* @param lockKey - Unique identifier for the lock
*/
async forceReleaseLock(lockKey: string): Promise<void> {}
/**
* Check if a lock exists and get its info
* @param lockKey - Unique identifier for the lock
* @returns Promise<{exists: boolean, ownedByMe: boolean, ttl: number}>
*/
async getLockInfo(lockKey: string): Promise<{
exists: boolean;
ownedByMe: boolean;
ttl: number;
owner?: string;
}> {
return { exists: true, ownedByMe: true, ttl: 0 };
}
/**
* Extend the TTL of an existing lock owned by this worker
* @param lockKey - Unique identifier for the lock
* @param ttlMs - New TTL in milliseconds
* @returns Promise<boolean> - true if extended successfully
*/
async extendLock(lockKey: string, ttlMs: number): Promise<boolean> {
return true;
}
/**
* Attempt to acquire lock with retries and exponential backoff
* @param lockKey - Unique identifier for the lock
* @param ttlMs - Time to live in milliseconds
* @param maxRetries - Maximum number of retry attempts
* @param baseDelayMs - Base delay between retries in milliseconds
* @returns Promise<boolean> - true if lock acquired
*/
async acquireLockWithRetry(
lockKey: string,
ttlMs: number = 30000,
maxRetries: number = 5,
baseDelayMs: number = 100
): Promise<boolean> {
return true;
}
/**
* Execute a function while holding a lock
* @param lockKey - Unique identifier for the lock
* @param fn - Function to execute while holding the lock
* @param ttlMs - Lock TTL in milliseconds
* @returns Promise<T> - Result of the executed function
*/
async withLock<T>(
lockKey: string,
fn: () => Promise<T>,
ttlMs: number = 30000
): Promise<T> {
const acquired = await this.acquireLock(lockKey, ttlMs);
if (!acquired) {
throw new Error(`Failed to acquire lock: ${lockKey}`);
}
try {
return await fn();
} finally {
await this.releaseLock(lockKey);
}
}
/**
* Clean up expired locks - Redis handles this automatically, but this method
* can be used to get statistics about locks
* @returns Promise<{activeLocksCount: number, locksOwnedByMe: number}>
*/
async getLockStatistics(): Promise<{
activeLocksCount: number;
locksOwnedByMe: number;
}> {
return { activeLocksCount: 0, locksOwnedByMe: 0 };
}
/**
* Close the Redis connection
*/
async disconnect(): Promise<void> {}
}
export const lockManager = new LockManager();
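
Both the stub and the Redis-backed variant expose the same surface; a minimal usage sketch of withLock (the key, TTL, and work are invented):

import { lockManager } from "#dynamic/lib/lock";

// Run a critical section under a named lock. The lock is released in a
// finally block even if the callback throws; key and TTL are illustrative.
const result = await lockManager.withLock(
    "example:reconcile",
    async () => {
        // ...critical section...
        return 42;
    },
    10_000 // hold for at most 10 seconds
);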

server/private/lib/lock.ts (new file, 363 additions)
View File

@@ -0,0 +1,363 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import { config } from "@server/lib/config";
import logger from "@server/logger";
import { redis } from "#private/lib/redis";
export class LockManager {
/**
* Acquire a distributed lock using Redis SET with NX and PX options
* @param lockKey - Unique identifier for the lock
* @param ttlMs - Time to live in milliseconds
* @returns Promise<boolean> - true if lock acquired, false otherwise
*/
async acquireLock(
lockKey: string,
ttlMs: number = 30000
): Promise<boolean> {
if (!redis || !redis.status || redis.status !== "ready") {
return true;
}
const lockValue = `${
config.getRawConfig().gerbil.exit_node_name
}:${Date.now()}`;
const redisKey = `lock:${lockKey}`;
try {
// Use SET with NX (only set if not exists) and PX (expire in milliseconds)
// This is atomic and handles both setting and expiration
const result = await redis.set(
redisKey,
lockValue,
"PX",
ttlMs,
"NX"
);
if (result === "OK") {
logger.debug(
`Lock acquired: ${lockKey} by ${
config.getRawConfig().gerbil.exit_node_name
}`
);
return true;
}
// Check if the existing lock is from this worker (reentrant behavior)
const existingValue = await redis.get(redisKey);
if (
existingValue &&
existingValue.startsWith(
`${config.getRawConfig().gerbil.exit_node_name}:`
)
) {
// Extend the lock TTL since it's the same worker
await redis.pexpire(redisKey, ttlMs);
logger.debug(
`Lock extended: ${lockKey} by ${
config.getRawConfig().gerbil.exit_node_name
}`
);
return true;
}
return false;
} catch (error) {
logger.error(`Failed to acquire lock ${lockKey}:`, error);
return false;
}
}
/**
* Release a lock using Lua script to ensure atomicity
* @param lockKey - Unique identifier for the lock
*/
async releaseLock(lockKey: string): Promise<void> {
if (!redis || !redis.status || redis.status !== "ready") {
return;
}
const redisKey = `lock:${lockKey}`;
// Lua script to ensure we only delete the lock if it belongs to this worker
const luaScript = `
local key = KEYS[1]
local worker_prefix = ARGV[1]
local current_value = redis.call('GET', key)
if current_value and string.find(current_value, worker_prefix, 1, true) == 1 then
return redis.call('DEL', key)
else
return 0
end
`;
try {
const result = (await redis.eval(
luaScript,
1,
redisKey,
`${config.getRawConfig().gerbil.exit_node_name}:`
)) as number;
if (result === 1) {
logger.debug(
`Lock released: ${lockKey} by ${
config.getRawConfig().gerbil.exit_node_name
}`
);
} else {
logger.warn(
`Lock not released - not owned by worker: ${lockKey} by ${
config.getRawConfig().gerbil.exit_node_name
}`
);
}
} catch (error) {
logger.error(`Failed to release lock ${lockKey}:`, error);
}
}
/**
* Force release a lock regardless of owner (use with caution)
* @param lockKey - Unique identifier for the lock
*/
async forceReleaseLock(lockKey: string): Promise<void> {
if (!redis || !redis.status || redis.status !== "ready") {
return;
}
const redisKey = `lock:${lockKey}`;
try {
const result = await redis.del(redisKey);
if (result === 1) {
logger.debug(`Lock force released: ${lockKey}`);
}
} catch (error) {
logger.error(`Failed to force release lock ${lockKey}:`, error);
}
}
/**
* Check if a lock exists and get its info
* @param lockKey - Unique identifier for the lock
* @returns Promise<{exists: boolean, ownedByMe: boolean, ttl: number}>
*/
async getLockInfo(lockKey: string): Promise<{
exists: boolean;
ownedByMe: boolean;
ttl: number;
owner?: string;
}> {
if (!redis || !redis.status || redis.status !== "ready") {
return { exists: false, ownedByMe: true, ttl: 0 };
}
const redisKey = `lock:${lockKey}`;
try {
const [value, ttl] = await Promise.all([
redis.get(redisKey),
redis.pttl(redisKey)
]);
const exists = value !== null;
const ownedByMe =
exists &&
value!.startsWith(`${config.getRawConfig().gerbil.exit_node_name}:`);
const owner = exists ? value!.split(":")[0] : undefined;
return {
exists,
ownedByMe,
ttl: ttl > 0 ? ttl : 0,
owner
};
} catch (error) {
logger.error(`Failed to get lock info ${lockKey}:`, error);
return { exists: false, ownedByMe: false, ttl: 0 };
}
}
/**
* Extend the TTL of an existing lock owned by this worker
* @param lockKey - Unique identifier for the lock
* @param ttlMs - New TTL in milliseconds
* @returns Promise<boolean> - true if extended successfully
*/
async extendLock(lockKey: string, ttlMs: number): Promise<boolean> {
if (!redis || !redis.status || redis.status !== "ready") {
return true;
}
const redisKey = `lock:${lockKey}`;
// Lua script to extend TTL only if lock is owned by this worker
const luaScript = `
local key = KEYS[1]
local worker_prefix = ARGV[1]
local ttl = tonumber(ARGV[2])
local current_value = redis.call('GET', key)
if current_value and string.find(current_value, worker_prefix, 1, true) == 1 then
return redis.call('PEXPIRE', key, ttl)
else
return 0
end
`;
try {
const result = (await redis.eval(
luaScript,
1,
redisKey,
`${config.getRawConfig().gerbil.exit_node_name}:`,
ttlMs.toString()
)) as number;
if (result === 1) {
logger.debug(
`Lock extended: ${lockKey} by ${
config.getRawConfig().gerbil.exit_node_name
} for ${ttlMs}ms`
);
return true;
}
return false;
} catch (error) {
logger.error(`Failed to extend lock ${lockKey}:`, error);
return false;
}
}
/**
* Attempt to acquire lock with retries and exponential backoff
* @param lockKey - Unique identifier for the lock
* @param ttlMs - Time to live in milliseconds
* @param maxRetries - Maximum number of retry attempts
* @param baseDelayMs - Base delay between retries in milliseconds
* @returns Promise<boolean> - true if lock acquired
*/
async acquireLockWithRetry(
lockKey: string,
ttlMs: number = 30000,
maxRetries: number = 5,
baseDelayMs: number = 100
): Promise<boolean> {
if (!redis || !redis.status || redis.status !== "ready") {
return true;
}
for (let attempt = 0; attempt <= maxRetries; attempt++) {
const acquired = await this.acquireLock(lockKey, ttlMs);
if (acquired) {
return true;
}
if (attempt < maxRetries) {
// Exponential backoff with jitter
const delay =
baseDelayMs * Math.pow(2, attempt) + Math.random() * 100;
await new Promise((resolve) => setTimeout(resolve, delay));
}
}
logger.warn(
`Failed to acquire lock ${lockKey} after ${maxRetries + 1} attempts`
);
return false;
}
/**
* Execute a function while holding a lock
* @param lockKey - Unique identifier for the lock
* @param fn - Function to execute while holding the lock
* @param ttlMs - Lock TTL in milliseconds
* @returns Promise<T> - Result of the executed function
*/
async withLock<T>(
lockKey: string,
fn: () => Promise<T>,
ttlMs: number = 30000
): Promise<T> {
if (!redis || !redis.status || redis.status !== "ready") {
return await fn();
}
const acquired = await this.acquireLock(lockKey, ttlMs);
if (!acquired) {
throw new Error(`Failed to acquire lock: ${lockKey}`);
}
try {
return await fn();
} finally {
await this.releaseLock(lockKey);
}
}
/**
* Clean up expired locks - Redis handles this automatically, but this method
* can be used to get statistics about locks
* @returns Promise<{activeLocksCount: number, locksOwnedByMe: number}>
*/
async getLockStatistics(): Promise<{
activeLocksCount: number;
locksOwnedByMe: number;
}> {
if (!redis || !redis.status || redis.status !== "ready") {
return { activeLocksCount: 0, locksOwnedByMe: 0 };
}
try {
const keys = await redis.keys("lock:*");
let locksOwnedByMe = 0;
if (keys.length > 0) {
const values = await redis.mget(...keys);
locksOwnedByMe = values.filter(
(value) =>
value &&
value.startsWith(
`${config.getRawConfig().gerbil.exit_node_name}:`
)
).length;
}
return {
activeLocksCount: keys.length,
locksOwnedByMe
};
} catch (error) {
logger.error("Failed to get lock statistics:", error);
return { activeLocksCount: 0, locksOwnedByMe: 0 };
}
}
/**
* Close the Redis connection
*/
async disconnect(): Promise<void> {
if (!redis || !redis.status || redis.status !== "ready") {
return;
}
await redis.quit();
}
}
export const lockManager = new LockManager();
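
For work that can outlive a fixed TTL, the same surface supports a manual acquire/extend/release lifecycle; a sketch with invented key, TTLs, and job:

import { lockManager } from "#dynamic/lib/lock";

const key = "example:long-job"; // illustrative lock key
if (await lockManager.acquireLockWithRetry(key, 10_000, 3)) {
    try {
        // ...do part of the work...
        // Heartbeat: push the TTL out while still holding the lock.
        await lockManager.extendLock(key, 10_000);
        // ...finish the work...
    } finally {
        await lockManager.releaseLock(key);
    }
}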

View File

@@ -1743,7 +1743,12 @@ hybridRouter.post(
tls: logEntry.tls
}));
- await db.insert(requestAuditLog).values(logEntries);
+ // batch them into inserts of 100 to avoid exceeding parameter limits
+ const batchSize = 100;
+ for (let i = 0; i < logEntries.length; i += batchSize) {
+     const batch = logEntries.slice(i, i + batchSize);
+     await db.insert(requestAuditLog).values(batch);
+ }
return response(res, {
data: null,
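
The batching exists because every inserted row consumes one bound parameter per column, and drivers cap the total: SQLite's SQLITE_MAX_VARIABLE_NUMBER historically defaulted to 999 (32766 since 3.32.0) and Postgres allows 65535 per statement, so a large audit-log flush in a single values() call can fail outright. The same chunking as a generic helper (a sketch; insertInBatches is not part of the commit):

// Generic chunked insert (sketch): keep rows-per-statement small enough that
// rows x columns stays under the driver's bound-parameter limit.
async function insertInBatches<T>(
    rows: T[],
    insert: (batch: T[]) => PromiseLike<unknown>,
    batchSize = 100
): Promise<void> {
    for (let i = 0; i < rows.length; i += batchSize) {
        await insert(rows.slice(i, i + batchSize));
    }
}

// Usage against the route above (illustrative):
// await insertInBatches(logEntries, (b) => db.insert(requestAuditLog).values(b));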

View File

@@ -1,8 +1,8 @@
- import { db, exitNodeOrgs, newts } from "@server/db";
+ import { db, ExitNode, exitNodeOrgs, newts, Transaction } from "@server/db";
import { MessageHandler } from "@server/routers/ws";
import { exitNodes, Newt, resources, sites, Target, targets } from "@server/db";
import { targetHealthCheck } from "@server/db";
- import { eq, and, sql, inArray } from "drizzle-orm";
+ import { eq, and, sql, inArray, ne } from "drizzle-orm";
import { addPeer, deletePeer } from "../gerbil/peers";
import logger from "@server/logger";
import config from "@server/lib/config";
@@ -17,6 +17,7 @@ import {
verifyExitNodeOrgAccess
} from "#dynamic/lib/exitNodes";
import { fetchContainers } from "./dockerSocket";
+ import { lockManager } from "#dynamic/lib/lock";
export type ExitNodePingResult = {
exitNodeId: number;
@@ -151,27 +152,8 @@ export const handleNewtRegisterMessage: MessageHandler = async (context) => {
return;
}
- const sitesQuery = await db
-     .select({
-         subnet: sites.subnet
-     })
-     .from(sites)
-     .where(eq(sites.exitNodeId, exitNodeId));
+ const newSubnet = await getUniqueSubnetForSite(exitNode);
- const blockSize = config.getRawConfig().gerbil.site_block_size;
- const subnets = sitesQuery
-     .map((site) => site.subnet)
-     .filter(
-         (subnet) =>
-             subnet && /^(\d{1,3}\.){3}\d{1,3}\/\d{1,2}$/.test(subnet)
-     )
-     .filter((subnet) => subnet !== null);
- subnets.push(exitNode.address.replace(/\/\d+$/, `/${blockSize}`));
- const newSubnet = findNextAvailableCidr(
-     subnets,
-     blockSize,
-     exitNode.address
- );
if (!newSubnet) {
logger.error(
`No available subnets found for the new exit node id ${exitNodeId} and site id ${siteId}`
@@ -272,7 +254,8 @@ export const handleNewtRegisterMessage: MessageHandler = async (context) => {
hcUnhealthyInterval: targetHealthCheck.hcUnhealthyInterval,
hcTimeout: targetHealthCheck.hcTimeout,
hcHeaders: targetHealthCheck.hcHeaders,
- hcMethod: targetHealthCheck.hcMethod
+ hcMethod: targetHealthCheck.hcMethod,
+ hcTlsServerName: targetHealthCheck.hcTlsServerName,
})
.from(targets)
.innerJoin(resources, eq(targets.resourceId, resources.resourceId))
@@ -344,7 +327,8 @@ export const handleNewtRegisterMessage: MessageHandler = async (context) => {
hcUnhealthyInterval: target.hcUnhealthyInterval, // in seconds
hcTimeout: target.hcTimeout, // in seconds
hcHeaders: hcHeadersSend,
- hcMethod: target.hcMethod
+ hcMethod: target.hcMethod,
+ hcTlsServerName: target.hcTlsServerName,
};
});
@@ -376,3 +360,39 @@ export const handleNewtRegisterMessage: MessageHandler = async (context) => {
excludeSender: false // Include sender in broadcast
};
};
+ async function getUniqueSubnetForSite(
+     exitNode: ExitNode,
+     trx: Transaction | typeof db = db
+ ): Promise<string | null> {
+     const lockKey = `subnet-allocation:${exitNode.exitNodeId}`;
+     return await lockManager.withLock(
+         lockKey,
+         async () => {
+             const sitesQuery = await trx
+                 .select({
+                     subnet: sites.subnet
+                 })
+                 .from(sites)
+                 .where(eq(sites.exitNodeId, exitNode.exitNodeId));
+             const blockSize = config.getRawConfig().gerbil.site_block_size;
+             const subnets = sitesQuery
+                 .map((site) => site.subnet)
+                 .filter(
+                     (subnet) =>
+                         subnet && /^(\d{1,3}\.){3}\d{1,3}\/\d{1,2}$/.test(subnet)
+                 )
+                 .filter((subnet) => subnet !== null);
+             subnets.push(exitNode.address.replace(/\/\d+$/, `/${blockSize}`));
+             const newSubnet = findNextAvailableCidr(
+                 subnets,
+                 blockSize,
+                 exitNode.address
+             );
+             return newSubnet;
+         },
+         5000 // 5 second lock TTL - subnet allocation should be quick
+     );
+ }
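
findNextAvailableCidr is imported from elsewhere in the codebase and not shown in this diff. Purely as an illustration of the shape of the problem (an assumption, not the project's implementation), an IPv4-only stand-in could scan /blockSize windows inside the exit node's range and return the first that overlaps nothing already allocated:

// Hypothetical stand-in for findNextAvailableCidr (IPv4 only); the real
// helper lives elsewhere in the codebase and may differ.
function ipToInt(ip: string): number {
    return ip.split(".").reduce((acc, o) => ((acc << 8) | parseInt(o, 10)) >>> 0, 0);
}

function intToIp(n: number): string {
    return [24, 16, 8, 0].map((s) => (n >>> s) & 0xff).join(".");
}

// [start, end) of the aligned block containing the CIDR's address.
function cidrRange(cidr: string): [number, number] {
    const [ip, bitsStr] = cidr.split("/");
    const size = 2 ** (32 - parseInt(bitsStr, 10));
    const start = Math.floor(ipToInt(ip) / size) * size;
    return [start, start + size];
}

function nextAvailableCidrSketch(
    taken: string[],
    blockSize: number,
    root: string // e.g. the exit node's address, "10.0.0.1/16"
): string | null {
    const [rootStart, rootEnd] = cidrRange(root);
    const ranges = taken.map(cidrRange);
    const step = 2 ** (32 - blockSize);
    for (let start = rootStart; start + step <= rootEnd; start += step) {
        const end = start + step;
        if (!ranges.some(([s, e]) => start < e && s < end)) {
            return `${intToIp(start)}/${blockSize}`;
        }
    }
    return null; // range exhausted
}

Note that the caller above reserves the exit node's own block by pushing exitNode.address, re-prefixed to /blockSize, into the taken list before scanning, so the block containing the node's address is never handed to a site.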

View File

@@ -66,7 +66,8 @@ export async function addTargets(
hcUnhealthyInterval: hc.hcUnhealthyInterval, // in seconds
hcTimeout: hc.hcTimeout, // in seconds
hcHeaders: hcHeadersSend,
- hcMethod: hc.hcMethod
+ hcMethod: hc.hcMethod,
+ hcTlsServerName: hc.hcTlsServerName,
};
});

View File

@@ -198,6 +198,62 @@ export async function createSite(
}
}
+ if (subnet && exitNodeId) {
+     // Make sure the provided subnet is within the exit node's range
+     const [exitNode] = await db
+         .select()
+         .from(exitNodes)
+         .where(eq(exitNodes.exitNodeId, exitNodeId));
+     if (!exitNode) {
+         return next(
+             createHttpError(HttpCode.NOT_FOUND, "Exit node not found")
+         );
+     }
+     if (!exitNode.address) {
+         return next(
+             createHttpError(
+                 HttpCode.BAD_REQUEST,
+                 "Exit node has no subnet defined"
+             )
+         );
+     }
+     const subnetIp = subnet.split("/")[0];
+     if (!isIpInCidr(subnetIp, exitNode.address)) {
+         return next(
+             createHttpError(
+                 HttpCode.BAD_REQUEST,
+                 "Subnet is not in the CIDR range of the exit node address."
+             )
+         );
+     }
+     // Also make sure the exact subnet is not already taken by another site on this exit node
+     const sitesQuery = await db
+         .select({
+             subnet: sites.subnet
+         })
+         .from(sites)
+         .where(
+             and(
+                 eq(sites.exitNodeId, exitNodeId),
+                 eq(sites.subnet, subnet)
+             )
+         );
+     if (sitesQuery.length > 0) {
+         return next(
+             createHttpError(
+                 HttpCode.CONFLICT,
+                 `Subnet ${subnet} overlaps with an existing site on this exit node. Please restart site creation.`
+             )
+         );
+     }
+ }
const niceId = await getUniqueSiteName(orgId);
let newSite: Site;
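
isIpInCidr is likewise an imported helper. Under the usual IPv4 prefix-mask semantics it reduces to the comparison sketched below (an assumption about its behavior, not its source):

// Sketch of an isIpInCidr-style check (IPv4): an address is inside a CIDR
// when both share the same network bits under the prefix mask.
function isIpInCidrSketch(ip: string, cidr: string): boolean {
    const toInt = (s: string) =>
        s.split(".").reduce((acc, o) => ((acc << 8) | parseInt(o, 10)) >>> 0, 0);
    const [base, bitsStr] = cidr.split("/");
    const bits = parseInt(bitsStr, 10);
    if (bits === 0) return true; // /0 matches everything
    const mask = (~0 << (32 - bits)) >>> 0;
    return ((toInt(ip) & mask) >>> 0) === ((toInt(base) & mask) >>> 0);
}

// isIpInCidrSketch("10.0.5.9", "10.0.0.0/16") === true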

View File

@@ -48,6 +48,7 @@ const createTargetSchema = z.strictObject({
hcFollowRedirects: z.boolean().optional().nullable(),
hcMethod: z.string().min(1).optional().nullable(),
hcStatus: z.int().optional().nullable(),
+ hcTlsServerName: z.string().optional().nullable(),
path: z.string().optional().nullable(),
pathMatchType: z
.enum(["exact", "prefix", "regex"])
@@ -247,7 +248,8 @@ export async function createTarget(
hcFollowRedirects: targetData.hcFollowRedirects ?? null,
hcMethod: targetData.hcMethod ?? null,
hcStatus: targetData.hcStatus ?? null,
hcHealth: "unknown"
hcHealth: "unknown",
hcTlsServerName: targetData.hcTlsServerName ?? null
})
.returning();

View File

@@ -57,6 +57,7 @@ function queryTargets(resourceId: number) {
hcMethod: targetHealthCheck.hcMethod,
hcStatus: targetHealthCheck.hcStatus,
hcHealth: targetHealthCheck.hcHealth,
+ hcTlsServerName: targetHealthCheck.hcTlsServerName,
path: targets.path,
pathMatchType: targets.pathMatchType,
rewritePath: targets.rewritePath,

View File

@@ -42,6 +42,7 @@ const updateTargetBodySchema = z.strictObject({
hcFollowRedirects: z.boolean().optional().nullable(),
hcMethod: z.string().min(1).optional().nullable(),
hcStatus: z.int().optional().nullable(),
+ hcTlsServerName: z.string().optional().nullable(),
path: z.string().optional().nullable(),
pathMatchType: z.enum(["exact", "prefix", "regex"]).optional().nullable(),
rewritePath: z.string().optional().nullable(),
@@ -217,7 +218,8 @@ export async function updateTarget(
hcHeaders: hcHeaders,
hcFollowRedirects: parsedBody.data.hcFollowRedirects,
hcMethod: parsedBody.data.hcMethod,
- hcStatus: parsedBody.data.hcStatus
+ hcStatus: parsedBody.data.hcStatus,
+ hcTlsServerName: parsedBody.data.hcTlsServerName,
})
.where(eq(targetHealthCheck.targetId, targetId))
.returning();
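
hcTlsServerName exists for SNI control: when a health check dials a backend by IP, or by a name that does not match the certificate, it must present an explicit TLS server name for the handshake to succeed. The value is forwarded to the agent that runs the checks (see the Newt register handler above); in Node terms the idea looks like this sketch, with invented host, port, and name:

import tls from "node:tls";

// Sketch: handshake with a backend by IP while presenting an explicit SNI
// name, which is the role hcTlsServerName plays for health checks.
const socket = tls.connect(
    {
        host: "10.0.0.5",                  // backend address (illustrative)
        port: 443,
        servername: "app.internal.example" // what hcTlsServerName would carry
    },
    () => {
        console.log("handshake ok, cert authorized:", socket.authorized);
        socket.end();
    }
);
socket.on("error", (err) => console.error("probe failed:", err.message));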