Merge branch 'dev' into multi-role

miloschwartz
2026-03-24 22:01:13 -07:00
266 changed files with 7813 additions and 5880 deletions

View File

@@ -19,6 +19,7 @@ export enum ActionsEnum {
getSite = "getSite",
listSites = "listSites",
updateSite = "updateSite",
resetSiteBandwidth = "resetSiteBandwidth",
reGenerateSecret = "reGenerateSecret",
createResource = "createResource",
deleteResource = "deleteResource",

View File

@@ -87,7 +87,7 @@ export async function validateResourceSessionToken(
if (Date.now() >= resourceSession.expiresAt) {
await db
.delete(resourceSessions)
.where(eq(resourceSessions.sessionId, resourceSessions.sessionId));
.where(eq(resourceSessions.sessionId, sessionId));
return { resourceSession: null };
} else if (
Date.now() >=
@@ -181,7 +181,7 @@ export function serializeResourceSessionCookie(
return `${cookieName}_s.${now}=${token}; HttpOnly; SameSite=Lax; Expires=${expiresAt.toUTCString()}; Path=/; Secure; Domain=${domain}`;
} else {
if (expiresAt === undefined) {
return `${cookieName}.${now}=${token}; HttpOnly; SameSite=Lax; Path=/; Domain=$domain}`;
return `${cookieName}.${now}=${token}; HttpOnly; SameSite=Lax; Path=/; Domain=${domain}`;
}
return `${cookieName}.${now}=${token}; HttpOnly; SameSite=Lax; Expires=${expiresAt.toUTCString()}; Path=/; Domain=${domain}`;
}
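Two one-line bugs land in this file: the expiry check previously compared resourceSessions.sessionId against itself (a tautology matching every row, so a single expired session deleted every resource session), and the no-expiry cookie branch emitted the literal text "$domain}" because the template placeholder was malformed. A minimal sketch of the interpolation bug, using hypothetical values:

const cookieName = "p_session";
const now = 1700000000;
const token = "abc123";
const domain = "example.com";

// Old template: "$domain}" is plain text, not an interpolation, so the
// literal dollar sign and stray brace leak into the Set-Cookie header.
const before = `${cookieName}.${now}=${token}; HttpOnly; SameSite=Lax; Path=/; Domain=$domain}`;
// => "...; Path=/; Domain=$domain}"

// Fixed template interpolates the variable as intended.
const after = `${cookieName}.${now}=${token}; HttpOnly; SameSite=Lax; Path=/; Domain=${domain}`;
// => "...; Path=/; Domain=example.com"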

View File

@@ -1,6 +1,10 @@
import { flushBandwidthToDb } from "@server/routers/newt/handleReceiveBandwidthMessage";
import { flushSiteBandwidthToDb } from "@server/routers/gerbil/receiveBandwidth";
import { cleanup as wsCleanup } from "#dynamic/routers/ws";
async function cleanup() {
await flushBandwidthToDb();
await flushSiteBandwidthToDb();
await wsCleanup();
process.exit(0);
@@ -10,4 +14,4 @@ export async function initCleanup() {
// Handle process termination
process.on("SIGTERM", () => cleanup());
process.on("SIGINT", () => cleanup());
}
}

View File

@@ -1,4 +1,5 @@
export * from "./driver";
export * from "./logsDriver";
export * from "./safeRead";
export * from "./schema/schema";
export * from "./schema/privateSchema";

View File

@@ -0,0 +1,87 @@
import { drizzle as DrizzlePostgres } from "drizzle-orm/node-postgres";
import { Pool } from "pg";
import { readConfigFile } from "@server/lib/readConfigFile";
import { withReplicas } from "drizzle-orm/pg-core";
import { build } from "@server/build";
import { db as mainDb, primaryDb as mainPrimaryDb } from "./driver";
function createLogsDb() {
// Only use separate logs database in SaaS builds
if (build !== "saas") {
return mainDb;
}
const config = readConfigFile();
// Merge configs, prioritizing private config
const logsConfig = config.postgres_logs;
// Check environment variable first
let connectionString = process.env.POSTGRES_LOGS_CONNECTION_STRING;
let replicaConnections: Array<{ connection_string: string }> = [];
if (!connectionString && logsConfig) {
connectionString = logsConfig.connection_string;
replicaConnections = logsConfig.replicas || [];
}
// If POSTGRES_LOGS_REPLICA_CONNECTION_STRINGS is set, use it
if (process.env.POSTGRES_LOGS_REPLICA_CONNECTION_STRINGS) {
replicaConnections =
process.env.POSTGRES_LOGS_REPLICA_CONNECTION_STRINGS.split(",").map(
(conn) => ({
connection_string: conn.trim()
})
);
}
// If no logs database is configured, fall back to main database
if (!connectionString) {
return mainDb;
}
// Create separate connection pool for logs database
const poolConfig = logsConfig?.pool || config.postgres?.pool;
const primaryPool = new Pool({
connectionString,
max: poolConfig?.max_connections || 20,
idleTimeoutMillis: poolConfig?.idle_timeout_ms || 30000,
connectionTimeoutMillis: poolConfig?.connection_timeout_ms || 5000
});
const replicas = [];
if (!replicaConnections.length) {
replicas.push(
DrizzlePostgres(primaryPool, {
logger: process.env.QUERY_LOGGING == "true"
})
);
} else {
for (const conn of replicaConnections) {
const replicaPool = new Pool({
connectionString: conn.connection_string,
max: poolConfig?.max_replica_connections || 20,
idleTimeoutMillis: poolConfig?.idle_timeout_ms || 30000,
connectionTimeoutMillis:
poolConfig?.connection_timeout_ms || 5000
});
replicas.push(
DrizzlePostgres(replicaPool, {
logger: process.env.QUERY_LOGGING == "true"
})
);
}
}
return withReplicas(
DrizzlePostgres(primaryPool, {
logger: process.env.QUERY_LOGGING == "true"
}),
replicas as any
);
}
export const logsDb = createLogsDb();
export default logsDb;
export const primaryLogsDb = logsDb.$primary;
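In SaaS builds the logs driver gets its own connection pool (with optional read replicas via withReplicas); every other build simply re-exports the main database. A minimal usage sketch, with a placeholder org id:

import { logsDb, primaryLogsDb, accessAuditLog } from "@server/db";
import { eq } from "drizzle-orm";

// Reads may be served from a replica when one is configured.
const recent = await logsDb
    .select()
    .from(accessAuditLog)
    .where(eq(accessAuditLog.orgId, "org_123"));

// withReplicas already routes writes to the primary; primaryLogsDb
// ($primary) additionally forces reads there when read-your-writes
// consistency matters.
await primaryLogsDb
    .delete(accessAuditLog)
    .where(eq(accessAuditLog.orgId, "org_123"));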

View File

@@ -328,6 +328,14 @@ export const approvals = pgTable("approvals", {
.notNull()
});
export const bannedEmails = pgTable("bannedEmails", {
email: varchar("email", { length: 255 }).primaryKey(),
});
export const bannedIps = pgTable("bannedIps", {
ip: varchar("ip", { length: 255 }).primaryKey(),
});
export type Approval = InferSelectModel<typeof approvals>;
export type Limit = InferSelectModel<typeof limits>;
export type Account = InferSelectModel<typeof account>;

View File

@@ -23,7 +23,8 @@ export const domains = pgTable("domains", {
tries: integer("tries").notNull().default(0),
certResolver: varchar("certResolver"),
customCertResolver: varchar("customCertResolver"),
preferWildcardCert: boolean("preferWildcardCert")
preferWildcardCert: boolean("preferWildcardCert"),
errorMessage: text("errorMessage")
});
export const dnsRecords = pgTable("dnsRecords", {
@@ -89,6 +90,7 @@ export const sites = pgTable("sites", {
lastBandwidthUpdate: varchar("lastBandwidthUpdate"),
type: varchar("type").notNull(), // "newt" or "wireguard"
online: boolean("online").notNull().default(false),
lastPing: integer("lastPing"),
address: varchar("address"),
endpoint: varchar("endpoint"),
publicKey: varchar("publicKey"),
@@ -284,6 +286,7 @@ export const users = pgTable("user", {
dateCreated: varchar("dateCreated").notNull(),
termsAcceptedTimestamp: varchar("termsAcceptedTimestamp"),
termsVersion: varchar("termsVersion"),
marketingEmailConsent: boolean("marketingEmailConsent").default(false),
serverAdmin: boolean("serverAdmin").notNull().default(false),
lastPasswordChange: bigint("lastPasswordChange", { mode: "number" })
});
@@ -733,6 +736,7 @@ export const clientSitesAssociationsCache = pgTable(
.notNull(),
siteId: integer("siteId").notNull(),
isRelayed: boolean("isRelayed").notNull().default(false),
isJitMode: boolean("isJitMode").notNull().default(false),
endpoint: varchar("endpoint"),
publicKey: varchar("publicKey") // this will act as the session's public key for hole punching so we can track when it changes
}

View File

@@ -1,4 +1,5 @@
export * from "./driver";
export * from "./logsDriver";
export * from "./safeRead";
export * from "./schema/schema";
export * from "./schema/privateSchema";

View File

@@ -0,0 +1,7 @@
import { db as mainDb } from "./driver";
// SQLite doesn't support separate databases for logs in the same way as Postgres
// Always use the main database connection for SQLite
export const logsDb = mainDb;
export default logsDb;
export const primaryLogsDb = logsDb;

View File

@@ -318,6 +318,15 @@ export const approvals = sqliteTable("approvals", {
.notNull()
});
export const bannedEmails = sqliteTable("bannedEmails", {
email: text("email").primaryKey()
});
export const bannedIps = sqliteTable("bannedIps", {
ip: text("ip").primaryKey()
});
export type Approval = InferSelectModel<typeof approvals>;
export type Limit = InferSelectModel<typeof limits>;
export type Account = InferSelectModel<typeof account>;
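Both schema variants add the same two single-column ban lists. A hedged sketch of a lookup helper built on them (the helper is illustrative, not part of this diff):

import { db, bannedEmails } from "@server/db";
import { eq } from "drizzle-orm";

// Illustrative: returns true once an address has been added to the ban list.
async function isEmailBanned(email: string): Promise<boolean> {
    const [row] = await db
        .select()
        .from(bannedEmails)
        .where(eq(bannedEmails.email, email))
        .limit(1);
    return row !== undefined;
}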

View File

@@ -19,7 +19,8 @@ export const domains = sqliteTable("domains", {
failed: integer("failed", { mode: "boolean" }).notNull().default(false),
tries: integer("tries").notNull().default(0),
certResolver: text("certResolver"),
preferWildcardCert: integer("preferWildcardCert", { mode: "boolean" })
preferWildcardCert: integer("preferWildcardCert", { mode: "boolean" }),
errorMessage: text("errorMessage")
});
export const dnsRecords = sqliteTable("dnsRecords", {
@@ -95,6 +96,7 @@ export const sites = sqliteTable("sites", {
lastBandwidthUpdate: text("lastBandwidthUpdate"),
type: text("type").notNull(), // "newt" or "wireguard"
online: integer("online", { mode: "boolean" }).notNull().default(false),
lastPing: integer("lastPing"),
// exit node stuff that is how to connect to the site when it has a wg server
address: text("address"), // this is the address of the wireguard interface in newt
@@ -320,6 +322,9 @@ export const users = sqliteTable("user", {
dateCreated: text("dateCreated").notNull(),
termsAcceptedTimestamp: text("termsAcceptedTimestamp"),
termsVersion: text("termsVersion"),
marketingEmailConsent: integer("marketingEmailConsent", {
mode: "boolean"
}).default(false),
serverAdmin: integer("serverAdmin", { mode: "boolean" })
.notNull()
.default(false),
@@ -412,6 +417,9 @@ export const clientSitesAssociationsCache = sqliteTable(
isRelayed: integer("isRelayed", { mode: "boolean" })
.notNull()
.default(false),
isJitMode: integer("isJitMode", { mode: "boolean" })
.notNull()
.default(false),
endpoint: text("endpoint"),
publicKey: text("publicKey") // this will act as the session's public key for hole punching so we can track when it changes
}

View File

@@ -17,6 +17,7 @@ import fs from "fs";
import path from "path";
import { APP_PATH } from "./lib/consts";
import yaml from "js-yaml";
import { z } from "zod";
const dev = process.env.ENVIRONMENT !== "prod";
const externalPort = config.getRawConfig().server.integration_port;
@@ -38,12 +39,24 @@ export function createIntegrationApiServer() {
apiServer.use(cookieParser());
apiServer.use(express.json());
const openApiDocumentation = getOpenApiDocumentation();
apiServer.use(
"/v1/docs",
swaggerUi.serve,
swaggerUi.setup(getOpenApiDocumentation())
swaggerUi.setup(openApiDocumentation)
);
// Unauthenticated OpenAPI spec endpoints
apiServer.get("/v1/openapi.json", (_req, res) => {
res.json(openApiDocumentation);
});
apiServer.get("/v1/openapi.yaml", (_req, res) => {
const yamlOutput = yaml.dump(openApiDocumentation);
res.type("application/yaml").send(yamlOutput);
});
// API routes
const prefix = `/v1`;
apiServer.use(logIncomingMiddleware);
@@ -75,16 +88,6 @@ function getOpenApiDocumentation() {
}
);
for (const def of registry.definitions) {
if (def.type === "route") {
def.route.security = [
{
[bearerAuth.name]: []
}
];
}
}
registry.registerPath({
method: "get",
path: "/",
@@ -94,6 +97,74 @@ function getOpenApiDocumentation() {
responses: {}
});
registry.registerPath({
method: "get",
path: "/openapi.json",
description: "Get OpenAPI specification as JSON",
tags: [],
request: {},
responses: {
"200": {
description: "OpenAPI specification as JSON",
content: {
"application/json": {
schema: {
type: "object"
}
}
}
}
}
});
registry.registerPath({
method: "get",
path: "/openapi.yaml",
description: "Get OpenAPI specification as YAML",
tags: [],
request: {},
responses: {
"200": {
description: "OpenAPI specification as YAML",
content: {
"application/yaml": {
schema: {
type: "string"
}
}
}
}
}
});
for (const def of registry.definitions) {
if (def.type === "route") {
def.route.security = [
{
[bearerAuth.name]: []
}
];
// Ensure every route has a generic JSON response schema so Swagger UI can render responses
const existingResponses = def.route.responses;
const hasExistingResponses =
existingResponses && Object.keys(existingResponses).length > 0;
if (!hasExistingResponses) {
def.route.responses = {
"*": {
description: "",
content: {
"application/json": {
schema: z.object({})
}
}
}
};
}
}
}
const generator = new OpenApiGeneratorV3(registry.definitions);
const generated = generator.generateDocument({
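With the unauthenticated spec endpoints in place, external tooling can pull the document straight from the integration API. A small sketch; the host and port are assumptions (integration_port comes from the server config):

// Host/port assumed; substitute your integration API address.
const res = await fetch("http://localhost:3003/v1/openapi.json");
const spec = await res.json();
console.log(spec.info?.title, spec.info?.version);

// The YAML variant is served with an application/yaml content type.
const yamlRes = await fetch("http://localhost:3003/v1/openapi.yaml");
console.log(await yamlRes.text());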

View File

@@ -16,6 +16,11 @@ const internalPort = config.getRawConfig().server.internal_port;
export function createInternalServer() {
const internalServer = express();
const trustProxy = config.getRawConfig().server.trust_proxy;
if (trustProxy) {
internalServer.set("trust proxy", trustProxy);
}
internalServer.use(helmet());
internalServer.use(cors());
internalServer.use(stripDuplicateSesions);

View File

@@ -48,5 +48,5 @@ export const tierMatrix: Record<TierFeature, Tier[]> = {
"enterprise"
],
[TierFeature.AutoProvisioning]: ["tier1", "tier3", "enterprise"],
[TierFeature.SshPam]: ["enterprise"]
[TierFeature.SshPam]: ["tier1", "tier3", "enterprise"]
};

View File

@@ -12,7 +12,7 @@ import {
import { FeatureId, getFeatureMeterId } from "./features";
import logger from "@server/logger";
import { build } from "@server/build";
import cache from "@server/lib/cache";
import cache from "#dynamic/lib/cache";
export function noop() {
if (build !== "saas") {
@@ -230,7 +230,7 @@ export class UsageService {
const orgIdToUse = await this.getBillingOrg(orgId);
const cacheKey = `customer_${orgIdToUse}_${featureId}`;
const cached = cache.get<string>(cacheKey);
const cached = await cache.get<string>(cacheKey);
if (cached) {
return cached;
@@ -253,7 +253,7 @@ export class UsageService {
const customerId = customer.customerId;
// Cache the result
cache.set(cacheKey, customerId, 300); // 5 minute TTL
await cache.set(cacheKey, customerId, 300); // 5 minute TTL
return customerId;
} catch (error) {

View File

@@ -107,7 +107,7 @@ export async function applyBlueprint({
[target],
matchingHealthcheck ? [matchingHealthcheck] : [],
result.proxyResource.protocol,
result.proxyResource.proxyPort
site.newt.version
);
}
}

View File

@@ -11,7 +11,7 @@ import {
userSiteResources
} from "@server/db";
import { sites } from "@server/db";
import { eq, and, ne, inArray } from "drizzle-orm";
import { eq, and, ne, inArray, or } from "drizzle-orm";
import { Config } from "./types";
import logger from "@server/logger";
import { getNextAvailableAliasAddress } from "../ip";
@@ -142,7 +142,10 @@ export async function updateClientResources(
.innerJoin(userOrgs, eq(users.userId, userOrgs.userId))
.where(
and(
inArray(users.username, resourceData.users),
or(
inArray(users.username, resourceData.users),
inArray(users.email, resourceData.users)
),
eq(userOrgs.orgId, orgId)
)
);
@@ -276,7 +279,10 @@ export async function updateClientResources(
.innerJoin(userOrgs, eq(users.userId, userOrgs.userId))
.where(
and(
inArray(users.username, resourceData.users),
or(
inArray(users.username, resourceData.users),
inArray(users.email, resourceData.users)
),
eq(userOrgs.orgId, orgId)
)
);

View File

@@ -212,7 +212,10 @@ export async function updateProxyResources(
} else {
// Update existing resource
const isLicensed = await isLicensedOrSubscribed(orgId, tierMatrix.maintencePage);
const isLicensed = await isLicensedOrSubscribed(
orgId,
tierMatrix.maintencePage
);
if (!isLicensed) {
resourceData.maintenance = undefined;
}
@@ -590,7 +593,10 @@ export async function updateProxyResources(
existingRule.action !== getRuleAction(rule.action) ||
existingRule.match !== rule.match.toUpperCase() ||
existingRule.value !==
getRuleValue(rule.match.toUpperCase(), rule.value) ||
getRuleValue(
rule.match.toUpperCase(),
rule.value
) ||
existingRule.priority !== intendedPriority
) {
validateRule(rule);
@@ -648,7 +654,10 @@ export async function updateProxyResources(
);
}
const isLicensed = await isLicensedOrSubscribed(orgId, tierMatrix.maintencePage);
const isLicensed = await isLicensedOrSubscribed(
orgId,
tierMatrix.maintencePage
);
if (!isLicensed) {
resourceData.maintenance = undefined;
}
@@ -935,7 +944,12 @@ async function syncUserResources(
.select()
.from(users)
.innerJoin(userOrgs, eq(users.userId, userOrgs.userId))
.where(and(eq(users.username, username), eq(userOrgs.orgId, orgId)))
.where(
and(
or(eq(users.username, username), eq(users.email, username)),
eq(userOrgs.orgId, orgId)
)
)
.limit(1);
if (!user) {

View File

@@ -69,7 +69,7 @@ export const AuthSchema = z.object({
.refine((roles) => !roles.includes("Admin"), {
error: "Admin role cannot be included in sso-roles"
}),
"sso-users": z.array(z.email()).optional().default([]),
"sso-users": z.array(z.string()).optional().default([]),
"whitelist-users": z.array(z.email()).optional().default([]),
"auto-login-idp": z.int().positive().optional()
});
@@ -335,7 +335,7 @@ export const ClientResourceSchema = z
.refine((roles) => !roles.includes("Admin"), {
error: "Admin role cannot be included in roles"
}),
users: z.array(z.email()).optional().default([]),
users: z.array(z.string()).optional().default([]),
machines: z.array(z.string()).optional().default([])
})
.refine(

View File

@@ -1,9 +1,9 @@
import NodeCache from "node-cache";
import logger from "@server/logger";
// Create cache with maxKeys limit to prevent memory leaks
// Create local cache with maxKeys limit to prevent memory leaks
// With ~10k requests/day and 5min TTL, 10k keys should be more than sufficient
export const cache = new NodeCache({
export const localCache = new NodeCache({
stdTTL: 3600,
checkperiod: 120,
maxKeys: 10000
@@ -11,10 +11,151 @@ export const cache = new NodeCache({
// Log cache statistics periodically for monitoring
setInterval(() => {
const stats = cache.getStats();
const stats = localCache.getStats();
logger.debug(
`Cache stats - Keys: ${stats.keys}, Hits: ${stats.hits}, Misses: ${stats.misses}, Hit rate: ${stats.hits > 0 ? ((stats.hits / (stats.hits + stats.misses)) * 100).toFixed(2) : 0}%`
`Local cache stats - Keys: ${stats.keys}, Hits: ${stats.hits}, Misses: ${stats.misses}, Hit rate: ${stats.hits > 0 ? ((stats.hits / (stats.hits + stats.misses)) * 100).toFixed(2) : 0}%`
);
}, 300000); // Every 5 minutes
/**
* Adaptive cache that uses Redis when available in multi-node environments,
* otherwise falls back to local memory cache for single-node deployments.
*/
class AdaptiveCache {
/**
* Set a value in the cache
* @param key - Cache key
* @param value - Value to cache (will be JSON stringified for Redis)
* @param ttl - Time to live in seconds (0 = no expiration)
* @returns boolean indicating success
*/
async set(key: string, value: any, ttl?: number): Promise<boolean> {
const effectiveTtl = ttl === 0 ? undefined : ttl;
// Use local cache as fallback or primary
const success = localCache.set(key, value, effectiveTtl || 0);
if (success) {
logger.debug(`Set key in local cache: ${key}`);
}
return success;
}
/**
* Get a value from the cache
* @param key - Cache key
* @returns The cached value or undefined if not found
*/
async get<T = any>(key: string): Promise<T | undefined> {
// Use local cache as fallback or primary
const value = localCache.get<T>(key);
if (value !== undefined) {
logger.debug(`Cache hit in local cache: ${key}`);
} else {
logger.debug(`Cache miss in local cache: ${key}`);
}
return value;
}
/**
* Delete a value from the cache
* @param key - Cache key or array of keys
* @returns Number of deleted entries
*/
async del(key: string | string[]): Promise<number> {
const keys = Array.isArray(key) ? key : [key];
let deletedCount = 0;
// Use local cache as fallback or primary
for (const k of keys) {
const success = localCache.del(k);
if (success > 0) {
deletedCount++;
logger.debug(`Deleted key from local cache: ${k}`);
}
}
return deletedCount;
}
/**
* Check if a key exists in the cache
* @param key - Cache key
* @returns boolean indicating if key exists
*/
async has(key: string): Promise<boolean> {
// Use local cache as fallback or primary
return localCache.has(key);
}
/**
* Get multiple values from the cache
* @param keys - Array of cache keys
* @returns Array of values (undefined for missing keys)
*/
async mget<T = any>(keys: string[]): Promise<(T | undefined)[]> {
// Use local cache as fallback or primary
return keys.map((key) => localCache.get<T>(key));
}
/**
* Flush all keys from the cache
*/
async flushAll(): Promise<void> {
localCache.flushAll();
logger.debug("Flushed local cache");
}
/**
* Get cache statistics
* Note: Only returns local cache stats, Redis stats are not included
*/
getStats() {
return localCache.getStats();
}
/**
* Get the current cache backend being used
* @returns "redis" if Redis is available and healthy, "local" otherwise
*/
getCurrentBackend(): "redis" | "local" {
return "local";
}
/**
* Take a key from the cache and delete it
* @param key - Cache key
* @returns The value or undefined if not found
*/
async take<T = any>(key: string): Promise<T | undefined> {
const value = await this.get<T>(key);
if (value !== undefined) {
await this.del(key);
}
return value;
}
/**
* Get TTL (time to live) for a key
* @param key - Cache key
* @returns TTL in seconds, 0 if no expiration, -1 if key doesn't exist
*/
getTtl(key: string): number {
const ttl = localCache.getTtl(key);
if (ttl === undefined) {
return -1;
}
return Math.max(0, Math.floor((ttl - Date.now()) / 1000));
}
/**
* Get all keys from the cache
* Note: Only returns local cache keys, Redis keys are not included
*/
keys(): string[] {
return localCache.keys();
}
}
// Export singleton instance
export const cache = new AdaptiveCache();
export default cache;
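This open-source variant always resolves to the local backend, but the whole surface is async so call sites stay source-compatible with the Redis-backed private build. A usage sketch (the #dynamic alias is assumed to resolve to this module):

import cache from "#dynamic/lib/cache";

// Both calls must now be awaited, even on the purely local backend.
await cache.set("customer_org1_feature", "cus_abc", 300); // 5 minute TTL
const customerId = await cache.get<string>("customer_org1_feature");

// take() reads and deletes in one step; getTtl() stays synchronous.
const oneShot = await cache.take<string>("customer_org1_feature");
const secondsLeft = cache.getTtl("customer_org1_feature"); // -1 once deleted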

View File

@@ -4,8 +4,12 @@ import { cleanUpOldLogs as cleanUpOldActionLogs } from "#dynamic/middlewares/log
import { cleanUpOldLogs as cleanUpOldRequestLogs } from "@server/routers/badger/logRequestAudit";
import { gt, or } from "drizzle-orm";
import { cleanUpOldFingerprintSnapshots } from "@server/routers/olm/fingerprintingUtils";
import { build } from "@server/build";
export function initLogCleanupInterval() {
if (build == "saas") { // skip log cleanup for saas builds
return null;
}
return setInterval(
async () => {
const orgsToClean = await db

View File

@@ -0,0 +1,20 @@
import semver from "semver";
export function canCompress(
clientVersion: string | null | undefined,
type: "newt" | "olm"
): boolean {
try {
if (!clientVersion) return false;
// check if it is a valid semver
if (!semver.valid(clientVersion)) return false;
if (type === "newt") {
return semver.gte(clientVersion, "1.10.3");
} else if (type === "olm") {
return semver.gte(clientVersion, "1.4.3");
}
return false;
} catch {
return false;
}
}
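Expected outcomes of the version gate above (values illustrative):

canCompress("1.10.3", "newt");       // true: meets the newt threshold exactly
canCompress("1.10.2", "newt");       // false: below the threshold
canCompress("1.4.3", "olm");         // true: meets the olm threshold
canCompress("not-a-version", "olm"); // false: rejected by semver.valid()
canCompress(null, "newt");           // false: missing version means no compression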

View File

@@ -2,7 +2,7 @@ import path from "path";
import { fileURLToPath } from "url";
// This is a placeholder value replaced by the build process
export const APP_VERSION = "1.15.4";
export const APP_VERSION = "1.16.0";
export const __FILENAME = fileURLToPath(import.meta.url);
export const __DIRNAME = path.dirname(__FILENAME);

View File

@@ -85,9 +85,7 @@ export async function deleteOrgById(
deletedNewtIds.push(deletedNewt.newtId);
await trx
.delete(newtSessions)
.where(
eq(newtSessions.newtId, deletedNewt.newtId)
);
.where(eq(newtSessions.newtId, deletedNewt.newtId));
}
}
}
@@ -121,33 +119,38 @@ export async function deleteOrgById(
eq(clientSitesAssociationsCache.clientId, client.clientId)
);
}
await trx.delete(resources).where(eq(resources.orgId, orgId));
const allOrgDomains = await trx
.select()
.from(orgDomains)
.innerJoin(domains, eq(domains.domainId, orgDomains.domainId))
.innerJoin(domains, eq(orgDomains.domainId, domains.domainId))
.where(
and(
eq(orgDomains.orgId, orgId),
eq(domains.configManaged, false)
)
);
logger.info(`Found ${allOrgDomains.length} domains to delete`);
const domainIdsToDelete: string[] = [];
for (const orgDomain of allOrgDomains) {
const domainId = orgDomain.domains.domainId;
const orgCount = await trx
.select({ count: sql<number>`count(*)` })
const [orgCount] = await trx
.select({ count: count() })
.from(orgDomains)
.where(eq(orgDomains.domainId, domainId));
if (orgCount[0].count === 1) {
logger.info(`Found ${orgCount.count} orgs using domain ${domainId}`);
if (orgCount.count === 1) {
domainIdsToDelete.push(domainId);
}
}
logger.info(`Found ${domainIdsToDelete.length} domains to delete`);
if (domainIdsToDelete.length > 0) {
await trx
.delete(domains)
.where(inArray(domains.domainId, domainIdsToDelete));
}
await trx.delete(resources).where(eq(resources.orgId, orgId));
await usageService.add(orgId, FeatureId.ORGINIZATIONS, -1, trx); // here we are decreasing the org count BEFORE deleting the org because we need to still be able to get the org to get the billing org inside of here
@@ -231,15 +234,13 @@ export function sendTerminationMessages(result: DeleteOrgByIdResult): void {
);
}
for (const olmId of result.olmsToTerminate) {
sendTerminateClient(
0,
OlmErrorCodes.TERMINATED_REKEYED,
olmId
).catch((error) => {
logger.error(
"Failed to send termination message to olm:",
error
);
});
sendTerminateClient(0, OlmErrorCodes.TERMINATED_REKEYED, olmId).catch(
(error) => {
logger.error(
"Failed to send termination message to olm:",
error
);
}
);
}
}

View File

@@ -189,6 +189,46 @@ export const configSchema = z
.prefault({})
})
.optional(),
postgres_logs: z
.object({
connection_string: z
.string()
.optional()
.transform(getEnvOrYaml("POSTGRES_LOGS_CONNECTION_STRING")),
replicas: z
.array(
z.object({
connection_string: z.string()
})
)
.optional(),
pool: z
.object({
max_connections: z
.number()
.positive()
.optional()
.default(20),
max_replica_connections: z
.number()
.positive()
.optional()
.default(10),
idle_timeout_ms: z
.number()
.positive()
.optional()
.default(30000),
connection_timeout_ms: z
.number()
.positive()
.optional()
.default(5000)
})
.optional()
.prefault({})
})
.optional(),
traefik: z
.object({
http_entrypoint: z.string().optional().default("web"),
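Operators can also point the logs driver at a dedicated database through environment variables instead of these YAML keys; a sketch of the equivalence (connection strings are placeholders):

// Equivalent to setting postgres_logs.connection_string and
// postgres_logs.replicas in the config file; createLogsDb() checks the
// environment first and falls back to the main database when neither is set.
process.env.POSTGRES_LOGS_CONNECTION_STRING =
    "postgres://user:pass@logs-primary:5432/logs";
process.env.POSTGRES_LOGS_REPLICA_CONNECTION_STRINGS =
    "postgres://user:pass@logs-replica-1:5432/logs," +
    "postgres://user:pass@logs-replica-2:5432/logs";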

View File

@@ -478,6 +478,7 @@ async function handleMessagesForSiteClients(
}
if (isAdd) {
// TODO: if we are in jit mode here should we really be sending this?
await initPeerAddHandshake(
// this will kick off the add peer process for the client
client.clientId,
@@ -572,7 +573,7 @@ export async function updateClientSiteDestinations(
destinations: [
{
destinationIP: site.sites.subnet.split("/")[0],
destinationPort: site.sites.listenPort || 0
destinationPort: site.sites.listenPort || 1 // this satisfies gerbil for now but should be reevaluated
}
]
};
@@ -580,7 +581,7 @@ export async function updateClientSiteDestinations(
// add to the existing destinations
destinations.destinations.push({
destinationIP: site.sites.subnet.split("/")[0],
destinationPort: site.sites.listenPort || 0
destinationPort: site.sites.listenPort || 1 // this satisfies gerbil for now but should be reevaluated
});
}
@@ -670,7 +671,11 @@ async function handleSubnetProxyTargetUpdates(
`Adding ${targetsToAdd.length} subnet proxy targets for siteResource ${siteResource.siteResourceId}`
);
proxyJobs.push(
addSubnetProxyTargets(newt.newtId, targetsToAdd)
addSubnetProxyTargets(
newt.newtId,
targetsToAdd,
newt.version
)
);
}
@@ -706,7 +711,11 @@ async function handleSubnetProxyTargetUpdates(
`Removing ${targetsToRemove.length} subnet proxy targets for siteResource ${siteResource.siteResourceId}`
);
proxyJobs.push(
removeSubnetProxyTargets(newt.newtId, targetsToRemove)
removeSubnetProxyTargets(
newt.newtId,
targetsToRemove,
newt.version
)
);
}
@@ -1081,6 +1090,7 @@ async function handleMessagesForClientSites(
continue;
}
// TODO: if we are in jit mode here should we really be sending this?
await initPeerAddHandshake(
// this will kick off the add peer process for the client
client.clientId,
@@ -1147,7 +1157,7 @@ async function handleMessagesForClientResources(
// Add subnet proxy targets for each site
for (const [siteId, resources] of addedBySite.entries()) {
const [newt] = await trx
.select({ newtId: newts.newtId })
.select({ newtId: newts.newtId, version: newts.version })
.from(newts)
.where(eq(newts.siteId, siteId))
.limit(1);
@@ -1169,7 +1179,13 @@ async function handleMessagesForClientResources(
]);
if (targets.length > 0) {
proxyJobs.push(addSubnetProxyTargets(newt.newtId, targets));
proxyJobs.push(
addSubnetProxyTargets(
newt.newtId,
targets,
newt.version
)
);
}
try {
@@ -1218,7 +1234,7 @@ async function handleMessagesForClientResources(
// Remove subnet proxy targets for each site
for (const [siteId, resources] of removedBySite.entries()) {
const [newt] = await trx
.select({ newtId: newts.newtId })
.select({ newtId: newts.newtId, version: newts.version })
.from(newts)
.where(eq(newts.siteId, siteId))
.limit(1);
@@ -1241,7 +1257,11 @@ async function handleMessagesForClientResources(
if (targets.length > 0) {
proxyJobs.push(
removeSubnetProxyTargets(newt.newtId, targets)
removeSubnetProxyTargets(
newt.newtId,
targets,
newt.version
)
);
}

View File

@@ -1,16 +0,0 @@
export enum AudienceIds {
SignUps = "",
Subscribed = "",
Churned = "",
Newsletter = ""
}
let resend;
export default resend;
export async function moveEmailToAudience(
email: string,
audienceId: AudienceIds
) {
return;
}

server/lib/sanitize.ts (new file, +40 lines)
View File

@@ -0,0 +1,40 @@
/**
* Sanitize a string field before inserting into a database TEXT column.
*
* Two passes are applied:
*
* 1. Lone UTF-16 surrogates: JavaScript strings can hold unpaired surrogates
* (e.g. \uD800 without a following \uDC00-\uDFFF codepoint). These are
* valid in JS but cannot be encoded as UTF-8, triggering
* `report_invalid_encoding` in SQLite / Postgres. They are replaced with
* the Unicode replacement character U+FFFD so the data is preserved as a
* visible signal that something was malformed.
*
* 2. Null bytes and C0 control characters: SQLite stores TEXT as
* null-terminated C strings, so \x00 in a value causes
* `report_invalid_encoding`. Bots and scanners routinely inject null bytes
* into URLs (e.g. `/path\u0000.jpg`). All C0 control characters in the
* range \x00-\x1F are stripped except for the three that are legitimate in
* text payloads: HT (\x09), LF (\x0A), and CR (\x0D). DEL (\x7F) is also
* stripped.
*/
export function sanitizeString(value: string): string;
export function sanitizeString(
value: string | null | undefined
): string | undefined;
export function sanitizeString(
value: string | null | undefined
): string | undefined {
if (value == null) return undefined;
return (
value
// Replace lone high surrogates (not followed by a low surrogate)
// and lone low surrogates (not preceded by a high surrogate).
.replace(
/[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?<![\uD800-\uDBFF])[\uDC00-\uDFFF]/g,
"\uFFFD"
)
// Strip null bytes, C0 control chars (except HT/LF/CR), and DEL.
.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, "")
);
}
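Behavior of the two passes on representative inputs:

sanitizeString("/path\u0000.jpg");   // => "/path.jpg" (null byte stripped)
sanitizeString("bad\uD800data");     // => "bad\uFFFDdata" (lone surrogate replaced)
sanitizeString("line1\nline2\tend"); // => unchanged (HT/LF/CR are preserved)
sanitizeString(undefined);           // => undefined (nullish normalizes, no throw)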

View File

@@ -1,16 +1,3 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import * as crypto from "crypto";
/**

View File

@@ -218,10 +218,11 @@ export class TraefikConfigManager {
return true;
}
// Fetch if it's been more than 24 hours (for renewals)
const dayInMs = 24 * 60 * 60 * 1000;
const timeSinceLastFetch =
Date.now() - this.lastCertificateFetch.getTime();
// Fetch if it's been more than 24 hours (daily routine check)
if (timeSinceLastFetch > dayInMs) {
logger.info("Fetching certificates due to 24-hour renewal check");
return true;
@@ -265,7 +266,7 @@ export class TraefikConfigManager {
return true;
}
// Check if any local certificates are missing or appear to be outdated
// Check if any local certificates are missing (needs immediate fetch)
for (const domain of domainsNeedingCerts) {
const localState = this.lastLocalCertificateState.get(domain);
if (!localState || !localState.exists) {
@@ -274,17 +275,46 @@ export class TraefikConfigManager {
);
return true;
}
}
// Check if certificate is expiring soon (within 30 days)
if (localState.expiresAt) {
const nowInSeconds = Math.floor(Date.now() / 1000);
const secondsUntilExpiry = localState.expiresAt - nowInSeconds;
const daysUntilExpiry = secondsUntilExpiry / (60 * 60 * 24);
if (daysUntilExpiry < 30) {
logger.info(
`Fetching certificates due to upcoming expiry for ${domain} (${Math.round(daysUntilExpiry)} days remaining)`
);
return true;
// For expiry checks, throttle to every 6 hours to avoid querying the
// API/DB on every monitor loop. The certificate-service renews certs
// 45 days before expiry, so checking every 6 hours is plenty frequent
// to pick up renewed certs promptly.
const renewalCheckIntervalMs = 6 * 60 * 60 * 1000; // 6 hours
if (timeSinceLastFetch > renewalCheckIntervalMs) {
// Check non-wildcard certs for expiry (within 45 days to match
// the server-side renewal window in certificate-service)
for (const domain of domainsNeedingCerts) {
const localState = this.lastLocalCertificateState.get(domain);
if (localState?.expiresAt) {
const nowInSeconds = Math.floor(Date.now() / 1000);
const secondsUntilExpiry =
localState.expiresAt - nowInSeconds;
const daysUntilExpiry = secondsUntilExpiry / (60 * 60 * 24);
if (daysUntilExpiry < 45) {
logger.info(
`Fetching certificates due to upcoming expiry for ${domain} (${Math.round(daysUntilExpiry)} days remaining)`
);
return true;
}
}
}
// Also check wildcard certificates for expiry. These are not
// included in domainsNeedingCerts since their subdomains are
// filtered out, so we must check them separately.
for (const [certDomain, state] of this.lastLocalCertificateState) {
if (state.exists && state.wildcard && state.expiresAt) {
const nowInSeconds = Math.floor(Date.now() / 1000);
const secondsUntilExpiry = state.expiresAt - nowInSeconds;
const daysUntilExpiry = secondsUntilExpiry / (60 * 60 * 24);
if (daysUntilExpiry < 45) {
logger.info(
`Fetching certificates due to upcoming expiry for wildcard cert ${certDomain} (${Math.round(daysUntilExpiry)} days remaining)`
);
return true;
}
}
}
}
@@ -361,6 +391,26 @@ export class TraefikConfigManager {
}
}
// Also include wildcard cert base domains that are
// expiring or expired so they get re-fetched even though
// their subdomains were filtered out above.
for (const [certDomain, state] of this
.lastLocalCertificateState) {
if (state.exists && state.wildcard && state.expiresAt) {
const nowInSeconds = Math.floor(Date.now() / 1000);
const secondsUntilExpiry =
state.expiresAt - nowInSeconds;
const daysUntilExpiry =
secondsUntilExpiry / (60 * 60 * 24);
if (daysUntilExpiry < 45) {
domainsToFetch.add(certDomain);
logger.info(
`Including expiring wildcard cert domain ${certDomain} in fetch (${Math.round(daysUntilExpiry)} days remaining)`
);
}
}
}
if (domainsToFetch.size > 0) {
// Get valid certificates for domains not covered by wildcards
validCertificates =
@@ -507,11 +557,18 @@ export class TraefikConfigManager {
config.getRawConfig().server
.session_cookie_name,
// deprecated
accessTokenQueryParam:
config.getRawConfig().server
.resource_access_token_param,
accessTokenIdHeader:
config.getRawConfig().server
.resource_access_token_headers.id,
accessTokenHeader:
config.getRawConfig().server
.resource_access_token_headers.token,
resourceSessionRequestParam:
config.getRawConfig().server
.resource_session_request_param

View File

@@ -14,7 +14,7 @@ import logger from "@server/logger";
import config from "@server/lib/config";
import { resources, sites, Target, targets } from "@server/db";
import createPathRewriteMiddleware from "./middleware";
import { sanitize, validatePathRewriteConfig } from "./utils";
import { sanitize, encodePath, validatePathRewriteConfig } from "./utils";
const redirectHttpsMiddlewareName = "redirect-to-https";
const badgerMiddlewareName = "badger";
@@ -44,7 +44,7 @@ export async function getTraefikConfig(
filterOutNamespaceDomains = false, // UNUSED BUT USED IN PRIVATE
generateLoginPageRouters = false, // UNUSED BUT USED IN PRIVATE
allowRawResources = true,
allowMaintenancePage = true, // UNUSED BUT USED IN PRIVATE
allowMaintenancePage = true // UNUSED BUT USED IN PRIVATE
): Promise<any> {
// Get resources with their targets and sites in a single optimized query
// Start from sites on this exit node, then join to targets and resources
@@ -127,7 +127,7 @@ export async function getTraefikConfig(
resourcesWithTargetsAndSites.forEach((row) => {
const resourceId = row.resourceId;
const resourceName = sanitize(row.resourceName) || "";
const targetPath = sanitize(row.path) || ""; // Handle null/undefined paths
const targetPath = encodePath(row.path); // Use encodePath to avoid collisions (e.g. "/a/b" vs "/a-b")
const pathMatchType = row.pathMatchType || "";
const rewritePath = row.rewritePath || "";
const rewritePathType = row.rewritePathType || "";
@@ -145,7 +145,7 @@ export async function getTraefikConfig(
const mapKey = [resourceId, pathKey].filter(Boolean).join("-");
const key = sanitize(mapKey);
if (!resourcesMap.has(key)) {
if (!resourcesMap.has(mapKey)) {
const validation = validatePathRewriteConfig(
row.path,
row.pathMatchType,
@@ -160,9 +160,10 @@ export async function getTraefikConfig(
return;
}
resourcesMap.set(key, {
resourcesMap.set(mapKey, {
resourceId: row.resourceId,
name: resourceName,
key: key,
fullDomain: row.fullDomain,
ssl: row.ssl,
http: row.http,
@@ -190,7 +191,7 @@ export async function getTraefikConfig(
});
}
resourcesMap.get(key).targets.push({
resourcesMap.get(mapKey).targets.push({
resourceId: row.resourceId,
targetId: row.targetId,
ip: row.ip,
@@ -227,8 +228,9 @@ export async function getTraefikConfig(
};
// get the key and the resource
for (const [key, resource] of resourcesMap.entries()) {
for (const [, resource] of resourcesMap.entries()) {
const targets = resource.targets as TargetWithSite[];
const key = resource.key;
const routerName = `${key}-${resource.name}-router`;
const serviceName = `${key}-${resource.name}-service`;
@@ -477,7 +479,10 @@ export async function getTraefikConfig(
// TODO: HOW TO HANDLE ^^^^^^ BETTER
const anySitesOnline = targets.some(
(target) => target.site.online
(target) =>
target.site.online ||
target.site.type === "local" ||
target.site.type === "wireguard"
);
return (
@@ -490,7 +495,7 @@ export async function getTraefikConfig(
if (target.health == "unhealthy") {
return false;
}
// If any sites are online, exclude offline sites
if (anySitesOnline && !target.site.online) {
return false;
@@ -605,7 +610,10 @@ export async function getTraefikConfig(
servers: (() => {
// Check if any sites are online
const anySitesOnline = targets.some(
(target) => target.site.online
(target) =>
target.site.online ||
target.site.type === "local" ||
target.site.type === "wireguard"
);
return targets
@@ -613,7 +621,7 @@ export async function getTraefikConfig(
if (!target.enabled) {
return false;
}
// If any sites are online, exclude offline sites
if (anySitesOnline && !target.site.online) {
return false;

View File

@@ -0,0 +1,323 @@
import { assertEquals } from "../../../test/assert";
// ── Pure function copies (inlined to avoid pulling in server dependencies) ──
function sanitize(input: string | null | undefined): string | undefined {
if (!input) return undefined;
if (input.length > 50) {
input = input.substring(0, 50);
}
return input
.replace(/[^a-zA-Z0-9-]/g, "-")
.replace(/-+/g, "-")
.replace(/^-|-$/g, "");
}
function encodePath(path: string | null | undefined): string {
if (!path) return "";
return path.replace(/[^a-zA-Z0-9]/g, (ch) => {
return ch.charCodeAt(0).toString(16);
});
}
// ── Helpers ──────────────────────────────────────────────────────────
/**
* Exact replica of the OLD key computation from upstream main.
* Uses sanitize() for paths — this is what had the collision bug.
*/
function oldKeyComputation(
resourceId: number,
path: string | null,
pathMatchType: string | null,
rewritePath: string | null,
rewritePathType: string | null
): string {
const targetPath = sanitize(path) || "";
const pmt = pathMatchType || "";
const rp = rewritePath || "";
const rpt = rewritePathType || "";
const pathKey = [targetPath, pmt, rp, rpt].filter(Boolean).join("-");
const mapKey = [resourceId, pathKey].filter(Boolean).join("-");
return sanitize(mapKey) || "";
}
/**
* Replica of the NEW key computation from our fix.
* Uses encodePath() for paths — collision-free.
*/
function newKeyComputation(
resourceId: number,
path: string | null,
pathMatchType: string | null,
rewritePath: string | null,
rewritePathType: string | null
): string {
const targetPath = encodePath(path);
const pmt = pathMatchType || "";
const rp = rewritePath || "";
const rpt = rewritePathType || "";
const pathKey = [targetPath, pmt, rp, rpt].filter(Boolean).join("-");
const mapKey = [resourceId, pathKey].filter(Boolean).join("-");
return sanitize(mapKey) || "";
}
// ── Tests ────────────────────────────────────────────────────────────
function runTests() {
console.log("Running path encoding tests...\n");
let passed = 0;
// ── encodePath unit tests ────────────────────────────────────────
// Test 1: null/undefined/empty
{
assertEquals(encodePath(null), "", "null should return empty");
assertEquals(
encodePath(undefined),
"",
"undefined should return empty"
);
assertEquals(encodePath(""), "", "empty string should return empty");
console.log(" PASS: encodePath handles null/undefined/empty");
passed++;
}
// Test 2: root path
{
assertEquals(encodePath("/"), "2f", "/ should encode to 2f");
console.log(" PASS: encodePath encodes root path");
passed++;
}
// Test 3: alphanumeric passthrough
{
assertEquals(encodePath("/api"), "2fapi", "/api encodes slash only");
assertEquals(encodePath("/v1"), "2fv1", "/v1 encodes slash only");
assertEquals(encodePath("abc"), "abc", "plain alpha passes through");
console.log(" PASS: encodePath preserves alphanumeric chars");
passed++;
}
// Test 4: all special chars produce unique hex
{
const paths = ["/a/b", "/a-b", "/a.b", "/a_b", "/a b"];
const results = paths.map((p) => encodePath(p));
const unique = new Set(results);
assertEquals(
unique.size,
paths.length,
"all special-char paths must produce unique encodings"
);
console.log(
" PASS: encodePath produces unique output for different special chars"
);
passed++;
}
// Test 5: output is always alphanumeric (safe for Traefik names)
{
const paths = [
"/",
"/api",
"/a/b",
"/a-b",
"/a.b",
"/complex/path/here"
];
for (const p of paths) {
const e = encodePath(p);
assertEquals(
/^[a-zA-Z0-9]+$/.test(e),
true,
`encodePath("${p}") = "${e}" must be alphanumeric`
);
}
console.log(" PASS: encodePath output is always alphanumeric");
passed++;
}
// Test 6: deterministic
{
assertEquals(
encodePath("/api"),
encodePath("/api"),
"same input same output"
);
assertEquals(
encodePath("/a/b/c"),
encodePath("/a/b/c"),
"same input same output"
);
console.log(" PASS: encodePath is deterministic");
passed++;
}
// Test 7: many distinct paths never collide
{
const paths = [
"/",
"/api",
"/api/v1",
"/api/v2",
"/a/b",
"/a-b",
"/a.b",
"/a_b",
"/health",
"/health/check",
"/admin",
"/admin/users",
"/api/v1/users",
"/api/v1/posts",
"/app",
"/app/dashboard"
];
const encoded = new Set(paths.map((p) => encodePath(p)));
assertEquals(
encoded.size,
paths.length,
`expected ${paths.length} unique encodings, got ${encoded.size}`
);
console.log(" PASS: 16 realistic paths all produce unique encodings");
passed++;
}
// ── Collision fix: the actual bug we're fixing ───────────────────
// Test 8: /a/b and /a-b now have different keys (THE BUG FIX)
{
const keyAB = newKeyComputation(1, "/a/b", "prefix", null, null);
const keyDash = newKeyComputation(1, "/a-b", "prefix", null, null);
assertEquals(
keyAB !== keyDash,
true,
"/a/b and /a-b MUST have different keys"
);
console.log(" PASS: collision fix — /a/b vs /a-b have different keys");
passed++;
}
// Test 9: demonstrate the old bug — old code maps /a/b and /a-b to same key
{
const oldKeyAB = oldKeyComputation(1, "/a/b", "prefix", null, null);
const oldKeyDash = oldKeyComputation(1, "/a-b", "prefix", null, null);
assertEquals(
oldKeyAB,
oldKeyDash,
"old code MUST have this collision (confirms the bug exists)"
);
console.log(" PASS: confirmed old code bug — /a/b and /a-b collided");
passed++;
}
// Test 10: /api/v1 and /api-v1 — old code collision, new code fixes it
{
const oldKey1 = oldKeyComputation(1, "/api/v1", "prefix", null, null);
const oldKey2 = oldKeyComputation(1, "/api-v1", "prefix", null, null);
assertEquals(
oldKey1,
oldKey2,
"old code collision for /api/v1 vs /api-v1"
);
const newKey1 = newKeyComputation(1, "/api/v1", "prefix", null, null);
const newKey2 = newKeyComputation(1, "/api-v1", "prefix", null, null);
assertEquals(
newKey1 !== newKey2,
true,
"new code must separate /api/v1 and /api-v1"
);
console.log(" PASS: collision fix — /api/v1 vs /api-v1");
passed++;
}
// Test 11: /app.v2 and /app/v2 and /app-v2 — three-way collision fixed
{
const a = newKeyComputation(1, "/app.v2", "prefix", null, null);
const b = newKeyComputation(1, "/app/v2", "prefix", null, null);
const c = newKeyComputation(1, "/app-v2", "prefix", null, null);
const keys = new Set([a, b, c]);
assertEquals(
keys.size,
3,
"three paths must produce three unique keys"
);
console.log(
" PASS: collision fix — three-way /app.v2, /app/v2, /app-v2"
);
passed++;
}
// ── Edge cases ───────────────────────────────────────────────────
// Test 12: same path in different resources — always separate
{
const key1 = newKeyComputation(1, "/api", "prefix", null, null);
const key2 = newKeyComputation(2, "/api", "prefix", null, null);
assertEquals(
key1 !== key2,
true,
"different resources with same path must have different keys"
);
console.log(" PASS: edge case — same path, different resources");
passed++;
}
// Test 13: same resource, different pathMatchType — separate keys
{
const exact = newKeyComputation(1, "/api", "exact", null, null);
const prefix = newKeyComputation(1, "/api", "prefix", null, null);
assertEquals(
exact !== prefix,
true,
"exact vs prefix must have different keys"
);
console.log(" PASS: edge case — same path, different match types");
passed++;
}
// Test 14: same resource and path, different rewrite config — separate keys
{
const noRewrite = newKeyComputation(1, "/api", "prefix", null, null);
const withRewrite = newKeyComputation(
1,
"/api",
"prefix",
"/backend",
"prefix"
);
assertEquals(
noRewrite !== withRewrite,
true,
"with vs without rewrite must have different keys"
);
console.log(" PASS: edge case — same path, different rewrite config");
passed++;
}
// Test 15: paths with special URL characters
{
const paths = ["/api?foo", "/api#bar", "/api%20baz", "/api+qux"];
const keys = new Set(
paths.map((p) => newKeyComputation(1, p, "prefix", null, null))
);
assertEquals(
keys.size,
paths.length,
"special URL chars must produce unique keys"
);
console.log(" PASS: edge case — special URL characters in paths");
passed++;
}
console.log(`\nAll ${passed} tests passed!`);
}
try {
runTests();
} catch (error) {
console.error("Test failed:", error);
process.exit(1);
}

View File

@@ -13,6 +13,26 @@ export function sanitize(input: string | null | undefined): string | undefined {
.replace(/^-|-$/g, "");
}
/**
* Encode a URL path into a collision-free alphanumeric string suitable for use
* in Traefik map keys.
*
* Unlike sanitize(), this preserves uniqueness by encoding each non-alphanumeric
* character as its hex code. Different paths always produce different outputs.
*
* encodePath("/api") => "2fapi"
* encodePath("/a/b") => "2fa2fb"
* encodePath("/a-b") => "2fa2db" (different from /a/b)
* encodePath("/") => "2f"
* encodePath(null) => ""
*/
export function encodePath(path: string | null | undefined): string {
if (!path) return "";
return path.replace(/[^a-zA-Z0-9]/g, (ch) => {
return ch.charCodeAt(0).toString(16);
});
}
export function validatePathRewriteConfig(
path: string | null,
pathMatchType: string | null,

View File

@@ -14,3 +14,4 @@ export * from "./verifyApiKeyApiKeyAccess";
export * from "./verifyApiKeyClientAccess";
export * from "./verifyApiKeySiteResourceAccess";
export * from "./verifyApiKeyIdpAccess";
export * from "./verifyApiKeyDomainAccess";

View File

@@ -0,0 +1,90 @@
import { Request, Response, NextFunction } from "express";
import { db, domains, orgDomains, apiKeyOrg } from "@server/db";
import { and, eq } from "drizzle-orm";
import createHttpError from "http-errors";
import HttpCode from "@server/types/HttpCode";
export async function verifyApiKeyDomainAccess(
req: Request,
res: Response,
next: NextFunction
) {
try {
const apiKey = req.apiKey;
const domainId =
req.params.domainId || req.body.domainId || req.query.domainId;
const orgId = req.params.orgId;
if (!apiKey) {
return next(
createHttpError(HttpCode.UNAUTHORIZED, "Key not authenticated")
);
}
if (!domainId) {
return next(
createHttpError(HttpCode.BAD_REQUEST, "Invalid domain ID")
);
}
if (apiKey.isRoot) {
// Root keys can access any domain in any org
return next();
}
// Verify domain exists and belongs to the organization
const [domain] = await db
.select()
.from(domains)
.innerJoin(orgDomains, eq(orgDomains.domainId, domains.domainId))
.where(
and(
eq(orgDomains.domainId, domainId),
eq(orgDomains.orgId, orgId)
)
)
.limit(1);
if (!domain) {
return next(
createHttpError(
HttpCode.NOT_FOUND,
`Domain with ID ${domainId} not found in organization ${orgId}`
)
);
}
// Verify the API key has access to this organization
if (!req.apiKeyOrg) {
const apiKeyOrgRes = await db
.select()
.from(apiKeyOrg)
.where(
and(
eq(apiKeyOrg.apiKeyId, apiKey.apiKeyId),
eq(apiKeyOrg.orgId, orgId)
)
)
.limit(1);
req.apiKeyOrg = apiKeyOrgRes[0];
}
if (!req.apiKeyOrg) {
return next(
createHttpError(
HttpCode.FORBIDDEN,
"Key does not have access to this organization"
)
);
}
return next();
} catch (error) {
return next(
createHttpError(
HttpCode.INTERNAL_SERVER_ERROR,
"Error verifying domain access"
)
);
}
}
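A hedged sketch of wiring the new middleware into an Express router; the route path, handler, and import path are illustrative:

import { Router } from "express";
import { verifyApiKeyDomainAccess } from "@server/middlewares";

const router = Router();

// The middleware resolves domainId from params, body, or query, lets root
// keys through unconditionally, and otherwise requires the key to be scoped
// to the org that owns the domain.
router.get(
    "/org/:orgId/domain/:domainId",
    verifyApiKeyDomainAccess,
    (_req, res) => {
        res.json({ ok: true });
    }
);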

View File

@@ -5,17 +5,20 @@ export const registry = new OpenAPIRegistry();
export enum OpenAPITags {
Site = "Site",
Org = "Organization",
Resource = "Resource",
PublicResource = "Public Resource",
PrivateResource = "Private Resource",
Role = "Role",
User = "User",
Invitation = "Invitation",
Target = "Target",
Invitation = "User Invitation",
Target = "Resource Target",
Rule = "Rule",
AccessToken = "Access Token",
Idp = "Identity Provider",
GlobalIdp = "Identity Provider (Global)",
OrgIdp = "Identity Provider (Organization Only)",
Client = "Client",
ApiKey = "API Key",
Domain = "Domain",
Blueprint = "Blueprint",
Ssh = "SSH"
Ssh = "SSH",
Logs = "Logs"
}

View File

@@ -13,8 +13,12 @@
import { rateLimitService } from "#private/lib/rateLimit";
import { cleanup as wsCleanup } from "#private/routers/ws";
import { flushBandwidthToDb } from "@server/routers/newt/handleReceiveBandwidthMessage";
import { flushSiteBandwidthToDb } from "@server/routers/gerbil/receiveBandwidth";
async function cleanup() {
await flushBandwidthToDb();
await flushSiteBandwidthToDb();
await rateLimitService.cleanup();
await wsCleanup();
@@ -25,4 +29,4 @@ export async function initCleanup() {
// Handle process termination
process.on("SIGTERM", () => cleanup());
process.on("SIGINT", () => cleanup());
}
}

server/private/lib/cache.ts (new file, +287 lines)
View File

@@ -0,0 +1,287 @@
import NodeCache from "node-cache";
import logger from "@server/logger";
import { redisManager } from "@server/private/lib/redis";
// Create local cache with maxKeys limit to prevent memory leaks
// With ~10k requests/day and 5min TTL, 10k keys should be more than sufficient
export const localCache = new NodeCache({
stdTTL: 3600,
checkperiod: 120,
maxKeys: 10000
});
// Log cache statistics periodically for monitoring
setInterval(() => {
const stats = localCache.getStats();
logger.debug(
`Local cache stats - Keys: ${stats.keys}, Hits: ${stats.hits}, Misses: ${stats.misses}, Hit rate: ${stats.hits > 0 ? ((stats.hits / (stats.hits + stats.misses)) * 100).toFixed(2) : 0}%`
);
}, 300000); // Every 5 minutes
/**
* Adaptive cache that uses Redis when available in multi-node environments,
* otherwise falls back to local memory cache for single-node deployments.
*/
class AdaptiveCache {
private useRedis(): boolean {
return (
redisManager.isRedisEnabled() &&
redisManager.getHealthStatus().isHealthy
);
}
/**
* Set a value in the cache
* @param key - Cache key
* @param value - Value to cache (will be JSON stringified for Redis)
* @param ttl - Time to live in seconds (0 = no expiration; omit = 3600s for Redis)
* @returns boolean indicating success
*/
async set(key: string, value: any, ttl?: number): Promise<boolean> {
const effectiveTtl = ttl === 0 ? undefined : ttl;
const redisTtl = ttl === 0 ? undefined : (ttl ?? 3600);
if (this.useRedis()) {
try {
const serialized = JSON.stringify(value);
const success = await redisManager.set(
key,
serialized,
redisTtl
);
if (success) {
logger.debug(`Set key in Redis: ${key}`);
return true;
}
// Redis failed, fall through to local cache
logger.debug(
`Redis set failed for key ${key}, falling back to local cache`
);
} catch (error) {
logger.error(`Redis set error for key ${key}:`, error);
// Fall through to local cache
}
}
// Use local cache as fallback or primary
const success = localCache.set(key, value, effectiveTtl || 0);
if (success) {
logger.debug(`Set key in local cache: ${key}`);
}
return success;
}
/**
* Get a value from the cache
* @param key - Cache key
* @returns The cached value or undefined if not found
*/
async get<T = any>(key: string): Promise<T | undefined> {
if (this.useRedis()) {
try {
const value = await redisManager.get(key);
if (value !== null) {
logger.debug(`Cache hit in Redis: ${key}`);
return JSON.parse(value) as T;
}
logger.debug(`Cache miss in Redis: ${key}`);
return undefined;
} catch (error) {
logger.error(`Redis get error for key ${key}:`, error);
// Fall through to local cache
}
}
// Use local cache as fallback or primary
const value = localCache.get<T>(key);
if (value !== undefined) {
logger.debug(`Cache hit in local cache: ${key}`);
} else {
logger.debug(`Cache miss in local cache: ${key}`);
}
return value;
}
/**
* Delete a value from the cache
* @param key - Cache key or array of keys
* @returns Number of deleted entries
*/
async del(key: string | string[]): Promise<number> {
const keys = Array.isArray(key) ? key : [key];
let deletedCount = 0;
if (this.useRedis()) {
try {
for (const k of keys) {
const success = await redisManager.del(k);
if (success) {
deletedCount++;
logger.debug(`Deleted key from Redis: ${k}`);
}
}
if (deletedCount === keys.length) {
return deletedCount;
}
// Some Redis deletes failed, fall through to local cache
logger.debug(
`Some Redis deletes failed, falling back to local cache`
);
} catch (error) {
logger.error(
`Redis del error for keys ${keys.join(", ")}:`,
error
);
// Fall through to local cache
deletedCount = 0;
}
}
// Use local cache as fallback or primary
for (const k of keys) {
const success = localCache.del(k);
if (success > 0) {
deletedCount++;
logger.debug(`Deleted key from local cache: ${k}`);
}
}
return deletedCount;
}
/**
* Check if a key exists in the cache
* @param key - Cache key
* @returns boolean indicating if key exists
*/
async has(key: string): Promise<boolean> {
if (this.useRedis()) {
try {
const value = await redisManager.get(key);
return value !== null;
} catch (error) {
logger.error(`Redis has error for key ${key}:`, error);
// Fall through to local cache
}
}
// Use local cache as fallback or primary
return localCache.has(key);
}
/**
* Get multiple values from the cache
* @param keys - Array of cache keys
* @returns Array of values (undefined for missing keys)
*/
async mget<T = any>(keys: string[]): Promise<(T | undefined)[]> {
if (this.useRedis()) {
try {
const results: (T | undefined)[] = [];
for (const key of keys) {
const value = await redisManager.get(key);
if (value !== null) {
results.push(JSON.parse(value) as T);
} else {
results.push(undefined);
}
}
return results;
} catch (error) {
logger.error(`Redis mget error:`, error);
// Fall through to local cache
}
}
// Use local cache as fallback or primary
return keys.map((key) => localCache.get<T>(key));
}
/**
* Flush all keys from the cache
*/
async flushAll(): Promise<void> {
if (this.useRedis()) {
logger.warn(
"Adaptive cache flushAll called - Redis flush not implemented, only local cache will be flushed"
);
}
localCache.flushAll();
logger.debug("Flushed local cache");
}
/**
* Get cache statistics
* Note: Only returns local cache stats, Redis stats are not included
*/
getStats() {
return localCache.getStats();
}
/**
* Get the current cache backend being used
* @returns "redis" if Redis is available and healthy, "local" otherwise
*/
getCurrentBackend(): "redis" | "local" {
return this.useRedis() ? "redis" : "local";
}
/**
* Take a key from the cache and delete it
* @param key - Cache key
* @returns The value or undefined if not found
*/
async take<T = any>(key: string): Promise<T | undefined> {
const value = await this.get<T>(key);
if (value !== undefined) {
await this.del(key);
}
return value;
}
/**
* Get TTL (time to live) for a key
* @param key - Cache key
* @returns TTL in seconds, 0 if no expiration, -1 if key doesn't exist
*/
getTtl(key: string): number {
// Note: This only works for local cache, Redis TTL is not supported
if (this.useRedis()) {
logger.warn(
`getTtl called for key ${key} but Redis TTL lookup is not implemented`
);
}
const ttl = localCache.getTtl(key);
if (ttl === undefined) {
return -1;
}
return Math.max(0, Math.floor((ttl - Date.now()) / 1000));
}
/**
* Get all keys from the cache
* Note: Only returns local cache keys, Redis keys are not included
*/
keys(): string[] {
if (this.useRedis()) {
logger.warn(
"keys() called but Redis keys are not included, only local cache keys returned"
);
}
return localCache.keys();
}
}
// Export singleton instance
export const cache = new AdaptiveCache();
export default cache;
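Each operation degrades independently: a failed Redis call falls through to local memory rather than erroring out. A usage sketch (key and value are illustrative):

import cache from "#private/lib/cache";

// Values are JSON-serialized on the Redis path, so keep them serializable.
await cache.set("session:abc", { userId: 42 }, 600);
const session = await cache.get<{ userId: number }>("session:abc");

if (cache.getCurrentBackend() === "local") {
    // Redis is disabled or unhealthy; entries live only in this node's memory.
}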

View File

@@ -15,9 +15,8 @@ import config from "./config";
import { certificates, db } from "@server/db";
import { and, eq, isNotNull, or, inArray, sql } from "drizzle-orm";
import { decryptData } from "@server/lib/encryption";
import * as fs from "fs";
import logger from "@server/logger";
import cache from "@server/lib/cache";
import cache from "#private/lib/cache";
let encryptionKeyHex = "";
let encryptionKey: Buffer;
@@ -55,7 +54,7 @@ export async function getValidCertificatesForDomains(
if (useCache) {
for (const domain of domains) {
const cacheKey = `cert:${domain}`;
const cachedCert = cache.get<CertificateResult>(cacheKey);
const cachedCert = await cache.get<CertificateResult>(cacheKey);
if (cachedCert) {
finalResults.push(cachedCert); // Valid cache hit
} else {
@@ -169,7 +168,7 @@ export async function getValidCertificatesForDomains(
// Add to cache for future requests, using the *requested domain* as the key
if (useCache) {
const cacheKey = `cert:${domain}`;
cache.set(cacheKey, resultCert, 180);
await cache.set(cacheKey, resultCert, 180);
}
}
}
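
This hunk is a cache-aside read: check the adaptive cache, fall back to the certificate store, then repopulate with a short TTL. Condensed into one sketch (loadCertificate is an assumed stand-in for the lookup in this file; the 180-second TTL matches the code above):

// Cache-aside sketch around the async adaptive cache.
async function getCertCached(domain: string): Promise<CertificateResult | undefined> {
    const cacheKey = `cert:${domain}`;
    const hit = await cache.get<CertificateResult>(cacheKey);
    if (hit) return hit; // valid cache hit
    const cert = await loadCertificate(domain); // assumed helper: query + decrypt
    if (cert) await cache.set(cacheKey, cert, 180);
    return cert;
}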

View File

@@ -11,17 +11,17 @@
* This file is not licensed under the AGPLv3.
*/
import { accessAuditLog, db, orgs } from "@server/db";
import { accessAuditLog, logsDb, db, orgs } from "@server/db";
import { getCountryCodeForIp } from "@server/lib/geoip";
import logger from "@server/logger";
import { and, eq, lt } from "drizzle-orm";
import cache from "@server/lib/cache";
import cache from "#private/lib/cache";
import { calculateCutoffTimestamp } from "@server/lib/cleanupLogs";
import { stripPortFromHost } from "@server/lib/ip";
async function getAccessDays(orgId: string): Promise<number> {
// check cache first
const cached = cache.get<number>(`org_${orgId}_accessDays`);
const cached = await cache.get<number>(`org_${orgId}_accessDays`);
if (cached !== undefined) {
return cached;
}
@@ -39,7 +39,7 @@ async function getAccessDays(orgId: string): Promise<number> {
}
// store the result in cache
cache.set(
await cache.set(
`org_${orgId}_accessDays`,
org.settingsLogRetentionDaysAction,
300
@@ -52,7 +52,7 @@ export async function cleanUpOldLogs(orgId: string, retentionDays: number) {
const cutoffTimestamp = calculateCutoffTimestamp(retentionDays);
try {
await db
await logsDb
.delete(accessAuditLog)
.where(
and(
@@ -124,7 +124,7 @@ export async function logAccessAudit(data: {
? await getCountryCodeFromIp(data.requestIp)
: undefined;
await db.insert(accessAuditLog).values({
await logsDb.insert(accessAuditLog).values({
timestamp: timestamp,
orgId: data.orgId,
actorType,
@@ -146,14 +146,14 @@ export async function logAccessAudit(data: {
async function getCountryCodeFromIp(ip: string): Promise<string | undefined> {
const geoIpCacheKey = `geoip_access:${ip}`;
let cachedCountryCode: string | undefined = cache.get(geoIpCacheKey);
let cachedCountryCode: string | undefined = await cache.get(geoIpCacheKey);
if (!cachedCountryCode) {
cachedCountryCode = await getCountryCodeForIp(ip); // do it locally
// Only cache successful lookups to avoid filling cache with undefined values
if (cachedCountryCode) {
// Cache for longer since IP geolocation doesn't change frequently
cache.set(geoIpCacheKey, cachedCountryCode, 300); // 5 minutes
await cache.set(geoIpCacheKey, cachedCountryCode, 300); // 5 minutes
}
}

View File

@@ -38,10 +38,6 @@ export const privateConfigSchema = z.object({
.string()
.optional()
.transform(getEnvOrYaml("SERVER_ENCRYPTION_KEY")),
resend_api_key: z
.string()
.optional()
.transform(getEnvOrYaml("RESEND_API_KEY")),
reo_client_id: z
.string()
.optional()

View File

@@ -1,127 +0,0 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import { Resend } from "resend";
import privateConfig from "#private/lib/config";
import logger from "@server/logger";
export enum AudienceIds {
SignUps = "6c4e77b2-0851-4bd6-bac8-f51f91360f1a",
Subscribed = "870b43fd-387f-44de-8fc1-707335f30b20",
Churned = "f3ae92bd-2fdb-4d77-8746-2118afd62549",
Newsletter = "5500c431-191c-42f0-a5d4-8b6d445b4ea0"
}
const resend = new Resend(
privateConfig.getRawPrivateConfig().server.resend_api_key || "missing"
);
export default resend;
export async function moveEmailToAudience(
email: string,
audienceId: AudienceIds
) {
if (process.env.ENVIRONMENT !== "prod") {
logger.debug(
`Skipping moving email ${email} to audience ${audienceId} in non-prod environment`
);
return;
}
const { error, data } = await retryWithBackoff(async () => {
const { data, error } = await resend.contacts.create({
email,
unsubscribed: false,
audienceId
});
if (error) {
throw new Error(
`Error adding email ${email} to audience ${audienceId}: ${error}`
);
}
return { error, data };
});
if (error) {
logger.error(
`Error adding email ${email} to audience ${audienceId}: ${error}`
);
return;
}
if (data) {
logger.debug(
`Added email ${email} to audience ${audienceId} with contact ID ${data.id}`
);
}
const otherAudiences = Object.values(AudienceIds).filter(
(id) => id !== audienceId
);
for (const otherAudienceId of otherAudiences) {
const { error, data } = await retryWithBackoff(async () => {
const { data, error } = await resend.contacts.remove({
email,
audienceId: otherAudienceId
});
if (error) {
throw new Error(
`Error removing email ${email} from audience ${otherAudienceId}: ${error}`
);
}
return { error, data };
});
if (error) {
logger.error(
`Error removing email ${email} from audience ${otherAudienceId}: ${error}`
);
}
if (data) {
logger.info(
`Removed email ${email} from audience ${otherAudienceId}`
);
}
}
}
type RetryOptions = {
retries?: number;
initialDelayMs?: number;
factor?: number;
};
export async function retryWithBackoff<T>(
fn: () => Promise<T>,
options: RetryOptions = {}
): Promise<T> {
const { retries = 5, initialDelayMs = 500, factor = 2 } = options;
let attempt = 0;
let delay = initialDelayMs;
while (true) {
try {
return await fn();
} catch (err) {
attempt++;
if (attempt > retries) throw err;
await new Promise((resolve) => setTimeout(resolve, delay));
delay *= factor;
}
}
}

View File

@@ -34,7 +34,11 @@ import {
import logger from "@server/logger";
import config from "@server/lib/config";
import { orgs, resources, sites, Target, targets } from "@server/db";
import { sanitize, validatePathRewriteConfig } from "@server/lib/traefik/utils";
import {
sanitize,
encodePath,
validatePathRewriteConfig
} from "@server/lib/traefik/utils";
import privateConfig from "#private/lib/config";
import createPathRewriteMiddleware from "@server/lib/traefik/middleware";
import {
@@ -170,7 +174,7 @@ export async function getTraefikConfig(
resourcesWithTargetsAndSites.forEach((row) => {
const resourceId = row.resourceId;
const resourceName = sanitize(row.resourceName) || "";
const targetPath = sanitize(row.path) || ""; // Handle null/undefined paths
const targetPath = encodePath(row.path); // Use encodePath to avoid collisions (e.g. "/a/b" vs "/a-b")
const pathMatchType = row.pathMatchType || "";
const rewritePath = row.rewritePath || "";
const rewritePathType = row.rewritePathType || "";
@@ -192,7 +196,7 @@ export async function getTraefikConfig(
const mapKey = [resourceId, pathKey].filter(Boolean).join("-");
const key = sanitize(mapKey);
if (!resourcesMap.has(key)) {
if (!resourcesMap.has(mapKey)) {
const validation = validatePathRewriteConfig(
row.path,
row.pathMatchType,
@@ -207,9 +211,10 @@ export async function getTraefikConfig(
return;
}
resourcesMap.set(key, {
resourcesMap.set(mapKey, {
resourceId: row.resourceId,
name: resourceName,
key: key,
fullDomain: row.fullDomain,
ssl: row.ssl,
http: row.http,
@@ -243,7 +248,7 @@ export async function getTraefikConfig(
}
// Add target with its associated site data
resourcesMap.get(key).targets.push({
resourcesMap.get(mapKey).targets.push({
resourceId: row.resourceId,
targetId: row.targetId,
ip: row.ip,
@@ -296,8 +301,9 @@ export async function getTraefikConfig(
};
// get the key and the resource
for (const [key, resource] of resourcesMap.entries()) {
for (const [, resource] of resourcesMap.entries()) {
const targets = resource.targets as TargetWithSite[];
const key = resource.key;
const routerName = `${key}-${resource.name}-router`;
const serviceName = `${key}-${resource.name}-service`;
@@ -665,7 +671,10 @@ export async function getTraefikConfig(
// TODO: HOW TO HANDLE ^^^^^^ BETTER
const anySitesOnline = targets.some(
(target) => target.site.online
(target) =>
target.site.online ||
target.site.type === "local" ||
target.site.type === "wireguard"
);
return (
@@ -793,7 +802,10 @@ export async function getTraefikConfig(
servers: (() => {
// Check if any sites are online
const anySitesOnline = targets.some(
(target) => target.site.online
(target) =>
target.site.online ||
target.site.type === "local" ||
target.site.type === "wireguard"
);
return targets
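
The reachability predicate above now appears in both the router check and the service builder; extracting it would keep the two in sync. A sketch, assuming the TargetWithSite shape used in this file:

// Local and wireguard sites have no heartbeat, so treat them as always reachable.
function isSiteReachable(target: TargetWithSite): boolean {
    return (
        target.site.online ||
        target.site.type === "local" ||
        target.site.type === "wireguard"
    );
}

// Usage at both call sites:
const anySitesOnline = targets.some(isSiteReachable);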

View File

@@ -12,18 +12,18 @@
*/
import { ActionsEnum } from "@server/auth/actions";
import { actionAuditLog, db, orgs } from "@server/db";
import { actionAuditLog, logsDb, db, orgs } from "@server/db";
import logger from "@server/logger";
import HttpCode from "@server/types/HttpCode";
import { Request, Response, NextFunction } from "express";
import createHttpError from "http-errors";
import { and, eq, lt } from "drizzle-orm";
import cache from "@server/lib/cache";
import cache from "#private/lib/cache";
import { calculateCutoffTimestamp } from "@server/lib/cleanupLogs";
async function getActionDays(orgId: string): Promise<number> {
// check cache first
const cached = cache.get<number>(`org_${orgId}_actionDays`);
const cached = await cache.get<number>(`org_${orgId}_actionDays`);
if (cached !== undefined) {
return cached;
}
@@ -41,7 +41,7 @@ async function getActionDays(orgId: string): Promise<number> {
}
// store the result in cache
cache.set(
await cache.set(
`org_${orgId}_actionDays`,
org.settingsLogRetentionDaysAction,
300
@@ -54,7 +54,7 @@ export async function cleanUpOldLogs(orgId: string, retentionDays: number) {
const cutoffTimestamp = calculateCutoffTimestamp(retentionDays);
try {
await db
await logsDb
.delete(actionAuditLog)
.where(
and(
@@ -123,7 +123,7 @@ export function logActionAudit(action: ActionsEnum) {
metadata = JSON.stringify(req.params);
}
await db.insert(actionAuditLog).values({
await logsDb.insert(actionAuditLog).values({
timestamp,
orgId,
actorType,

View File

@@ -32,7 +32,7 @@ registry.registerPath({
method: "get",
path: "/org/{orgId}/logs/access/export",
description: "Export the access audit log for an organization as CSV",
tags: [OpenAPITags.Org],
tags: [OpenAPITags.Logs],
request: {
query: queryAccessAuditLogsQuery,
params: queryAccessAuditLogsParams

View File

@@ -32,7 +32,7 @@ registry.registerPath({
method: "get",
path: "/org/{orgId}/logs/action/export",
description: "Export the action audit log for an organization as CSV",
tags: [OpenAPITags.Org],
tags: [OpenAPITags.Logs],
request: {
query: queryActionAuditLogsQuery,
params: queryActionAuditLogsParams

View File

@@ -11,11 +11,11 @@
* This file is not licensed under the AGPLv3.
*/
import { accessAuditLog, db, resources } from "@server/db";
import { accessAuditLog, logsDb, resources, db, primaryDb } from "@server/db";
import { registry } from "@server/openApi";
import { NextFunction } from "express";
import { Request, Response } from "express";
import { eq, gt, lt, and, count, desc } from "drizzle-orm";
import { eq, gt, lt, and, count, desc, inArray } from "drizzle-orm";
import { OpenAPITags } from "@server/openApi";
import { z } from "zod";
import createHttpError from "http-errors";
@@ -115,15 +115,13 @@ function getWhere(data: Q) {
}
export function queryAccess(data: Q) {
return db
return logsDb
.select({
orgId: accessAuditLog.orgId,
action: accessAuditLog.action,
actorType: accessAuditLog.actorType,
actorId: accessAuditLog.actorId,
resourceId: accessAuditLog.resourceId,
resourceName: resources.name,
resourceNiceId: resources.niceId,
ip: accessAuditLog.ip,
location: accessAuditLog.location,
userAgent: accessAuditLog.userAgent,
@@ -133,16 +131,46 @@ export function queryAccess(data: Q) {
actor: accessAuditLog.actor
})
.from(accessAuditLog)
.leftJoin(
resources,
eq(accessAuditLog.resourceId, resources.resourceId)
)
.where(getWhere(data))
.orderBy(desc(accessAuditLog.timestamp), desc(accessAuditLog.id));
}
async function enrichWithResourceDetails(
    logs: Awaited<ReturnType<typeof queryAccess>>
) {
    // Resource details live in the main database, which may be separate from
    // the logs database, so fetch them with a second query instead of a join
const resourceIds = logs
.map(log => log.resourceId)
.filter((id): id is number => id !== null && id !== undefined);
if (resourceIds.length === 0) {
return logs.map(log => ({ ...log, resourceName: null, resourceNiceId: null }));
}
// Fetch resource details from main database
const resourceDetails = await primaryDb
.select({
resourceId: resources.resourceId,
name: resources.name,
niceId: resources.niceId
})
.from(resources)
.where(inArray(resources.resourceId, resourceIds));
// Create a map for quick lookup
const resourceMap = new Map(
resourceDetails.map(r => [r.resourceId, { name: r.name, niceId: r.niceId }])
);
// Enrich logs with resource details
return logs.map(log => ({
...log,
resourceName: log.resourceId ? resourceMap.get(log.resourceId)?.name ?? null : null,
resourceNiceId: log.resourceId ? resourceMap.get(log.resourceId)?.niceId ?? null : null
}));
}
export function countAccessQuery(data: Q) {
const countQuery = db
const countQuery = logsDb
.select({ count: count() })
.from(accessAuditLog)
.where(getWhere(data));
@@ -161,7 +189,7 @@ async function queryUniqueFilterAttributes(
);
// Get unique actors
const uniqueActors = await db
const uniqueActors = await logsDb
.selectDistinct({
actor: accessAuditLog.actor
})
@@ -169,7 +197,7 @@ async function queryUniqueFilterAttributes(
.where(baseConditions);
// Get unique locations
const uniqueLocations = await db
const uniqueLocations = await logsDb
.selectDistinct({
locations: accessAuditLog.location
})
@@ -177,25 +205,40 @@ async function queryUniqueFilterAttributes(
.where(baseConditions);
// Get unique resources with names
const uniqueResources = await db
const uniqueResources = await logsDb
.selectDistinct({
id: accessAuditLog.resourceId,
name: resources.name
id: accessAuditLog.resourceId
})
.from(accessAuditLog)
.leftJoin(
resources,
eq(accessAuditLog.resourceId, resources.resourceId)
)
.where(baseConditions);
// Fetch resource names from main database for the unique resource IDs
const resourceIds = uniqueResources
.map(row => row.id)
.filter((id): id is number => id !== null);
let resourcesWithNames: Array<{ id: number; name: string | null }> = [];
if (resourceIds.length > 0) {
const resourceDetails = await primaryDb
.select({
resourceId: resources.resourceId,
name: resources.name
})
.from(resources)
.where(inArray(resources.resourceId, resourceIds));
resourcesWithNames = resourceDetails.map(r => ({
id: r.resourceId,
name: r.name
}));
}
return {
actors: uniqueActors
.map((row) => row.actor)
.filter((actor): actor is string => actor !== null),
resources: uniqueResources.filter(
(row): row is { id: number; name: string | null } => row.id !== null
),
resources: resourcesWithNames,
locations: uniqueLocations
.map((row) => row.locations)
.filter((location): location is string => location !== null)
@@ -206,7 +249,7 @@ registry.registerPath({
method: "get",
path: "/org/{orgId}/logs/access",
description: "Query the access audit log for an organization",
tags: [OpenAPITags.Org],
tags: [OpenAPITags.Logs],
request: {
query: queryAccessAuditLogsQuery,
params: queryAccessAuditLogsParams
@@ -243,7 +286,10 @@ export async function queryAccessAuditLogs(
const baseQuery = queryAccess(data);
const log = await baseQuery.limit(data.limit).offset(data.offset);
const logsRaw = await baseQuery.limit(data.limit).offset(data.offset);
// Enrich with resource details (handles cross-database scenario)
const log = await enrichWithResourceDetails(logsRaw);
const totalCountResult = await countAccessQuery(data);
const totalCount = totalCountResult[0].count;

View File

@@ -11,7 +11,7 @@
* This file is not licensed under the AGPLv3.
*/
import { actionAuditLog, db } from "@server/db";
import { actionAuditLog, logsDb } from "@server/db";
import { registry } from "@server/openApi";
import { NextFunction } from "express";
import { Request, Response } from "express";
@@ -97,7 +97,7 @@ function getWhere(data: Q) {
}
export function queryAction(data: Q) {
return db
return logsDb
.select({
orgId: actionAuditLog.orgId,
action: actionAuditLog.action,
@@ -113,7 +113,7 @@ export function queryAction(data: Q) {
}
export function countActionQuery(data: Q) {
const countQuery = db
const countQuery = logsDb
.select({ count: count() })
.from(actionAuditLog)
.where(getWhere(data));
@@ -132,14 +132,14 @@ async function queryUniqueFilterAttributes(
);
// Get unique actors
const uniqueActors = await db
const uniqueActors = await logsDb
.selectDistinct({
actor: actionAuditLog.actor
})
.from(actionAuditLog)
.where(baseConditions);
const uniqueActions = await db
const uniqueActions = await logsDb
.selectDistinct({
action: actionAuditLog.action
})
@@ -160,7 +160,7 @@ registry.registerPath({
method: "get",
path: "/org/{orgId}/logs/action",
description: "Query the action audit log for an organization",
tags: [OpenAPITags.Org],
tags: [OpenAPITags.Logs],
request: {
query: queryActionAuditLogsQuery,
params: queryActionAuditLogsParams

View File

@@ -31,16 +31,16 @@ const getOrgSchema = z.strictObject({
orgId: z.string()
});
registry.registerPath({
method: "get",
path: "/org/{orgId}/billing/usage",
description: "Get an organization's billing usage",
tags: [OpenAPITags.Org],
request: {
params: getOrgSchema
},
responses: {}
});
// registry.registerPath({
// method: "get",
// path: "/org/{orgId}/billing/usage",
// description: "Get an organization's billing usage",
// tags: [OpenAPITags.Org],
// request: {
// params: getOrgSchema
// },
// responses: {}
// });
export async function getOrgUsage(
req: Request,

View File

@@ -24,7 +24,6 @@ import { eq, and } from "drizzle-orm";
import logger from "@server/logger";
import stripe from "#private/lib/stripe";
import { handleSubscriptionLifesycle } from "../subscriptionLifecycle";
import { AudienceIds, moveEmailToAudience } from "#private/lib/resend";
import { getSubType } from "./getSubType";
import privateConfig from "#private/lib/config";
import { getLicensePriceSet, LicenseId } from "@server/lib/billing/licenses";
@@ -172,7 +171,7 @@ export async function handleSubscriptionCreated(
const email = orgUserRes.user.email;
if (email) {
moveEmailToAudience(email, AudienceIds.Subscribed);
// TODO: update user in Sendy
}
}
} else if (type === "license") {

View File

@@ -23,7 +23,6 @@ import {
import { eq, and } from "drizzle-orm";
import logger from "@server/logger";
import { handleSubscriptionLifesycle } from "../subscriptionLifecycle";
import { AudienceIds, moveEmailToAudience } from "#private/lib/resend";
import { getSubType } from "./getSubType";
import stripe from "#private/lib/stripe";
import privateConfig from "#private/lib/config";
@@ -109,7 +108,7 @@ export async function handleSubscriptionDeleted(
const email = orgUserRes.user.email;
if (email) {
moveEmailToAudience(email, AudienceIds.Churned);
// TODO: update user in Sendy
}
}
} else if (type === "license") {

View File

@@ -480,9 +480,9 @@ authenticated.get(
authenticated.post(
"/re-key/:clientId/regenerate-client-secret",
verifyClientAccess, // this is first to set the org id
verifyValidLicense,
verifyValidSubscription(tierMatrix.rotateCredentials),
verifyClientAccess, // this is first to set the org id
verifyLimits,
verifyUserHasAction(ActionsEnum.reGenerateSecret),
reKey.reGenerateClientSecret
@@ -490,9 +490,9 @@ authenticated.post(
authenticated.post(
"/re-key/:siteId/regenerate-site-secret",
verifySiteAccess, // this is first to set the org id
verifyValidLicense,
verifyValidSubscription(tierMatrix.rotateCredentials),
verifySiteAccess, // this is first to set the org id
verifyLimits,
verifyUserHasAction(ActionsEnum.reGenerateSecret),
reKey.reGenerateSiteSecret
@@ -515,6 +515,6 @@ authenticated.post(
verifyOrgAccess,
verifyLimits,
verifyUserHasAction(ActionsEnum.signSshKey),
logActionAudit(ActionsEnum.signSshKey),
// logActionAudit(ActionsEnum.signSshKey), // it is handled inside of the function below so we can include more metadata
ssh.signSshKey
);

View File

@@ -15,6 +15,7 @@ import { verifySessionRemoteExitNodeMiddleware } from "#private/middlewares/veri
import { Router } from "express";
import {
db,
logsDb,
exitNodes,
Resource,
ResourcePassword,
@@ -81,6 +82,7 @@ import { verifyResourceAccessToken } from "@server/auth/verifyResourceAccessToke
import semver from "semver";
import { maxmindAsnLookup } from "@server/db/maxmindAsn";
import { checkOrgAccessPolicy } from "@server/lib/checkOrgAccessPolicy";
import { sanitizeString } from "@server/lib/sanitize";
// Zod schemas for request validation
const getResourceByDomainParamsSchema = z.strictObject({
@@ -1859,24 +1861,24 @@ hybridRouter.post(
})
.map((logEntry) => ({
timestamp: logEntry.timestamp,
orgId: logEntry.orgId,
actorType: logEntry.actorType,
actor: logEntry.actor,
actorId: logEntry.actorId,
metadata: logEntry.metadata,
orgId: sanitizeString(logEntry.orgId),
actorType: sanitizeString(logEntry.actorType),
actor: sanitizeString(logEntry.actor),
actorId: sanitizeString(logEntry.actorId),
metadata: sanitizeString(logEntry.metadata),
action: logEntry.action,
resourceId: logEntry.resourceId,
reason: logEntry.reason,
location: logEntry.location,
location: sanitizeString(logEntry.location),
// userAgent: data.userAgent, // TODO: add this
// headers: data.body.headers,
// query: data.body.query,
originalRequestURL: logEntry.originalRequestURL,
scheme: logEntry.scheme,
host: logEntry.host,
path: logEntry.path,
method: logEntry.method,
ip: logEntry.ip,
originalRequestURL: sanitizeString(logEntry.originalRequestURL) ?? "",
scheme: sanitizeString(logEntry.scheme) ?? "",
host: sanitizeString(logEntry.host) ?? "",
path: sanitizeString(logEntry.path) ?? "",
method: sanitizeString(logEntry.method) ?? "",
ip: sanitizeString(logEntry.ip),
tls: logEntry.tls
}));
@@ -1884,7 +1886,7 @@ hybridRouter.post(
const batchSize = 100;
for (let i = 0; i < logEntries.length; i += batchSize) {
const batch = logEntries.slice(i, i + batchSize);
await db.insert(requestAuditLog).values(batch);
await logsDb.insert(requestAuditLog).values(batch);
}
return response(res, {
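
The loop above caps each insert at 100 rows. The same idea as a reusable helper (a sketch; the callback type is deliberately loose so a drizzle query builder can be returned directly):

// Sketch: write rows in fixed-size chunks so no single statement grows unbounded.
async function insertInBatches<T>(
    rows: T[],
    write: (batch: T[]) => PromiseLike<unknown>,
    batchSize = 100
): Promise<void> {
    for (let i = 0; i < rows.length; i += batchSize) {
        await write(rows.slice(i, i + batchSize));
    }
}

// e.g. await insertInBatches(logEntries, (b) => logsDb.insert(requestAuditLog).values(b));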

View File

@@ -52,7 +52,7 @@ registry.registerPath({
method: "put",
path: "/org/{orgId}/idp/oidc",
description: "Create an OIDC IdP for a specific organization.",
tags: [OpenAPITags.Idp, OpenAPITags.Org],
tags: [OpenAPITags.OrgIdp],
request: {
params: paramsSchema,
body: {

View File

@@ -35,7 +35,7 @@ registry.registerPath({
method: "delete",
path: "/org/{orgId}/idp/{idpId}",
description: "Delete IDP for a specific organization.",
tags: [OpenAPITags.Idp, OpenAPITags.Org],
tags: [OpenAPITags.OrgIdp],
request: {
params: paramsSchema
},

View File

@@ -50,9 +50,9 @@ async function query(idpId: number, orgId: string) {
registry.registerPath({
method: "get",
path: "/org/:orgId/idp/:idpId",
path: "/org/{orgId}/idp/{idpId}",
description: "Get an IDP by its IDP ID for a specific organization.",
tags: [OpenAPITags.Idp, OpenAPITags.Org],
tags: [OpenAPITags.OrgIdp],
request: {
params: paramsSchema
},

View File

@@ -67,7 +67,7 @@ registry.registerPath({
method: "get",
path: "/org/{orgId}/idp",
description: "List all IDP for a specific organization.",
tags: [OpenAPITags.Idp, OpenAPITags.Org],
tags: [OpenAPITags.OrgIdp],
request: {
query: querySchema,
params: paramsSchema

View File

@@ -59,7 +59,7 @@ registry.registerPath({
method: "post",
path: "/org/{orgId}/idp/{idpId}/oidc",
description: "Update an OIDC IdP for a specific organization.",
tags: [OpenAPITags.Idp, OpenAPITags.Org],
tags: [OpenAPITags.OrgIdp],
request: {
params: paramsSchema,
body: {

View File

@@ -38,7 +38,7 @@ export const startRemoteExitNodeOfflineChecker = (): void => {
);
// Find clients that haven't pinged in the last 2 minutes and mark them as offline
const newlyOfflineNodes = await db
const offlineNodes = await db
.update(exitNodes)
.set({ online: false })
.where(
@@ -53,32 +53,15 @@ export const startRemoteExitNodeOfflineChecker = (): void => {
)
.returning();
// Update the sites to offline if they have not pinged either
const exitNodeIds = newlyOfflineNodes.map(
(node) => node.exitNodeId
);
const sitesOnNode = await db
.select()
.from(sites)
.where(
and(
eq(sites.online, true),
inArray(sites.exitNodeId, exitNodeIds)
)
if (offlineNodes.length > 0) {
logger.info(
`checkRemoteExitNodeOffline: Marked ${offlineNodes.length} remoteExitNode client(s) offline due to inactivity`
);
// loop through the sites and process their lastBandwidthUpdate as an iso string and if its more than 1 minute old then mark the site offline
for (const site of sitesOnNode) {
if (!site.lastBandwidthUpdate) {
continue;
}
const lastBandwidthUpdate = new Date(site.lastBandwidthUpdate);
if (Date.now() - lastBandwidthUpdate.getTime() > 60 * 1000) {
await db
.update(sites)
.set({ online: false })
.where(eq(sites.siteId, site.siteId));
for (const offlineClient of offlineNodes) {
logger.debug(
`checkRemoteExitNodeOffline: Client ${offlineClient.exitNodeId} marked offline (lastPing: ${offlineClient.lastPing})`
);
}
}
} catch (error) {

View File

@@ -52,7 +52,7 @@ registry.registerPath({
method: "get",
path: "/maintenance/info",
description: "Get maintenance information for a resource by domain.",
tags: [OpenAPITags.Resource],
tags: [OpenAPITags.PublicResource],
request: {
query: z.object({
fullDomain: z.string()

View File

@@ -14,7 +14,9 @@
import { Request, Response, NextFunction } from "express";
import { z } from "zod";
import {
actionAuditLog,
db,
logsDb,
newts,
roles,
roundTripMessageTracker,
@@ -29,12 +31,12 @@ import HttpCode from "@server/types/HttpCode";
import createHttpError from "http-errors";
import logger from "@server/logger";
import { fromError } from "zod-validation-error";
import { OpenAPITags, registry } from "@server/openApi";
import { and, eq, inArray, or } from "drizzle-orm";
import { canUserAccessSiteResource } from "@server/auth/canUserAccessSiteResource";
import { signPublicKey, getOrgCAKeys } from "#private/lib/sshCA";
import { signPublicKey, getOrgCAKeys } from "@server/lib/sshCA";
import config from "@server/lib/config";
import { sendToClient } from "#private/routers/ws";
import { ActionsEnum } from "@server/auth/actions";
const paramsSchema = z.strictObject({
orgId: z.string().nonempty()
@@ -64,6 +66,7 @@ export type SignSshKeyResponse = {
sshUsername: string;
sshHost: string;
resourceId: number;
siteId: number;
keyId: string;
validPrincipals: string[];
validAfter: string;
@@ -185,7 +188,7 @@ export async function signSshKey(
} else if (req.user?.username) {
usernameToUse = req.user.username;
// We need to clean out any spaces or special characters from the username to ensure it's valid for SSH certificates
usernameToUse = usernameToUse.replace(/[^a-zA-Z0-9_-]/g, "");
usernameToUse = usernameToUse.replace(/[^a-zA-Z0-9_-]/g, "-");
if (!usernameToUse) {
return next(
createHttpError(
@@ -203,6 +206,9 @@ export async function signSshKey(
);
}
// prefix with p-
usernameToUse = `p-${usernameToUse}`;
        // check if we already have an existing user in this org with the same name
const [existingUserWithSameName] = await db
.select()
@@ -248,6 +254,16 @@ export async function signSshKey(
);
}
}
await db
.update(userOrgs)
.set({ pamUsername: usernameToUse })
.where(
and(
eq(userOrgs.orgId, orgId),
eq(userOrgs.userId, userId)
)
);
} else {
usernameToUse = userOrg.pamUsername;
}
@@ -319,7 +335,16 @@ export async function signSshKey(
);
}
// Check if the user has access to the resource (any of their roles)
if (resource.mode == "cidr") {
return next(
createHttpError(
HttpCode.BAD_REQUEST,
"SSHing is not supported for CIDR resources"
)
);
}
// Check if the user has access to the resource
const hasAccess = await canUserAccessSiteResource({
userId: userId,
resourceId: resource.siteResourceId,
@@ -444,6 +469,20 @@ export async function signSshKey(
sshHost = resource.destination;
}
await logsDb.insert(actionAuditLog).values({
timestamp: Math.floor(Date.now() / 1000),
orgId: orgId,
actorType: "user",
actor: req.user?.username ?? "",
actorId: req.user?.userId ?? "",
action: ActionsEnum.signSshKey,
metadata: JSON.stringify({
resourceId: resource.siteResourceId,
resource: resource.name,
siteId: resource.siteId,
})
});
return response<SignSshKeyResponse>(res, {
data: {
certificate: cert.certificate,
@@ -451,6 +490,7 @@ export async function signSshKey(
sshUsername: usernameToUse,
sshHost: sshHost,
resourceId: resource.siteResourceId,
siteId: resource.siteId,
keyId: cert.keyId,
validPrincipals: cert.validPrincipals,
validAfter: cert.validAfter.toISOString(),

View File

@@ -17,10 +17,13 @@ import {
startRemoteExitNodeOfflineChecker
} from "#private/routers/remoteExitNode";
import { MessageHandler } from "@server/routers/ws";
import { build } from "@server/build";
export const messageHandlers: Record<string, MessageHandler> = {
"remoteExitNode/register": handleRemoteExitNodeRegisterMessage,
"remoteExitNode/ping": handleRemoteExitNodePingMessage
};
startRemoteExitNodeOfflineChecker(); // this is to handle the offline check for remote exit nodes
if (build != "saas") {
startRemoteExitNodeOfflineChecker(); // this is to handle the offline check for remote exit nodes
}

View File

@@ -12,6 +12,7 @@
*/
import { Router, Request, Response } from "express";
import zlib from "zlib";
import { Server as HttpServer } from "http";
import { WebSocket, WebSocketServer } from "ws";
import { Socket } from "net";
@@ -24,7 +25,8 @@ import {
OlmSession,
RemoteExitNode,
RemoteExitNodeSession,
remoteExitNodes
remoteExitNodes,
sites
} from "@server/db";
import { eq } from "drizzle-orm";
import { db } from "@server/db";
@@ -57,11 +59,13 @@ const MAX_PENDING_MESSAGES = 50; // Maximum messages to queue during connection
const processMessage = async (
ws: AuthenticatedWebSocket,
data: Buffer,
isBinary: boolean,
clientId: string,
clientType: ClientType
): Promise<void> => {
try {
const message: WSMessage = JSON.parse(data.toString());
const messageBuffer = isBinary ? zlib.gunzipSync(data) : data;
const message: WSMessage = JSON.parse(messageBuffer.toString());
// logger.debug(
// `Processing message from ${clientType.toUpperCase()} ID: ${clientId}, type: ${message.type}`
@@ -76,7 +80,7 @@ const processMessage = async (
clientId,
message.type, // Pass message type for granular limiting
100, // max requests per window
20, // max requests per message type per window
100, // max requests per message type per window
60 * 1000 // window in milliseconds
);
if (rateLimitResult.isLimited) {
@@ -163,8 +167,16 @@ const processPendingMessages = async (
);
const jobs = [];
for (const messageData of ws.pendingMessages) {
jobs.push(processMessage(ws, messageData, clientId, clientType));
for (const pending of ws.pendingMessages) {
jobs.push(
processMessage(
ws,
pending.data,
pending.isBinary,
clientId,
clientType
)
);
}
await Promise.all(jobs);
@@ -185,6 +197,12 @@ const connectedClients: Map<string, AuthenticatedWebSocket[]> = new Map();
// Config version tracking map (local to this node, resets on server restart)
const clientConfigVersions: Map<string, number> = new Map();
// Tracks the last Unix timestamp (seconds) at which a ping was flushed to the
// DB for a given siteId. Resets on server restart, which is fine: the first
// ping after startup will always write, re-establishing the online state.
const lastPingDbWrite: Map<number, number> = new Map();
const PING_DB_WRITE_INTERVAL = 45; // seconds
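
The lastPingDbWrite map above is a per-site write throttle: at most one DB write per 45 seconds per siteId. Factored out, the check looks like this (a sketch with illustrative names):

// Sketch: returns true at most once per intervalSec for a given id.
function shouldWriteNow(
    lastWrite: Map<number, number>,
    id: number,
    intervalSec: number
): boolean {
    const now = Math.floor(Date.now() / 1000);
    if (now - (lastWrite.get(id) ?? 0) < intervalSec) return false;
    lastWrite.set(id, now);
    return true;
}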
// Recovery tracking
let isRedisRecoveryInProgress = false;
@@ -325,7 +343,9 @@ const addClient = async (
// Check Redis first if enabled
if (redisManager.isRedisEnabled()) {
try {
const redisVersion = await redisManager.get(getConfigVersionKey(clientId));
const redisVersion = await redisManager.get(
getConfigVersionKey(clientId)
);
if (redisVersion !== null) {
configVersion = parseInt(redisVersion, 10);
// Sync to local cache
@@ -337,7 +357,10 @@ const addClient = async (
} else {
// Use local cache version and sync to Redis
configVersion = clientConfigVersions.get(clientId) || 0;
await redisManager.set(getConfigVersionKey(clientId), configVersion.toString());
await redisManager.set(
getConfigVersionKey(clientId),
configVersion.toString()
);
}
} catch (error) {
logger.error("Failed to get/set config version in Redis:", error);
@@ -432,7 +455,9 @@ const removeClient = async (
};
// Helper to get the current config version for a client
const getClientConfigVersion = async (clientId: string): Promise<number | undefined> => {
const getClientConfigVersion = async (
clientId: string
): Promise<number | undefined> => {
// Try Redis first if available
if (redisManager.isRedisEnabled()) {
try {
@@ -502,11 +527,26 @@ const sendToClientLocal = async (
};
const messageString = JSON.stringify(messageWithVersion);
clients.forEach((client) => {
if (client.readyState === WebSocket.OPEN) {
client.send(messageString);
}
});
if (options.compress) {
logger.debug(
`Message size before compression: ${messageString.length} bytes`
);
const compressed = zlib.gzipSync(Buffer.from(messageString, "utf8"));
logger.debug(
`Message size after compression: ${compressed.length} bytes`
);
clients.forEach((client) => {
if (client.readyState === WebSocket.OPEN) {
client.send(compressed);
}
});
} else {
clients.forEach((client) => {
if (client.readyState === WebSocket.OPEN) {
client.send(messageString);
}
});
}
return true;
};
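
The compressed send path here pairs with the isBinary gunzip in processMessage above. A self-contained round trip using the same zlib calls:

import zlib from "zlib";

// Sketch of the wire format: gzip'd UTF-8 JSON when options.compress is set.
const payload = JSON.stringify({ type: "newt/wg/targets/add", data: {} });
const compressed = zlib.gzipSync(Buffer.from(payload, "utf8"));
const restored = JSON.parse(zlib.gunzipSync(compressed).toString()); // same object back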
@@ -532,11 +572,22 @@ const broadcastToAllExceptLocal = async (
configVersion
};
clients.forEach((client) => {
if (client.readyState === WebSocket.OPEN) {
client.send(JSON.stringify(messageWithVersion));
}
});
if (options.compress) {
const compressed = zlib.gzipSync(
Buffer.from(JSON.stringify(messageWithVersion), "utf8")
);
clients.forEach((client) => {
if (client.readyState === WebSocket.OPEN) {
client.send(compressed);
}
});
} else {
clients.forEach((client) => {
if (client.readyState === WebSocket.OPEN) {
client.send(JSON.stringify(messageWithVersion));
}
});
}
}
}
};
@@ -762,7 +813,7 @@ const setupConnection = async (
}
// Set up message handler FIRST to prevent race condition
ws.on("message", async (data) => {
ws.on("message", async (data, isBinary) => {
if (!ws.isFullyConnected) {
// Queue message for later processing with limits
ws.pendingMessages = ws.pendingMessages || [];
@@ -777,11 +828,17 @@ const setupConnection = async (
logger.debug(
`Queueing message from ${clientType.toUpperCase()} ID: ${clientId} (connection not fully established)`
);
ws.pendingMessages.push(data as Buffer);
ws.pendingMessages.push({ data: data as Buffer, isBinary });
return;
}
await processMessage(ws, data as Buffer, clientId, clientType);
await processMessage(
ws,
data as Buffer,
isBinary,
clientId,
clientType
);
});
// Set up other event handlers before async operations
@@ -796,6 +853,35 @@ const setupConnection = async (
);
});
// Handle WebSocket protocol-level pings from older newt clients that do
// not send application-level "newt/ping" messages. Update the site's
// online state and lastPing timestamp so the offline checker treats them
// the same as modern newt clients.
if (clientType === "newt") {
const newtClient = client as Newt;
ws.on("ping", async () => {
if (!newtClient.siteId) return;
const now = Math.floor(Date.now() / 1000);
const lastWrite = lastPingDbWrite.get(newtClient.siteId) ?? 0;
if (now - lastWrite < PING_DB_WRITE_INTERVAL) return;
lastPingDbWrite.set(newtClient.siteId, now);
try {
await db
.update(sites)
.set({
online: true,
lastPing: now
})
.where(eq(sites.siteId, newtClient.siteId));
} catch (error) {
logger.error(
"Error updating newt site online state on WS ping",
{ error }
);
}
});
}
ws.on("error", (error: Error) => {
logger.error(
`WebSocket error for ${clientType.toUpperCase()} ID ${clientId}:`,

View File

@@ -43,7 +43,7 @@ registry.registerPath({
method: "post",
path: "/resource/{resourceId}/access-token",
description: "Generate a new access token for a resource.",
tags: [OpenAPITags.Resource, OpenAPITags.AccessToken],
tags: [OpenAPITags.PublicResource, OpenAPITags.AccessToken],
request: {
params: generateAccssTokenParamsSchema,
body: {

View File

@@ -122,7 +122,7 @@ registry.registerPath({
method: "get",
path: "/org/{orgId}/access-tokens",
description: "List all access tokens in an organization.",
tags: [OpenAPITags.Org, OpenAPITags.AccessToken],
tags: [OpenAPITags.AccessToken],
request: {
params: z.object({
orgId: z.string()
@@ -135,8 +135,8 @@ registry.registerPath({
registry.registerPath({
method: "get",
path: "/resource/{resourceId}/access-tokens",
description: "List all access tokens in an organization.",
tags: [OpenAPITags.Resource, OpenAPITags.AccessToken],
description: "List all access tokens for a resource.",
tags: [OpenAPITags.PublicResource, OpenAPITags.AccessToken],
request: {
params: z.object({
resourceId: z.number()

View File

@@ -37,7 +37,7 @@ registry.registerPath({
method: "put",
path: "/org/{orgId}/api-key",
description: "Create a new API key scoped to the organization.",
tags: [OpenAPITags.Org, OpenAPITags.ApiKey],
tags: [OpenAPITags.ApiKey],
request: {
params: paramsSchema,
body: {

View File

@@ -18,7 +18,7 @@ registry.registerPath({
method: "delete",
path: "/org/{orgId}/api-key/{apiKeyId}",
description: "Delete an API key.",
tags: [OpenAPITags.Org, OpenAPITags.ApiKey],
tags: [OpenAPITags.ApiKey],
request: {
params: paramsSchema
},

View File

@@ -48,7 +48,7 @@ registry.registerPath({
method: "get",
path: "/org/{orgId}/api-key/{apiKeyId}/actions",
description: "List all actions set for an API key.",
tags: [OpenAPITags.Org, OpenAPITags.ApiKey],
tags: [OpenAPITags.ApiKey],
request: {
params: paramsSchema,
query: querySchema

View File

@@ -52,7 +52,7 @@ registry.registerPath({
method: "get",
path: "/org/{orgId}/api-keys",
description: "List all API keys for an organization",
tags: [OpenAPITags.Org, OpenAPITags.ApiKey],
tags: [OpenAPITags.ApiKey],
request: {
params: paramsSchema,
query: querySchema

View File

@@ -25,7 +25,7 @@ registry.registerPath({
path: "/org/{orgId}/api-key/{apiKeyId}/actions",
description:
"Set actions for an API key. This will replace any existing actions.",
tags: [OpenAPITags.Org, OpenAPITags.ApiKey],
tags: [OpenAPITags.ApiKey],
request: {
params: paramsSchema,
body: {

View File

@@ -20,7 +20,7 @@ registry.registerPath({
method: "get",
path: "/org/{orgId}/logs/request",
description: "Query the request audit log for an organization",
tags: [OpenAPITags.Org],
tags: [OpenAPITags.Logs],
request: {
query: queryAccessAuditLogsQuery.omit({
limit: true,

View File

@@ -1,4 +1,4 @@
import { db, requestAuditLog, driver, primaryDb } from "@server/db";
import { logsDb, requestAuditLog, driver, primaryLogsDb } from "@server/db";
import { registry } from "@server/openApi";
import { NextFunction } from "express";
import { Request, Response } from "express";
@@ -74,12 +74,12 @@ async function query(query: Q) {
);
}
const [all] = await primaryDb
const [all] = await primaryLogsDb
.select({ total: count() })
.from(requestAuditLog)
.where(baseConditions);
const [blocked] = await primaryDb
const [blocked] = await primaryLogsDb
.select({ total: count() })
.from(requestAuditLog)
.where(and(baseConditions, eq(requestAuditLog.action, false)));
@@ -90,7 +90,7 @@ async function query(query: Q) {
const DISTINCT_LIMIT = 500;
const requestsPerCountry = await primaryDb
const requestsPerCountry = await primaryLogsDb
.selectDistinct({
code: requestAuditLog.location,
count: totalQ
@@ -118,7 +118,7 @@ async function query(query: Q) {
const booleanTrue = driver === "pg" ? sql`true` : sql`1`;
const booleanFalse = driver === "pg" ? sql`false` : sql`0`;
const requestsPerDay = await primaryDb
const requestsPerDay = await primaryLogsDb
.select({
day: groupByDayFunction.as("day"),
allowedCount:
@@ -151,7 +151,7 @@ registry.registerPath({
method: "get",
path: "/org/{orgId}/logs/analytics",
description: "Query the request audit analytics for an organization",
tags: [OpenAPITags.Org],
tags: [OpenAPITags.Logs],
request: {
query: queryAccessAuditLogsQuery,
params: queryRequestAuditLogsParams

View File

@@ -1,8 +1,8 @@
import { db, primaryDb, requestAuditLog, resources } from "@server/db";
import { logsDb, primaryLogsDb, requestAuditLog, resources, db, primaryDb } from "@server/db";
import { registry } from "@server/openApi";
import { NextFunction } from "express";
import { Request, Response } from "express";
import { eq, gt, lt, and, count, desc } from "drizzle-orm";
import { eq, gt, lt, and, count, desc, inArray } from "drizzle-orm";
import { OpenAPITags } from "@server/openApi";
import { z } from "zod";
import createHttpError from "http-errors";
@@ -107,7 +107,7 @@ function getWhere(data: Q) {
}
export function queryRequest(data: Q) {
return primaryDb
return primaryLogsDb
.select({
id: requestAuditLog.id,
timestamp: requestAuditLog.timestamp,
@@ -129,21 +129,49 @@ export function queryRequest(data: Q) {
host: requestAuditLog.host,
path: requestAuditLog.path,
method: requestAuditLog.method,
tls: requestAuditLog.tls,
resourceName: resources.name,
resourceNiceId: resources.niceId
tls: requestAuditLog.tls
})
.from(requestAuditLog)
.leftJoin(
resources,
eq(requestAuditLog.resourceId, resources.resourceId)
) // TODO: Is this efficient?
.where(getWhere(data))
.orderBy(desc(requestAuditLog.timestamp));
}
async function enrichWithResourceDetails(
    logs: Awaited<ReturnType<typeof queryRequest>>
) {
    // Resource details live in the main database, which may be separate from
    // the logs database, so fetch them with a second query instead of a join
const resourceIds = logs
.map(log => log.resourceId)
.filter((id): id is number => id !== null && id !== undefined);
if (resourceIds.length === 0) {
return logs.map(log => ({ ...log, resourceName: null, resourceNiceId: null }));
}
// Fetch resource details from main database
const resourceDetails = await primaryDb
.select({
resourceId: resources.resourceId,
name: resources.name,
niceId: resources.niceId
})
.from(resources)
.where(inArray(resources.resourceId, resourceIds));
// Create a map for quick lookup
const resourceMap = new Map(
resourceDetails.map(r => [r.resourceId, { name: r.name, niceId: r.niceId }])
);
// Enrich logs with resource details
return logs.map(log => ({
...log,
resourceName: log.resourceId ? resourceMap.get(log.resourceId)?.name ?? null : null,
resourceNiceId: log.resourceId ? resourceMap.get(log.resourceId)?.niceId ?? null : null
}));
}
export function countRequestQuery(data: Q) {
const countQuery = primaryDb
const countQuery = primaryLogsDb
.select({ count: count() })
.from(requestAuditLog)
.where(getWhere(data));
@@ -154,7 +182,7 @@ registry.registerPath({
method: "get",
path: "/org/{orgId}/logs/request",
description: "Query the request audit log for an organization",
tags: [OpenAPITags.Org],
tags: [OpenAPITags.Logs],
request: {
query: queryAccessAuditLogsQuery,
params: queryRequestAuditLogsParams
@@ -185,36 +213,31 @@ async function queryUniqueFilterAttributes(
uniquePaths,
uniqueResources
] = await Promise.all([
primaryDb
primaryLogsDb
.selectDistinct({ actor: requestAuditLog.actor })
.from(requestAuditLog)
.where(baseConditions)
.limit(DISTINCT_LIMIT + 1),
primaryDb
primaryLogsDb
.selectDistinct({ locations: requestAuditLog.location })
.from(requestAuditLog)
.where(baseConditions)
.limit(DISTINCT_LIMIT + 1),
primaryDb
primaryLogsDb
.selectDistinct({ hosts: requestAuditLog.host })
.from(requestAuditLog)
.where(baseConditions)
.limit(DISTINCT_LIMIT + 1),
primaryDb
primaryLogsDb
.selectDistinct({ paths: requestAuditLog.path })
.from(requestAuditLog)
.where(baseConditions)
.limit(DISTINCT_LIMIT + 1),
primaryDb
primaryLogsDb
.selectDistinct({
id: requestAuditLog.resourceId,
name: resources.name
id: requestAuditLog.resourceId
})
.from(requestAuditLog)
.leftJoin(
resources,
eq(requestAuditLog.resourceId, resources.resourceId)
)
.where(baseConditions)
.limit(DISTINCT_LIMIT + 1)
]);
@@ -231,13 +254,33 @@ async function queryUniqueFilterAttributes(
// throw new Error("Too many distinct filter attributes to retrieve. Please refine your time range.");
// }
// Fetch resource names from main database for the unique resource IDs
const resourceIds = uniqueResources
.map(row => row.id)
.filter((id): id is number => id !== null);
let resourcesWithNames: Array<{ id: number; name: string | null }> = [];
if (resourceIds.length > 0) {
const resourceDetails = await primaryDb
.select({
resourceId: resources.resourceId,
name: resources.name
})
.from(resources)
.where(inArray(resources.resourceId, resourceIds));
resourcesWithNames = resourceDetails.map(r => ({
id: r.resourceId,
name: r.name
}));
}
return {
actors: uniqueActors
.map((row) => row.actor)
.filter((actor): actor is string => actor !== null),
resources: uniqueResources.filter(
(row): row is { id: number; name: string | null } => row.id !== null
),
resources: resourcesWithNames,
locations: uniqueLocations
.map((row) => row.locations)
.filter((location): location is string => location !== null),
@@ -280,7 +323,10 @@ export async function queryRequestAuditLogs(
const baseQuery = queryRequest(data);
const log = await baseQuery.limit(data.limit).offset(data.offset);
const logsRaw = await baseQuery.limit(data.limit).offset(data.offset);
// Enrich with resource details (handles cross-database scenario)
const log = await enrichWithResourceDetails(logsRaw);
const totalCountResult = await countRequestQuery(data);
const totalCount = totalCountResult[0].count;

View File

@@ -1,5 +1,5 @@
import { NextFunction, Request, Response } from "express";
import { db, users } from "@server/db";
import { bannedEmails, bannedIps, db, users } from "@server/db";
import HttpCode from "@server/types/HttpCode";
import { email, z } from "zod";
import { fromError } from "zod-validation-error";
@@ -22,7 +22,6 @@ import { checkValidInvite } from "@server/auth/checkValidInvite";
import { passwordSchema } from "@server/auth/passwordSchema";
import { UserType } from "@server/types/UserTypes";
import { build } from "@server/build";
import resend, { AudienceIds, moveEmailToAudience } from "#dynamic/lib/resend";
export const signupBodySchema = z.object({
email: z.email().toLowerCase(),
@@ -66,6 +65,30 @@ export async function signup(
skipVerificationEmail
} = parsedBody.data;
const [bannedEmail] = await db
.select()
.from(bannedEmails)
.where(eq(bannedEmails.email, email))
.limit(1);
if (bannedEmail) {
return next(
createHttpError(HttpCode.FORBIDDEN, "Signup blocked. Do not attempt to continue to use this service.")
);
}
if (req.ip) {
const [bannedIp] = await db
.select()
.from(bannedIps)
.where(eq(bannedIps.ip, req.ip))
.limit(1);
if (bannedIp) {
return next(
createHttpError(HttpCode.FORBIDDEN, "Signup blocked. Do not attempt to continue to use this service.")
);
}
}
const passwordHash = await hashPassword(password);
const userId = generateId(15);
@@ -189,6 +212,7 @@ export async function signup(
dateCreated: moment().toISOString(),
termsAcceptedTimestamp: termsAcceptedTimestamp || null,
termsVersion: "1",
marketingEmailConsent: marketingEmailConsent ?? false,
lastPasswordChange: new Date().getTime()
});
@@ -212,7 +236,7 @@ export async function signup(
logger.debug(
`User ${email} opted in to marketing emails during signup.`
);
moveEmailToAudience(email, AudienceIds.SignUps);
// TODO: update user in Sendy
}
if (config.getRawConfig().flags?.require_email_verification) {

View File

@@ -1,10 +1,12 @@
import { db, orgs, requestAuditLog } from "@server/db";
import { logsDb, primaryLogsDb, db, orgs, requestAuditLog } from "@server/db";
import logger from "@server/logger";
import { and, eq, lt, sql } from "drizzle-orm";
import cache from "@server/lib/cache";
import cache from "#dynamic/lib/cache";
import { calculateCutoffTimestamp } from "@server/lib/cleanupLogs";
import { stripPortFromHost } from "@server/lib/ip";
import { sanitizeString } from "@server/lib/sanitize";
/**
Reasons:
@@ -69,7 +71,7 @@ async function flushAuditLogs() {
try {
// Use a transaction to ensure all inserts succeed or fail together
// This prevents index corruption from partial writes
await db.transaction(async (tx) => {
await logsDb.transaction(async (tx) => {
// Batch insert logs in groups of 25 to avoid overwhelming the database
const BATCH_DB_SIZE = 25;
for (let i = 0; i < logsToWrite.length; i += BATCH_DB_SIZE) {
@@ -130,7 +132,7 @@ export async function shutdownAuditLogger() {
async function getRetentionDays(orgId: string): Promise<number> {
// check cache first
const cached = cache.get<number>(`org_${orgId}_retentionDays`);
const cached = await cache.get<number>(`org_${orgId}_retentionDays`);
if (cached !== undefined) {
return cached;
}
@@ -149,7 +151,7 @@ async function getRetentionDays(orgId: string): Promise<number> {
}
// store the result in cache
cache.set(
await cache.set(
`org_${orgId}_retentionDays`,
org.settingsLogRetentionDaysRequest,
300
@@ -162,7 +164,7 @@ export async function cleanUpOldLogs(orgId: string, retentionDays: number) {
const cutoffTimestamp = calculateCutoffTimestamp(retentionDays);
try {
await db
await logsDb
.delete(requestAuditLog)
.where(
and(
@@ -253,24 +255,23 @@ export async function logRequestAudit(
// Add to buffer instead of writing directly to DB
auditLogBuffer.push({
timestamp,
orgId: data.orgId,
actorType,
actor,
actorId,
metadata,
orgId: sanitizeString(data.orgId),
actorType: sanitizeString(actorType),
actor: sanitizeString(actor),
actorId: sanitizeString(actorId),
metadata: sanitizeString(metadata),
action: data.action,
resourceId: data.resourceId,
reason: data.reason,
location: data.location,
originalRequestURL: body.originalRequestURL,
scheme: body.scheme,
host: body.host,
path: body.path,
method: body.method,
ip: clientIp,
location: sanitizeString(data.location),
originalRequestURL: sanitizeString(body.originalRequestURL) ?? "",
scheme: sanitizeString(body.scheme) ?? "",
host: sanitizeString(body.host) ?? "",
path: sanitizeString(body.path) ?? "",
method: sanitizeString(body.method) ?? "",
ip: sanitizeString(clientIp),
tls: body.tls
});
// Flush immediately if buffer is full, otherwise schedule a flush
if (auditLogBuffer.length >= BATCH_SIZE) {
// Fire and forget - don't block the caller

View File

@@ -38,7 +38,7 @@ import {
enforceResourceSessionLength
} from "#dynamic/lib/checkOrgAccessPolicy";
import { logRequestAudit } from "./logRequestAudit";
import cache from "@server/lib/cache";
import { localCache } from "#dynamic/lib/cache";
import { APP_VERSION } from "@server/lib/consts";
import { isSubscribed } from "#dynamic/lib/isSubscribed";
import { tierMatrix } from "@server/lib/billing/tierMatrix";
@@ -138,7 +138,7 @@ export async function verifyResourceSession(
headerAuthExtendedCompatibility: ResourceHeaderAuthExtendedCompatibility | null;
org: Org;
}
| undefined = cache.get(resourceCacheKey);
| undefined = localCache.get(resourceCacheKey);
if (!resourceData) {
const result = await getResourceByDomain(cleanHost);
@@ -162,7 +162,7 @@ export async function verifyResourceSession(
}
resourceData = result;
cache.set(resourceCacheKey, resourceData, 5);
localCache.set(resourceCacheKey, resourceData, 5);
}
const {
@@ -406,7 +406,7 @@ export async function verifyResourceSession(
// check for HTTP Basic Auth header
const clientHeaderAuthKey = `headerAuth:${clientHeaderAuth}`;
if (headerAuth && clientHeaderAuth) {
if (cache.get(clientHeaderAuthKey)) {
if (localCache.get(clientHeaderAuthKey)) {
logger.debug(
"Resource allowed because header auth is valid (cached)"
);
@@ -429,7 +429,7 @@ export async function verifyResourceSession(
headerAuth.headerAuthHash
)
) {
cache.set(clientHeaderAuthKey, clientHeaderAuth, 5);
localCache.set(clientHeaderAuthKey, clientHeaderAuth, 5);
logger.debug("Resource allowed because header auth is valid");
logRequestAudit(
@@ -521,7 +521,7 @@ export async function verifyResourceSession(
if (resourceSessionToken) {
const sessionCacheKey = `session:${resourceSessionToken}`;
let resourceSession: any = cache.get(sessionCacheKey);
let resourceSession: any = localCache.get(sessionCacheKey);
if (!resourceSession) {
const result = await validateResourceSessionToken(
@@ -530,7 +530,7 @@ export async function verifyResourceSession(
);
resourceSession = result?.resourceSession;
cache.set(sessionCacheKey, resourceSession, 5);
localCache.set(sessionCacheKey, resourceSession, 5);
}
if (resourceSession?.isRequestToken) {
@@ -663,7 +663,7 @@ export async function verifyResourceSession(
}:${resource.resourceId}`;
let allowedUserData: BasicUserData | null | undefined =
cache.get(userAccessCacheKey);
localCache.get(userAccessCacheKey);
if (allowedUserData === undefined) {
allowedUserData = await isUserAllowedToAccessResource(
@@ -672,7 +672,7 @@ export async function verifyResourceSession(
resourceData.org
);
cache.set(userAccessCacheKey, allowedUserData, 5);
localCache.set(userAccessCacheKey, allowedUserData, 5);
}
if (
@@ -985,11 +985,11 @@ async function checkRules(
): Promise<"ACCEPT" | "DROP" | "PASS" | undefined> {
const ruleCacheKey = `rules:${resourceId}`;
let rules: ResourceRule[] | undefined = cache.get(ruleCacheKey);
let rules: ResourceRule[] | undefined = localCache.get(ruleCacheKey);
if (!rules) {
rules = await getResourceRules(resourceId);
cache.set(ruleCacheKey, rules, 5);
localCache.set(ruleCacheKey, rules, 5);
}
if (rules.length === 0) {
@@ -1219,13 +1219,13 @@ async function isIpInAsn(
async function getAsnFromIp(ip: string): Promise<number | undefined> {
const asnCacheKey = `asn:${ip}`;
let cachedAsn: number | undefined = cache.get(asnCacheKey);
let cachedAsn: number | undefined = localCache.get(asnCacheKey);
if (!cachedAsn) {
cachedAsn = await getAsnForIp(ip); // do it locally
// Cache for longer since IP ASN doesn't change frequently
if (cachedAsn) {
cache.set(asnCacheKey, cachedAsn, 300); // 5 minutes
localCache.set(asnCacheKey, cachedAsn, 300); // 5 minutes
}
}
@@ -1235,14 +1235,14 @@ async function getAsnFromIp(ip: string): Promise<number | undefined> {
async function getCountryCodeFromIp(ip: string): Promise<string | undefined> {
const geoIpCacheKey = `geoip:${ip}`;
let cachedCountryCode: string | undefined = cache.get(geoIpCacheKey);
let cachedCountryCode: string | undefined = localCache.get(geoIpCacheKey);
if (!cachedCountryCode) {
cachedCountryCode = await getCountryCodeForIp(ip); // do it locally
// Only cache successful lookups to avoid filling cache with undefined values
if (cachedCountryCode) {
// Cache for longer since IP geolocation doesn't change frequently
cache.set(geoIpCacheKey, cachedCountryCode, 300); // 5 minutes
localCache.set(geoIpCacheKey, cachedCountryCode, 300); // 5 minutes
}
}
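
Switching this hot path from the adaptive cache to localCache keeps every per-request lookup synchronous, avoiding a Redis round trip while proxying. The read-through shape, sketched against the rules lookup above:

// Sketch: local-only read-through; the 5-second TTL bounds staleness, matching the code above.
async function getRulesCached(resourceId: number): Promise<ResourceRule[]> {
    const key = `rules:${resourceId}`;
    let rules = localCache.get<ResourceRule[]>(key); // synchronous, in-process
    if (!rules) {
        rules = await getResourceRules(resourceId); // DB fallback used in this file
        localCache.set(key, rules, 5);
    }
    return rules;
}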

View File

@@ -20,7 +20,7 @@ registry.registerPath({
method: "put",
path: "/org/{orgId}/blueprint",
description: "Apply a base64 encoded JSON blueprint to an organization",
tags: [OpenAPITags.Org, OpenAPITags.Blueprint],
tags: [OpenAPITags.Blueprint],
request: {
params: applyBlueprintParamsSchema,
body: {

View File

@@ -43,7 +43,7 @@ registry.registerPath({
method: "put",
path: "/org/{orgId}/blueprint",
description: "Create and apply a YAML blueprint to an organization",
tags: [OpenAPITags.Org, OpenAPITags.Blueprint],
tags: [OpenAPITags.Blueprint],
request: {
params: applyBlueprintParamsSchema,
body: {

View File

@@ -53,7 +53,7 @@ registry.registerPath({
method: "get",
path: "/org/{orgId}/blueprint/{blueprintId}",
description: "Get a blueprint by its blueprint ID.",
tags: [OpenAPITags.Org, OpenAPITags.Blueprint],
tags: [OpenAPITags.Blueprint],
request: {
params: getBlueprintSchema
},

View File

@@ -67,7 +67,7 @@ registry.registerPath({
method: "get",
path: "/org/{orgId}/blueprints",
description: "List all blueprints for a organization.",
tags: [OpenAPITags.Org, OpenAPITags.Blueprint],
tags: [OpenAPITags.Blueprint],
request: {
params: z.object({
orgId: z.string()

View File

@@ -48,7 +48,7 @@ registry.registerPath({
method: "put",
path: "/org/{orgId}/client",
description: "Create a new client for an organization.",
tags: [OpenAPITags.Client, OpenAPITags.Org],
tags: [OpenAPITags.Client],
request: {
params: createClientParamsSchema,
body: {

View File

@@ -49,7 +49,7 @@ registry.registerPath({
path: "/org/{orgId}/user/{userId}/client",
description:
"Create a new client for a user and associate it with an existing olm.",
tags: [OpenAPITags.Client, OpenAPITags.Org, OpenAPITags.User],
tags: [OpenAPITags.Client],
request: {
params: paramsSchema,
body: {

View File

@@ -243,7 +243,7 @@ registry.registerPath({
path: "/org/{orgId}/client/{niceId}",
description:
"Get a client by orgId and niceId. NiceId is a readable ID for the site and unique on a per org basis.",
tags: [OpenAPITags.Org, OpenAPITags.Site],
tags: [OpenAPITags.Site],
request: {
params: z.object({
orgId: z.string(),

View File

@@ -70,7 +70,7 @@ async function getLatestOlmVersion(): Promise<string | null> {
tags = tags.filter((version) => !version.name.includes("rc"));
const latestVersion = tags[0].name;
olmVersionCache.set("latestOlmVersion", latestVersion);
olmVersionCache.set("latestOlmVersion", latestVersion, 3600);
return latestVersion;
} catch (error: any) {
@@ -119,12 +119,12 @@ const listClientsSchema = z.object({
}),
query: z.string().optional(),
sort_by: z
.enum(["megabytesIn", "megabytesOut"])
.enum(["name", "megabytesIn", "megabytesOut"])
.optional()
.catch(undefined)
.openapi({
type: "string",
enum: ["megabytesIn", "megabytesOut"],
enum: ["name", "megabytesIn", "megabytesOut"],
description: "Field to sort by"
}),
order: z
@@ -237,7 +237,7 @@ registry.registerPath({
method: "get",
path: "/org/{orgId}/clients",
description: "List all clients for an organization.",
tags: [OpenAPITags.Client, OpenAPITags.Org],
tags: [OpenAPITags.Client],
request: {
query: listClientsSchema,
params: listClientsParamsSchema
@@ -363,14 +363,14 @@ export async function listClients(
const countQuery = db.$count(baseQuery.as("filtered_clients"));
const listMachinesQuery = baseQuery
.limit(page)
.limit(pageSize)
.offset(pageSize * (page - 1))
.orderBy(
sort_by
? order === "asc"
? asc(clients[sort_by])
: desc(clients[sort_by])
: asc(clients.clientId)
: asc(clients.name)
);
const [clientsList, totalCount] = await Promise.all([
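
The .limit(page) fix above matters as soon as a user moves past the first page: the old code capped each page at the page number itself. A quick worked example of the corrected offset math, with pageSize and page as in the handler:

// pageSize = 25, page = 3
//   before: LIMIT 3  OFFSET 50   -> at most 3 rows
//   after:  LIMIT 25 OFFSET 50   -> rows 51 through 75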

View File

@@ -71,7 +71,7 @@ async function getLatestOlmVersion(): Promise<string | null> {
tags = tags.filter((version) => !version.name.includes("rc"));
const latestVersion = tags[0].name;
olmVersionCache.set("latestOlmVersion", latestVersion);
olmVersionCache.set("latestOlmVersion", latestVersion, 3600);
return latestVersion;
} catch (error: any) {
@@ -256,7 +256,7 @@ registry.registerPath({
method: "get",
path: "/org/{orgId}/user-devices",
description: "List all user devices for an organization.",
tags: [OpenAPITags.Client, OpenAPITags.Org],
tags: [OpenAPITags.Client],
request: {
query: listUserDevicesSchema,
params: listUserDevicesParamsSchema

View File

@@ -23,7 +23,7 @@ registry.registerPath({
method: "get",
path: "/org/{orgId}/pick-client-defaults",
description: "Return pre-requisite data for creating a client.",
tags: [OpenAPITags.Client, OpenAPITags.Site],
tags: [OpenAPITags.Client],
request: {
params: pickClientDefaultsSchema
},

View File

@@ -1,51 +1,38 @@
import { sendToClient } from "#dynamic/routers/ws";
import { db, olms, Transaction } from "@server/db";
import { canCompress } from "@server/lib/clientVersionChecks";
import { Alias, SubnetProxyTarget } from "@server/lib/ip";
import logger from "@server/logger";
import { eq } from "drizzle-orm";
const BATCH_SIZE = 50;
const BATCH_DELAY_MS = 50;
function sleep(ms: number): Promise<void> {
return new Promise((resolve) => setTimeout(resolve, ms));
}
function chunkArray<T>(array: T[], size: number): T[][] {
const chunks: T[][] = [];
for (let i = 0; i < array.length; i += size) {
chunks.push(array.slice(i, i + size));
}
return chunks;
}
export async function addTargets(newtId: string, targets: SubnetProxyTarget[]) {
const batches = chunkArray(targets, BATCH_SIZE);
for (let i = 0; i < batches.length; i++) {
if (i > 0) {
await sleep(BATCH_DELAY_MS);
}
await sendToClient(newtId, {
export async function addTargets(
newtId: string,
targets: SubnetProxyTarget[],
version?: string | null
) {
await sendToClient(
newtId,
{
type: `newt/wg/targets/add`,
data: batches[i]
}, { incrementConfigVersion: true });
}
data: targets
},
{ incrementConfigVersion: true, compress: canCompress(version, "newt") }
);
}
export async function removeTargets(
newtId: string,
targets: SubnetProxyTarget[]
targets: SubnetProxyTarget[],
version?: string | null
) {
const batches = chunkArray(targets, BATCH_SIZE);
for (let i = 0; i < batches.length; i++) {
if (i > 0) {
await sleep(BATCH_DELAY_MS);
}
await sendToClient(newtId, {
await sendToClient(
newtId,
{
type: `newt/wg/targets/remove`,
data: batches[i]
},{ incrementConfigVersion: true });
}
data: targets
},
{ incrementConfigVersion: true, compress: canCompress(version, "newt") }
);
}
export async function updateTargets(
@@ -53,26 +40,22 @@ export async function updateTargets(
targets: {
oldTargets: SubnetProxyTarget[];
newTargets: SubnetProxyTarget[];
}
},
version?: string | null
) {
const oldBatches = chunkArray(targets.oldTargets, BATCH_SIZE);
const newBatches = chunkArray(targets.newTargets, BATCH_SIZE);
const maxBatches = Math.max(oldBatches.length, newBatches.length);
for (let i = 0; i < maxBatches; i++) {
if (i > 0) {
await sleep(BATCH_DELAY_MS);
}
await sendToClient(newtId, {
await sendToClient(
newtId,
{
type: `newt/wg/targets/update`,
data: {
oldTargets: oldBatches[i] || [],
newTargets: newBatches[i] || []
oldTargets: targets.oldTargets,
newTargets: targets.newTargets
}
}, { incrementConfigVersion: true }).catch((error) => {
logger.warn(`Error sending message:`, error);
});
}
},
{ incrementConfigVersion: true, compress: canCompress(version, "newt") }
).catch((error) => {
logger.warn(`Error sending message:`, error);
});
}
export async function addPeerData(
@@ -80,7 +63,8 @@ export async function addPeerData(
siteId: number,
remoteSubnets: string[],
aliases: Alias[],
olmId?: string
olmId?: string,
version?: string | null
) {
if (!olmId) {
const [olm] = await db
@@ -92,16 +76,21 @@ export async function addPeerData(
return; // ignore this because an olm might not be associated with the client anymore
}
olmId = olm.olmId;
version = olm.version;
}
await sendToClient(olmId, {
type: `olm/wg/peer/data/add`,
data: {
siteId: siteId,
remoteSubnets: remoteSubnets,
aliases: aliases
}
}, { incrementConfigVersion: true }).catch((error) => {
await sendToClient(
olmId,
{
type: `olm/wg/peer/data/add`,
data: {
siteId: siteId,
remoteSubnets: remoteSubnets,
aliases: aliases
}
},
{ incrementConfigVersion: true, compress: canCompress(version, "olm") }
).catch((error) => {
logger.warn(`Error sending message:`, error);
});
}
@@ -111,7 +100,8 @@ export async function removePeerData(
siteId: number,
remoteSubnets: string[],
aliases: Alias[],
olmId?: string
olmId?: string,
version?: string | null
) {
if (!olmId) {
const [olm] = await db
@@ -123,16 +113,21 @@ export async function removePeerData(
return;
}
olmId = olm.olmId;
version = olm.version;
}
await sendToClient(olmId, {
type: `olm/wg/peer/data/remove`,
data: {
siteId: siteId,
remoteSubnets: remoteSubnets,
aliases: aliases
}
}, { incrementConfigVersion: true }).catch((error) => {
await sendToClient(
olmId,
{
type: `olm/wg/peer/data/remove`,
data: {
siteId: siteId,
remoteSubnets: remoteSubnets,
aliases: aliases
}
},
{ incrementConfigVersion: true, compress: canCompress(version, "olm") }
).catch((error) => {
logger.warn(`Error sending message:`, error);
});
}
@@ -152,7 +147,8 @@ export async function updatePeerData(
newAliases: Alias[];
}
| undefined,
olmId?: string
olmId?: string,
version?: string | null
) {
if (!olmId) {
const [olm] = await db
@@ -164,16 +160,21 @@ export async function updatePeerData(
return;
}
olmId = olm.olmId;
version = olm.version;
}
await sendToClient(olmId, {
type: `olm/wg/peer/data/update`,
data: {
siteId: siteId,
...remoteSubnets,
...aliases
}
}, { incrementConfigVersion: true }).catch((error) => {
await sendToClient(
olmId,
{
type: `olm/wg/peer/data/update`,
data: {
siteId: siteId,
...remoteSubnets,
...aliases
}
},
{ incrementConfigVersion: true, compress: canCompress(version, "olm") }
).catch((error) => {
logger.warn(`Error sending message:`, error);
});
}
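
Both functions now send all targets in a single message and gate compression on the client's self-reported version via canCompress, which is also what makes the old 50-item batching unnecessary. The diff does not show canCompress itself; the sketch below is a rough guess at what such a version gate looks like, and the minimum versions in it are placeholders rather than the real thresholds:

// Illustrative only: the real canCompress lives in
// @server/lib/clientVersionChecks and its thresholds are not shown here.
const MIN_COMPRESS_VERSION: Record<"newt" | "olm", string> = {
    newt: "1.0.0", // assumed placeholder
    olm: "1.0.0" // assumed placeholder
};

function canCompressSketch(
    version: string | null | undefined,
    clientType: "newt" | "olm"
): boolean {
    if (!version) {
        return false; // unknown clients get uncompressed payloads
    }
    const parse = (v: string) => v.replace(/^v/, "").split(".").map(Number);
    const [maj = 0, min = 0, pat = 0] = parse(version);
    const [minMaj = 0, minMin = 0, minPat = 0] = parse(
        MIN_COMPRESS_VERSION[clientType]
    );
    if (maj !== minMaj) return maj > minMaj;
    if (min !== minMin) return min > minMin;
    return pat >= minPat;
}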

View File

@@ -40,7 +40,8 @@ async function queryDomains(orgId: string, limit: number, offset: number) {
tries: domains.tries,
configManaged: domains.configManaged,
certResolver: domains.certResolver,
preferWildcardCert: domains.preferWildcardCert
preferWildcardCert: domains.preferWildcardCert,
errorMessage: domains.errorMessage
})
.from(orgDomains)
.where(eq(orgDomains.orgId, orgId))
@@ -59,7 +60,7 @@ registry.registerPath({
method: "get",
path: "/org/{orgId}/domains",
description: "List all domains for a organization.",
tags: [OpenAPITags.Org],
tags: [OpenAPITags.Domain],
request: {
params: z.object({
orgId: z.string()

View File

@@ -125,7 +125,7 @@ export async function generateRelayMappings(exitNode: ExitNode) {
// Add site as a destination for this client
const destination: PeerDestination = {
destinationIP: site.subnet.split("/")[0],
destinationPort: site.listenPort
destinationPort: site.listenPort || 1 // this satisfies gerbil for now but should be reevaluated
};
// Check if this destination is already in the array to avoid duplicates
@@ -165,7 +165,7 @@ export async function generateRelayMappings(exitNode: ExitNode) {
const destination: PeerDestination = {
destinationIP: peer.subnet.split("/")[0],
destinationPort: peer.listenPort
destinationPort: peer.listenPort || 1 // this satisfies gerbil for now but should be reevaluated
};
// Check for duplicates

View File

@@ -1,5 +1,5 @@
import { Request, Response, NextFunction } from "express";
import { eq, and, lt, inArray, sql } from "drizzle-orm";
import { eq, sql } from "drizzle-orm";
import { sites } from "@server/db";
import { db } from "@server/db";
import logger from "@server/logger";
@@ -11,19 +11,31 @@ import { FeatureId } from "@server/lib/billing/features";
import { checkExitNodeOrg } from "#dynamic/lib/exitNodes";
import { build } from "@server/build";
// Track sites that are already offline to avoid unnecessary queries
const offlineSites = new Set<string>();
// Retry configuration for deadlock handling
const MAX_RETRIES = 3;
const BASE_DELAY_MS = 50;
interface PeerBandwidth {
publicKey: string;
bytesIn: number;
bytesOut: number;
}
interface AccumulatorEntry {
bytesIn: number;
bytesOut: number;
/** Present when the update came through a remote exit node. */
exitNodeId?: number;
/** Whether to record egress usage for billing purposes. */
calcUsage: boolean;
}
// Retry configuration for deadlock handling
const MAX_RETRIES = 3;
const BASE_DELAY_MS = 50;
// How often to flush accumulated bandwidth data to the database
const FLUSH_INTERVAL_MS = 30_000; // 30 seconds
// In-memory accumulator: publicKey -> AccumulatorEntry
let accumulator = new Map<string, AccumulatorEntry>();
/**
* Check if an error is a deadlock error
*/
@@ -63,6 +75,220 @@ async function withDeadlockRetry<T>(
}
}
/**
* Flush all accumulated site bandwidth data to the database.
*
* Swaps out the accumulator before writing so that any bandwidth messages
* received during the flush are captured in the new accumulator rather than
* being lost or causing contention. Entries that fail to write are re-queued
* back into the accumulator so they will be retried on the next flush.
*
* This function is exported so that the application's graceful-shutdown
* cleanup handler can call it before the process exits.
*/
export async function flushSiteBandwidthToDb(): Promise<void> {
if (accumulator.size === 0) {
return;
}
// Atomically swap out the accumulator so new data keeps flowing in
// while we write the snapshot to the database.
const snapshot = accumulator;
accumulator = new Map<string, AccumulatorEntry>();
const currentTime = new Date().toISOString();
// Sort by publicKey for consistent lock ordering across concurrent
// writers — deadlock-prevention strategy.
const sortedEntries = [...snapshot.entries()].sort(([a], [b]) =>
a.localeCompare(b)
);
logger.debug(
`Flushing accumulated bandwidth data for ${sortedEntries.length} site(s) to the database`
);
// Aggregate billing usage by org, collected during the DB update loop.
const orgUsageMap = new Map<string, number>();
for (const [publicKey, { bytesIn, bytesOut, exitNodeId, calcUsage }] of sortedEntries) {
try {
const updatedSite = await withDeadlockRetry(async () => {
const [result] = await db
.update(sites)
.set({
megabytesOut: sql`COALESCE(${sites.megabytesOut}, 0) + ${bytesIn}`,
megabytesIn: sql`COALESCE(${sites.megabytesIn}, 0) + ${bytesOut}`,
lastBandwidthUpdate: currentTime,
})
.where(eq(sites.pubKey, publicKey))
.returning({
orgId: sites.orgId,
siteId: sites.siteId
});
return result;
}, `flush bandwidth for site ${publicKey}`);
if (updatedSite) {
if (exitNodeId) {
const notAllowed = await checkExitNodeOrg(
exitNodeId,
updatedSite.orgId
);
if (notAllowed) {
logger.warn(
`Exit node ${exitNodeId} is not allowed for org ${updatedSite.orgId}`
);
// Skip usage tracking for this site but continue
// processing the rest.
continue;
}
}
if (calcUsage) {
const totalBandwidth = bytesIn + bytesOut;
const current = orgUsageMap.get(updatedSite.orgId) ?? 0;
orgUsageMap.set(updatedSite.orgId, current + totalBandwidth);
}
}
} catch (error) {
logger.error(
`Failed to flush bandwidth for site ${publicKey}:`,
error
);
// Re-queue the failed entry so it is retried on the next flush
// rather than silently dropped.
const existing = accumulator.get(publicKey);
if (existing) {
existing.bytesIn += bytesIn;
existing.bytesOut += bytesOut;
} else {
accumulator.set(publicKey, {
bytesIn,
bytesOut,
exitNodeId,
calcUsage
});
}
}
}
// Process billing usage updates outside the site-update loop to keep
// lock scope small and concerns separated.
if (orgUsageMap.size > 0) {
// Sort org IDs for consistent lock ordering.
const sortedOrgIds = [...orgUsageMap.keys()].sort();
for (const orgId of sortedOrgIds) {
try {
const totalBandwidth = orgUsageMap.get(orgId)!;
const bandwidthUsage = await usageService.add(
orgId,
FeatureId.EGRESS_DATA_MB,
totalBandwidth
);
if (bandwidthUsage) {
// Fire-and-forget — don't block the flush on limit checking.
usageService
.checkLimitSet(
orgId,
FeatureId.EGRESS_DATA_MB,
bandwidthUsage
)
.catch((error: any) => {
logger.error(
`Error checking bandwidth limits for org ${orgId}:`,
error
);
});
}
} catch (error) {
logger.error(
`Error processing usage for org ${orgId}:`,
error
);
// Continue with other orgs.
}
}
}
}
// ---------------------------------------------------------------------------
// Periodic flush timer
// ---------------------------------------------------------------------------
const flushTimer = setInterval(async () => {
try {
await flushSiteBandwidthToDb();
} catch (error) {
logger.error(
"Unexpected error during periodic site bandwidth flush:",
error
);
}
}, FLUSH_INTERVAL_MS);
// Allow the process to exit normally even while the timer is pending.
// The graceful-shutdown path (see server/cleanup.ts) will call
// flushSiteBandwidthToDb() explicitly before process.exit(), so no data
// is lost.
flushTimer.unref();
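// For reference, the graceful-shutdown side of this contract is expected to
// look roughly like the sketch below; the actual hook lives in the server's
// cleanup module and may register more handlers than shown.
//
//   process.on("SIGTERM", async () => {
//       await flushSiteBandwidthToDb();
//       process.exit(0);
//   });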
// ---------------------------------------------------------------------------
// Public API
// ---------------------------------------------------------------------------
/**
* Accumulate bandwidth data reported by a gerbil or remote exit node.
*
 * Only peers that actually transferred data (bytesIn > 0 or bytesOut > 0) are
 * added to the accumulator; peers with no activity are silently ignored, so the
* flush will only write rows that have genuinely changed.
*
* The function is intentionally synchronous in its fast path so that the
* HTTP handler can respond immediately without waiting for any I/O.
*/
export async function updateSiteBandwidth(
bandwidthData: PeerBandwidth[],
calcUsageAndLimits: boolean,
exitNodeId?: number
): Promise<void> {
for (const { publicKey, bytesIn, bytesOut } of bandwidthData) {
// Skip peers that haven't transferred any data — writing zeros to the
// database would be a no-op anyway.
if (bytesIn <= 0 && bytesOut <= 0) {
continue;
}
const existing = accumulator.get(publicKey);
if (existing) {
existing.bytesIn += bytesIn;
existing.bytesOut += bytesOut;
// Retain the most-recent exitNodeId for this peer.
if (exitNodeId !== undefined) {
existing.exitNodeId = exitNodeId;
}
// Once calcUsage has been requested for a peer, keep it set for
// the lifetime of this flush window.
if (calcUsageAndLimits) {
existing.calcUsage = true;
}
} else {
accumulator.set(publicKey, {
bytesIn,
bytesOut,
exitNodeId,
calcUsage: calcUsageAndLimits
});
}
}
}
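// Illustrative call shape (values made up): one gerbil report for one peer
// accumulates here and is written out on the next flush.
//
//   await updateSiteBandwidth(
//       [{ publicKey: "wg-public-key", bytesIn: 42, bytesOut: 7 }],
//       /* calcUsageAndLimits */ false
//   );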
// ---------------------------------------------------------------------------
// HTTP handler
// ---------------------------------------------------------------------------
export const receiveBandwidth = async (
req: Request,
res: Response,
@@ -75,7 +301,9 @@ export const receiveBandwidth = async (
throw new Error("Invalid bandwidth data");
}
await updateSiteBandwidth(bandwidthData, build == "saas"); // we are checking the usage on saas only
// Accumulate in memory; the periodic timer (and the shutdown hook)
// will write to the database.
await updateSiteBandwidth(bandwidthData, build == "saas");
return response(res, {
data: {},
@@ -94,201 +322,3 @@ export const receiveBandwidth = async (
);
}
};
export async function updateSiteBandwidth(
bandwidthData: PeerBandwidth[],
calcUsageAndLimits: boolean,
exitNodeId?: number
) {
const currentTime = new Date();
const oneMinuteAgo = new Date(currentTime.getTime() - 60000); // 1 minute ago
// Sort bandwidth data by publicKey to ensure consistent lock ordering across all instances
// This is critical for preventing deadlocks when multiple instances update the same sites
const sortedBandwidthData = [...bandwidthData].sort((a, b) =>
a.publicKey.localeCompare(b.publicKey)
);
// First, handle sites that are actively reporting bandwidth
const activePeers = sortedBandwidthData.filter((peer) => peer.bytesIn > 0);
// Aggregate usage data by organization (collected outside transaction)
const orgUsageMap = new Map<string, number>();
if (activePeers.length > 0) {
// Remove any active peers from offline tracking since they're sending data
activePeers.forEach((peer) => offlineSites.delete(peer.publicKey));
// Update each active site individually with retry logic
// This reduces transaction scope and allows retries per-site
for (const peer of activePeers) {
try {
const updatedSite = await withDeadlockRetry(async () => {
const [result] = await db
.update(sites)
.set({
megabytesOut: sql`${sites.megabytesOut} + ${peer.bytesIn}`,
megabytesIn: sql`${sites.megabytesIn} + ${peer.bytesOut}`,
lastBandwidthUpdate: currentTime.toISOString(),
online: true
})
.where(eq(sites.pubKey, peer.publicKey))
.returning({
online: sites.online,
orgId: sites.orgId,
siteId: sites.siteId,
lastBandwidthUpdate: sites.lastBandwidthUpdate
});
return result;
}, `update active site ${peer.publicKey}`);
if (updatedSite) {
if (exitNodeId) {
const notAllowed = await checkExitNodeOrg(
exitNodeId,
updatedSite.orgId
);
if (notAllowed) {
logger.warn(
`Exit node ${exitNodeId} is not allowed for org ${updatedSite.orgId}`
);
// Skip this site but continue processing others
continue;
}
}
// Aggregate bandwidth usage for the org
const totalBandwidth = peer.bytesIn + peer.bytesOut;
const currentOrgUsage =
orgUsageMap.get(updatedSite.orgId) || 0;
orgUsageMap.set(
updatedSite.orgId,
currentOrgUsage + totalBandwidth
);
}
} catch (error) {
logger.error(
`Failed to update bandwidth for site ${peer.publicKey}:`,
error
);
// Continue with other sites
}
}
}
// Process usage updates outside of site update transactions
// This separates the concerns and reduces lock contention
if (calcUsageAndLimits && orgUsageMap.size > 0) {
// Sort org IDs to ensure consistent lock ordering
const allOrgIds = [...new Set([...orgUsageMap.keys()])].sort();
for (const orgId of allOrgIds) {
try {
// Process bandwidth usage for this org
const totalBandwidth = orgUsageMap.get(orgId);
if (totalBandwidth) {
const bandwidthUsage = await usageService.add(
orgId,
FeatureId.EGRESS_DATA_MB,
totalBandwidth
);
if (bandwidthUsage) {
// Fire and forget - don't block on limit checking
usageService
.checkLimitSet(
orgId,
FeatureId.EGRESS_DATA_MB,
bandwidthUsage
)
.catch((error: any) => {
logger.error(
`Error checking bandwidth limits for org ${orgId}:`,
error
);
});
}
}
} catch (error) {
logger.error(`Error processing usage for org ${orgId}:`, error);
// Continue with other orgs
}
}
}
// Handle sites that reported zero bandwidth but need online status updated
const zeroBandwidthPeers = sortedBandwidthData.filter(
(peer) => peer.bytesIn === 0 && !offlineSites.has(peer.publicKey)
);
if (zeroBandwidthPeers.length > 0) {
// Fetch all zero bandwidth sites in one query
const zeroBandwidthSites = await db
.select()
.from(sites)
.where(
inArray(
sites.pubKey,
zeroBandwidthPeers.map((p) => p.publicKey)
)
);
// Sort by siteId to ensure consistent lock ordering
const sortedZeroBandwidthSites = zeroBandwidthSites.sort(
(a, b) => a.siteId - b.siteId
);
for (const site of sortedZeroBandwidthSites) {
let newOnlineStatus = site.online;
// Check if site should go offline based on last bandwidth update WITH DATA
if (site.lastBandwidthUpdate) {
const lastUpdateWithData = new Date(site.lastBandwidthUpdate);
if (lastUpdateWithData < oneMinuteAgo) {
newOnlineStatus = false;
}
} else {
// No previous data update recorded, set to offline
newOnlineStatus = false;
}
// Only update online status if it changed
if (site.online !== newOnlineStatus) {
try {
const updatedSite = await withDeadlockRetry(async () => {
const [result] = await db
.update(sites)
.set({
online: newOnlineStatus
})
.where(eq(sites.siteId, site.siteId))
.returning();
return result;
}, `update offline status for site ${site.siteId}`);
if (updatedSite && exitNodeId) {
const notAllowed = await checkExitNodeOrg(
exitNodeId,
updatedSite.orgId
);
if (notAllowed) {
logger.warn(
`Exit node ${exitNodeId} is not allowed for org ${updatedSite.orgId}`
);
}
}
// If site went offline, add it to our tracking set
if (!newOnlineStatus && site.pubKey) {
offlineSites.add(site.pubKey);
}
} catch (error) {
logger.error(
`Failed to update offline status for site ${site.siteId}:`,
error
);
// Continue with other sites
}
}
}
}
}
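
Taken together, the rewrite replaces one database write per reporting peer per request with at most one write per active peer per 30-second flush window. Reduced to a self-contained sketch (every name below is illustrative, not from the codebase), the swap-and-flush technique is:

type Entry = { bytesIn: number; bytesOut: number };

let acc = new Map<string, Entry>();

function record(key: string, bytesIn: number, bytesOut: number): void {
    const e = acc.get(key);
    if (e) {
        e.bytesIn += bytesIn;
        e.bytesOut += bytesOut;
    } else {
        acc.set(key, { bytesIn, bytesOut });
    }
}

async function flush(
    write: (key: string, entry: Entry) => Promise<void>
): Promise<void> {
    const snapshot = acc; // swap first: new reports land in a fresh map
    acc = new Map();
    for (const [key, entry] of snapshot) {
        try {
            await write(key, entry);
        } catch {
            // Re-queue so a failed write is retried on the next flush.
            record(key, entry.bytesIn, entry.bytesOut);
        }
    }
}

The trade-off is that a hard crash can lose at most one flush window of counters; the explicit flush in the SIGTERM/SIGINT cleanup path narrows that to abnormal exits only.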

View File

@@ -112,7 +112,7 @@ export async function updateHolePunch(
destinations: destinations
});
} catch (error) {
// logger.error(error); // FIX THIS
logger.error(error);
return next(
createHttpError(
HttpCode.INTERNAL_SERVER_ERROR,
@@ -262,7 +262,7 @@ export async function updateAndGenerateEndpointDestinations(
if (site.subnet && site.listenPort) {
destinations.push({
destinationIP: site.subnet.split("/")[0],
destinationPort: site.listenPort
destinationPort: site.listenPort || 1 // this satisfies gerbil for now but should be reevaluated
});
}
}
@@ -339,10 +339,10 @@ export async function updateAndGenerateEndpointDestinations(
handleSiteEndpointChange(newt.siteId, updatedSite.endpoint!);
}
if (!updatedSite || !updatedSite.subnet) {
logger.warn(`Site not found: ${newt.siteId}`);
throw new Error("Site not found");
}
// if (!updatedSite || !updatedSite.subnet) {
// logger.warn(`Site not found: ${newt.siteId}`);
// throw new Error("Site not found");
// }
// Find all clients that connect to this site
// const sitesClientPairs = await db

View File

@@ -27,7 +27,7 @@ registry.registerPath({
method: "put",
path: "/idp/{idpId}/org/{orgId}",
description: "Create an IDP policy for an existing IDP on an organization.",
tags: [OpenAPITags.Idp],
tags: [OpenAPITags.GlobalIdp],
request: {
params: paramsSchema,
body: {

View File

@@ -37,7 +37,7 @@ registry.registerPath({
method: "put",
path: "/idp/oidc",
description: "Create an OIDC IdP.",
tags: [OpenAPITags.Idp],
tags: [OpenAPITags.GlobalIdp],
request: {
body: {
content: {

View File

@@ -21,7 +21,7 @@ registry.registerPath({
method: "delete",
path: "/idp/{idpId}",
description: "Delete IDP.",
tags: [OpenAPITags.Idp],
tags: [OpenAPITags.GlobalIdp],
request: {
params: paramsSchema
},

View File

@@ -19,7 +19,7 @@ registry.registerPath({
method: "delete",
path: "/idp/{idpId}/org/{orgId}",
description: "Create an OIDC IdP for an organization.",
tags: [OpenAPITags.Idp],
tags: [OpenAPITags.GlobalIdp],
request: {
params: paramsSchema
},

View File

@@ -34,7 +34,7 @@ registry.registerPath({
method: "get",
path: "/idp/{idpId}",
description: "Get an IDP by its IDP ID.",
tags: [OpenAPITags.Idp],
tags: [OpenAPITags.GlobalIdp],
request: {
params: paramsSchema
},

View File

@@ -48,7 +48,7 @@ registry.registerPath({
method: "get",
path: "/idp/{idpId}/org",
description: "List all org policies on an IDP.",
tags: [OpenAPITags.Idp],
tags: [OpenAPITags.GlobalIdp],
request: {
params: paramsSchema,
query: querySchema

View File

@@ -58,7 +58,7 @@ registry.registerPath({
method: "get",
path: "/idp",
description: "List all IDP in the system.",
tags: [OpenAPITags.Idp],
tags: [OpenAPITags.GlobalIdp],
request: {
query: querySchema
},

View File

@@ -26,7 +26,7 @@ registry.registerPath({
method: "post",
path: "/idp/{idpId}/org/{orgId}",
description: "Update an IDP org policy.",
tags: [OpenAPITags.Idp],
tags: [OpenAPITags.GlobalIdp],
request: {
params: paramsSchema,
body: {

View File

@@ -42,7 +42,7 @@ registry.registerPath({
method: "post",
path: "/idp/{idpId}/oidc",
description: "Update an OIDC IdP.",
tags: [OpenAPITags.Idp],
tags: [OpenAPITags.GlobalIdp],
request: {
params: paramsSchema,
body: {

Some files were not shown because too many files have changed in this diff.