Owen
2025-10-04 18:36:44 -07:00
parent 3123f858bb
commit c2c907852d
320 changed files with 35785 additions and 2984 deletions

View File

@@ -69,9 +69,16 @@ export async function applyBlueprint(
`Updating target ${target.targetId} on site ${site.sites.siteId}`
);
// Look for a matching target health check for this target in the healthchecksToUpdate array
const matchingHealthcheck =
result.healthchecksToUpdate.find(
(hc) => hc.targetId === target.targetId
);
await addProxyTargets(
site.newt.newtId,
[target],
matchingHealthcheck ? [matchingHealthcheck] : [],
result.proxyResource.protocol,
result.proxyResource.proxyPort
);

View File

@@ -8,6 +8,8 @@ import {
roleResources,
roles,
Target,
TargetHealthCheck,
targetHealthCheck,
Transaction,
userOrgs,
userResources,
@@ -22,6 +24,7 @@ import {
TargetData
} from "./types";
import logger from "@server/logger";
import { createCertificate } from "@server/routers/private/certificates/createCertificate";
import { pickPort } from "@server/routers/target/helpers";
import { resourcePassword } from "@server/db";
import { hashPassword } from "@server/auth/password";
@@ -30,6 +33,7 @@ import { isValidCIDR, isValidIP, isValidUrlGlobPattern } from "../validators";
export type ProxyResourcesResults = {
proxyResource: Resource;
targetsToUpdate: Target[];
healthchecksToUpdate: TargetHealthCheck[];
}[];
export async function updateProxyResources(
@@ -43,7 +47,8 @@ export async function updateProxyResources(
for (const [resourceNiceId, resourceData] of Object.entries(
config["proxy-resources"]
)) {
const targetsToUpdate: Target[] = [];
let targetsToUpdate: Target[] = [];
let healthchecksToUpdate: TargetHealthCheck[] = [];
let resource: Resource;
async function createTarget( // reusable function to create a target
@@ -114,6 +119,33 @@ export async function updateProxyResources(
.returning();
targetsToUpdate.push(newTarget);
const healthcheckData = targetData.healthcheck;
const hcHeaders = healthcheckData?.headers ? JSON.stringify(healthcheckData.headers) : null;
const [newHealthcheck] = await trx
.insert(targetHealthCheck)
.values({
targetId: newTarget.targetId,
hcEnabled: healthcheckData?.enabled || false,
hcPath: healthcheckData?.path,
hcScheme: healthcheckData?.scheme,
hcMode: healthcheckData?.mode,
hcHostname: healthcheckData?.hostname,
hcPort: healthcheckData?.port,
hcInterval: healthcheckData?.interval,
hcUnhealthyInterval: healthcheckData?.unhealthyInterval,
hcTimeout: healthcheckData?.timeout,
hcHeaders: hcHeaders,
hcFollowRedirects: healthcheckData?.followRedirects,
hcMethod: healthcheckData?.method,
hcStatus: healthcheckData?.status,
hcHealth: "unknown"
})
.returning();
healthchecksToUpdate.push(newHealthcheck);
}
// Find existing resource by niceId and orgId
@@ -360,6 +392,64 @@ export async function updateProxyResources(
targetsToUpdate.push(finalUpdatedTarget);
}
const healthcheckData = targetData.healthcheck;
const [oldHealthcheck] = await trx
.select()
.from(targetHealthCheck)
.where(
eq(
targetHealthCheck.targetId,
existingTarget.targetId
)
)
.limit(1);
const hcHeaders = healthcheckData?.headers ? JSON.stringify(healthcheckData.headers) : null;
const [newHealthcheck] = await trx
.update(targetHealthCheck)
.set({
hcEnabled: healthcheckData?.enabled || false,
hcPath: healthcheckData?.path,
hcScheme: healthcheckData?.scheme,
hcMode: healthcheckData?.mode,
hcHostname: healthcheckData?.hostname,
hcPort: healthcheckData?.port,
hcInterval: healthcheckData?.interval,
hcUnhealthyInterval:
healthcheckData?.unhealthyInterval,
hcTimeout: healthcheckData?.timeout,
hcHeaders: hcHeaders,
hcFollowRedirects: healthcheckData?.followRedirects,
hcMethod: healthcheckData?.method,
hcStatus: healthcheckData?.status
})
.where(
eq(
targetHealthCheck.targetId,
existingTarget.targetId
)
)
.returning();
if (
checkIfHealthcheckChanged(
oldHealthcheck,
newHealthcheck
)
) {
healthchecksToUpdate.push(newHealthcheck);
// if the target is not already in the targetsToUpdate array, add it
if (
!targetsToUpdate.find(
(t) => t.targetId === updatedTarget.targetId
)
) {
targetsToUpdate.push(updatedTarget);
}
}
} else {
await createTarget(existingResource.resourceId, targetData);
}
@@ -573,7 +663,8 @@ export async function updateProxyResources(
results.push({
proxyResource: resource,
targetsToUpdate
targetsToUpdate,
healthchecksToUpdate
});
}
@@ -783,6 +874,36 @@ async function syncWhitelistUsers(
}
}
function checkIfHealthcheckChanged(
existing: TargetHealthCheck | undefined,
incoming: TargetHealthCheck | undefined
) {
if (!existing && incoming) return true;
if (existing && !incoming) return true;
if (!existing || !incoming) return false;
if (existing.hcEnabled !== incoming.hcEnabled) return true;
if (existing.hcPath !== incoming.hcPath) return true;
if (existing.hcScheme !== incoming.hcScheme) return true;
if (existing.hcMode !== incoming.hcMode) return true;
if (existing.hcHostname !== incoming.hcHostname) return true;
if (existing.hcPort !== incoming.hcPort) return true;
if (existing.hcInterval !== incoming.hcInterval) return true;
if (existing.hcUnhealthyInterval !== incoming.hcUnhealthyInterval)
return true;
if (existing.hcTimeout !== incoming.hcTimeout) return true;
if (existing.hcFollowRedirects !== incoming.hcFollowRedirects) return true;
if (existing.hcMethod !== incoming.hcMethod) return true;
if (existing.hcStatus !== incoming.hcStatus) return true;
if (
JSON.stringify(existing.hcHeaders) !==
JSON.stringify(incoming.hcHeaders)
)
return true;
return false;
}
function checkIfTargetChanged(
existing: Target | undefined,
incoming: Target | undefined
@@ -832,6 +953,8 @@ async function getDomain(
);
}
await createCertificate(domain.domainId, fullDomain, trx);
return domain;
}

View File

@@ -5,6 +5,22 @@ export const SiteSchema = z.object({
"docker-socket-enabled": z.boolean().optional().default(true)
});
export const TargetHealthCheckSchema = z.object({
hostname: z.string(),
port: z.number().int().min(1).max(65535),
enabled: z.boolean().optional().default(true),
path: z.string().optional(),
scheme: z.string().optional(),
mode: z.string().default("http"),
interval: z.number().int().default(30),
unhealthyInterval: z.number().int().default(30),
timeout: z.number().int().default(5),
headers: z.array(z.object({ name: z.string(), value: z.string() })).nullable().optional().default(null),
followRedirects: z.boolean().default(true),
method: z.string().default("GET"),
status: z.number().int().optional()
});
// Schema for individual target within a resource
export const TargetSchema = z.object({
site: z.string().optional(),
@@ -15,6 +31,7 @@ export const TargetSchema = z.object({
"internal-port": z.number().int().min(1).max(65535).optional(),
path: z.string().optional(),
"path-match": z.enum(["exact", "prefix", "regex"]).optional().nullable(),
healthcheck: TargetHealthCheckSchema.optional(),
rewritePath: z.string().optional(),
"rewrite-match": z.enum(["exact", "prefix", "regex", "stripPrefix"]).optional().nullable()
});
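
As a quick illustration of the new healthcheck schema above, a minimal sketch (hostname and port are made-up values) of how TargetHealthCheckSchema fills in its defaults when only the required fields are supplied:
// Illustrative only: hostname/port values are placeholders.
const hc = TargetHealthCheckSchema.parse({ hostname: "10.0.0.5", port: 8080 });
// hc => { hostname: "10.0.0.5", port: 8080, enabled: true, mode: "http",
//         interval: 30, unhealthyInterval: 30, timeout: 5, headers: null,
//         followRedirects: true, method: "GET" }   (path, scheme, status stay undefined)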

View File

@@ -0,0 +1,29 @@
import { z } from "zod";
export const colorsSchema = z.object({
background: z.string().optional(),
foreground: z.string().optional(),
card: z.string().optional(),
"card-foreground": z.string().optional(),
popover: z.string().optional(),
"popover-foreground": z.string().optional(),
primary: z.string().optional(),
"primary-foreground": z.string().optional(),
secondary: z.string().optional(),
"secondary-foreground": z.string().optional(),
muted: z.string().optional(),
"muted-foreground": z.string().optional(),
accent: z.string().optional(),
"accent-foreground": z.string().optional(),
destructive: z.string().optional(),
"destructive-foreground": z.string().optional(),
border: z.string().optional(),
input: z.string().optional(),
ring: z.string().optional(),
radius: z.string().optional(),
"chart-1": z.string().optional(),
"chart-2": z.string().optional(),
"chart-3": z.string().optional(),
"chart-4": z.string().optional(),
"chart-5": z.string().optional()
});

View File

@@ -6,9 +6,16 @@ import { eq } from "drizzle-orm";
import { license } from "@server/license/license";
import { configSchema, readConfigFile } from "./readConfigFile";
import { fromError } from "zod-validation-error";
import {
privateConfigSchema,
readPrivateConfigFile
} from "@server/lib/private/readConfigFile";
import logger from "@server/logger";
import { build } from "@server/build";
export class Config {
private rawConfig!: z.infer<typeof configSchema>;
private rawPrivateConfig!: z.infer<typeof privateConfigSchema>;
supporterData: SupporterKey | null = null;
@@ -30,6 +37,19 @@ export class Config {
throw new Error(`Invalid configuration file: ${errors}`);
}
const privateEnvironment = readPrivateConfigFile();
const {
data: parsedPrivateConfig,
success: privateSuccess,
error: privateError
} = privateConfigSchema.safeParse(privateEnvironment);
if (!privateSuccess) {
const errors = fromError(privateError);
throw new Error(`Invalid private configuration file: ${errors}`);
}
if (
// @ts-ignore
parsedConfig.users ||
@@ -89,7 +109,110 @@ export class Config {
? "true"
: "false";
if (parsedPrivateConfig.branding?.colors) {
process.env.BRANDING_COLORS = JSON.stringify(
parsedPrivateConfig.branding?.colors
);
}
if (parsedPrivateConfig.branding?.logo?.light_path) {
process.env.BRANDING_LOGO_LIGHT_PATH =
parsedPrivateConfig.branding?.logo?.light_path;
}
if (parsedPrivateConfig.branding?.logo?.dark_path) {
process.env.BRANDING_LOGO_DARK_PATH =
parsedPrivateConfig.branding?.logo?.dark_path || undefined;
}
process.env.HIDE_SUPPORTER_KEY = parsedPrivateConfig.flags
?.hide_supporter_key
? "true"
: "false";
if (build != "oss") {
if (parsedPrivateConfig.branding?.logo?.light_path) {
process.env.BRANDING_LOGO_LIGHT_PATH =
parsedPrivateConfig.branding?.logo?.light_path;
}
if (parsedPrivateConfig.branding?.logo?.dark_path) {
process.env.BRANDING_LOGO_DARK_PATH =
parsedPrivateConfig.branding?.logo?.dark_path || undefined;
}
process.env.BRANDING_LOGO_AUTH_WIDTH = parsedPrivateConfig.branding
?.logo?.auth_page?.width
? parsedPrivateConfig.branding?.logo?.auth_page?.width.toString()
: undefined;
process.env.BRANDING_LOGO_AUTH_HEIGHT = parsedPrivateConfig.branding
?.logo?.auth_page?.height
? parsedPrivateConfig.branding?.logo?.auth_page?.height.toString()
: undefined;
process.env.BRANDING_LOGO_NAVBAR_WIDTH = parsedPrivateConfig
.branding?.logo?.navbar?.width
? parsedPrivateConfig.branding?.logo?.navbar?.width.toString()
: undefined;
process.env.BRANDING_LOGO_NAVBAR_HEIGHT = parsedPrivateConfig
.branding?.logo?.navbar?.height
? parsedPrivateConfig.branding?.logo?.navbar?.height.toString()
: undefined;
process.env.BRANDING_FAVICON_PATH =
parsedPrivateConfig.branding?.favicon_path;
process.env.BRANDING_APP_NAME =
parsedPrivateConfig.branding?.app_name || "Pangolin";
if (parsedPrivateConfig.branding?.footer) {
process.env.BRANDING_FOOTER = JSON.stringify(
parsedPrivateConfig.branding?.footer
);
}
process.env.LOGIN_PAGE_TITLE_TEXT =
parsedPrivateConfig.branding?.login_page?.title_text || "";
process.env.LOGIN_PAGE_SUBTITLE_TEXT =
parsedPrivateConfig.branding?.login_page?.subtitle_text || "";
process.env.SIGNUP_PAGE_TITLE_TEXT =
parsedPrivateConfig.branding?.signup_page?.title_text || "";
process.env.SIGNUP_PAGE_SUBTITLE_TEXT =
parsedPrivateConfig.branding?.signup_page?.subtitle_text || "";
process.env.RESOURCE_AUTH_PAGE_HIDE_POWERED_BY =
parsedPrivateConfig.branding?.resource_auth_page
?.hide_powered_by === true
? "true"
: "false";
process.env.RESOURCE_AUTH_PAGE_SHOW_LOGO =
parsedPrivateConfig.branding?.resource_auth_page?.show_logo ===
true
? "true"
: "false";
process.env.RESOURCE_AUTH_PAGE_TITLE_TEXT =
parsedPrivateConfig.branding?.resource_auth_page?.title_text ||
"";
process.env.RESOURCE_AUTH_PAGE_SUBTITLE_TEXT =
parsedPrivateConfig.branding?.resource_auth_page
?.subtitle_text || "";
if (parsedPrivateConfig.branding?.background_image_path) {
process.env.BACKGROUND_IMAGE_PATH =
parsedPrivateConfig.branding?.background_image_path;
}
if (parsedPrivateConfig.server.reo_client_id) {
process.env.REO_CLIENT_ID =
parsedPrivateConfig.server.reo_client_id;
}
}
if (parsedConfig.server.maxmind_db_path) {
process.env.MAXMIND_DB_PATH = parsedConfig.server.maxmind_db_path;
}
this.rawConfig = parsedConfig;
this.rawPrivateConfig = parsedPrivateConfig;
}
public async initServer() {
@@ -107,7 +230,11 @@ export class Config {
private async checkKeyStatus() {
const licenseStatus = await license.check();
if (!licenseStatus.isHostLicensed) {
if (
!this.rawPrivateConfig.flags?.hide_supporter_key &&
build != "oss" &&
!licenseStatus.isHostLicensed
) {
this.checkSupporterKey();
}
}
@@ -116,6 +243,10 @@ export class Config {
return this.rawConfig;
}
public getRawPrivateConfig() {
return this.rawPrivateConfig;
}
public getNoReplyEmail(): string | undefined {
return (
this.rawConfig.email?.no_reply || this.rawConfig.email?.smtp_user

View File

@@ -11,3 +11,5 @@ export const APP_PATH = path.join("config");
export const configFilePath1 = path.join(APP_PATH, "config.yml");
export const configFilePath2 = path.join(APP_PATH, "config.yaml");
export const privateConfigFilePath1 = path.join(APP_PATH, "privateConfig.yml");

server/lib/encryption.ts (new file, 39 lines)
View File

@@ -0,0 +1,39 @@
import crypto from 'crypto';
export function encryptData(data: string, key: Buffer): string {
const algorithm = 'aes-256-gcm';
const iv = crypto.randomBytes(16);
const cipher = crypto.createCipheriv(algorithm, key, iv);
let encrypted = cipher.update(data, 'utf8', 'hex');
encrypted += cipher.final('hex');
const authTag = cipher.getAuthTag();
// Combine IV, auth tag, and encrypted data
return iv.toString('hex') + ':' + authTag.toString('hex') + ':' + encrypted;
}
// Helper function to decrypt data (you'll need this to read certificates)
export function decryptData(encryptedData: string, key: Buffer): string {
const algorithm = 'aes-256-gcm';
const parts = encryptedData.split(':');
if (parts.length !== 3) {
throw new Error('Invalid encrypted data format');
}
const iv = Buffer.from(parts[0], 'hex');
const authTag = Buffer.from(parts[1], 'hex');
const encrypted = parts[2];
const decipher = crypto.createDecipheriv(algorithm, key, iv);
decipher.setAuthTag(authTag);
let decrypted = decipher.update(encrypted, 'hex', 'utf8');
decrypted += decipher.final('utf8');
return decrypted;
}
// openssl rand -hex 32 > config/encryption.key
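
For reference, a minimal round-trip sketch with these helpers, assuming the key was generated with the openssl command above and that the module is imported via the project's @server alias:
import fs from "fs";
import { encryptData, decryptData } from "@server/lib/encryption";
// Read the 32-byte hex key written by `openssl rand -hex 32 > config/encryption.key`
const key = Buffer.from(fs.readFileSync("config/encryption.key", "utf8").trim(), "hex");
const ciphertext = encryptData("some secret material", key); // "<iv>:<authTag>:<ciphertext>" in hex
const plaintext = decryptData(ciphertext, key);              // round-trips to the original string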

View File

@@ -16,7 +16,7 @@ export async function verifyExitNodeOrgAccess(
return { hasAccess: true, exitNode };
}
export async function listExitNodes(orgId: string, filterOnline = false) {
export async function listExitNodes(orgId: string, filterOnline = false, noCloud = false) {
// TODO: be smarter about which nodes to send and ping, rather than just all non-remote nodes
const allExitNodes = await db
.select({
@@ -57,4 +57,9 @@ export function selectBestExitNode(
export async function checkExitNodeOrg(exitNodeId: number, orgId: string) {
return false;
}
}
export async function resolveExitNodes(hostname: string, publicKey: string) {
// OSS version: simple implementation that returns an empty array
return [];
}

View File

@@ -0,0 +1,33 @@
import { eq } from "drizzle-orm";
import { db, exitNodes } from "@server/db";
import config from "@server/lib/config";
let currentExitNodeId: number; // we really only need to look this up once per instance
export async function getCurrentExitNodeId(): Promise<number> {
if (!currentExitNodeId) {
if (config.getRawConfig().gerbil.exit_node_name) {
const exitNodeName = config.getRawConfig().gerbil.exit_node_name!;
const [exitNode] = await db
.select({
exitNodeId: exitNodes.exitNodeId
})
.from(exitNodes)
.where(eq(exitNodes.name, exitNodeName));
if (exitNode) {
currentExitNodeId = exitNode.exitNodeId;
}
} else {
const [exitNode] = await db
.select({
exitNodeId: exitNodes.exitNodeId
})
.from(exitNodes)
.limit(1);
if (exitNode) {
currentExitNodeId = exitNode.exitNodeId;
}
}
}
return currentExitNodeId;
}

View File

@@ -1,2 +1,33 @@
export * from "./exitNodes";
export * from "./shared";
import { build } from "@server/build";
// Import both modules
import * as exitNodesModule from "./exitNodes";
import * as privateExitNodesModule from "./privateExitNodes";
// Conditionally export exit nodes implementation based on build type
const exitNodesImplementation = build === "oss" ? exitNodesModule : privateExitNodesModule;
// Re-export all items from the selected implementation
export const {
verifyExitNodeOrgAccess,
listExitNodes,
selectBestExitNode,
checkExitNodeOrg,
resolveExitNodes
} = exitNodesImplementation;
// Import communications modules
import * as exitNodeCommsModule from "./exitNodeComms";
import * as privateExitNodeCommsModule from "./privateExitNodeComms";
// Conditionally export communications implementation based on build type
const exitNodeCommsImplementation = build === "oss" ? exitNodeCommsModule : privateExitNodeCommsModule;
// Re-export communications functions from the selected implementation
export const {
sendToExitNode
} = exitNodeCommsImplementation;
// Re-export shared modules
export * from "./subnet";
export * from "./getCurrentExitNodeId";

View File

@@ -0,0 +1,145 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import axios from "axios";
import logger from "@server/logger";
import { db, ExitNode, remoteExitNodes } from "@server/db";
import { eq } from "drizzle-orm";
import { sendToClient } from "../../routers/ws";
import { config } from "../config";
interface ExitNodeRequest {
remoteType?: string;
localPath: string;
method?: "POST" | "DELETE" | "GET" | "PUT";
data?: any;
queryParams?: Record<string, string>;
}
/**
* Sends a request to an exit node, handling both remote and local exit nodes
* @param exitNode The exit node to send the request to
* @param request The request configuration
* @returns Promise<any> Response data for local nodes, undefined for remote nodes
*/
export async function sendToExitNode(
exitNode: ExitNode,
request: ExitNodeRequest
): Promise<any> {
if (exitNode.type === "remoteExitNode" && request.remoteType) {
const [remoteExitNode] = await db
.select()
.from(remoteExitNodes)
.where(eq(remoteExitNodes.exitNodeId, exitNode.exitNodeId))
.limit(1);
if (!remoteExitNode) {
throw new Error(
`Remote exit node with ID ${exitNode.exitNodeId} not found`
);
}
return sendToClient(remoteExitNode.remoteExitNodeId, {
type: request.remoteType,
data: request.data
});
} else {
let hostname = exitNode.reachableAt;
logger.debug(`Exit node details:`, {
type: exitNode.type,
name: exitNode.name,
reachableAt: exitNode.reachableAt,
});
logger.debug(`Configured local exit node name: ${config.getRawConfig().gerbil.exit_node_name}`);
if (exitNode.name == config.getRawConfig().gerbil.exit_node_name) {
hostname = config.getRawPrivateConfig().gerbil.local_exit_node_reachable_at;
}
if (!hostname) {
throw new Error(
`Exit node with ID ${exitNode.exitNodeId} is not reachable`
);
}
logger.debug(`Sending request to exit node at ${hostname}`, {
type: request.remoteType,
data: request.data
});
// Handle local exit node with HTTP API
const method = request.method || "POST";
let url = `${hostname}${request.localPath}`;
// Add query parameters if provided
if (request.queryParams) {
const params = new URLSearchParams(request.queryParams);
url += `?${params.toString()}`;
}
try {
let response;
switch (method) {
case "POST":
response = await axios.post(url, request.data, {
headers: {
"Content-Type": "application/json"
},
timeout: 8000
});
break;
case "DELETE":
response = await axios.delete(url, {
timeout: 8000
});
break;
case "GET":
response = await axios.get(url, {
timeout: 8000
});
break;
case "PUT":
response = await axios.put(url, request.data, {
headers: {
"Content-Type": "application/json"
},
timeout: 8000
});
break;
default:
throw new Error(`Unsupported HTTP method: ${method}`);
}
logger.debug(`Exit node request successful:`, {
method,
url,
status: response.data.status
});
return response.data;
} catch (error) {
if (axios.isAxiosError(error)) {
logger.error(
`Error making ${method} request (can Pangolin see Gerbil HTTP API?) for exit node at ${hostname} (status: ${error.response?.status}): ${error.message}`
);
} else {
logger.error(
`Error making ${method} request for exit node at ${hostname}: ${error}`
);
}
}
}
}
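
For orientation, a hedged sketch of a call site; the message type and path below are placeholders, not endpoints defined in this commit:
// Hypothetical usage: exitNode is a row previously loaded from the exitNodes table.
const response = await sendToExitNode(exitNode, {
    remoteType: "remoteExitNode/example",  // placeholder; used when exitNode.type === "remoteExitNode"
    localPath: "/example-endpoint",        // placeholder; used for local nodes over the HTTP API
    method: "POST",
    data: { siteId: 42 }
});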

View File

@@ -0,0 +1,379 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import {
db,
exitNodes,
exitNodeOrgs,
resources,
targets,
sites,
targetHealthCheck
} from "@server/db";
import logger from "@server/logger";
import { ExitNodePingResult } from "@server/routers/newt";
import { eq, and, or, ne, isNull } from "drizzle-orm";
import axios from "axios";
import config from "../config";
/**
* Checks if an exit node is actually online by making HTTP requests to its endpoint/ping
* Starts up to 3 staggered attempts in parallel and reports the node online if any of them succeeds
*/
async function checkExitNodeOnlineStatus(
endpoint: string | undefined
): Promise<boolean> {
if (!endpoint || endpoint == "") {
// the endpoint can start out as an empty string
return false;
}
const maxAttempts = 3;
const timeoutMs = 5000; // 5 second timeout per request
const delayBetweenAttempts = 100; // 100ms delay between starting each attempt
// Create promises for all attempts with staggered delays
const attemptPromises = Array.from({ length: maxAttempts }, async (_, index) => {
const attemptNumber = index + 1;
// Add delay before each attempt (except the first)
if (index > 0) {
await new Promise((resolve) => setTimeout(resolve, delayBetweenAttempts * index));
}
try {
const response = await axios.get(`http://${endpoint}/ping`, {
timeout: timeoutMs,
validateStatus: (status) => status === 200
});
if (response.status === 200) {
logger.debug(
`Exit node ${endpoint} is online (attempt ${attemptNumber}/${maxAttempts})`
);
return { success: true, attemptNumber };
}
return { success: false, attemptNumber, error: 'Non-200 status' };
} catch (error) {
const errorMessage = error instanceof Error ? error.message : "Unknown error";
logger.debug(
`Exit node ${endpoint} ping failed (attempt ${attemptNumber}/${maxAttempts}): ${errorMessage}`
);
return { success: false, attemptNumber, error: errorMessage };
}
});
try {
// Wait for the first successful response or all to fail
const results = await Promise.allSettled(attemptPromises);
// Check if any attempt succeeded
for (const result of results) {
if (result.status === 'fulfilled' && result.value.success) {
return true;
}
}
// All attempts failed
logger.warn(
`Exit node ${endpoint} is offline after ${maxAttempts} parallel attempts`
);
return false;
} catch (error) {
logger.warn(
`Unexpected error checking exit node ${endpoint}: ${error instanceof Error ? error.message : "Unknown error"}`
);
return false;
}
}
export async function verifyExitNodeOrgAccess(
exitNodeId: number,
orgId: string
) {
const [result] = await db
.select({
exitNode: exitNodes,
exitNodeOrgId: exitNodeOrgs.exitNodeId
})
.from(exitNodes)
.leftJoin(
exitNodeOrgs,
and(
eq(exitNodeOrgs.exitNodeId, exitNodes.exitNodeId),
eq(exitNodeOrgs.orgId, orgId)
)
)
.where(eq(exitNodes.exitNodeId, exitNodeId));
if (!result) {
return { hasAccess: false, exitNode: null };
}
const { exitNode } = result;
// If the exit node is type "gerbil", access is allowed
if (exitNode.type === "gerbil") {
return { hasAccess: true, exitNode };
}
// If the exit node is type "remoteExitNode", check if it has org access
if (exitNode.type === "remoteExitNode") {
return { hasAccess: !!result.exitNodeOrgId, exitNode };
}
// For any other type, deny access
return { hasAccess: false, exitNode };
}
export async function listExitNodes(orgId: string, filterOnline = false, noCloud = false) {
const allExitNodes = await db
.select({
exitNodeId: exitNodes.exitNodeId,
name: exitNodes.name,
address: exitNodes.address,
endpoint: exitNodes.endpoint,
publicKey: exitNodes.publicKey,
listenPort: exitNodes.listenPort,
reachableAt: exitNodes.reachableAt,
maxConnections: exitNodes.maxConnections,
online: exitNodes.online,
lastPing: exitNodes.lastPing,
type: exitNodes.type,
orgId: exitNodeOrgs.orgId,
region: exitNodes.region
})
.from(exitNodes)
.leftJoin(
exitNodeOrgs,
eq(exitNodes.exitNodeId, exitNodeOrgs.exitNodeId)
)
.where(
or(
// Include all exit nodes that are NOT of type remoteExitNode
and(
eq(exitNodes.type, "gerbil"),
or(
// only choose nodes that are in the same region
eq(exitNodes.region, config.getRawPrivateConfig().app.region),
isNull(exitNodes.region) // or for enterprise where region is not set
)
),
// Include remoteExitNode types where the orgId matches the newt's organization
and(
eq(exitNodes.type, "remoteExitNode"),
eq(exitNodeOrgs.orgId, orgId)
)
)
);
// Filter the nodes: if there are no remoteExitNodes, keep everything; otherwise prefer the remoteExitNodes and drop the non-remote ones (applied below, after the online check)
if (allExitNodes.length === 0) {
logger.warn("No exit nodes found for ping request!");
return [];
}
// Enhanced online checking: consider node offline if either DB says offline OR HTTP ping fails
const nodesWithRealOnlineStatus = await Promise.all(
allExitNodes.map(async (node) => {
// If database says it's online, verify with HTTP ping
let online: boolean;
if (filterOnline && node.type == "remoteExitNode") {
try {
const isActuallyOnline = await checkExitNodeOnlineStatus(
node.endpoint
);
// set the item in the database if it is offline
if (isActuallyOnline != node.online) {
await db
.update(exitNodes)
.set({ online: isActuallyOnline })
.where(eq(exitNodes.exitNodeId, node.exitNodeId));
}
online = isActuallyOnline;
} catch (error) {
logger.warn(
`Failed to check online status for exit node ${node.name} (${node.endpoint}): ${error instanceof Error ? error.message : "Unknown error"}`
);
online = false;
}
} else {
online = node.online;
}
return {
...node,
online
};
})
);
const remoteExitNodes = nodesWithRealOnlineStatus.filter(
(node) =>
node.type === "remoteExitNode" && (!filterOnline || node.online)
);
const gerbilExitNodes = nodesWithRealOnlineStatus.filter(
(node) => node.type === "gerbil" && (!filterOnline || node.online) && !noCloud
);
// THIS PROVIDES THE FALLBACK: use remote exit nodes if any exist, otherwise fall back to gerbil nodes
const exitNodesList =
remoteExitNodes.length > 0 ? remoteExitNodes : gerbilExitNodes;
return exitNodesList;
}
/**
* Selects the most suitable exit node from a list of ping results.
*
* The selection algorithm follows these steps:
*
* 1. **Filter Invalid Nodes**: Excludes nodes with errors or zero weight.
*
* 2. **Sort by Latency**: Sorts valid nodes in ascending order of latency.
*
* 3. **Preferred Selection**:
* - If the lowest-latency node has sufficient capacity (≥10% weight),
* check if a previously connected node is also acceptable.
* - The previously connected node is preferred if its latency is within
* 30ms or 15% of the best node's latency.
*
* 4. **Fallback to Next Best**:
* - If the lowest-latency node is under capacity, find the next node
* with acceptable capacity.
*
* 5. **Final Fallback**:
* - If no nodes meet the capacity threshold, fall back to the node
* with the highest weight (i.e., most available capacity).
*
*/
export function selectBestExitNode(
pingResults: ExitNodePingResult[]
): ExitNodePingResult | null {
const MIN_CAPACITY_THRESHOLD = 0.1;
const LATENCY_TOLERANCE_MS = 30;
const LATENCY_TOLERANCE_PERCENT = 0.15;
// Filter out invalid nodes
const validNodes = pingResults.filter((n) => !n.error && n.weight > 0);
if (validNodes.length === 0) {
logger.error("No valid exit nodes available");
return null;
}
// Sort by latency (ascending)
const sortedNodes = validNodes
.slice()
.sort((a, b) => a.latencyMs - b.latencyMs);
const lowestLatencyNode = sortedNodes[0];
logger.debug(
`Lowest latency node: ${lowestLatencyNode.exitNodeName} (${lowestLatencyNode.latencyMs} ms, weight=${lowestLatencyNode.weight.toFixed(2)})`
);
// If lowest latency node has enough capacity, check if previously connected node is acceptable
if (lowestLatencyNode.weight >= MIN_CAPACITY_THRESHOLD) {
const previouslyConnectedNode = sortedNodes.find(
(n) =>
n.wasPreviouslyConnected && n.weight >= MIN_CAPACITY_THRESHOLD
);
if (previouslyConnectedNode) {
const latencyDiff =
previouslyConnectedNode.latencyMs - lowestLatencyNode.latencyMs;
const percentDiff = latencyDiff / lowestLatencyNode.latencyMs;
if (
latencyDiff <= LATENCY_TOLERANCE_MS ||
percentDiff <= LATENCY_TOLERANCE_PERCENT
) {
logger.info(
`Sticking with previously connected node: ${previouslyConnectedNode.exitNodeName} ` +
`(${previouslyConnectedNode.latencyMs} ms), latency diff = ${latencyDiff.toFixed(1)}ms ` +
`/ ${(percentDiff * 100).toFixed(1)}%.`
);
return previouslyConnectedNode;
}
}
return lowestLatencyNode;
}
// Otherwise, find the next node (after the lowest) that has enough capacity
for (let i = 1; i < sortedNodes.length; i++) {
const node = sortedNodes[i];
if (node.weight >= MIN_CAPACITY_THRESHOLD) {
logger.info(
`Lowest latency node under capacity. Using next best: ${node.exitNodeName} ` +
`(${node.latencyMs} ms, weight=${node.weight.toFixed(2)})`
);
return node;
}
}
// Fallback: pick the highest weight node
const fallbackNode = validNodes.reduce((a, b) =>
a.weight > b.weight ? a : b
);
logger.warn(
`No nodes with ≥10% weight. Falling back to highest capacity node: ${fallbackNode.exitNodeName}`
);
return fallbackNode;
}
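// A small worked example of the selection rules above, using only the fields the
// function reads (other ExitNodePingResult fields omitted for brevity):
//
// const picked = selectBestExitNode([
//     { exitNodeName: "node-a", latencyMs: 20, weight: 0.5, wasPreviouslyConnected: false },
//     { exitNodeName: "node-b", latencyMs: 42, weight: 0.4, wasPreviouslyConnected: true }
// ] as any); // cast because only the consulted fields are filled in
//
// node-b is within 30 ms of the fastest node and has enough capacity, so
// picked?.exitNodeName === "node-b".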
export async function checkExitNodeOrg(exitNodeId: number, orgId: string) {
const [exitNodeOrg] = await db
.select()
.from(exitNodeOrgs)
.where(
and(
eq(exitNodeOrgs.exitNodeId, exitNodeId),
eq(exitNodeOrgs.orgId, orgId)
)
)
.limit(1);
if (!exitNodeOrg) {
return true;
}
return false;
}
export async function resolveExitNodes(hostname: string, publicKey: string) {
const resourceExitNodes = await db
.select({
endpoint: exitNodes.endpoint,
publicKey: exitNodes.publicKey,
orgId: resources.orgId
})
.from(resources)
.innerJoin(targets, eq(resources.resourceId, targets.resourceId))
.leftJoin(
targetHealthCheck,
eq(targetHealthCheck.targetId, targets.targetId)
)
.innerJoin(sites, eq(targets.siteId, sites.siteId))
.innerJoin(exitNodes, eq(sites.exitNodeId, exitNodes.exitNodeId))
.where(
and(
eq(resources.fullDomain, hostname),
ne(exitNodes.publicKey, publicKey),
ne(targetHealthCheck.hcHealth, "unhealthy")
)
);
return resourceExitNodes;
}

View File

@@ -1,10 +1,42 @@
import logger from "@server/logger";
import { maxmindLookup } from "@server/db/maxmind";
import axios from "axios";
import config from "./config";
import { tokenManager } from "./tokenManager";
import logger from "@server/logger";
export async function getCountryCodeForIp(
ip: string
): Promise<string | undefined> {
try {
if (!maxmindLookup) {
logger.warn(
"MaxMind DB path not configured, cannot perform GeoIP lookup"
);
return;
}
const result = maxmindLookup.get(ip);
if (!result || !result.country) {
return;
}
const { country } = result;
logger.debug(
`GeoIP lookup successful for IP ${ip}: ${country.iso_code}`
);
return country.iso_code;
} catch (error) {
logger.error("Error performing GeoIP lookup:", error);
}
return;
}
export async function remoteGetCountryCodeForIp(
ip: string
): Promise<string | undefined> {
try {
const response = await axios.get(

View File

@@ -1,8 +1,48 @@
import { db, loginPage, loginPageOrg } from "@server/db";
import config from "@server/lib/config";
import { eq } from "drizzle-orm";
export async function generateOidcRedirectUrl(
idpId: number,
orgId?: string,
loginPageId?: number
): Promise<string> {
let baseUrl: string | undefined;
const secure = config.getRawConfig().app.dashboard_url?.startsWith("https");
const method = secure ? "https" : "http";
if (loginPageId) {
const [res] = await db
.select()
.from(loginPage)
.where(eq(loginPage.loginPageId, loginPageId))
.limit(1);
if (res && res.fullDomain) {
baseUrl = `${method}://${res.fullDomain}`;
}
} else if (orgId) {
const [res] = await db
.select()
.from(loginPageOrg)
.where(eq(loginPageOrg.orgId, orgId))
.innerJoin(
loginPage,
eq(loginPage.loginPageId, loginPageOrg.loginPageId)
)
.limit(1);
if (res?.loginPage && res.loginPage.domainId && res.loginPage.fullDomain) {
baseUrl = `${method}://${res.loginPage.fullDomain}`;
}
}
if (!baseUrl) {
baseUrl = config.getRawConfig().app.dashboard_url!;
}
export function generateOidcRedirectUrl(idpId: number) {
const dashboardUrl = config.getRawConfig().app.dashboard_url;
const redirectPath = `/auth/idp/${idpId}/oidc/callback`;
const redirectUrl = new URL(redirectPath, dashboardUrl).toString();
const redirectUrl = new URL(redirectPath, baseUrl!).toString();
return redirectUrl;
}
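
To illustrate the URL construction (values are hypothetical): a login page served at auth.example.com and idpId 12 resolve as follows:
// new URL(path, base) keeps the base origin and replaces the path.
new URL("/auth/idp/12/oidc/callback", "https://auth.example.com").toString();
// => "https://auth.example.com/auth/idp/12/oidc/callback"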

View File

@@ -1,3 +0,0 @@
export * from "./response";
export { tokenManager, TokenManager } from "./tokenManager";
export * from "./geoip";

View File

@@ -0,0 +1,85 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import Stripe from "stripe";
export enum FeatureId {
SITE_UPTIME = "siteUptime",
USERS = "users",
EGRESS_DATA_MB = "egressDataMb",
DOMAINS = "domains",
REMOTE_EXIT_NODES = "remoteExitNodes"
}
export const FeatureMeterIds: Record<FeatureId, string> = {
[FeatureId.SITE_UPTIME]: "mtr_61Srrej5wUJuiTWgo41D3Ee2Ir7WmDLU",
[FeatureId.USERS]: "mtr_61SrreISyIWpwUNGR41D3Ee2Ir7WmQro",
[FeatureId.EGRESS_DATA_MB]: "mtr_61Srreh9eWrExDSCe41D3Ee2Ir7Wm5YW",
[FeatureId.DOMAINS]: "mtr_61Ss9nIKDNMw0LDRU41D3Ee2Ir7WmRPU",
[FeatureId.REMOTE_EXIT_NODES]: "mtr_61T86UXnfxTVXy9sD41D3Ee2Ir7WmFTE"
};
export const FeatureMeterIdsSandbox: Record<FeatureId, string> = {
[FeatureId.SITE_UPTIME]: "mtr_test_61Snh3cees4w60gv841DCpkOb237BDEu",
[FeatureId.USERS]: "mtr_test_61Sn5fLtq1gSfRkyA41DCpkOb237B6au",
[FeatureId.EGRESS_DATA_MB]: "mtr_test_61Snh2a2m6qome5Kv41DCpkOb237B3dQ",
[FeatureId.DOMAINS]: "mtr_test_61SsA8qrdAlgPpFRQ41DCpkOb237BGts",
[FeatureId.REMOTE_EXIT_NODES]: "mtr_test_61T86Vqmwa3D9ra3341DCpkOb237B94K"
};
export function getFeatureMeterId(featureId: FeatureId): string {
if (process.env.ENVIRONMENT == "prod" && process.env.SANDBOX_MODE !== "true") {
return FeatureMeterIds[featureId];
} else {
return FeatureMeterIdsSandbox[featureId];
}
}
export function getFeatureIdByMetricId(metricId: string): FeatureId | undefined {
return (Object.entries(FeatureMeterIds) as [FeatureId, string][])
.find(([_, v]) => v === metricId)?.[0];
}
export type FeaturePriceSet = {
[key in FeatureId]: string;
};
export const standardFeaturePriceSet: FeaturePriceSet = { // Free tier matches the freeLimitSet
[FeatureId.SITE_UPTIME]: "price_1RrQc4D3Ee2Ir7WmaJGZ3MtF",
[FeatureId.USERS]: "price_1RrQeJD3Ee2Ir7WmgveP3xea",
[FeatureId.EGRESS_DATA_MB]: "price_1RrQXFD3Ee2Ir7WmvGDlgxQk",
[FeatureId.DOMAINS]: "price_1Rz3tMD3Ee2Ir7Wm5qLeASzC",
[FeatureId.REMOTE_EXIT_NODES]: "price_1S46weD3Ee2Ir7Wm94KEHI4h"
};
export const standardFeaturePriceSetSandbox: FeaturePriceSet = { // Free tier matches the freeLimitSet
[FeatureId.SITE_UPTIME]: "price_1RefFBDCpkOb237BPrKZ8IEU",
[FeatureId.USERS]: "price_1ReNa4DCpkOb237Bc67G5muF",
[FeatureId.EGRESS_DATA_MB]: "price_1Rfp9LDCpkOb237BwuN5Oiu0",
[FeatureId.DOMAINS]: "price_1Ryi88DCpkOb237B2D6DM80b",
[FeatureId.REMOTE_EXIT_NODES]: "price_1RyiZvDCpkOb237BXpmoIYJL"
};
export function getStandardFeaturePriceSet(): FeaturePriceSet {
if (process.env.ENVIRONMENT == "prod" && process.env.SANDBOX_MODE !== "true") {
return standardFeaturePriceSet;
} else {
return standardFeaturePriceSetSandbox;
}
}
export function getLineItems(featurePriceSet: FeaturePriceSet): Stripe.Checkout.SessionCreateParams.LineItem[] {
return Object.entries(featurePriceSet).map(([featureId, priceId]) => ({
price: priceId,
}));
}
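
For context, a minimal sketch (not part of this commit) of feeding getLineItems into a Checkout session with the standard Stripe SDK; the customer ID and success URL are placeholders:
const stripe = new Stripe(process.env.STRIPE_SECRET_KEY!);
const session = await stripe.checkout.sessions.create({
    mode: "subscription",
    customer: "cus_placeholder",                             // placeholder customer ID
    line_items: getLineItems(getStandardFeaturePriceSet()),  // metered prices, so no quantity is set
    success_url: "https://example.com/billing/success"       // placeholder URL
});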

View File

@@ -0,0 +1,16 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
export * from "./limitSet";
export * from "./features";
export * from "./limitsService";

View File

@@ -0,0 +1,63 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import { FeatureId } from "./features";
export type LimitSet = {
[key in FeatureId]: {
value: number | null; // null indicates no limit
description?: string;
};
};
export const sandboxLimitSet: LimitSet = {
[FeatureId.SITE_UPTIME]: { value: 2880, description: "Sandbox limit" }, // 1 site up for 2 days
[FeatureId.USERS]: { value: 1, description: "Sandbox limit" },
[FeatureId.EGRESS_DATA_MB]: { value: 1000, description: "Sandbox limit" }, // 1 GB
[FeatureId.DOMAINS]: { value: 0, description: "Sandbox limit" },
[FeatureId.REMOTE_EXIT_NODES]: { value: 0, description: "Sandbox limit" },
};
export const freeLimitSet: LimitSet = {
[FeatureId.SITE_UPTIME]: { value: 46080, description: "Free tier limit" }, // 1 site up for 32 days
[FeatureId.USERS]: { value: 3, description: "Free tier limit" },
[FeatureId.EGRESS_DATA_MB]: {
value: 25000,
description: "Free tier limit"
}, // 25 GB
[FeatureId.DOMAINS]: { value: 3, description: "Free tier limit" },
[FeatureId.REMOTE_EXIT_NODES]: { value: 1, description: "Free tier limit" }
};
export const subscribedLimitSet: LimitSet = {
[FeatureId.SITE_UPTIME]: {
value: 2232000,
description: "Contact us to increase soft limit.",
}, // 50 sites up for 31 days
[FeatureId.USERS]: {
value: 150,
description: "Contact us to increase soft limit."
},
[FeatureId.EGRESS_DATA_MB]: {
value: 12000000,
description: "Contact us to increase soft limit."
}, // 12000 GB
[FeatureId.DOMAINS]: {
value: 20,
description: "Contact us to increase soft limit."
},
[FeatureId.REMOTE_EXIT_NODES]: {
value: 5,
description: "Contact us to increase soft limit."
}
};
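
A quick sanity check of the uptime arithmetic in the comments above (all values are minutes of site uptime):
const MIN_PER_DAY = 24 * 60;
console.assert(2880 === 2 * MIN_PER_DAY);           // sandbox: 1 site up for 2 days
console.assert(46080 === 32 * MIN_PER_DAY);         // free: 1 site up for 32 days
console.assert(2232000 === 50 * 31 * MIN_PER_DAY);  // subscribed: 50 sites up for 31 days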

View File

@@ -0,0 +1,51 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import { db, limits } from "@server/db";
import { and, eq } from "drizzle-orm";
import { LimitSet } from "./limitSet";
import { FeatureId } from "./features";
class LimitService {
async applyLimitSetToOrg(orgId: string, limitSet: LimitSet): Promise<void> {
const limitEntries = Object.entries(limitSet);
// delete existing limits for the org
await db.transaction(async (trx) => {
await trx.delete(limits).where(eq(limits.orgId, orgId));
for (const [featureId, entry] of limitEntries) {
const limitId = `${orgId}-${featureId}`;
const { value, description } = entry;
await trx
.insert(limits)
.values({ limitId, orgId, featureId, value, description });
}
});
}
async getOrgLimit(
orgId: string,
featureId: FeatureId
): Promise<number | null> {
const limitId = `${orgId}-${featureId}`;
const [limit] = await db
.select()
.from(limits)
.where(and(eq(limits.limitId, limitId)))
.limit(1);
return limit ? limit.value : null;
}
}
export const limitsService = new LimitService();
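
A brief usage sketch (the org ID is a placeholder), assuming freeLimitSet is imported from ./limitSet alongside FeatureId:
// Apply the free tier to a new org, then read one limit back.
await limitsService.applyLimitSetToOrg("org_placeholder", freeLimitSet);
const userLimit = await limitsService.getOrgLimit("org_placeholder", FeatureId.USERS); // 3, or null if unset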

View File

@@ -0,0 +1,37 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
export enum TierId {
STANDARD = "standard",
}
export type TierPriceSet = {
[key in TierId]: string;
};
export const tierPriceSet: TierPriceSet = { // Free tier matches the freeLimitSet
[TierId.STANDARD]: "price_1RrQ9cD3Ee2Ir7Wmqdy3KBa0",
};
export const tierPriceSetSandbox: TierPriceSet = { // Free tier matches the freeLimitSet
// When matching a tier, the keys closer to index 0 are matched first, so list the tiers in descending order of value
[TierId.STANDARD]: "price_1RrAYJDCpkOb237By2s1P32m",
};
export function getTierPriceSet(environment?: string, sandbox_mode?: boolean): TierPriceSet {
if ((process.env.ENVIRONMENT == "prod" && process.env.SANDBOX_MODE !== "true") || (environment === "prod" && sandbox_mode !== true)) { // THIS GETS LOADED CLIENT SIDE AND SERVER SIDE
return tierPriceSet;
} else {
return tierPriceSetSandbox;
}
}
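
Since this helper is loaded on both the client and the server, callers without process.env can pass the environment explicitly; a small sketch:
// Client-side: environment values come from props/config rather than process.env.
const prices = getTierPriceSet("prod", false);
const standardPriceId = prices[TierId.STANDARD];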

View File

@@ -0,0 +1,889 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import { eq, sql, and } from "drizzle-orm";
import NodeCache from "node-cache";
import { v4 as uuidv4 } from "uuid";
import { PutObjectCommand } from "@aws-sdk/client-s3";
import { s3Client } from "../s3";
import * as fs from "fs/promises";
import * as path from "path";
import {
db,
usage,
customers,
sites,
newts,
limits,
Usage,
Limit,
Transaction
} from "@server/db";
import { FeatureId, getFeatureMeterId } from "./features";
import config from "@server/lib/config";
import logger from "@server/logger";
import { sendToClient } from "@server/routers/ws";
import { build } from "@server/build";
interface StripeEvent {
identifier?: string;
timestamp: number;
event_name: string;
payload: {
value: number;
stripe_customer_id: string;
};
}
export class UsageService {
private cache: NodeCache;
private bucketName: string | undefined;
private currentEventFile: string | null = null;
private currentFileStartTime: number = 0;
private eventsDir: string | undefined;
private uploadingFiles: Set<string> = new Set();
constructor() {
this.cache = new NodeCache({ stdTTL: 300 }); // 5 minute TTL
if (build !== "saas") {
return;
}
this.bucketName = config.getRawPrivateConfig().stripe?.s3Bucket;
this.eventsDir = config.getRawPrivateConfig().stripe?.localFilePath;
// Ensure events directory exists
this.initializeEventsDirectory().then(() => {
this.uploadPendingEventFilesOnStartup();
});
// Periodically check for old event files to upload
setInterval(() => {
this.uploadOldEventFiles().catch((err) => {
logger.error("Error in periodic event file upload:", err);
});
}, 30000); // every 30 seconds
}
/**
* Truncate a number to 11 decimal places to prevent precision issues
*/
private truncateValue(value: number): number {
return Math.round(value * 100000000000) / 100000000000; // 11 decimal places
}
private async initializeEventsDirectory(): Promise<void> {
if (!this.eventsDir) {
logger.warn("Stripe local file path is not configured, skipping events directory initialization.");
return;
}
try {
await fs.mkdir(this.eventsDir, { recursive: true });
} catch (error) {
logger.error("Failed to create events directory:", error);
}
}
private async uploadPendingEventFilesOnStartup(): Promise<void> {
if (!this.eventsDir || !this.bucketName) {
logger.warn("Stripe local file path or bucket name is not configured, skipping leftover event file upload.");
return;
}
try {
const files = await fs.readdir(this.eventsDir);
for (const file of files) {
if (file.endsWith(".json")) {
const filePath = path.join(this.eventsDir, file);
try {
const fileContent = await fs.readFile(
filePath,
"utf-8"
);
const events = JSON.parse(fileContent);
if (Array.isArray(events) && events.length > 0) {
// Upload to S3
const uploadCommand = new PutObjectCommand({
Bucket: this.bucketName,
Key: file,
Body: fileContent,
ContentType: "application/json"
});
await s3Client.send(uploadCommand);
// Check if file still exists before unlinking
try {
await fs.access(filePath);
await fs.unlink(filePath);
} catch (unlinkError) {
logger.debug(`Startup file ${file} was already deleted`);
}
logger.info(
`Uploaded leftover event file ${file} to S3 with ${events.length} events`
);
} else {
// Remove empty file
try {
await fs.access(filePath);
await fs.unlink(filePath);
} catch (unlinkError) {
logger.debug(`Empty startup file ${file} was already deleted`);
}
}
} catch (err) {
logger.error(
`Error processing leftover event file ${file}:`,
err
);
}
}
}
} catch (err) {
logger.error("Failed to scan for leftover event files:", err);
}
}
public async add(
orgId: string,
featureId: FeatureId,
value: number,
transaction: any = null
): Promise<Usage | null> {
if (build !== "saas") {
return null;
}
// Truncate value to 11 decimal places
value = this.truncateValue(value);
// Implement retry logic for deadlock handling
const maxRetries = 3;
let attempt = 0;
while (attempt <= maxRetries) {
try {
// Get subscription data for this org (with caching)
const customerId = await this.getCustomerId(orgId, featureId);
if (!customerId) {
logger.warn(
`No subscription data found for org ${orgId} and feature ${featureId}`
);
return null;
}
let usage;
if (transaction) {
usage = await this.internalAddUsage(
orgId,
featureId,
value,
transaction
);
} else {
await db.transaction(async (trx) => {
usage = await this.internalAddUsage(orgId, featureId, value, trx);
});
}
// Log event for Stripe
await this.logStripeEvent(featureId, value, customerId);
return usage || null;
} catch (error: any) {
// Check if this is a deadlock error
const isDeadlock = error?.code === '40P01' ||
error?.cause?.code === '40P01' ||
(error?.message && error.message.includes('deadlock'));
if (isDeadlock && attempt < maxRetries) {
attempt++;
// Exponential backoff with jitter: ~50-100ms, ~100-200ms, ~200-400ms
const baseDelay = Math.pow(2, attempt - 1) * 50;
const jitter = Math.random() * baseDelay;
const delay = baseDelay + jitter;
logger.warn(
`Deadlock detected for ${orgId}/${featureId}, retrying attempt ${attempt}/${maxRetries} after ${delay.toFixed(0)}ms`
);
await new Promise(resolve => setTimeout(resolve, delay));
continue;
}
logger.error(
`Failed to add usage for ${orgId}/${featureId} after ${attempt} attempts:`,
error
);
break;
}
}
return null;
}
private async internalAddUsage(
orgId: string,
featureId: FeatureId,
value: number,
trx: Transaction
): Promise<Usage> {
// Truncate value to 11 decimal places
value = this.truncateValue(value);
const usageId = `${orgId}-${featureId}`;
const meterId = getFeatureMeterId(featureId);
// Use upsert: insert if not exists, otherwise increment
const [returnUsage] = await trx
.insert(usage)
.values({
usageId,
featureId,
orgId,
meterId,
latestValue: value,
updatedAt: Math.floor(Date.now() / 1000)
})
.onConflictDoUpdate({
target: usage.usageId,
set: {
latestValue: sql`${usage.latestValue} + ${value}`
}
}).returning();
return returnUsage;
}
// Helper function to get today's date as string (YYYY-MM-DD)
getTodayDateString(): string {
return new Date().toISOString().split("T")[0];
}
// Helper function to get date string from Date object
getDateString(date: number): string {
return new Date(date * 1000).toISOString().split("T")[0];
}
async updateDaily(
orgId: string,
featureId: FeatureId,
value?: number,
customerId?: string
): Promise<void> {
if (build !== "saas") {
return;
}
try {
if (!customerId) {
customerId =
(await this.getCustomerId(orgId, featureId)) || undefined;
if (!customerId) {
logger.warn(
`No subscription data found for org ${orgId} and feature ${featureId}`
);
return;
}
}
// Truncate value to 11 decimal places if provided
if (value !== undefined && value !== null) {
value = this.truncateValue(value);
}
const today = this.getTodayDateString();
let currentUsage: Usage | null = null;
await db.transaction(async (trx) => {
// Get existing meter record
const usageId = `${orgId}-${featureId}`;
// Get current usage record
[currentUsage] = await trx
.select()
.from(usage)
.where(eq(usage.usageId, usageId))
.limit(1);
if (currentUsage) {
const lastUpdateDate = this.getDateString(
currentUsage.updatedAt
);
const currentRunningTotal = currentUsage.latestValue;
const lastDailyValue = currentUsage.instantaneousValue || 0;
if (value == undefined || value === null) {
value = currentUsage.instantaneousValue || 0;
}
if (lastUpdateDate === today) {
// Same day update: replace the daily value
// Remove old daily value from running total, add new value
const newRunningTotal = this.truncateValue(
currentRunningTotal - lastDailyValue + value
);
await trx
.update(usage)
.set({
latestValue: newRunningTotal,
instantaneousValue: value,
updatedAt: Math.floor(Date.now() / 1000)
})
.where(eq(usage.usageId, usageId));
} else {
// New day: add to running total
const newRunningTotal = this.truncateValue(
currentRunningTotal + value
);
await trx
.update(usage)
.set({
latestValue: newRunningTotal,
instantaneousValue: value,
updatedAt: Math.floor(Date.now() / 1000)
})
.where(eq(usage.usageId, usageId));
}
} else {
// First record for this meter
const meterId = getFeatureMeterId(featureId);
const truncatedValue = this.truncateValue(value || 0);
await trx.insert(usage).values({
usageId,
featureId,
orgId,
meterId,
instantaneousValue: truncatedValue,
latestValue: truncatedValue,
updatedAt: Math.floor(Date.now() / 1000)
});
}
});
await this.logStripeEvent(featureId, value || 0, customerId);
} catch (error) {
logger.error(
`Failed to update daily usage for ${orgId}/${featureId}:`,
error
);
}
}
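// Worked example with illustrative numbers: if the row currently has
// latestValue = 500 and instantaneousValue = 40 from an update earlier today,
// a same-day call with value = 55 replaces today's contribution:
// 500 - 40 + 55 = 515. If the last update was on a previous day, the value is
// simply added on top: 500 + 55 = 555.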
private async getCustomerId(
orgId: string,
featureId: FeatureId
): Promise<string | null> {
const cacheKey = `customer_${orgId}_${featureId}`;
const cached = this.cache.get<string>(cacheKey);
if (cached) {
return cached;
}
try {
// Query subscription data
const [customer] = await db
.select({
customerId: customers.customerId
})
.from(customers)
.where(eq(customers.orgId, orgId))
.limit(1);
if (!customer) {
return null;
}
const customerId = customer.customerId;
// Cache the result
this.cache.set(cacheKey, customerId);
return customerId;
} catch (error) {
logger.error(
`Failed to get subscription data for ${orgId}/${featureId}:`,
error
);
return null;
}
}
private async logStripeEvent(
featureId: FeatureId,
value: number,
customerId: string
): Promise<void> {
// Truncate value to 11 decimal places before sending to Stripe
const truncatedValue = this.truncateValue(value);
const event: StripeEvent = {
identifier: uuidv4(),
timestamp: Math.floor(new Date().getTime() / 1000),
event_name: featureId,
payload: {
value: truncatedValue,
stripe_customer_id: customerId
}
};
await this.writeEventToFile(event);
await this.checkAndUploadFile();
}
private async writeEventToFile(event: StripeEvent): Promise<void> {
if (!this.eventsDir || !this.bucketName) {
logger.warn("Stripe local file path or bucket name is not configured, skipping event file write.");
return;
}
if (!this.currentEventFile) {
this.currentEventFile = this.generateEventFileName();
this.currentFileStartTime = Date.now();
}
const filePath = path.join(this.eventsDir, this.currentEventFile);
try {
let events: StripeEvent[] = [];
// Try to read existing file
try {
const fileContent = await fs.readFile(filePath, "utf-8");
events = JSON.parse(fileContent);
} catch (error) {
// File doesn't exist or is empty, start with empty array
events = [];
}
// Add new event
events.push(event);
// Write back to file
await fs.writeFile(filePath, JSON.stringify(events, null, 2));
} catch (error) {
logger.error("Failed to write event to file:", error);
}
}
private async checkAndUploadFile(): Promise<void> {
if (!this.currentEventFile) {
return;
}
const now = Date.now();
const fileAge = now - this.currentFileStartTime;
// Check if file is at least 1 minute old
if (fileAge >= 60000) {
// 60 seconds
await this.uploadFileToS3();
}
}
private async uploadFileToS3(): Promise<void> {
if (!this.bucketName || !this.eventsDir) {
logger.warn("Stripe local file path or bucket name is not configured, skipping S3 upload.");
return;
}
if (!this.currentEventFile) {
return;
}
const fileName = this.currentEventFile;
const filePath = path.join(this.eventsDir, fileName);
// Check if this file is already being uploaded
if (this.uploadingFiles.has(fileName)) {
logger.debug(`File ${fileName} is already being uploaded, skipping`);
return;
}
// Mark file as being uploaded
this.uploadingFiles.add(fileName);
try {
// Check if file exists before trying to read it
try {
await fs.access(filePath);
} catch (error) {
logger.debug(`File ${fileName} does not exist, may have been already processed`);
this.uploadingFiles.delete(fileName);
// Reset current file if it was this file
if (this.currentEventFile === fileName) {
this.currentEventFile = null;
this.currentFileStartTime = 0;
}
return;
}
// Check if file exists and has content
const fileContent = await fs.readFile(filePath, "utf-8");
const events = JSON.parse(fileContent);
if (events.length === 0) {
// No events to upload, just clean up
try {
await fs.unlink(filePath);
} catch (unlinkError) {
// File may have been already deleted
logger.debug(`File ${fileName} was already deleted during cleanup`);
}
this.currentEventFile = null;
this.uploadingFiles.delete(fileName);
return;
}
// Upload to S3
const uploadCommand = new PutObjectCommand({
Bucket: this.bucketName,
Key: fileName,
Body: fileContent,
ContentType: "application/json"
});
await s3Client.send(uploadCommand);
// Clean up local file - check if it still exists before unlinking
try {
await fs.access(filePath);
await fs.unlink(filePath);
} catch (unlinkError) {
// File may have been already deleted by another process
logger.debug(`File ${fileName} was already deleted during upload`);
}
logger.info(
`Uploaded ${fileName} to S3 with ${events.length} events`
);
// Reset for next file
this.currentEventFile = null;
this.currentFileStartTime = 0;
} catch (error) {
logger.error(
`Failed to upload ${fileName} to S3:`,
error
);
} finally {
// Always remove from uploading set
this.uploadingFiles.delete(fileName);
}
}
private generateEventFileName(): string {
const timestamp = new Date().toISOString().replace(/[:.]/g, "-");
const uuid = uuidv4().substring(0, 8);
return `events-${timestamp}-${uuid}.json`;
}
public async getUsage(
orgId: string,
featureId: FeatureId
): Promise<Usage | null> {
if (build !== "saas") {
return null;
}
const usageId = `${orgId}-${featureId}`;
try {
const [result] = await db
.select()
.from(usage)
.where(eq(usage.usageId, usageId))
.limit(1);
if (!result) {
// Let's create one if it doesn't exist, using upsert to handle race conditions
logger.info(
`Creating new usage record for ${orgId}/${featureId}`
);
const meterId = getFeatureMeterId(featureId);
try {
const [newUsage] = await db
.insert(usage)
.values({
usageId,
featureId,
orgId,
meterId,
latestValue: 0,
updatedAt: Math.floor(Date.now() / 1000)
})
.onConflictDoNothing()
.returning();
if (newUsage) {
return newUsage;
} else {
// Record was created by another process, fetch it
const [existingUsage] = await db
.select()
.from(usage)
.where(eq(usage.usageId, usageId))
.limit(1);
return existingUsage || null;
}
} catch (insertError) {
// Fallback: try to fetch existing record in case of any insert issues
logger.warn(
`Insert failed for ${orgId}/${featureId}, attempting to fetch existing record:`,
insertError
);
const [existingUsage] = await db
.select()
.from(usage)
.where(eq(usage.usageId, usageId))
.limit(1);
return existingUsage || null;
}
}
return result;
} catch (error) {
logger.error(
`Failed to get usage for ${orgId}/${featureId}:`,
error
);
throw error;
}
}
public async getUsageDaily(
orgId: string,
featureId: FeatureId
): Promise<Usage | null> {
if (build !== "saas") {
return null;
}
await this.updateDaily(orgId, featureId); // Ensure daily usage is updated
return this.getUsage(orgId, featureId);
}
public async forceUpload(): Promise<void> {
await this.uploadFileToS3();
}
public clearCache(): void {
this.cache.flushAll();
}
/**
* Scan the events directory for files older than about 90 seconds and upload them if not empty.
*/
private async uploadOldEventFiles(): Promise<void> {
if (!this.eventsDir || !this.bucketName) {
logger.warn("Stripe local file path or bucket name is not configured, skipping old event file upload.");
return;
}
try {
const files = await fs.readdir(this.eventsDir);
const now = Date.now();
for (const file of files) {
if (!file.endsWith(".json")) continue;
// Skip files that are already being uploaded
if (this.uploadingFiles.has(file)) {
logger.debug(`Skipping file ${file} as it's already being uploaded`);
continue;
}
const filePath = path.join(this.eventsDir, file);
try {
// Check if file still exists before processing
try {
await fs.access(filePath);
} catch (accessError) {
logger.debug(`File ${file} does not exist, skipping`);
continue;
}
const stat = await fs.stat(filePath);
const age = now - stat.mtimeMs;
if (age >= 90000) {
// 1.5 minutes - Mark as being uploaded
this.uploadingFiles.add(file);
try {
const fileContent = await fs.readFile(
filePath,
"utf-8"
);
const events = JSON.parse(fileContent);
if (Array.isArray(events) && events.length > 0) {
// Upload to S3
const uploadCommand = new PutObjectCommand({
Bucket: this.bucketName,
Key: file,
Body: fileContent,
ContentType: "application/json"
});
await s3Client.send(uploadCommand);
// Check if file still exists before unlinking
try {
await fs.access(filePath);
await fs.unlink(filePath);
} catch (unlinkError) {
logger.debug(`File ${file} was already deleted during interval upload`);
}
logger.info(
`Interval: Uploaded event file ${file} to S3 with ${events.length} events`
);
// If this was the current event file, reset it
if (this.currentEventFile === file) {
this.currentEventFile = null;
this.currentFileStartTime = 0;
}
} else {
// Remove empty file
try {
await fs.access(filePath);
await fs.unlink(filePath);
} catch (unlinkError) {
logger.debug(`Empty file ${file} was already deleted`);
}
}
} finally {
// Always remove from uploading set
this.uploadingFiles.delete(file);
}
}
} catch (err) {
logger.error(
`Interval: Error processing event file ${file}:`,
err
);
// Remove from uploading set on error
this.uploadingFiles.delete(file);
}
}
} catch (err) {
logger.error("Interval: Failed to scan for event files:", err);
}
}
public async checkLimitSet(orgId: string, kickSites = false, featureId?: FeatureId, usage?: Usage): Promise<boolean> {
if (build !== "saas") {
return false;
}
        // This method checks the current usage against the limits set for the organization
        // and, if kickSites is true, disconnects all of the org's sites
let hasExceededLimits = false;
try {
let orgLimits: Limit[] = [];
if (featureId) {
// Get all limits set for this organization
orgLimits = await db
.select()
.from(limits)
.where(
and(
eq(limits.orgId, orgId),
eq(limits.featureId, featureId)
)
);
} else {
// Get all limits set for this organization
orgLimits = await db
.select()
.from(limits)
.where(eq(limits.orgId, orgId));
}
if (orgLimits.length === 0) {
logger.debug(`No limits set for org ${orgId}`);
return false;
}
// Check each limit against current usage
for (const limit of orgLimits) {
let currentUsage: Usage | null;
if (usage) {
currentUsage = usage;
} else {
currentUsage = await this.getUsage(orgId, limit.featureId as FeatureId);
}
const usageValue = currentUsage?.instantaneousValue || currentUsage?.latestValue || 0;
logger.debug(`Current usage for org ${orgId} on feature ${limit.featureId}: ${usageValue}`);
logger.debug(`Limit for org ${orgId} on feature ${limit.featureId}: ${limit.value}`);
if (currentUsage && limit.value !== null && usageValue > limit.value) {
logger.debug(
`Org ${orgId} has exceeded limit for ${limit.featureId}: ` +
`${usageValue} > ${limit.value}`
);
hasExceededLimits = true;
break; // Exit early if any limit is exceeded
}
}
// If any limits are exceeded, disconnect all sites for this organization
if (hasExceededLimits && kickSites) {
logger.warn(`Disconnecting all sites for org ${orgId} due to exceeded limits`);
// Get all sites for this organization
const orgSites = await db
.select()
.from(sites)
.where(eq(sites.orgId, orgId));
// Mark all sites as offline and send termination messages
const siteUpdates = orgSites.map(site => site.siteId);
if (siteUpdates.length > 0) {
// Send termination messages to newt sites
for (const site of orgSites) {
if (site.type === "newt") {
const [newt] = await db
.select()
.from(newts)
.where(eq(newts.siteId, site.siteId))
.limit(1);
if (newt) {
const payload = {
type: `newt/wg/terminate`,
data: {
reason: "Usage limits exceeded"
}
};
// Don't await to prevent blocking
sendToClient(newt.newtId, payload).catch((error: any) => {
logger.error(
`Failed to send termination message to newt ${newt.newtId}:`,
error
);
});
}
}
}
logger.info(`Disconnected ${orgSites.length} sites for org ${orgId} due to exceeded limits`);
}
}
} catch (error) {
logger.error(`Error checking limits for org ${orgId}:`, error);
}
return hasExceededLimits;
}
}
export const usageService = new UsageService();
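// A usage sketch (illustrative orgId; FeatureId.USERS is referenced elsewhere in this file):
// check an org's user limit and disconnect its sites if it has been exceeded.
//
// async function enforceUserLimit(orgId: string) {
//     const exceeded = await usageService.checkLimitSet(orgId, true, FeatureId.USERS);
//     if (exceeded) {
//         logger.warn(`Org ${orgId} exceeded its user limit; its sites were disconnected`);
//     }
// }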

View File

@@ -0,0 +1,206 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import { isValidCIDR } from "@server/lib/validators";
import { getNextAvailableOrgSubnet } from "@server/lib/ip";
import {
actions,
apiKeyOrg,
apiKeys,
db,
domains,
Org,
orgDomains,
orgs,
roleActions,
roles,
userOrgs
} from "@server/db";
import { eq } from "drizzle-orm";
import { defaultRoleAllowedActions } from "@server/routers/role";
import { FeatureId, limitsService, sandboxLimitSet } from "@server/lib/private/billing";
import { createCustomer } from "@server/routers/private/billing/createCustomer";
import { usageService } from "@server/lib/private/billing/usageService";
export async function createUserAccountOrg(
userId: string,
userEmail: string
): Promise<{
success: boolean;
org?: {
orgId: string;
name: string;
subnet: string;
};
error?: string;
}> {
// const subnet = await getNextAvailableOrgSubnet();
const orgId = "org_" + userId;
const name = `${userEmail}'s Organization`;
// if (!isValidCIDR(subnet)) {
// return {
// success: false,
// error: "Invalid subnet format. Please provide a valid CIDR notation."
// };
// }
// // make sure the subnet is unique
// const subnetExists = await db
// .select()
// .from(orgs)
// .where(eq(orgs.subnet, subnet))
// .limit(1);
// if (subnetExists.length > 0) {
// return { success: false, error: `Subnet ${subnet} already exists` };
// }
// make sure the orgId is unique
const orgExists = await db
.select()
.from(orgs)
.where(eq(orgs.orgId, orgId))
.limit(1);
if (orgExists.length > 0) {
return {
success: false,
error: `Organization with ID ${orgId} already exists`
};
}
let error = "";
let org: Org | null = null;
await db.transaction(async (trx) => {
const allDomains = await trx
.select()
.from(domains)
.where(eq(domains.configManaged, true));
const newOrg = await trx
.insert(orgs)
.values({
orgId,
name,
// subnet
subnet: "100.90.128.0/24", // TODO: this should not be hardcoded - or can it be the same in all orgs?
createdAt: new Date().toISOString()
})
.returning();
if (newOrg.length === 0) {
error = "Failed to create organization";
trx.rollback();
return;
}
org = newOrg[0];
// Create admin role within the same transaction
const [insertedRole] = await trx
.insert(roles)
.values({
orgId: newOrg[0].orgId,
isAdmin: true,
name: "Admin",
description: "Admin role with the most permissions"
})
.returning({ roleId: roles.roleId });
if (!insertedRole || !insertedRole.roleId) {
error = "Failed to create Admin role";
trx.rollback();
return;
}
const roleId = insertedRole.roleId;
// Get all actions and create role actions
const actionIds = await trx.select().from(actions).execute();
if (actionIds.length > 0) {
await trx.insert(roleActions).values(
actionIds.map((action) => ({
roleId,
actionId: action.actionId,
orgId: newOrg[0].orgId
}))
);
}
if (allDomains.length) {
await trx.insert(orgDomains).values(
allDomains.map((domain) => ({
orgId: newOrg[0].orgId,
domainId: domain.domainId
}))
);
}
await trx.insert(userOrgs).values({
userId,
orgId: newOrg[0].orgId,
roleId: roleId,
isOwner: true
});
const memberRole = await trx
.insert(roles)
.values({
name: "Member",
description: "Members can only view resources",
orgId
})
.returning();
await trx.insert(roleActions).values(
defaultRoleAllowedActions.map((action) => ({
roleId: memberRole[0].roleId,
actionId: action,
orgId
}))
);
});
await limitsService.applyLimitSetToOrg(orgId, sandboxLimitSet);
if (!org) {
return { success: false, error: "Failed to create org" };
}
if (error) {
return {
success: false,
error: `Failed to create org: ${error}`
};
}
// make sure we have the stripe customer
const customerId = await createCustomer(orgId, userEmail);
if (customerId) {
        await usageService.updateDaily(orgId, FeatureId.USERS, 1, customerId); // Only 1 because we are creating the org
}
return {
org: {
orgId,
name,
// subnet
subnet: "100.90.128.0/24"
},
success: true
};
}

View File

@@ -0,0 +1,25 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import RedisStore from "@server/db/private/redisStore";
import { MemoryStore, Store } from "express-rate-limit";
export function createStore(): Store {
const rateLimitStore: Store = new RedisStore({
prefix: 'api-rate-limit', // Optional: customize Redis key prefix
skipFailedRequests: true, // Don't count failed requests
skipSuccessfulRequests: false, // Count successful requests
});
return rateLimitStore;
}
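// A wiring sketch, not taken from this repo: plugging the Redis-backed store into
// express-rate-limit (the import path and numeric values below are assumptions).
//
// import rateLimit from "express-rate-limit";
// import { createStore } from "@server/lib/private/rateLimitStore";
//
// const apiLimiter = rateLimit({
//     windowMs: 60 * 1000, // 1-minute window (example value)
//     limit: 100,          // max requests per window per key (example value)
//     standardHeaders: true,
//     legacyHeaders: false,
//     store: createStore()
// });
// // app.use("/api", apiLimiter);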

View File

@@ -0,0 +1,192 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import fs from "fs";
import yaml from "js-yaml";
import { privateConfigFilePath1 } from "@server/lib/consts";
import { z } from "zod";
import { colorsSchema } from "@server/lib/colorsSchema";
import { build } from "@server/build";
const portSchema = z.number().positive().gt(0).lte(65535);
export const privateConfigSchema = z
.object({
app: z.object({
region: z.string().optional().default("default"),
base_domain: z.string().optional()
}).optional().default({
region: "default"
}),
server: z.object({
encryption_key_path: z
.string()
.optional()
.default("./config/encryption.pem")
.pipe(z.string().min(8)),
resend_api_key: z.string().optional(),
reo_client_id: z.string().optional(),
}).optional().default({
encryption_key_path: "./config/encryption.pem"
}),
redis: z
.object({
host: z.string(),
port: portSchema,
password: z.string().optional(),
db: z.number().int().nonnegative().optional().default(0),
replicas: z
.array(
z.object({
host: z.string(),
port: portSchema,
password: z.string().optional(),
db: z.number().int().nonnegative().optional().default(0)
})
)
.optional()
// tls: z
// .object({
// reject_unauthorized: z
// .boolean()
// .optional()
// .default(true)
// })
// .optional()
})
.optional(),
gerbil: z
.object({
local_exit_node_reachable_at: z.string().optional().default("http://gerbil:3003")
})
.optional()
.default({}),
flags: z
.object({
enable_redis: z.boolean().optional(),
hide_supporter_key: z.boolean().optional()
})
.optional(),
branding: z
.object({
app_name: z.string().optional(),
background_image_path: z.string().optional(),
colors: z
.object({
light: colorsSchema.optional(),
dark: colorsSchema.optional()
})
.optional(),
logo: z
.object({
light_path: z.string().optional(),
dark_path: z.string().optional(),
auth_page: z
.object({
width: z.number().optional(),
height: z.number().optional()
})
.optional(),
navbar: z
.object({
width: z.number().optional(),
height: z.number().optional()
})
.optional()
})
.optional(),
favicon_path: z.string().optional(),
footer: z
.array(
z.object({
text: z.string(),
href: z.string().optional()
})
)
.optional(),
login_page: z
.object({
subtitle_text: z.string().optional(),
title_text: z.string().optional()
})
.optional(),
signup_page: z
.object({
subtitle_text: z.string().optional(),
title_text: z.string().optional()
})
.optional(),
resource_auth_page: z
.object({
show_logo: z.boolean().optional(),
hide_powered_by: z.boolean().optional(),
title_text: z.string().optional(),
subtitle_text: z.string().optional()
})
.optional(),
emails: z
.object({
signature: z.string().optional(),
colors: z
.object({
primary: z.string().optional()
})
.optional()
})
.optional()
})
.optional(),
stripe: z
.object({
secret_key: z.string(),
webhook_secret: z.string(),
s3Bucket: z.string(),
s3Region: z.string().default("us-east-1"),
localFilePath: z.string()
})
.optional(),
    });
export function readPrivateConfigFile() {
if (build == "oss") {
return {};
}
const loadConfig = (configPath: string) => {
try {
const yamlContent = fs.readFileSync(configPath, "utf8");
const config = yaml.load(yamlContent);
return config;
} catch (error) {
if (error instanceof Error) {
throw new Error(
`Error loading configuration file: ${error.message}`
);
}
throw error;
}
};
let environment: any;
if (fs.existsSync(privateConfigFilePath1)) {
environment = loadConfig(privateConfigFilePath1);
}
if (!environment) {
throw new Error(
"No private configuration file found."
);
}
return environment;
}
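// A minimal sketch (values are placeholders, not from the repo) of an object that
// satisfies privateConfigSchema; omitted optional sections fall back to their defaults.
//
// const example = privateConfigSchema.parse({
//     redis: { host: "127.0.0.1", port: 6379 },
//     stripe: {
//         secret_key: "sk_test_placeholder",
//         webhook_secret: "whsec_placeholder",
//         s3Bucket: "billing-events",
//         localFilePath: "./config/stripe-events"
//     }
// });
// // example.app.region === "default"
// // example.server.encryption_key_path === "./config/encryption.pem"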

View File

@@ -0,0 +1,124 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import { Resend } from "resend";
import config from "../config";
import logger from "@server/logger";
export enum AudienceIds {
General = "5cfbf99b-c592-40a9-9b8a-577a4681c158",
Subscribed = "870b43fd-387f-44de-8fc1-707335f30b20",
Churned = "f3ae92bd-2fdb-4d77-8746-2118afd62549"
}
const resend = new Resend(
config.getRawPrivateConfig().server.resend_api_key || "missing"
);
export default resend;
export async function moveEmailToAudience(
email: string,
audienceId: AudienceIds
) {
if (process.env.ENVIRONMENT !== "prod") {
logger.debug(`Skipping moving email ${email} to audience ${audienceId} in non-prod environment`);
return;
}
const { error, data } = await retryWithBackoff(async () => {
const { data, error } = await resend.contacts.create({
email,
unsubscribed: false,
audienceId
});
if (error) {
throw new Error(
`Error adding email ${email} to audience ${audienceId}: ${error}`
);
}
return { error, data };
});
if (error) {
logger.error(
`Error adding email ${email} to audience ${audienceId}: ${error}`
);
return;
}
if (data) {
logger.debug(
`Added email ${email} to audience ${audienceId} with contact ID ${data.id}`
        );
}
const otherAudiences = Object.values(AudienceIds).filter(
(id) => id !== audienceId
);
for (const otherAudienceId of otherAudiences) {
const { error, data } = await retryWithBackoff(async () => {
const { data, error } = await resend.contacts.remove({
email,
audienceId: otherAudienceId
});
if (error) {
throw new Error(
`Error removing email ${email} from audience ${otherAudienceId}: ${error}`
);
}
return { error, data };
});
if (error) {
logger.error(
`Error removing email ${email} from audience ${otherAudienceId}: ${error}`
);
}
if (data) {
logger.info(
`Removed email ${email} from audience ${otherAudienceId}`
);
}
}
}
type RetryOptions = {
retries?: number;
initialDelayMs?: number;
factor?: number;
};
export async function retryWithBackoff<T>(
fn: () => Promise<T>,
options: RetryOptions = {}
): Promise<T> {
const { retries = 5, initialDelayMs = 500, factor = 2 } = options;
let attempt = 0;
let delay = initialDelayMs;
while (true) {
try {
return await fn();
} catch (err) {
attempt++;
if (attempt > retries) throw err;
await new Promise((resolve) => setTimeout(resolve, delay));
delay *= factor;
}
}
}
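// A usage sketch (the URL and option values are illustrative): wrap any async call
// that may fail transiently; waits grow 250ms -> 500ms -> 1000ms before giving up.
//
// const health = await retryWithBackoff(
//     async () => {
//         const res = await fetch("https://api.example.com/health");
//         if (!res.ok) throw new Error(`HTTP ${res.status}`);
//         return res.json();
//     },
//     { retries: 3, initialDelayMs: 250, factor: 2 }
// );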

server/lib/private/s3.ts Normal file
View File

@@ -0,0 +1,19 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import { S3Client } from "@aws-sdk/client-s3";
import config from "@server/lib/config";
export const s3Client = new S3Client({
region: config.getRawPrivateConfig().stripe?.s3Region || "us-east-1",
});

View File

@@ -0,0 +1,28 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import Stripe from "stripe";
import config from "@server/lib/config";
import logger from "@server/logger";
import { build } from "@server/build";
let stripe: Stripe | undefined = undefined;
if (build == "saas") {
const stripeApiKey = config.getRawPrivateConfig().stripe?.secret_key;
if (!stripeApiKey) {
logger.error("Stripe secret key is not configured");
}
stripe = new Stripe(stripeApiKey!);
}
export default stripe;

View File

@@ -30,7 +30,7 @@ export const configSchema = z
anonymous_usage: z.boolean().optional().default(true)
})
.optional()
.default({})
.default({}),
}).optional().default({
log_level: "info",
save_logs: false,
@@ -130,7 +130,8 @@ export const configSchema = z
secret: z
.string()
.pipe(z.string().min(8))
.optional()
.optional(),
maxmind_db_path: z.string().optional()
}).optional().default({
integration_port: 3003,
external_port: 3000,

View File

@@ -13,8 +13,8 @@ export async function getValidCertificatesForDomainsHybrid(domains: Set<string>)
wildcard: boolean | null;
certFile: string | null;
keyFile: string | null;
expiresAt: Date | null;
updatedAt?: Date | null;
expiresAt: number | null;
updatedAt?: number | null;
}>
> {
if (domains.size === 0) {
@@ -72,8 +72,8 @@ export async function getValidCertificatesForDomains(domains: Set<string>): Prom
wildcard: boolean | null;
certFile: string | null;
keyFile: string | null;
expiresAt: Date | null;
updatedAt?: Date | null;
expiresAt: number | null;
updatedAt?: number | null;
}>
> {
return []; // stub

View File

@@ -1 +1,14 @@
export * from "./certificates";
import { build } from "@server/build";
// Import both modules
import * as certificateModule from "./certificates";
import * as privateCertificateModule from "./privateCertificates";
// Conditionally export Remote Certificates implementation based on build type
const remoteCertificatesImplementation = build === "oss" ? certificateModule : privateCertificateModule;
// Re-export all items from the selected implementation
export const {
getValidCertificatesForDomains,
getValidCertificatesForDomainsHybrid
} = remoteCertificatesImplementation;

View File

@@ -0,0 +1,116 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import config from "../config";
import { certificates, db } from "@server/db";
import { and, eq, isNotNull } from "drizzle-orm";
import { decryptData } from "../encryption";
import * as fs from "fs";
export async function getValidCertificatesForDomains(
domains: Set<string>
): Promise<
Array<{
id: number;
domain: string;
wildcard: boolean | null;
certFile: string | null;
keyFile: string | null;
expiresAt: number | null;
updatedAt?: number | null;
}>
> {
if (domains.size === 0) {
return [];
}
const domainArray = Array.from(domains);
    // TODO: add more foreign keys to make this query more efficient - we don't need to keep fetching every certificate
const validCerts = await db
.select({
id: certificates.certId,
domain: certificates.domain,
certFile: certificates.certFile,
keyFile: certificates.keyFile,
expiresAt: certificates.expiresAt,
updatedAt: certificates.updatedAt,
wildcard: certificates.wildcard
})
.from(certificates)
.where(
and(
eq(certificates.status, "valid"),
isNotNull(certificates.certFile),
isNotNull(certificates.keyFile)
)
);
// Filter certificates for the specified domains and if it is a wildcard then you can match on everything up to the first dot
const validCertsFiltered = validCerts.filter((cert) => {
return (
domainArray.includes(cert.domain) ||
(cert.wildcard &&
domainArray.some((domain) =>
domain.endsWith(`.${cert.domain}`)
))
);
});
const encryptionKeyPath = config.getRawPrivateConfig().server.encryption_key_path;
if (!fs.existsSync(encryptionKeyPath)) {
throw new Error(
"Encryption key file not found. Please generate one first."
);
}
const encryptionKeyHex = fs.readFileSync(encryptionKeyPath, "utf8").trim();
const encryptionKey = Buffer.from(encryptionKeyHex, "hex");
const validCertsDecrypted = validCertsFiltered.map((cert) => {
// Decrypt and save certificate file
const decryptedCert = decryptData(
cert.certFile!, // is not null from query
encryptionKey
);
// Decrypt and save key file
const decryptedKey = decryptData(cert.keyFile!, encryptionKey);
// Return only the certificate data without org information
return {
...cert,
certFile: decryptedCert,
keyFile: decryptedKey
};
});
return validCertsDecrypted;
}
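// Matching example for the wildcard filter above (illustrative domains): a valid
// wildcard certificate stored for "example.com" is returned for a request set
// containing "app.example.com", because "app.example.com" ends with ".example.com";
// a non-wildcard certificate only matches its exact domain.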
export async function getValidCertificatesForDomainsHybrid(
domains: Set<string>
): Promise<
Array<{
id: number;
domain: string;
wildcard: boolean | null;
certFile: string | null;
keyFile: string | null;
expiresAt: number | null;
updatedAt?: number | null;
}>
> {
return []; // stub
}

View File

@@ -17,3 +17,12 @@ export const tlsNameSchema = z
)
.transform((val) => val.toLowerCase());
export const privateNamespaceSubdomainSchema = z
.string()
.regex(
/^[a-zA-Z0-9-]+$/,
"Namespace subdomain can only contain letters, numbers, and hyphens"
)
.min(1, "Namespace subdomain must be at least 1 character long")
.max(32, "Namespace subdomain must be at most 32 characters long")
.transform((val) => val.toLowerCase());

View File

@@ -6,16 +6,15 @@ import * as yaml from "js-yaml";
import axios from "axios";
import { db, exitNodes } from "@server/db";
import { eq } from "drizzle-orm";
import { tokenManager } from "./tokenManager";
import {
getCurrentExitNodeId,
getTraefikConfig
} from "@server/routers/traefik";
import { tokenManager } from "../tokenManager";
import { getCurrentExitNodeId } from "@server/lib/exitNodes";
import { getTraefikConfig } from "./";
import {
getValidCertificatesForDomains,
getValidCertificatesForDomainsHybrid
} from "./remoteCertificates";
import { sendToExitNode } from "./exitNodeComms";
} from "../remoteCertificates";
import { sendToExitNode } from "../exitNodes";
import { build } from "@server/build";
export class TraefikConfigManager {
private intervalId: NodeJS.Timeout | null = null;
@@ -28,8 +27,8 @@ export class TraefikConfigManager {
string,
{
exists: boolean;
lastModified: Date | null;
expiresAt: Date | null;
lastModified: number | null;
expiresAt: number | null;
wildcard: boolean | null;
}
>();
@@ -115,8 +114,8 @@ export class TraefikConfigManager {
string,
{
exists: boolean;
lastModified: Date | null;
expiresAt: Date | null;
lastModified: number | null;
expiresAt: number | null;
wildcard: boolean;
}
>
@@ -217,7 +216,12 @@ export class TraefikConfigManager {
// Filter out domains covered by wildcard certificates
const domainsNeedingCerts = new Set<string>();
for (const domain of currentDomains) {
if (!isDomainCoveredByWildcard(domain, this.lastLocalCertificateState)) {
if (
!isDomainCoveredByWildcard(
domain,
this.lastLocalCertificateState
)
) {
domainsNeedingCerts.add(domain);
}
}
@@ -225,7 +229,12 @@ export class TraefikConfigManager {
// Fetch if domains needing certificates have changed
const lastDomainsNeedingCerts = new Set<string>();
for (const domain of this.lastKnownDomains) {
if (!isDomainCoveredByWildcard(domain, this.lastLocalCertificateState)) {
if (
!isDomainCoveredByWildcard(
domain,
this.lastLocalCertificateState
)
) {
lastDomainsNeedingCerts.add(domain);
}
}
@@ -255,7 +264,7 @@ export class TraefikConfigManager {
// Check if certificate is expiring soon (within 30 days)
if (localState.expiresAt) {
const daysUntilExpiry =
(localState.expiresAt.getTime() - Date.now()) /
(localState.expiresAt - Math.floor(Date.now() / 1000)) /
(1000 * 60 * 60 * 24);
if (daysUntilExpiry < 30) {
logger.info(
@@ -276,7 +285,7 @@ export class TraefikConfigManager {
public async HandleTraefikConfig(): Promise<void> {
try {
// Get all active domains for this exit node via HTTP call
const getTraefikConfig = await this.getTraefikConfig();
const getTraefikConfig = await this.internalGetTraefikConfig();
if (!getTraefikConfig) {
logger.error(
@@ -315,15 +324,20 @@ export class TraefikConfigManager {
wildcard: boolean | null;
certFile: string | null;
keyFile: string | null;
expiresAt: Date | null;
updatedAt?: Date | null;
expiresAt: number | null;
updatedAt?: number | null;
}> = [];
if (this.shouldFetchCertificates(domains)) {
// Filter out domains that are already covered by wildcard certificates
const domainsToFetch = new Set<string>();
for (const domain of domains) {
if (!isDomainCoveredByWildcard(domain, this.lastLocalCertificateState)) {
if (
!isDomainCoveredByWildcard(
domain,
this.lastLocalCertificateState
)
) {
domainsToFetch.add(domain);
} else {
logger.debug(
@@ -339,7 +353,7 @@ export class TraefikConfigManager {
await getValidCertificatesForDomainsHybrid(
domainsToFetch
);
} else {
} else {
validCertificates =
await getValidCertificatesForDomains(
domainsToFetch
@@ -428,7 +442,7 @@ export class TraefikConfigManager {
/**
* Get all domains currently in use from traefik config API
*/
private async getTraefikConfig(): Promise<{
private async internalGetTraefikConfig(): Promise<{
domains: Set<string>;
traefikConfig: any;
} | null> {
@@ -451,9 +465,13 @@ export class TraefikConfigManager {
traefikConfig = resp.data.data;
} else {
const currentExitNode = await getCurrentExitNodeId();
// logger.debug(`Fetching traefik config for exit node: ${currentExitNode}`);
traefikConfig = await getTraefikConfig(
// this is called by the local exit node to get its own config
currentExitNode,
config.getRawConfig().traefik.site_types
config.getRawConfig().traefik.site_types,
build == "oss", // filter out the namespace domains in open source
build != "oss" // generate the login pages on the cloud and hybrid
);
}
@@ -621,7 +639,8 @@ export class TraefikConfigManager {
}
// If no exact match, check for wildcard certificates that cover this domain
for (const [certDomain, certState] of this.lastLocalCertificateState) {
for (const [certDomain, certState] of this
.lastLocalCertificateState) {
if (certState.exists && certState.wildcard) {
// Check if this wildcard certificate covers the domain
if (domain.endsWith("." + certDomain)) {
@@ -671,8 +690,8 @@ export class TraefikConfigManager {
wildcard: boolean | null;
certFile: string | null;
keyFile: string | null;
expiresAt: Date | null;
updatedAt?: Date | null;
expiresAt: number | null;
updatedAt?: number | null;
}>
): Promise<void> {
const dynamicConfigPath =
@@ -758,7 +777,7 @@ export class TraefikConfigManager {
// Update local state tracking
this.lastLocalCertificateState.set(cert.domain, {
exists: true,
lastModified: new Date(),
lastModified: Math.floor(Date.now() / 1000),
expiresAt: cert.expiresAt,
wildcard: cert.wildcard
});
@@ -800,8 +819,8 @@ export class TraefikConfigManager {
cert: {
id: number;
domain: string;
expiresAt: Date | null;
updatedAt?: Date | null;
expiresAt: number | null;
updatedAt?: number | null;
},
certPath: string,
keyPath: string,
@@ -818,12 +837,12 @@ export class TraefikConfigManager {
}
// Read last update time from .last_update file
let lastUpdateTime: Date | null = null;
let lastUpdateTime: number | null = null;
try {
const lastUpdateStr = fs
.readFileSync(lastUpdatePath, "utf8")
.trim();
lastUpdateTime = new Date(lastUpdateStr);
lastUpdateTime = Math.floor(new Date(lastUpdateStr).getTime() / 1000);
} catch {
lastUpdateTime = null;
}
@@ -1004,7 +1023,12 @@ export class TraefikConfigManager {
// Find domains covered by wildcards
for (const domain of this.activeDomains) {
if (isDomainCoveredByWildcard(domain, this.lastLocalCertificateState)) {
if (
isDomainCoveredByWildcard(
domain,
this.lastLocalCertificateState
)
) {
domainsCoveredByWildcards.push(domain);
}
}
@@ -1025,7 +1049,13 @@ export class TraefikConfigManager {
/**
* Check if a domain is covered by existing wildcard certificates
*/
export function isDomainCoveredByWildcard(domain: string, lastLocalCertificateState: Map<string, { exists: boolean; wildcard: boolean | null }>): boolean {
export function isDomainCoveredByWildcard(
domain: string,
lastLocalCertificateState: Map<
string,
{ exists: boolean; wildcard: boolean | null }
>
): boolean {
for (const [certDomain, state] of lastLocalCertificateState) {
if (state.exists && state.wildcard) {
// If stored as example.com but is wildcard, check subdomains

View File

@@ -0,0 +1,665 @@
import { db, exitNodes, targetHealthCheck } from "@server/db";
import { and, eq, inArray, or, isNull, ne, isNotNull } from "drizzle-orm";
import logger from "@server/logger";
import config from "@server/lib/config";
import { orgs, resources, sites, Target, targets } from "@server/db";
import { build } from "@server/build";
import createPathRewriteMiddleware from "./middleware";
const redirectHttpsMiddlewareName = "redirect-to-https";
const badgerMiddlewareName = "badger";
function validatePathRewriteConfig(
path: string | null,
pathMatchType: string | null,
rewritePath: string | null,
rewritePathType: string | null
): { isValid: boolean; error?: string } {
// If no path matching is configured, no rewriting is possible
if (!path || !pathMatchType) {
if (rewritePath || rewritePathType) {
return {
isValid: false,
error: "Path rewriting requires path matching to be configured"
};
}
return { isValid: true };
}
if (rewritePathType !== "stripPrefix") {
if ((rewritePath && !rewritePathType) || (!rewritePath && rewritePathType)) {
return { isValid: false, error: "Both rewritePath and rewritePathType must be specified together" };
}
}
if (!rewritePath || !rewritePathType) {
return { isValid: true };
}
const validPathMatchTypes = ["exact", "prefix", "regex"];
if (!validPathMatchTypes.includes(pathMatchType)) {
return {
isValid: false,
error: `Invalid pathMatchType: ${pathMatchType}. Must be one of: ${validPathMatchTypes.join(", ")}`
};
}
const validRewritePathTypes = ["exact", "prefix", "regex", "stripPrefix"];
if (!validRewritePathTypes.includes(rewritePathType)) {
return {
isValid: false,
error: `Invalid rewritePathType: ${rewritePathType}. Must be one of: ${validRewritePathTypes.join(", ")}`
};
}
if (pathMatchType === "regex") {
try {
new RegExp(path);
} catch (e) {
return {
isValid: false,
error: `Invalid regex pattern in path: ${path}`
};
}
}
// Additional validation for stripPrefix
if (rewritePathType === "stripPrefix") {
if (pathMatchType !== "prefix") {
logger.warn(`stripPrefix rewrite type is most effective with prefix path matching. Current match type: ${pathMatchType}`);
}
}
return { isValid: true };
}
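// Illustrative results of validatePathRewriteConfig (argument values are examples):
//   validatePathRewriteConfig("/api", "prefix", "/v2", "prefix") -> { isValid: true }
//   validatePathRewriteConfig(null, null, "/v2", "prefix")       -> invalid: rewriting requires path matching
//   validatePathRewriteConfig("/api", "glob", "/v2", "prefix")   -> invalid: unknown pathMatchType
//   validatePathRewriteConfig("(", "regex", "/v2", "regex")      -> invalid: malformed regex pattern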
export async function getTraefikConfig(
exitNodeId: number,
siteTypes: string[],
filterOutNamespaceDomains = false,
generateLoginPageRouters = false
): Promise<any> {
// Define extended target type with site information
type TargetWithSite = Target & {
site: {
siteId: number;
type: string;
subnet: string | null;
exitNodeId: number | null;
online: boolean;
};
};
// Get resources with their targets and sites in a single optimized query
// Start from sites on this exit node, then join to targets and resources
const resourcesWithTargetsAndSites = await db
.select({
// Resource fields
resourceId: resources.resourceId,
fullDomain: resources.fullDomain,
ssl: resources.ssl,
http: resources.http,
proxyPort: resources.proxyPort,
protocol: resources.protocol,
subdomain: resources.subdomain,
domainId: resources.domainId,
enabled: resources.enabled,
stickySession: resources.stickySession,
tlsServerName: resources.tlsServerName,
setHostHeader: resources.setHostHeader,
enableProxy: resources.enableProxy,
headers: resources.headers,
// Target fields
targetId: targets.targetId,
targetEnabled: targets.enabled,
ip: targets.ip,
method: targets.method,
port: targets.port,
internalPort: targets.internalPort,
hcHealth: targetHealthCheck.hcHealth,
path: targets.path,
pathMatchType: targets.pathMatchType,
rewritePath: targets.rewritePath,
rewritePathType: targets.rewritePathType,
// Site fields
siteId: sites.siteId,
siteType: sites.type,
siteOnline: sites.online,
subnet: sites.subnet,
exitNodeId: sites.exitNodeId
})
.from(sites)
.innerJoin(targets, eq(targets.siteId, sites.siteId))
.innerJoin(resources, eq(resources.resourceId, targets.resourceId))
.leftJoin(
targetHealthCheck,
eq(targetHealthCheck.targetId, targets.targetId)
)
.where(
and(
eq(targets.enabled, true),
eq(resources.enabled, true),
or(eq(sites.exitNodeId, exitNodeId), isNull(sites.exitNodeId)),
or(
ne(targetHealthCheck.hcHealth, "unhealthy"), // Exclude unhealthy targets
isNull(targetHealthCheck.hcHealth) // Include targets with no health check record
),
inArray(sites.type, siteTypes),
config.getRawConfig().traefik.allow_raw_resources
? isNotNull(resources.http) // ignore the http check if allow_raw_resources is true
: eq(resources.http, true)
)
);
// Group by resource and include targets with their unique site data
const resourcesMap = new Map();
resourcesWithTargetsAndSites.forEach((row) => {
const resourceId = row.resourceId;
const targetPath = sanitizePath(row.path) || ""; // Handle null/undefined paths
const pathMatchType = row.pathMatchType || "";
const rewritePath = row.rewritePath || "";
const rewritePathType = row.rewritePathType || "";
// Create a unique key combining resourceId, path config, and rewrite config
const pathKey = [targetPath, pathMatchType, rewritePath, rewritePathType]
.filter(Boolean)
.join("-");
const mapKey = [resourceId, pathKey].filter(Boolean).join("-");
if (!resourcesMap.has(mapKey)) {
const validation = validatePathRewriteConfig(
row.path,
row.pathMatchType,
row.rewritePath,
row.rewritePathType
);
if (!validation.isValid) {
logger.error(`Invalid path rewrite configuration for resource ${resourceId}: ${validation.error}`);
return;
}
resourcesMap.set(mapKey, {
resourceId: row.resourceId,
fullDomain: row.fullDomain,
ssl: row.ssl,
http: row.http,
proxyPort: row.proxyPort,
protocol: row.protocol,
subdomain: row.subdomain,
domainId: row.domainId,
enabled: row.enabled,
stickySession: row.stickySession,
tlsServerName: row.tlsServerName,
setHostHeader: row.setHostHeader,
enableProxy: row.enableProxy,
targets: [],
headers: row.headers,
path: row.path, // the targets will all have the same path
pathMatchType: row.pathMatchType, // the targets will all have the same pathMatchType
rewritePath: row.rewritePath,
rewritePathType: row.rewritePathType
});
}
// Add target with its associated site data
resourcesMap.get(mapKey).targets.push({
resourceId: row.resourceId,
targetId: row.targetId,
ip: row.ip,
method: row.method,
port: row.port,
internalPort: row.internalPort,
enabled: row.targetEnabled,
rewritePath: row.rewritePath,
rewritePathType: row.rewritePathType,
site: {
siteId: row.siteId,
type: row.siteType,
subnet: row.subnet,
exitNodeId: row.exitNodeId,
online: row.siteOnline
}
});
});
// make sure we have at least one resource
if (resourcesMap.size === 0) {
return {};
}
const config_output: any = {
http: {
middlewares: {
[redirectHttpsMiddlewareName]: {
redirectScheme: {
scheme: "https"
}
}
}
}
};
// get the key and the resource
for (const [key, resource] of resourcesMap.entries()) {
const targets = resource.targets;
        const sanitizedKey = sanitizeForMiddlewareName(key);
        const routerName = `${sanitizedKey}-router`;
        const serviceName = `${sanitizedKey}-service`;
        const fullDomain = `${resource.fullDomain}`;
        const transportName = `${sanitizedKey}-transport`;
        const headersMiddlewareName = `${sanitizedKey}-headers-middleware`;
if (!resource.enabled) {
continue;
}
if (resource.http) {
if (!resource.domainId || !resource.fullDomain) {
continue;
}
// Initialize routers and services if they don't exist
if (!config_output.http.routers) {
config_output.http.routers = {};
}
if (!config_output.http.services) {
config_output.http.services = {};
}
const domainParts = fullDomain.split(".");
let wildCard;
if (domainParts.length <= 2) {
wildCard = `*.${domainParts.join(".")}`;
} else {
wildCard = `*.${domainParts.slice(1).join(".")}`;
}
if (!resource.subdomain) {
wildCard = resource.fullDomain;
}
const configDomain = config.getDomain(resource.domainId);
let certResolver: string, preferWildcardCert: boolean;
if (!configDomain) {
certResolver = config.getRawConfig().traefik.cert_resolver;
preferWildcardCert =
config.getRawConfig().traefik.prefer_wildcard_cert;
} else {
certResolver = configDomain.cert_resolver;
preferWildcardCert = configDomain.prefer_wildcard_cert;
}
let tls = {};
if (build == "oss") {
tls = {
certResolver: certResolver,
...(preferWildcardCert
? {
domains: [
{
main: wildCard
}
]
}
: {})
};
}
const additionalMiddlewares =
config.getRawConfig().traefik.additional_middlewares || [];
const routerMiddlewares = [
badgerMiddlewareName,
...additionalMiddlewares
];
// Handle path rewriting middleware
if (resource.rewritePath &&
resource.path &&
resource.pathMatchType &&
resource.rewritePathType) {
// Create a unique middleware name
const rewriteMiddlewareName = `rewrite-r${resource.resourceId}-${sanitizeForMiddlewareName(key)}`;
try {
const rewriteResult = createPathRewriteMiddleware(
rewriteMiddlewareName,
resource.path,
resource.pathMatchType,
resource.rewritePath,
resource.rewritePathType
);
// Initialize middlewares object if it doesn't exist
if (!config_output.http.middlewares) {
config_output.http.middlewares = {};
}
                    // Add the rewrite middleware(s) to the config
Object.assign(config_output.http.middlewares, rewriteResult.middlewares);
                    // Add the middleware(s) to the router middleware chain
if (rewriteResult.chain) {
// For chained middlewares (like stripPrefix + addPrefix)
routerMiddlewares.push(...rewriteResult.chain);
} else {
// Single middleware
routerMiddlewares.push(rewriteMiddlewareName);
}
logger.debug(`Created path rewrite middleware ${rewriteMiddlewareName}: ${resource.pathMatchType}(${resource.path}) -> ${resource.rewritePathType}(${resource.rewritePath})`);
} catch (error) {
logger.error(`Failed to create path rewrite middleware for resource ${resource.resourceId}: ${error}`);
}
}
// Handle custom headers middleware
if (resource.headers || resource.setHostHeader) {
const headersObj: { [key: string]: string } = {};
if (resource.headers) {
let headersArr: { name: string; value: string }[] = [];
try {
headersArr = JSON.parse(resource.headers) as {
name: string;
value: string;
}[];
} catch (e) {
logger.warn(`Failed to parse headers for resource ${resource.resourceId}: ${e}`);
}
headersArr.forEach((header) => {
headersObj[header.name] = header.value;
});
}
if (resource.setHostHeader) {
headersObj["Host"] = resource.setHostHeader;
}
if (Object.keys(headersObj).length > 0) {
if (!config_output.http.middlewares) {
config_output.http.middlewares = {};
}
config_output.http.middlewares[headersMiddlewareName] = {
headers: {
customRequestHeaders: headersObj
}
};
routerMiddlewares.push(headersMiddlewareName);
}
}
// Build routing rules
let rule = `Host(\`${fullDomain}\`)`;
let priority = 100;
if (resource.path && resource.pathMatchType) {
priority += 1;
// add path to rule based on match type
let path = resource.path;
// if the path doesn't start with a /, add it
if (!path.startsWith("/")) {
path = `/${path}`;
}
if (resource.pathMatchType === "exact") {
rule += ` && Path(\`${path}\`)`;
} else if (resource.pathMatchType === "prefix") {
rule += ` && PathPrefix(\`${path}\`)`;
} else if (resource.pathMatchType === "regex") {
rule += ` && PathRegexp(\`${resource.path}\`)`; // this is the raw path because it's a regex
}
}
config_output.http.routers![routerName] = {
entryPoints: [
resource.ssl
? config.getRawConfig().traefik.https_entrypoint
: config.getRawConfig().traefik.http_entrypoint
],
middlewares: routerMiddlewares,
service: serviceName,
rule: rule,
priority: priority,
...(resource.ssl ? { tls } : {})
};
if (resource.ssl) {
config_output.http.routers![routerName + "-redirect"] = {
entryPoints: [
config.getRawConfig().traefik.http_entrypoint
],
middlewares: [redirectHttpsMiddlewareName],
service: serviceName,
rule: rule,
priority: priority
};
}
config_output.http.services![serviceName] = {
loadBalancer: {
servers: (() => {
// Check if any sites are online
// THIS IS SO THAT THERE IS SOME IMMEDIATE FEEDBACK
// EVEN IF THE SITES HAVE NOT UPDATED YET FROM THE
// RECEIVE BANDWIDTH ENDPOINT.
// TODO: HOW TO HANDLE ^^^^^^ BETTER
const anySitesOnline = (
targets as TargetWithSite[]
).some((target: TargetWithSite) => target.site.online);
return (
(targets as TargetWithSite[])
.filter((target: TargetWithSite) => {
if (!target.enabled) {
return false;
}
// If any sites are online, exclude offline sites
if (anySitesOnline && !target.site.online) {
return false;
}
if (
target.site.type === "local" ||
target.site.type === "wireguard"
) {
if (
!target.ip ||
!target.port ||
!target.method
) {
return false;
}
} else if (target.site.type === "newt") {
if (
!target.internalPort ||
!target.method ||
!target.site.subnet
) {
return false;
}
}
return true;
})
.map((target: TargetWithSite) => {
if (
target.site.type === "local" ||
target.site.type === "wireguard"
) {
return {
url: `${target.method}://${target.ip}:${target.port}`
};
} else if (target.site.type === "newt") {
const ip =
target.site.subnet!.split("/")[0];
return {
url: `${target.method}://${ip}:${target.internalPort}`
};
}
})
// filter out duplicates
.filter(
(v, i, a) =>
a.findIndex(
(t) => t && v && t.url === v.url
) === i
)
);
})(),
...(resource.stickySession
? {
sticky: {
cookie: {
name: "p_sticky", // TODO: make this configurable via config.yml like other cookies
secure: resource.ssl,
httpOnly: true
}
}
}
: {})
}
};
// Add the serversTransport if TLS server name is provided
if (resource.tlsServerName) {
if (!config_output.http.serversTransports) {
config_output.http.serversTransports = {};
}
config_output.http.serversTransports![transportName] = {
serverName: resource.tlsServerName,
//unfortunately the following needs to be set. traefik doesn't merge the default serverTransport settings
// if defined in the static config and here. if not set, self-signed certs won't work
insecureSkipVerify: true
};
config_output.http.services![
serviceName
].loadBalancer.serversTransport = transportName;
}
} else {
// Non-HTTP (TCP/UDP) configuration
if (!resource.enableProxy || !resource.proxyPort) {
continue;
}
const protocol = resource.protocol.toLowerCase();
const port = resource.proxyPort;
if (!port) {
continue;
}
if (!config_output[protocol]) {
config_output[protocol] = {
routers: {},
services: {}
};
}
config_output[protocol].routers[routerName] = {
entryPoints: [`${protocol}-${port}`],
service: serviceName,
...(protocol === "tcp" ? { rule: "HostSNI(`*`)" } : {})
};
config_output[protocol].services[serviceName] = {
loadBalancer: {
servers: (() => {
// Check if any sites are online
const anySitesOnline = (
targets as TargetWithSite[]
).some((target: TargetWithSite) => target.site.online);
return (targets as TargetWithSite[])
.filter((target: TargetWithSite) => {
if (!target.enabled) {
return false;
}
// If any sites are online, exclude offline sites
if (anySitesOnline && !target.site.online) {
return false;
}
if (
target.site.type === "local" ||
target.site.type === "wireguard"
) {
if (!target.ip || !target.port) {
return false;
}
} else if (target.site.type === "newt") {
if (
!target.internalPort ||
!target.site.subnet
) {
return false;
}
}
return true;
})
.map((target: TargetWithSite) => {
if (
target.site.type === "local" ||
target.site.type === "wireguard"
) {
return {
address: `${target.ip}:${target.port}`
};
} else if (target.site.type === "newt") {
const ip =
target.site.subnet!.split("/")[0];
return {
address: `${ip}:${target.internalPort}`
};
}
});
})(),
...(resource.stickySession
? {
sticky: {
ipStrategy: {
depth: 0,
sourcePort: true
}
}
}
: {})
}
};
}
}
return config_output;
}
function sanitizePath(path: string | null | undefined): string | undefined {
if (!path) return undefined;
const trimmed = path.trim();
if (!trimmed) return undefined;
// Preserve path structure for rewriting, only warn if very long
if (trimmed.length > 1000) {
logger.warn(`Path exceeds 1000 characters: ${trimmed.substring(0, 100)}...`);
return trimmed.substring(0, 1000);
}
return trimmed;
}
function sanitizeForMiddlewareName(str: string): string {
// Replace any characters that aren't alphanumeric or dash with dash
// and remove consecutive dashes
return str.replace(/[^a-zA-Z0-9-]/g, '-').replace(/-+/g, '-').replace(/^-|-$/g, '');
}
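// A sketch of the dynamic config getTraefikConfig() produces for a single HTTP
// resource (names and values are illustrative; entrypoint and resolver names come
// from config.yml):
//
// {
//   http: {
//     middlewares: { "redirect-to-https": { redirectScheme: { scheme: "https" } } },
//     routers: {
//       "42-router": {
//         entryPoints: ["websecure"],
//         middlewares: ["badger"],
//         service: "42-service",
//         rule: "Host(`app.example.com`)",
//         priority: 100,
//         tls: { certResolver: "letsencrypt" }
//       }
//     },
//     services: {
//       "42-service": {
//         loadBalancer: { servers: [{ url: "http://100.90.128.2:8080" }] }
//       }
//     }
//   }
// }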

View File

@@ -0,0 +1,11 @@
import { build } from "@server/build";
// Import both modules
import * as traefikModule from "./getTraefikConfig";
import * as privateTraefikModule from "./privateGetTraefikConfig";
// Conditionally export Traefik configuration implementation based on build type
const traefikImplementation = build === "oss" ? traefikModule : privateTraefikModule;
// Re-export all items from the selected implementation
export const { getTraefikConfig } = traefikImplementation;

View File

@@ -0,0 +1,140 @@
import logger from "@server/logger";
export default function createPathRewriteMiddleware(
middlewareName: string,
path: string,
pathMatchType: string,
rewritePath: string,
rewritePathType: string
): { middlewares: { [key: string]: any }; chain?: string[] } {
const middlewares: { [key: string]: any } = {};
if (pathMatchType !== "regex" && !path.startsWith("/")) {
path = `/${path}`;
}
if (
rewritePathType !== "regex" &&
rewritePath !== "" &&
!rewritePath.startsWith("/")
) {
rewritePath = `/${rewritePath}`;
}
switch (rewritePathType) {
case "exact":
// Replace the path with the exact rewrite path
let exactPattern = `^${escapeRegex(path)}$`;
middlewares[middlewareName] = {
replacePathRegex: {
regex: exactPattern,
replacement: rewritePath
}
};
break;
case "prefix":
// Replace matched prefix with new prefix, preserve the rest
switch (pathMatchType) {
case "prefix":
middlewares[middlewareName] = {
replacePathRegex: {
regex: `^${escapeRegex(path)}(.*)`,
replacement: `${rewritePath}$1`
}
};
break;
case "exact":
middlewares[middlewareName] = {
replacePathRegex: {
regex: `^${escapeRegex(path)}$`,
replacement: rewritePath
}
};
break;
case "regex":
// For regex path matching with prefix rewrite, we assume the regex has capture groups
middlewares[middlewareName] = {
replacePathRegex: {
regex: path,
replacement: rewritePath
}
};
break;
}
break;
case "regex":
// Use advanced regex replacement - works with any match type
let regexPattern: string;
if (pathMatchType === "regex") {
regexPattern = path;
} else if (pathMatchType === "prefix") {
regexPattern = `^${escapeRegex(path)}(.*)`;
} else {
// exact
regexPattern = `^${escapeRegex(path)}$`;
}
middlewares[middlewareName] = {
replacePathRegex: {
regex: regexPattern,
replacement: rewritePath
}
};
break;
case "stripPrefix":
// Strip the matched prefix and optionally add new path
if (pathMatchType === "prefix") {
middlewares[middlewareName] = {
stripPrefix: {
prefixes: [path]
}
};
// If rewritePath is provided and not empty, add it as a prefix after stripping
if (rewritePath && rewritePath !== "" && rewritePath !== "/") {
const addPrefixMiddlewareName = `addprefix-${middlewareName.replace("rewrite-", "")}`;
middlewares[addPrefixMiddlewareName] = {
addPrefix: {
prefix: rewritePath
}
};
return {
middlewares,
chain: [middlewareName, addPrefixMiddlewareName]
};
}
} else {
// For exact and regex matches, use replacePathRegex to strip
let regexPattern: string;
if (pathMatchType === "exact") {
regexPattern = `^${escapeRegex(path)}$`;
} else if (pathMatchType === "regex") {
regexPattern = path;
} else {
regexPattern = `^${escapeRegex(path)}`;
}
const replacement = rewritePath || "/";
middlewares[middlewareName] = {
replacePathRegex: {
regex: regexPattern,
replacement: replacement
}
};
}
break;
default:
logger.error(`Unknown rewritePathType: ${rewritePathType}`);
throw new Error(`Unknown rewritePathType: ${rewritePathType}`);
}
return { middlewares };
}
function escapeRegex(string: string): string {
return string.replace(/[.*+?^${}()|[\]\\]/g, "\\$&");
}
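// Usage sketch of createPathRewriteMiddleware (names and paths are examples):
//
// // stripPrefix with a non-empty rewritePath yields a two-step chain:
// const stripped = createPathRewriteMiddleware("rewrite-r1-api", "/api", "prefix", "/v2", "stripPrefix");
// // stripped.middlewares == {
// //     "rewrite-r1-api": { stripPrefix: { prefixes: ["/api"] } },
// //     "addprefix-r1-api": { addPrefix: { prefix: "/v2" } }
// // }
// // stripped.chain == ["rewrite-r1-api", "addprefix-r1-api"]
//
// // A prefix-to-prefix rewrite yields a single replacePathRegex middleware:
// const prefixed = createPathRewriteMiddleware("rewrite-r2-api", "/api", "prefix", "/v2", "prefix");
// // prefixed.middlewares == { "rewrite-r2-api": { replacePathRegex: { regex: "^/api(.*)", replacement: "/v2$1" } } }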

View File

@@ -0,0 +1,692 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import { Request, Response } from "express";
import {
certificates,
db,
domainNamespaces,
exitNodes,
loginPage,
targetHealthCheck
} from "@server/db";
import { and, eq, inArray, or, isNull, ne, isNotNull } from "drizzle-orm";
import logger from "@server/logger";
import HttpCode from "@server/types/HttpCode";
import config from "@server/lib/config";
import { orgs, resources, sites, Target, targets } from "@server/db";
import { build } from "@server/build";
const redirectHttpsMiddlewareName = "redirect-to-https";
const redirectToRootMiddlewareName = "redirect-to-root";
const badgerMiddlewareName = "badger";
export async function getTraefikConfig(
exitNodeId: number,
siteTypes: string[],
filterOutNamespaceDomains = false,
generateLoginPageRouters = false
): Promise<any> {
// Define extended target type with site information
type TargetWithSite = Target & {
site: {
siteId: number;
type: string;
subnet: string | null;
exitNodeId: number | null;
online: boolean;
};
};
// Get resources with their targets and sites in a single optimized query
// Start from sites on this exit node, then join to targets and resources
const resourcesWithTargetsAndSites = await db
.select({
// Resource fields
resourceId: resources.resourceId,
fullDomain: resources.fullDomain,
ssl: resources.ssl,
http: resources.http,
proxyPort: resources.proxyPort,
protocol: resources.protocol,
subdomain: resources.subdomain,
domainId: resources.domainId,
enabled: resources.enabled,
stickySession: resources.stickySession,
tlsServerName: resources.tlsServerName,
setHostHeader: resources.setHostHeader,
enableProxy: resources.enableProxy,
headers: resources.headers,
// Target fields
targetId: targets.targetId,
targetEnabled: targets.enabled,
ip: targets.ip,
method: targets.method,
port: targets.port,
internalPort: targets.internalPort,
hcHealth: targetHealthCheck.hcHealth,
path: targets.path,
pathMatchType: targets.pathMatchType,
// Site fields
siteId: sites.siteId,
siteType: sites.type,
siteOnline: sites.online,
subnet: sites.subnet,
exitNodeId: sites.exitNodeId,
// Namespace
domainNamespaceId: domainNamespaces.domainNamespaceId,
// Certificate
certificateStatus: certificates.status
})
.from(sites)
.innerJoin(targets, eq(targets.siteId, sites.siteId))
.innerJoin(resources, eq(resources.resourceId, targets.resourceId))
.leftJoin(certificates, eq(certificates.domainId, resources.domainId))
.leftJoin(
targetHealthCheck,
eq(targetHealthCheck.targetId, targets.targetId)
)
.leftJoin(
domainNamespaces,
eq(domainNamespaces.domainId, resources.domainId)
) // THIS IS CLOUD ONLY TO FILTER OUT THE DOMAIN NAMESPACES IF REQUIRED
.where(
and(
eq(targets.enabled, true),
eq(resources.enabled, true),
// or(
eq(sites.exitNodeId, exitNodeId),
// isNull(sites.exitNodeId)
// ),
or(
ne(targetHealthCheck.hcHealth, "unhealthy"), // Exclude unhealthy targets
isNull(targetHealthCheck.hcHealth) // Include targets with no health check record
),
inArray(sites.type, siteTypes),
config.getRawConfig().traefik.allow_raw_resources
? isNotNull(resources.http) // ignore the http check if allow_raw_resources is true
: eq(resources.http, true)
)
);
// Group by resource and include targets with their unique site data
const resourcesMap = new Map();
resourcesWithTargetsAndSites.forEach((row) => {
const resourceId = row.resourceId;
const targetPath = sanitizePath(row.path) || ""; // Handle null/undefined paths
const pathMatchType = row.pathMatchType || "";
if (filterOutNamespaceDomains && row.domainNamespaceId) {
return;
}
// Create a unique key combining resourceId and path+pathMatchType
const pathKey = [targetPath, pathMatchType].filter(Boolean).join("-");
const mapKey = [resourceId, pathKey].filter(Boolean).join("-");
if (!resourcesMap.has(mapKey)) {
resourcesMap.set(mapKey, {
resourceId: row.resourceId,
fullDomain: row.fullDomain,
ssl: row.ssl,
http: row.http,
proxyPort: row.proxyPort,
protocol: row.protocol,
subdomain: row.subdomain,
domainId: row.domainId,
enabled: row.enabled,
stickySession: row.stickySession,
tlsServerName: row.tlsServerName,
setHostHeader: row.setHostHeader,
enableProxy: row.enableProxy,
certificateStatus: row.certificateStatus,
targets: [],
headers: row.headers,
path: row.path, // the targets will all have the same path
pathMatchType: row.pathMatchType // the targets will all have the same pathMatchType
});
}
// Add target with its associated site data
resourcesMap.get(mapKey).targets.push({
resourceId: row.resourceId,
targetId: row.targetId,
ip: row.ip,
method: row.method,
port: row.port,
internalPort: row.internalPort,
enabled: row.targetEnabled,
site: {
siteId: row.siteId,
type: row.siteType,
subnet: row.subnet,
exitNodeId: row.exitNodeId,
online: row.siteOnline
}
});
});
// make sure we have at least one resource
if (resourcesMap.size === 0) {
return {};
}
const config_output: any = {
http: {
middlewares: {
[redirectHttpsMiddlewareName]: {
redirectScheme: {
scheme: "https"
}
},
[redirectToRootMiddlewareName]: {
redirectRegex: {
regex: "^(https?)://([^/]+)(/.*)?",
replacement: "${1}://${2}/auth/org",
permanent: false
}
}
}
}
};
// get the key and the resource
for (const [key, resource] of resourcesMap.entries()) {
const targets = resource.targets;
const routerName = `${key}-router`;
const serviceName = `${key}-service`;
const fullDomain = `${resource.fullDomain}`;
const transportName = `${key}-transport`;
const headersMiddlewareName = `${key}-headers-middleware`;
if (!resource.enabled) {
continue;
}
if (resource.http) {
if (!resource.domainId) {
continue;
}
if (!resource.fullDomain) {
continue;
}
if (resource.certificateStatus !== "valid") {
logger.debug(
                    `Resource ${resource.resourceId} has certificate status ${resource.certificateStatus}`
);
continue;
}
// add routers and services empty objects if they don't exist
if (!config_output.http.routers) {
config_output.http.routers = {};
}
if (!config_output.http.services) {
config_output.http.services = {};
}
const domainParts = fullDomain.split(".");
let wildCard;
if (domainParts.length <= 2) {
wildCard = `*.${domainParts.join(".")}`;
} else {
wildCard = `*.${domainParts.slice(1).join(".")}`;
}
if (!resource.subdomain) {
wildCard = resource.fullDomain;
}
const configDomain = config.getDomain(resource.domainId);
let certResolver: string, preferWildcardCert: boolean;
if (!configDomain) {
certResolver = config.getRawConfig().traefik.cert_resolver;
preferWildcardCert =
config.getRawConfig().traefik.prefer_wildcard_cert;
} else {
certResolver = configDomain.cert_resolver;
preferWildcardCert = configDomain.prefer_wildcard_cert;
}
let tls = {};
if (build == "oss") {
tls = {
certResolver: certResolver,
...(preferWildcardCert
? {
domains: [
{
main: wildCard
}
]
}
: {})
};
}
const additionalMiddlewares =
config.getRawConfig().traefik.additional_middlewares || [];
const routerMiddlewares = [
badgerMiddlewareName,
...additionalMiddlewares
];
if (resource.headers || resource.setHostHeader) {
// if there are headers, parse them into an object
const headersObj: { [key: string]: string } = {};
if (resource.headers) {
let headersArr: { name: string; value: string }[] = [];
try {
headersArr = JSON.parse(resource.headers) as {
name: string;
value: string;
}[];
} catch (e) {
logger.warn(
`Failed to parse headers for resource ${resource.resourceId}: ${e}`
);
}
headersArr.forEach((header) => {
headersObj[header.name] = header.value;
});
}
if (resource.setHostHeader) {
headersObj["Host"] = resource.setHostHeader;
}
// check if the object is not empty
if (Object.keys(headersObj).length > 0) {
// Add the headers middleware
if (!config_output.http.middlewares) {
config_output.http.middlewares = {};
}
config_output.http.middlewares[headersMiddlewareName] = {
headers: {
customRequestHeaders: headersObj
}
};
routerMiddlewares.push(headersMiddlewareName);
}
}
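// Build the router rule: always match on Host, optionally narrowed by path;
// path-scoped routers get a slightly higher priority so they win over the
// plain host-only rule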
let rule = `Host(\`${fullDomain}\`)`;
let priority = 100;
if (resource.path && resource.pathMatchType) {
priority += 1;
// add path to rule based on match type
let path = resource.path;
// if the path doesn't start with a /, add it
if (!path.startsWith("/")) {
path = `/${path}`;
}
if (resource.pathMatchType === "exact") {
rule += ` && Path(\`${path}\`)`;
} else if (resource.pathMatchType === "prefix") {
rule += ` && PathPrefix(\`${path}\`)`;
} else if (resource.pathMatchType === "regex") {
rule += ` && PathRegexp(\`${resource.path}\`)`; // this is the raw path because it's a regex
}
}
config_output.http.routers![routerName] = {
entryPoints: [
resource.ssl
? config.getRawConfig().traefik.https_entrypoint
: config.getRawConfig().traefik.http_entrypoint
],
middlewares: routerMiddlewares,
service: serviceName,
rule: rule,
priority: priority,
...(resource.ssl ? { tls } : {})
};
if (resource.ssl) {
config_output.http.routers![routerName + "-redirect"] = {
entryPoints: [
config.getRawConfig().traefik.http_entrypoint
],
middlewares: [redirectHttpsMiddlewareName],
service: serviceName,
rule: rule,
priority: priority
};
}
config_output.http.services![serviceName] = {
loadBalancer: {
servers: (() => {
// Check if any sites are online
// This gives some immediate feedback even if the sites have not yet
// updated via the receive-bandwidth endpoint.
// TODO: handle this better
const anySitesOnline = (
targets as TargetWithSite[]
).some((target: TargetWithSite) => target.site.online);
return (
(targets as TargetWithSite[])
.filter((target: TargetWithSite) => {
if (!target.enabled) {
return false;
}
// If any sites are online, exclude offline sites
if (anySitesOnline && !target.site.online) {
return false;
}
if (
target.site.type === "local" ||
target.site.type === "wireguard"
) {
if (
!target.ip ||
!target.port ||
!target.method
) {
return false;
}
} else if (target.site.type === "newt") {
if (
!target.internalPort ||
!target.method ||
!target.site.subnet
) {
return false;
}
}
return true;
})
.map((target: TargetWithSite) => {
if (
target.site.type === "local" ||
target.site.type === "wireguard"
) {
return {
url: `${target.method}://${target.ip}:${target.port}`
};
} else if (target.site.type === "newt") {
const ip =
target.site.subnet!.split("/")[0];
return {
url: `${target.method}://${ip}:${target.internalPort}`
};
}
})
// filter out duplicates
.filter(
(v, i, a) =>
a.findIndex(
(t) => t && v && t.url === v.url
) === i
)
);
})(),
...(resource.stickySession
? {
sticky: {
cookie: {
name: "p_sticky", // TODO: make this configurable via config.yml like other cookies
secure: resource.ssl,
httpOnly: true
}
}
}
: {})
}
};
// Add the serversTransport if TLS server name is provided
if (resource.tlsServerName) {
if (!config_output.http.serversTransports) {
config_output.http.serversTransports = {};
}
config_output.http.serversTransports![transportName] = {
serverName: resource.tlsServerName,
// Unfortunately this needs to be set explicitly: Traefik does not merge the
// default serversTransport settings from the static config with the ones
// defined here, and without it self-signed certs won't work.
insecureSkipVerify: true
};
config_output.http.services![
serviceName
].loadBalancer.serversTransport = transportName;
}
} else {
// Non-HTTP (TCP/UDP) configuration
if (!resource.enableProxy) {
continue;
}
const protocol = resource.protocol.toLowerCase();
const port = resource.proxyPort;
if (!port) {
continue;
}
if (!config_output[protocol]) {
config_output[protocol] = {
routers: {},
services: {}
};
}
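// Traefik TCP routers must carry a rule, so use the HostSNI catch-all;
// routing is effectively done by the per-port entrypoint (e.g. tcp-8080),
// while UDP routers need no rule at all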
config_output[protocol].routers[routerName] = {
entryPoints: [`${protocol}-${port}`],
service: serviceName,
...(protocol === "tcp" ? { rule: "HostSNI(`*`)" } : {})
};
config_output[protocol].services[serviceName] = {
loadBalancer: {
servers: (() => {
// Check if any sites are online
const anySitesOnline = (
targets as TargetWithSite[]
).some((target: TargetWithSite) => target.site.online);
return (targets as TargetWithSite[])
.filter((target: TargetWithSite) => {
if (!target.enabled) {
return false;
}
// If any sites are online, exclude offline sites
if (anySitesOnline && !target.site.online) {
return false;
}
if (
target.site.type === "local" ||
target.site.type === "wireguard"
) {
if (!target.ip || !target.port) {
return false;
}
} else if (target.site.type === "newt") {
if (
!target.internalPort ||
!target.site.subnet
) {
return false;
}
}
return true;
})
.map((target: TargetWithSite) => {
if (
target.site.type === "local" ||
target.site.type === "wireguard"
) {
return {
address: `${target.ip}:${target.port}`
};
} else if (target.site.type === "newt") {
const ip =
target.site.subnet!.split("/")[0];
return {
address: `${ip}:${target.internalPort}`
};
}
});
})(),
...(resource.stickySession
? {
sticky: {
ipStrategy: {
depth: 0,
sourcePort: true
}
}
}
: {})
}
};
}
}
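// Optionally emit routers for the login/landing pages served by this exit
// node, backed by the internal server exposed below as "landing-service"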
if (generateLoginPageRouters) {
const exitNodeLoginPages = await db
.select({
loginPageId: loginPage.loginPageId,
fullDomain: loginPage.fullDomain,
exitNodeId: exitNodes.exitNodeId,
domainId: loginPage.domainId,
certificateStatus: certificates.status
})
.from(loginPage)
.innerJoin(
exitNodes,
eq(exitNodes.exitNodeId, loginPage.exitNodeId)
)
.leftJoin(
certificates,
eq(certificates.domainId, loginPage.domainId)
)
.where(eq(exitNodes.exitNodeId, exitNodeId));
if (exitNodeLoginPages.length > 0) {
if (!config_output.http.services) {
config_output.http.services = {};
}
if (!config_output.http.services["landing-service"]) {
config_output.http.services["landing-service"] = {
loadBalancer: {
servers: [
{
url: `http://${
config.getRawConfig().server
.internal_hostname
}:${config.getRawConfig().server.next_port}`
}
]
}
};
}
for (const lp of exitNodeLoginPages) {
if (!lp.domainId) {
continue;
}
if (!lp.fullDomain) {
continue;
}
if (lp.certificateStatus !== "valid") {
continue;
}
// auth-allowed:
// rule: "Host(`auth.pangolin.internal`) && (PathRegexp(`^/auth/resource/[0-9]+$`) || PathPrefix(`/_next`))"
// service: next-service
// entryPoints:
// - websecure
const routerName = `loginpage-${lp.loginPageId}`;
const fullDomain = `${lp.fullDomain}`;
if (!config_output.http.routers) {
config_output.http.routers = {};
}
config_output.http.routers![routerName + "-router"] = {
entryPoints: [
config.getRawConfig().traefik.https_entrypoint
],
service: "landing-service",
rule: `Host(\`${fullDomain}\`) && (PathRegexp(\`^/auth/resource/[^/]+$\`) || PathRegexp(\`^/auth/idp/[0-9]+/oidc/callback\`) || PathPrefix(\`/_next\`) || Path(\`/auth/org\`) || PathRegexp(\`^/__nextjs*\`))`,
priority: 203,
tls: {}
};
// auth-catchall:
// rule: "Host(`auth.example.com`)"
// middlewares:
// - redirect-to-root
// service: next-service
// entryPoints:
// - web
config_output.http.routers![routerName + "-catchall"] = {
entryPoints: [
config.getRawConfig().traefik.https_entrypoint
],
middlewares: [redirectToRootMiddlewareName],
service: "landing-service",
rule: `Host(\`${fullDomain}\`)`,
priority: 202,
tls: {}
};
// we need to add a redirect from http to https too
config_output.http.routers![routerName + "-redirect"] = {
entryPoints: [
config.getRawConfig().traefik.http_entrypoint
],
middlewares: [redirectHttpsMiddlewareName],
service: "landing-service",
rule: `Host(\`${fullDomain}\`)`,
priority: 201
};
}
}
}
return config_output;
}
function sanitizePath(path: string | null | undefined): string | undefined {
if (!path) return undefined;
// strip any non-alphanumeric characters from the path
// the path can't be too long either, so limit it to 50 characters
if (path.length > 50) {
path = path.substring(0, 50);
}
return path.replace(/[^a-zA-Z0-9]/g, "");
}

View File

@@ -1,5 +1,5 @@
import { assertEquals } from "@test/assert";
import { isDomainCoveredByWildcard } from "./traefikConfig";
import { isDomainCoveredByWildcard } from "./TraefikConfigManager";
function runTests() {
console.log('Running wildcard domain coverage tests...');

View File

@@ -163,6 +163,26 @@ export function validateHeaders(headers: string): boolean {
});
}
export function isSecondLevelDomain(domain: string): boolean {
if (!domain || typeof domain !== 'string') {
return false;
}
const trimmedDomain = domain.trim().toLowerCase();
// Split into parts
const parts = trimmedDomain.split('.');
// Should have exactly 2 parts for a second-level domain (e.g., "example.com")
if (parts.length !== 2) {
return false;
}
// Check if the TLD part is valid
const tld = parts[1].toUpperCase();
return validTlds.includes(tld);
}
const validTlds = [
"AAA",
"AARP",