Merge branch 'dev' into multi-role

This commit is contained in:
miloschwartz
2026-03-24 22:01:13 -07:00
266 changed files with 7813 additions and 5880 deletions

View File

@@ -48,5 +48,5 @@ export const tierMatrix: Record<TierFeature, Tier[]> = {
"enterprise"
],
[TierFeature.AutoProvisioning]: ["tier1", "tier3", "enterprise"],
[TierFeature.SshPam]: ["enterprise"]
[TierFeature.SshPam]: ["tier1", "tier3", "enterprise"]
};

View File

@@ -12,7 +12,7 @@ import {
import { FeatureId, getFeatureMeterId } from "./features";
import logger from "@server/logger";
import { build } from "@server/build";
import cache from "@server/lib/cache";
import cache from "#dynamic/lib/cache";
export function noop() {
if (build !== "saas") {
@@ -230,7 +230,7 @@ export class UsageService {
const orgIdToUse = await this.getBillingOrg(orgId);
const cacheKey = `customer_${orgIdToUse}_${featureId}`;
const cached = cache.get<string>(cacheKey);
const cached = await cache.get<string>(cacheKey);
if (cached) {
return cached;
@@ -253,7 +253,7 @@ export class UsageService {
const customerId = customer.customerId;
// Cache the result
cache.set(cacheKey, customerId, 300); // 5 minute TTL
await cache.set(cacheKey, customerId, 300); // 5 minute TTL
return customerId;
} catch (error) {

View File

@@ -107,7 +107,7 @@ export async function applyBlueprint({
[target],
matchingHealthcheck ? [matchingHealthcheck] : [],
result.proxyResource.protocol,
result.proxyResource.proxyPort
site.newt.version
);
}
}

View File

@@ -11,7 +11,7 @@ import {
userSiteResources
} from "@server/db";
import { sites } from "@server/db";
import { eq, and, ne, inArray } from "drizzle-orm";
import { eq, and, ne, inArray, or } from "drizzle-orm";
import { Config } from "./types";
import logger from "@server/logger";
import { getNextAvailableAliasAddress } from "../ip";
@@ -142,7 +142,10 @@ export async function updateClientResources(
.innerJoin(userOrgs, eq(users.userId, userOrgs.userId))
.where(
and(
inArray(users.username, resourceData.users),
or(
inArray(users.username, resourceData.users),
inArray(users.email, resourceData.users)
),
eq(userOrgs.orgId, orgId)
)
);
@@ -276,7 +279,10 @@ export async function updateClientResources(
.innerJoin(userOrgs, eq(users.userId, userOrgs.userId))
.where(
and(
inArray(users.username, resourceData.users),
or(
inArray(users.username, resourceData.users),
inArray(users.email, resourceData.users)
),
eq(userOrgs.orgId, orgId)
)
);

View File

@@ -212,7 +212,10 @@ export async function updateProxyResources(
} else {
// Update existing resource
const isLicensed = await isLicensedOrSubscribed(orgId, tierMatrix.maintencePage);
const isLicensed = await isLicensedOrSubscribed(
orgId,
tierMatrix.maintencePage
);
if (!isLicensed) {
resourceData.maintenance = undefined;
}
@@ -590,7 +593,10 @@ export async function updateProxyResources(
existingRule.action !== getRuleAction(rule.action) ||
existingRule.match !== rule.match.toUpperCase() ||
existingRule.value !==
getRuleValue(rule.match.toUpperCase(), rule.value) ||
getRuleValue(
rule.match.toUpperCase(),
rule.value
) ||
existingRule.priority !== intendedPriority
) {
validateRule(rule);
@@ -648,7 +654,10 @@ export async function updateProxyResources(
);
}
const isLicensed = await isLicensedOrSubscribed(orgId, tierMatrix.maintencePage);
const isLicensed = await isLicensedOrSubscribed(
orgId,
tierMatrix.maintencePage
);
if (!isLicensed) {
resourceData.maintenance = undefined;
}
@@ -935,7 +944,12 @@ async function syncUserResources(
.select()
.from(users)
.innerJoin(userOrgs, eq(users.userId, userOrgs.userId))
.where(and(eq(users.username, username), eq(userOrgs.orgId, orgId)))
.where(
and(
or(eq(users.username, username), eq(users.email, username)),
eq(userOrgs.orgId, orgId)
)
)
.limit(1);
if (!user) {

View File

@@ -69,7 +69,7 @@ export const AuthSchema = z.object({
.refine((roles) => !roles.includes("Admin"), {
error: "Admin role cannot be included in sso-roles"
}),
"sso-users": z.array(z.email()).optional().default([]),
"sso-users": z.array(z.string()).optional().default([]),
"whitelist-users": z.array(z.email()).optional().default([]),
"auto-login-idp": z.int().positive().optional()
});
@@ -335,7 +335,7 @@ export const ClientResourceSchema = z
.refine((roles) => !roles.includes("Admin"), {
error: "Admin role cannot be included in roles"
}),
users: z.array(z.email()).optional().default([]),
users: z.array(z.string()).optional().default([]),
machines: z.array(z.string()).optional().default([])
})
.refine(

View File

@@ -1,9 +1,9 @@
import NodeCache from "node-cache";
import logger from "@server/logger";
// Create cache with maxKeys limit to prevent memory leaks
// Create local cache with maxKeys limit to prevent memory leaks
// With ~10k requests/day and 5min TTL, 10k keys should be more than sufficient
export const cache = new NodeCache({
export const localCache = new NodeCache({
stdTTL: 3600,
checkperiod: 120,
maxKeys: 10000
@@ -11,10 +11,151 @@ export const cache = new NodeCache({
// Log cache statistics periodically for monitoring
setInterval(() => {
const stats = cache.getStats();
const stats = localCache.getStats();
logger.debug(
`Cache stats - Keys: ${stats.keys}, Hits: ${stats.hits}, Misses: ${stats.misses}, Hit rate: ${stats.hits > 0 ? ((stats.hits / (stats.hits + stats.misses)) * 100).toFixed(2) : 0}%`
`Local cache stats - Keys: ${stats.keys}, Hits: ${stats.hits}, Misses: ${stats.misses}, Hit rate: ${stats.hits > 0 ? ((stats.hits / (stats.hits + stats.misses)) * 100).toFixed(2) : 0}%`
);
}, 300000); // Every 5 minutes
/**
 * Adaptive cache abstraction. Intended to support Redis in multi-node
 * environments; this implementation currently always uses the in-process
 * local cache (see getCurrentBackend, which always reports "local"),
 * which is only suitable for single-node deployments.
 */
class AdaptiveCache {
    /**
     * Set a value in the cache
     * @param key - Cache key
     * @param value - Value to cache (will be JSON stringified for Redis)
     * @param ttl - Time to live in seconds (0 = no expiration)
     * @returns boolean indicating success
     */
    async set(key: string, value: any, ttl?: number): Promise<boolean> {
        const effectiveTtl = ttl === 0 ? undefined : ttl;
        // Use local cache as fallback or primary
        // NOTE(review): `effectiveTtl || 0` maps BOTH ttl === 0 and an
        // omitted ttl to node-cache's 0 ("no expiration"), so calls that
        // omit ttl bypass the stdTTL default (3600s) — confirm intended.
        const success = localCache.set(key, value, effectiveTtl || 0);
        if (success) {
            logger.debug(`Set key in local cache: ${key}`);
        }
        return success;
    }
    /**
     * Get a value from the cache
     * @param key - Cache key
     * @returns The cached value or undefined if not found
     */
    async get<T = any>(key: string): Promise<T | undefined> {
        // Use local cache as fallback or primary
        const value = localCache.get<T>(key);
        if (value !== undefined) {
            logger.debug(`Cache hit in local cache: ${key}`);
        } else {
            logger.debug(`Cache miss in local cache: ${key}`);
        }
        return value;
    }
    /**
     * Delete a value from the cache
     * @param key - Cache key or array of keys
     * @returns Number of deleted entries
     */
    async del(key: string | string[]): Promise<number> {
        const keys = Array.isArray(key) ? key : [key];
        let deletedCount = 0;
        // Use local cache as fallback or primary
        for (const k of keys) {
            // node-cache del() returns how many entries were removed (0/1 here)
            const success = localCache.del(k);
            if (success > 0) {
                deletedCount++;
                logger.debug(`Deleted key from local cache: ${k}`);
            }
        }
        return deletedCount;
    }
    /**
     * Check if a key exists in the cache
     * @param key - Cache key
     * @returns boolean indicating if key exists
     */
    async has(key: string): Promise<boolean> {
        // Use local cache as fallback or primary
        return localCache.has(key);
    }
    /**
     * Get multiple values from the cache
     * @param keys - Array of cache keys
     * @returns Array of values (undefined for missing keys)
     */
    async mget<T = any>(keys: string[]): Promise<(T | undefined)[]> {
        // Use local cache as fallback or primary
        return keys.map((key) => localCache.get<T>(key));
    }
    /**
     * Flush all keys from the cache
     */
    async flushAll(): Promise<void> {
        localCache.flushAll();
        logger.debug("Flushed local cache");
    }
    /**
     * Get cache statistics
     * Note: Only returns local cache stats, Redis stats are not included
     */
    getStats() {
        return localCache.getStats();
    }
    /**
     * Get the current cache backend being used
     * @returns "redis" if Redis is available and healthy, "local" otherwise
     */
    getCurrentBackend(): "redis" | "local" {
        // No Redis wiring exists in this class yet, so this is always "local".
        return "local";
    }
    /**
     * Take a key from the cache and delete it
     * @param key - Cache key
     * @returns The value or undefined if not found
     */
    async take<T = any>(key: string): Promise<T | undefined> {
        const value = await this.get<T>(key);
        if (value !== undefined) {
            await this.del(key);
        }
        return value;
    }
    /**
     * Get TTL (time to live) for a key
     * @param key - Cache key
     * @returns TTL in seconds, 0 if no expiration, -1 if key doesn't exist
     */
    getTtl(key: string): number {
        // node-cache getTtl(): undefined for a missing key, 0 for "no
        // expiry", otherwise an absolute epoch-ms expiry timestamp.
        const ttl = localCache.getTtl(key);
        if (ttl === undefined) {
            return -1;
        }
        // Convert the absolute expiry into remaining whole seconds; the
        // clamp makes a no-expiry key (ttl === 0) report 0, not a negative.
        return Math.max(0, Math.floor((ttl - Date.now()) / 1000));
    }
    /**
     * Get all keys from the cache
     * Note: Only returns local cache keys, Redis keys are not included
     */
    keys(): string[] {
        return localCache.keys();
    }
}
// Export singleton instance
export const cache = new AdaptiveCache();
export default cache;

View File

@@ -4,8 +4,12 @@ import { cleanUpOldLogs as cleanUpOldActionLogs } from "#dynamic/middlewares/log
import { cleanUpOldLogs as cleanUpOldRequestLogs } from "@server/routers/badger/logRequestAudit";
import { gt, or } from "drizzle-orm";
import { cleanUpOldFingerprintSnapshots } from "@server/routers/olm/fingerprintingUtils";
import { build } from "@server/build";
export function initLogCleanupInterval() {
if (build == "saas") { // skip log cleanup for saas builds
return null;
}
return setInterval(
async () => {
const orgsToClean = await db

View File

@@ -0,0 +1,20 @@
import semver from "semver";
/**
 * Decide whether a connected client is new enough to receive compressed
 * messages. Returns false for missing or non-semver version strings and
 * for any unexpected error while comparing versions.
 *
 * @param clientVersion - Version string reported by the client, if any
 * @param type - Which client binary reported the version
 * @returns true when the client meets the minimum version for compression
 */
export function canCompress(
    clientVersion: string | null | undefined,
    type: "newt" | "olm"
): boolean {
    // First client versions that shipped with compression support.
    const minVersions: Record<"newt" | "olm", string> = {
        newt: "1.10.3",
        olm: "1.4.3"
    };
    try {
        // Reject absent or non-semver version strings outright.
        if (!clientVersion || !semver.valid(clientVersion)) {
            return false;
        }
        return semver.gte(clientVersion, minVersions[type]);
    } catch {
        // Comparison failures are treated as "no compression".
        return false;
    }
}

View File

@@ -2,7 +2,7 @@ import path from "path";
import { fileURLToPath } from "url";
// This is a placeholder value replaced by the build process
export const APP_VERSION = "1.15.4";
export const APP_VERSION = "1.16.0";
export const __FILENAME = fileURLToPath(import.meta.url);
export const __DIRNAME = path.dirname(__FILENAME);

View File

@@ -85,9 +85,7 @@ export async function deleteOrgById(
deletedNewtIds.push(deletedNewt.newtId);
await trx
.delete(newtSessions)
.where(
eq(newtSessions.newtId, deletedNewt.newtId)
);
.where(eq(newtSessions.newtId, deletedNewt.newtId));
}
}
}
@@ -121,33 +119,38 @@ export async function deleteOrgById(
eq(clientSitesAssociationsCache.clientId, client.clientId)
);
}
await trx.delete(resources).where(eq(resources.orgId, orgId));
const allOrgDomains = await trx
.select()
.from(orgDomains)
.innerJoin(domains, eq(domains.domainId, orgDomains.domainId))
.innerJoin(domains, eq(orgDomains.domainId, domains.domainId))
.where(
and(
eq(orgDomains.orgId, orgId),
eq(domains.configManaged, false)
)
);
logger.info(`Found ${allOrgDomains.length} domains to delete`);
const domainIdsToDelete: string[] = [];
for (const orgDomain of allOrgDomains) {
const domainId = orgDomain.domains.domainId;
const orgCount = await trx
.select({ count: sql<number>`count(*)` })
const [orgCount] = await trx
.select({ count: count() })
.from(orgDomains)
.where(eq(orgDomains.domainId, domainId));
if (orgCount[0].count === 1) {
logger.info(`Found ${orgCount.count} orgs using domain ${domainId}`);
if (orgCount.count === 1) {
domainIdsToDelete.push(domainId);
}
}
logger.info(`Found ${domainIdsToDelete.length} domains to delete`);
if (domainIdsToDelete.length > 0) {
await trx
.delete(domains)
.where(inArray(domains.domainId, domainIdsToDelete));
}
await trx.delete(resources).where(eq(resources.orgId, orgId));
await usageService.add(orgId, FeatureId.ORGINIZATIONS, -1, trx); // here we are decreasing the org count BEFORE deleting the org because we need to still be able to get the org to get the billing org inside of here
@@ -231,15 +234,13 @@ export function sendTerminationMessages(result: DeleteOrgByIdResult): void {
);
}
for (const olmId of result.olmsToTerminate) {
sendTerminateClient(
0,
OlmErrorCodes.TERMINATED_REKEYED,
olmId
).catch((error) => {
logger.error(
"Failed to send termination message to olm:",
error
);
});
sendTerminateClient(0, OlmErrorCodes.TERMINATED_REKEYED, olmId).catch(
(error) => {
logger.error(
"Failed to send termination message to olm:",
error
);
}
);
}
}

View File

@@ -189,6 +189,46 @@ export const configSchema = z
.prefault({})
})
.optional(),
postgres_logs: z
.object({
connection_string: z
.string()
.optional()
.transform(getEnvOrYaml("POSTGRES_LOGS_CONNECTION_STRING")),
replicas: z
.array(
z.object({
connection_string: z.string()
})
)
.optional(),
pool: z
.object({
max_connections: z
.number()
.positive()
.optional()
.default(20),
max_replica_connections: z
.number()
.positive()
.optional()
.default(10),
idle_timeout_ms: z
.number()
.positive()
.optional()
.default(30000),
connection_timeout_ms: z
.number()
.positive()
.optional()
.default(5000)
})
.optional()
.prefault({})
})
.optional(),
traefik: z
.object({
http_entrypoint: z.string().optional().default("web"),

View File

@@ -478,6 +478,7 @@ async function handleMessagesForSiteClients(
}
if (isAdd) {
// TODO: if we are in jit mode here should we really be sending this?
await initPeerAddHandshake(
// this will kick off the add peer process for the client
client.clientId,
@@ -572,7 +573,7 @@ export async function updateClientSiteDestinations(
destinations: [
{
destinationIP: site.sites.subnet.split("/")[0],
destinationPort: site.sites.listenPort || 0
destinationPort: site.sites.listenPort || 1 // this satisfies gerbil for now but should be reevaluated
}
]
};
@@ -580,7 +581,7 @@ export async function updateClientSiteDestinations(
// add to the existing destinations
destinations.destinations.push({
destinationIP: site.sites.subnet.split("/")[0],
destinationPort: site.sites.listenPort || 0
destinationPort: site.sites.listenPort || 1 // this satisfies gerbil for now but should be reevaluated
});
}
@@ -670,7 +671,11 @@ async function handleSubnetProxyTargetUpdates(
`Adding ${targetsToAdd.length} subnet proxy targets for siteResource ${siteResource.siteResourceId}`
);
proxyJobs.push(
addSubnetProxyTargets(newt.newtId, targetsToAdd)
addSubnetProxyTargets(
newt.newtId,
targetsToAdd,
newt.version
)
);
}
@@ -706,7 +711,11 @@ async function handleSubnetProxyTargetUpdates(
`Removing ${targetsToRemove.length} subnet proxy targets for siteResource ${siteResource.siteResourceId}`
);
proxyJobs.push(
removeSubnetProxyTargets(newt.newtId, targetsToRemove)
removeSubnetProxyTargets(
newt.newtId,
targetsToRemove,
newt.version
)
);
}
@@ -1081,6 +1090,7 @@ async function handleMessagesForClientSites(
continue;
}
// TODO: if we are in jit mode here should we really be sending this?
await initPeerAddHandshake(
// this will kick off the add peer process for the client
client.clientId,
@@ -1147,7 +1157,7 @@ async function handleMessagesForClientResources(
// Add subnet proxy targets for each site
for (const [siteId, resources] of addedBySite.entries()) {
const [newt] = await trx
.select({ newtId: newts.newtId })
.select({ newtId: newts.newtId, version: newts.version })
.from(newts)
.where(eq(newts.siteId, siteId))
.limit(1);
@@ -1169,7 +1179,13 @@ async function handleMessagesForClientResources(
]);
if (targets.length > 0) {
proxyJobs.push(addSubnetProxyTargets(newt.newtId, targets));
proxyJobs.push(
addSubnetProxyTargets(
newt.newtId,
targets,
newt.version
)
);
}
try {
@@ -1218,7 +1234,7 @@ async function handleMessagesForClientResources(
// Remove subnet proxy targets for each site
for (const [siteId, resources] of removedBySite.entries()) {
const [newt] = await trx
.select({ newtId: newts.newtId })
.select({ newtId: newts.newtId, version: newts.version })
.from(newts)
.where(eq(newts.siteId, siteId))
.limit(1);
@@ -1241,7 +1257,11 @@ async function handleMessagesForClientResources(
if (targets.length > 0) {
proxyJobs.push(
removeSubnetProxyTargets(newt.newtId, targets)
removeSubnetProxyTargets(
newt.newtId,
targets,
newt.version
)
);
}

View File

@@ -1,16 +0,0 @@
export enum AudienceIds {
SignUps = "",
Subscribed = "",
Churned = "",
Newsletter = ""
}
let resend;
export default resend;
export async function moveEmailToAudience(
email: string,
audienceId: AudienceIds
) {
return;
}

40
server/lib/sanitize.ts Normal file
View File

@@ -0,0 +1,40 @@
/**
* Sanitize a string field before inserting into a database TEXT column.
*
* Two passes are applied:
*
 * 1. Lone UTF-16 surrogates — JavaScript strings can hold unpaired surrogates
* (e.g. \uD800 without a following \uDC00-\uDFFF codepoint). These are
* valid in JS but cannot be encoded as UTF-8, triggering
* `report_invalid_encoding` in SQLite / Postgres. They are replaced with
* the Unicode replacement character U+FFFD so the data is preserved as a
* visible signal that something was malformed.
*
 * 2. Null bytes and C0 control characters — SQLite stores TEXT as
* null-terminated C strings, so \x00 in a value causes
* `report_invalid_encoding`. Bots and scanners routinely inject null bytes
* into URLs (e.g. `/path\u0000.jpg`). All C0 control characters in the
* range \x00-\x1F are stripped except for the three that are legitimate in
* text payloads: HT (\x09), LF (\x0A), and CR (\x0D). DEL (\x7F) is also
* stripped.
*/
export function sanitizeString(value: string): string;
export function sanitizeString(
    value: string | null | undefined
): string | undefined;
export function sanitizeString(
    value: string | null | undefined
): string | undefined {
    // Absent values normalize to undefined so callers can pass through
    // nullable columns unchanged.
    if (value == null) {
        return undefined;
    }
    // Pass 1: replace unpaired UTF-16 surrogates (a high surrogate with no
    // low surrogate after it, or a low surrogate with no high surrogate
    // before it) with U+FFFD so the string is valid UTF-8.
    const withoutLoneSurrogates = value.replace(
        /[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?<![\uD800-\uDBFF])[\uDC00-\uDFFF]/g,
        "\uFFFD"
    );
    // Pass 2: strip NUL and other C0 control characters — keeping HT, LF,
    // and CR, which are legitimate in text — plus DEL.
    return withoutLoneSurrogates.replace(
        /[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g,
        ""
    );
}

434
server/lib/sshCA.ts Normal file
View File

@@ -0,0 +1,434 @@
import * as crypto from "crypto";
/**
* SSH CA "Server" - Pure TypeScript Implementation
*
* This module provides basic SSH Certificate Authority functionality using
* only Node.js built-in crypto module. No external dependencies or subprocesses.
*
* Usage:
* 1. generateCA() - Creates a new CA key pair, returns CA info including the
* TrustedUserCAKeys line to add to servers
* 2. signPublicKey() - Signs a user's public key with the CA, returns a certificate
*/
// ============================================================================
// SSH Wire Format Helpers
// ============================================================================
/**
* Encode a string in SSH wire format (4-byte length prefix + data)
*/
/**
 * Encode a string in SSH wire format: a 4-byte big-endian length prefix
 * followed by the payload bytes.
 */
function encodeString(data: Buffer | string): Buffer {
    const payload = Buffer.isBuffer(data) ? data : Buffer.from(data, "utf8");
    const header = Buffer.alloc(4);
    header.writeUInt32BE(payload.length, 0);
    return Buffer.concat([header, payload]);
}
/**
* Encode a uint32 in SSH wire format (big-endian)
*/
/**
 * Encode a uint32 as 4 big-endian bytes (SSH wire format).
 */
function encodeUInt32(value: number): Buffer {
    const out = Buffer.alloc(4);
    out.writeUInt32BE(value, 0);
    return out;
}
/**
* Encode a uint64 in SSH wire format (big-endian)
*/
/**
 * Encode a uint64 as 8 big-endian bytes (SSH wire format).
 */
function encodeUInt64(value: bigint): Buffer {
    const out = Buffer.alloc(8);
    out.writeBigUInt64BE(value, 0);
    return out;
}
/**
* Decode a string from SSH wire format at the given offset
* Returns the string buffer and the new offset
*/
/**
 * Decode an SSH wire-format string at `offset`: read the 4-byte big-endian
 * length, then slice out that many payload bytes.
 *
 * @returns the payload and the offset just past it
 */
function decodeString(
    data: Buffer,
    offset: number
): { value: Buffer; newOffset: number } {
    const length = data.readUInt32BE(offset);
    const start = offset + 4;
    const end = start + length;
    return { value: data.subarray(start, end), newOffset: end };
}
// ============================================================================
// SSH Public Key Parsing/Encoding
// ============================================================================
/**
* Parse an OpenSSH public key line (e.g., "ssh-ed25519 AAAA... comment")
*/
function parseOpenSSHPublicKey(pubKeyLine: string): {
    keyType: string;
    keyData: Buffer;
    comment: string;
} {
    // An OpenSSH public key line is "<type> <base64-blob> [comment...]".
    const fields = pubKeyLine.trim().split(/\s+/);
    if (fields.length < 2) {
        throw new Error("Invalid public key format");
    }
    const [keyType, blobBase64, ...rest] = fields;
    const keyData = Buffer.from(blobBase64, "base64");
    const comment = rest.join(" ") || "";
    // The blob itself starts with the key type string; make sure it agrees
    // with the type declared at the front of the line.
    const { value: embeddedType } = decodeString(keyData, 0);
    if (embeddedType.toString("utf8") !== keyType) {
        throw new Error(
            `Key type mismatch: ${embeddedType.toString("utf8")} vs ${keyType}`
        );
    }
    return { keyType, keyData, comment };
}
/**
* Encode an Ed25519 public key in OpenSSH format
*/
/**
 * Encode a raw Ed25519 public key as an OpenSSH key blob:
 * string "ssh-ed25519" followed by the string-wrapped 32-byte key.
 */
function encodeEd25519PublicKey(publicKey: Buffer): Buffer {
    const typeField = encodeString("ssh-ed25519");
    const keyField = encodeString(publicKey);
    return Buffer.concat([typeField, keyField]);
}
/**
* Format a public key blob as an OpenSSH public key line
*/
function formatOpenSSHPublicKey(keyBlob: Buffer, comment: string = ""): string {
const { value: keyType } = decodeString(keyBlob, 0);
const base64 = keyBlob.toString("base64");
return `${keyType.toString("utf8")} ${base64}${comment ? " " + comment : ""}`;
}
// ============================================================================
// SSH Certificate Building
// ============================================================================
interface CertificateOptions {
/** Serial number for the certificate */
serial?: bigint;
/** Certificate type: 1 = user, 2 = host */
certType?: number;
/** Key ID (usually username or identifier) */
keyId: string;
/** List of valid principals (usernames the cert is valid for) */
validPrincipals: string[];
/** Valid after timestamp (seconds since epoch) */
validAfter?: bigint;
/** Valid before timestamp (seconds since epoch) */
validBefore?: bigint;
/** Critical options (usually empty for user certs) */
criticalOptions?: Map<string, string>;
/** Extensions to enable */
extensions?: string[];
}
/**
* Build the extensions section of the certificate
*/
/**
 * Build the extensions section of the certificate. Extensions are encoded
 * as name/value pairs in lexicographic name order; boolean extensions
 * carry an empty value. The whole section is wrapped as one SSH string.
 */
function buildExtensions(extensions: string[]): Buffer {
    const pieces: Buffer[] = [];
    // Copy before sorting so the caller's array is not mutated.
    for (const name of [...extensions].sort()) {
        pieces.push(encodeString(name), encodeString(""));
    }
    return encodeString(Buffer.concat(pieces));
}
/**
* Build the critical options section
*/
/**
 * Build the critical options section.
 *
 * Each option is a name string followed by a "data" string whose contents
 * are themselves a string-encoded value — hence the deliberate nested
 * encodeString on the value below (per OpenSSH PROTOCOL.certkeys).
 * Options are emitted in sorted name order as the format requires.
 */
function buildCriticalOptions(options: Map<string, string>): Buffer {
    const sortedKeys = [...options.keys()].sort();
    const parts: Buffer[] = [];
    for (const key of sortedKeys) {
        parts.push(encodeString(key));
        // Nested encoding is intentional: value wrapped inside the data field.
        parts.push(encodeString(encodeString(options.get(key)!)));
    }
    return encodeString(Buffer.concat(parts));
}
/**
* Build the valid principals section
*/
/**
 * Build the valid principals section: each principal is an SSH string,
 * and the concatenated list is wrapped once more as a single string.
 */
function buildPrincipals(principals: string[]): Buffer {
    const encoded = principals.map((principal) => encodeString(principal));
    return encodeString(Buffer.concat(encoded));
}
/**
* Extract the raw Ed25519 public key from an OpenSSH public key blob
*/
/**
 * Extract the raw Ed25519 public key bytes from an OpenSSH key blob:
 * skip the leading key-type string, then read the key field.
 */
function extractEd25519PublicKey(keyBlob: Buffer): Buffer {
    const afterType = decodeString(keyBlob, 0).newOffset;
    return decodeString(keyBlob, afterType).value;
}
// ============================================================================
// CA Interface
// ============================================================================
export interface CAKeyPair {
/** CA private key in PEM format (keep this secret!) */
privateKeyPem: string;
/** CA public key in PEM format */
publicKeyPem: string;
/** CA public key in OpenSSH format (for TrustedUserCAKeys) */
publicKeyOpenSSH: string;
/** Raw CA public key bytes (Ed25519) */
publicKeyRaw: Buffer;
}
export interface SignedCertificate {
/** The certificate in OpenSSH format (save as id_ed25519-cert.pub or similar) */
certificate: string;
/** The certificate type string */
certType: string;
/** Serial number */
serial: bigint;
/** Key ID */
keyId: string;
/** Valid principals */
validPrincipals: string[];
/** Valid from timestamp */
validAfter: Date;
/** Valid until timestamp */
validBefore: Date;
}
// ============================================================================
// Main Functions
// ============================================================================
/**
* Generate a new SSH Certificate Authority key pair.
*
* Returns the CA keys and the line to add to /etc/ssh/sshd_config:
* TrustedUserCAKeys /etc/ssh/ca.pub
*
* Then save the publicKeyOpenSSH to /etc/ssh/ca.pub on the server.
*
* @param comment - Optional comment for the CA public key
* @returns CA key pair and configuration info
*/
export function generateCA(comment: string = "pangolin-ssh-ca"): CAKeyPair {
// Generate Ed25519 key pair
const { publicKey, privateKey } = crypto.generateKeyPairSync("ed25519", {
publicKeyEncoding: { type: "spki", format: "pem" },
privateKeyEncoding: { type: "pkcs8", format: "pem" }
});
// Get raw public key bytes
const pubKeyObj = crypto.createPublicKey(publicKey);
const rawPubKey = pubKeyObj.export({ type: "spki", format: "der" });
// Ed25519 SPKI format: 12 byte header + 32 byte key
const ed25519PubKey = rawPubKey.subarray(rawPubKey.length - 32);
// Create OpenSSH format public key
const pubKeyBlob = encodeEd25519PublicKey(ed25519PubKey);
const publicKeyOpenSSH = formatOpenSSHPublicKey(pubKeyBlob, comment);
return {
privateKeyPem: privateKey,
publicKeyPem: publicKey,
publicKeyOpenSSH,
publicKeyRaw: ed25519PubKey
};
}
// ============================================================================
// Helper Functions
// ============================================================================
/**
* Get and decrypt the SSH CA keys for an organization.
*
* @param orgId - Organization ID
* @param decryptionKey - Key to decrypt the CA private key (typically server.secret from config)
* @returns CA key pair or null if not found
*/
export async function getOrgCAKeys(
    orgId: string,
    decryptionKey: string
): Promise<CAKeyPair | null> {
    // Imported lazily — NOTE(review): presumably to avoid a circular
    // dependency at module load time; confirm against the module graph.
    const { db, orgs } = await import("@server/db");
    const { eq } = await import("drizzle-orm");
    const { decrypt } = await import("@server/lib/crypto");
    // Fetch the stored (encrypted) CA material for this org.
    const [org] = await db
        .select({
            sshCaPrivateKey: orgs.sshCaPrivateKey,
            sshCaPublicKey: orgs.sshCaPublicKey
        })
        .from(orgs)
        .where(eq(orgs.orgId, orgId))
        .limit(1);
    // No org row, or no CA has been provisioned for it yet.
    if (!org || !org.sshCaPrivateKey || !org.sshCaPublicKey) {
        return null;
    }
    const privateKeyPem = decrypt(org.sshCaPrivateKey, decryptionKey);
    // Extract the raw public key bytes from the stored OpenSSH line.
    const { keyData } = parseOpenSSHPublicKey(org.sshCaPublicKey);
    const { newOffset } = decodeString(keyData, 0); // Skip key type
    const { value: publicKeyRaw } = decodeString(keyData, newOffset);
    // Re-derive the PEM public key from the private key so every
    // representation in CAKeyPair is populated.
    const pubKeyObj = crypto.createPublicKey({
        key: privateKeyPem,
        format: "pem"
    });
    const publicKeyPem = pubKeyObj.export({
        type: "spki",
        format: "pem"
    }) as string;
    return {
        privateKeyPem,
        publicKeyPem,
        publicKeyOpenSSH: org.sshCaPublicKey,
        publicKeyRaw
    };
}
/**
* Sign a user's SSH public key with the CA, producing a certificate.
*
* The resulting certificate should be saved alongside the user's private key
* with a -cert.pub suffix. For example:
* - Private key: ~/.ssh/id_ed25519
* - Certificate: ~/.ssh/id_ed25519-cert.pub
*
* @param caPrivateKeyPem - CA private key in PEM format
* @param userPublicKeyLine - User's public key in OpenSSH format
* @param options - Certificate options (principals, validity, etc.)
* @returns Signed certificate
*/
export function signPublicKey(
    caPrivateKeyPem: string,
    userPublicKeyLine: string,
    options: CertificateOptions
): SignedCertificate {
    // Parse the user's public key line into its type and blob.
    const { keyType, keyData } = parseOpenSSHPublicKey(userPublicKeyLine);
    // Map each supported plain key type onto its certificate counterpart.
    const certTypeByKeyType: Record<string, string> = {
        "ssh-ed25519": "ssh-ed25519-cert-v01@openssh.com",
        "ssh-rsa": "ssh-rsa-cert-v01@openssh.com",
        "ecdsa-sha2-nistp256": "ecdsa-sha2-nistp256-cert-v01@openssh.com",
        "ecdsa-sha2-nistp384": "ecdsa-sha2-nistp384-cert-v01@openssh.com",
        "ecdsa-sha2-nistp521": "ecdsa-sha2-nistp521-cert-v01@openssh.com"
    };
    const certTypeString = certTypeByKeyType[keyType];
    if (!certTypeString) {
        throw new Error(`Unsupported key type: ${keyType}`);
    }
    // Derive the CA public key blob from its private key. The CA is always
    // Ed25519 here: its SPKI DER ends with the raw 32-byte key.
    const caPrivKey = crypto.createPrivateKey(caPrivateKeyPem);
    const caPubKey = crypto.createPublicKey(caPrivKey);
    const caRawPubKey = caPubKey.export({ type: "spki", format: "der" });
    const caEd25519PubKey = caRawPubKey.subarray(caRawPubKey.length - 32);
    const caPubKeyBlob = encodeEd25519PublicKey(caEd25519PubKey);
    // Defaults: user certificate, valid from 1 minute ago to 1 year out.
    const serial = options.serial ?? BigInt(Date.now());
    const certType = options.certType ?? 1; // 1 = user cert
    const now = BigInt(Math.floor(Date.now() / 1000));
    const validAfter = options.validAfter ?? now - 60n; // 1 minute ago
    const validBefore = options.validBefore ?? now + 86400n * 365n; // 1 year
    // Default extensions for user certificates.
    const defaultExtensions = [
        "permit-X11-forwarding",
        "permit-agent-forwarding",
        "permit-port-forwarding",
        "permit-pty",
        "permit-user-rc"
    ];
    const extensions = options.extensions ?? defaultExtensions;
    const criticalOptions = options.criticalOptions ?? new Map();
    // Random nonce included in the signed body.
    const nonce = crypto.randomBytes(32);
    // The certificate embeds the user key's fields verbatim: strip the
    // leading key-type string and keep the remainder of the blob. This is
    // the same operation for every supported key type (the original code
    // had two byte-identical if/else branches here — collapsed).
    const { newOffset: keyBodyOffset } = decodeString(keyData, 0);
    const userKeyPortion = keyData.subarray(keyBodyOffset);
    // Assemble the to-be-signed certificate body per PROTOCOL.certkeys.
    const certBody = Buffer.concat([
        encodeString(certTypeString),
        encodeString(nonce),
        userKeyPortion,
        encodeUInt64(serial),
        encodeUInt32(certType),
        encodeString(options.keyId),
        buildPrincipals(options.validPrincipals),
        encodeUInt64(validAfter),
        encodeUInt64(validBefore),
        buildCriticalOptions(criticalOptions),
        buildExtensions(extensions),
        encodeString(""), // reserved
        encodeString(caPubKeyBlob) // signature key (CA public key)
    ]);
    // Ed25519 signs the raw body; Node requires a null digest algorithm.
    const signature = crypto.sign(null, certBody, caPrivKey);
    // Full signature blob: algorithm name + raw signature.
    const signatureBlob = Buffer.concat([
        encodeString("ssh-ed25519"),
        encodeString(signature)
    ]);
    // Complete certificate = signed body + signature blob.
    const certificate = Buffer.concat([certBody, encodeString(signatureBlob)]);
    // Format as a one-line OpenSSH certificate.
    const certLine = `${certTypeString} ${certificate.toString("base64")} ${options.keyId}`;
    return {
        certificate: certLine,
        certType: certTypeString,
        serial,
        keyId: options.keyId,
        validPrincipals: options.validPrincipals,
        validAfter: new Date(Number(validAfter) * 1000),
        validBefore: new Date(Number(validBefore) * 1000)
    };
}

View File

@@ -218,10 +218,11 @@ export class TraefikConfigManager {
return true;
}
// Fetch if it's been more than 24 hours (for renewals)
const dayInMs = 24 * 60 * 60 * 1000;
const timeSinceLastFetch =
Date.now() - this.lastCertificateFetch.getTime();
// Fetch if it's been more than 24 hours (daily routine check)
if (timeSinceLastFetch > dayInMs) {
logger.info("Fetching certificates due to 24-hour renewal check");
return true;
@@ -265,7 +266,7 @@ export class TraefikConfigManager {
return true;
}
// Check if any local certificates are missing or appear to be outdated
// Check if any local certificates are missing (needs immediate fetch)
for (const domain of domainsNeedingCerts) {
const localState = this.lastLocalCertificateState.get(domain);
if (!localState || !localState.exists) {
@@ -274,17 +275,46 @@ export class TraefikConfigManager {
);
return true;
}
}
// Check if certificate is expiring soon (within 30 days)
if (localState.expiresAt) {
const nowInSeconds = Math.floor(Date.now() / 1000);
const secondsUntilExpiry = localState.expiresAt - nowInSeconds;
const daysUntilExpiry = secondsUntilExpiry / (60 * 60 * 24);
if (daysUntilExpiry < 30) {
logger.info(
`Fetching certificates due to upcoming expiry for ${domain} (${Math.round(daysUntilExpiry)} days remaining)`
);
return true;
// For expiry checks, throttle to every 6 hours to avoid querying the
// API/DB on every monitor loop. The certificate-service renews certs
// 45 days before expiry, so checking every 6 hours is plenty frequent
// to pick up renewed certs promptly.
const renewalCheckIntervalMs = 6 * 60 * 60 * 1000; // 6 hours
if (timeSinceLastFetch > renewalCheckIntervalMs) {
// Check non-wildcard certs for expiry (within 45 days to match
// the server-side renewal window in certificate-service)
for (const domain of domainsNeedingCerts) {
const localState = this.lastLocalCertificateState.get(domain);
if (localState?.expiresAt) {
const nowInSeconds = Math.floor(Date.now() / 1000);
const secondsUntilExpiry =
localState.expiresAt - nowInSeconds;
const daysUntilExpiry = secondsUntilExpiry / (60 * 60 * 24);
if (daysUntilExpiry < 45) {
logger.info(
`Fetching certificates due to upcoming expiry for ${domain} (${Math.round(daysUntilExpiry)} days remaining)`
);
return true;
}
}
}
// Also check wildcard certificates for expiry. These are not
// included in domainsNeedingCerts since their subdomains are
// filtered out, so we must check them separately.
for (const [certDomain, state] of this.lastLocalCertificateState) {
if (state.exists && state.wildcard && state.expiresAt) {
const nowInSeconds = Math.floor(Date.now() / 1000);
const secondsUntilExpiry = state.expiresAt - nowInSeconds;
const daysUntilExpiry = secondsUntilExpiry / (60 * 60 * 24);
if (daysUntilExpiry < 45) {
logger.info(
`Fetching certificates due to upcoming expiry for wildcard cert ${certDomain} (${Math.round(daysUntilExpiry)} days remaining)`
);
return true;
}
}
}
}
@@ -361,6 +391,26 @@ export class TraefikConfigManager {
}
}
// Also include wildcard cert base domains that are
// expiring or expired so they get re-fetched even though
// their subdomains were filtered out above.
for (const [certDomain, state] of this
.lastLocalCertificateState) {
if (state.exists && state.wildcard && state.expiresAt) {
const nowInSeconds = Math.floor(Date.now() / 1000);
const secondsUntilExpiry =
state.expiresAt - nowInSeconds;
const daysUntilExpiry =
secondsUntilExpiry / (60 * 60 * 24);
if (daysUntilExpiry < 45) {
domainsToFetch.add(certDomain);
logger.info(
`Including expiring wildcard cert domain ${certDomain} in fetch (${Math.round(daysUntilExpiry)} days remaining)`
);
}
}
}
if (domainsToFetch.size > 0) {
// Get valid certificates for domains not covered by wildcards
validCertificates =
@@ -507,11 +557,18 @@ export class TraefikConfigManager {
config.getRawConfig().server
.session_cookie_name,
// deprecated
accessTokenQueryParam:
config.getRawConfig().server
.resource_access_token_param,
accessTokenIdHeader:
config.getRawConfig().server
.resource_access_token_headers.id,
accessTokenHeader:
config.getRawConfig().server
.resource_access_token_headers.token,
resourceSessionRequestParam:
config.getRawConfig().server
.resource_session_request_param

View File

@@ -14,7 +14,7 @@ import logger from "@server/logger";
import config from "@server/lib/config";
import { resources, sites, Target, targets } from "@server/db";
import createPathRewriteMiddleware from "./middleware";
import { sanitize, validatePathRewriteConfig } from "./utils";
import { sanitize, encodePath, validatePathRewriteConfig } from "./utils";
const redirectHttpsMiddlewareName = "redirect-to-https";
const badgerMiddlewareName = "badger";
@@ -44,7 +44,7 @@ export async function getTraefikConfig(
filterOutNamespaceDomains = false, // UNUSED BUT USED IN PRIVATE
generateLoginPageRouters = false, // UNUSED BUT USED IN PRIVATE
allowRawResources = true,
allowMaintenancePage = true, // UNUSED BUT USED IN PRIVATE
allowMaintenancePage = true // UNUSED BUT USED IN PRIVATE
): Promise<any> {
// Get resources with their targets and sites in a single optimized query
// Start from sites on this exit node, then join to targets and resources
@@ -127,7 +127,7 @@ export async function getTraefikConfig(
resourcesWithTargetsAndSites.forEach((row) => {
const resourceId = row.resourceId;
const resourceName = sanitize(row.resourceName) || "";
const targetPath = sanitize(row.path) || ""; // Handle null/undefined paths
const targetPath = encodePath(row.path); // Use encodePath to avoid collisions (e.g. "/a/b" vs "/a-b")
const pathMatchType = row.pathMatchType || "";
const rewritePath = row.rewritePath || "";
const rewritePathType = row.rewritePathType || "";
@@ -145,7 +145,7 @@ export async function getTraefikConfig(
const mapKey = [resourceId, pathKey].filter(Boolean).join("-");
const key = sanitize(mapKey);
if (!resourcesMap.has(key)) {
if (!resourcesMap.has(mapKey)) {
const validation = validatePathRewriteConfig(
row.path,
row.pathMatchType,
@@ -160,9 +160,10 @@ export async function getTraefikConfig(
return;
}
resourcesMap.set(key, {
resourcesMap.set(mapKey, {
resourceId: row.resourceId,
name: resourceName,
key: key,
fullDomain: row.fullDomain,
ssl: row.ssl,
http: row.http,
@@ -190,7 +191,7 @@ export async function getTraefikConfig(
});
}
resourcesMap.get(key).targets.push({
resourcesMap.get(mapKey).targets.push({
resourceId: row.resourceId,
targetId: row.targetId,
ip: row.ip,
@@ -227,8 +228,9 @@ export async function getTraefikConfig(
};
// get the key and the resource
for (const [key, resource] of resourcesMap.entries()) {
for (const [, resource] of resourcesMap.entries()) {
const targets = resource.targets as TargetWithSite[];
const key = resource.key;
const routerName = `${key}-${resource.name}-router`;
const serviceName = `${key}-${resource.name}-service`;
@@ -477,7 +479,10 @@ export async function getTraefikConfig(
// TODO: HOW TO HANDLE ^^^^^^ BETTER
const anySitesOnline = targets.some(
(target) => target.site.online
(target) =>
target.site.online ||
target.site.type === "local" ||
target.site.type === "wireguard"
);
return (
@@ -490,7 +495,7 @@ export async function getTraefikConfig(
if (target.health == "unhealthy") {
return false;
}
// If any sites are online, exclude offline sites
if (anySitesOnline && !target.site.online) {
return false;
@@ -605,7 +610,10 @@ export async function getTraefikConfig(
servers: (() => {
// Check if any sites are online
const anySitesOnline = targets.some(
(target) => target.site.online
(target) =>
target.site.online ||
target.site.type === "local" ||
target.site.type === "wireguard"
);
return targets
@@ -613,7 +621,7 @@ export async function getTraefikConfig(
if (!target.enabled) {
return false;
}
// If any sites are online, exclude offline sites
if (anySitesOnline && !target.site.online) {
return false;

View File

@@ -0,0 +1,323 @@
import { assertEquals } from "../../../test/assert";
// ── Pure function copies (inlined to avoid pulling in server dependencies) ──
function sanitize(input: string | null | undefined): string | undefined {
    if (!input) {
        return undefined;
    }
    // Cap the length at 50 chars, collapse every run of disallowed
    // characters into a single hyphen, and trim hyphens from both ends.
    const truncated = input.length > 50 ? input.substring(0, 50) : input;
    const hyphenated = truncated.replace(/[^a-zA-Z0-9-]/g, "-");
    const collapsed = hyphenated.replace(/-+/g, "-");
    return collapsed.replace(/^-|-$/g, "");
}
// Inlined copy of the production encodePath (see utils). Encodes each
// non-alphanumeric character as its hex char code so distinct paths keep
// distinct encodings.
function encodePath(path: string | null | undefined): string {
    if (!path) return "";
    return path.replace(/[^a-zA-Z0-9]/g, (ch) => {
        // Zero-pad to two hex digits so char codes < 0x10 cannot alias a
        // literal digit/letter: "\t" (0x09) encodes to "09" rather than
        // "9", which previously made "/v\t" collide with "/v9".
        return ch.charCodeAt(0).toString(16).padStart(2, "0");
    });
}
// ── Helpers ──────────────────────────────────────────────────────────
/**
 * Exact replica of the OLD key computation from upstream main.
 * Paths go through sanitize(), which maps every disallowed character to a
 * hyphen — that lossy mapping is the collision bug the suite demonstrates.
 */
function oldKeyComputation(
    resourceId: number,
    path: string | null,
    pathMatchType: string | null,
    rewritePath: string | null,
    rewritePathType: string | null
): string {
    // Assemble the path-specific portion, dropping empty segments.
    const segments = [
        sanitize(path) || "",
        pathMatchType || "",
        rewritePath || "",
        rewritePathType || ""
    ];
    const pathKey = segments.filter(Boolean).join("-");
    // Prefix with the resource id and sanitize the combined key.
    return sanitize([resourceId, pathKey].filter(Boolean).join("-")) || "";
}
/**
 * Replica of the NEW key computation from our fix.
 * Paths go through encodePath(), which keeps distinct paths distinct, so
 * the resulting map keys are collision-free.
 */
function newKeyComputation(
    resourceId: number,
    path: string | null,
    pathMatchType: string | null,
    rewritePath: string | null,
    rewritePathType: string | null
): string {
    // Assemble the path-specific portion, dropping empty segments.
    const segments = [
        encodePath(path),
        pathMatchType || "",
        rewritePath || "",
        rewritePathType || ""
    ];
    const pathKey = segments.filter(Boolean).join("-");
    // Prefix with the resource id and sanitize the combined key.
    return sanitize([resourceId, pathKey].filter(Boolean).join("-")) || "";
}
// ── Tests ────────────────────────────────────────────────────────────
/**
 * Runs the full path-encoding test suite sequentially.
 *
 * Coverage:
 *  - encodePath() unit behavior (tests 1-7),
 *  - the key-collision bug fix, comparing old vs new key computation
 *    (tests 8-11),
 *  - edge cases around resource / match-type / rewrite key separation
 *    (tests 12-15).
 *
 * Each test lives in its own block scope; assertEquals() throws on the
 * first failure (its third argument is the failure message), so a passing
 * run prints one PASS line per test followed by a summary count.
 */
function runTests() {
    console.log("Running path encoding tests...\n");
    // Count of tests that completed without an assertion throwing.
    let passed = 0;
    // ── encodePath unit tests ────────────────────────────────────────
    // Test 1: null/undefined/empty
    {
        assertEquals(encodePath(null), "", "null should return empty");
        assertEquals(
            encodePath(undefined),
            "",
            "undefined should return empty"
        );
        assertEquals(encodePath(""), "", "empty string should return empty");
        console.log(" PASS: encodePath handles null/undefined/empty");
        passed++;
    }
    // Test 2: root path
    {
        assertEquals(encodePath("/"), "2f", "/ should encode to 2f");
        console.log(" PASS: encodePath encodes root path");
        passed++;
    }
    // Test 3: alphanumeric passthrough
    {
        assertEquals(encodePath("/api"), "2fapi", "/api encodes slash only");
        assertEquals(encodePath("/v1"), "2fv1", "/v1 encodes slash only");
        assertEquals(encodePath("abc"), "abc", "plain alpha passes through");
        console.log(" PASS: encodePath preserves alphanumeric chars");
        passed++;
    }
    // Test 4: all special chars produce unique hex
    {
        const paths = ["/a/b", "/a-b", "/a.b", "/a_b", "/a b"];
        const results = paths.map((p) => encodePath(p));
        const unique = new Set(results);
        assertEquals(
            unique.size,
            paths.length,
            "all special-char paths must produce unique encodings"
        );
        console.log(
            " PASS: encodePath produces unique output for different special chars"
        );
        passed++;
    }
    // Test 5: output is always alphanumeric (safe for Traefik names)
    {
        const paths = [
            "/",
            "/api",
            "/a/b",
            "/a-b",
            "/a.b",
            "/complex/path/here"
        ];
        for (const p of paths) {
            const e = encodePath(p);
            assertEquals(
                /^[a-zA-Z0-9]+$/.test(e),
                true,
                `encodePath("${p}") = "${e}" must be alphanumeric`
            );
        }
        console.log(" PASS: encodePath output is always alphanumeric");
        passed++;
    }
    // Test 6: deterministic
    {
        assertEquals(
            encodePath("/api"),
            encodePath("/api"),
            "same input same output"
        );
        assertEquals(
            encodePath("/a/b/c"),
            encodePath("/a/b/c"),
            "same input same output"
        );
        console.log(" PASS: encodePath is deterministic");
        passed++;
    }
    // Test 7: many distinct paths never collide
    {
        const paths = [
            "/",
            "/api",
            "/api/v1",
            "/api/v2",
            "/a/b",
            "/a-b",
            "/a.b",
            "/a_b",
            "/health",
            "/health/check",
            "/admin",
            "/admin/users",
            "/api/v1/users",
            "/api/v1/posts",
            "/app",
            "/app/dashboard"
        ];
        const encoded = new Set(paths.map((p) => encodePath(p)));
        assertEquals(
            encoded.size,
            paths.length,
            `expected ${paths.length} unique encodings, got ${encoded.size}`
        );
        console.log(" PASS: 16 realistic paths all produce unique encodings");
        passed++;
    }
    // ── Collision fix: the actual bug we're fixing ───────────────────
    // Test 8: /a/b and /a-b now have different keys (THE BUG FIX)
    {
        const keyAB = newKeyComputation(1, "/a/b", "prefix", null, null);
        const keyDash = newKeyComputation(1, "/a-b", "prefix", null, null);
        assertEquals(
            keyAB !== keyDash,
            true,
            "/a/b and /a-b MUST have different keys"
        );
        console.log(" PASS: collision fix — /a/b vs /a-b have different keys");
        passed++;
    }
    // Test 9: demonstrate the old bug — old code maps /a/b and /a-b to same key
    // NOTE: this test intentionally asserts the *buggy* behavior of the old
    // key scheme, to prove the bug the new scheme fixes actually existed.
    {
        const oldKeyAB = oldKeyComputation(1, "/a/b", "prefix", null, null);
        const oldKeyDash = oldKeyComputation(1, "/a-b", "prefix", null, null);
        assertEquals(
            oldKeyAB,
            oldKeyDash,
            "old code MUST have this collision (confirms the bug exists)"
        );
        console.log(" PASS: confirmed old code bug — /a/b and /a-b collided");
        passed++;
    }
    // Test 10: /api/v1 and /api-v1 — old code collision, new code fixes it
    {
        const oldKey1 = oldKeyComputation(1, "/api/v1", "prefix", null, null);
        const oldKey2 = oldKeyComputation(1, "/api-v1", "prefix", null, null);
        assertEquals(
            oldKey1,
            oldKey2,
            "old code collision for /api/v1 vs /api-v1"
        );
        const newKey1 = newKeyComputation(1, "/api/v1", "prefix", null, null);
        const newKey2 = newKeyComputation(1, "/api-v1", "prefix", null, null);
        assertEquals(
            newKey1 !== newKey2,
            true,
            "new code must separate /api/v1 and /api-v1"
        );
        console.log(" PASS: collision fix — /api/v1 vs /api-v1");
        passed++;
    }
    // Test 11: /app.v2 and /app/v2 and /app-v2 — three-way collision fixed
    {
        const a = newKeyComputation(1, "/app.v2", "prefix", null, null);
        const b = newKeyComputation(1, "/app/v2", "prefix", null, null);
        const c = newKeyComputation(1, "/app-v2", "prefix", null, null);
        const keys = new Set([a, b, c]);
        assertEquals(
            keys.size,
            3,
            "three paths must produce three unique keys"
        );
        console.log(
            " PASS: collision fix — three-way /app.v2, /app/v2, /app-v2"
        );
        passed++;
    }
    // ── Edge cases ───────────────────────────────────────────────────
    // Test 12: same path in different resources — always separate
    {
        const key1 = newKeyComputation(1, "/api", "prefix", null, null);
        const key2 = newKeyComputation(2, "/api", "prefix", null, null);
        assertEquals(
            key1 !== key2,
            true,
            "different resources with same path must have different keys"
        );
        console.log(" PASS: edge case — same path, different resources");
        passed++;
    }
    // Test 13: same resource, different pathMatchType — separate keys
    {
        const exact = newKeyComputation(1, "/api", "exact", null, null);
        const prefix = newKeyComputation(1, "/api", "prefix", null, null);
        assertEquals(
            exact !== prefix,
            true,
            "exact vs prefix must have different keys"
        );
        console.log(" PASS: edge case — same path, different match types");
        passed++;
    }
    // Test 14: same resource and path, different rewrite config — separate keys
    {
        const noRewrite = newKeyComputation(1, "/api", "prefix", null, null);
        const withRewrite = newKeyComputation(
            1,
            "/api",
            "prefix",
            "/backend",
            "prefix"
        );
        assertEquals(
            noRewrite !== withRewrite,
            true,
            "with vs without rewrite must have different keys"
        );
        console.log(" PASS: edge case — same path, different rewrite config");
        passed++;
    }
    // Test 15: paths with special URL characters
    {
        const paths = ["/api?foo", "/api#bar", "/api%20baz", "/api+qux"];
        const keys = new Set(
            paths.map((p) => newKeyComputation(1, p, "prefix", null, null))
        );
        assertEquals(
            keys.size,
            paths.length,
            "special URL chars must produce unique keys"
        );
        console.log(" PASS: edge case — special URL characters in paths");
        passed++;
    }
    console.log(`\nAll ${passed} tests passed!`);
}
// Top-level runner: execute the suite and exit with a non-zero status so a
// CI job treats any assertion failure as a failed test run.
try {
    runTests();
} catch (error) {
    console.error("Test failed:", error);
    process.exit(1);
}

View File

@@ -13,6 +13,26 @@ export function sanitize(input: string | null | undefined): string | undefined {
.replace(/^-|-$/g, "");
}
/**
 * Encode a URL path into a collision-resistant alphanumeric string suitable
 * for use in Traefik map keys.
 *
 * Unlike sanitize(), this preserves uniqueness between distinct paths by
 * encoding each non-alphanumeric character as its two-digit hex char code,
 * so e.g. "/a/b" and "/a-b" no longer map to the same key.
 *
 * encodePath("/api") => "2fapi"
 * encodePath("/a/b") => "2fa2fb"
 * encodePath("/a-b") => "2fa2db" (different from /a/b)
 * encodePath("/")    => "2f"
 * encodePath(null)   => ""
 *
 * NOTE(review): alphanumeric characters pass through unchanged and overlap
 * with the hex output alphabet, so contrived literal paths (e.g. "/2f" vs
 * "//") can still coincide — acceptable for real URL paths, but worth
 * knowing if this is ever reused for arbitrary strings.
 */
export function encodePath(path: string | null | undefined): string {
    if (!path) return "";
    return path.replace(/[^a-zA-Z0-9]/g, (ch) => {
        // padStart keeps the encoding unambiguous for char codes < 0x10:
        // without it "\t" (0x09) would encode to "9" and "/v\t" would
        // collide with the literal path "/v9".
        return ch.charCodeAt(0).toString(16).padStart(2, "0");
    });
}
export function validatePathRewriteConfig(
path: string | null,
pathMatchType: string | null,