Mirror of https://github.com/fosrl/pangolin.git (synced 2026-02-16 09:56:36 +00:00)
Chungus 2.0
6
server/lib/billing/createCustomer.ts
Normal file
@@ -0,0 +1,6 @@
export async function createCustomer(
    orgId: string,
    email: string | null | undefined
): Promise<string | undefined> {
    return;
}
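In the OSS build this is a no-op stub that resolves to undefined. A minimal caller sketch, assuming the function is consumed through the billing barrel; the helper name and fallback behaviour are illustrative, not from the codebase:

    import { createCustomer } from "@server/lib/billing";

    // Hypothetical caller: treat a missing customer ID as "billing not configured".
    async function ensureBillingCustomer(orgId: string, email?: string | null) {
        const customerId = await createCustomer(orgId, email);
        return customerId ?? null; // the OSS stub always lands here
    }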
@@ -1,16 +1,3 @@
/*
 * This file is part of a proprietary work.
 *
 * Copyright (c) 2025 Fossorial, Inc.
 * All rights reserved.
 *
 * This file is licensed under the Fossorial Commercial License.
 * You may not use this file except in compliance with the License.
 * Unauthorized use, copying, modification, or distribution is strictly prohibited.
 *
 * This file is not licensed under the AGPLv3.
 */

import Stripe from "stripe";

export enum FeatureId {
8
server/lib/billing/getOrgTierData.ts
Normal file
@@ -0,0 +1,8 @@
export async function getOrgTierData(
    orgId: string
): Promise<{ tier: string | null; active: boolean }> {
    let tier = null;
    let active = false;

    return { tier, active };
}
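A short sketch of consuming the tier data; the gating helper below is illustrative only. In the OSS build the stub always reports no tier and inactive billing:

    import { getOrgTierData } from "@server/lib/billing";

    // Hypothetical gate: paid features are enabled only when a tier is set and active.
    async function isPaidOrg(orgId: string): Promise<boolean> {
        const { tier, active } = await getOrgTierData(orgId);
        return active && tier !== null;
    }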
5
server/lib/billing/index.ts
Normal file
@@ -0,0 +1,5 @@
export * from "./limitSet";
export * from "./features";
export * from "./limitsService";
export * from "./getOrgTierData";
export * from "./createCustomer";
@@ -1,16 +1,3 @@
/*
 * This file is part of a proprietary work.
 *
 * Copyright (c) 2025 Fossorial, Inc.
 * All rights reserved.
 *
 * This file is licensed under the Fossorial Commercial License.
 * You may not use this file except in compliance with the License.
 * Unauthorized use, copying, modification, or distribution is strictly prohibited.
 *
 * This file is not licensed under the AGPLv3.
 */

import { FeatureId } from "./features";

export type LimitSet = {
@@ -1,16 +1,3 @@
/*
 * This file is part of a proprietary work.
 *
 * Copyright (c) 2025 Fossorial, Inc.
 * All rights reserved.
 *
 * This file is licensed under the Fossorial Commercial License.
 * You may not use this file except in compliance with the License.
 * Unauthorized use, copying, modification, or distribution is strictly prohibited.
 *
 * This file is not licensed under the AGPLv3.
 */

import { db, limits } from "@server/db";
import { and, eq } from "drizzle-orm";
import { LimitSet } from "./limitSet";
@@ -1,16 +1,3 @@
/*
 * This file is part of a proprietary work.
 *
 * Copyright (c) 2025 Fossorial, Inc.
 * All rights reserved.
 *
 * This file is licensed under the Fossorial Commercial License.
 * You may not use this file except in compliance with the License.
 * Unauthorized use, copying, modification, or distribution is strictly prohibited.
 *
 * This file is not licensed under the AGPLv3.
 */

export enum TierId {
    STANDARD = "standard",
}
@@ -1,21 +1,7 @@
|
||||
/*
|
||||
* This file is part of a proprietary work.
|
||||
*
|
||||
* Copyright (c) 2025 Fossorial, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This file is licensed under the Fossorial Commercial License.
|
||||
* You may not use this file except in compliance with the License.
|
||||
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
|
||||
*
|
||||
* This file is not licensed under the AGPLv3.
|
||||
*/
|
||||
|
||||
import { eq, sql, and } from "drizzle-orm";
|
||||
import NodeCache from "node-cache";
|
||||
import { v4 as uuidv4 } from "uuid";
|
||||
import { PutObjectCommand } from "@aws-sdk/client-s3";
|
||||
import { s3Client } from "../s3";
|
||||
import * as fs from "fs/promises";
|
||||
import * as path from "path";
|
||||
import {
|
||||
@@ -30,10 +16,10 @@ import {
|
||||
Transaction
|
||||
} from "@server/db";
|
||||
import { FeatureId, getFeatureMeterId } from "./features";
|
||||
import config from "@server/lib/config";
|
||||
import logger from "@server/logger";
|
||||
import { sendToClient } from "@server/routers/ws";
|
||||
import { sendToClient } from "#dynamic/routers/ws";
|
||||
import { build } from "@server/build";
|
||||
import { s3Client } from "@server/lib/s3";
|
||||
|
||||
interface StripeEvent {
|
||||
identifier?: string;
|
||||
@@ -58,8 +44,10 @@ export class UsageService {
|
||||
if (build !== "saas") {
|
||||
return;
|
||||
}
|
||||
this.bucketName = config.getRawPrivateConfig().stripe?.s3Bucket;
|
||||
this.eventsDir = config.getRawPrivateConfig().stripe?.localFilePath;
|
||||
// this.bucketName = privateConfig.getRawPrivateConfig().stripe?.s3Bucket;
|
||||
// this.eventsDir = privateConfig.getRawPrivateConfig().stripe?.localFilePath;
|
||||
this.bucketName = process.env.S3_BUCKET || undefined;
|
||||
this.eventsDir = process.env.LOCAL_FILE_PATH || undefined;
|
||||
|
||||
// Ensure events directory exists
|
||||
this.initializeEventsDirectory().then(() => {
|
||||
@@ -1,4 +1,4 @@
|
||||
import { sendToClient } from "@server/routers/ws";
|
||||
import { sendToClient } from "#dynamic/routers/ws";
|
||||
import { processContainerLabels } from "./parseDockerContainers";
|
||||
import { applyBlueprint } from "./applyBlueprint";
|
||||
import { db, sites } from "@server/db";
|
||||
|
||||
@@ -25,7 +25,7 @@ import {
|
||||
TargetData
|
||||
} from "./types";
|
||||
import logger from "@server/logger";
|
||||
import { createCertificate } from "@server/routers/private/certificates/createCertificate";
|
||||
import { createCertificate } from "#dynamic/routers/certificates/createCertificate";
|
||||
import { pickPort } from "@server/routers/target/helpers";
|
||||
import { resourcePassword } from "@server/db";
|
||||
import { hashPassword } from "@server/auth/password";
|
||||
|
||||
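Several imports are switched from concrete "@server/..." paths to "#dynamic/..." specifiers, which presumably resolve to either the OSS or the private implementation at build time; the resolution mechanism itself is not shown in this diff. A consumer-side sketch of the intent, with an invented message type:

    import { sendToClient } from "#dynamic/routers/ws";

    // Callers depend only on the alias; which concrete module backs
    // "#dynamic/routers/ws" is decided by the build, not at runtime.
    export function notifyClient(clientId: string, payload: unknown) {
        return sendToClient(clientId, { type: "example/notify", data: payload }); // hypothetical message type
    }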
@@ -1,7 +1,7 @@
|
||||
import axios from "axios";
|
||||
import { tokenManager } from "../tokenManager";
|
||||
import { tokenManager } from "./tokenManager";
|
||||
import logger from "@server/logger";
|
||||
import config from "../config";
|
||||
import config from "./config";
|
||||
|
||||
/**
|
||||
* Get valid certificates for the specified domains
|
||||
@@ -6,16 +6,10 @@ import { eq } from "drizzle-orm";
|
||||
import { license } from "@server/license/license";
|
||||
import { configSchema, readConfigFile } from "./readConfigFile";
|
||||
import { fromError } from "zod-validation-error";
|
||||
import {
|
||||
privateConfigSchema,
|
||||
readPrivateConfigFile
|
||||
} from "@server/lib/private/readConfigFile";
|
||||
import logger from "@server/logger";
|
||||
import { build } from "@server/build";
|
||||
|
||||
export class Config {
|
||||
private rawConfig!: z.infer<typeof configSchema>;
|
||||
private rawPrivateConfig!: z.infer<typeof privateConfigSchema>;
|
||||
|
||||
supporterData: SupporterKey | null = null;
|
||||
|
||||
@@ -37,19 +31,6 @@ export class Config {
|
||||
throw new Error(`Invalid configuration file: ${errors}`);
|
||||
}
|
||||
|
||||
const privateEnvironment = readPrivateConfigFile();
|
||||
|
||||
const {
|
||||
data: parsedPrivateConfig,
|
||||
success: privateSuccess,
|
||||
error: privateError
|
||||
} = privateConfigSchema.safeParse(privateEnvironment);
|
||||
|
||||
if (!privateSuccess) {
|
||||
const errors = fromError(privateError);
|
||||
throw new Error(`Invalid private configuration file: ${errors}`);
|
||||
}
|
||||
|
||||
if (
|
||||
// @ts-ignore
|
||||
parsedConfig.users ||
|
||||
@@ -109,110 +90,11 @@ export class Config {
|
||||
? "true"
|
||||
: "false";
|
||||
|
||||
if (parsedPrivateConfig.branding?.colors) {
|
||||
process.env.BRANDING_COLORS = JSON.stringify(
|
||||
parsedPrivateConfig.branding?.colors
|
||||
);
|
||||
}
|
||||
|
||||
if (parsedPrivateConfig.branding?.logo?.light_path) {
|
||||
process.env.BRANDING_LOGO_LIGHT_PATH =
|
||||
parsedPrivateConfig.branding?.logo?.light_path;
|
||||
}
|
||||
if (parsedPrivateConfig.branding?.logo?.dark_path) {
|
||||
process.env.BRANDING_LOGO_DARK_PATH =
|
||||
parsedPrivateConfig.branding?.logo?.dark_path || undefined;
|
||||
}
|
||||
|
||||
process.env.HIDE_SUPPORTER_KEY = parsedPrivateConfig.flags
|
||||
?.hide_supporter_key
|
||||
? "true"
|
||||
: "false";
|
||||
|
||||
if (build != "oss") {
|
||||
if (parsedPrivateConfig.branding?.logo?.light_path) {
|
||||
process.env.BRANDING_LOGO_LIGHT_PATH =
|
||||
parsedPrivateConfig.branding?.logo?.light_path;
|
||||
}
|
||||
if (parsedPrivateConfig.branding?.logo?.dark_path) {
|
||||
process.env.BRANDING_LOGO_DARK_PATH =
|
||||
parsedPrivateConfig.branding?.logo?.dark_path || undefined;
|
||||
}
|
||||
|
||||
process.env.BRANDING_LOGO_AUTH_WIDTH = parsedPrivateConfig.branding
|
||||
?.logo?.auth_page?.width
|
||||
? parsedPrivateConfig.branding?.logo?.auth_page?.width.toString()
|
||||
: undefined;
|
||||
process.env.BRANDING_LOGO_AUTH_HEIGHT = parsedPrivateConfig.branding
|
||||
?.logo?.auth_page?.height
|
||||
? parsedPrivateConfig.branding?.logo?.auth_page?.height.toString()
|
||||
: undefined;
|
||||
|
||||
process.env.BRANDING_LOGO_NAVBAR_WIDTH = parsedPrivateConfig
|
||||
.branding?.logo?.navbar?.width
|
||||
? parsedPrivateConfig.branding?.logo?.navbar?.width.toString()
|
||||
: undefined;
|
||||
process.env.BRANDING_LOGO_NAVBAR_HEIGHT = parsedPrivateConfig
|
||||
.branding?.logo?.navbar?.height
|
||||
? parsedPrivateConfig.branding?.logo?.navbar?.height.toString()
|
||||
: undefined;
|
||||
|
||||
process.env.BRANDING_FAVICON_PATH =
|
||||
parsedPrivateConfig.branding?.favicon_path;
|
||||
|
||||
process.env.BRANDING_APP_NAME =
|
||||
parsedPrivateConfig.branding?.app_name || "Pangolin";
|
||||
|
||||
if (parsedPrivateConfig.branding?.footer) {
|
||||
process.env.BRANDING_FOOTER = JSON.stringify(
|
||||
parsedPrivateConfig.branding?.footer
|
||||
);
|
||||
}
|
||||
|
||||
process.env.LOGIN_PAGE_TITLE_TEXT =
|
||||
parsedPrivateConfig.branding?.login_page?.title_text || "";
|
||||
process.env.LOGIN_PAGE_SUBTITLE_TEXT =
|
||||
parsedPrivateConfig.branding?.login_page?.subtitle_text || "";
|
||||
|
||||
process.env.SIGNUP_PAGE_TITLE_TEXT =
|
||||
parsedPrivateConfig.branding?.signup_page?.title_text || "";
|
||||
process.env.SIGNUP_PAGE_SUBTITLE_TEXT =
|
||||
parsedPrivateConfig.branding?.signup_page?.subtitle_text || "";
|
||||
|
||||
process.env.RESOURCE_AUTH_PAGE_HIDE_POWERED_BY =
|
||||
parsedPrivateConfig.branding?.resource_auth_page
|
||||
?.hide_powered_by === true
|
||||
? "true"
|
||||
: "false";
|
||||
process.env.RESOURCE_AUTH_PAGE_SHOW_LOGO =
|
||||
parsedPrivateConfig.branding?.resource_auth_page?.show_logo ===
|
||||
true
|
||||
? "true"
|
||||
: "false";
|
||||
process.env.RESOURCE_AUTH_PAGE_TITLE_TEXT =
|
||||
parsedPrivateConfig.branding?.resource_auth_page?.title_text ||
|
||||
"";
|
||||
process.env.RESOURCE_AUTH_PAGE_SUBTITLE_TEXT =
|
||||
parsedPrivateConfig.branding?.resource_auth_page
|
||||
?.subtitle_text || "";
|
||||
|
||||
if (parsedPrivateConfig.branding?.background_image_path) {
|
||||
process.env.BACKGROUND_IMAGE_PATH =
|
||||
parsedPrivateConfig.branding?.background_image_path;
|
||||
}
|
||||
|
||||
if (parsedPrivateConfig.server.reo_client_id) {
|
||||
process.env.REO_CLIENT_ID =
|
||||
parsedPrivateConfig.server.reo_client_id;
|
||||
}
|
||||
}
|
||||
|
||||
if (parsedConfig.server.maxmind_db_path) {
|
||||
process.env.MAXMIND_DB_PATH = parsedConfig.server.maxmind_db_path;
|
||||
}
|
||||
|
||||
this.rawConfig = parsedConfig;
|
||||
this.rawPrivateConfig = parsedPrivateConfig;
|
||||
}
|
||||
|
||||
public async initServer() {
|
||||
@@ -231,7 +113,6 @@ export class Config {
|
||||
private async checkKeyStatus() {
|
||||
const licenseStatus = await license.check();
|
||||
if (
|
||||
!this.rawPrivateConfig.flags?.hide_supporter_key &&
|
||||
build != "oss" &&
|
||||
!licenseStatus.isHostLicensed
|
||||
) {
|
||||
@@ -243,10 +124,6 @@ export class Config {
|
||||
return this.rawConfig;
|
||||
}
|
||||
|
||||
public getRawPrivateConfig() {
|
||||
return this.rawPrivateConfig;
|
||||
}
|
||||
|
||||
public getNoReplyEmail(): string | undefined {
|
||||
return (
|
||||
this.rawConfig.email?.no_reply || this.rawConfig.email?.smtp_user
|
||||
|
||||
92
server/lib/corsWithLoginPage.ts
Normal file
@@ -0,0 +1,92 @@
|
||||
import { Request, Response, NextFunction } from "express";
|
||||
import cors, { CorsOptions } from "cors";
|
||||
import config from "@server/lib/config";
|
||||
import logger from "@server/logger";
|
||||
import { db, loginPage } from "@server/db";
|
||||
import { eq } from "drizzle-orm";
|
||||
|
||||
async function isValidLoginPageDomain(host: string): Promise<boolean> {
|
||||
try {
|
||||
const [result] = await db
|
||||
.select()
|
||||
.from(loginPage)
|
||||
.where(eq(loginPage.fullDomain, host))
|
||||
.limit(1);
|
||||
|
||||
const isValid = !!result;
|
||||
|
||||
return isValid;
|
||||
} catch (error) {
|
||||
logger.error("Error checking loginPage domain:", error);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
export function corsWithLoginPageSupport(corsConfig: any) {
|
||||
const options = {
|
||||
...(corsConfig?.origins
|
||||
? { origin: corsConfig.origins }
|
||||
: {
|
||||
origin: (origin: any, callback: any) => {
|
||||
callback(null, true);
|
||||
}
|
||||
}),
|
||||
...(corsConfig?.methods && { methods: corsConfig.methods }),
|
||||
...(corsConfig?.allowed_headers && {
|
||||
allowedHeaders: corsConfig.allowed_headers
|
||||
}),
|
||||
credentials: !(corsConfig?.credentials === false)
|
||||
};
|
||||
|
||||
return async (req: Request, res: Response, next: NextFunction) => {
|
||||
const originValidatedCorsConfig = {
|
||||
origin: async (
|
||||
origin: string | undefined,
|
||||
callback: (err: Error | null, allow?: boolean) => void
|
||||
) => {
|
||||
// If no origin (e.g., same-origin request), allow it
|
||||
|
||||
if (!origin) {
|
||||
return callback(null, true);
|
||||
}
|
||||
|
||||
const dashboardUrl = config.getRawConfig().app.dashboard_url;
|
||||
|
||||
// If no dashboard_url is configured, allow all origins
|
||||
if (!dashboardUrl) {
|
||||
return callback(null, true);
|
||||
}
|
||||
|
||||
// Check if origin matches dashboard URL
|
||||
const dashboardHost = new URL(dashboardUrl).host;
|
||||
const originHost = new URL(origin).host;
|
||||
|
||||
if (originHost === dashboardHost) {
|
||||
return callback(null, true);
|
||||
}
|
||||
|
||||
if (
|
||||
corsConfig?.origins &&
|
||||
corsConfig.origins.includes(origin)
|
||||
) {
|
||||
return callback(null, true);
|
||||
}
|
||||
|
||||
// If origin doesn't match dashboard URL, check if it's a valid loginPage domain
|
||||
const isValidDomain = await isValidLoginPageDomain(originHost);
|
||||
|
||||
if (isValidDomain) {
|
||||
return callback(null, true);
|
||||
}
|
||||
|
||||
// Origin is not valid
|
||||
return callback(null, false);
|
||||
},
|
||||
methods: corsConfig?.methods,
|
||||
allowedHeaders: corsConfig?.allowed_headers,
|
||||
credentials: corsConfig?.credentials !== false
|
||||
} as CorsOptions;
|
||||
|
||||
return cors(originValidatedCorsConfig)(req, res, next);
|
||||
};
|
||||
}
|
||||
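A sketch of mounting the middleware on an Express app. Where the CORS block lives in the runtime config is an assumption; only the middleware factory itself comes from this file:

    import express from "express";
    import config from "@server/lib/config";
    import { corsWithLoginPageSupport } from "@server/lib/corsWithLoginPage";

    const app = express();

    // Assumed config location for the CORS settings; the factory also accepts undefined.
    const corsSettings = (config.getRawConfig() as any).server?.cors;
    app.use(corsWithLoginPageSupport(corsSettings));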
@@ -1,16 +1,3 @@
/*
 * This file is part of a proprietary work.
 *
 * Copyright (c) 2025 Fossorial, Inc.
 * All rights reserved.
 *
 * This file is licensed under the Fossorial Commercial License.
 * You may not use this file except in compliance with the License.
 * Unauthorized use, copying, modification, or distribution is strictly prohibited.
 *
 * This file is not licensed under the AGPLv3.
 */

import { isValidCIDR } from "@server/lib/validators";
import { getNextAvailableOrgSubnet } from "@server/lib/ip";
import {
@@ -28,9 +15,9 @@ import {
|
||||
} from "@server/db";
|
||||
import { eq } from "drizzle-orm";
|
||||
import { defaultRoleAllowedActions } from "@server/routers/role";
|
||||
import { FeatureId, limitsService, sandboxLimitSet } from "@server/lib/private/billing";
|
||||
import { createCustomer } from "@server/routers/private/billing/createCustomer";
|
||||
import { usageService } from "@server/lib/private/billing/usageService";
|
||||
import { FeatureId, limitsService, sandboxLimitSet } from "@server/lib/billing";
|
||||
import { createCustomer } from "@server/private/lib/billing/createCustomer";
|
||||
import { usageService } from "@server/lib/billing/usageService";
|
||||
|
||||
export async function createUserAccountOrg(
|
||||
userId: string,
|
||||
@@ -16,7 +16,11 @@ export async function verifyExitNodeOrgAccess(
|
||||
return { hasAccess: true, exitNode };
|
||||
}
|
||||
|
||||
export async function listExitNodes(orgId: string, filterOnline = false, noCloud = false) {
|
||||
export async function listExitNodes(
|
||||
orgId: string,
|
||||
filterOnline = false,
|
||||
noCloud = false
|
||||
) {
|
||||
// TODO: be smarter about which nodes to send for pinging, rather than sending every node that is not remote
|
||||
const allExitNodes = await db
|
||||
.select({
|
||||
@@ -59,7 +63,16 @@ export async function checkExitNodeOrg(exitNodeId: number, orgId: string) {
|
||||
return false;
|
||||
}
|
||||
|
||||
export async function resolveExitNodes(hostname: string, publicKey: string) {
|
||||
export async function resolveExitNodes(
|
||||
hostname: string,
|
||||
publicKey: string
|
||||
): Promise<
|
||||
{
|
||||
endpoint: string;
|
||||
publicKey: string;
|
||||
orgId: string;
|
||||
}[]
|
||||
> {
|
||||
// OSS version: simple implementation that returns empty array
|
||||
return [];
|
||||
}
|
||||
|
||||
@@ -1,33 +1,4 @@
|
||||
import { build } from "@server/build";
|
||||
|
||||
// Import both modules
|
||||
import * as exitNodesModule from "./exitNodes";
|
||||
import * as privateExitNodesModule from "./privateExitNodes";
|
||||
|
||||
// Conditionally export exit nodes implementation based on build type
|
||||
const exitNodesImplementation = build === "oss" ? exitNodesModule : privateExitNodesModule;
|
||||
|
||||
// Re-export all items from the selected implementation
|
||||
export const {
|
||||
verifyExitNodeOrgAccess,
|
||||
listExitNodes,
|
||||
selectBestExitNode,
|
||||
checkExitNodeOrg,
|
||||
resolveExitNodes
|
||||
} = exitNodesImplementation;
|
||||
|
||||
// Import communications modules
|
||||
import * as exitNodeCommsModule from "./exitNodeComms";
|
||||
import * as privateExitNodeCommsModule from "./privateExitNodeComms";
|
||||
|
||||
// Conditionally export communications implementation based on build type
|
||||
const exitNodeCommsImplementation = build === "oss" ? exitNodeCommsModule : privateExitNodeCommsModule;
|
||||
|
||||
// Re-export communications functions from the selected implementation
|
||||
export const {
|
||||
sendToExitNode
|
||||
} = exitNodeCommsImplementation;
|
||||
|
||||
// Re-export shared modules
|
||||
export * from "./exitNodes";
|
||||
export * from "./exitNodeComms";
|
||||
export * from "./subnet";
|
||||
export * from "./getCurrentExitNodeId";
|
||||
@@ -1,145 +0,0 @@
|
||||
/*
|
||||
* This file is part of a proprietary work.
|
||||
*
|
||||
* Copyright (c) 2025 Fossorial, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This file is licensed under the Fossorial Commercial License.
|
||||
* You may not use this file except in compliance with the License.
|
||||
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
|
||||
*
|
||||
* This file is not licensed under the AGPLv3.
|
||||
*/
|
||||
|
||||
import axios from "axios";
|
||||
import logger from "@server/logger";
|
||||
import { db, ExitNode, remoteExitNodes } from "@server/db";
|
||||
import { eq } from "drizzle-orm";
|
||||
import { sendToClient } from "../../routers/ws";
|
||||
import { config } from "../config";
|
||||
|
||||
interface ExitNodeRequest {
|
||||
remoteType?: string;
|
||||
localPath: string;
|
||||
method?: "POST" | "DELETE" | "GET" | "PUT";
|
||||
data?: any;
|
||||
queryParams?: Record<string, string>;
|
||||
}
|
||||
|
||||
/**
|
||||
* Sends a request to an exit node, handling both remote and local exit nodes
|
||||
* @param exitNode The exit node to send the request to
|
||||
* @param request The request configuration
|
||||
* @returns Promise<any> Response data for local nodes, undefined for remote nodes
|
||||
*/
|
||||
export async function sendToExitNode(
|
||||
exitNode: ExitNode,
|
||||
request: ExitNodeRequest
|
||||
): Promise<any> {
|
||||
if (exitNode.type === "remoteExitNode" && request.remoteType) {
|
||||
const [remoteExitNode] = await db
|
||||
.select()
|
||||
.from(remoteExitNodes)
|
||||
.where(eq(remoteExitNodes.exitNodeId, exitNode.exitNodeId))
|
||||
.limit(1);
|
||||
|
||||
if (!remoteExitNode) {
|
||||
throw new Error(
|
||||
`Remote exit node with ID ${exitNode.exitNodeId} not found`
|
||||
);
|
||||
}
|
||||
|
||||
return sendToClient(remoteExitNode.remoteExitNodeId, {
|
||||
type: request.remoteType,
|
||||
data: request.data
|
||||
});
|
||||
} else {
|
||||
let hostname = exitNode.reachableAt;
|
||||
|
||||
logger.debug(`Exit node details:`, {
|
||||
type: exitNode.type,
|
||||
name: exitNode.name,
|
||||
reachableAt: exitNode.reachableAt,
|
||||
});
|
||||
|
||||
logger.debug(`Configured local exit node name: ${config.getRawConfig().gerbil.exit_node_name}`);
|
||||
|
||||
if (exitNode.name == config.getRawConfig().gerbil.exit_node_name) {
|
||||
hostname = config.getRawPrivateConfig().gerbil.local_exit_node_reachable_at;
|
||||
}
|
||||
|
||||
if (!hostname) {
|
||||
throw new Error(
|
||||
`Exit node with ID ${exitNode.exitNodeId} is not reachable`
|
||||
);
|
||||
}
|
||||
|
||||
logger.debug(`Sending request to exit node at ${hostname}`, {
|
||||
type: request.remoteType,
|
||||
data: request.data
|
||||
});
|
||||
|
||||
// Handle local exit node with HTTP API
|
||||
const method = request.method || "POST";
|
||||
let url = `${hostname}${request.localPath}`;
|
||||
|
||||
// Add query parameters if provided
|
||||
if (request.queryParams) {
|
||||
const params = new URLSearchParams(request.queryParams);
|
||||
url += `?${params.toString()}`;
|
||||
}
|
||||
|
||||
try {
|
||||
let response;
|
||||
|
||||
switch (method) {
|
||||
case "POST":
|
||||
response = await axios.post(url, request.data, {
|
||||
headers: {
|
||||
"Content-Type": "application/json"
|
||||
},
|
||||
timeout: 8000
|
||||
});
|
||||
break;
|
||||
case "DELETE":
|
||||
response = await axios.delete(url, {
|
||||
timeout: 8000
|
||||
});
|
||||
break;
|
||||
case "GET":
|
||||
response = await axios.get(url, {
|
||||
timeout: 8000
|
||||
});
|
||||
break;
|
||||
case "PUT":
|
||||
response = await axios.put(url, request.data, {
|
||||
headers: {
|
||||
"Content-Type": "application/json"
|
||||
},
|
||||
timeout: 8000
|
||||
});
|
||||
break;
|
||||
default:
|
||||
throw new Error(`Unsupported HTTP method: ${method}`);
|
||||
}
|
||||
|
||||
logger.debug(`Exit node request successful:`, {
|
||||
method,
|
||||
url,
|
||||
status: response.data.status
|
||||
});
|
||||
|
||||
return response.data;
|
||||
} catch (error) {
|
||||
if (axios.isAxiosError(error)) {
|
||||
logger.error(
|
||||
`Error making ${method} request (can Pangolin see Gerbil HTTP API?) for exit node at ${hostname} (status: ${error.response?.status}): ${error.message}`
|
||||
);
|
||||
} else {
|
||||
logger.error(
|
||||
`Error making ${method} request for exit node at ${hostname}: ${error}`
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
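For reference, a minimal caller sketch for the removed helper, importing it through the new "#dynamic" alias. The message type and the Gerbil endpoint path are invented placeholders:

    import { db, exitNodes } from "@server/db";
    import { eq } from "drizzle-orm";
    import { sendToExitNode } from "#dynamic/lib/exitNodes";

    // Illustrative: push a config reload to one exit node, using the WebSocket
    // path for remote exit nodes and the HTTP API for local (gerbil) ones.
    async function reloadExitNodeConfig(exitNodeId: number) {
        const [node] = await db
            .select()
            .from(exitNodes)
            .where(eq(exitNodes.exitNodeId, exitNodeId))
            .limit(1);
        if (!node) return;

        return sendToExitNode(node, {
            remoteType: "exitNode/reload-config", // hypothetical message type
            localPath: "/reload-config", // hypothetical Gerbil endpoint
            method: "POST",
            data: {}
        });
    }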
@@ -1,379 +0,0 @@
|
||||
/*
|
||||
* This file is part of a proprietary work.
|
||||
*
|
||||
* Copyright (c) 2025 Fossorial, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This file is licensed under the Fossorial Commercial License.
|
||||
* You may not use this file except in compliance with the License.
|
||||
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
|
||||
*
|
||||
* This file is not licensed under the AGPLv3.
|
||||
*/
|
||||
|
||||
import {
|
||||
db,
|
||||
exitNodes,
|
||||
exitNodeOrgs,
|
||||
resources,
|
||||
targets,
|
||||
sites,
|
||||
targetHealthCheck
|
||||
} from "@server/db";
|
||||
import logger from "@server/logger";
|
||||
import { ExitNodePingResult } from "@server/routers/newt";
|
||||
import { eq, and, or, ne, isNull } from "drizzle-orm";
|
||||
import axios from "axios";
|
||||
import config from "../config";
|
||||
|
||||
/**
|
||||
* Checks if an exit node is actually online by making HTTP requests to its endpoint/ping
|
||||
* Makes up to 3 attempts in parallel with small delays, returns as soon as one succeeds
|
||||
*/
|
||||
async function checkExitNodeOnlineStatus(
|
||||
endpoint: string | undefined
|
||||
): Promise<boolean> {
|
||||
if (!endpoint || endpoint == "") {
|
||||
// the endpoint can start out as an empty string
|
||||
return false;
|
||||
}
|
||||
|
||||
const maxAttempts = 3;
|
||||
const timeoutMs = 5000; // 5 second timeout per request
|
||||
const delayBetweenAttempts = 100; // 100ms delay between starting each attempt
|
||||
|
||||
// Create promises for all attempts with staggered delays
|
||||
const attemptPromises = Array.from({ length: maxAttempts }, async (_, index) => {
|
||||
const attemptNumber = index + 1;
|
||||
|
||||
// Add delay before each attempt (except the first)
|
||||
if (index > 0) {
|
||||
await new Promise((resolve) => setTimeout(resolve, delayBetweenAttempts * index));
|
||||
}
|
||||
|
||||
try {
|
||||
const response = await axios.get(`http://${endpoint}/ping`, {
|
||||
timeout: timeoutMs,
|
||||
validateStatus: (status) => status === 200
|
||||
});
|
||||
|
||||
if (response.status === 200) {
|
||||
logger.debug(
|
||||
`Exit node ${endpoint} is online (attempt ${attemptNumber}/${maxAttempts})`
|
||||
);
|
||||
return { success: true, attemptNumber };
|
||||
}
|
||||
return { success: false, attemptNumber, error: 'Non-200 status' };
|
||||
} catch (error) {
|
||||
const errorMessage = error instanceof Error ? error.message : "Unknown error";
|
||||
logger.debug(
|
||||
`Exit node ${endpoint} ping failed (attempt ${attemptNumber}/${maxAttempts}): ${errorMessage}`
|
||||
);
|
||||
return { success: false, attemptNumber, error: errorMessage };
|
||||
}
|
||||
});
|
||||
|
||||
try {
|
||||
// Wait for the first successful response or all to fail
|
||||
const results = await Promise.allSettled(attemptPromises);
|
||||
|
||||
// Check if any attempt succeeded
|
||||
for (const result of results) {
|
||||
if (result.status === 'fulfilled' && result.value.success) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
// All attempts failed
|
||||
logger.warn(
|
||||
`Exit node ${endpoint} is offline after ${maxAttempts} parallel attempts`
|
||||
);
|
||||
return false;
|
||||
} catch (error) {
|
||||
logger.warn(
|
||||
`Unexpected error checking exit node ${endpoint}: ${error instanceof Error ? error.message : "Unknown error"}`
|
||||
);
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
export async function verifyExitNodeOrgAccess(
|
||||
exitNodeId: number,
|
||||
orgId: string
|
||||
) {
|
||||
const [result] = await db
|
||||
.select({
|
||||
exitNode: exitNodes,
|
||||
exitNodeOrgId: exitNodeOrgs.exitNodeId
|
||||
})
|
||||
.from(exitNodes)
|
||||
.leftJoin(
|
||||
exitNodeOrgs,
|
||||
and(
|
||||
eq(exitNodeOrgs.exitNodeId, exitNodes.exitNodeId),
|
||||
eq(exitNodeOrgs.orgId, orgId)
|
||||
)
|
||||
)
|
||||
.where(eq(exitNodes.exitNodeId, exitNodeId));
|
||||
|
||||
if (!result) {
|
||||
return { hasAccess: false, exitNode: null };
|
||||
}
|
||||
|
||||
const { exitNode } = result;
|
||||
|
||||
// If the exit node is type "gerbil", access is allowed
|
||||
if (exitNode.type === "gerbil") {
|
||||
return { hasAccess: true, exitNode };
|
||||
}
|
||||
|
||||
// If the exit node is type "remoteExitNode", check if it has org access
|
||||
if (exitNode.type === "remoteExitNode") {
|
||||
return { hasAccess: !!result.exitNodeOrgId, exitNode };
|
||||
}
|
||||
|
||||
// For any other type, deny access
|
||||
return { hasAccess: false, exitNode };
|
||||
}
|
||||
|
||||
export async function listExitNodes(orgId: string, filterOnline = false, noCloud = false) {
|
||||
const allExitNodes = await db
|
||||
.select({
|
||||
exitNodeId: exitNodes.exitNodeId,
|
||||
name: exitNodes.name,
|
||||
address: exitNodes.address,
|
||||
endpoint: exitNodes.endpoint,
|
||||
publicKey: exitNodes.publicKey,
|
||||
listenPort: exitNodes.listenPort,
|
||||
reachableAt: exitNodes.reachableAt,
|
||||
maxConnections: exitNodes.maxConnections,
|
||||
online: exitNodes.online,
|
||||
lastPing: exitNodes.lastPing,
|
||||
type: exitNodes.type,
|
||||
orgId: exitNodeOrgs.orgId,
|
||||
region: exitNodes.region
|
||||
})
|
||||
.from(exitNodes)
|
||||
.leftJoin(
|
||||
exitNodeOrgs,
|
||||
eq(exitNodes.exitNodeId, exitNodeOrgs.exitNodeId)
|
||||
)
|
||||
.where(
|
||||
or(
|
||||
// Include all exit nodes that are NOT of type remoteExitNode
|
||||
and(
|
||||
eq(exitNodes.type, "gerbil"),
|
||||
or(
|
||||
// only choose nodes that are in the same region
|
||||
eq(exitNodes.region, config.getRawPrivateConfig().app.region),
|
||||
isNull(exitNodes.region) // or for enterprise where region is not set
|
||||
)
|
||||
),
|
||||
// Include remoteExitNode types where the orgId matches the newt's organization
|
||||
and(
|
||||
eq(exitNodes.type, "remoteExitNode"),
|
||||
eq(exitNodeOrgs.orgId, orgId)
|
||||
)
|
||||
)
|
||||
);
|
||||
|
||||
// Filter the nodes: if there are no remoteExitNodes, keep the list as-is; otherwise drop the non-remote nodes
|
||||
if (allExitNodes.length === 0) {
|
||||
logger.warn("No exit nodes found for ping request!");
|
||||
return [];
|
||||
}
|
||||
|
||||
// Enhanced online checking: consider node offline if either DB says offline OR HTTP ping fails
|
||||
const nodesWithRealOnlineStatus = await Promise.all(
|
||||
allExitNodes.map(async (node) => {
|
||||
// If database says it's online, verify with HTTP ping
|
||||
let online: boolean;
|
||||
if (filterOnline && node.type == "remoteExitNode") {
|
||||
try {
|
||||
const isActuallyOnline = await checkExitNodeOnlineStatus(
|
||||
node.endpoint
|
||||
);
|
||||
|
||||
// set the item in the database if it is offline
|
||||
if (isActuallyOnline != node.online) {
|
||||
await db
|
||||
.update(exitNodes)
|
||||
.set({ online: isActuallyOnline })
|
||||
.where(eq(exitNodes.exitNodeId, node.exitNodeId));
|
||||
}
|
||||
online = isActuallyOnline;
|
||||
} catch (error) {
|
||||
logger.warn(
|
||||
`Failed to check online status for exit node ${node.name} (${node.endpoint}): ${error instanceof Error ? error.message : "Unknown error"}`
|
||||
);
|
||||
online = false;
|
||||
}
|
||||
} else {
|
||||
online = node.online;
|
||||
}
|
||||
|
||||
return {
|
||||
...node,
|
||||
online
|
||||
};
|
||||
})
|
||||
);
|
||||
|
||||
const remoteExitNodes = nodesWithRealOnlineStatus.filter(
|
||||
(node) =>
|
||||
node.type === "remoteExitNode" && (!filterOnline || node.online)
|
||||
);
|
||||
const gerbilExitNodes = nodesWithRealOnlineStatus.filter(
|
||||
(node) => node.type === "gerbil" && (!filterOnline || node.online) && !noCloud
|
||||
);
|
||||
|
||||
// THIS PROVIDES THE FALLBACK: prefer remote exit nodes, otherwise fall back to the gerbil nodes
|
||||
const exitNodesList =
|
||||
remoteExitNodes.length > 0 ? remoteExitNodes : gerbilExitNodes;
|
||||
|
||||
return exitNodesList;
|
||||
}
|
||||
|
||||
/**
|
||||
* Selects the most suitable exit node from a list of ping results.
|
||||
*
|
||||
* The selection algorithm follows these steps:
|
||||
*
|
||||
* 1. **Filter Invalid Nodes**: Excludes nodes with errors or zero weight.
|
||||
*
|
||||
* 2. **Sort by Latency**: Sorts valid nodes in ascending order of latency.
|
||||
*
|
||||
* 3. **Preferred Selection**:
|
||||
* - If the lowest-latency node has sufficient capacity (≥10% weight),
|
||||
* check if a previously connected node is also acceptable.
|
||||
* - The previously connected node is preferred if its latency is within
|
||||
* 30ms or 15% of the best node’s latency.
|
||||
*
|
||||
* 4. **Fallback to Next Best**:
|
||||
* - If the lowest-latency node is under capacity, find the next node
|
||||
* with acceptable capacity.
|
||||
*
|
||||
* 5. **Final Fallback**:
|
||||
* - If no nodes meet the capacity threshold, fall back to the node
|
||||
* with the highest weight (i.e., most available capacity).
|
||||
*
|
||||
*/
|
||||
export function selectBestExitNode(
|
||||
pingResults: ExitNodePingResult[]
|
||||
): ExitNodePingResult | null {
|
||||
const MIN_CAPACITY_THRESHOLD = 0.1;
|
||||
const LATENCY_TOLERANCE_MS = 30;
|
||||
const LATENCY_TOLERANCE_PERCENT = 0.15;
|
||||
|
||||
// Filter out invalid nodes
|
||||
const validNodes = pingResults.filter((n) => !n.error && n.weight > 0);
|
||||
|
||||
if (validNodes.length === 0) {
|
||||
logger.error("No valid exit nodes available");
|
||||
return null;
|
||||
}
|
||||
|
||||
// Sort by latency (ascending)
|
||||
const sortedNodes = validNodes
|
||||
.slice()
|
||||
.sort((a, b) => a.latencyMs - b.latencyMs);
|
||||
const lowestLatencyNode = sortedNodes[0];
|
||||
|
||||
logger.debug(
|
||||
`Lowest latency node: ${lowestLatencyNode.exitNodeName} (${lowestLatencyNode.latencyMs} ms, weight=${lowestLatencyNode.weight.toFixed(2)})`
|
||||
);
|
||||
|
||||
// If lowest latency node has enough capacity, check if previously connected node is acceptable
|
||||
if (lowestLatencyNode.weight >= MIN_CAPACITY_THRESHOLD) {
|
||||
const previouslyConnectedNode = sortedNodes.find(
|
||||
(n) =>
|
||||
n.wasPreviouslyConnected && n.weight >= MIN_CAPACITY_THRESHOLD
|
||||
);
|
||||
|
||||
if (previouslyConnectedNode) {
|
||||
const latencyDiff =
|
||||
previouslyConnectedNode.latencyMs - lowestLatencyNode.latencyMs;
|
||||
const percentDiff = latencyDiff / lowestLatencyNode.latencyMs;
|
||||
|
||||
if (
|
||||
latencyDiff <= LATENCY_TOLERANCE_MS ||
|
||||
percentDiff <= LATENCY_TOLERANCE_PERCENT
|
||||
) {
|
||||
logger.info(
|
||||
`Sticking with previously connected node: ${previouslyConnectedNode.exitNodeName} ` +
|
||||
`(${previouslyConnectedNode.latencyMs} ms), latency diff = ${latencyDiff.toFixed(1)}ms ` +
|
||||
`/ ${(percentDiff * 100).toFixed(1)}%.`
|
||||
);
|
||||
return previouslyConnectedNode;
|
||||
}
|
||||
}
|
||||
|
||||
return lowestLatencyNode;
|
||||
}
|
||||
|
||||
// Otherwise, find the next node (after the lowest) that has enough capacity
|
||||
for (let i = 1; i < sortedNodes.length; i++) {
|
||||
const node = sortedNodes[i];
|
||||
if (node.weight >= MIN_CAPACITY_THRESHOLD) {
|
||||
logger.info(
|
||||
`Lowest latency node under capacity. Using next best: ${node.exitNodeName} ` +
|
||||
`(${node.latencyMs} ms, weight=${node.weight.toFixed(2)})`
|
||||
);
|
||||
return node;
|
||||
}
|
||||
}
|
||||
|
||||
// Fallback: pick the highest weight node
|
||||
const fallbackNode = validNodes.reduce((a, b) =>
|
||||
a.weight > b.weight ? a : b
|
||||
);
|
||||
logger.warn(
|
||||
`No nodes with ≥10% weight. Falling back to highest capacity node: ${fallbackNode.exitNodeName}`
|
||||
);
|
||||
return fallbackNode;
|
||||
}
|
||||
|
||||
export async function checkExitNodeOrg(exitNodeId: number, orgId: string) {
|
||||
const [exitNodeOrg] = await db
|
||||
.select()
|
||||
.from(exitNodeOrgs)
|
||||
.where(
|
||||
and(
|
||||
eq(exitNodeOrgs.exitNodeId, exitNodeId),
|
||||
eq(exitNodeOrgs.orgId, orgId)
|
||||
)
|
||||
)
|
||||
.limit(1);
|
||||
|
||||
if (!exitNodeOrg) {
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
export async function resolveExitNodes(hostname: string, publicKey: string) {
|
||||
const resourceExitNodes = await db
|
||||
.select({
|
||||
endpoint: exitNodes.endpoint,
|
||||
publicKey: exitNodes.publicKey,
|
||||
orgId: resources.orgId
|
||||
})
|
||||
.from(resources)
|
||||
.innerJoin(targets, eq(resources.resourceId, targets.resourceId))
|
||||
.leftJoin(
|
||||
targetHealthCheck,
|
||||
eq(targetHealthCheck.targetId, targets.targetId)
|
||||
)
|
||||
.innerJoin(sites, eq(targets.siteId, sites.siteId))
|
||||
.innerJoin(exitNodes, eq(sites.exitNodeId, exitNodes.exitNodeId))
|
||||
.where(
|
||||
and(
|
||||
eq(resources.fullDomain, hostname),
|
||||
ne(exitNodes.publicKey, publicKey),
|
||||
ne(targetHealthCheck.hcHealth, "unhealthy")
|
||||
)
|
||||
);
|
||||
|
||||
return resourceExitNodes;
|
||||
}
|
||||
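A short usage sketch for the listing helper; the flag values are illustrative:

    import { listExitNodes } from "@server/lib/exitNodes";

    // Illustrative: only nodes that answered the HTTP /ping probe, skipping the cloud pool.
    async function onlineRemoteNodesFor(orgId: string) {
        return listExitNodes(orgId, /* filterOnline */ true, /* noCloud */ true);
    }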
@@ -1,16 +0,0 @@
/*
 * This file is part of a proprietary work.
 *
 * Copyright (c) 2025 Fossorial, Inc.
 * All rights reserved.
 *
 * This file is licensed under the Fossorial Commercial License.
 * You may not use this file except in compliance with the License.
 * Unauthorized use, copying, modification, or distribution is strictly prohibited.
 *
 * This file is not licensed under the AGPLv3.
 */

export * from "./limitSet";
export * from "./features";
export * from "./limitsService";
@@ -1,25 +0,0 @@
|
||||
/*
|
||||
* This file is part of a proprietary work.
|
||||
*
|
||||
* Copyright (c) 2025 Fossorial, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This file is licensed under the Fossorial Commercial License.
|
||||
* You may not use this file except in compliance with the License.
|
||||
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
|
||||
*
|
||||
* This file is not licensed under the AGPLv3.
|
||||
*/
|
||||
|
||||
import RedisStore from "@server/db/private/redisStore";
|
||||
import { MemoryStore, Store } from "express-rate-limit";
|
||||
|
||||
export function createStore(): Store {
|
||||
const rateLimitStore: Store = new RedisStore({
|
||||
prefix: 'api-rate-limit', // Optional: customize Redis key prefix
|
||||
skipFailedRequests: true, // Don't count failed requests
|
||||
skipSuccessfulRequests: false, // Count successful requests
|
||||
});
|
||||
|
||||
return rateLimitStore;
|
||||
}
|
||||
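A sketch of how the removed store factory would plug into express-rate-limit. The module path, window, and request budget are assumptions:

    import rateLimit from "express-rate-limit";
    import { createStore } from "@server/lib/private/rateLimitStore"; // assumed path of the removed module

    const apiLimiter = rateLimit({
        windowMs: 60 * 1000, // placeholder window
        max: 100, // placeholder request budget
        standardHeaders: true,
        store: createStore() // Redis-backed store shared across instances
    });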
@@ -1,192 +0,0 @@
|
||||
/*
|
||||
* This file is part of a proprietary work.
|
||||
*
|
||||
* Copyright (c) 2025 Fossorial, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This file is licensed under the Fossorial Commercial License.
|
||||
* You may not use this file except in compliance with the License.
|
||||
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
|
||||
*
|
||||
* This file is not licensed under the AGPLv3.
|
||||
*/
|
||||
|
||||
import fs from "fs";
|
||||
import yaml from "js-yaml";
|
||||
import { privateConfigFilePath1 } from "@server/lib/consts";
|
||||
import { z } from "zod";
|
||||
import { colorsSchema } from "@server/lib/colorsSchema";
|
||||
import { build } from "@server/build";
|
||||
|
||||
const portSchema = z.number().positive().gt(0).lte(65535);
|
||||
|
||||
export const privateConfigSchema = z
|
||||
.object({
|
||||
app: z.object({
|
||||
region: z.string().optional().default("default"),
|
||||
base_domain: z.string().optional()
|
||||
}).optional().default({
|
||||
region: "default"
|
||||
}),
|
||||
server: z.object({
|
||||
encryption_key_path: z
|
||||
.string()
|
||||
.optional()
|
||||
.default("./config/encryption.pem")
|
||||
.pipe(z.string().min(8)),
|
||||
resend_api_key: z.string().optional(),
|
||||
reo_client_id: z.string().optional(),
|
||||
}).optional().default({
|
||||
encryption_key_path: "./config/encryption.pem"
|
||||
}),
|
||||
redis: z
|
||||
.object({
|
||||
host: z.string(),
|
||||
port: portSchema,
|
||||
password: z.string().optional(),
|
||||
db: z.number().int().nonnegative().optional().default(0),
|
||||
replicas: z
|
||||
.array(
|
||||
z.object({
|
||||
host: z.string(),
|
||||
port: portSchema,
|
||||
password: z.string().optional(),
|
||||
db: z.number().int().nonnegative().optional().default(0)
|
||||
})
|
||||
)
|
||||
.optional()
|
||||
// tls: z
|
||||
// .object({
|
||||
// reject_unauthorized: z
|
||||
// .boolean()
|
||||
// .optional()
|
||||
// .default(true)
|
||||
// })
|
||||
// .optional()
|
||||
})
|
||||
.optional(),
|
||||
gerbil: z
|
||||
.object({
|
||||
local_exit_node_reachable_at: z.string().optional().default("http://gerbil:3003")
|
||||
})
|
||||
.optional()
|
||||
.default({}),
|
||||
flags: z
|
||||
.object({
|
||||
enable_redis: z.boolean().optional(),
|
||||
hide_supporter_key: z.boolean().optional()
|
||||
})
|
||||
.optional(),
|
||||
branding: z
|
||||
.object({
|
||||
app_name: z.string().optional(),
|
||||
background_image_path: z.string().optional(),
|
||||
colors: z
|
||||
.object({
|
||||
light: colorsSchema.optional(),
|
||||
dark: colorsSchema.optional()
|
||||
})
|
||||
.optional(),
|
||||
logo: z
|
||||
.object({
|
||||
light_path: z.string().optional(),
|
||||
dark_path: z.string().optional(),
|
||||
auth_page: z
|
||||
.object({
|
||||
width: z.number().optional(),
|
||||
height: z.number().optional()
|
||||
})
|
||||
.optional(),
|
||||
navbar: z
|
||||
.object({
|
||||
width: z.number().optional(),
|
||||
height: z.number().optional()
|
||||
})
|
||||
.optional()
|
||||
})
|
||||
.optional(),
|
||||
favicon_path: z.string().optional(),
|
||||
footer: z
|
||||
.array(
|
||||
z.object({
|
||||
text: z.string(),
|
||||
href: z.string().optional()
|
||||
})
|
||||
)
|
||||
.optional(),
|
||||
login_page: z
|
||||
.object({
|
||||
subtitle_text: z.string().optional(),
|
||||
title_text: z.string().optional()
|
||||
})
|
||||
.optional(),
|
||||
signup_page: z
|
||||
.object({
|
||||
subtitle_text: z.string().optional(),
|
||||
title_text: z.string().optional()
|
||||
})
|
||||
.optional(),
|
||||
resource_auth_page: z
|
||||
.object({
|
||||
show_logo: z.boolean().optional(),
|
||||
hide_powered_by: z.boolean().optional(),
|
||||
title_text: z.string().optional(),
|
||||
subtitle_text: z.string().optional()
|
||||
})
|
||||
.optional(),
|
||||
emails: z
|
||||
.object({
|
||||
signature: z.string().optional(),
|
||||
colors: z
|
||||
.object({
|
||||
primary: z.string().optional()
|
||||
})
|
||||
.optional()
|
||||
})
|
||||
.optional()
|
||||
})
|
||||
.optional(),
|
||||
stripe: z
|
||||
.object({
|
||||
secret_key: z.string(),
|
||||
webhook_secret: z.string(),
|
||||
s3Bucket: z.string(),
|
||||
s3Region: z.string().default("us-east-1"),
|
||||
localFilePath: z.string()
|
||||
})
|
||||
.optional(),
|
||||
});
|
||||
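A sketch of validating a config object against this schema; the sample values are invented, and only the stripe block, when present, must be complete:

    import { privateConfigSchema } from "@server/lib/private/readConfigFile";

    const parsed = privateConfigSchema.safeParse({
        app: { region: "us-east" }, // invented sample values
        stripe: {
            secret_key: "sk_test_placeholder",
            webhook_secret: "whsec_placeholder",
            s3Bucket: "example-bucket",
            localFilePath: "/tmp/stripe-events"
        }
    });

    if (!parsed.success) {
        throw new Error(parsed.error.message);
    }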
|
||||
export function readPrivateConfigFile() {
|
||||
if (build == "oss") {
|
||||
return {};
|
||||
}
|
||||
|
||||
const loadConfig = (configPath: string) => {
|
||||
try {
|
||||
const yamlContent = fs.readFileSync(configPath, "utf8");
|
||||
const config = yaml.load(yamlContent);
|
||||
return config;
|
||||
} catch (error) {
|
||||
if (error instanceof Error) {
|
||||
throw new Error(
|
||||
`Error loading configuration file: ${error.message}`
|
||||
);
|
||||
}
|
||||
throw error;
|
||||
}
|
||||
};
|
||||
|
||||
let environment: any;
|
||||
if (fs.existsSync(privateConfigFilePath1)) {
|
||||
environment = loadConfig(privateConfigFilePath1);
|
||||
}
|
||||
|
||||
if (!environment) {
|
||||
throw new Error(
|
||||
"No private configuration file found."
|
||||
);
|
||||
}
|
||||
|
||||
return environment;
|
||||
}
|
||||
@@ -1,124 +0,0 @@
|
||||
/*
|
||||
* This file is part of a proprietary work.
|
||||
*
|
||||
* Copyright (c) 2025 Fossorial, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This file is licensed under the Fossorial Commercial License.
|
||||
* You may not use this file except in compliance with the License.
|
||||
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
|
||||
*
|
||||
* This file is not licensed under the AGPLv3.
|
||||
*/
|
||||
|
||||
import { Resend } from "resend";
|
||||
import config from "../config";
|
||||
import logger from "@server/logger";
|
||||
|
||||
export enum AudienceIds {
|
||||
General = "5cfbf99b-c592-40a9-9b8a-577a4681c158",
|
||||
Subscribed = "870b43fd-387f-44de-8fc1-707335f30b20",
|
||||
Churned = "f3ae92bd-2fdb-4d77-8746-2118afd62549"
|
||||
}
|
||||
|
||||
const resend = new Resend(
|
||||
config.getRawPrivateConfig().server.resend_api_key || "missing"
|
||||
);
|
||||
|
||||
export default resend;
|
||||
|
||||
export async function moveEmailToAudience(
|
||||
email: string,
|
||||
audienceId: AudienceIds
|
||||
) {
|
||||
if (process.env.ENVIRONMENT !== "prod") {
|
||||
logger.debug(`Skipping moving email ${email} to audience ${audienceId} in non-prod environment`);
|
||||
return;
|
||||
}
|
||||
const { error, data } = await retryWithBackoff(async () => {
|
||||
const { data, error } = await resend.contacts.create({
|
||||
email,
|
||||
unsubscribed: false,
|
||||
audienceId
|
||||
});
|
||||
if (error) {
|
||||
throw new Error(
|
||||
`Error adding email ${email} to audience ${audienceId}: ${error}`
|
||||
);
|
||||
}
|
||||
return { error, data };
|
||||
});
|
||||
|
||||
if (error) {
|
||||
logger.error(
|
||||
`Error adding email ${email} to audience ${audienceId}: ${error}`
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
if (data) {
|
||||
logger.debug(
|
||||
`Added email ${email} to audience ${audienceId} with contact ID ${data.id}`
|
||||
);
|
||||
}
|
||||
|
||||
const otherAudiences = Object.values(AudienceIds).filter(
|
||||
(id) => id !== audienceId
|
||||
);
|
||||
|
||||
for (const otherAudienceId of otherAudiences) {
|
||||
const { error, data } = await retryWithBackoff(async () => {
|
||||
const { data, error } = await resend.contacts.remove({
|
||||
email,
|
||||
audienceId: otherAudienceId
|
||||
});
|
||||
if (error) {
|
||||
throw new Error(
|
||||
`Error removing email ${email} from audience ${otherAudienceId}: ${error}`
|
||||
);
|
||||
}
|
||||
return { error, data };
|
||||
});
|
||||
|
||||
if (error) {
|
||||
logger.error(
|
||||
`Error removing email ${email} from audience ${otherAudienceId}: ${error}`
|
||||
);
|
||||
}
|
||||
|
||||
if (data) {
|
||||
logger.info(
|
||||
`Removed email ${email} from audience ${otherAudienceId}`
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type RetryOptions = {
|
||||
retries?: number;
|
||||
initialDelayMs?: number;
|
||||
factor?: number;
|
||||
};
|
||||
|
||||
export async function retryWithBackoff<T>(
|
||||
fn: () => Promise<T>,
|
||||
options: RetryOptions = {}
|
||||
): Promise<T> {
|
||||
const { retries = 5, initialDelayMs = 500, factor = 2 } = options;
|
||||
|
||||
let attempt = 0;
|
||||
let delay = initialDelayMs;
|
||||
|
||||
while (true) {
|
||||
try {
|
||||
return await fn();
|
||||
} catch (err) {
|
||||
attempt++;
|
||||
|
||||
if (attempt > retries) throw err;
|
||||
|
||||
await new Promise((resolve) => setTimeout(resolve, delay));
|
||||
delay *= factor;
|
||||
}
|
||||
}
|
||||
}
|
||||
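A usage sketch for the retry helper; the wrapped request and the import location are assumptions (the helper is exported from this removed module):

    import { retryWithBackoff } from "@server/lib/private/resend"; // assumed module path

    // Illustrative: retry a flaky request up to 3 times with exponential backoff.
    async function fetchHealthWithRetry() {
        return retryWithBackoff(
            async () => {
                const res = await fetch("https://api.example.com/health"); // placeholder URL
                if (!res.ok) throw new Error(`Unexpected status ${res.status}`);
                return res.json();
            },
            { retries: 3, initialDelayMs: 250, factor: 2 }
        );
    }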
@@ -1,19 +0,0 @@
|
||||
/*
|
||||
* This file is part of a proprietary work.
|
||||
*
|
||||
* Copyright (c) 2025 Fossorial, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This file is licensed under the Fossorial Commercial License.
|
||||
* You may not use this file except in compliance with the License.
|
||||
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
|
||||
*
|
||||
* This file is not licensed under the AGPLv3.
|
||||
*/
|
||||
|
||||
import { S3Client } from "@aws-sdk/client-s3";
|
||||
import config from "@server/lib/config";
|
||||
|
||||
export const s3Client = new S3Client({
|
||||
region: config.getRawPrivateConfig().stripe?.s3Region || "us-east-1",
|
||||
});
|
||||
@@ -1,28 +0,0 @@
|
||||
/*
|
||||
* This file is part of a proprietary work.
|
||||
*
|
||||
* Copyright (c) 2025 Fossorial, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This file is licensed under the Fossorial Commercial License.
|
||||
* You may not use this file except in compliance with the License.
|
||||
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
|
||||
*
|
||||
* This file is not licensed under the AGPLv3.
|
||||
*/
|
||||
|
||||
import Stripe from "stripe";
|
||||
import config from "@server/lib/config";
|
||||
import logger from "@server/logger";
|
||||
import { build } from "@server/build";
|
||||
|
||||
let stripe: Stripe | undefined = undefined;
|
||||
if (build == "saas") {
|
||||
const stripeApiKey = config.getRawPrivateConfig().stripe?.secret_key;
|
||||
if (!stripeApiKey) {
|
||||
logger.error("Stripe secret key is not configured");
|
||||
}
|
||||
stripe = new Stripe(stripeApiKey!);
|
||||
}
|
||||
|
||||
export default stripe;
|
||||
@@ -1,14 +0,0 @@
|
||||
import { build } from "@server/build";
|
||||
|
||||
// Import both modules
|
||||
import * as certificateModule from "./certificates";
|
||||
import * as privateCertificateModule from "./privateCertificates";
|
||||
|
||||
// Conditionally export Remote Certificates implementation based on build type
|
||||
const remoteCertificatesImplementation = build === "oss" ? certificateModule : privateCertificateModule;
|
||||
|
||||
// Re-export all items from the selected implementation
|
||||
export const {
|
||||
getValidCertificatesForDomains,
|
||||
getValidCertificatesForDomainsHybrid
|
||||
} = remoteCertificatesImplementation;
|
||||
@@ -1,116 +0,0 @@
|
||||
/*
|
||||
* This file is part of a proprietary work.
|
||||
*
|
||||
* Copyright (c) 2025 Fossorial, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This file is licensed under the Fossorial Commercial License.
|
||||
* You may not use this file except in compliance with the License.
|
||||
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
|
||||
*
|
||||
* This file is not licensed under the AGPLv3.
|
||||
*/
|
||||
|
||||
import config from "../config";
|
||||
import { certificates, db } from "@server/db";
|
||||
import { and, eq, isNotNull } from "drizzle-orm";
|
||||
import { decryptData } from "../encryption";
|
||||
import * as fs from "fs";
|
||||
|
||||
export async function getValidCertificatesForDomains(
|
||||
domains: Set<string>
|
||||
): Promise<
|
||||
Array<{
|
||||
id: number;
|
||||
domain: string;
|
||||
wildcard: boolean | null;
|
||||
certFile: string | null;
|
||||
keyFile: string | null;
|
||||
expiresAt: number | null;
|
||||
updatedAt?: number | null;
|
||||
}>
|
||||
> {
|
||||
if (domains.size === 0) {
|
||||
return [];
|
||||
}
|
||||
|
||||
const domainArray = Array.from(domains);
|
||||
|
||||
// TODO: add more foreign keys to make this query more efficient - we don't need to keep fetching every certificate
|
||||
const validCerts = await db
|
||||
.select({
|
||||
id: certificates.certId,
|
||||
domain: certificates.domain,
|
||||
certFile: certificates.certFile,
|
||||
keyFile: certificates.keyFile,
|
||||
expiresAt: certificates.expiresAt,
|
||||
updatedAt: certificates.updatedAt,
|
||||
wildcard: certificates.wildcard
|
||||
})
|
||||
.from(certificates)
|
||||
.where(
|
||||
and(
|
||||
eq(certificates.status, "valid"),
|
||||
isNotNull(certificates.certFile),
|
||||
isNotNull(certificates.keyFile)
|
||||
)
|
||||
);
|
||||
|
||||
// Filter certificates for the specified domains; a wildcard cert matches any hostname ending with "." followed by its base domain
|
||||
const validCertsFiltered = validCerts.filter((cert) => {
|
||||
return (
|
||||
domainArray.includes(cert.domain) ||
|
||||
(cert.wildcard &&
|
||||
domainArray.some((domain) =>
|
||||
domain.endsWith(`.${cert.domain}`)
|
||||
))
|
||||
);
|
||||
});
|
||||
|
||||
const encryptionKeyPath = config.getRawPrivateConfig().server.encryption_key_path;
|
||||
|
||||
if (!fs.existsSync(encryptionKeyPath)) {
|
||||
throw new Error(
|
||||
"Encryption key file not found. Please generate one first."
|
||||
);
|
||||
}
|
||||
|
||||
const encryptionKeyHex = fs.readFileSync(encryptionKeyPath, "utf8").trim();
|
||||
const encryptionKey = Buffer.from(encryptionKeyHex, "hex");
|
||||
|
||||
const validCertsDecrypted = validCertsFiltered.map((cert) => {
|
||||
// Decrypt and save certificate file
|
||||
const decryptedCert = decryptData(
|
||||
cert.certFile!, // is not null from query
|
||||
encryptionKey
|
||||
);
|
||||
|
||||
// Decrypt and save key file
|
||||
const decryptedKey = decryptData(cert.keyFile!, encryptionKey);
|
||||
|
||||
// Return only the certificate data without org information
|
||||
return {
|
||||
...cert,
|
||||
certFile: decryptedCert,
|
||||
keyFile: decryptedKey
|
||||
};
|
||||
});
|
||||
|
||||
return validCertsDecrypted;
|
||||
}
|
||||
|
||||
export async function getValidCertificatesForDomainsHybrid(
|
||||
domains: Set<string>
|
||||
): Promise<
|
||||
Array<{
|
||||
id: number;
|
||||
domain: string;
|
||||
wildcard: boolean | null;
|
||||
certFile: string | null;
|
||||
keyFile: string | null;
|
||||
expiresAt: number | null;
|
||||
updatedAt?: number | null;
|
||||
}>
|
||||
> {
|
||||
return []; // stub
|
||||
}
|
||||
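A short caller sketch, importing through the "#dynamic" alias introduced elsewhere in this commit; hostnames are placeholders:

    import { getValidCertificatesForDomains } from "#dynamic/lib/certificates";

    // Illustrative: fetch decrypted cert/key material for a set of hostnames.
    async function certsFor(hosts: string[]) {
        return getValidCertificatesForDomains(new Set(hosts));
    }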
15
server/lib/resend.ts
Normal file
@@ -0,0 +1,15 @@
export enum AudienceIds {
    General = "",
    Subscribed = "",
    Churned = ""
}

let resend;
export default resend;

export async function moveEmailToAudience(
    email: string,
    audienceId: AudienceIds
) {
    return;
}
5
server/lib/s3.ts
Normal file
@@ -0,0 +1,5 @@
import { S3Client } from "@aws-sdk/client-s3";

export const s3Client = new S3Client({
    region: process.env.S3_REGION || "us-east-1",
});
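usageService pairs this client with PutObjectCommand; a minimal upload sketch with placeholder bucket and key:

    import { PutObjectCommand } from "@aws-sdk/client-s3";
    import { s3Client } from "@server/lib/s3";

    // Illustrative upload; bucket and key are placeholders, not values from the codebase.
    async function uploadEvent(body: string) {
        await s3Client.send(
            new PutObjectCommand({
                Bucket: process.env.S3_BUCKET ?? "example-bucket",
                Key: `events/${Date.now()}.json`,
                Body: body,
                ContentType: "application/json"
            })
        );
    }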
@@ -8,12 +8,12 @@ import { db, exitNodes } from "@server/db";
|
||||
import { eq } from "drizzle-orm";
|
||||
import { tokenManager } from "../tokenManager";
|
||||
import { getCurrentExitNodeId } from "@server/lib/exitNodes";
|
||||
import { getTraefikConfig } from "./";
|
||||
import { getTraefikConfig } from "#dynamic/lib/traefik";
|
||||
import {
|
||||
getValidCertificatesForDomains,
|
||||
getValidCertificatesForDomainsHybrid
|
||||
} from "../remoteCertificates";
|
||||
import { sendToExitNode } from "../exitNodes";
|
||||
} from "#dynamic/lib/certificates";
|
||||
import { sendToExitNode } from "#dynamic/lib/exitNodes";
|
||||
import { build } from "@server/build";
|
||||
|
||||
export class TraefikConfigManager {
|
||||
|
||||
@@ -1,11 +1 @@
|
||||
import { build } from "@server/build";
|
||||
|
||||
// Import both modules
|
||||
import * as traefikModule from "./getTraefikConfig";
|
||||
import * as privateTraefikModule from "./privateGetTraefikConfig";
|
||||
|
||||
// Conditionally export Traefik configuration implementation based on build type
|
||||
const traefikImplementation = build === "oss" ? traefikModule : privateTraefikModule;
|
||||
|
||||
// Re-export all items from the selected implementation
|
||||
export const { getTraefikConfig } = traefikImplementation;
|
||||
export * from "./getTraefikConfig";
|
||||
@@ -1,712 +0,0 @@
/*
 * This file is part of a proprietary work.
 *
 * Copyright (c) 2025 Fossorial, Inc.
 * All rights reserved.
 *
 * This file is licensed under the Fossorial Commercial License.
 * You may not use this file except in compliance with the License.
 * Unauthorized use, copying, modification, or distribution is strictly prohibited.
 *
 * This file is not licensed under the AGPLv3.
 */

import {
    certificates,
    db,
    domainNamespaces,
    exitNodes,
    loginPage,
    targetHealthCheck
} from "@server/db";
import { and, eq, inArray, or, isNull, ne, isNotNull, desc } from "drizzle-orm";
import logger from "@server/logger";
import HttpCode from "@server/types/HttpCode";
import config from "@server/lib/config";
import { orgs, resources, sites, Target, targets } from "@server/db";
import { build } from "@server/build";
import { sanitize } from "./utils";

const redirectHttpsMiddlewareName = "redirect-to-https";
const redirectToRootMiddlewareName = "redirect-to-root";
const badgerMiddlewareName = "badger";

export async function getTraefikConfig(
    exitNodeId: number,
    siteTypes: string[],
    filterOutNamespaceDomains = false,
    generateLoginPageRouters = false
): Promise<any> {
    // Define extended target type with site information
    type TargetWithSite = Target & {
        site: {
            siteId: number;
            type: string;
            subnet: string | null;
            exitNodeId: number | null;
            online: boolean;
        };
    };

    // Get resources with their targets and sites in a single optimized query
    // Start from sites on this exit node, then join to targets and resources
    const resourcesWithTargetsAndSites = await db
        .select({
            // Resource fields
            resourceId: resources.resourceId,
            resourceName: resources.name,
            fullDomain: resources.fullDomain,
            ssl: resources.ssl,
            http: resources.http,
            proxyPort: resources.proxyPort,
            protocol: resources.protocol,
            subdomain: resources.subdomain,
            domainId: resources.domainId,
            enabled: resources.enabled,
            stickySession: resources.stickySession,
            tlsServerName: resources.tlsServerName,
            setHostHeader: resources.setHostHeader,
            enableProxy: resources.enableProxy,
            headers: resources.headers,
            // Target fields
            targetId: targets.targetId,
            targetEnabled: targets.enabled,
            ip: targets.ip,
            method: targets.method,
            port: targets.port,
            internalPort: targets.internalPort,
            hcHealth: targetHealthCheck.hcHealth,
            path: targets.path,
            pathMatchType: targets.pathMatchType,
            priority: targets.priority,

            // Site fields
            siteId: sites.siteId,
            siteType: sites.type,
            siteOnline: sites.online,
            subnet: sites.subnet,
            exitNodeId: sites.exitNodeId,
            // Namespace
            domainNamespaceId: domainNamespaces.domainNamespaceId,
            // Certificate
            certificateStatus: certificates.status
        })
        .from(sites)
        .innerJoin(targets, eq(targets.siteId, sites.siteId))
        .innerJoin(resources, eq(resources.resourceId, targets.resourceId))
        .leftJoin(certificates, eq(certificates.domainId, resources.domainId))
        .leftJoin(
            targetHealthCheck,
            eq(targetHealthCheck.targetId, targets.targetId)
        )
        .leftJoin(
            domainNamespaces,
            eq(domainNamespaces.domainId, resources.domainId)
        ) // THIS IS CLOUD ONLY TO FILTER OUT THE DOMAIN NAMESPACES IF REQUIRED
        .where(
            and(
                eq(targets.enabled, true),
                eq(resources.enabled, true),
                // or(
                eq(sites.exitNodeId, exitNodeId),
                // isNull(sites.exitNodeId)
                // ),
                or(
                    ne(targetHealthCheck.hcHealth, "unhealthy"), // Exclude unhealthy targets
                    isNull(targetHealthCheck.hcHealth) // Include targets with no health check record
                ),
                inArray(sites.type, siteTypes),
                config.getRawConfig().traefik.allow_raw_resources
                    ? isNotNull(resources.http) // ignore the http check if allow_raw_resources is true
                    : eq(resources.http, true)
            )
        )
        .orderBy(desc(targets.priority), targets.targetId); // stable ordering

    // Group by resource and include targets with their unique site data
    const resourcesMap = new Map();

    resourcesWithTargetsAndSites.forEach((row) => {
        const resourceId = row.resourceId;
        const resourceName = sanitize(row.resourceName) || "";
        const targetPath = sanitize(row.path) || ""; // Handle null/undefined paths
        const pathMatchType = row.pathMatchType || "";
        const priority = row.priority ?? 100;

        if (filterOutNamespaceDomains && row.domainNamespaceId) {
            return;
        }

        // Create a unique key combining resourceId and path+pathMatchType
        const pathKey = [targetPath, pathMatchType].filter(Boolean).join("-");
        const mapKey = [resourceId, pathKey].filter(Boolean).join("-");
        const key = sanitize(mapKey);

        if (!resourcesMap.has(key)) {
            resourcesMap.set(key, {
                resourceId: row.resourceId,
                name: resourceName,
                fullDomain: row.fullDomain,
                ssl: row.ssl,
                http: row.http,
                proxyPort: row.proxyPort,
                protocol: row.protocol,
                subdomain: row.subdomain,
                domainId: row.domainId,
                enabled: row.enabled,
                stickySession: row.stickySession,
                tlsServerName: row.tlsServerName,
                setHostHeader: row.setHostHeader,
                enableProxy: row.enableProxy,
                certificateStatus: row.certificateStatus,
                targets: [],
                headers: row.headers,
                path: row.path, // the targets will all have the same path
                pathMatchType: row.pathMatchType, // the targets will all have the same pathMatchType
                priority: priority // may be null, we fallback later
            });
        }

        // Add target with its associated site data
        resourcesMap.get(key).targets.push({
            resourceId: row.resourceId,
            targetId: row.targetId,
            ip: row.ip,
            method: row.method,
            port: row.port,
            internalPort: row.internalPort,
            enabled: row.targetEnabled,
            priority: row.priority,
            site: {
                siteId: row.siteId,
                type: row.siteType,
                subnet: row.subnet,
                exitNodeId: row.exitNodeId,
                online: row.siteOnline
            }
        });
    });

    // make sure we have at least one resource
    if (resourcesMap.size === 0) {
        return {};
    }

    const config_output: any = {
        http: {
            middlewares: {
                [redirectHttpsMiddlewareName]: {
                    redirectScheme: {
                        scheme: "https"
                    }
                },
                [redirectToRootMiddlewareName]: {
                    redirectRegex: {
                        regex: "^(https?)://([^/]+)(/.*)?",
                        replacement: "${1}://${2}/auth/org",
                        permanent: false
                    }
                }
            }
        }
    };

    // get the key and the resource
    for (const [key, resource] of resourcesMap.entries()) {
        const targets = resource.targets;

        const routerName = `${key}-${resource.name}-router`;
        const serviceName = `${key}-${resource.name}-service`;
        const fullDomain = `${resource.fullDomain}`;
        const transportName = `${key}-transport`;
        const headersMiddlewareName = `${key}-headers-middleware`;

        if (!resource.enabled) {
            continue;
        }

        if (resource.http) {
            if (!resource.domainId) {
                continue;
            }

            if (!resource.fullDomain) {
                continue;
            }

            if (resource.certificateStatus !== "valid") {
                logger.debug(
                    `Resource ${resource.resourceId} has certificate status ${resource.certificateStatus}`
                );
                continue;
            }

            // add routers and services empty objects if they don't exist
            if (!config_output.http.routers) {
                config_output.http.routers = {};
            }

            if (!config_output.http.services) {
                config_output.http.services = {};
            }

            const domainParts = fullDomain.split(".");
            let wildCard;
            if (domainParts.length <= 2) {
                wildCard = `*.${domainParts.join(".")}`;
            } else {
                wildCard = `*.${domainParts.slice(1).join(".")}`;
            }

            if (!resource.subdomain) {
                wildCard = resource.fullDomain;
            }
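            // Illustrative note (values assumed, not from the original file): a fullDomain of
            // "app.sub.example.com" yields the wildcard "*.sub.example.com", a two-label domain
            // such as "example.com" yields "*.example.com", and a resource without a subdomain
            // falls back to using its full domain instead of a wildcard.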

            const configDomain = config.getDomain(resource.domainId);

            let certResolver: string, preferWildcardCert: boolean;
            if (!configDomain) {
                certResolver = config.getRawConfig().traefik.cert_resolver;
                preferWildcardCert =
                    config.getRawConfig().traefik.prefer_wildcard_cert;
            } else {
                certResolver = configDomain.cert_resolver;
                preferWildcardCert = configDomain.prefer_wildcard_cert;
            }

            let tls = {};
            if (build == "oss") {
                tls = {
                    certResolver: certResolver,
                    ...(preferWildcardCert
                        ? {
                              domains: [
                                  {
                                      main: wildCard
                                  }
                              ]
                          }
                        : {})
                };
            }

            const additionalMiddlewares =
                config.getRawConfig().traefik.additional_middlewares || [];

            const routerMiddlewares = [
                badgerMiddlewareName,
                ...additionalMiddlewares
            ];

            if (resource.headers || resource.setHostHeader) {
                // if there are headers, parse them into an object
                const headersObj: { [key: string]: string } = {};
                if (resource.headers) {
                    let headersArr: { name: string; value: string }[] = [];
                    try {
                        headersArr = JSON.parse(resource.headers) as {
                            name: string;
                            value: string;
                        }[];
                    } catch (e) {
                        logger.warn(
                            `Failed to parse headers for resource ${resource.resourceId}: ${e}`
                        );
                    }

                    headersArr.forEach((header) => {
                        headersObj[header.name] = header.value;
                    });
                }

                if (resource.setHostHeader) {
                    headersObj["Host"] = resource.setHostHeader;
                }

                // check if the object is not empty
                if (Object.keys(headersObj).length > 0) {
                    // Add the headers middleware
                    if (!config_output.http.middlewares) {
                        config_output.http.middlewares = {};
                    }
                    config_output.http.middlewares[headersMiddlewareName] = {
                        headers: {
                            customRequestHeaders: headersObj
                        }
                    };

                    routerMiddlewares.push(headersMiddlewareName);
                }
            }
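            // Illustrative note (values assumed, not from the original file): a resource.headers
            // value of '[{"name":"X-Example","value":"1"}]' plus setHostHeader "internal.local"
            // produces a middleware of the form
            // { headers: { customRequestHeaders: { "X-Example": "1", Host: "internal.local" } } },
            // which is then appended to routerMiddlewares for this router.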

            let rule = `Host(\`${fullDomain}\`)`;

            // priority logic
            let priority: number;
            if (resource.priority && resource.priority != 100) {
                priority = resource.priority;
            } else {
                priority = 100;
                if (resource.path && resource.pathMatchType) {
                    priority += 10;
                    if (resource.pathMatchType === "exact") {
                        priority += 5;
                    } else if (resource.pathMatchType === "prefix") {
                        priority += 3;
                    } else if (resource.pathMatchType === "regex") {
                        priority += 2;
                    }
                    if (resource.path === "/") {
                        priority = 1; // lowest for catch-all
                    }
                }
            }
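            // Illustrative note (worked example, not from the original file): with the defaults
            // above, a path with "exact" matching gets priority 115 (100 + 10 + 5), "prefix"
            // gets 113, "regex" gets 112, a host-only resource stays at 100, a "/" catch-all
            // drops to 1, and an explicit non-100 resource.priority overrides all of this.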

            if (resource.path && resource.pathMatchType) {
                //priority += 1;
                // add path to rule based on match type
                let path = resource.path;
                // if the path doesn't start with a /, add it
                if (!path.startsWith("/")) {
                    path = `/${path}`;
                }
                if (resource.pathMatchType === "exact") {
                    rule += ` && Path(\`${path}\`)`;
                } else if (resource.pathMatchType === "prefix") {
                    rule += ` && PathPrefix(\`${path}\`)`;
                } else if (resource.pathMatchType === "regex") {
                    rule += ` && PathRegexp(\`${resource.path}\`)`; // this is the raw path because it's a regex
                }
            }

            config_output.http.routers![routerName] = {
                entryPoints: [
                    resource.ssl
                        ? config.getRawConfig().traefik.https_entrypoint
                        : config.getRawConfig().traefik.http_entrypoint
                ],
                middlewares: routerMiddlewares,
                service: serviceName,
                rule: rule,
                priority: priority,
                ...(resource.ssl ? { tls } : {})
            };

            if (resource.ssl) {
                config_output.http.routers![routerName + "-redirect"] = {
                    entryPoints: [
                        config.getRawConfig().traefik.http_entrypoint
                    ],
                    middlewares: [redirectHttpsMiddlewareName],
                    service: serviceName,
                    rule: rule,
                    priority: priority
                };
            }

            config_output.http.services![serviceName] = {
                loadBalancer: {
                    servers: (() => {
                        // Check if any sites are online
                        // THIS IS SO THAT THERE IS SOME IMMEDIATE FEEDBACK
                        // EVEN IF THE SITES HAVE NOT UPDATED YET FROM THE
                        // RECEIVE BANDWIDTH ENDPOINT.

                        // TODO: HOW TO HANDLE ^^^^^^ BETTER
                        const anySitesOnline = (
                            targets as TargetWithSite[]
                        ).some((target: TargetWithSite) => target.site.online);

                        return (
                            (targets as TargetWithSite[])
                                .filter((target: TargetWithSite) => {
                                    if (!target.enabled) {
                                        return false;
                                    }

                                    // If any sites are online, exclude offline sites
                                    if (anySitesOnline && !target.site.online) {
                                        return false;
                                    }

                                    if (
                                        target.site.type === "local" ||
                                        target.site.type === "wireguard"
                                    ) {
                                        if (
                                            !target.ip ||
                                            !target.port ||
                                            !target.method
                                        ) {
                                            return false;
                                        }
                                    } else if (target.site.type === "newt") {
                                        if (
                                            !target.internalPort ||
                                            !target.method ||
                                            !target.site.subnet
                                        ) {
                                            return false;
                                        }
                                    }
                                    return true;
                                })
                                .map((target: TargetWithSite) => {
                                    if (
                                        target.site.type === "local" ||
                                        target.site.type === "wireguard"
                                    ) {
                                        return {
                                            url: `${target.method}://${target.ip}:${target.port}`
                                        };
                                    } else if (target.site.type === "newt") {
                                        const ip =
                                            target.site.subnet!.split("/")[0];
                                        return {
                                            url: `${target.method}://${ip}:${target.internalPort}`
                                        };
                                    }
                                })
                                // filter out duplicates
                                .filter(
                                    (v, i, a) =>
                                        a.findIndex(
                                            (t) => t && v && t.url === v.url
                                        ) === i
                                )
                        );
                    })(),
                    ...(resource.stickySession
                        ? {
                              sticky: {
                                  cookie: {
                                      name: "p_sticky", // TODO: make this configurable via config.yml like other cookies
                                      secure: resource.ssl,
                                      httpOnly: true
                                  }
                              }
                          }
                        : {})
                }
            };

            // Add the serversTransport if TLS server name is provided
            if (resource.tlsServerName) {
                if (!config_output.http.serversTransports) {
                    config_output.http.serversTransports = {};
                }
                config_output.http.serversTransports![transportName] = {
                    serverName: resource.tlsServerName,
                    //unfortunately the following needs to be set. traefik doesn't merge the default serverTransport settings
                    // if defined in the static config and here. if not set, self-signed certs won't work
                    insecureSkipVerify: true
                };
                config_output.http.services![
                    serviceName
                ].loadBalancer.serversTransport = transportName;
            }
        } else {
            // Non-HTTP (TCP/UDP) configuration
            if (!resource.enableProxy) {
                continue;
            }

            const protocol = resource.protocol.toLowerCase();
            const port = resource.proxyPort;

            if (!port) {
                continue;
            }

            if (!config_output[protocol]) {
                config_output[protocol] = {
                    routers: {},
                    services: {}
                };
            }

            config_output[protocol].routers[routerName] = {
                entryPoints: [`${protocol}-${port}`],
                service: serviceName,
                ...(protocol === "tcp" ? { rule: "HostSNI(`*`)" } : {})
            };

            config_output[protocol].services[serviceName] = {
                loadBalancer: {
                    servers: (() => {
                        // Check if any sites are online
                        const anySitesOnline = (
                            targets as TargetWithSite[]
                        ).some((target: TargetWithSite) => target.site.online);

                        return (targets as TargetWithSite[])
                            .filter((target: TargetWithSite) => {
                                if (!target.enabled) {
                                    return false;
                                }

                                // If any sites are online, exclude offline sites
                                if (anySitesOnline && !target.site.online) {
                                    return false;
                                }

                                if (
                                    target.site.type === "local" ||
                                    target.site.type === "wireguard"
                                ) {
                                    if (!target.ip || !target.port) {
                                        return false;
                                    }
                                } else if (target.site.type === "newt") {
                                    if (
                                        !target.internalPort ||
                                        !target.site.subnet
                                    ) {
                                        return false;
                                    }
                                }
                                return true;
                            })
                            .map((target: TargetWithSite) => {
                                if (
                                    target.site.type === "local" ||
                                    target.site.type === "wireguard"
                                ) {
                                    return {
                                        address: `${target.ip}:${target.port}`
                                    };
                                } else if (target.site.type === "newt") {
                                    const ip =
                                        target.site.subnet!.split("/")[0];
                                    return {
                                        address: `${ip}:${target.internalPort}`
                                    };
                                }
                            });
                    })(),
                    ...(resource.stickySession
                        ? {
                              sticky: {
                                  ipStrategy: {
                                      depth: 0,
                                      sourcePort: true
                                  }
                              }
                          }
                        : {})
                }
            };
        }
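        // Illustrative note (port value assumed, not from the original file): a raw TCP
        // resource with proxyPort 5432 is attached to an entrypoint named "tcp-5432" with
        // the catch-all rule HostSNI(`*`); UDP resources get only the "udp-<port>" entrypoint
        // and no rule.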
    }

    if (generateLoginPageRouters) {
        const exitNodeLoginPages = await db
            .select({
                loginPageId: loginPage.loginPageId,
                fullDomain: loginPage.fullDomain,
                exitNodeId: exitNodes.exitNodeId,
                domainId: loginPage.domainId,
                certificateStatus: certificates.status
            })
            .from(loginPage)
            .innerJoin(
                exitNodes,
                eq(exitNodes.exitNodeId, loginPage.exitNodeId)
            )
            .leftJoin(
                certificates,
                eq(certificates.domainId, loginPage.domainId)
            )
            .where(eq(exitNodes.exitNodeId, exitNodeId));

        if (exitNodeLoginPages.length > 0) {
            if (!config_output.http.services) {
                config_output.http.services = {};
            }

            if (!config_output.http.services["landing-service"]) {
                config_output.http.services["landing-service"] = {
                    loadBalancer: {
                        servers: [
                            {
                                url: `http://${
                                    config.getRawConfig().server
                                        .internal_hostname
                                }:${config.getRawConfig().server.next_port}`
                            }
                        ]
                    }
                };
            }

            for (const lp of exitNodeLoginPages) {
                if (!lp.domainId) {
                    continue;
                }

                if (!lp.fullDomain) {
                    continue;
                }

                if (lp.certificateStatus !== "valid") {
                    continue;
                }

                // auth-allowed:
                // rule: "Host(`auth.pangolin.internal`) && (PathRegexp(`^/auth/resource/[0-9]+$`) || PathPrefix(`/_next`))"
                // service: next-service
                // entryPoints:
                // - websecure

                const routerName = `loginpage-${lp.loginPageId}`;
                const fullDomain = `${lp.fullDomain}`;

                if (!config_output.http.routers) {
                    config_output.http.routers = {};
                }

                config_output.http.routers![routerName + "-router"] = {
                    entryPoints: [
                        config.getRawConfig().traefik.https_entrypoint
                    ],
                    service: "landing-service",
                    rule: `Host(\`${fullDomain}\`) && (PathRegexp(\`^/auth/resource/[^/]+$\`) || PathRegexp(\`^/auth/idp/[0-9]+/oidc/callback\`) || PathPrefix(\`/_next\`) || Path(\`/auth/org\`) || PathRegexp(\`^/__nextjs*\`))`,
                    priority: 203,
                    tls: {}
                };

                // auth-catchall:
                // rule: "Host(`auth.example.com`)"
                // middlewares:
                // - redirect-to-root
                // service: next-service
                // entryPoints:
                // - web

                config_output.http.routers![routerName + "-catchall"] = {
                    entryPoints: [
                        config.getRawConfig().traefik.https_entrypoint
                    ],
                    middlewares: [redirectToRootMiddlewareName],
                    service: "landing-service",
                    rule: `Host(\`${fullDomain}\`)`,
                    priority: 202,
                    tls: {}
                };

                // we need to add a redirect from http to https too
                config_output.http.routers![routerName + "-redirect"] = {
                    entryPoints: [
                        config.getRawConfig().traefik.http_entrypoint
                    ],
                    middlewares: [redirectHttpsMiddlewareName],
                    service: "landing-service",
                    rule: `Host(\`${fullDomain}\`)`,
                    priority: 201
                };
            }
        }
    }

    return config_output;
}
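For reference, the removed implementation emitted a Traefik dynamic configuration shaped roughly like the following (a trimmed sketch; the router/service names, domain, entrypoint, and server address are invented for illustration, not captured output):

const exampleTraefikConfig = {
    http: {
        middlewares: {
            "redirect-to-https": { redirectScheme: { scheme: "https" } }
        },
        routers: {
            "12-api-prefix-myresource-router": {
                entryPoints: ["websecure"],
                middlewares: ["badger"],
                service: "12-api-prefix-myresource-service",
                rule: "Host(`app.example.com`) && PathPrefix(`/api`)",
                priority: 113,
                tls: {}
            }
        },
        services: {
            "12-api-prefix-myresource-service": {
                loadBalancer: {
                    servers: [{ url: "http://100.89.140.1:8080" }]
                }
            }
        }
    }
};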