This commit is contained in:
Owen
2025-10-04 18:36:44 -07:00
parent 3123f858bb
commit c2c907852d
320 changed files with 35785 additions and 2984 deletions

View File

@@ -13,7 +13,7 @@ import { fromError } from "zod-validation-error";
import { getAllowedIps } from "../target/helpers";
import { proxyToRemote } from "@server/lib/remoteProxy";
import { getNextAvailableSubnet } from "@server/lib/exitNodes";
import { createExitNode } from "./createExitNode";
import { createExitNode } from "./privateCreateExitNode";
// Define Zod schema for request validation
const getConfigSchema = z.object({

View File

@@ -4,6 +4,9 @@ import HttpCode from "@server/types/HttpCode";
import createHttpError from "http-errors";
import logger from "@server/logger";
import { fromError } from "zod-validation-error";
import { resolveExitNodes } from "@server/lib/exitNodes";
import config from "@server/lib/config";
import { build } from "@server/build";
// Define Zod schema for request validation
const getResolvedHostnameSchema = z.object({
@@ -17,22 +20,42 @@ export async function getResolvedHostname(
next: NextFunction
): Promise<any> {
try {
// Validate request parameters
const parsedParams = getResolvedHostnameSchema.safeParse(
req.body
);
if (!parsedParams.success) {
return next(
createHttpError(
HttpCode.BAD_REQUEST,
fromError(parsedParams.error).toString()
)
let endpoints: string[] = []; // always route locally
if (build != "oss") {
// Validate request parameters
const parsedParams = getResolvedHostnameSchema.safeParse(req.body);
if (!parsedParams.success) {
return next(
createHttpError(
HttpCode.BAD_REQUEST,
fromError(parsedParams.error).toString()
)
);
}
const { hostname, publicKey } = parsedParams.data;
const baseDomain = config.getRawPrivateConfig().app.base_domain;
// if the hostname ends with the base domain then send back an empty array
if (baseDomain && hostname.endsWith(baseDomain)) {
return res.status(HttpCode.OK).send({
endpoints: [] // this should force to route locally
});
}
const resourceExitNodes = await resolveExitNodes(
hostname,
publicKey
);
endpoints = resourceExitNodes.map((node) => node.endpoint);
}
// return the endpoints
return res.status(HttpCode.OK).send({
endpoints: [] // ALWAYS ROUTE LOCALLY
endpoints
});
} catch (error) {
logger.error(error);

View File

@@ -2,7 +2,7 @@ import logger from "@server/logger";
import { db } from "@server/db";
import { exitNodes } from "@server/db";
import { eq } from "drizzle-orm";
import { sendToExitNode } from "../../lib/exitNodeComms";
import { sendToExitNode } from "@server/lib/exitNodes";
export async function addPeer(
exitNodeId: number,

View File

@@ -0,0 +1,67 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import { db, ExitNode, exitNodes } from "@server/db";
import { getUniqueExitNodeEndpointName } from "@server/db/names";
import config from "@server/lib/config";
import { getNextAvailableSubnet } from "@server/lib/exitNodes";
import logger from "@server/logger";
import { eq } from "drizzle-orm";
/**
 * Returns the exit node registered under the given WireGuard public key,
 * creating and persisting a new row when none exists yet.
 *
 * For a newly created node: the subnet address comes from
 * getNextAvailableSubnet(), the listen port and endpoint are read from the
 * gerbil section of the raw config (optionally prefixed with a generated
 * subdomain), and the name falls back to a prefix of the public key when
 * none is configured.
 *
 * @param publicKey   WireGuard public key identifying the exit node
 * @param reachableAt optional address at which the node can be reached
 * @returns the existing or newly inserted ExitNode row
 */
export async function createExitNode(
    publicKey: string,
    reachableAt: string | undefined
) {
    // See whether an exit node with this public key is already registered.
    const [existing] = await db
        .select()
        .from(exitNodes)
        .where(eq(exitNodes.publicKey, publicKey));
    if (existing) {
        return existing;
    }

    const address = await getNextAvailableSubnet();
    // TODO: eventually we will want to get the next available port so that we can have multiple exit nodes
    // const listenPort = await getNextAvailablePort();
    const listenPort = config.getRawConfig().gerbil.start_port;

    // Optionally prefix the endpoint with a freshly generated subdomain.
    let subEndpoint = "";
    if (config.getRawConfig().gerbil.use_subdomain) {
        subEndpoint = await getUniqueExitNodeEndpointName();
    }
    const baseEndpoint = config.getRawConfig().gerbil.base_endpoint;
    const endpoint =
        subEndpoint === ""
            ? `${baseEndpoint}`
            : `${subEndpoint}.${baseEndpoint}`;

    const exitNodeName =
        config.getRawConfig().gerbil.exit_node_name ||
        `Exit Node ${publicKey.slice(0, 8)}`;

    // Persist the new exit node and log what was allocated for it.
    const [created] = await db
        .insert(exitNodes)
        .values({
            publicKey,
            endpoint,
            address,
            listenPort,
            reachableAt,
            name: exitNodeName
        })
        .returning()
        .execute();
    logger.info(
        `Created new exit node ${created.name} with address ${created.address} and port ${created.listenPort}`
    );
    return created;
}

View File

@@ -0,0 +1,13 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/

View File

@@ -6,7 +6,10 @@ import logger from "@server/logger";
import createHttpError from "http-errors";
import HttpCode from "@server/types/HttpCode";
import response from "@server/lib/response";
import { usageService } from "@server/lib/private/billing/usageService";
import { FeatureId } from "@server/lib/private/billing/features";
import { checkExitNodeOrg } from "@server/lib/exitNodes";
import { build } from "@server/build";
// Track sites that are already offline to avoid unnecessary queries
const offlineSites = new Set<string>();
@@ -29,7 +32,7 @@ export const receiveBandwidth = async (
throw new Error("Invalid bandwidth data");
}
await updateSiteBandwidth(bandwidthData);
await updateSiteBandwidth(bandwidthData, build == "saas"); // we are checking the usage on saas only
return response(res, {
data: {},
@@ -51,6 +54,7 @@ export const receiveBandwidth = async (
export async function updateSiteBandwidth(
bandwidthData: PeerBandwidth[],
calcUsageAndLimits: boolean,
exitNodeId?: number
) {
const currentTime = new Date();
@@ -89,18 +93,23 @@ export async function updateSiteBandwidth(
lastBandwidthUpdate: sites.lastBandwidthUpdate
});
if (exitNodeId) {
if (await checkExitNodeOrg(exitNodeId, updatedSite.orgId)) {
// not allowed
logger.warn(
`Exit node ${exitNodeId} is not allowed for org ${updatedSite.orgId}`
);
// THIS SHOULD TRIGGER THE TRANSACTION TO FAIL?
throw new Error("Exit node not allowed");
}
}
if (updatedSite) {
if (exitNodeId) {
if (
await checkExitNodeOrg(
exitNodeId,
updatedSite.orgId
)
) {
// not allowed
logger.warn(
`Exit node ${exitNodeId} is not allowed for org ${updatedSite.orgId}`
);
// THIS SHOULD TRIGGER THE TRANSACTION TO FAIL?
throw new Error("Exit node not allowed");
}
}
updatedSites.push({ ...updatedSite, peer });
}
}
@@ -116,6 +125,74 @@ export async function updateSiteBandwidth(
const currentOrgUptime = orgUptimeMap.get(site.orgId) || 0;
orgUptimeMap.set(site.orgId, currentOrgUptime + 10 / 60); // Store in minutes and just add 10 seconds
}
if (calcUsageAndLimits) {
// REMOTE EXIT NODES DO NOT COUNT TOWARDS USAGE
// Process all usage updates sequentially by organization to reduce deadlock risk
const allOrgIds = new Set([...orgUsageMap.keys(), ...orgUptimeMap.keys()]);
for (const orgId of allOrgIds) {
try {
// Process bandwidth usage for this org
const totalBandwidth = orgUsageMap.get(orgId);
if (totalBandwidth) {
const bandwidthUsage = await usageService.add(
orgId,
FeatureId.EGRESS_DATA_MB,
totalBandwidth,
trx
);
if (bandwidthUsage) {
usageService
.checkLimitSet(
orgId,
true,
FeatureId.EGRESS_DATA_MB,
bandwidthUsage
)
.catch((error: any) => {
logger.error(
`Error checking bandwidth limits for org ${orgId}:`,
error
);
});
}
}
// Process uptime usage for this org
const totalUptime = orgUptimeMap.get(orgId);
if (totalUptime) {
const uptimeUsage = await usageService.add(
orgId,
FeatureId.SITE_UPTIME,
totalUptime,
trx
);
if (uptimeUsage) {
usageService
.checkLimitSet(
orgId,
true,
FeatureId.SITE_UPTIME,
uptimeUsage
)
.catch((error: any) => {
logger.error(
`Error checking uptime limits for org ${orgId}:`,
error
);
});
}
}
} catch (error) {
logger.error(
`Error processing usage for org ${orgId}:`,
error
);
// Don't break the loop, continue with other orgs
}
}
}
}
// Handle sites that reported zero bandwidth but need online status updated
@@ -161,7 +238,7 @@ export async function updateSiteBandwidth(
.where(eq(sites.siteId, site.siteId))
.returning();
if (exitNodeId) {
if (updatedSite && exitNodeId) {
if (
await checkExitNodeOrg(
exitNodeId,

View File

@@ -105,7 +105,7 @@ export async function updateHolePunch(
destinations: destinations
});
} catch (error) {
logger.error(error);
// logger.error(error); // FIX THIS
return next(
createHttpError(
HttpCode.INTERNAL_SERVER_ERROR,
@@ -122,7 +122,8 @@ export async function updateAndGenerateEndpointDestinations(
port: number,
timestamp: number,
token: string,
exitNode: ExitNode
exitNode: ExitNode,
checkOrg = false
) {
let currentSiteId: number | undefined;
const destinations: PeerDestination[] = [];
@@ -158,7 +159,7 @@ export async function updateAndGenerateEndpointDestinations(
.where(eq(clients.clientId, olm.clientId))
.returning();
if (await checkExitNodeOrg(exitNode.exitNodeId, client.orgId)) {
if (await checkExitNodeOrg(exitNode.exitNodeId, client.orgId) && checkOrg) {
// not allowed
logger.warn(
`Exit node ${exitNode.exitNodeId} is not allowed for org ${client.orgId}`
@@ -253,7 +254,7 @@ export async function updateAndGenerateEndpointDestinations(
.where(eq(sites.siteId, newt.siteId))
.limit(1);
if (await checkExitNodeOrg(exitNode.exitNodeId, site.orgId)) {
if (await checkExitNodeOrg(exitNode.exitNodeId, site.orgId) && checkOrg) {
// not allowed
logger.warn(
`Exit node ${exitNode.exitNodeId} is not allowed for org ${site.orgId}`