Mirror of https://github.com/fosrl/pangolin.git, synced 2026-03-26 20:46:39 +00:00

Compare commits (15 commits): 1.16.2-s.2 ... dev
| Author | SHA1 | Date |
|---|---|---|
| | 395cab795c | |
| | ce59a8a52b | |
| | 38d30b0214 | |
| | fff38aac85 | |
| | 5a2a97b23a | |
| | 5b894e8682 | |
| | 19f8c1772f | |
| | 37d331e813 | |
| | c660df55cd | |
| | 7c8b865379 | |
| | 3cca0c09c0 | |
| | b01fcc70fe | |
| | 35fed74e49 | |
| | 6cf1b9b010 | |
| | dae169540b | |
license.py (new file, 115 lines)
@@ -0,0 +1,115 @@
import os
import sys

# --- Configuration ---
# The header text to be added to the files.
HEADER_TEXT = """/*
 * This file is part of a proprietary work.
 *
 * Copyright (c) 2025 Fossorial, Inc.
 * All rights reserved.
 *
 * This file is licensed under the Fossorial Commercial License.
 * You may not use this file except in compliance with the License.
 * Unauthorized use, copying, modification, or distribution is strictly prohibited.
 *
 * This file is not licensed under the AGPLv3.
 */
"""


def should_add_header(file_path):
    """
    Checks if a file should receive the commercial license header.
    Returns True if 'private' is in the path or file content.
    """
    # Check if 'private' is in the file path (case-insensitive)
    if 'server/private' in file_path.lower():
        return True

    # Check if 'private' is in the file content (case-insensitive)
    # try:
    #     with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
    #         content = f.read()
    #         if 'private' in content.lower():
    #             return True
    # except Exception as e:
    #     print(f"Could not read file {file_path}: {e}")

    return False


def process_directory(root_dir):
    """
    Recursively scans a directory and adds headers to qualifying .ts or .tsx files,
    skipping any 'node_modules' directories.
    """
    print(f"Scanning directory: {root_dir}")
    files_processed = 0
    headers_added = 0

    for root, dirs, files in os.walk(root_dir):
        # --- MODIFICATION ---
        # Exclude 'node_modules' directories from the scan to improve performance.
        if 'node_modules' in dirs:
            dirs.remove('node_modules')

        for file in files:
            if file.endswith('.ts') or file.endswith('.tsx'):
                file_path = os.path.join(root, file)
                files_processed += 1

                try:
                    with open(file_path, 'r+', encoding='utf-8') as f:
                        original_content = f.read()
                        has_header = original_content.startswith(HEADER_TEXT.strip())

                        if should_add_header(file_path):
                            # Add header only if it's not already there
                            if not has_header:
                                f.seek(0, 0)  # Go to the beginning of the file
                                f.write(HEADER_TEXT.strip() + '\n\n' + original_content)
                                print(f"Added header to: {file_path}")
                                headers_added += 1
                            else:
                                print(f"Header already exists in: {file_path}")
                        else:
                            # Remove header if it exists but shouldn't be there
                            if has_header:
                                # Find the end of the header and remove it (including following newlines)
                                header_with_newlines = HEADER_TEXT.strip() + '\n\n'
                                if original_content.startswith(header_with_newlines):
                                    content_without_header = original_content[len(header_with_newlines):]
                                else:
                                    # Handle case where there might be different newline patterns
                                    header_end = len(HEADER_TEXT.strip())
                                    # Skip any newlines after the header
                                    while header_end < len(original_content) and original_content[header_end] in '\n\r':
                                        header_end += 1
                                    content_without_header = original_content[header_end:]

                                f.seek(0)
                                f.write(content_without_header)
                                f.truncate()
                                print(f"Removed header from: {file_path}")
                                headers_added += 1  # Reusing counter for modifications

                except Exception as e:
                    print(f"Error processing file {file_path}: {e}")

    print("\n--- Scan Complete ---")
    print(f"Total .ts or .tsx files found: {files_processed}")
    print(f"Files modified (headers added/removed): {headers_added}")


if __name__ == "__main__":
    # Get the target directory from the command line arguments.
    # If no directory is provided, it uses the current directory ('.').
    if len(sys.argv) > 1:
        target_directory = sys.argv[1]
    else:
        target_directory = '.'  # Default to current directory

    if not os.path.isdir(target_directory):
        print(f"Error: Directory '{target_directory}' not found.")
        sys.exit(1)

    process_directory(os.path.abspath(target_directory))
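Usage: python license.py [target_directory]. With no argument the script scans the current directory; it walks every .ts/.tsx file outside node_modules and adds the header when the path contains server/private, removing it otherwise.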
server/lib/ip.ts (123)
@@ -571,6 +571,129 @@ export function generateSubnetProxyTargets(
     return targets;
 }
+
+export type SubnetProxyTargetV2 = {
+    sourcePrefixes: string[]; // must be cidrs
+    destPrefix: string; // must be a cidr
+    disableIcmp?: boolean;
+    rewriteTo?: string; // must be a cidr
+    portRange?: {
+        min: number;
+        max: number;
+        protocol: "tcp" | "udp";
+    }[];
+};
+
+export function generateSubnetProxyTargetV2(
+    siteResource: SiteResource,
+    clients: {
+        clientId: number;
+        pubKey: string | null;
+        subnet: string | null;
+    }[]
+): SubnetProxyTargetV2 | undefined {
+    if (clients.length === 0) {
+        logger.debug(
+            `No clients have access to site resource ${siteResource.siteResourceId}, skipping target generation.`
+        );
+        return;
+    }
+
+    let target: SubnetProxyTargetV2 | null = null;
+
+    const portRange = [
+        ...parsePortRangeString(siteResource.tcpPortRangeString, "tcp"),
+        ...parsePortRangeString(siteResource.udpPortRangeString, "udp")
+    ];
+    const disableIcmp = siteResource.disableIcmp ?? false;
+
+    if (siteResource.mode == "host") {
+        let destination = siteResource.destination;
+        // check if this is a valid ip
+        const ipSchema = z.union([z.ipv4(), z.ipv6()]);
+        if (ipSchema.safeParse(destination).success) {
+            destination = `${destination}/32`;
+
+            target = {
+                sourcePrefixes: [],
+                destPrefix: destination,
+                portRange,
+                disableIcmp
+            };
+        }
+
+        if (siteResource.alias && siteResource.aliasAddress) {
+            // also push a match for the alias address
+            target = {
+                sourcePrefixes: [],
+                destPrefix: `${siteResource.aliasAddress}/32`,
+                rewriteTo: destination,
+                portRange,
+                disableIcmp
+            };
+        }
+    } else if (siteResource.mode == "cidr") {
+        target = {
+            sourcePrefixes: [],
+            destPrefix: siteResource.destination,
+            portRange,
+            disableIcmp
+        };
+    }
+
+    if (!target) {
+        return;
+    }
+
+    for (const clientSite of clients) {
+        if (!clientSite.subnet) {
+            logger.debug(
+                `Client ${clientSite.clientId} has no subnet, skipping for site resource ${siteResource.siteResourceId}.`
+            );
+            continue;
+        }
+
+        const clientPrefix = `${clientSite.subnet.split("/")[0]}/32`;
+
+        // add client prefix to source prefixes
+        target.sourcePrefixes.push(clientPrefix);
+    }
+
+    // print a nice representation of the targets
+    // logger.debug(
+    //     `Generated subnet proxy targets for: ${JSON.stringify(targets, null, 2)}`
+    // );
+
+    return target;
+}
+
+/**
+ * Converts a SubnetProxyTargetV2 to an array of SubnetProxyTarget (v1)
+ * by expanding each source prefix into its own target entry.
+ * @param targetV2 - The v2 target to convert
+ * @returns Array of v1 SubnetProxyTarget objects
+ */
+export function convertSubnetProxyTargetsV2ToV1(
+    targetsV2: SubnetProxyTargetV2[]
+): SubnetProxyTarget[] {
+    return targetsV2.flatMap((targetV2) =>
+        targetV2.sourcePrefixes.map((sourcePrefix) => ({
+            sourcePrefix,
+            destPrefix: targetV2.destPrefix,
+            ...(targetV2.disableIcmp !== undefined && {
+                disableIcmp: targetV2.disableIcmp
+            }),
+            ...(targetV2.rewriteTo !== undefined && {
+                rewriteTo: targetV2.rewriteTo
+            }),
+            ...(targetV2.portRange !== undefined && {
+                portRange: targetV2.portRange
+            })
+        }))
+    );
+}
+
 // Custom schema for validating port range strings
 // Format: "80,443,8000-9000" or "*" for all ports, or empty string
 export const portRangeStringSchema = z
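To make the v2 shape and its v1 expansion concrete, here is a minimal sketch. The values are hypothetical, and the exact portRange entries produced by parsePortRangeString are assumed (single ports rendered as min == max ranges):

import {
    convertSubnetProxyTargetsV2ToV1,
    SubnetProxyTarget,
    SubnetProxyTargetV2
} from "@server/lib/ip";

// Hypothetical v2 target for a host-mode resource at 10.0.0.5 with two clients.
const v2: SubnetProxyTargetV2 = {
    sourcePrefixes: ["100.90.128.2/32", "100.90.128.3/32"],
    destPrefix: "10.0.0.5/32",
    disableIcmp: false,
    portRange: [{ min: 443, max: 443, protocol: "tcp" }] // assumed parse of "443"
};

// Each source prefix becomes its own v1 entry:
const v1: SubnetProxyTarget[] = convertSubnetProxyTargetsV2ToV1([v2]);
// [
//   { sourcePrefix: "100.90.128.2/32", destPrefix: "10.0.0.5/32", disableIcmp: false, portRange: [...] },
//   { sourcePrefix: "100.90.128.3/32", destPrefix: "10.0.0.5/32", disableIcmp: false, portRange: [...] }
// ]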
@@ -302,8 +302,8 @@ export const configSchema = z
         .optional()
         .default({
             block_size: 24,
-            subnet_group: "100.90.128.0/24",
-            utility_subnet_group: "100.96.128.0/24"
+            subnet_group: "100.90.128.0/20",
+            utility_subnet_group: "100.96.128.0/20"
         }),
     rate_limits: z
         .object({
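For scale: widening both groups from /24 to /20 raises each pool from 2^8 = 256 to 2^12 = 4096 addresses.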
@@ -32,7 +32,7 @@ import logger from "@server/logger";
 import {
     generateAliasConfig,
     generateRemoteSubnets,
-    generateSubnetProxyTargets,
+    generateSubnetProxyTargetV2,
     parseEndpoint,
     formatEndpoint
 } from "@server/lib/ip";
@@ -660,19 +660,16 @@ async function handleSubnetProxyTargetUpdates(
     );

     if (addedClients.length > 0) {
-        const targetsToAdd = generateSubnetProxyTargets(
+        const targetToAdd = generateSubnetProxyTargetV2(
             siteResource,
             addedClients
         );

-        if (targetsToAdd.length > 0) {
-            logger.info(
-                `Adding ${targetsToAdd.length} subnet proxy targets for siteResource ${siteResource.siteResourceId}`
-            );
+        if (targetToAdd) {
             proxyJobs.push(
                 addSubnetProxyTargets(
                     newt.newtId,
-                    targetsToAdd,
+                    [targetToAdd],
                     newt.version
                 )
             );
@@ -700,19 +697,16 @@ async function handleSubnetProxyTargetUpdates(
     );

     if (removedClients.length > 0) {
-        const targetsToRemove = generateSubnetProxyTargets(
+        const targetToRemove = generateSubnetProxyTargetV2(
             siteResource,
             removedClients
         );

-        if (targetsToRemove.length > 0) {
-            logger.info(
-                `Removing ${targetsToRemove.length} subnet proxy targets for siteResource ${siteResource.siteResourceId}`
-            );
+        if (targetToRemove) {
             proxyJobs.push(
                 removeSubnetProxyTargets(
                     newt.newtId,
-                    targetsToRemove,
+                    [targetToRemove],
                     newt.version
                 )
             );
@@ -1169,7 +1163,7 @@ async function handleMessagesForClientResources(
     }

     for (const resource of resources) {
-        const targets = generateSubnetProxyTargets(resource, [
+        const target = generateSubnetProxyTargetV2(resource, [
             {
                 clientId: client.clientId,
                 pubKey: client.pubKey,
@@ -1177,11 +1171,11 @@ async function handleMessagesForClientResources(
             }
         ]);

-        if (targets.length > 0) {
+        if (target) {
             proxyJobs.push(
                 addSubnetProxyTargets(
                     newt.newtId,
-                    targets,
+                    [target],
                     newt.version
                 )
             );
@@ -1246,7 +1240,7 @@ async function handleMessagesForClientResources(
     }

     for (const resource of resources) {
-        const targets = generateSubnetProxyTargets(resource, [
+        const target = generateSubnetProxyTargetV2(resource, [
             {
                 clientId: client.clientId,
                 pubKey: client.pubKey,
@@ -1254,11 +1248,11 @@ async function handleMessagesForClientResources(
             }
         ]);

-        if (targets.length > 0) {
+        if (target) {
             proxyJobs.push(
                 removeSubnetProxyTargets(
                     newt.newtId,
-                    targets,
+                    [target],
                     newt.version
                 )
             );
@@ -57,7 +57,10 @@ export const privateConfigSchema = z.object({
         .object({
             host: z.string(),
             port: portSchema,
-            password: z.string().optional(),
+            password: z
+                .string()
+                .optional()
+                .transform(getEnvOrYaml("REDIS_PASSWORD")),
             db: z.int().nonnegative().optional().default(0),
             replicas: z
                 .array(
|
|||||||
sites,
|
sites,
|
||||||
userOrgs
|
userOrgs
|
||||||
} from "@server/db";
|
} from "@server/db";
|
||||||
|
import { logAccessAudit } from "#private/lib/logAccessAudit";
|
||||||
import { isLicensedOrSubscribed } from "#private/lib/isLicencedOrSubscribed";
|
import { isLicensedOrSubscribed } from "#private/lib/isLicencedOrSubscribed";
|
||||||
import { tierMatrix } from "@server/lib/billing/tierMatrix";
|
import { tierMatrix } from "@server/lib/billing/tierMatrix";
|
||||||
import response from "@server/lib/response";
|
import response from "@server/lib/response";
|
||||||
@@ -463,6 +464,24 @@ export async function signSshKey(
|
|||||||
})
|
})
|
||||||
});
|
});
|
||||||
|
|
||||||
|
await logAccessAudit({
|
||||||
|
action: true,
|
||||||
|
type: "ssh",
|
||||||
|
orgId: orgId,
|
||||||
|
resourceId: resource.siteResourceId,
|
||||||
|
user: req.user
|
||||||
|
? { username: req.user.username ?? "", userId: req.user.userId }
|
||||||
|
: undefined,
|
||||||
|
metadata: {
|
||||||
|
resourceName: resource.name,
|
||||||
|
siteId: resource.siteId,
|
||||||
|
sshUsername: usernameToUse,
|
||||||
|
sshHost: sshHost
|
||||||
|
},
|
||||||
|
userAgent: req.headers["user-agent"],
|
||||||
|
requestIp: req.ip
|
||||||
|
});
|
||||||
|
|
||||||
return response<SignSshKeyResponse>(res, {
|
return response<SignSshKeyResponse>(res, {
|
||||||
data: {
|
data: {
|
||||||
certificate: cert.certificate,
|
certificate: cert.certificate,
|
||||||
|
|||||||
@@ -1,15 +1,54 @@
 import { sendToClient } from "#dynamic/routers/ws";
-import { db, olms, Transaction } from "@server/db";
+import { db, newts, olms } from "@server/db";
+import {
+    Alias,
+    convertSubnetProxyTargetsV2ToV1,
+    SubnetProxyTarget,
+    SubnetProxyTargetV2
+} from "@server/lib/ip";
 import { canCompress } from "@server/lib/clientVersionChecks";
-import { Alias, SubnetProxyTarget } from "@server/lib/ip";
 import logger from "@server/logger";
 import { eq } from "drizzle-orm";
+import semver from "semver";
+
+const NEWT_V2_TARGETS_VERSION = ">=1.10.3";
+
+export async function convertTargetsIfNessicary(
+    newtId: string,
+    targets: SubnetProxyTarget[] | SubnetProxyTargetV2[]
+) {
+    // get the newt
+    const [newt] = await db
+        .select()
+        .from(newts)
+        .where(eq(newts.newtId, newtId));
+    if (!newt) {
+        throw new Error(`No newt found for id: ${newtId}`);
+    }
+
+    // check the semver
+    if (
+        newt.version &&
+        !semver.satisfies(newt.version, NEWT_V2_TARGETS_VERSION)
+    ) {
+        logger.debug(
+            `addTargets: Newt version ${newt.version} does not support targets v2, falling back`
+        );
+        targets = convertSubnetProxyTargetsV2ToV1(
+            targets as SubnetProxyTargetV2[]
+        );
+    }
+
+    return targets;
+}
+
 export async function addTargets(
     newtId: string,
-    targets: SubnetProxyTarget[],
+    targets: SubnetProxyTarget[] | SubnetProxyTargetV2[],
     version?: string | null
 ) {
+    targets = await convertTargetsIfNessicary(newtId, targets);
+
     await sendToClient(
         newtId,
         {
@@ -22,9 +61,11 @@ export async function addTargets(

 export async function removeTargets(
     newtId: string,
-    targets: SubnetProxyTarget[],
+    targets: SubnetProxyTarget[] | SubnetProxyTargetV2[],
     version?: string | null
 ) {
+    targets = await convertTargetsIfNessicary(newtId, targets);
+
     await sendToClient(
         newtId,
         {
@@ -38,11 +79,39 @@ export async function removeTargets(
 export async function updateTargets(
     newtId: string,
     targets: {
-        oldTargets: SubnetProxyTarget[];
-        newTargets: SubnetProxyTarget[];
+        oldTargets: SubnetProxyTarget[] | SubnetProxyTargetV2[];
+        newTargets: SubnetProxyTarget[] | SubnetProxyTargetV2[];
     },
     version?: string | null
 ) {
+    // get the newt
+    const [newt] = await db
+        .select()
+        .from(newts)
+        .where(eq(newts.newtId, newtId));
+    if (!newt) {
+        logger.error(`updateTargets: No newt found for id: ${newtId}`);
+        return;
+    }
+
+    // check the semver
+    if (
+        newt.version &&
+        !semver.satisfies(newt.version, NEWT_V2_TARGETS_VERSION)
+    ) {
+        logger.debug(
+            `updateTargets: Newt version ${newt.version} does not support targets v2, falling back`
+        );
+        targets = {
+            oldTargets: convertSubnetProxyTargetsV2ToV1(
+                targets.oldTargets as SubnetProxyTargetV2[]
+            ),
+            newTargets: convertSubnetProxyTargetsV2ToV1(
+                targets.newTargets as SubnetProxyTargetV2[]
+            )
+        };
+    }
+
     await sendToClient(
         newtId,
         {
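For reference, the version gate above relies on semver.satisfies(version, range) from the semver package; a minimal sketch of the cutoff:

import semver from "semver";

const NEWT_V2_TARGETS_VERSION = ">=1.10.3";

semver.satisfies("1.10.3", NEWT_V2_TARGETS_VERSION); // true  -> send v2 targets as-is
semver.satisfies("1.10.2", NEWT_V2_TARGETS_VERSION); // false -> convert to v1 first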
@@ -1,6 +1,5 @@
 import { Request, Response, NextFunction } from "express";
-import { eq, sql } from "drizzle-orm";
-import { sites } from "@server/db";
+import { sql } from "drizzle-orm";
 import { db } from "@server/db";
 import logger from "@server/logger";
 import createHttpError from "http-errors";
@@ -31,7 +30,10 @@ const MAX_RETRIES = 3;
 const BASE_DELAY_MS = 50;

 // How often to flush accumulated bandwidth data to the database
-const FLUSH_INTERVAL_MS = 30_000; // 30 seconds
+const FLUSH_INTERVAL_MS = 300_000; // 300 seconds
+
+// Maximum number of sites to include in a single batch UPDATE statement
+const BATCH_CHUNK_SIZE = 250;

 // In-memory accumulator: publicKey -> AccumulatorEntry
 let accumulator = new Map<string, AccumulatorEntry>();
@@ -75,13 +77,33 @@ async function withDeadlockRetry<T>(
     }
 }

+/**
+ * Execute a raw SQL query that returns rows, in a way that works across both
+ * the PostgreSQL driver (which exposes `execute`) and the SQLite driver (which
+ * exposes `all`). Drizzle's typed query builder doesn't support bulk
+ * UPDATE … FROM (VALUES …) natively, so we drop to raw SQL here.
+ */
+async function dbQueryRows<T extends Record<string, unknown>>(
+    query: Parameters<(typeof sql)["join"]>[0][number]
+): Promise<T[]> {
+    const anyDb = db as any;
+    if (typeof anyDb.execute === "function") {
+        // PostgreSQL (node-postgres via Drizzle) — returns { rows: [...] } or an array
+        const result = await anyDb.execute(query);
+        return (Array.isArray(result) ? result : (result.rows ?? [])) as T[];
+    }
+    // SQLite (better-sqlite3 via Drizzle) — returns an array directly
+    return (await anyDb.all(query)) as T[];
+}
+
 /**
  * Flush all accumulated site bandwidth data to the database.
  *
  * Swaps out the accumulator before writing so that any bandwidth messages
  * received during the flush are captured in the new accumulator rather than
- * being lost or causing contention. Entries that fail to write are re-queued
- * back into the accumulator so they will be retried on the next flush.
+ * being lost or causing contention. Sites are updated in chunks via a single
+ * batch UPDATE per chunk. Failed chunks are discarded — exact per-flush
+ * accuracy is not critical and re-queuing is not worth the added complexity.
  *
  * This function is exported so that the application's graceful-shutdown
  * cleanup handler can call it before the process exits.
@@ -108,76 +130,76 @@ export async function flushSiteBandwidthToDb(): Promise<void> {
         `Flushing accumulated bandwidth data for ${sortedEntries.length} site(s) to the database`
     );

-    // Aggregate billing usage by org, collected during the DB update loop.
+    // Build a lookup so post-processing can reach each entry by publicKey.
+    const snapshotMap = new Map(sortedEntries);
+
+    // Aggregate billing usage by org across all chunks.
     const orgUsageMap = new Map<string, number>();

-    for (const [publicKey, { bytesIn, bytesOut, exitNodeId, calcUsage }] of sortedEntries) {
+    // Process in chunks so individual queries stay at a reasonable size.
+    for (let i = 0; i < sortedEntries.length; i += BATCH_CHUNK_SIZE) {
+        const chunk = sortedEntries.slice(i, i + BATCH_CHUNK_SIZE);
+        const chunkEnd = i + chunk.length - 1;
+
+        // Build a parameterised VALUES list: (pubKey, bytesIn, bytesOut), ...
+        // Both PostgreSQL and SQLite (≥ 3.33.0, which better-sqlite3 bundles)
+        // support UPDATE … FROM (VALUES …), letting us update the whole chunk
+        // in a single query instead of N individual round-trips.
+        const valuesList = chunk.map(([publicKey, { bytesIn, bytesOut }]) =>
+            sql`(${publicKey}, ${bytesIn}, ${bytesOut})`
+        );
+        const valuesClause = sql.join(valuesList, sql`, `);
+
+        let rows: { orgId: string; pubKey: string }[] = [];
+
         try {
-            const updatedSite = await withDeadlockRetry(async () => {
-                const [result] = await db
-                    .update(sites)
-                    .set({
-                        megabytesOut: sql`COALESCE(${sites.megabytesOut}, 0) + ${bytesIn}`,
-                        megabytesIn: sql`COALESCE(${sites.megabytesIn}, 0) + ${bytesOut}`,
-                        lastBandwidthUpdate: currentTime,
-                    })
-                    .where(eq(sites.pubKey, publicKey))
-                    .returning({
-                        orgId: sites.orgId,
-                        siteId: sites.siteId
-                    });
-                return result;
-            }, `flush bandwidth for site ${publicKey}`);
-
-            if (updatedSite) {
-                if (exitNodeId) {
-                    const notAllowed = await checkExitNodeOrg(
-                        exitNodeId,
-                        updatedSite.orgId
-                    );
-                    if (notAllowed) {
-                        logger.warn(
-                            `Exit node ${exitNodeId} is not allowed for org ${updatedSite.orgId}`
-                        );
-                        // Skip usage tracking for this site but continue
-                        // processing the rest.
-                        continue;
-                    }
-                }
-
-                if (calcUsage) {
-                    const totalBandwidth = bytesIn + bytesOut;
-                    const current = orgUsageMap.get(updatedSite.orgId) ?? 0;
-                    orgUsageMap.set(updatedSite.orgId, current + totalBandwidth);
-                }
-            }
+            rows = await withDeadlockRetry(async () => {
+                return dbQueryRows<{ orgId: string; pubKey: string }>(sql`
+                    UPDATE sites
+                    SET
+                        "bytesOut" = COALESCE("bytesOut", 0) + v.bytes_in,
+                        "bytesIn" = COALESCE("bytesIn", 0) + v.bytes_out,
+                        "lastBandwidthUpdate" = ${currentTime}
+                    FROM (VALUES ${valuesClause}) AS v(pub_key, bytes_in, bytes_out)
+                    WHERE sites."pubKey" = v.pub_key
+                    RETURNING sites."orgId" AS "orgId", sites."pubKey" AS "pubKey"
+                `);
+            }, `flush bandwidth chunk [${i}–${chunkEnd}]`);
         } catch (error) {
             logger.error(
-                `Failed to flush bandwidth for site ${publicKey}:`,
+                `Failed to flush bandwidth chunk [${i}–${chunkEnd}], discarding ${chunk.length} site(s):`,
                 error
             );
+            // Discard the chunk — exact per-flush accuracy is not critical.
+            continue;
+        }

-            // Re-queue the failed entry so it is retried on the next flush
-            // rather than silently dropped.
-            const existing = accumulator.get(publicKey);
-            if (existing) {
-                existing.bytesIn += bytesIn;
-                existing.bytesOut += bytesOut;
-            } else {
-                accumulator.set(publicKey, {
-                    bytesIn,
-                    bytesOut,
-                    exitNodeId,
-                    calcUsage
-                });
-            }
-        }
+        // Collect billing usage from the returned rows.
+        for (const { orgId, pubKey } of rows) {
+            const entry = snapshotMap.get(pubKey);
+            if (!entry) continue;
+
+            const { bytesIn, bytesOut, exitNodeId, calcUsage } = entry;
+
+            if (exitNodeId) {
+                const notAllowed = await checkExitNodeOrg(exitNodeId, orgId);
+                if (notAllowed) {
+                    logger.warn(
+                        `Exit node ${exitNodeId} is not allowed for org ${orgId}`
+                    );
+                    continue;
+                }
+            }
+
+            if (calcUsage) {
+                const current = orgUsageMap.get(orgId) ?? 0;
+                orgUsageMap.set(orgId, current + bytesIn + bytesOut);
+            }
+        }
     }

-    // Process billing usage updates outside the site-update loop to keep
-    // lock scope small and concerns separated.
+    // Process billing usage updates after all chunks are written.
     if (orgUsageMap.size > 0) {
-        // Sort org IDs for consistent lock ordering.
         const sortedOrgIds = [...orgUsageMap.keys()].sort();

         for (const orgId of sortedOrgIds) {
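For illustration, the batched statement for a two-site chunk would render roughly as follows (a sketch with placeholder values inlined; the real query binds them as parameters via the sql template):

// Hypothetical rendering of one chunked flush query (placeholder values):
const exampleBatchedUpdate = `
    UPDATE sites
    SET "bytesOut" = COALESCE("bytesOut", 0) + v.bytes_in,
        "bytesIn" = COALESCE("bytesIn", 0) + v.bytes_out,
        "lastBandwidthUpdate" = 1764115200000
    FROM (VALUES ('pubkey-a', 1024, 2048), ('pubkey-b', 512, 256))
        AS v(pub_key, bytes_in, bytes_out)
    WHERE sites."pubKey" = v.pub_key
    RETURNING sites."orgId" AS "orgId", sites."pubKey" AS "pubKey";
`;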
@@ -16,8 +16,8 @@ import { eq, and } from "drizzle-orm";
 import config from "@server/lib/config";
 import {
     formatEndpoint,
-    generateSubnetProxyTargets,
-    SubnetProxyTarget
+    generateSubnetProxyTargetV2,
+    SubnetProxyTargetV2
 } from "@server/lib/ip";

 export async function buildClientConfigurationForNewtClient(
@@ -143,7 +143,7 @@ export async function buildClientConfigurationForNewtClient(
         .from(siteResources)
         .where(eq(siteResources.siteId, siteId));

-    const targetsToSend: SubnetProxyTarget[] = [];
+    const targetsToSend: SubnetProxyTargetV2[] = [];

     for (const resource of allSiteResources) {
         // Get clients associated with this specific resource
@@ -168,12 +168,14 @@ export async function buildClientConfigurationForNewtClient(
             )
         );

-        const resourceTargets = generateSubnetProxyTargets(
+        const resourceTarget = generateSubnetProxyTargetV2(
             resource,
             resourceClients
         );

-        targetsToSend.push(...resourceTargets);
+        if (resourceTarget) {
+            targetsToSend.push(resourceTarget);
+        }
     }

     return {
@@ -6,6 +6,7 @@ import { db, ExitNode, exitNodes, Newt, sites } from "@server/db";
 import { eq } from "drizzle-orm";
 import { sendToExitNode } from "#dynamic/lib/exitNodes";
 import { buildClientConfigurationForNewtClient } from "./buildConfiguration";
+import { convertTargetsIfNessicary } from "../client/targets";
 import { canCompress } from "@server/lib/clientVersionChecks";

 const inputSchema = z.object({
@@ -127,13 +128,15 @@ export const handleGetConfigMessage: MessageHandler = async (context) => {
         exitNode
     );

+    const targetsToSend = await convertTargetsIfNessicary(newt.newtId, targets);
+
     return {
         message: {
             type: "newt/wg/receive-config",
             data: {
                 ipAddress: site.address,
                 peers,
-                targets
+                targets: targetsToSend
             }
         },
         options: {
@@ -88,7 +88,7 @@ const createSiteResourceSchema = z
         },
         {
             message:
-                "Destination must be a valid IP address or valid domain AND alias is required"
+                "Destination must be a valid IPv4 address or valid domain AND alias is required"
         }
     )
     .refine(
@@ -24,7 +24,7 @@ import { updatePeerData, updateTargets } from "@server/routers/client/targets";
 import {
     generateAliasConfig,
     generateRemoteSubnets,
-    generateSubnetProxyTargets,
+    generateSubnetProxyTargetV2,
     isIpInCidr,
     portRangeStringSchema
 } from "@server/lib/ip";
@@ -608,18 +608,18 @@ export async function handleMessagingForUpdatedSiteResource(

     // Only update targets on newt if destination or port ranges changed
     if (destinationChanged || portRangesChanged) {
-        const oldTargets = generateSubnetProxyTargets(
+        const oldTarget = generateSubnetProxyTargetV2(
             existingSiteResource,
             mergedAllClients
         );
-        const newTargets = generateSubnetProxyTargets(
+        const newTarget = generateSubnetProxyTargetV2(
             updatedSiteResource,
             mergedAllClients
         );

         await updateTargets(newt.newtId, {
-            oldTargets: oldTargets,
-            newTargets: newTargets
+            oldTargets: oldTarget ? [oldTarget] : [],
+            newTargets: newTarget ? [newTarget] : []
         }, newt.version);
     }

@@ -493,7 +493,8 @@ export default function GeneralPage() {
                 {
                     value: "whitelistedEmail",
                     label: "Whitelisted Email"
-                }
+                },
+                { value: "ssh", label: "SSH" }
             ]}
             selectedValue={filters.type}
             onValueChange={(value) =>
@@ -507,13 +508,12 @@ export default function GeneralPage() {
         );
     },
     cell: ({ row }) => {
-        // should be capitalized first letter
-        return (
-            <span>
-                {row.original.type.charAt(0).toUpperCase() +
-                    row.original.type.slice(1) || "-"}
-            </span>
-        );
+        const typeLabel =
+            row.original.type === "ssh"
+                ? "SSH"
+                : row.original.type.charAt(0).toUpperCase() +
+                  row.original.type.slice(1);
+        return <span>{typeLabel || "-"}</span>;
     }
 },
 {