Compare commits

...

15 Commits

Author         SHA1        Message                                                        Date
Owen           395cab795c  Batch set bandwidth                                            2026-03-25 20:35:21 -07:00
Owen           ce59a8a52b  Merge branch 'main' into dev                                   2026-03-24 20:38:16 -07:00
Owen           38d30b0214  Add license script                                             2026-03-24 18:13:57 -07:00
Owen           fff38aac85  Add ssh access log                                             2026-03-24 16:26:56 -07:00
Owen           5a2a97b23a  Add better pooling controls                                    2026-03-24 16:12:13 -07:00
Owen           5b894e8682  Disable everything if not paid                                 2026-03-24 16:01:54 -07:00
Owen Schwartz  19f8c1772f  Merge pull request #2698 from fosrl/msg-opt                    2026-03-23 16:05:24 -07:00
                           (Improve proxy list message size)
Owen           37d331e813  Update version                                                 2026-03-23 16:05:05 -07:00
Owen           c660df55cd  Merge branch 'dev' into msg-opt                                2026-03-23 16:00:50 -07:00
Owen Schwartz  7c8b865379  Merge pull request #2695 from noe-charmet/redis-password-env  2026-03-23 12:02:45 -07:00
                           (Allow setting Redis password from env)
Noe Charmet    3cca0c09c0  Allow setting Redis password from env                          2026-03-23 11:18:55 +01:00
Owen           b01fcc70fe  Fix ts and add note about ipv4                                 2026-03-03 14:45:18 -08:00
Owen           35fed74e49  Merge branch 'dev' into msg-opt                                2026-03-02 18:52:35 -08:00
Owen           6cf1b9b010  Support improved targets msg v2                                2026-03-02 18:51:48 -08:00
Owen           dae169540b  Fix defaults for orgs                                          2026-03-02 16:49:17 -08:00
13 changed files with 460 additions and 110 deletions

license.py · 115 lines · Normal file
View File

@@ -0,0 +1,115 @@
import os
import sys

# --- Configuration ---
# The header text to be added to the files.
HEADER_TEXT = """/*
 * This file is part of a proprietary work.
 *
 * Copyright (c) 2025 Fossorial, Inc.
 * All rights reserved.
 *
 * This file is licensed under the Fossorial Commercial License.
 * You may not use this file except in compliance with the License.
 * Unauthorized use, copying, modification, or distribution is strictly prohibited.
 *
 * This file is not licensed under the AGPLv3.
 */
"""


def should_add_header(file_path):
    """
    Checks if a file should receive the commercial license header.
    Returns True when 'server/private' appears in the file path.
    """
    # Check if 'server/private' is in the file path (case-insensitive)
    if 'server/private' in file_path.lower():
        return True
    # Content-based detection is disabled for now; path matching is enough.
    # try:
    #     with open(file_path, 'r', encoding='utf-8', errors='ignore') as f:
    #         content = f.read()
    #         if 'private' in content.lower():
    #             return True
    # except Exception as e:
    #     print(f"Could not read file {file_path}: {e}")
    return False


def process_directory(root_dir):
    """
    Recursively scans a directory and adds or removes headers on qualifying
    .ts and .tsx files, skipping any 'node_modules' directories.
    """
    print(f"Scanning directory: {root_dir}")
    files_processed = 0
    files_modified = 0
    for root, dirs, files in os.walk(root_dir):
        # Exclude 'node_modules' directories from the scan to improve performance.
        if 'node_modules' in dirs:
            dirs.remove('node_modules')
        for file in files:
            if file.endswith('.ts') or file.endswith('.tsx'):
                file_path = os.path.join(root, file)
                files_processed += 1
                try:
                    with open(file_path, 'r+', encoding='utf-8') as f:
                        original_content = f.read()
                        has_header = original_content.startswith(HEADER_TEXT.strip())
                        if should_add_header(file_path):
                            # Add header only if it's not already there
                            if not has_header:
                                f.seek(0, 0)  # Go to the beginning of the file
                                f.write(HEADER_TEXT.strip() + '\n\n' + original_content)
                                print(f"Added header to: {file_path}")
                                files_modified += 1
                            else:
                                print(f"Header already exists in: {file_path}")
                        else:
                            # Remove header if it exists but shouldn't be there
                            if has_header:
                                # Find the end of the header and remove it (including following newlines)
                                header_with_newlines = HEADER_TEXT.strip() + '\n\n'
                                if original_content.startswith(header_with_newlines):
                                    content_without_header = original_content[len(header_with_newlines):]
                                else:
                                    # Handle differing newline patterns after the header
                                    header_end = len(HEADER_TEXT.strip())
                                    # Skip any newlines after the header
                                    while header_end < len(original_content) and original_content[header_end] in '\n\r':
                                        header_end += 1
                                    content_without_header = original_content[header_end:]
                                f.seek(0)
                                f.write(content_without_header)
                                f.truncate()
                                print(f"Removed header from: {file_path}")
                                files_modified += 1
                except Exception as e:
                    print(f"Error processing file {file_path}: {e}")
    print("\n--- Scan Complete ---")
    print(f"Total .ts or .tsx files found: {files_processed}")
    print(f"Files modified (headers added/removed): {files_modified}")


if __name__ == "__main__":
    # Get the target directory from the command line arguments.
    # If no directory is provided, default to the current directory ('.').
    if len(sys.argv) > 1:
        target_directory = sys.argv[1]
    else:
        target_directory = '.'  # Default to current directory
    if not os.path.isdir(target_directory):
        print(f"Error: Directory '{target_directory}' not found.")
        sys.exit(1)
    process_directory(os.path.abspath(target_directory))
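As written, the script takes one optional directory argument and defaults to the current directory, so running `python license.py server` from the repo root stamps the header onto every .ts/.tsx file whose path contains server/private and strips it from any other file that still carries it.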

View File

@@ -571,6 +571,129 @@ export function generateSubnetProxyTargets(
return targets;
}
export type SubnetProxyTargetV2 = {
sourcePrefixes: string[]; // must be cidrs
destPrefix: string; // must be a cidr
disableIcmp?: boolean;
rewriteTo?: string; // must be a cidr
portRange?: {
min: number;
max: number;
protocol: "tcp" | "udp";
}[];
};
export function generateSubnetProxyTargetV2(
siteResource: SiteResource,
clients: {
clientId: number;
pubKey: string | null;
subnet: string | null;
}[]
): SubnetProxyTargetV2 | undefined {
if (clients.length === 0) {
logger.debug(
`No clients have access to site resource ${siteResource.siteResourceId}, skipping target generation.`
);
return;
}
let target: SubnetProxyTargetV2 | null = null;
const portRange = [
...parsePortRangeString(siteResource.tcpPortRangeString, "tcp"),
...parsePortRangeString(siteResource.udpPortRangeString, "udp")
];
const disableIcmp = siteResource.disableIcmp ?? false;
if (siteResource.mode == "host") {
let destination = siteResource.destination;
// check if this is a valid ip
const ipSchema = z.union([z.ipv4(), z.ipv6()]);
if (ipSchema.safeParse(destination).success) {
destination = `${destination}/32`;
target = {
sourcePrefixes: [],
destPrefix: destination,
portRange,
disableIcmp
};
}
if (siteResource.alias && siteResource.aliasAddress) {
// replace the target with a match on the alias address that rewrites to the destination
target = {
sourcePrefixes: [],
destPrefix: `${siteResource.aliasAddress}/32`,
rewriteTo: destination,
portRange,
disableIcmp
};
}
} else if (siteResource.mode == "cidr") {
target = {
sourcePrefixes: [],
destPrefix: siteResource.destination,
portRange,
disableIcmp
};
}
if (!target) {
return;
}
for (const clientSite of clients) {
if (!clientSite.subnet) {
logger.debug(
`Client ${clientSite.clientId} has no subnet, skipping for site resource ${siteResource.siteResourceId}.`
);
continue;
}
const clientPrefix = `${clientSite.subnet.split("/")[0]}/32`;
// add client prefix to source prefixes
target.sourcePrefixes.push(clientPrefix);
}
// print a nice representation of the targets
// logger.debug(
// `Generated subnet proxy targets for: ${JSON.stringify(targets, null, 2)}`
// );
return target;
}
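To make the shape concrete, here is a sketch of what this returns for a hypothetical host-mode resource (destination 10.0.0.5, alias address 100.96.1.4, TCP 443 open, two clients; all values invented for illustration):

    // Because the alias branch runs last, the returned target matches the
    // alias address and rewrites to the real destination:
    const exampleTarget: SubnetProxyTargetV2 = {
        sourcePrefixes: ["100.90.128.2/32", "100.90.128.3/32"], // one /32 per client subnet
        destPrefix: "100.96.1.4/32", // the alias address
        rewriteTo: "10.0.0.5/32", // the destination, suffixed with /32 above
        portRange: [{ min: 443, max: 443, protocol: "tcp" }],
        disableIcmp: false
    };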
/**
 * Converts an array of SubnetProxyTargetV2 to an array of SubnetProxyTarget (v1)
 * by expanding each source prefix into its own target entry.
 * @param targetsV2 - The v2 targets to convert
 * @returns Array of v1 SubnetProxyTarget objects
 */
export function convertSubnetProxyTargetsV2ToV1(
targetsV2: SubnetProxyTargetV2[]
): SubnetProxyTarget[] {
return targetsV2.flatMap((targetV2) =>
targetV2.sourcePrefixes.map((sourcePrefix) => ({
sourcePrefix,
destPrefix: targetV2.destPrefix,
...(targetV2.disableIcmp !== undefined && {
disableIcmp: targetV2.disableIcmp
}),
...(targetV2.rewriteTo !== undefined && {
rewriteTo: targetV2.rewriteTo
}),
...(targetV2.portRange !== undefined && {
portRange: targetV2.portRange
})
}))
);
}
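Feeding the hypothetical target above to the converter fans each source prefix out into its own v1 entry, so the payload grows with client count; that is exactly the message-size growth the v2 shape from PR #2698 avoids on newer newts:

    // Continuing the sketch: one v1 target per source prefix, with the
    // shared fields duplicated into each entry.
    const v1Targets = convertSubnetProxyTargetsV2ToV1([exampleTarget]);
    // v1Targets[0].sourcePrefix === "100.90.128.2/32"
    // v1Targets[1].sourcePrefix === "100.90.128.3/32"
    // Both carry destPrefix "100.96.1.4/32" and rewriteTo "10.0.0.5/32".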
// Custom schema for validating port range strings
// Format: "80,443,8000-9000" or "*" for all ports, or empty string
export const portRangeStringSchema = z

View File

@@ -302,8 +302,8 @@ export const configSchema = z
.optional()
.default({
block_size: 24,
subnet_group: "100.90.128.0/24",
utility_subnet_group: "100.96.128.0/24"
subnet_group: "100.90.128.0/20",
utility_subnet_group: "100.96.128.0/20"
}),
rate_limits: z
.object({
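Widening both groups from /24 to /20 grows each pool from 256 to 4,096 addresses; with the default block_size of 24, that yields 16 carvable /24 blocks per group instead of one.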

View File

@@ -32,7 +32,7 @@ import logger from "@server/logger";
import {
generateAliasConfig,
generateRemoteSubnets,
generateSubnetProxyTargets,
generateSubnetProxyTargetV2,
parseEndpoint,
formatEndpoint
} from "@server/lib/ip";
@@ -660,19 +660,16 @@ async function handleSubnetProxyTargetUpdates(
);
if (addedClients.length > 0) {
const targetsToAdd = generateSubnetProxyTargets(
const targetToAdd = generateSubnetProxyTargetV2(
siteResource,
addedClients
);
if (targetsToAdd.length > 0) {
logger.info(
`Adding ${targetsToAdd.length} subnet proxy targets for siteResource ${siteResource.siteResourceId}`
);
if (targetToAdd) {
proxyJobs.push(
addSubnetProxyTargets(
newt.newtId,
targetsToAdd,
[targetToAdd],
newt.version
)
);
@@ -700,19 +697,16 @@ async function handleSubnetProxyTargetUpdates(
);
if (removedClients.length > 0) {
const targetsToRemove = generateSubnetProxyTargets(
const targetToRemove = generateSubnetProxyTargetV2(
siteResource,
removedClients
);
if (targetsToRemove.length > 0) {
logger.info(
`Removing ${targetsToRemove.length} subnet proxy targets for siteResource ${siteResource.siteResourceId}`
);
if (targetToRemove) {
proxyJobs.push(
removeSubnetProxyTargets(
newt.newtId,
targetsToRemove,
[targetToRemove],
newt.version
)
);
@@ -1169,7 +1163,7 @@ async function handleMessagesForClientResources(
}
for (const resource of resources) {
const targets = generateSubnetProxyTargets(resource, [
const target = generateSubnetProxyTargetV2(resource, [
{
clientId: client.clientId,
pubKey: client.pubKey,
@@ -1177,11 +1171,11 @@ async function handleMessagesForClientResources(
}
]);
if (targets.length > 0) {
if (target) {
proxyJobs.push(
addSubnetProxyTargets(
newt.newtId,
targets,
[target],
newt.version
)
);
@@ -1246,7 +1240,7 @@ async function handleMessagesForClientResources(
}
for (const resource of resources) {
const targets = generateSubnetProxyTargets(resource, [
const target = generateSubnetProxyTargetV2(resource, [
{
clientId: client.clientId,
pubKey: client.pubKey,
@@ -1254,11 +1248,11 @@ async function handleMessagesForClientResources(
}
]);
if (targets.length > 0) {
if (target) {
proxyJobs.push(
removeSubnetProxyTargets(
newt.newtId,
targets,
[target],
newt.version
)
);

View File

@@ -57,7 +57,10 @@ export const privateConfigSchema = z.object({
.object({
host: z.string(),
port: portSchema,
password: z.string().optional(),
password: z
.string()
.optional()
.transform(getEnvOrYaml("REDIS_PASSWORD")),
db: z.int().nonnegative().optional().default(0),
replicas: z
.array(
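getEnvOrYaml is not shown in this diff; assuming it follows the codebase's usual pattern of preferring an environment variable over the YAML value, a minimal sketch of such a transform factory:

    // Sketch only (the real helper lives elsewhere): returns a Zod transform
    // that uses the named environment variable when set, otherwise the value
    // parsed from the YAML config.
    function getEnvOrYaml(envVar: string) {
        return (yamlValue?: string): string | undefined =>
            process.env[envVar] ?? yamlValue;
    }

With this in place, setting REDIS_PASSWORD in the environment supplies or overrides the Redis password without writing it into the config file.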

View File

@@ -24,6 +24,7 @@ import {
sites,
userOrgs
} from "@server/db";
import { logAccessAudit } from "#private/lib/logAccessAudit";
import { isLicensedOrSubscribed } from "#private/lib/isLicencedOrSubscribed";
import { tierMatrix } from "@server/lib/billing/tierMatrix";
import response from "@server/lib/response";
@@ -463,6 +464,24 @@ export async function signSshKey(
})
});
await logAccessAudit({
action: true,
type: "ssh",
orgId: orgId,
resourceId: resource.siteResourceId,
user: req.user
? { username: req.user.username ?? "", userId: req.user.userId }
: undefined,
metadata: {
resourceName: resource.name,
siteId: resource.siteId,
sshUsername: usernameToUse,
sshHost: sshHost
},
userAgent: req.headers["user-agent"],
requestIp: req.ip
});
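This new "ssh" audit type is surfaced in the UI by the filter option and label handling added at the end of this diff.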
return response<SignSshKeyResponse>(res, {
data: {
certificate: cert.certificate,

View File

@@ -1,15 +1,54 @@
import { sendToClient } from "#dynamic/routers/ws";
import { db, olms, Transaction } from "@server/db";
import { db, newts, olms } from "@server/db";
import {
Alias,
convertSubnetProxyTargetsV2ToV1,
SubnetProxyTarget,
SubnetProxyTargetV2
} from "@server/lib/ip";
import { canCompress } from "@server/lib/clientVersionChecks";
import { Alias, SubnetProxyTarget } from "@server/lib/ip";
import logger from "@server/logger";
import { eq } from "drizzle-orm";
import semver from "semver";
const NEWT_V2_TARGETS_VERSION = ">=1.10.3";
export async function convertTargetsIfNessicary(
newtId: string,
targets: SubnetProxyTarget[] | SubnetProxyTargetV2[]
) {
// get the newt
const [newt] = await db
.select()
.from(newts)
.where(eq(newts.newtId, newtId));
if (!newt) {
throw new Error(`No newt found for id: ${newtId}`);
}
// check the semver
if (
newt.version &&
!semver.satisfies(newt.version, NEWT_V2_TARGETS_VERSION)
) {
logger.debug(
`convertTargetsIfNessicary: Newt version ${newt.version} does not support targets v2, falling back to v1`
);
targets = convertSubnetProxyTargetsV2ToV1(
targets as SubnetProxyTargetV2[]
);
}
return targets;
}
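The gate is a plain semver range check, so the fallback triggers for anything older than 1.10.3:

    // Quick check of the gate (sketch): anything below 1.10.3 falls back to v1.
    semver.satisfies("1.10.2", NEWT_V2_TARGETS_VERSION); // false, convert to v1
    semver.satisfies("1.10.3", NEWT_V2_TARGETS_VERSION); // true, send v2 as-is
    // A newt with a null version skips the check and receives v2 unconverted.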
export async function addTargets(
newtId: string,
targets: SubnetProxyTarget[],
targets: SubnetProxyTarget[] | SubnetProxyTargetV2[],
version?: string | null
) {
targets = await convertTargetsIfNessicary(newtId, targets);
await sendToClient(
newtId,
{
@@ -22,9 +61,11 @@ export async function addTargets(
export async function removeTargets(
newtId: string,
targets: SubnetProxyTarget[],
targets: SubnetProxyTarget[] | SubnetProxyTargetV2[],
version?: string | null
) {
targets = await convertTargetsIfNessicary(newtId, targets);
await sendToClient(
newtId,
{
@@ -38,11 +79,39 @@ export async function removeTargets(
export async function updateTargets(
newtId: string,
targets: {
oldTargets: SubnetProxyTarget[];
newTargets: SubnetProxyTarget[];
oldTargets: SubnetProxyTarget[] | SubnetProxyTargetV2[];
newTargets: SubnetProxyTarget[] | SubnetProxyTargetV2[];
},
version?: string | null
) {
// get the newt
const [newt] = await db
.select()
.from(newts)
.where(eq(newts.newtId, newtId));
if (!newt) {
logger.error(`updateTargets: No newt found for id: ${newtId}`);
return;
}
// check the semver
if (
newt.version &&
!semver.satisfies(newt.version, NEWT_V2_TARGETS_VERSION)
) {
logger.debug(
`updateTargets: Newt version ${newt.version} does not support targets v2, falling back to v1`
);
targets = {
oldTargets: convertSubnetProxyTargetsV2ToV1(
targets.oldTargets as SubnetProxyTargetV2[]
),
newTargets: convertSubnetProxyTargetsV2ToV1(
targets.newTargets as SubnetProxyTargetV2[]
)
};
}
await sendToClient(
newtId,
{

View File

@@ -1,6 +1,5 @@
import { Request, Response, NextFunction } from "express";
import { eq, sql } from "drizzle-orm";
import { sites } from "@server/db";
import { sql } from "drizzle-orm";
import { db } from "@server/db";
import logger from "@server/logger";
import createHttpError from "http-errors";
@@ -31,7 +30,10 @@ const MAX_RETRIES = 3;
const BASE_DELAY_MS = 50;
// How often to flush accumulated bandwidth data to the database
const FLUSH_INTERVAL_MS = 30_000; // 30 seconds
const FLUSH_INTERVAL_MS = 300_000; // 300 seconds (5 minutes)
// Maximum number of sites to include in a single batch UPDATE statement
const BATCH_CHUNK_SIZE = 250;
// In-memory accumulator: publicKey -> AccumulatorEntry
let accumulator = new Map<string, AccumulatorEntry>();
@@ -75,13 +77,33 @@ async function withDeadlockRetry<T>(
}
}
/**
* Execute a raw SQL query that returns rows, in a way that works across both
* the PostgreSQL driver (which exposes `execute`) and the SQLite driver (which
* exposes `all`). Drizzle's typed query builder doesn't support bulk
* UPDATE … FROM (VALUES …) natively, so we drop to raw SQL here.
*/
async function dbQueryRows<T extends Record<string, unknown>>(
query: Parameters<(typeof sql)["join"]>[0][number]
): Promise<T[]> {
const anyDb = db as any;
if (typeof anyDb.execute === "function") {
// PostgreSQL (node-postgres via Drizzle) — returns { rows: [...] } or an array
const result = await anyDb.execute(query);
return (Array.isArray(result) ? result : (result.rows ?? [])) as T[];
}
// SQLite (better-sqlite3 via Drizzle) — returns an array directly
return (await anyDb.all(query)) as T[];
}
/**
* Flush all accumulated site bandwidth data to the database.
*
* Swaps out the accumulator before writing so that any bandwidth messages
* received during the flush are captured in the new accumulator rather than
* being lost or causing contention. Entries that fail to write are re-queued
* back into the accumulator so they will be retried on the next flush.
* being lost or causing contention. Sites are updated in chunks via a single
* batch UPDATE per chunk. Failed chunks are discarded — exact per-flush
* accuracy is not critical and re-queuing is not worth the added complexity.
*
* This function is exported so that the application's graceful-shutdown
* cleanup handler can call it before the process exits.
@@ -108,76 +130,76 @@ export async function flushSiteBandwidthToDb(): Promise<void> {
`Flushing accumulated bandwidth data for ${sortedEntries.length} site(s) to the database`
);
// Aggregate billing usage by org, collected during the DB update loop.
// Build a lookup so post-processing can reach each entry by publicKey.
const snapshotMap = new Map(sortedEntries);
// Aggregate billing usage by org across all chunks.
const orgUsageMap = new Map<string, number>();
for (const [publicKey, { bytesIn, bytesOut, exitNodeId, calcUsage }] of sortedEntries) {
// Process in chunks so individual queries stay at a reasonable size.
for (let i = 0; i < sortedEntries.length; i += BATCH_CHUNK_SIZE) {
const chunk = sortedEntries.slice(i, i + BATCH_CHUNK_SIZE);
const chunkEnd = i + chunk.length - 1;
// Build a parameterised VALUES list: (pubKey, bytesIn, bytesOut), ...
// Both PostgreSQL and SQLite support UPDATE … FROM (VALUES …) with
// RETURNING (SQLite since 3.35.0, and better-sqlite3 bundles a newer
// build), letting us update the whole chunk in a single query instead
// of N individual round-trips.
const valuesList = chunk.map(([publicKey, { bytesIn, bytesOut }]) =>
sql`(${publicKey}, ${bytesIn}, ${bytesOut})`
);
const valuesClause = sql.join(valuesList, sql`, `);
let rows: { orgId: string; pubKey: string }[] = [];
try {
const updatedSite = await withDeadlockRetry(async () => {
const [result] = await db
.update(sites)
.set({
megabytesOut: sql`COALESCE(${sites.megabytesOut}, 0) + ${bytesIn}`,
megabytesIn: sql`COALESCE(${sites.megabytesIn}, 0) + ${bytesOut}`,
lastBandwidthUpdate: currentTime,
})
.where(eq(sites.pubKey, publicKey))
.returning({
orgId: sites.orgId,
siteId: sites.siteId
});
return result;
}, `flush bandwidth for site ${publicKey}`);
if (updatedSite) {
if (exitNodeId) {
const notAllowed = await checkExitNodeOrg(
exitNodeId,
updatedSite.orgId
);
if (notAllowed) {
logger.warn(
`Exit node ${exitNodeId} is not allowed for org ${updatedSite.orgId}`
);
// Skip usage tracking for this site but continue
// processing the rest.
continue;
}
}
if (calcUsage) {
const totalBandwidth = bytesIn + bytesOut;
const current = orgUsageMap.get(updatedSite.orgId) ?? 0;
orgUsageMap.set(updatedSite.orgId, current + totalBandwidth);
}
}
rows = await withDeadlockRetry(async () => {
return dbQueryRows<{ orgId: string; pubKey: string }>(sql`
UPDATE sites
SET
"bytesOut" = COALESCE("bytesOut", 0) + v.bytes_in,
"bytesIn" = COALESCE("bytesIn", 0) + v.bytes_out,
"lastBandwidthUpdate" = ${currentTime}
FROM (VALUES ${valuesClause}) AS v(pub_key, bytes_in, bytes_out)
WHERE sites."pubKey" = v.pub_key
RETURNING sites."orgId" AS "orgId", sites."pubKey" AS "pubKey"
`);
}, `flush bandwidth chunk [${i}-${chunkEnd}]`);
} catch (error) {
logger.error(
`Failed to flush bandwidth for site ${publicKey}:`,
`Failed to flush bandwidth chunk [${i}-${chunkEnd}], discarding ${chunk.length} site(s):`,
error
);
// Discard the chunk — exact per-flush accuracy is not critical.
continue;
}
// Re-queue the failed entry so it is retried on the next flush
// rather than silently dropped.
const existing = accumulator.get(publicKey);
if (existing) {
existing.bytesIn += bytesIn;
existing.bytesOut += bytesOut;
} else {
accumulator.set(publicKey, {
bytesIn,
bytesOut,
exitNodeId,
calcUsage
});
// Collect billing usage from the returned rows.
for (const { orgId, pubKey } of rows) {
const entry = snapshotMap.get(pubKey);
if (!entry) continue;
const { bytesIn, bytesOut, exitNodeId, calcUsage } = entry;
if (exitNodeId) {
const notAllowed = await checkExitNodeOrg(exitNodeId, orgId);
if (notAllowed) {
logger.warn(
`Exit node ${exitNodeId} is not allowed for org ${orgId}`
);
continue;
}
}
if (calcUsage) {
const current = orgUsageMap.get(orgId) ?? 0;
orgUsageMap.set(orgId, current + bytesIn + bytesOut);
}
}
}
// Process billing usage updates outside the site-update loop to keep
// lock scope small and concerns separated.
// Process billing usage updates after all chunks are written.
if (orgUsageMap.size > 0) {
// Sort org IDs for consistent lock ordering.
const sortedOrgIds = [...orgUsageMap.keys()].sort();
for (const orgId of sortedOrgIds) {

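For illustration, a two-entry chunk renders to a single statement of roughly this shape (a sketch with values inlined; the real query is parameterised through the sql tag, and the site keys and timestamp here are invented):

    // Hypothetical rendering for a chunk of two sites:
    const renderedStatement = `
        UPDATE sites
        SET
            "bytesOut" = COALESCE("bytesOut", 0) + v.bytes_in,
            "bytesIn" = COALESCE("bytesIn", 0) + v.bytes_out,
            "lastBandwidthUpdate" = '2026-03-25 20:35:21'
        FROM (VALUES ('pk-site-a', 1024, 2048), ('pk-site-b', 512, 4096))
            AS v(pub_key, bytes_in, bytes_out)
        WHERE sites."pubKey" = v.pub_key
        RETURNING sites."orgId" AS "orgId", sites."pubKey" AS "pubKey"`;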
View File

@@ -16,8 +16,8 @@ import { eq, and } from "drizzle-orm";
import config from "@server/lib/config";
import {
formatEndpoint,
generateSubnetProxyTargets,
SubnetProxyTarget
generateSubnetProxyTargetV2,
SubnetProxyTargetV2
} from "@server/lib/ip";
export async function buildClientConfigurationForNewtClient(
@@ -143,7 +143,7 @@ export async function buildClientConfigurationForNewtClient(
.from(siteResources)
.where(eq(siteResources.siteId, siteId));
const targetsToSend: SubnetProxyTarget[] = [];
const targetsToSend: SubnetProxyTargetV2[] = [];
for (const resource of allSiteResources) {
// Get clients associated with this specific resource
@@ -168,12 +168,14 @@ export async function buildClientConfigurationForNewtClient(
)
);
const resourceTargets = generateSubnetProxyTargets(
const resourceTarget = generateSubnetProxyTargetV2(
resource,
resourceClients
);
targetsToSend.push(...resourceTargets);
if (resourceTarget) {
targetsToSend.push(resourceTarget);
}
}
return {

View File

@@ -6,6 +6,7 @@ import { db, ExitNode, exitNodes, Newt, sites } from "@server/db";
import { eq } from "drizzle-orm";
import { sendToExitNode } from "#dynamic/lib/exitNodes";
import { buildClientConfigurationForNewtClient } from "./buildConfiguration";
import { convertTargetsIfNessicary } from "../client/targets";
import { canCompress } from "@server/lib/clientVersionChecks";
const inputSchema = z.object({
@@ -127,13 +128,15 @@ export const handleGetConfigMessage: MessageHandler = async (context) => {
exitNode
);
const targetsToSend = await convertTargetsIfNessicary(newt.newtId, targets);
return {
message: {
type: "newt/wg/receive-config",
data: {
ipAddress: site.address,
peers,
targets
targets: targetsToSend
}
},
options: {

View File

@@ -88,7 +88,7 @@ const createSiteResourceSchema = z
},
{
message:
"Destination must be a valid IP address or valid domain AND alias is required"
"Destination must be a valid IPV4 address or valid domain AND alias is required"
}
)
.refine(

View File

@@ -24,7 +24,7 @@ import { updatePeerData, updateTargets } from "@server/routers/client/targets";
import {
generateAliasConfig,
generateRemoteSubnets,
generateSubnetProxyTargets,
generateSubnetProxyTargetV2,
isIpInCidr,
portRangeStringSchema
} from "@server/lib/ip";
@@ -608,18 +608,18 @@ export async function handleMessagingForUpdatedSiteResource(
// Only update targets on newt if destination changed
if (destinationChanged || portRangesChanged) {
const oldTargets = generateSubnetProxyTargets(
const oldTarget = generateSubnetProxyTargetV2(
existingSiteResource,
mergedAllClients
);
const newTargets = generateSubnetProxyTargets(
const newTarget = generateSubnetProxyTargetV2(
updatedSiteResource,
mergedAllClients
);
await updateTargets(newt.newtId, {
oldTargets: oldTargets,
newTargets: newTargets
oldTargets: oldTarget ? [oldTarget] : [],
newTargets: newTarget ? [newTarget] : []
}, newt.version);
}

View File

@@ -493,7 +493,8 @@ export default function GeneralPage() {
{
value: "whitelistedEmail",
label: "Whitelisted Email"
}
},
{ value: "ssh", label: "SSH" }
]}
selectedValue={filters.type}
onValueChange={(value) =>
@@ -507,13 +508,12 @@ export default function GeneralPage() {
);
},
cell: ({ row }) => {
// should be capitalized first letter
return (
<span>
{row.original.type.charAt(0).toUpperCase() +
row.original.type.slice(1) || "-"}
</span>
);
const typeLabel =
row.original.type === "ssh"
? "SSH"
: row.original.type.charAt(0).toUpperCase() +
row.original.type.slice(1);
return <span>{typeLabel || "-"}</span>;
}
},
{