Compare commits

..

1 Commits

Author SHA1 Message Date
Owen
48abb9e98c Breakout sites tables 2026-04-08 22:04:12 -04:00
37 changed files with 807 additions and 1736 deletions

View File

@@ -1817,11 +1817,6 @@
"editInternalResourceDialogModePort": "Port", "editInternalResourceDialogModePort": "Port",
"editInternalResourceDialogModeHost": "Host", "editInternalResourceDialogModeHost": "Host",
"editInternalResourceDialogModeCidr": "CIDR", "editInternalResourceDialogModeCidr": "CIDR",
"editInternalResourceDialogModeHttp": "HTTP",
"editInternalResourceDialogModeHttps": "HTTPS",
"editInternalResourceDialogScheme": "Scheme",
"editInternalResourceDialogEnableSsl": "Enable SSL",
"editInternalResourceDialogEnableSslDescription": "Enable SSL/TLS encryption for secure HTTPS connections to the destination.",
"editInternalResourceDialogDestination": "Destination", "editInternalResourceDialogDestination": "Destination",
"editInternalResourceDialogDestinationHostDescription": "The IP address or hostname of the resource on the site's network.", "editInternalResourceDialogDestinationHostDescription": "The IP address or hostname of the resource on the site's network.",
"editInternalResourceDialogDestinationIPDescription": "The IP or hostname address of the resource on the site's network.", "editInternalResourceDialogDestinationIPDescription": "The IP or hostname address of the resource on the site's network.",
@@ -1865,19 +1860,11 @@
"createInternalResourceDialogModePort": "Port", "createInternalResourceDialogModePort": "Port",
"createInternalResourceDialogModeHost": "Host", "createInternalResourceDialogModeHost": "Host",
"createInternalResourceDialogModeCidr": "CIDR", "createInternalResourceDialogModeCidr": "CIDR",
"createInternalResourceDialogModeHttp": "HTTP",
"createInternalResourceDialogModeHttps": "HTTPS",
"scheme": "Scheme",
"createInternalResourceDialogScheme": "Scheme",
"createInternalResourceDialogEnableSsl": "Enable SSL",
"createInternalResourceDialogEnableSslDescription": "Enable SSL/TLS encryption for secure HTTPS connections to the destination.",
"createInternalResourceDialogDestination": "Destination", "createInternalResourceDialogDestination": "Destination",
"createInternalResourceDialogDestinationHostDescription": "The IP address or hostname of the resource on the site's network.", "createInternalResourceDialogDestinationHostDescription": "The IP address or hostname of the resource on the site's network.",
"createInternalResourceDialogDestinationCidrDescription": "The CIDR range of the resource on the site's network.", "createInternalResourceDialogDestinationCidrDescription": "The CIDR range of the resource on the site's network.",
"createInternalResourceDialogAlias": "Alias", "createInternalResourceDialogAlias": "Alias",
"createInternalResourceDialogAliasDescription": "An optional internal DNS alias for this resource.", "createInternalResourceDialogAliasDescription": "An optional internal DNS alias for this resource.",
"internalResourceDownstreamSchemeRequired": "Scheme is required for HTTP resources",
"internalResourceHttpPortRequired": "Destination port is required for HTTP resources",
"siteConfiguration": "Configuration", "siteConfiguration": "Configuration",
"siteAcceptClientConnections": "Accept Client Connections", "siteAcceptClientConnections": "Accept Client Connections",
"siteAcceptClientConnectionsDescription": "Allow user devices and clients to access resources on this site. This can be changed later.", "siteAcceptClientConnectionsDescription": "Allow user devices and clients to access resources on this site. This can be changed later.",
@@ -2129,7 +2116,6 @@
"domainPickerFreeProvidedDomain": "Free Provided Domain", "domainPickerFreeProvidedDomain": "Free Provided Domain",
"domainPickerVerified": "Verified", "domainPickerVerified": "Verified",
"domainPickerUnverified": "Unverified", "domainPickerUnverified": "Unverified",
"domainPickerManual": "Manual",
"domainPickerInvalidSubdomainStructure": "This subdomain contains invalid characters or structure. It will be sanitized automatically when you save.", "domainPickerInvalidSubdomainStructure": "This subdomain contains invalid characters or structure. It will be sanitized automatically when you save.",
"domainPickerError": "Error", "domainPickerError": "Error",
"domainPickerErrorLoadDomains": "Failed to load organization domains", "domainPickerErrorLoadDomains": "Failed to load organization domains",
@@ -2673,12 +2659,8 @@
"editInternalResourceDialogAddUsers": "Add Users", "editInternalResourceDialogAddUsers": "Add Users",
"editInternalResourceDialogAddClients": "Add Clients", "editInternalResourceDialogAddClients": "Add Clients",
"editInternalResourceDialogDestinationLabel": "Destination", "editInternalResourceDialogDestinationLabel": "Destination",
"editInternalResourceDialogDestinationDescription": "Choose where this resource runs and how clients reach it, then complete the settings that apply to your setup.", "editInternalResourceDialogDestinationDescription": "Specify the destination address for the internal resource. This can be a hostname, IP address, or CIDR range depending on the selected mode. Optionally set an internal DNS alias for easier identification.",
"editInternalResourceDialogPortRestrictionsDescription": "Restrict access to specific TCP/UDP ports or allow/block all ports.", "editInternalResourceDialogPortRestrictionsDescription": "Restrict access to specific TCP/UDP ports or allow/block all ports.",
"createInternalResourceDialogHttpConfiguration": "HTTP configuration",
"createInternalResourceDialogHttpConfigurationDescription": "Choose the domain clients will use to reach this resource over HTTP or HTTPS.",
"editInternalResourceDialogHttpConfiguration": "HTTP configuration",
"editInternalResourceDialogHttpConfigurationDescription": "Choose the domain clients will use to reach this resource over HTTP or HTTPS.",
"editInternalResourceDialogTcp": "TCP", "editInternalResourceDialogTcp": "TCP",
"editInternalResourceDialogUdp": "UDP", "editInternalResourceDialogUdp": "UDP",
"editInternalResourceDialogIcmp": "ICMP", "editInternalResourceDialogIcmp": "ICMP",
@@ -2717,8 +2699,6 @@
"maintenancePageMessagePlaceholder": "We'll be back soon! Our site is currently undergoing scheduled maintenance.", "maintenancePageMessagePlaceholder": "We'll be back soon! Our site is currently undergoing scheduled maintenance.",
"maintenancePageMessageDescription": "Detailed message explaining the maintenance", "maintenancePageMessageDescription": "Detailed message explaining the maintenance",
"maintenancePageTimeTitle": "Estimated Completion Time (Optional)", "maintenancePageTimeTitle": "Estimated Completion Time (Optional)",
"privateMaintenanceScreenTitle": "Private Placeholder Screen",
"privateMaintenanceScreenMessage": "This domain is being used on a private resource. Please connect using the Pangolin client to access this resource.",
"maintenanceTime": "e.g., 2 hours, Nov 1 at 5:00 PM", "maintenanceTime": "e.g., 2 hours, Nov 1 at 5:00 PM",
"maintenanceEstimatedTimeDescription": "When you expect maintenance to be completed", "maintenanceEstimatedTimeDescription": "When you expect maintenance to be completed",
"editDomain": "Edit Domain", "editDomain": "Edit Domain",

View File

@@ -57,9 +57,7 @@ export const orgs = pgTable("orgs", {
settingsLogRetentionDaysAction: integer("settingsLogRetentionDaysAction") // where 0 = dont keep logs and -1 = keep forever and 9001 = end of the following year settingsLogRetentionDaysAction: integer("settingsLogRetentionDaysAction") // where 0 = dont keep logs and -1 = keep forever and 9001 = end of the following year
.notNull() .notNull()
.default(0), .default(0),
settingsLogRetentionDaysConnection: integer( settingsLogRetentionDaysConnection: integer("settingsLogRetentionDaysConnection") // where 0 = dont keep logs and -1 = keep forever and 9001 = end of the following year
"settingsLogRetentionDaysConnection"
) // where 0 = dont keep logs and -1 = keep forever and 9001 = end of the following year
.notNull() .notNull()
.default(0), .default(0),
sshCaPrivateKey: text("sshCaPrivateKey"), // Encrypted SSH CA private key (PEM format) sshCaPrivateKey: text("sshCaPrivateKey"), // Encrypted SSH CA private key (PEM format)
@@ -91,21 +89,15 @@ export const sites = pgTable("sites", {
name: varchar("name").notNull(), name: varchar("name").notNull(),
pubKey: varchar("pubKey"), pubKey: varchar("pubKey"),
subnet: varchar("subnet"), subnet: varchar("subnet"),
megabytesIn: real("bytesIn").default(0),
megabytesOut: real("bytesOut").default(0),
lastBandwidthUpdate: varchar("lastBandwidthUpdate"),
type: varchar("type").notNull(), // "newt" or "wireguard" type: varchar("type").notNull(), // "newt" or "wireguard"
online: boolean("online").notNull().default(false), online: boolean("online").notNull().default(false),
lastPing: integer("lastPing"),
address: varchar("address"), address: varchar("address"),
endpoint: varchar("endpoint"), endpoint: varchar("endpoint"),
publicKey: varchar("publicKey"), publicKey: varchar("publicKey"),
lastHolePunch: bigint("lastHolePunch", { mode: "number" }), lastHolePunch: bigint("lastHolePunch", { mode: "number" }),
listenPort: integer("listenPort"), listenPort: integer("listenPort"),
dockerSocketEnabled: boolean("dockerSocketEnabled").notNull().default(true), dockerSocketEnabled: boolean("dockerSocketEnabled").notNull().default(true),
status: varchar("status") status: varchar("status").$type<"pending" | "approved">().default("approved")
.$type<"pending" | "approved">()
.default("approved")
}); });
export const resources = pgTable("resources", { export const resources = pgTable("resources", {
@@ -234,9 +226,8 @@ export const siteResources = pgTable("siteResources", {
.references(() => orgs.orgId, { onDelete: "cascade" }), .references(() => orgs.orgId, { onDelete: "cascade" }),
niceId: varchar("niceId").notNull(), niceId: varchar("niceId").notNull(),
name: varchar("name").notNull(), name: varchar("name").notNull(),
ssl: boolean("ssl").notNull().default(false), mode: varchar("mode").$type<"host" | "cidr">().notNull(), // "host" | "cidr" | "port"
mode: varchar("mode").$type<"host" | "cidr" | "http">().notNull(), // "host" | "cidr" | "http" protocol: varchar("protocol"), // only for port mode
scheme: varchar("scheme").$type<"http" | "https">(), // only for when we are doing https or http mode
proxyPort: integer("proxyPort"), // only for port mode proxyPort: integer("proxyPort"), // only for port mode
destinationPort: integer("destinationPort"), // only for port mode destinationPort: integer("destinationPort"), // only for port mode
destination: varchar("destination").notNull(), // ip, cidr, hostname; validate against the mode destination: varchar("destination").notNull(), // ip, cidr, hostname; validate against the mode
@@ -734,10 +725,7 @@ export const clients = pgTable("clients", {
name: varchar("name").notNull(), name: varchar("name").notNull(),
pubKey: varchar("pubKey"), pubKey: varchar("pubKey"),
subnet: varchar("subnet").notNull(), subnet: varchar("subnet").notNull(),
megabytesIn: real("bytesIn"),
megabytesOut: real("bytesOut"),
lastBandwidthUpdate: varchar("lastBandwidthUpdate"),
lastPing: integer("lastPing"),
type: varchar("type").notNull(), // "olm" type: varchar("type").notNull(), // "olm"
online: boolean("online").notNull().default(false), online: boolean("online").notNull().default(false),
// endpoint: varchar("endpoint"), // endpoint: varchar("endpoint"),
@@ -750,6 +738,42 @@ export const clients = pgTable("clients", {
>() >()
}); });
export const sitePing = pgTable("sitePing", {
siteId: integer("siteId")
.primaryKey()
.references(() => sites.siteId, { onDelete: "cascade" })
.notNull(),
lastPing: integer("lastPing")
});
export const siteBandwidth = pgTable("siteBandwidth", {
siteId: integer("siteId")
.primaryKey()
.references(() => sites.siteId, { onDelete: "cascade" })
.notNull(),
megabytesIn: real("bytesIn").default(0),
megabytesOut: real("bytesOut").default(0),
lastBandwidthUpdate: integer("lastBandwidthUpdate") // unix epoch
});
export const clientPing = pgTable("clientPing", {
clientId: integer("clientId")
.primaryKey()
.references(() => clients.clientId, { onDelete: "cascade" })
.notNull(),
lastPing: integer("lastPing")
});
export const clientBandwidth = pgTable("clientBandwidth", {
clientId: integer("clientId")
.primaryKey()
.references(() => clients.clientId, { onDelete: "cascade" })
.notNull(),
megabytesIn: real("bytesIn"),
megabytesOut: real("bytesOut"),
lastBandwidthUpdate: integer("lastBandwidthUpdate") // unix epoch
});
export const clientSitesAssociationsCache = pgTable( export const clientSitesAssociationsCache = pgTable(
"clientSitesAssociationsCache", "clientSitesAssociationsCache",
{ {
@@ -1111,3 +1135,7 @@ export type RequestAuditLog = InferSelectModel<typeof requestAuditLog>;
export type RoundTripMessageTracker = InferSelectModel< export type RoundTripMessageTracker = InferSelectModel<
typeof roundTripMessageTracker typeof roundTripMessageTracker
>; >;
export type SitePing = typeof sitePing.$inferSelect;
export type SiteBandwidth = typeof siteBandwidth.$inferSelect;
export type ClientPing = typeof clientPing.$inferSelect;
export type ClientBandwidth = typeof clientBandwidth.$inferSelect;

View File

@@ -54,9 +54,7 @@ export const orgs = sqliteTable("orgs", {
settingsLogRetentionDaysAction: integer("settingsLogRetentionDaysAction") // where 0 = dont keep logs and -1 = keep forever and 9001 = end of the following year settingsLogRetentionDaysAction: integer("settingsLogRetentionDaysAction") // where 0 = dont keep logs and -1 = keep forever and 9001 = end of the following year
.notNull() .notNull()
.default(0), .default(0),
settingsLogRetentionDaysConnection: integer( settingsLogRetentionDaysConnection: integer("settingsLogRetentionDaysConnection") // where 0 = dont keep logs and -1 = keep forever and 9001 = end of the following year
"settingsLogRetentionDaysConnection"
) // where 0 = dont keep logs and -1 = keep forever and 9001 = end of the following year
.notNull() .notNull()
.default(0), .default(0),
sshCaPrivateKey: text("sshCaPrivateKey"), // Encrypted SSH CA private key (PEM format) sshCaPrivateKey: text("sshCaPrivateKey"), // Encrypted SSH CA private key (PEM format)
@@ -97,12 +95,8 @@ export const sites = sqliteTable("sites", {
name: text("name").notNull(), name: text("name").notNull(),
pubKey: text("pubKey"), pubKey: text("pubKey"),
subnet: text("subnet"), subnet: text("subnet"),
megabytesIn: integer("bytesIn").default(0),
megabytesOut: integer("bytesOut").default(0),
lastBandwidthUpdate: text("lastBandwidthUpdate"),
type: text("type").notNull(), // "newt" or "wireguard" type: text("type").notNull(), // "newt" or "wireguard"
online: integer("online", { mode: "boolean" }).notNull().default(false), online: integer("online", { mode: "boolean" }).notNull().default(false),
lastPing: integer("lastPing"),
// exit node stuff that is how to connect to the site when it has a wg server // exit node stuff that is how to connect to the site when it has a wg server
address: text("address"), // this is the address of the wireguard interface in newt address: text("address"), // this is the address of the wireguard interface in newt
@@ -260,9 +254,8 @@ export const siteResources = sqliteTable("siteResources", {
.references(() => orgs.orgId, { onDelete: "cascade" }), .references(() => orgs.orgId, { onDelete: "cascade" }),
niceId: text("niceId").notNull(), niceId: text("niceId").notNull(),
name: text("name").notNull(), name: text("name").notNull(),
ssl: integer("ssl", { mode: "boolean" }).notNull().default(false), mode: text("mode").$type<"host" | "cidr">().notNull(), // "host" | "cidr" | "port"
mode: text("mode").$type<"host" | "cidr" | "http">().notNull(), // "host" | "cidr" | "http" protocol: text("protocol"), // only for port mode
scheme: text("scheme").$type<"http" | "https">(), // only for when we are doing https or http mode
proxyPort: integer("proxyPort"), // only for port mode proxyPort: integer("proxyPort"), // only for port mode
destinationPort: integer("destinationPort"), // only for port mode destinationPort: integer("destinationPort"), // only for port mode
destination: text("destination").notNull(), // ip, cidr, hostname destination: text("destination").notNull(), // ip, cidr, hostname
@@ -402,10 +395,7 @@ export const clients = sqliteTable("clients", {
pubKey: text("pubKey"), pubKey: text("pubKey"),
olmId: text("olmId"), // to lock it to a specific olm optionally olmId: text("olmId"), // to lock it to a specific olm optionally
subnet: text("subnet").notNull(), subnet: text("subnet").notNull(),
megabytesIn: integer("bytesIn"),
megabytesOut: integer("bytesOut"),
lastBandwidthUpdate: text("lastBandwidthUpdate"),
lastPing: integer("lastPing"),
type: text("type").notNull(), // "olm" type: text("type").notNull(), // "olm"
online: integer("online", { mode: "boolean" }).notNull().default(false), online: integer("online", { mode: "boolean" }).notNull().default(false),
// endpoint: text("endpoint"), // endpoint: text("endpoint"),
@@ -417,6 +407,42 @@ export const clients = sqliteTable("clients", {
>() >()
}); });
export const sitePing = sqliteTable("sitePing", {
siteId: integer("siteId")
.primaryKey()
.references(() => sites.siteId, { onDelete: "cascade" })
.notNull(),
lastPing: integer("lastPing")
});
export const siteBandwidth = sqliteTable("siteBandwidth", {
siteId: integer("siteId")
.primaryKey()
.references(() => sites.siteId, { onDelete: "cascade" })
.notNull(),
megabytesIn: integer("bytesIn").default(0),
megabytesOut: integer("bytesOut").default(0),
lastBandwidthUpdate: integer("lastBandwidthUpdate") // unix epoch
});
export const clientPing = sqliteTable("clientPing", {
clientId: integer("clientId")
.primaryKey()
.references(() => clients.clientId, { onDelete: "cascade" })
.notNull(),
lastPing: integer("lastPing")
});
export const clientBandwidth = sqliteTable("clientBandwidth", {
clientId: integer("clientId")
.primaryKey()
.references(() => clients.clientId, { onDelete: "cascade" })
.notNull(),
megabytesIn: integer("bytesIn"),
megabytesOut: integer("bytesOut"),
lastBandwidthUpdate: integer("lastBandwidthUpdate") // unix epoch
});
export const clientSitesAssociationsCache = sqliteTable( export const clientSitesAssociationsCache = sqliteTable(
"clientSitesAssociationsCache", "clientSitesAssociationsCache",
{ {
@@ -1212,3 +1238,7 @@ export type DeviceWebAuthCode = InferSelectModel<typeof deviceWebAuthCodes>;
export type RoundTripMessageTracker = InferSelectModel< export type RoundTripMessageTracker = InferSelectModel<
typeof roundTripMessageTracker typeof roundTripMessageTracker
>; >;
export type SitePing = typeof sitePing.$inferSelect;
export type SiteBandwidth = typeof siteBandwidth.$inferSelect;
export type ClientPing = typeof clientPing.$inferSelect;
export type ClientBandwidth = typeof clientBandwidth.$inferSelect;

View File

@@ -22,7 +22,6 @@ import { TraefikConfigManager } from "@server/lib/traefik/TraefikConfigManager";
import { initCleanup } from "#dynamic/cleanup"; import { initCleanup } from "#dynamic/cleanup";
import license from "#dynamic/license/license"; import license from "#dynamic/license/license";
import { initLogCleanupInterval } from "@server/lib/cleanupLogs"; import { initLogCleanupInterval } from "@server/lib/cleanupLogs";
import { initAcmeCertSync } from "#dynamic/lib/acmeCertSync";
import { fetchServerIp } from "@server/lib/serverIpService"; import { fetchServerIp } from "@server/lib/serverIpService";
async function startServers() { async function startServers() {
@@ -40,7 +39,6 @@ async function startServers() {
initTelemetryClient(); initTelemetryClient();
initLogCleanupInterval(); initLogCleanupInterval();
initAcmeCertSync();
// Start all servers // Start all servers
const apiServer = createApiServer(); const apiServer = createApiServer();

View File

@@ -1,3 +0,0 @@
export function initAcmeCertSync(): void {
// stub
}

View File

@@ -16,20 +16,6 @@ import { Config } from "./types";
import logger from "@server/logger"; import logger from "@server/logger";
import { getNextAvailableAliasAddress } from "../ip"; import { getNextAvailableAliasAddress } from "../ip";
function siteResourceModeForDb(mode: "host" | "cidr" | "http" | "https"): {
mode: "host" | "cidr" | "http";
ssl: boolean;
scheme: "http" | "https" | null;
} {
if (mode === "https") {
return { mode: "http", ssl: true, scheme: "https" };
}
if (mode === "http") {
return { mode: "http", ssl: false, scheme: "http" };
}
return { mode, ssl: false, scheme: null };
}
export type ClientResourcesResults = { export type ClientResourcesResults = {
newSiteResource: SiteResource; newSiteResource: SiteResource;
oldSiteResource?: SiteResource; oldSiteResource?: SiteResource;
@@ -90,18 +76,14 @@ export async function updateClientResources(
} }
if (existingResource) { if (existingResource) {
const mappedMode = siteResourceModeForDb(resourceData.mode);
// Update existing resource // Update existing resource
const [updatedResource] = await trx const [updatedResource] = await trx
.update(siteResources) .update(siteResources)
.set({ .set({
name: resourceData.name || resourceNiceId, name: resourceData.name || resourceNiceId,
siteId: site.siteId, siteId: site.siteId,
mode: mappedMode.mode, mode: resourceData.mode,
ssl: mappedMode.ssl,
scheme: mappedMode.scheme,
destination: resourceData.destination, destination: resourceData.destination,
destinationPort: resourceData["destination-port"],
enabled: true, // hardcoded for now enabled: true, // hardcoded for now
// enabled: resourceData.enabled ?? true, // enabled: resourceData.enabled ?? true,
alias: resourceData.alias || null, alias: resourceData.alias || null,
@@ -225,9 +207,9 @@ export async function updateClientResources(
oldSiteResource: existingResource oldSiteResource: existingResource
}); });
} else { } else {
const mappedMode = siteResourceModeForDb(resourceData.mode);
let aliasAddress: string | null = null; let aliasAddress: string | null = null;
if (mappedMode.mode === "host" || mappedMode.mode === "http") { if (resourceData.mode == "host") {
// we can only have an alias on a host
aliasAddress = await getNextAvailableAliasAddress(orgId); aliasAddress = await getNextAvailableAliasAddress(orgId);
} }
@@ -239,11 +221,8 @@ export async function updateClientResources(
siteId: site.siteId, siteId: site.siteId,
niceId: resourceNiceId, niceId: resourceNiceId,
name: resourceData.name || resourceNiceId, name: resourceData.name || resourceNiceId,
mode: mappedMode.mode, mode: resourceData.mode,
ssl: mappedMode.ssl,
scheme: mappedMode.scheme,
destination: resourceData.destination, destination: resourceData.destination,
destinationPort: resourceData["destination-port"],
enabled: true, // hardcoded for now enabled: true, // hardcoded for now
// enabled: resourceData.enabled ?? true, // enabled: resourceData.enabled ?? true,
alias: resourceData.alias || null, alias: resourceData.alias || null,

View File

@@ -325,11 +325,11 @@ export function isTargetsOnlyResource(resource: any): boolean {
export const ClientResourceSchema = z export const ClientResourceSchema = z
.object({ .object({
name: z.string().min(1).max(255), name: z.string().min(1).max(255),
mode: z.enum(["host", "cidr", "http", "https"]), mode: z.enum(["host", "cidr"]),
site: z.string(), site: z.string(),
// protocol: z.enum(["tcp", "udp"]).optional(), // protocol: z.enum(["tcp", "udp"]).optional(),
// proxyPort: z.int().positive().optional(), // proxyPort: z.int().positive().optional(),
"destination-port": z.int().positive().optional(), // destinationPort: z.int().positive().optional(),
destination: z.string().min(1), destination: z.string().min(1),
// enabled: z.boolean().default(true), // enabled: z.boolean().default(true),
"tcp-ports": portRangeStringSchema.optional().default("*"), "tcp-ports": portRangeStringSchema.optional().default("*"),

View File

@@ -582,16 +582,6 @@ export type SubnetProxyTargetV2 = {
protocol: "tcp" | "udp"; protocol: "tcp" | "udp";
}[]; }[];
resourceId?: number; resourceId?: number;
protocol?: "http" | "https"; // if set, this target only applies to the specified protocol
httpTargets?: HTTPTarget[];
tlsCert?: string;
tlsKey?: string;
};
export type HTTPTarget = {
destAddr: string; // must be an IP or hostname
destPort: number;
scheme: "http" | "https";
}; };
export function generateSubnetProxyTargetV2( export function generateSubnetProxyTargetV2(
@@ -629,7 +619,7 @@ export function generateSubnetProxyTargetV2(
destPrefix: destination, destPrefix: destination,
portRange, portRange,
disableIcmp, disableIcmp,
resourceId: siteResource.siteResourceId resourceId: siteResource.siteResourceId,
}; };
} }
@@ -641,7 +631,7 @@ export function generateSubnetProxyTargetV2(
rewriteTo: destination, rewriteTo: destination,
portRange, portRange,
disableIcmp, disableIcmp,
resourceId: siteResource.siteResourceId resourceId: siteResource.siteResourceId,
}; };
} }
} else if (siteResource.mode == "cidr") { } else if (siteResource.mode == "cidr") {
@@ -650,46 +640,7 @@ export function generateSubnetProxyTargetV2(
destPrefix: siteResource.destination, destPrefix: siteResource.destination,
portRange, portRange,
disableIcmp, disableIcmp,
resourceId: siteResource.siteResourceId
};
} else if (siteResource.mode == "http") {
let destination = siteResource.destination;
// check if this is a valid ip
const ipSchema = z.union([z.ipv4(), z.ipv6()]);
if (ipSchema.safeParse(destination).success) {
destination = `${destination}/32`;
}
if (
!siteResource.alias ||
!siteResource.aliasAddress ||
!siteResource.destinationPort ||
!siteResource.scheme
) {
logger.debug(
`Site resource ${siteResource.siteResourceId} is in HTTP mode but is missing alias or alias address or destinationPort or scheme, skipping alias target generation.`
);
return;
}
const publicProtocol = siteResource.ssl ? "https" : "http";
// also push a match for the alias address
target = {
sourcePrefixes: [],
destPrefix: `${siteResource.aliasAddress}/32`,
rewriteTo: destination,
portRange,
disableIcmp,
resourceId: siteResource.siteResourceId, resourceId: siteResource.siteResourceId,
protocol: publicProtocol,
httpTargets: [
{
destAddr: siteResource.destination,
destPort: siteResource.destinationPort,
scheme: siteResource.scheme
}
]
// tlsCert: "",
// tlsKey: ""
}; };
} }
@@ -719,31 +670,33 @@ export function generateSubnetProxyTargetV2(
return target; return target;
} }
/** /**
* Converts a SubnetProxyTargetV2 to an array of SubnetProxyTarget (v1) * Converts a SubnetProxyTargetV2 to an array of SubnetProxyTarget (v1)
* by expanding each source prefix into its own target entry. * by expanding each source prefix into its own target entry.
* @param targetV2 - The v2 target to convert * @param targetV2 - The v2 target to convert
* @returns Array of v1 SubnetProxyTarget objects * @returns Array of v1 SubnetProxyTarget objects
*/ */
export function convertSubnetProxyTargetsV2ToV1( export function convertSubnetProxyTargetsV2ToV1(
targetsV2: SubnetProxyTargetV2[] targetsV2: SubnetProxyTargetV2[]
): SubnetProxyTarget[] { ): SubnetProxyTarget[] {
return targetsV2.flatMap((targetV2) => return targetsV2.flatMap((targetV2) =>
targetV2.sourcePrefixes.map((sourcePrefix) => ({ targetV2.sourcePrefixes.map((sourcePrefix) => ({
sourcePrefix, sourcePrefix,
destPrefix: targetV2.destPrefix, destPrefix: targetV2.destPrefix,
...(targetV2.disableIcmp !== undefined && { ...(targetV2.disableIcmp !== undefined && {
disableIcmp: targetV2.disableIcmp disableIcmp: targetV2.disableIcmp
}), }),
...(targetV2.rewriteTo !== undefined && { ...(targetV2.rewriteTo !== undefined && {
rewriteTo: targetV2.rewriteTo rewriteTo: targetV2.rewriteTo
}), }),
...(targetV2.portRange !== undefined && { ...(targetV2.portRange !== undefined && {
portRange: targetV2.portRange portRange: targetV2.portRange
}) })
})) }))
); );
} }
// Custom schema for validating port range strings // Custom schema for validating port range strings
// Format: "80,443,8000-9000" or "*" for all ports, or empty string // Format: "80,443,8000-9000" or "*" for all ports, or empty string

View File

@@ -3,7 +3,7 @@ import config from "./config";
import { getHostMeta } from "./hostMeta"; import { getHostMeta } from "./hostMeta";
import logger from "@server/logger"; import logger from "@server/logger";
import { apiKeys, db, roles, siteResources } from "@server/db"; import { apiKeys, db, roles, siteResources } from "@server/db";
import { sites, users, orgs, resources, clients, idp } from "@server/db"; import { sites, users, orgs, resources, clients, idp, siteBandwidth } from "@server/db";
import { eq, count, notInArray, and, isNotNull, isNull } from "drizzle-orm"; import { eq, count, notInArray, and, isNotNull, isNull } from "drizzle-orm";
import { APP_VERSION } from "./consts"; import { APP_VERSION } from "./consts";
import crypto from "crypto"; import crypto from "crypto";
@@ -150,12 +150,13 @@ class TelemetryClient {
const siteDetails = await db const siteDetails = await db
.select({ .select({
siteName: sites.name, siteName: sites.name,
megabytesIn: sites.megabytesIn, megabytesIn: siteBandwidth.megabytesIn,
megabytesOut: sites.megabytesOut, megabytesOut: siteBandwidth.megabytesOut,
type: sites.type, type: sites.type,
online: sites.online online: sites.online
}) })
.from(sites); .from(sites)
.leftJoin(siteBandwidth, eq(siteBandwidth.siteId, sites.siteId));
const supporterKey = config.getSupporterData(); const supporterKey = config.getSupporterData();

View File

@@ -1,277 +0,0 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import fs from "fs";
import crypto from "crypto";
import { certificates, domains, db } from "@server/db";
import { and, eq } from "drizzle-orm";
import { encryptData, decryptData } from "@server/lib/encryption";
import logger from "@server/logger";
import config from "#private/lib/config";
interface AcmeCert {
domain: { main: string; sans?: string[] };
certificate: string;
key: string;
Store: string;
}
interface AcmeJson {
[resolver: string]: {
Certificates: AcmeCert[];
};
}
function getEncryptionKey(): Buffer {
const keyHex = config.getRawPrivateConfig().server.encryption_key;
if (!keyHex) {
throw new Error("acmeCertSync: encryption key is not configured");
}
return Buffer.from(keyHex, "hex");
}
async function findDomainId(certDomain: string): Promise<string | null> {
// Strip wildcard prefix before lookup (*.example.com -> example.com)
const lookupDomain = certDomain.startsWith("*.")
? certDomain.slice(2)
: certDomain;
// 1. Exact baseDomain match (any domain type)
const exactMatch = await db
.select({ domainId: domains.domainId })
.from(domains)
.where(eq(domains.baseDomain, lookupDomain))
.limit(1);
if (exactMatch.length > 0) {
return exactMatch[0].domainId;
}
// 2. Walk up the domain hierarchy looking for a wildcard-type domain whose
// baseDomain is a suffix of the cert domain. e.g. cert "sub.example.com"
// matches a wildcard domain with baseDomain "example.com".
const parts = lookupDomain.split(".");
for (let i = 1; i < parts.length; i++) {
const candidate = parts.slice(i).join(".");
if (!candidate) continue;
const wildcardMatch = await db
.select({ domainId: domains.domainId })
.from(domains)
.where(
and(
eq(domains.baseDomain, candidate),
eq(domains.type, "wildcard")
)
)
.limit(1);
if (wildcardMatch.length > 0) {
return wildcardMatch[0].domainId;
}
}
return null;
}
function extractFirstCert(pemBundle: string): string | null {
const match = pemBundle.match(
/-----BEGIN CERTIFICATE-----[\s\S]+?-----END CERTIFICATE-----/
);
return match ? match[0] : null;
}
/**
 * Mirror ACME certificates from Traefik's acme.json into the
 * `certificates` table.
 *
 * For each certificate stored under `resolver`, this: decodes the
 * base64-encoded PEM cert/key pair, skips entries whose cert matches the
 * (decrypted) one already stored, extracts the expiry from the first cert
 * in the PEM bundle, resolves a matching `domains` row via findDomainId,
 * and then updates or inserts the encrypted PEMs.
 *
 * All failures are non-fatal: unreadable/unparsable files and malformed
 * entries are logged at debug level and skipped so the periodic sync loop
 * keeps running.
 *
 * @param acmeJsonPath filesystem path to acme.json
 * @param resolver key in acme.json whose Certificates array is synced
 */
async function syncAcmeCerts(
    acmeJsonPath: string,
    resolver: string
): Promise<void> {
    // Read the file; a missing/unreadable file is expected before Traefik
    // has issued anything, so only debug-log and bail.
    let raw: string;
    try {
        raw = fs.readFileSync(acmeJsonPath, "utf8");
    } catch (err) {
        logger.debug(
            `acmeCertSync: could not read ${acmeJsonPath}: ${err}`
        );
        return;
    }
    let acmeJson: AcmeJson;
    try {
        acmeJson = JSON.parse(raw);
    } catch (err) {
        logger.debug(`acmeCertSync: could not parse acme.json: ${err}`);
        return;
    }
    const resolverData = acmeJson[resolver];
    if (!resolverData || !Array.isArray(resolverData.Certificates)) {
        logger.debug(
            `acmeCertSync: no certificates found for resolver "${resolver}"`
        );
        return;
    }
    const encryptionKey = getEncryptionKey();
    for (const cert of resolverData.Certificates) {
        const domain = cert.domain?.main;
        if (!domain) {
            logger.debug(
                `acmeCertSync: skipping cert with missing domain`
            );
            continue;
        }
        if (!cert.certificate || !cert.key) {
            logger.debug(
                `acmeCertSync: skipping cert for ${domain} - empty certificate or key field`
            );
            continue;
        }
        // acme.json stores the PEMs base64-encoded; decode before use.
        const certPem = Buffer.from(cert.certificate, "base64").toString(
            "utf8"
        );
        const keyPem = Buffer.from(cert.key, "base64").toString("utf8");
        if (!certPem.trim() || !keyPem.trim()) {
            logger.debug(
                `acmeCertSync: skipping cert for ${domain} - blank PEM after base64 decode`
            );
            continue;
        }
        // Check if cert already exists in DB
        const existing = await db
            .select()
            .from(certificates)
            .where(eq(certificates.domain, domain))
            .limit(1);
        if (existing.length > 0 && existing[0].certFile) {
            // Compare against the decrypted stored cert to avoid rewriting
            // (and re-encrypting) unchanged entries every sync tick.
            try {
                const storedCertPem = decryptData(
                    existing[0].certFile,
                    encryptionKey
                );
                if (storedCertPem === certPem) {
                    logger.debug(
                        `acmeCertSync: cert for ${domain} is unchanged, skipping`
                    );
                    continue;
                }
            } catch (err) {
                // Decryption failure means we should proceed with the update
                logger.debug(
                    `acmeCertSync: could not decrypt stored cert for ${domain}, will update: ${err}`
                );
            }
        }
        // Parse cert expiry from the first cert in the PEM bundle
        let expiresAt: number | null = null;
        const firstCertPem = extractFirstCert(certPem);
        if (firstCertPem) {
            try {
                const x509 = new crypto.X509Certificate(firstCertPem);
                // Store expiry as epoch seconds.
                expiresAt = Math.floor(
                    new Date(x509.validTo).getTime() / 1000
                );
            } catch (err) {
                logger.debug(
                    `acmeCertSync: could not parse cert expiry for ${domain}: ${err}`
                );
            }
        }
        const wildcard = domain.startsWith("*.");
        const encryptedCert = encryptData(certPem, encryptionKey);
        const encryptedKey = encryptData(keyPem, encryptionKey);
        const now = Math.floor(Date.now() / 1000);
        const domainId = await findDomainId(domain);
        if (domainId) {
            logger.debug(
                `acmeCertSync: resolved domainId "${domainId}" for cert domain "${domain}"`
            );
        } else {
            logger.debug(
                `acmeCertSync: no matching domain record found for cert domain "${domain}"`
            );
        }
        if (existing.length > 0) {
            // Update path: only overwrite domainId when a match was found,
            // so an existing association is never cleared by a failed lookup.
            await db
                .update(certificates)
                .set({
                    certFile: encryptedCert,
                    keyFile: encryptedKey,
                    status: "valid",
                    expiresAt,
                    updatedAt: now,
                    wildcard,
                    ...(domainId !== null && { domainId })
                })
                .where(eq(certificates.domain, domain));
            logger.info(
                `acmeCertSync: updated certificate for ${domain} (expires ${expiresAt ? new Date(expiresAt * 1000).toISOString() : "unknown"})`
            );
        } else {
            // Insert path: new row, domainId may be null.
            await db.insert(certificates).values({
                domain,
                domainId,
                certFile: encryptedCert,
                keyFile: encryptedKey,
                status: "valid",
                expiresAt,
                createdAt: now,
                updatedAt: now,
                wildcard
            });
            logger.info(
                `acmeCertSync: inserted new certificate for ${domain} (expires ${expiresAt ? new Date(expiresAt * 1000).toISOString() : "unknown"})`
            );
        }
    }
}
/**
 * Start the periodic ACME certificate sync loop when the
 * `enable_acme_cert_sync` feature flag is set; no-op otherwise.
 *
 * Runs one sync immediately, then repeats on the configured interval.
 * Sync errors are logged and never interrupt the loop.
 */
export function initAcmeCertSync(): void {
    const privateConfig = config.getRawPrivateConfig();
    if (!privateConfig.flags?.enable_acme_cert_sync) {
        return;
    }

    const acmeJsonPath =
        privateConfig.acme?.acme_json_path ?? "config/letsencrypt/acme.json";
    const resolver = privateConfig.acme?.resolver ?? "letsencrypt";
    const intervalMs = privateConfig.acme?.sync_interval_ms ?? 5000;

    logger.info(
        `acmeCertSync: starting ACME cert sync from "${acmeJsonPath}" using resolver "${resolver}" every ${intervalMs}ms`
    );

    // Shared runner: swallow and log rejections so the loop never dies.
    const runSync = (phase: string) =>
        syncAcmeCerts(acmeJsonPath, resolver).catch((err) => {
            logger.error(`acmeCertSync: error during ${phase}: ${err}`);
        });

    void runSync("initial sync");
    setInterval(() => {
        void runSync("sync");
    }, intervalMs);
}

View File

@@ -95,21 +95,10 @@ export const privateConfigSchema = z.object({
.object({ .object({
enable_redis: z.boolean().optional().default(false), enable_redis: z.boolean().optional().default(false),
use_pangolin_dns: z.boolean().optional().default(false), use_pangolin_dns: z.boolean().optional().default(false),
use_org_only_idp: z.boolean().optional(), use_org_only_idp: z.boolean().optional()
enable_acme_cert_sync: z.boolean().optional().default(false)
}) })
.optional() .optional()
.prefault({}), .prefault({}),
acme: z
.object({
acme_json_path: z
.string()
.optional()
.default("config/letsencrypt/acme.json"),
resolver: z.string().optional().default("letsencrypt"),
sync_interval_ms: z.number().optional().default(5000)
})
.optional(),
branding: z branding: z
.object({ .object({
app_name: z.string().optional(), app_name: z.string().optional(),

View File

@@ -33,7 +33,7 @@ import {
} from "drizzle-orm"; } from "drizzle-orm";
import logger from "@server/logger"; import logger from "@server/logger";
import config from "@server/lib/config"; import config from "@server/lib/config";
import { orgs, resources, sites, siteResources, Target, targets } from "@server/db"; import { orgs, resources, sites, Target, targets } from "@server/db";
import { import {
sanitize, sanitize,
encodePath, encodePath,
@@ -267,34 +267,6 @@ export async function getTraefikConfig(
}); });
}); });
// Query siteResources in HTTP mode with SSL enabled and aliases — cert generation / HTTPS edge
const siteResourcesWithAliases = await db
.select({
siteResourceId: siteResources.siteResourceId,
alias: siteResources.alias,
mode: siteResources.mode
})
.from(siteResources)
.innerJoin(sites, eq(sites.siteId, siteResources.siteId))
.where(
and(
eq(siteResources.enabled, true),
isNotNull(siteResources.alias),
eq(siteResources.mode, "http"),
eq(siteResources.ssl, true),
or(
eq(sites.exitNodeId, exitNodeId),
and(
isNull(sites.exitNodeId),
sql`(${siteTypes.includes("local") ? 1 : 0} = 1)`,
eq(sites.type, "local"),
sql`(${build != "saas" ? 1 : 0} = 1)`
)
),
inArray(sites.type, siteTypes)
)
);
let validCerts: CertificateResult[] = []; let validCerts: CertificateResult[] = [];
if (privateConfig.getRawPrivateConfig().flags.use_pangolin_dns) { if (privateConfig.getRawPrivateConfig().flags.use_pangolin_dns) {
// create a list of all domains to get certs for // create a list of all domains to get certs for
@@ -304,12 +276,6 @@ export async function getTraefikConfig(
domains.add(resource.fullDomain); domains.add(resource.fullDomain);
} }
} }
// Include siteResource aliases so pangolin-dns also fetches certs for them
for (const sr of siteResourcesWithAliases) {
if (sr.alias) {
domains.add(sr.alias);
}
}
// get the valid certs for these domains // get the valid certs for these domains
validCerts = await getValidCertificatesForDomains(domains, true); // we are caching here because this is called often validCerts = await getValidCertificatesForDomains(domains, true); // we are caching here because this is called often
// logger.debug(`Valid certs for domains: ${JSON.stringify(validCerts)}`); // logger.debug(`Valid certs for domains: ${JSON.stringify(validCerts)}`);
@@ -901,128 +867,6 @@ export async function getTraefikConfig(
} }
} }
// Add Traefik routes for siteResource aliases (HTTP mode + SSL) so that
// Traefik generates TLS certificates for those domains even when no
// matching resource exists yet.
if (siteResourcesWithAliases.length > 0) {
// Build a set of domains already covered by normal resources
const existingFullDomains = new Set<string>();
for (const resource of resourcesMap.values()) {
if (resource.fullDomain) {
existingFullDomains.add(resource.fullDomain);
}
}
for (const sr of siteResourcesWithAliases) {
if (!sr.alias) continue;
// Skip if this alias is already handled by a resource router
if (existingFullDomains.has(sr.alias)) continue;
const alias = sr.alias;
const srKey = `site-resource-cert-${sr.siteResourceId}`;
const siteResourceServiceName = `${srKey}-service`;
const siteResourceRouterName = `${srKey}-router`;
const siteResourceRewriteMiddlewareName = `${srKey}-rewrite`;
const maintenancePort = config.getRawConfig().server.next_port;
const maintenanceHost =
config.getRawConfig().server.internal_hostname;
if (!config_output.http.routers) {
config_output.http.routers = {};
}
if (!config_output.http.services) {
config_output.http.services = {};
}
if (!config_output.http.middlewares) {
config_output.http.middlewares = {};
}
// Service pointing at the internal maintenance/Next.js page
config_output.http.services[siteResourceServiceName] = {
loadBalancer: {
servers: [
{
url: `http://${maintenanceHost}:${maintenancePort}`
}
],
passHostHeader: true
}
};
// Middleware that rewrites any path to /maintenance-screen
config_output.http.middlewares[
siteResourceRewriteMiddlewareName
] = {
replacePathRegex: {
regex: "^/(.*)",
replacement: "/private-maintenance-screen"
}
};
// HTTP -> HTTPS redirect so the ACME challenge can be served
config_output.http.routers[
`${siteResourceRouterName}-redirect`
] = {
entryPoints: [
config.getRawConfig().traefik.http_entrypoint
],
middlewares: [redirectHttpsMiddlewareName],
service: siteResourceServiceName,
rule: `Host(\`${alias}\`)`,
priority: 100
};
// Determine TLS / cert-resolver configuration
let tls: any = {};
if (
!privateConfig.getRawPrivateConfig().flags.use_pangolin_dns
) {
const domainParts = alias.split(".");
const wildCard =
domainParts.length <= 2
? `*.${domainParts.join(".")}`
: `*.${domainParts.slice(1).join(".")}`;
const globalDefaultResolver =
config.getRawConfig().traefik.cert_resolver;
const globalDefaultPreferWildcard =
config.getRawConfig().traefik.prefer_wildcard_cert;
tls = {
certResolver: globalDefaultResolver,
...(globalDefaultPreferWildcard
? { domains: [{ main: wildCard }] }
: {})
};
} else {
// pangolin-dns: only add route if we already have a valid cert
const matchingCert = validCerts.find(
(cert) => cert.queriedDomain === alias
);
if (!matchingCert) {
logger.debug(
`No matching certificate found for siteResource alias: ${alias}`
);
continue;
}
}
// HTTPS router — presence of this entry triggers cert generation
config_output.http.routers[siteResourceRouterName] = {
entryPoints: [
config.getRawConfig().traefik.https_entrypoint
],
service: siteResourceServiceName,
middlewares: [siteResourceRewriteMiddlewareName],
rule: `Host(\`${alias}\`)`,
priority: 100,
tls
};
}
}
if (generateLoginPageRouters) { if (generateLoginPageRouters) {
const exitNodeLoginPages = await db const exitNodeLoginPages = await db
.select({ .select({

View File

@@ -18,10 +18,11 @@ import {
subscriptionItems, subscriptionItems,
usage, usage,
sites, sites,
siteBandwidth,
customers, customers,
orgs orgs
} from "@server/db"; } from "@server/db";
import { eq, and } from "drizzle-orm"; import { eq, and, inArray } from "drizzle-orm";
import logger from "@server/logger"; import logger from "@server/logger";
import { getFeatureIdByMetricId, getFeatureIdByPriceId } from "@server/lib/billing/features"; import { getFeatureIdByMetricId, getFeatureIdByPriceId } from "@server/lib/billing/features";
import stripe from "#private/lib/stripe"; import stripe from "#private/lib/stripe";
@@ -253,14 +254,19 @@ export async function handleSubscriptionUpdated(
); );
} }
// Also reset the sites to 0 // Also reset the site bandwidth to 0
await trx await trx
.update(sites) .update(siteBandwidth)
.set({ .set({
megabytesIn: 0, megabytesIn: 0,
megabytesOut: 0 megabytesOut: 0
}) })
.where(eq(sites.orgId, orgId)); .where(
inArray(
siteBandwidth.siteId,
trx.select({ siteId: sites.siteId }).from(sites).where(eq(sites.orgId, orgId))
)
);
}); });
} }
} }

View File

@@ -1,4 +1,5 @@
import { import {
clientBandwidth,
clients, clients,
clientSitesAssociationsCache, clientSitesAssociationsCache,
currentFingerprint, currentFingerprint,
@@ -180,8 +181,8 @@ function queryClientsBase() {
name: clients.name, name: clients.name,
pubKey: clients.pubKey, pubKey: clients.pubKey,
subnet: clients.subnet, subnet: clients.subnet,
megabytesIn: clients.megabytesIn, megabytesIn: clientBandwidth.megabytesIn,
megabytesOut: clients.megabytesOut, megabytesOut: clientBandwidth.megabytesOut,
orgName: orgs.name, orgName: orgs.name,
type: clients.type, type: clients.type,
online: clients.online, online: clients.online,
@@ -200,7 +201,8 @@ function queryClientsBase() {
.leftJoin(orgs, eq(clients.orgId, orgs.orgId)) .leftJoin(orgs, eq(clients.orgId, orgs.orgId))
.leftJoin(olms, eq(clients.clientId, olms.clientId)) .leftJoin(olms, eq(clients.clientId, olms.clientId))
.leftJoin(users, eq(clients.userId, users.userId)) .leftJoin(users, eq(clients.userId, users.userId))
.leftJoin(currentFingerprint, eq(olms.olmId, currentFingerprint.olmId)); .leftJoin(currentFingerprint, eq(olms.olmId, currentFingerprint.olmId))
.leftJoin(clientBandwidth, eq(clientBandwidth.clientId, clients.clientId));
} }
async function getSiteAssociations(clientIds: number[]) { async function getSiteAssociations(clientIds: number[]) {
@@ -367,9 +369,15 @@ export async function listClients(
.offset(pageSize * (page - 1)) .offset(pageSize * (page - 1))
.orderBy( .orderBy(
sort_by sort_by
? order === "asc" ? (() => {
? asc(clients[sort_by]) const field =
: desc(clients[sort_by]) sort_by === "megabytesIn"
? clientBandwidth.megabytesIn
: sort_by === "megabytesOut"
? clientBandwidth.megabytesOut
: clients.name;
return order === "asc" ? asc(field) : desc(field);
})()
: asc(clients.name) : asc(clients.name)
); );

View File

@@ -1,5 +1,6 @@
import { build } from "@server/build"; import { build } from "@server/build";
import { import {
clientBandwidth,
clients, clients,
currentFingerprint, currentFingerprint,
db, db,
@@ -211,8 +212,8 @@ function queryUserDevicesBase() {
name: clients.name, name: clients.name,
pubKey: clients.pubKey, pubKey: clients.pubKey,
subnet: clients.subnet, subnet: clients.subnet,
megabytesIn: clients.megabytesIn, megabytesIn: clientBandwidth.megabytesIn,
megabytesOut: clients.megabytesOut, megabytesOut: clientBandwidth.megabytesOut,
orgName: orgs.name, orgName: orgs.name,
type: clients.type, type: clients.type,
online: clients.online, online: clients.online,
@@ -239,7 +240,8 @@ function queryUserDevicesBase() {
.leftJoin(orgs, eq(clients.orgId, orgs.orgId)) .leftJoin(orgs, eq(clients.orgId, orgs.orgId))
.leftJoin(olms, eq(clients.clientId, olms.clientId)) .leftJoin(olms, eq(clients.clientId, olms.clientId))
.leftJoin(users, eq(clients.userId, users.userId)) .leftJoin(users, eq(clients.userId, users.userId))
.leftJoin(currentFingerprint, eq(olms.olmId, currentFingerprint.olmId)); .leftJoin(currentFingerprint, eq(olms.olmId, currentFingerprint.olmId))
.leftJoin(clientBandwidth, eq(clientBandwidth.clientId, clients.clientId));
} }
type OlmWithUpdateAvailable = Awaited< type OlmWithUpdateAvailable = Awaited<
@@ -427,9 +429,15 @@ export async function listUserDevices(
.offset(pageSize * (page - 1)) .offset(pageSize * (page - 1))
.orderBy( .orderBy(
sort_by sort_by
? order === "asc" ? (() => {
? asc(clients[sort_by]) const field =
: desc(clients[sort_by]) sort_by === "megabytesIn"
? clientBandwidth.megabytesIn
: sort_by === "megabytesOut"
? clientBandwidth.megabytesOut
: clients.name;
return order === "asc" ? asc(field) : desc(field);
})()
: asc(clients.clientId) : asc(clients.clientId)
); );

View File

@@ -122,7 +122,7 @@ export async function flushSiteBandwidthToDb(): Promise<void> {
const snapshot = accumulator; const snapshot = accumulator;
accumulator = new Map<string, AccumulatorEntry>(); accumulator = new Map<string, AccumulatorEntry>();
const currentTime = new Date().toISOString(); const currentEpoch = Math.floor(Date.now() / 1000);
// Sort by publicKey for consistent lock ordering across concurrent // Sort by publicKey for consistent lock ordering across concurrent
// writers — deadlock-prevention strategy. // writers — deadlock-prevention strategy.
@@ -157,33 +157,52 @@ export async function flushSiteBandwidthToDb(): Promise<void> {
orgId: string; orgId: string;
pubKey: string; pubKey: string;
}>(sql` }>(sql`
UPDATE sites WITH upsert AS (
SET INSERT INTO "siteBandwidth" ("siteId", "bytesIn", "bytesOut", "lastBandwidthUpdate")
"bytesOut" = COALESCE("bytesOut", 0) + ${bytesIn}, SELECT s."siteId", ${bytesIn}, ${bytesOut}, ${currentEpoch}
"bytesIn" = COALESCE("bytesIn", 0) + ${bytesOut}, FROM "sites" s WHERE s."pubKey" = ${publicKey}
"lastBandwidthUpdate" = ${currentTime} ON CONFLICT ("siteId") DO UPDATE SET
WHERE "pubKey" = ${publicKey} "bytesIn" = COALESCE("siteBandwidth"."bytesIn", 0) + EXCLUDED."bytesIn",
RETURNING "orgId", "pubKey" "bytesOut" = COALESCE("siteBandwidth"."bytesOut", 0) + EXCLUDED."bytesOut",
"lastBandwidthUpdate" = EXCLUDED."lastBandwidthUpdate"
RETURNING "siteId"
)
SELECT u."siteId", s."orgId", s."pubKey"
FROM upsert u
INNER JOIN "sites" s ON s."siteId" = u."siteId"
`); `);
results.push(...result); results.push(...result);
} }
return results; return results;
} }
// PostgreSQL: batch UPDATE … FROM (VALUES …) — single round-trip per chunk. // PostgreSQL: batch UPSERT via CTE — single round-trip per chunk.
const valuesList = chunk.map(([publicKey, { bytesIn, bytesOut }]) => const valuesList = chunk.map(([publicKey, { bytesIn, bytesOut }]) =>
sql`(${publicKey}::text, ${bytesIn}::real, ${bytesOut}::real)` sql`(${publicKey}::text, ${bytesIn}::real, ${bytesOut}::real)`
); );
const valuesClause = sql.join(valuesList, sql`, `); const valuesClause = sql.join(valuesList, sql`, `);
return dbQueryRows<{ orgId: string; pubKey: string }>(sql` return dbQueryRows<{ orgId: string; pubKey: string }>(sql`
UPDATE sites WITH vals(pub_key, bytes_in, bytes_out) AS (
SET VALUES ${valuesClause}
"bytesOut" = COALESCE("bytesOut", 0) + v.bytes_in, ),
"bytesIn" = COALESCE("bytesIn", 0) + v.bytes_out, site_lookup AS (
"lastBandwidthUpdate" = ${currentTime} SELECT s."siteId", s."orgId", s."pubKey", v.bytes_in, v.bytes_out
FROM (VALUES ${valuesClause}) AS v(pub_key, bytes_in, bytes_out) FROM vals v
WHERE sites."pubKey" = v.pub_key INNER JOIN "sites" s ON s."pubKey" = v.pub_key
RETURNING sites."orgId" AS "orgId", sites."pubKey" AS "pubKey" ),
upsert AS (
INSERT INTO "siteBandwidth" ("siteId", "bytesIn", "bytesOut", "lastBandwidthUpdate")
SELECT sl."siteId", sl.bytes_in, sl.bytes_out, ${currentEpoch}::integer
FROM site_lookup sl
ON CONFLICT ("siteId") DO UPDATE SET
"bytesIn" = COALESCE("siteBandwidth"."bytesIn", 0) + EXCLUDED."bytesIn",
"bytesOut" = COALESCE("siteBandwidth"."bytesOut", 0) + EXCLUDED."bytesOut",
"lastBandwidthUpdate" = EXCLUDED."lastBandwidthUpdate"
RETURNING "siteId"
)
SELECT u."siteId", s."orgId", s."pubKey"
FROM upsert u
INNER JOIN "sites" s ON s."siteId" = u."siteId"
`); `);
}, `flush bandwidth chunk [${i}${chunkEnd}]`); }, `flush bandwidth chunk [${i}${chunkEnd}]`);
} catch (error) { } catch (error) {

View File

@@ -1,11 +1,11 @@
import { db, newts, sites, targetHealthCheck, targets } from "@server/db"; import { db, newts, sites, targetHealthCheck, targets, sitePing, siteBandwidth } from "@server/db";
import { import {
hasActiveConnections, hasActiveConnections,
getClientConfigVersion getClientConfigVersion
} from "#dynamic/routers/ws"; } from "#dynamic/routers/ws";
import { MessageHandler } from "@server/routers/ws"; import { MessageHandler } from "@server/routers/ws";
import { Newt } from "@server/db"; import { Newt } from "@server/db";
import { eq, lt, isNull, and, or, ne, not } from "drizzle-orm"; import { eq, lt, isNull, and, or, ne } from "drizzle-orm";
import logger from "@server/logger"; import logger from "@server/logger";
import { sendNewtSyncMessage } from "./sync"; import { sendNewtSyncMessage } from "./sync";
import { recordPing } from "./pingAccumulator"; import { recordPing } from "./pingAccumulator";
@@ -41,17 +41,18 @@ export const startNewtOfflineChecker = (): void => {
.select({ .select({
siteId: sites.siteId, siteId: sites.siteId,
newtId: newts.newtId, newtId: newts.newtId,
lastPing: sites.lastPing lastPing: sitePing.lastPing
}) })
.from(sites) .from(sites)
.innerJoin(newts, eq(newts.siteId, sites.siteId)) .innerJoin(newts, eq(newts.siteId, sites.siteId))
.leftJoin(sitePing, eq(sitePing.siteId, sites.siteId))
.where( .where(
and( and(
eq(sites.online, true), eq(sites.online, true),
eq(sites.type, "newt"), eq(sites.type, "newt"),
or( or(
lt(sites.lastPing, twoMinutesAgo), lt(sitePing.lastPing, twoMinutesAgo),
isNull(sites.lastPing) isNull(sitePing.lastPing)
) )
) )
); );
@@ -112,15 +113,11 @@ export const startNewtOfflineChecker = (): void => {
.select({ .select({
siteId: sites.siteId, siteId: sites.siteId,
online: sites.online, online: sites.online,
lastBandwidthUpdate: sites.lastBandwidthUpdate lastBandwidthUpdate: siteBandwidth.lastBandwidthUpdate
}) })
.from(sites) .from(sites)
.where( .innerJoin(siteBandwidth, eq(siteBandwidth.siteId, sites.siteId))
and( .where(eq(sites.type, "wireguard"));
eq(sites.type, "wireguard"),
not(isNull(sites.lastBandwidthUpdate))
)
);
const wireguardOfflineThreshold = Math.floor( const wireguardOfflineThreshold = Math.floor(
(Date.now() - OFFLINE_THRESHOLD_BANDWIDTH_MS) / 1000 (Date.now() - OFFLINE_THRESHOLD_BANDWIDTH_MS) / 1000
@@ -128,12 +125,7 @@ export const startNewtOfflineChecker = (): void => {
// loop over each one. If its offline and there is a new update then mark it online. If its online and there is no update then mark it offline // loop over each one. If its offline and there is a new update then mark it online. If its online and there is no update then mark it offline
for (const site of allWireguardSites) { for (const site of allWireguardSites) {
const lastBandwidthUpdate = if ((site.lastBandwidthUpdate ?? 0) < wireguardOfflineThreshold && site.online) {
new Date(site.lastBandwidthUpdate!).getTime() / 1000;
if (
lastBandwidthUpdate < wireguardOfflineThreshold &&
site.online
) {
logger.info( logger.info(
`Marking wireguard site ${site.siteId} offline: no bandwidth update in over ${OFFLINE_THRESHOLD_BANDWIDTH_MS / 60000} minutes` `Marking wireguard site ${site.siteId} offline: no bandwidth update in over ${OFFLINE_THRESHOLD_BANDWIDTH_MS / 60000} minutes`
); );
@@ -142,10 +134,7 @@ export const startNewtOfflineChecker = (): void => {
.update(sites) .update(sites)
.set({ online: false }) .set({ online: false })
.where(eq(sites.siteId, site.siteId)); .where(eq(sites.siteId, site.siteId));
} else if ( } else if ((site.lastBandwidthUpdate ?? 0) >= wireguardOfflineThreshold && !site.online) {
lastBandwidthUpdate >= wireguardOfflineThreshold &&
!site.online
) {
logger.info( logger.info(
`Marking wireguard site ${site.siteId} online: recent bandwidth update` `Marking wireguard site ${site.siteId} online: recent bandwidth update`
); );

View File

@@ -1,6 +1,5 @@
import { db } from "@server/db"; import { db, clients, clientBandwidth } from "@server/db";
import { MessageHandler } from "@server/routers/ws"; import { MessageHandler } from "@server/routers/ws";
import { clients } from "@server/db";
import { eq, sql } from "drizzle-orm"; import { eq, sql } from "drizzle-orm";
import logger from "@server/logger"; import logger from "@server/logger";
@@ -85,7 +84,7 @@ export async function flushBandwidthToDb(): Promise<void> {
const snapshot = accumulator; const snapshot = accumulator;
accumulator = new Map<string, BandwidthAccumulator>(); accumulator = new Map<string, BandwidthAccumulator>();
const currentTime = new Date().toISOString(); const currentEpoch = Math.floor(Date.now() / 1000);
// Sort by publicKey for consistent lock ordering across concurrent // Sort by publicKey for consistent lock ordering across concurrent
// writers — this is the same deadlock-prevention strategy used in the // writers — this is the same deadlock-prevention strategy used in the
@@ -101,19 +100,37 @@ export async function flushBandwidthToDb(): Promise<void> {
for (const [publicKey, { bytesIn, bytesOut }] of sortedEntries) { for (const [publicKey, { bytesIn, bytesOut }] of sortedEntries) {
try { try {
await withDeadlockRetry(async () => { await withDeadlockRetry(async () => {
// Use atomic SQL increment to avoid the SELECT-then-UPDATE // Find clientId by pubKey
// anti-pattern and the races it would introduce. const [clientRow] = await db
.select({ clientId: clients.clientId })
.from(clients)
.where(eq(clients.pubKey, publicKey))
.limit(1);
if (!clientRow) {
logger.warn(`No client found for pubKey ${publicKey}, skipping`);
return;
}
await db await db
.update(clients) .insert(clientBandwidth)
.set({ .values({
clientId: clientRow.clientId,
// Note: bytesIn from peer goes to megabytesOut (data // Note: bytesIn from peer goes to megabytesOut (data
// sent to client) and bytesOut from peer goes to // sent to client) and bytesOut from peer goes to
// megabytesIn (data received from client). // megabytesIn (data received from client).
megabytesOut: sql`COALESCE(${clients.megabytesOut}, 0) + ${bytesIn}`, megabytesOut: bytesIn,
megabytesIn: sql`COALESCE(${clients.megabytesIn}, 0) + ${bytesOut}`, megabytesIn: bytesOut,
lastBandwidthUpdate: currentTime lastBandwidthUpdate: currentEpoch
}) })
.where(eq(clients.pubKey, publicKey)); .onConflictDoUpdate({
target: clientBandwidth.clientId,
set: {
megabytesOut: sql`COALESCE(${clientBandwidth.megabytesOut}, 0) + ${bytesIn}`,
megabytesIn: sql`COALESCE(${clientBandwidth.megabytesIn}, 0) + ${bytesOut}`,
lastBandwidthUpdate: currentEpoch
}
});
}, `flush bandwidth for client ${publicKey}`); }, `flush bandwidth for client ${publicKey}`);
} catch (error) { } catch (error) {
logger.error( logger.error(

View File

@@ -1,6 +1,6 @@
import { db } from "@server/db"; import { db } from "@server/db";
import { sites, clients, olms } from "@server/db"; import { sites, clients, olms, sitePing, clientPing } from "@server/db";
import { inArray } from "drizzle-orm"; import { inArray, sql } from "drizzle-orm";
import logger from "@server/logger"; import logger from "@server/logger";
/** /**
@@ -81,11 +81,8 @@ export function recordClientPing(
/** /**
* Flush all accumulated site pings to the database. * Flush all accumulated site pings to the database.
* *
* Each batch of up to BATCH_SIZE rows is written with a **single** UPDATE * For each batch: first upserts individual per-site timestamps into
* statement. We use the maximum timestamp across the batch so that `lastPing` * `sitePing`, then bulk-updates `sites.online = true`.
* reflects the most recent ping seen for any site in the group. This avoids
* the multi-statement transaction that previously created additional
* row-lock ordering hazards.
*/ */
async function flushSitePingsToDb(): Promise<void> { async function flushSitePingsToDb(): Promise<void> {
if (pendingSitePings.size === 0) { if (pendingSitePings.size === 0) {
@@ -103,20 +100,25 @@ async function flushSitePingsToDb(): Promise<void> {
for (let i = 0; i < entries.length; i += BATCH_SIZE) { for (let i = 0; i < entries.length; i += BATCH_SIZE) {
const batch = entries.slice(i, i + BATCH_SIZE); const batch = entries.slice(i, i + BATCH_SIZE);
// Use the latest timestamp in the batch so that `lastPing` always
// moves forward. Using a single timestamp for the whole batch means
// we only ever need one UPDATE statement (no transaction).
const maxTimestamp = Math.max(...batch.map(([, ts]) => ts));
const siteIds = batch.map(([id]) => id); const siteIds = batch.map(([id]) => id);
try { try {
await withRetry(async () => { await withRetry(async () => {
const rows = batch.map(([siteId, ts]) => ({ siteId, lastPing: ts }));
// Step 1: Upsert ping timestamps into sitePing
await db
.insert(sitePing)
.values(rows)
.onConflictDoUpdate({
target: sitePing.siteId,
set: { lastPing: sql`excluded."lastPing"` }
});
// Step 2: Update online status on sites
await db await db
.update(sites) .update(sites)
.set({ .set({ online: true })
online: true,
lastPing: maxTimestamp
})
.where(inArray(sites.siteId, siteIds)); .where(inArray(sites.siteId, siteIds));
}, "flushSitePingsToDb"); }, "flushSitePingsToDb");
} catch (error) { } catch (error) {
@@ -139,7 +141,8 @@ async function flushSitePingsToDb(): Promise<void> {
/** /**
* Flush all accumulated client (OLM) pings to the database. * Flush all accumulated client (OLM) pings to the database.
* *
* Same single-UPDATE-per-batch approach as `flushSitePingsToDb`. * For each batch: first upserts individual per-client timestamps into
* `clientPing`, then bulk-updates `clients.online = true, archived = false`.
*/ */
async function flushClientPingsToDb(): Promise<void> { async function flushClientPingsToDb(): Promise<void> {
if (pendingClientPings.size === 0 && pendingOlmArchiveResets.size === 0) { if (pendingClientPings.size === 0 && pendingOlmArchiveResets.size === 0) {
@@ -161,18 +164,25 @@ async function flushClientPingsToDb(): Promise<void> {
for (let i = 0; i < entries.length; i += BATCH_SIZE) { for (let i = 0; i < entries.length; i += BATCH_SIZE) {
const batch = entries.slice(i, i + BATCH_SIZE); const batch = entries.slice(i, i + BATCH_SIZE);
const maxTimestamp = Math.max(...batch.map(([, ts]) => ts));
const clientIds = batch.map(([id]) => id); const clientIds = batch.map(([id]) => id);
try { try {
await withRetry(async () => { await withRetry(async () => {
const rows = batch.map(([clientId, ts]) => ({ clientId, lastPing: ts }));
// Step 1: Upsert ping timestamps into clientPing
await db
.insert(clientPing)
.values(rows)
.onConflictDoUpdate({
target: clientPing.clientId,
set: { lastPing: sql`excluded."lastPing"` }
});
// Step 2: Update online + unarchive on clients
await db await db
.update(clients) .update(clients)
.set({ .set({ online: true, archived: false })
lastPing: maxTimestamp,
online: true,
archived: false
})
.where(inArray(clients.clientId, clientIds)); .where(inArray(clients.clientId, clientIds));
}, "flushClientPingsToDb"); }, "flushClientPingsToDb");
} catch (error) { } catch (error) {

View File

@@ -1,8 +1,8 @@
import { disconnectClient, getClientConfigVersion } from "#dynamic/routers/ws"; import { disconnectClient, getClientConfigVersion } from "#dynamic/routers/ws";
import { db } from "@server/db"; import { db } from "@server/db";
import { MessageHandler } from "@server/routers/ws"; import { MessageHandler } from "@server/routers/ws";
import { clients, olms, Olm } from "@server/db"; import { clients, olms, Olm, clientPing } from "@server/db";
import { eq, lt, isNull, and, or } from "drizzle-orm"; import { eq, lt, isNull, and, or, inArray } from "drizzle-orm";
import { recordClientPing } from "@server/routers/newt/pingAccumulator"; import { recordClientPing } from "@server/routers/newt/pingAccumulator";
import logger from "@server/logger"; import logger from "@server/logger";
import { validateSessionToken } from "@server/auth/sessions/app"; import { validateSessionToken } from "@server/auth/sessions/app";
@@ -37,21 +37,33 @@ export const startOlmOfflineChecker = (): void => {
// TODO: WE NEED TO MAKE SURE THIS WORKS WITH DISTRIBUTED NODES ALL DOING THE SAME THING // TODO: WE NEED TO MAKE SURE THIS WORKS WITH DISTRIBUTED NODES ALL DOING THE SAME THING
// Find clients that haven't pinged in the last 2 minutes and mark them as offline // Find clients that haven't pinged in the last 2 minutes and mark them as offline
const offlineClients = await db const staleClientRows = await db
.update(clients) .select({
.set({ online: false }) clientId: clients.clientId,
olmId: clients.olmId,
lastPing: clientPing.lastPing
})
.from(clients)
.leftJoin(clientPing, eq(clientPing.clientId, clients.clientId))
.where( .where(
and( and(
eq(clients.online, true), eq(clients.online, true),
or( or(
lt(clients.lastPing, twoMinutesAgo), lt(clientPing.lastPing, twoMinutesAgo),
isNull(clients.lastPing) isNull(clientPing.lastPing)
) )
) )
) );
.returning();
for (const offlineClient of offlineClients) { if (staleClientRows.length > 0) {
const staleClientIds = staleClientRows.map((c) => c.clientId);
await db
.update(clients)
.set({ online: false })
.where(inArray(clients.clientId, staleClientIds));
}
for (const offlineClient of staleClientRows) {
logger.info( logger.info(
`Kicking offline olm client ${offlineClient.clientId} due to inactivity` `Kicking offline olm client ${offlineClient.clientId} due to inactivity`
); );

View File

@@ -1,7 +1,7 @@
import { NextFunction, Request, Response } from "express"; import { NextFunction, Request, Response } from "express";
import { z } from "zod"; import { z } from "zod";
import { db, sites } from "@server/db"; import { db, sites, siteBandwidth } from "@server/db";
import { eq } from "drizzle-orm"; import { eq, inArray } from "drizzle-orm";
import response from "@server/lib/response"; import response from "@server/lib/response";
import HttpCode from "@server/types/HttpCode"; import HttpCode from "@server/types/HttpCode";
import createHttpError from "http-errors"; import createHttpError from "http-errors";
@@ -60,12 +60,17 @@ export async function resetOrgBandwidth(
} }
await db await db
.update(sites) .update(siteBandwidth)
.set({ .set({
megabytesIn: 0, megabytesIn: 0,
megabytesOut: 0 megabytesOut: 0
}) })
.where(eq(sites.orgId, orgId)); .where(
inArray(
siteBandwidth.siteId,
db.select({ siteId: sites.siteId }).from(sites).where(eq(sites.orgId, orgId))
)
);
return response(res, { return response(res, {
data: {}, data: {},

View File

@@ -144,7 +144,7 @@ export async function getUserResources(
name: string; name: string;
destination: string; destination: string;
mode: string; mode: string;
scheme: string | null; protocol: string | null;
enabled: boolean; enabled: boolean;
alias: string | null; alias: string | null;
aliasAddress: string | null; aliasAddress: string | null;
@@ -156,7 +156,7 @@ export async function getUserResources(
name: siteResources.name, name: siteResources.name,
destination: siteResources.destination, destination: siteResources.destination,
mode: siteResources.mode, mode: siteResources.mode,
scheme: siteResources.scheme, protocol: siteResources.protocol,
enabled: siteResources.enabled, enabled: siteResources.enabled,
alias: siteResources.alias, alias: siteResources.alias,
aliasAddress: siteResources.aliasAddress aliasAddress: siteResources.aliasAddress
@@ -240,7 +240,7 @@ export async function getUserResources(
name: siteResource.name, name: siteResource.name,
destination: siteResource.destination, destination: siteResource.destination,
mode: siteResource.mode, mode: siteResource.mode,
protocol: siteResource.scheme, protocol: siteResource.protocol,
enabled: siteResource.enabled, enabled: siteResource.enabled,
alias: siteResource.alias, alias: siteResource.alias,
aliasAddress: siteResource.aliasAddress, aliasAddress: siteResource.aliasAddress,
@@ -289,7 +289,7 @@ export type GetUserResourcesResponse = {
enabled: boolean; enabled: boolean;
alias: string | null; alias: string | null;
aliasAddress: string | null; aliasAddress: string | null;
type: "site"; type: 'site';
}>; }>;
}; };
}; };

View File

@@ -6,6 +6,7 @@ import {
remoteExitNodes, remoteExitNodes,
roleSites, roleSites,
sites, sites,
siteBandwidth,
userSites userSites
} from "@server/db"; } from "@server/db";
import cache from "#dynamic/lib/cache"; import cache from "#dynamic/lib/cache";
@@ -155,8 +156,8 @@ function querySitesBase() {
name: sites.name, name: sites.name,
pubKey: sites.pubKey, pubKey: sites.pubKey,
subnet: sites.subnet, subnet: sites.subnet,
megabytesIn: sites.megabytesIn, megabytesIn: siteBandwidth.megabytesIn,
megabytesOut: sites.megabytesOut, megabytesOut: siteBandwidth.megabytesOut,
orgName: orgs.name, orgName: orgs.name,
type: sites.type, type: sites.type,
online: sites.online, online: sites.online,
@@ -175,7 +176,8 @@ function querySitesBase() {
.leftJoin( .leftJoin(
remoteExitNodes, remoteExitNodes,
eq(remoteExitNodes.exitNodeId, sites.exitNodeId) eq(remoteExitNodes.exitNodeId, sites.exitNodeId)
); )
.leftJoin(siteBandwidth, eq(siteBandwidth.siteId, sites.siteId));
} }
type SiteWithUpdateAvailable = Awaited<ReturnType<typeof querySitesBase>>[0] & { type SiteWithUpdateAvailable = Awaited<ReturnType<typeof querySitesBase>>[0] & {
@@ -299,9 +301,15 @@ export async function listSites(
.offset(pageSize * (page - 1)) .offset(pageSize * (page - 1))
.orderBy( .orderBy(
sort_by sort_by
? order === "asc" ? (() => {
? asc(sites[sort_by]) const field =
: desc(sites[sort_by]) sort_by === "megabytesIn"
? siteBandwidth.megabytesIn
: sort_by === "megabytesOut"
? siteBandwidth.megabytesOut
: sites.name;
return order === "asc" ? asc(field) : desc(field);
})()
: asc(sites.name) : asc(sites.name)
); );

View File

@@ -36,12 +36,11 @@ const createSiteResourceParamsSchema = z.strictObject({
const createSiteResourceSchema = z const createSiteResourceSchema = z
.strictObject({ .strictObject({
name: z.string().min(1).max(255), name: z.string().min(1).max(255),
mode: z.enum(["host", "cidr", "http"]), mode: z.enum(["host", "cidr", "port"]),
ssl: z.boolean().optional(), // only used for http mode
siteId: z.int(), siteId: z.int(),
scheme: z.enum(["http", "https"]).optional(), // protocol: z.enum(["tcp", "udp"]).optional(),
// proxyPort: z.int().positive().optional(), // proxyPort: z.int().positive().optional(),
destinationPort: z.int().positive().optional(), // destinationPort: z.int().positive().optional(),
destination: z.string().min(1), destination: z.string().min(1),
enabled: z.boolean().default(true), enabled: z.boolean().default(true),
alias: z alias: z
@@ -63,20 +62,15 @@ const createSiteResourceSchema = z
.strict() .strict()
.refine( .refine(
(data) => { (data) => {
if ( if (data.mode === "host") {
data.mode === "host" || // Check if it's a valid IP address using zod (v4 or v6)
data.mode == "http" const isValidIP = z
) { // .union([z.ipv4(), z.ipv6()])
if (data.mode == "host") { .union([z.ipv4()]) // for now lets just do ipv4 until we verify ipv6 works everywhere
// Check if it's a valid IP address using zod (v4 or v6) .safeParse(data.destination).success;
const isValidIP = z
// .union([z.ipv4(), z.ipv6()])
.union([z.ipv4()]) // for now lets just do ipv4 until we verify ipv6 works everywhere
.safeParse(data.destination).success;
if (isValidIP) { if (isValidIP) {
return true; return true;
}
} }
// Check if it's a valid domain (hostname pattern, TLD not required) // Check if it's a valid domain (hostname pattern, TLD not required)
@@ -111,21 +105,6 @@ const createSiteResourceSchema = z
{ {
message: "Destination must be a valid CIDR notation for cidr mode" message: "Destination must be a valid CIDR notation for cidr mode"
} }
)
.refine(
(data) => {
if (data.mode !== "http") return true;
return (
data.scheme !== undefined &&
data.destinationPort !== undefined &&
data.destinationPort >= 1 &&
data.destinationPort <= 65535
);
},
{
message:
"HTTP mode requires scheme (http or https) and a valid destination port"
}
); );
export type CreateSiteResourceBody = z.infer<typeof createSiteResourceSchema>; export type CreateSiteResourceBody = z.infer<typeof createSiteResourceSchema>;
@@ -182,12 +161,11 @@ export async function createSiteResource(
name, name,
siteId, siteId,
mode, mode,
scheme, // protocol,
// proxyPort, // proxyPort,
destinationPort, // destinationPort,
destination, destination,
enabled, enabled,
ssl,
alias, alias,
userIds, userIds,
roleIds, roleIds,
@@ -248,6 +226,30 @@ export async function createSiteResource(
); );
} }
// // check if resource with same protocol and proxy port already exists (only for port mode)
// if (mode === "port" && protocol && proxyPort) {
// const [existingResource] = await db
// .select()
// .from(siteResources)
// .where(
// and(
// eq(siteResources.siteId, siteId),
// eq(siteResources.orgId, orgId),
// eq(siteResources.protocol, protocol),
// eq(siteResources.proxyPort, proxyPort)
// )
// )
// .limit(1);
// if (existingResource && existingResource.siteResourceId) {
// return next(
// createHttpError(
// HttpCode.CONFLICT,
// "A resource with the same protocol and proxy port already exists"
// )
// );
// }
// }
// make sure the alias is unique within the org if provided // make sure the alias is unique within the org if provided
if (alias) { if (alias) {
const [conflict] = await db const [conflict] = await db
@@ -278,7 +280,8 @@ export async function createSiteResource(
const niceId = await getUniqueSiteResourceName(orgId); const niceId = await getUniqueSiteResourceName(orgId);
let aliasAddress: string | null = null; let aliasAddress: string | null = null;
if (mode === "host" || mode === "http") { if (mode == "host") {
// we can only have an alias on a host
aliasAddress = await getNextAvailableAliasAddress(orgId); aliasAddress = await getNextAvailableAliasAddress(orgId);
} }
@@ -290,11 +293,8 @@ export async function createSiteResource(
niceId, niceId,
orgId, orgId,
name, name,
mode, mode: mode as "host" | "cidr",
ssl,
destination, destination,
scheme,
destinationPort,
enabled, enabled,
alias, alias,
aliasAddress, aliasAddress,

View File

@@ -41,12 +41,12 @@ const listAllSiteResourcesByOrgQuerySchema = z.object({
}), }),
query: z.string().optional(), query: z.string().optional(),
mode: z mode: z
.enum(["host", "cidr", "http"]) .enum(["host", "cidr"])
.optional() .optional()
.catch(undefined) .catch(undefined)
.openapi({ .openapi({
type: "string", type: "string",
enum: ["host", "cidr", "http"], enum: ["host", "cidr"],
description: "Filter site resources by mode" description: "Filter site resources by mode"
}), }),
sort_by: z sort_by: z
@@ -88,8 +88,7 @@ function querySiteResourcesBase() {
niceId: siteResources.niceId, niceId: siteResources.niceId,
name: siteResources.name, name: siteResources.name,
mode: siteResources.mode, mode: siteResources.mode,
ssl: siteResources.ssl, protocol: siteResources.protocol,
scheme: siteResources.scheme,
proxyPort: siteResources.proxyPort, proxyPort: siteResources.proxyPort,
destinationPort: siteResources.destinationPort, destinationPort: siteResources.destinationPort,
destination: siteResources.destination, destination: siteResources.destination,
@@ -194,9 +193,7 @@ export async function listAllSiteResourcesByOrg(
const baseQuery = querySiteResourcesBase().where(and(...conditions)); const baseQuery = querySiteResourcesBase().where(and(...conditions));
const countQuery = db.$count( const countQuery = db.$count(
querySiteResourcesBase() querySiteResourcesBase().where(and(...conditions)).as("filtered_site_resources")
.where(and(...conditions))
.as("filtered_site_resources")
); );
const [siteResourcesList, totalCount] = await Promise.all([ const [siteResourcesList, totalCount] = await Promise.all([

View File

@@ -51,11 +51,10 @@ const updateSiteResourceSchema = z
) )
.optional(), .optional(),
// mode: z.enum(["host", "cidr", "port"]).optional(), // mode: z.enum(["host", "cidr", "port"]).optional(),
mode: z.enum(["host", "cidr", "http"]).optional(), mode: z.enum(["host", "cidr"]).optional(),
ssl: z.boolean().optional(), // protocol: z.enum(["tcp", "udp"]).nullish(),
scheme: z.enum(["http", "https"]).nullish(),
// proxyPort: z.int().positive().nullish(), // proxyPort: z.int().positive().nullish(),
destinationPort: z.int().positive().nullish(), // destinationPort: z.int().positive().nullish(),
destination: z.string().min(1).optional(), destination: z.string().min(1).optional(),
enabled: z.boolean().optional(), enabled: z.boolean().optional(),
alias: z alias: z
@@ -77,20 +76,14 @@ const updateSiteResourceSchema = z
.strict() .strict()
.refine( .refine(
(data) => { (data) => {
if ( if (data.mode === "host" && data.destination) {
(data.mode === "host" || const isValidIP = z
data.mode == "http") && // .union([z.ipv4(), z.ipv6()])
data.destination .union([z.ipv4()]) // for now lets just do ipv4 until we verify ipv6 works everywhere
) { .safeParse(data.destination).success;
if (data.mode == "host") {
const isValidIP = z
// .union([z.ipv4(), z.ipv6()])
.union([z.ipv4()]) // for now lets just do ipv4 until we verify ipv6 works everywhere
.safeParse(data.destination).success;
if (isValidIP) { if (isValidIP) {
return true; return true;
}
} }
// Check if it's a valid domain (hostname pattern, TLD not required) // Check if it's a valid domain (hostname pattern, TLD not required)
@@ -125,23 +118,6 @@ const updateSiteResourceSchema = z
{ {
message: "Destination must be a valid CIDR notation for cidr mode" message: "Destination must be a valid CIDR notation for cidr mode"
} }
)
.refine(
(data) => {
if (data.mode !== "http") return true;
return (
data.scheme !== undefined &&
data.scheme !== null &&
data.destinationPort !== undefined &&
data.destinationPort !== null &&
data.destinationPort >= 1 &&
data.destinationPort <= 65535
);
},
{
message:
"HTTP mode requires scheme (http or https) and a valid destination port"
}
); );
export type UpdateSiteResourceBody = z.infer<typeof updateSiteResourceSchema>; export type UpdateSiteResourceBody = z.infer<typeof updateSiteResourceSchema>;
@@ -199,11 +175,8 @@ export async function updateSiteResource(
siteId, // because it can change siteId, // because it can change
niceId, niceId,
mode, mode,
scheme,
destination, destination,
destinationPort,
alias, alias,
ssl,
enabled, enabled,
userIds, userIds,
roleIds, roleIds,
@@ -373,10 +346,7 @@ export async function updateSiteResource(
siteId, siteId,
niceId, niceId,
mode, mode,
scheme,
ssl,
destination, destination,
destinationPort,
enabled, enabled,
alias: alias && alias.trim() ? alias : null, alias: alias && alias.trim() ? alias : null,
tcpPortRangeString, tcpPortRangeString,
@@ -479,10 +449,7 @@ export async function updateSiteResource(
name: name, name: name,
siteId: siteId, siteId: siteId,
mode: mode, mode: mode,
scheme,
ssl,
destination: destination, destination: destination,
destinationPort: destinationPort,
enabled: enabled, enabled: enabled,
alias: alias && alias.trim() ? alias : null, alias: alias && alias.trim() ? alias : null,
tcpPortRangeString: tcpPortRangeString, tcpPortRangeString: tcpPortRangeString,

View File

@@ -22,6 +22,7 @@ import m13 from "./scriptsPg/1.15.3";
import m14 from "./scriptsPg/1.15.4"; import m14 from "./scriptsPg/1.15.4";
import m15 from "./scriptsPg/1.16.0"; import m15 from "./scriptsPg/1.16.0";
import m16 from "./scriptsPg/1.17.0"; import m16 from "./scriptsPg/1.17.0";
import m17 from "./scriptsPg/1.18.0";
// THIS CANNOT IMPORT ANYTHING FROM THE SERVER // THIS CANNOT IMPORT ANYTHING FROM THE SERVER
// EXCEPT FOR THE DATABASE AND THE SCHEMA // EXCEPT FOR THE DATABASE AND THE SCHEMA
@@ -43,7 +44,8 @@ const migrations = [
{ version: "1.15.3", run: m13 }, { version: "1.15.3", run: m13 },
{ version: "1.15.4", run: m14 }, { version: "1.15.4", run: m14 },
{ version: "1.16.0", run: m15 }, { version: "1.16.0", run: m15 },
{ version: "1.17.0", run: m16 } { version: "1.17.0", run: m16 },
{ version: "1.18.0", run: m17 }
// Add new migrations here as they are created // Add new migrations here as they are created
] as { ] as {
version: string; version: string;

View File

@@ -40,6 +40,7 @@ import m34 from "./scriptsSqlite/1.15.3";
import m35 from "./scriptsSqlite/1.15.4"; import m35 from "./scriptsSqlite/1.15.4";
import m36 from "./scriptsSqlite/1.16.0"; import m36 from "./scriptsSqlite/1.16.0";
import m37 from "./scriptsSqlite/1.17.0"; import m37 from "./scriptsSqlite/1.17.0";
import m38 from "./scriptsSqlite/1.18.0";
// THIS CANNOT IMPORT ANYTHING FROM THE SERVER // THIS CANNOT IMPORT ANYTHING FROM THE SERVER
// EXCEPT FOR THE DATABASE AND THE SCHEMA // EXCEPT FOR THE DATABASE AND THE SCHEMA
@@ -77,7 +78,8 @@ const migrations = [
{ version: "1.15.3", run: m34 }, { version: "1.15.3", run: m34 },
{ version: "1.15.4", run: m35 }, { version: "1.15.4", run: m35 },
{ version: "1.16.0", run: m36 }, { version: "1.16.0", run: m36 },
{ version: "1.17.0", run: m37 } { version: "1.17.0", run: m37 },
{ version: "1.18.0", run: m38 }
// Add new migrations here as they are created // Add new migrations here as they are created
] as const; ] as const;

View File

@@ -235,9 +235,7 @@ export default async function migration() {
for (const row of existingUserInviteRoles) { for (const row of existingUserInviteRoles) {
await db.execute(sql` await db.execute(sql`
INSERT INTO "userInviteRoles" ("inviteId", "roleId") INSERT INTO "userInviteRoles" ("inviteId", "roleId")
SELECT ${row.inviteId}, ${row.roleId} VALUES (${row.inviteId}, ${row.roleId})
WHERE EXISTS (SELECT 1 FROM "userInvites" WHERE "inviteId" = ${row.inviteId})
AND EXISTS (SELECT 1 FROM "roles" WHERE "roleId" = ${row.roleId})
ON CONFLICT DO NOTHING ON CONFLICT DO NOTHING
`); `);
} }
@@ -260,10 +258,7 @@ export default async function migration() {
for (const row of existingUserOrgRoles) { for (const row of existingUserOrgRoles) {
await db.execute(sql` await db.execute(sql`
INSERT INTO "userOrgRoles" ("userId", "orgId", "roleId") INSERT INTO "userOrgRoles" ("userId", "orgId", "roleId")
SELECT ${row.userId}, ${row.orgId}, ${row.roleId} VALUES (${row.userId}, ${row.orgId}, ${row.roleId})
WHERE EXISTS (SELECT 1 FROM "user" WHERE "id" = ${row.userId})
AND EXISTS (SELECT 1 FROM "orgs" WHERE "orgId" = ${row.orgId})
AND EXISTS (SELECT 1 FROM "roles" WHERE "roleId" = ${row.roleId})
ON CONFLICT DO NOTHING ON CONFLICT DO NOTHING
`); `);
} }

View File

@@ -145,7 +145,7 @@ export default async function migration() {
).run(); ).run();
db.prepare( db.prepare(
`INSERT INTO '__new_userOrgs'("userId", "orgId", "isOwner", "autoProvisioned", "pamUsername") SELECT "userId", "orgId", "isOwner", "autoProvisioned", "pamUsername" FROM 'userOrgs' WHERE EXISTS (SELECT 1 FROM 'user' WHERE id = userOrgs.userId) AND EXISTS (SELECT 1 FROM 'orgs' WHERE orgId = userOrgs.orgId);` `INSERT INTO '__new_userOrgs'("userId", "orgId", "isOwner", "autoProvisioned", "pamUsername") SELECT "userId", "orgId", "isOwner", "autoProvisioned", "pamUsername" FROM 'userOrgs';`
).run(); ).run();
db.prepare(`DROP TABLE 'userOrgs';`).run(); db.prepare(`DROP TABLE 'userOrgs';`).run();
db.prepare( db.prepare(
@@ -246,15 +246,12 @@ export default async function migration() {
// Re-insert the preserved invite role assignments into the new userInviteRoles table // Re-insert the preserved invite role assignments into the new userInviteRoles table
if (existingUserInviteRoles.length > 0) { if (existingUserInviteRoles.length > 0) {
const insertUserInviteRole = db.prepare( const insertUserInviteRole = db.prepare(
`INSERT OR IGNORE INTO 'userInviteRoles' ("inviteId", "roleId") `INSERT OR IGNORE INTO 'userInviteRoles' ("inviteId", "roleId") VALUES (?, ?)`
SELECT ?, ?
WHERE EXISTS (SELECT 1 FROM 'userInvites' WHERE inviteId = ?)
AND EXISTS (SELECT 1 FROM 'roles' WHERE roleId = ?)`
); );
const insertAll = db.transaction(() => { const insertAll = db.transaction(() => {
for (const row of existingUserInviteRoles) { for (const row of existingUserInviteRoles) {
insertUserInviteRole.run(row.inviteId, row.roleId, row.inviteId, row.roleId); insertUserInviteRole.run(row.inviteId, row.roleId);
} }
}); });
@@ -268,16 +265,12 @@ export default async function migration() {
// Re-insert the preserved role assignments into the new userOrgRoles table // Re-insert the preserved role assignments into the new userOrgRoles table
if (existingUserOrgRoles.length > 0) { if (existingUserOrgRoles.length > 0) {
const insertUserOrgRole = db.prepare( const insertUserOrgRole = db.prepare(
`INSERT OR IGNORE INTO 'userOrgRoles' ("userId", "orgId", "roleId") `INSERT OR IGNORE INTO 'userOrgRoles' ("userId", "orgId", "roleId") VALUES (?, ?, ?)`
SELECT ?, ?, ?
WHERE EXISTS (SELECT 1 FROM 'user' WHERE id = ?)
AND EXISTS (SELECT 1 FROM 'orgs' WHERE orgId = ?)
AND EXISTS (SELECT 1 FROM 'roles' WHERE roleId = ?)`
); );
const insertAll = db.transaction(() => { const insertAll = db.transaction(() => {
for (const row of existingUserOrgRoles) { for (const row of existingUserOrgRoles) {
insertUserOrgRole.run(row.userId, row.orgId, row.roleId, row.userId, row.orgId, row.roleId); insertUserOrgRole.run(row.userId, row.orgId, row.roleId);
} }
}); });

View File

@@ -56,30 +56,18 @@ export default async function ClientResourcesPage(
const internalResourceRows: InternalResourceRow[] = siteResources.map( const internalResourceRows: InternalResourceRow[] = siteResources.map(
(siteResource) => { (siteResource) => {
const rawMode = siteResource.mode as string | undefined;
const normalizedMode =
rawMode === "https"
? ("http" as const)
: rawMode === "host" || rawMode === "cidr" || rawMode === "http"
? rawMode
: ("host" as const);
return { return {
id: siteResource.siteResourceId, id: siteResource.siteResourceId,
name: siteResource.name, name: siteResource.name,
orgId: params.orgId, orgId: params.orgId,
siteName: siteResource.siteName, siteName: siteResource.siteName,
siteAddress: siteResource.siteAddress || null, siteAddress: siteResource.siteAddress || null,
mode: normalizedMode, mode: siteResource.mode || ("port" as any),
scheme:
siteResource.scheme ??
(rawMode === "https" ? ("https" as const) : null),
ssl:
siteResource.ssl === true || rawMode === "https",
// protocol: siteResource.protocol, // protocol: siteResource.protocol,
// proxyPort: siteResource.proxyPort, // proxyPort: siteResource.proxyPort,
siteId: siteResource.siteId, siteId: siteResource.siteId,
destination: siteResource.destination, destination: siteResource.destination,
httpHttpsPort: siteResource.destinationPort ?? null, // destinationPort: siteResource.destinationPort,
alias: siteResource.alias || null, alias: siteResource.alias || null,
aliasAddress: siteResource.aliasAddress || null, aliasAddress: siteResource.aliasAddress || null,
siteNiceId: siteResource.siteNiceId, siteNiceId: siteResource.siteNiceId,

View File

@@ -1,32 +0,0 @@
import { Metadata } from "next";
import { getTranslations } from "next-intl/server";
import {
Card,
CardContent,
CardHeader,
CardTitle
} from "@app/components/ui/card";
export const dynamic = "force-dynamic";
export const metadata: Metadata = {
title: "Private Placeholder"
};
export default async function MaintenanceScreen() {
const t = await getTranslations();
let title = t("privateMaintenanceScreenTitle");
let message = t("privateMaintenanceScreenMessage");
return (
<div className="min-h-screen flex items-center justify-center p-4">
<Card className="w-full max-w-md">
<CardHeader>
<CardTitle>{title}</CardTitle>
</CardHeader>
<CardContent className="space-y-4">{message}</CardContent>
</Card>
</div>
);
}

View File

@@ -46,15 +46,13 @@ export type InternalResourceRow = {
siteName: string; siteName: string;
siteAddress: string | null; siteAddress: string | null;
// mode: "host" | "cidr" | "port"; // mode: "host" | "cidr" | "port";
mode: "host" | "cidr" | "http"; mode: "host" | "cidr";
scheme: "http" | "https" | null;
ssl: boolean;
// protocol: string | null; // protocol: string | null;
// proxyPort: number | null; // proxyPort: number | null;
siteId: number; siteId: number;
siteNiceId: string; siteNiceId: string;
destination: string; destination: string;
httpHttpsPort: number | null; // destinationPort: number | null;
alias: string | null; alias: string | null;
aliasAddress: string | null; aliasAddress: string | null;
niceId: string; niceId: string;
@@ -65,39 +63,6 @@ export type InternalResourceRow = {
authDaemonPort?: number | null; authDaemonPort?: number | null;
}; };
function resolveHttpHttpsDisplayPort(
mode: "http",
httpHttpsPort: number | null
): number {
if (httpHttpsPort != null) {
return httpHttpsPort;
}
return 80;
}
function formatDestinationDisplay(row: InternalResourceRow): string {
const { mode, destination, httpHttpsPort, scheme } = row;
if (mode !== "http") {
return destination;
}
const port = resolveHttpHttpsDisplayPort(mode, httpHttpsPort);
const downstreamScheme = scheme ?? "http";
const hostPart =
destination.includes(":") && !destination.startsWith("[")
? `[${destination}]`
: destination;
return `${downstreamScheme}://${hostPart}:${port}`;
}
function isSafeUrlForLink(href: string): boolean {
try {
void new URL(href);
return true;
} catch {
return false;
}
}
type ClientResourcesTableProps = { type ClientResourcesTableProps = {
internalResources: InternalResourceRow[]; internalResources: InternalResourceRow[];
orgId: string; orgId: string;
@@ -250,10 +215,6 @@ export default function ClientResourcesTable({
{ {
value: "cidr", value: "cidr",
label: t("editInternalResourceDialogModeCidr") label: t("editInternalResourceDialogModeCidr")
},
{
value: "http",
label: t("editInternalResourceDialogModeHttp")
} }
]} ]}
selectedValue={searchParams.get("mode") ?? undefined} selectedValue={searchParams.get("mode") ?? undefined}
@@ -266,14 +227,10 @@ export default function ClientResourcesTable({
), ),
cell: ({ row }) => { cell: ({ row }) => {
const resourceRow = row.original; const resourceRow = row.original;
const modeLabels: Record< const modeLabels: Record<"host" | "cidr" | "port", string> = {
"host" | "cidr" | "port" | "http",
string
> = {
host: t("editInternalResourceDialogModeHost"), host: t("editInternalResourceDialogModeHost"),
cidr: t("editInternalResourceDialogModeCidr"), cidr: t("editInternalResourceDialogModeCidr"),
port: t("editInternalResourceDialogModePort"), port: t("editInternalResourceDialogModePort")
http: t("editInternalResourceDialogModeHttp")
}; };
return <span>{modeLabels[resourceRow.mode]}</span>; return <span>{modeLabels[resourceRow.mode]}</span>;
} }
@@ -286,12 +243,11 @@ export default function ClientResourcesTable({
), ),
cell: ({ row }) => { cell: ({ row }) => {
const resourceRow = row.original; const resourceRow = row.original;
const display = formatDestinationDisplay(resourceRow);
return ( return (
<CopyToClipboard <CopyToClipboard
text={display} text={resourceRow.destination}
isLink={false} isLink={false}
displayText={display} displayText={resourceRow.destination}
/> />
); );
} }
@@ -304,26 +260,15 @@ export default function ClientResourcesTable({
), ),
cell: ({ row }) => { cell: ({ row }) => {
const resourceRow = row.original; const resourceRow = row.original;
if (resourceRow.mode === "host" && resourceRow.alias) { return resourceRow.mode === "host" && resourceRow.alias ? (
return ( <CopyToClipboard
<CopyToClipboard text={resourceRow.alias}
text={resourceRow.alias} isLink={false}
isLink={false} displayText={resourceRow.alias}
displayText={resourceRow.alias} />
/> ) : (
); <span>-</span>
} );
if (resourceRow.mode === "http" && resourceRow.alias) {
const url = `${resourceRow.ssl ? "https" : "http"}://${resourceRow.alias}`;
return (
<CopyToClipboard
text={url}
isLink={isSafeUrlForLink(url)}
displayText={url}
/>
);
}
return <span>-</span>;
} }
}, },
{ {

View File

@@ -50,10 +50,7 @@ export default function CreateInternalResourceDialog({
setIsSubmitting(true); setIsSubmitting(true);
try { try {
let data = { ...values }; let data = { ...values };
if ( if (data.mode === "host" && isHostname(data.destination)) {
(data.mode === "host" || data.mode === "http") &&
isHostname(data.destination)
) {
const currentAlias = data.alias?.trim() || ""; const currentAlias = data.alias?.trim() || "";
if (!currentAlias) { if (!currentAlias) {
let aliasValue = data.destination; let aliasValue = data.destination;
@@ -72,42 +69,21 @@ export default function CreateInternalResourceDialog({
mode: data.mode, mode: data.mode,
destination: data.destination, destination: data.destination,
enabled: true, enabled: true,
...(data.mode === "http" && { alias: data.alias && typeof data.alias === "string" && data.alias.trim() ? data.alias : undefined,
scheme: data.scheme,
ssl: data.ssl ?? false,
destinationPort: data.httpHttpsPort ?? undefined
}),
alias:
data.alias &&
typeof data.alias === "string" &&
data.alias.trim()
? data.alias
: undefined,
tcpPortRangeString: data.tcpPortRangeString, tcpPortRangeString: data.tcpPortRangeString,
udpPortRangeString: data.udpPortRangeString, udpPortRangeString: data.udpPortRangeString,
disableIcmp: data.disableIcmp ?? false, disableIcmp: data.disableIcmp ?? false,
...(data.authDaemonMode != null && { ...(data.authDaemonMode != null && { authDaemonMode: data.authDaemonMode }),
authDaemonMode: data.authDaemonMode ...(data.authDaemonMode === "remote" && data.authDaemonPort != null && { authDaemonPort: data.authDaemonPort }),
}), roleIds: data.roles ? data.roles.map((r) => parseInt(r.id)) : [],
...(data.authDaemonMode === "remote" &&
data.authDaemonPort != null && {
authDaemonPort: data.authDaemonPort
}),
roleIds: data.roles
? data.roles.map((r) => parseInt(r.id))
: [],
userIds: data.users ? data.users.map((u) => u.id) : [], userIds: data.users ? data.users.map((u) => u.id) : [],
clientIds: data.clients clientIds: data.clients ? data.clients.map((c) => parseInt(c.id)) : []
? data.clients.map((c) => parseInt(c.id))
: []
} }
); );
toast({ toast({
title: t("createInternalResourceDialogSuccess"), title: t("createInternalResourceDialogSuccess"),
description: t( description: t("createInternalResourceDialogInternalResourceCreatedSuccessfully"),
"createInternalResourceDialogInternalResourceCreatedSuccessfully"
),
variant: "default" variant: "default"
}); });
setOpen(false); setOpen(false);
@@ -117,9 +93,7 @@ export default function CreateInternalResourceDialog({
title: t("createInternalResourceDialogError"), title: t("createInternalResourceDialogError"),
description: formatAxiosError( description: formatAxiosError(
error, error,
t( t("createInternalResourceDialogFailedToCreateInternalResource")
"createInternalResourceDialogFailedToCreateInternalResource"
)
), ),
variant: "destructive" variant: "destructive"
}); });
@@ -132,13 +106,9 @@ export default function CreateInternalResourceDialog({
<Credenza open={open} onOpenChange={setOpen}> <Credenza open={open} onOpenChange={setOpen}>
<CredenzaContent className="max-w-3xl"> <CredenzaContent className="max-w-3xl">
<CredenzaHeader> <CredenzaHeader>
<CredenzaTitle> <CredenzaTitle>{t("createInternalResourceDialogCreateClientResource")}</CredenzaTitle>
{t("createInternalResourceDialogCreateClientResource")}
</CredenzaTitle>
<CredenzaDescription> <CredenzaDescription>
{t( {t("createInternalResourceDialogCreateClientResourceDescription")}
"createInternalResourceDialogCreateClientResourceDescription"
)}
</CredenzaDescription> </CredenzaDescription>
</CredenzaHeader> </CredenzaHeader>
<CredenzaBody> <CredenzaBody>
@@ -153,11 +123,7 @@ export default function CreateInternalResourceDialog({
</CredenzaBody> </CredenzaBody>
<CredenzaFooter> <CredenzaFooter>
<CredenzaClose asChild> <CredenzaClose asChild>
<Button <Button variant="outline" onClick={() => setOpen(false)} disabled={isSubmitting}>
variant="outline"
onClick={() => setOpen(false)}
disabled={isSubmitting}
>
{t("createInternalResourceDialogCancel")} {t("createInternalResourceDialogCancel")}
</Button> </Button>
</CredenzaClose> </CredenzaClose>

View File

@@ -163,18 +163,15 @@ export default function DomainPicker({
domainId: firstOrExistingDomain.domainId domainId: firstOrExistingDomain.domainId
}; };
const base = firstOrExistingDomain.baseDomain;
const sub =
firstOrExistingDomain.type !== "cname"
? defaultSubdomain?.trim() || undefined
: undefined;
onDomainChange?.({ onDomainChange?.({
domainId: firstOrExistingDomain.domainId, domainId: firstOrExistingDomain.domainId,
type: "organization", type: "organization",
subdomain: sub, subdomain:
fullDomain: sub ? `${sub}.${base}` : base, firstOrExistingDomain.type !== "cname"
baseDomain: base ? defaultSubdomain || undefined
: undefined,
fullDomain: firstOrExistingDomain.baseDomain,
baseDomain: firstOrExistingDomain.baseDomain
}); });
} }
} }
@@ -512,11 +509,9 @@ export default function DomainPicker({
<span className="truncate"> <span className="truncate">
{selectedBaseDomain.domain} {selectedBaseDomain.domain}
</span> </span>
{selectedBaseDomain.verified && {selectedBaseDomain.verified && (
selectedBaseDomain.domainType !== <CheckCircle2 className="h-3 w-3 text-green-500 shrink-0" />
"wildcard" && ( )}
<CheckCircle2 className="h-3 w-3 text-green-500 shrink-0" />
)}
</div> </div>
) : ( ) : (
t("domainPickerSelectBaseDomain") t("domainPickerSelectBaseDomain")
@@ -579,23 +574,14 @@ export default function DomainPicker({
} }
</span> </span>
<span className="text-xs text-muted-foreground"> <span className="text-xs text-muted-foreground">
{orgDomain.type === {orgDomain.type.toUpperCase()}{" "}
"wildcard" {" "}
{orgDomain.verified
? t( ? t(
"domainPickerManual" "domainPickerVerified"
) )
: ( : t(
<> "domainPickerUnverified"
{orgDomain.type.toUpperCase()}{" "}
{" "}
{orgDomain.verified
? t(
"domainPickerVerified"
)
: t(
"domainPickerUnverified"
)}
</>
)} )}
</span> </span>
</div> </div>

View File

@@ -54,10 +54,7 @@ export default function EditInternalResourceDialog({
async function handleSubmit(values: InternalResourceFormValues) { async function handleSubmit(values: InternalResourceFormValues) {
try { try {
let data = { ...values }; let data = { ...values };
if ( if (data.mode === "host" && isHostname(data.destination)) {
(data.mode === "host" || data.mode === "http") &&
isHostname(data.destination)
) {
const currentAlias = data.alias?.trim() || ""; const currentAlias = data.alias?.trim() || "";
if (!currentAlias) { if (!currentAlias) {
let aliasValue = data.destination; let aliasValue = data.destination;
@@ -74,11 +71,6 @@ export default function EditInternalResourceDialog({
mode: data.mode, mode: data.mode,
niceId: data.niceId, niceId: data.niceId,
destination: data.destination, destination: data.destination,
...(data.mode === "http" && {
scheme: data.scheme,
ssl: data.ssl ?? false,
destinationPort: data.httpHttpsPort ?? null
}),
alias: alias:
data.alias && data.alias &&
typeof data.alias === "string" && typeof data.alias === "string" &&

File diff suppressed because it is too large Load Diff