Merge branch 'dev' into feat/device-approvals
321
server/db/asns.ts
Normal file
@@ -0,0 +1,321 @@
// Curated list of major ASNs (Cloud Providers, CDNs, ISPs, etc.)
// This is not exhaustive - there are 100,000+ ASNs globally
// Users can still enter any ASN manually in the input field
export const MAJOR_ASNS = [
    { name: "ALL ASNs", code: "ALL", asn: 0 }, // Special value that will match all

    // Major Cloud Providers
    { name: "Google LLC", code: "AS15169", asn: 15169 },
    { name: "Amazon AWS", code: "AS16509", asn: 16509 },
    { name: "Amazon AWS (EC2)", code: "AS14618", asn: 14618 },
    { name: "Microsoft Azure", code: "AS8075", asn: 8075 },
    { name: "Microsoft Corporation", code: "AS8068", asn: 8068 },
    { name: "DigitalOcean", code: "AS14061", asn: 14061 },
    { name: "Linode", code: "AS63949", asn: 63949 },
    { name: "Hetzner Online", code: "AS24940", asn: 24940 },
    { name: "OVH SAS", code: "AS16276", asn: 16276 },
    { name: "Oracle Cloud", code: "AS31898", asn: 31898 },
    { name: "Alibaba Cloud", code: "AS45102", asn: 45102 },
    { name: "IBM Cloud", code: "AS36351", asn: 36351 },

    // CDNs
    { name: "Cloudflare", code: "AS13335", asn: 13335 },
    { name: "Fastly", code: "AS54113", asn: 54113 },
    { name: "Akamai Technologies", code: "AS20940", asn: 20940 },
    { name: "Akamai (Primary)", code: "AS16625", asn: 16625 },

    // Mobile Carriers - US
    { name: "T-Mobile USA", code: "AS21928", asn: 21928 },
    { name: "Verizon Wireless", code: "AS6167", asn: 6167 },
    { name: "AT&T Mobility", code: "AS20057", asn: 20057 },
    { name: "Sprint (T-Mobile)", code: "AS1239", asn: 1239 },
    { name: "US Cellular", code: "AS6430", asn: 6430 },

    // Mobile Carriers - Europe
    { name: "Vodafone UK", code: "AS25135", asn: 25135 },
    { name: "EE (UK)", code: "AS12576", asn: 12576 },
    { name: "Three UK", code: "AS29194", asn: 29194 },
    { name: "O2 UK", code: "AS13285", asn: 13285 },
    { name: "Telefonica Spain Mobile", code: "AS12430", asn: 12430 },

    // Mobile Carriers - Asia
    { name: "NTT DoCoMo (Japan)", code: "AS9605", asn: 9605 },
    { name: "SoftBank Mobile (Japan)", code: "AS17676", asn: 17676 },
    { name: "SK Telecom (Korea)", code: "AS9318", asn: 9318 },
    { name: "KT Corporation Mobile (Korea)", code: "AS4766", asn: 4766 },
    { name: "Airtel India", code: "AS24560", asn: 24560 },
    { name: "China Mobile", code: "AS9808", asn: 9808 },

    // Major US ISPs
    { name: "AT&T Services", code: "AS7018", asn: 7018 },
    { name: "Comcast Cable", code: "AS7922", asn: 7922 },
    { name: "Verizon", code: "AS701", asn: 701 },
    { name: "Cox Communications", code: "AS22773", asn: 22773 },
    { name: "Charter Communications", code: "AS20115", asn: 20115 },
    { name: "CenturyLink", code: "AS209", asn: 209 },

    // Major European ISPs
    { name: "Deutsche Telekom", code: "AS3320", asn: 3320 },
    { name: "Vodafone", code: "AS1273", asn: 1273 },
    { name: "British Telecom", code: "AS2856", asn: 2856 },
    { name: "Orange", code: "AS3215", asn: 3215 },
    { name: "Telefonica", code: "AS12956", asn: 12956 },

    // Major Asian ISPs
    { name: "China Telecom", code: "AS4134", asn: 4134 },
    { name: "China Unicom", code: "AS4837", asn: 4837 },
    { name: "NTT Communications", code: "AS2914", asn: 2914 },
    { name: "KDDI Corporation", code: "AS2516", asn: 2516 },
    { name: "Reliance Jio (India)", code: "AS55836", asn: 55836 },

    // VPN/Proxy Providers
    { name: "Private Internet Access", code: "AS46562", asn: 46562 },
    { name: "NordVPN", code: "AS202425", asn: 202425 },
    { name: "Mullvad VPN", code: "AS213281", asn: 213281 },

    // Social Media / Major Tech
    { name: "Facebook/Meta", code: "AS32934", asn: 32934 },
    { name: "Twitter/X", code: "AS13414", asn: 13414 },
    { name: "Apple", code: "AS714", asn: 714 },
    { name: "Netflix", code: "AS2906", asn: 2906 },

    // Academic/Research
    { name: "MIT", code: "AS3", asn: 3 },
    { name: "Stanford University", code: "AS32", asn: 32 },
    { name: "CERN", code: "AS513", asn: 513 }
];
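Each entry pairs a display name with the ASN in both "AS<number>" and numeric form; asn: 0 is the "ALL" wildcard. A minimal sketch (not part of this commit) of how those two forms can be reconciled — the helper names are illustrative only:

import { MAJOR_ASNS } from "@server/db/asns";

// Hypothetical helper: "AS15169" -> 15169, "ALL" -> 0 (the wildcard sentinel above)
export function parseAsnCode(code: string): number | undefined {
    if (code === "ALL") return 0;
    const match = /^AS(\d+)$/i.exec(code.trim());
    return match ? Number(match[1]) : undefined;
}

// Hypothetical helper: a rule ASN of 0 ("ALL ASNs") matches everything
export function asnMatches(ruleAsn: number, observedAsn: number): boolean {
    return ruleAsn === 0 || ruleAsn === observedAsn;
}

parseAsnCode(MAJOR_ASNS[1].code); // 15169 (Google LLC)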
13
server/db/maxmindAsn.ts
Normal file
@@ -0,0 +1,13 @@
import maxmind, { AsnResponse, Reader } from "maxmind";
import config from "@server/lib/config";

let maxmindAsnLookup: Reader<AsnResponse> | null;

if (config.getRawConfig().server.maxmind_asn_path) {
    maxmindAsnLookup = await maxmind.open<AsnResponse>(
        config.getRawConfig().server.maxmind_asn_path!
    );
} else {
    maxmindAsnLookup = null;
}

export { maxmindAsnLookup };
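A small usage sketch (an assumption, not part of this commit): the maxmind Reader's get() returns an AsnResponse with autonomous_system_number / autonomous_system_organization when the IP is in the database, and null otherwise.

import { maxmindAsnLookup } from "@server/db/maxmindAsn";

const hit = maxmindAsnLookup?.get("1.1.1.1");
if (hit?.autonomous_system_number) {
    // e.g. { autonomous_system_number: 13335, autonomous_system_organization: "CLOUDFLARENET" }
    console.log(`AS${hit.autonomous_system_number}`);
}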
@@ -132,7 +132,17 @@ export const resources = pgTable("resources", {
    }),
    headers: text("headers"), // comma-separated list of headers to add to the request
    proxyProtocol: boolean("proxyProtocol").notNull().default(false),
    proxyProtocolVersion: integer("proxyProtocolVersion").default(1)
    proxyProtocolVersion: integer("proxyProtocolVersion").default(1),

    maintenanceModeEnabled: boolean("maintenanceModeEnabled")
        .notNull()
        .default(false),
    maintenanceModeType: text("maintenanceModeType", {
        enum: ["forced", "automatic"]
    }).default("forced"), // "forced" = always show, "automatic" = only when down
    maintenanceTitle: text("maintenanceTitle"),
    maintenanceMessage: text("maintenanceMessage"),
    maintenanceEstimatedTime: text("maintenanceEstimatedTime")
});

export const targets = pgTable("targets", {
@@ -215,8 +225,8 @@ export const siteResources = pgTable("siteResources", {
    enabled: boolean("enabled").notNull().default(true),
    alias: varchar("alias"),
    aliasAddress: varchar("aliasAddress"),
    tcpPortRangeString: varchar("tcpPortRangeString"),
    udpPortRangeString: varchar("udpPortRangeString"),
    tcpPortRangeString: varchar("tcpPortRangeString").notNull().default("*"),
    udpPortRangeString: varchar("udpPortRangeString").notNull().default("*"),
    disableIcmp: boolean("disableIcmp").notNull().default(false)
});

@@ -457,6 +467,23 @@ export const resourceHeaderAuth = pgTable("resourceHeaderAuth", {
    headerAuthHash: varchar("headerAuthHash").notNull()
});

export const resourceHeaderAuthExtendedCompatibility = pgTable(
    "resourceHeaderAuthExtendedCompatibility",
    {
        headerAuthExtendedCompatibilityId: serial(
            "headerAuthExtendedCompatibilityId"
        ).primaryKey(),
        resourceId: integer("resourceId")
            .notNull()
            .references(() => resources.resourceId, { onDelete: "cascade" }),
        extendedCompatibilityIsActivated: boolean(
            "extendedCompatibilityIsActivated"
        )
            .notNull()
            .default(true)
    }
);

export const resourceAccessToken = pgTable("resourceAccessToken", {
    accessTokenId: varchar("accessTokenId").primaryKey(),
    orgId: varchar("orgId")
@@ -860,6 +887,9 @@ export type ResourceSession = InferSelectModel<typeof resourceSessions>;
export type ResourcePincode = InferSelectModel<typeof resourcePincode>;
export type ResourcePassword = InferSelectModel<typeof resourcePassword>;
export type ResourceHeaderAuth = InferSelectModel<typeof resourceHeaderAuth>;
export type ResourceHeaderAuthExtendedCompatibility = InferSelectModel<
    typeof resourceHeaderAuthExtendedCompatibility
>;
export type ResourceOtp = InferSelectModel<typeof resourceOtp>;
export type ResourceAccessToken = InferSelectModel<typeof resourceAccessToken>;
export type ResourceWhitelist = InferSelectModel<typeof resourceWhitelist>;

@@ -14,7 +14,9 @@ import {
    sessions,
    userOrgs,
    userResources,
    users
    users,
    ResourceHeaderAuthExtendedCompatibility,
    resourceHeaderAuthExtendedCompatibility
} from "@server/db";
import { and, eq } from "drizzle-orm";

@@ -23,6 +25,7 @@ export type ResourceWithAuth = {
    pincode: ResourcePincode | null;
    password: ResourcePassword | null;
    headerAuth: ResourceHeaderAuth | null;
    headerAuthExtendedCompatibility: ResourceHeaderAuthExtendedCompatibility | null;
    org: Org;
};

@@ -52,6 +55,13 @@ export async function getResourceByDomain(
            resourceHeaderAuth,
            eq(resourceHeaderAuth.resourceId, resources.resourceId)
        )
        .leftJoin(
            resourceHeaderAuthExtendedCompatibility,
            eq(
                resourceHeaderAuthExtendedCompatibility.resourceId,
                resources.resourceId
            )
        )
        .innerJoin(orgs, eq(orgs.orgId, resources.orgId))
        .where(eq(resources.fullDomain, domain))
        .limit(1);
@@ -65,6 +75,8 @@ export async function getResourceByDomain(
        pincode: result.resourcePincode,
        password: result.resourcePassword,
        headerAuth: result.resourceHeaderAuth,
        headerAuthExtendedCompatibility:
            result.resourceHeaderAuthExtendedCompatibility,
        org: result.orgs
    };
}

@@ -150,7 +150,19 @@ export const resources = sqliteTable("resources", {
    proxyProtocol: integer("proxyProtocol", { mode: "boolean" })
        .notNull()
        .default(false),
    proxyProtocolVersion: integer("proxyProtocolVersion").default(1)
    proxyProtocolVersion: integer("proxyProtocolVersion").default(1),

    maintenanceModeEnabled: integer("maintenanceModeEnabled", {
        mode: "boolean"
    })
        .notNull()
        .default(false),
    maintenanceModeType: text("maintenanceModeType", {
        enum: ["forced", "automatic"]
    }).default("forced"), // "forced" = always show, "automatic" = only when down
    maintenanceTitle: text("maintenanceTitle"),
    maintenanceMessage: text("maintenanceMessage"),
    maintenanceEstimatedTime: text("maintenanceEstimatedTime")
});

export const targets = sqliteTable("targets", {
@@ -241,9 +253,9 @@ export const siteResources = sqliteTable("siteResources", {
    enabled: integer("enabled", { mode: "boolean" }).notNull().default(true),
    alias: text("alias"),
    aliasAddress: text("aliasAddress"),
    tcpPortRangeString: text("tcpPortRangeString"),
    udpPortRangeString: text("udpPortRangeString"),
    disableIcmp: integer("disableIcmp", { mode: "boolean" })
    tcpPortRangeString: text("tcpPortRangeString").notNull().default("*"),
    udpPortRangeString: text("udpPortRangeString").notNull().default("*"),
    disableIcmp: integer("disableIcmp", { mode: "boolean" }).notNull().default(false)
});

export const clientSiteResources = sqliteTable("clientSiteResources", {
@@ -631,6 +643,26 @@ export const resourceHeaderAuth = sqliteTable("resourceHeaderAuth", {
    headerAuthHash: text("headerAuthHash").notNull()
});

export const resourceHeaderAuthExtendedCompatibility = sqliteTable(
    "resourceHeaderAuthExtendedCompatibility",
    {
        headerAuthExtendedCompatibilityId: integer(
            "headerAuthExtendedCompatibilityId"
        ).primaryKey({
            autoIncrement: true
        }),
        resourceId: integer("resourceId")
            .notNull()
            .references(() => resources.resourceId, { onDelete: "cascade" }),
        extendedCompatibilityIsActivated: integer(
            "extendedCompatibilityIsActivated",
            { mode: "boolean" }
        )
            .notNull()
            .default(true)
    }
);

export const resourceAccessToken = sqliteTable("resourceAccessToken", {
    accessTokenId: text("accessTokenId").primaryKey(),
    orgId: text("orgId")
@@ -916,6 +948,9 @@ export type ResourceSession = InferSelectModel<typeof resourceSessions>;
export type ResourcePincode = InferSelectModel<typeof resourcePincode>;
export type ResourcePassword = InferSelectModel<typeof resourcePassword>;
export type ResourceHeaderAuth = InferSelectModel<typeof resourceHeaderAuth>;
export type ResourceHeaderAuthExtendedCompatibility = InferSelectModel<
    typeof resourceHeaderAuthExtendedCompatibility
>;
export type ResourceOtp = InferSelectModel<typeof resourceOtp>;
export type ResourceAccessToken = InferSelectModel<typeof resourceAccessToken>;
export type ResourceWhitelist = InferSelectModel<typeof resourceWhitelist>;
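A hedged sketch (not part of the diff) of reading the new maintenance columns with drizzle-orm, assuming the db and resources exports shown above; the resourceId is illustrative:

import { db, resources } from "@server/db";
import { eq } from "drizzle-orm";

const [maintenance] = await db
    .select({
        enabled: resources.maintenanceModeEnabled,
        type: resources.maintenanceModeType, // "forced" | "automatic"
        title: resources.maintenanceTitle,
        message: resources.maintenanceMessage,
        eta: resources.maintenanceEstimatedTime
    })
    .from(resources)
    .where(eq(resources.resourceId, 42))
    .limit(1);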
29
server/lib/asn.ts
Normal file
@@ -0,0 +1,29 @@
import logger from "@server/logger";
import { maxmindAsnLookup } from "@server/db/maxmindAsn";

export async function getAsnForIp(ip: string): Promise<number | undefined> {
    try {
        if (!maxmindAsnLookup) {
            logger.debug(
                "MaxMind ASN DB path not configured, cannot perform ASN lookup"
            );
            return;
        }

        const result = maxmindAsnLookup.get(ip);

        if (!result || !result.autonomous_system_number) {
            return;
        }

        logger.debug(
            `ASN lookup successful for IP ${ip}: AS${result.autonomous_system_number}`
        );

        return result.autonomous_system_number;
    } catch (error) {
        logger.error("Error performing ASN lookup:", error);
    }

    return;
}
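A sketch (an assumption, not from this commit) of how getAsnForIp can back an "asn" rule whose value is "AS<number>" or "ALL", matching the RuleSchema added later in this diff; the helper name is hypothetical:

import { getAsnForIp } from "@server/lib/asn";

export async function ipMatchesAsnRule(
    ip: string,
    ruleValue: string // "AS13335", "as13335", or "ALL"
): Promise<boolean> {
    if (ruleValue === "ALL") return true;
    const observed = await getAsnForIp(ip);
    if (observed === undefined) return false; // no ASN DB configured, or IP not found
    return `AS${observed}`.toLowerCase() === ruleValue.toLowerCase();
}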
3
server/lib/blueprints/MaintenanceSchema.ts
Normal file
@@ -0,0 +1,3 @@
import { z } from "zod";

export const MaintenanceSchema = z.object({});
@@ -1,4 +1,14 @@
|
||||
import { db, newts, blueprints, Blueprint, Site, siteResources, roleSiteResources, userSiteResources, clientSiteResources } from "@server/db";
|
||||
import {
|
||||
db,
|
||||
newts,
|
||||
blueprints,
|
||||
Blueprint,
|
||||
Site,
|
||||
siteResources,
|
||||
roleSiteResources,
|
||||
userSiteResources,
|
||||
clientSiteResources
|
||||
} from "@server/db";
|
||||
import { Config, ConfigSchema } from "./types";
|
||||
import { ProxyResourcesResults, updateProxyResources } from "./proxyResources";
|
||||
import { fromError } from "zod-validation-error";
|
||||
@@ -126,7 +136,7 @@ export async function applyBlueprint({
|
||||
)
|
||||
.then((rows) => rows.map((row) => row.roleId));
|
||||
|
||||
const existingUserIds= await trx
|
||||
const existingUserIds = await trx
|
||||
.select()
|
||||
.from(userSiteResources)
|
||||
.where(
|
||||
@@ -134,7 +144,8 @@ export async function applyBlueprint({
|
||||
userSiteResources.siteResourceId,
|
||||
result.oldSiteResource.siteResourceId
|
||||
)
|
||||
).then((rows) => rows.map((row) => row.userId));
|
||||
)
|
||||
.then((rows) => rows.map((row) => row.userId));
|
||||
|
||||
const existingClientIds = await trx
|
||||
.select()
|
||||
@@ -144,13 +155,19 @@ export async function applyBlueprint({
|
||||
clientSiteResources.siteResourceId,
|
||||
result.oldSiteResource.siteResourceId
|
||||
)
|
||||
).then((rows) => rows.map((row) => row.clientId));
|
||||
)
|
||||
.then((rows) => rows.map((row) => row.clientId));
|
||||
|
||||
// delete the existing site resource
|
||||
await trx
|
||||
.delete(siteResources)
|
||||
.where(
|
||||
and(eq(siteResources.siteResourceId, result.oldSiteResource.siteResourceId))
|
||||
and(
|
||||
eq(
|
||||
siteResources.siteResourceId,
|
||||
result.oldSiteResource.siteResourceId
|
||||
)
|
||||
)
|
||||
);
|
||||
|
||||
await rebuildClientAssociationsFromSiteResource(
|
||||
@@ -161,7 +178,7 @@ export async function applyBlueprint({
|
||||
const [insertedSiteResource] = await trx
|
||||
.insert(siteResources)
|
||||
.values({
|
||||
...result.newSiteResource,
|
||||
...result.newSiteResource
|
||||
})
|
||||
.returning();
|
||||
|
||||
@@ -172,18 +189,20 @@ export async function applyBlueprint({
|
||||
|
||||
if (existingRoleIds.length > 0) {
|
||||
await trx.insert(roleSiteResources).values(
|
||||
existingRoleIds.map((roleId) => ({
|
||||
existingRoleIds.map((roleId) => ({
|
||||
roleId,
|
||||
siteResourceId: insertedSiteResource!.siteResourceId
|
||||
siteResourceId:
|
||||
insertedSiteResource!.siteResourceId
|
||||
}))
|
||||
);
|
||||
}
|
||||
|
||||
if (existingUserIds.length > 0) {
|
||||
await trx.insert(userSiteResources).values(
|
||||
existingUserIds.map((userId) => ({
|
||||
existingUserIds.map((userId) => ({
|
||||
userId,
|
||||
siteResourceId: insertedSiteResource!.siteResourceId
|
||||
siteResourceId:
|
||||
insertedSiteResource!.siteResourceId
|
||||
}))
|
||||
);
|
||||
}
|
||||
@@ -192,7 +211,8 @@ export async function applyBlueprint({
|
||||
await trx.insert(clientSiteResources).values(
|
||||
existingClientIds.map((clientId) => ({
|
||||
clientId,
|
||||
siteResourceId: insertedSiteResource!.siteResourceId
|
||||
siteResourceId:
|
||||
insertedSiteResource!.siteResourceId
|
||||
}))
|
||||
);
|
||||
}
|
||||
@@ -201,7 +221,6 @@ export async function applyBlueprint({
|
||||
insertedSiteResource,
|
||||
trx
|
||||
);
|
||||
|
||||
} else {
|
||||
const [newSite] = await trx
|
||||
.select()
|
||||
|
||||
@@ -36,7 +36,9 @@ export async function applyNewtDockerBlueprint(
|
||||
|
||||
if (
|
||||
isEmptyObject(blueprint["proxy-resources"]) &&
|
||||
isEmptyObject(blueprint["client-resources"])
|
||||
isEmptyObject(blueprint["client-resources"]) &&
|
||||
isEmptyObject(blueprint["public-resources"]) &&
|
||||
isEmptyObject(blueprint["private-resources"])
|
||||
) {
|
||||
return;
|
||||
}
|
||||
|
||||
@@ -54,10 +54,14 @@ function getContainerPort(container: Container): number | null {
export function processContainerLabels(containers: Container[]): {
    "proxy-resources": { [key: string]: ResourceConfig };
    "client-resources": { [key: string]: ResourceConfig };
    "public-resources": { [key: string]: ResourceConfig };
    "private-resources": { [key: string]: ResourceConfig };
} {
    const result = {
        "proxy-resources": {} as { [key: string]: ResourceConfig },
        "client-resources": {} as { [key: string]: ResourceConfig }
        "client-resources": {} as { [key: string]: ResourceConfig },
        "public-resources": {} as { [key: string]: ResourceConfig },
        "private-resources": {} as { [key: string]: ResourceConfig }
    };

    // Process each container
@@ -68,8 +72,10 @@ export function processContainerLabels(containers: Container[]): {

        const proxyResourceLabels: DockerLabels = {};
        const clientResourceLabels: DockerLabels = {};
        const publicResourceLabels: DockerLabels = {};
        const privateResourceLabels: DockerLabels = {};

        // Filter and separate proxy-resources and client-resources labels
        // Filter and separate proxy-resources, client-resources, public-resources, and private-resources labels
        Object.entries(container.labels).forEach(([key, value]) => {
            if (key.startsWith("pangolin.proxy-resources.")) {
                // remove the pangolin.proxy- prefix to get "resources.xxx"
@@ -79,6 +85,14 @@ export function processContainerLabels(containers: Container[]): {
                // remove the pangolin.client- prefix to get "resources.xxx"
                const strippedKey = key.replace("pangolin.client-", "");
                clientResourceLabels[strippedKey] = value;
            } else if (key.startsWith("pangolin.public-resources.")) {
                // remove the pangolin.public- prefix to get "resources.xxx"
                const strippedKey = key.replace("pangolin.public-", "");
                publicResourceLabels[strippedKey] = value;
            } else if (key.startsWith("pangolin.private-resources.")) {
                // remove the pangolin.private- prefix to get "resources.xxx"
                const strippedKey = key.replace("pangolin.private-", "");
                privateResourceLabels[strippedKey] = value;
            }
        });

@@ -99,6 +113,24 @@ export function processContainerLabels(containers: Container[]): {
            result["client-resources"]
        );
    }

    // Process public resources (alias for proxy resources)
    if (Object.keys(publicResourceLabels).length > 0) {
        processResourceLabels(
            publicResourceLabels,
            container,
            result["public-resources"]
        );
    }

    // Process private resources (alias for client resources)
    if (Object.keys(privateResourceLabels).length > 0) {
        processResourceLabels(
            privateResourceLabels,
            container,
            result["private-resources"]
        );
    }
});

return result;
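An illustrative input (not from this commit) showing container labels that use the new public-/private-resources aliases; the label sub-keys and the cast are assumptions, since only the prefixes are defined above:

const containers = [
    {
        name: "whoami",
        labels: {
            "pangolin.public-resources.whoami.name": "whoami",
            "pangolin.private-resources.admin.name": "admin-panel"
        }
    }
] as unknown as Container[];

const grouped = processContainerLabels(containers);
// grouped["public-resources"] / grouped["private-resources"] are built by the same
// processResourceLabels() path as the existing proxy-/client-resources groups.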
@@ -3,6 +3,7 @@ import {
|
||||
orgDomains,
|
||||
Resource,
|
||||
resourceHeaderAuth,
|
||||
resourceHeaderAuthExtendedCompatibility,
|
||||
resourcePincode,
|
||||
resourceRules,
|
||||
resourceWhitelist,
|
||||
@@ -30,7 +31,8 @@ import { pickPort } from "@server/routers/target/helpers";
|
||||
import { resourcePassword } from "@server/db";
|
||||
import { hashPassword } from "@server/auth/password";
|
||||
import { isValidCIDR, isValidIP, isValidUrlGlobPattern } from "../validators";
|
||||
import { get } from "http";
|
||||
import { isLicensedOrSubscribed } from "../isLicencedOrSubscribed";
|
||||
import { build } from "@server/build";
|
||||
|
||||
export type ProxyResourcesResults = {
|
||||
proxyResource: Resource;
|
||||
@@ -209,6 +211,16 @@ export async function updateProxyResources(
|
||||
resource = existingResource;
|
||||
} else {
|
||||
// Update existing resource
|
||||
|
||||
const isLicensed = await isLicensedOrSubscribed(orgId);
|
||||
if (build == "enterprise" && !isLicensed) {
|
||||
logger.warn(
|
||||
"Server is not licensed! Clearing set maintenance screen values"
|
||||
);
|
||||
// null the maintenance mode fields if not licensed
|
||||
resourceData.maintenance = undefined;
|
||||
}
|
||||
|
||||
[resource] = await trx
|
||||
.update(resources)
|
||||
.set({
|
||||
@@ -233,7 +245,14 @@ export async function updateProxyResources(
|
||||
: false,
|
||||
headers: headers || null,
|
||||
applyRules:
|
||||
resourceData.rules && resourceData.rules.length > 0
|
||||
resourceData.rules && resourceData.rules.length > 0,
|
||||
maintenanceModeEnabled:
|
||||
resourceData.maintenance?.enabled,
|
||||
maintenanceModeType: resourceData.maintenance?.type,
|
||||
maintenanceTitle: resourceData.maintenance?.title,
|
||||
maintenanceMessage: resourceData.maintenance?.message,
|
||||
maintenanceEstimatedTime:
|
||||
resourceData.maintenance?.["estimated-time"]
|
||||
})
|
||||
.where(
|
||||
eq(resources.resourceId, existingResource.resourceId)
|
||||
@@ -287,21 +306,47 @@ export async function updateProxyResources(
|
||||
existingResource.resourceId
|
||||
)
|
||||
);
|
||||
|
||||
await trx
|
||||
.delete(resourceHeaderAuthExtendedCompatibility)
|
||||
.where(
|
||||
eq(
|
||||
resourceHeaderAuthExtendedCompatibility.resourceId,
|
||||
existingResource.resourceId
|
||||
)
|
||||
);
|
||||
|
||||
if (resourceData.auth?.["basic-auth"]) {
|
||||
const headerAuthUser =
|
||||
resourceData.auth?.["basic-auth"]?.user;
|
||||
const headerAuthPassword =
|
||||
resourceData.auth?.["basic-auth"]?.password;
|
||||
if (headerAuthUser && headerAuthPassword) {
|
||||
const headerAuthExtendedCompatibility =
|
||||
resourceData.auth?.["basic-auth"]
|
||||
?.extendedCompatibility;
|
||||
if (
|
||||
headerAuthUser &&
|
||||
headerAuthPassword &&
|
||||
headerAuthExtendedCompatibility !== null
|
||||
) {
|
||||
const headerAuthHash = await hashPassword(
|
||||
Buffer.from(
|
||||
`${headerAuthUser}:${headerAuthPassword}`
|
||||
).toString("base64")
|
||||
);
|
||||
await trx.insert(resourceHeaderAuth).values({
|
||||
resourceId: existingResource.resourceId,
|
||||
headerAuthHash
|
||||
});
|
||||
await Promise.all([
|
||||
trx.insert(resourceHeaderAuth).values({
|
||||
resourceId: existingResource.resourceId,
|
||||
headerAuthHash
|
||||
}),
|
||||
trx
|
||||
.insert(resourceHeaderAuthExtendedCompatibility)
|
||||
.values({
|
||||
resourceId: existingResource.resourceId,
|
||||
extendedCompatibilityIsActivated:
|
||||
headerAuthExtendedCompatibility
|
||||
})
|
||||
]);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -604,6 +649,15 @@ export async function updateProxyResources(
|
||||
);
|
||||
}
|
||||
|
||||
const isLicensed = await isLicensedOrSubscribed(orgId);
|
||||
if (build == "enterprise" && !isLicensed) {
|
||||
logger.warn(
|
||||
"Server is not licensed! Clearing set maintenance screen values"
|
||||
);
|
||||
// null the maintenance mode fields if not licensed
|
||||
resourceData.maintenance = undefined;
|
||||
}
|
||||
|
||||
// Create new resource
|
||||
const [newResource] = await trx
|
||||
.insert(resources)
|
||||
@@ -625,7 +679,13 @@ export async function updateProxyResources(
|
||||
ssl: resourceSsl,
|
||||
headers: headers || null,
|
||||
applyRules:
|
||||
resourceData.rules && resourceData.rules.length > 0
|
||||
resourceData.rules && resourceData.rules.length > 0,
|
||||
maintenanceModeEnabled: resourceData.maintenance?.enabled,
|
||||
maintenanceModeType: resourceData.maintenance?.type,
|
||||
maintenanceTitle: resourceData.maintenance?.title,
|
||||
maintenanceMessage: resourceData.maintenance?.message,
|
||||
maintenanceEstimatedTime:
|
||||
resourceData.maintenance?.["estimated-time"]
|
||||
})
|
||||
.returning();
|
||||
|
||||
@@ -656,18 +716,33 @@ export async function updateProxyResources(
|
||||
const headerAuthUser = resourceData.auth?.["basic-auth"]?.user;
|
||||
const headerAuthPassword =
|
||||
resourceData.auth?.["basic-auth"]?.password;
|
||||
const headerAuthExtendedCompatibility =
|
||||
resourceData.auth?.["basic-auth"]?.extendedCompatibility;
|
||||
|
||||
if (headerAuthUser && headerAuthPassword) {
|
||||
if (
|
||||
headerAuthUser &&
|
||||
headerAuthPassword &&
|
||||
headerAuthExtendedCompatibility !== null
|
||||
) {
|
||||
const headerAuthHash = await hashPassword(
|
||||
Buffer.from(
|
||||
`${headerAuthUser}:${headerAuthPassword}`
|
||||
).toString("base64")
|
||||
);
|
||||
|
||||
await trx.insert(resourceHeaderAuth).values({
|
||||
resourceId: newResource.resourceId,
|
||||
headerAuthHash
|
||||
});
|
||||
await Promise.all([
|
||||
trx.insert(resourceHeaderAuth).values({
|
||||
resourceId: newResource.resourceId,
|
||||
headerAuthHash
|
||||
}),
|
||||
trx
|
||||
.insert(resourceHeaderAuthExtendedCompatibility)
|
||||
.values({
|
||||
resourceId: newResource.resourceId,
|
||||
extendedCompatibilityIsActivated:
|
||||
headerAuthExtendedCompatibility
|
||||
})
|
||||
]);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -1,5 +1,6 @@
import { z } from "zod";
import { portRangeStringSchema } from "@server/lib/ip";
import { MaintenanceSchema } from "#dynamic/lib/blueprints/MaintenanceSchema";

export const SiteSchema = z.object({
    name: z.string().min(1).max(100),
@@ -56,7 +57,8 @@ export const AuthSchema = z.object({
    "basic-auth": z
        .object({
            user: z.string().min(1),
            password: z.string().min(1)
            password: z.string().min(1),
            extendedCompatibility: z.boolean().default(true)
        })
        .optional(),
    "sso-enabled": z.boolean().optional().default(false),
@@ -72,11 +74,69 @@ export const AuthSchema = z.object({
    "auto-login-idp": z.int().positive().optional()
});

export const RuleSchema = z.object({
    action: z.enum(["allow", "deny", "pass"]),
    match: z.enum(["cidr", "path", "ip", "country"]),
    value: z.string()
});
export const RuleSchema = z
    .object({
        action: z.enum(["allow", "deny", "pass"]),
        match: z.enum(["cidr", "path", "ip", "country", "asn"]),
        value: z.string()
    })
    .refine(
        (rule) => {
            if (rule.match === "ip") {
                // Check if it's a valid IP address (v4 or v6)
                return z.union([z.ipv4(), z.ipv6()]).safeParse(rule.value)
                    .success;
            }
            return true;
        },
        {
            path: ["value"],
            message: "Value must be a valid IP address when match is 'ip'"
        }
    )
    .refine(
        (rule) => {
            if (rule.match === "cidr") {
                // Check if it's a valid CIDR (v4 or v6)
                return z.union([z.cidrv4(), z.cidrv6()]).safeParse(rule.value)
                    .success;
            }
            return true;
        },
        {
            path: ["value"],
            message: "Value must be a valid CIDR notation when match is 'cidr'"
        }
    )
    .refine(
        (rule) => {
            if (rule.match === "country") {
                // Check if it's a valid 2-letter country code or "ALL"
                return /^[A-Z]{2}$/.test(rule.value) || rule.value === "ALL";
            }
            return true;
        },
        {
            path: ["value"],
            message:
                "Value must be a 2-letter country code or 'ALL' when match is 'country'"
        }
    )
    .refine(
        (rule) => {
            if (rule.match === "asn") {
                // Check if it's either AS<number> format or "ALL"
                const asNumberPattern = /^AS\d+$/i;
                return asNumberPattern.test(rule.value) || rule.value === "ALL";
            }
            return true;
        },
        {
            path: ["value"],
            message:
                "Value must be 'AS<number>' format or 'ALL' when match is 'asn'"
        }
    );

export const HeaderSchema = z.object({
    name: z.string().min(1),
@@ -97,7 +157,8 @@ export const ResourceSchema = z
    "host-header": z.string().optional(),
    "tls-server-name": z.string().optional(),
    headers: z.array(HeaderSchema).optional(),
    rules: z.array(RuleSchema).optional()
    rules: z.array(RuleSchema).optional(),
    maintenance: MaintenanceSchema.optional()
})
.refine(
    (resource) => {
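A quick validation sketch (not part of the diff) exercising the new "asn" match alongside the existing ones:

const candidates = [
    { action: "allow", match: "asn", value: "AS13335" },     // passes (AS prefix is case-insensitive)
    { action: "allow", match: "asn", value: "13335" },       // fails: must be "AS<number>" or "ALL"
    { action: "deny", match: "country", value: "RU" },       // passes
    { action: "allow", match: "cidr", value: "10.0.0.0/8" }  // passes
];

for (const rule of candidates) {
    console.log(rule.value, RuleSchema.safeParse(rule).success);
}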
@@ -1,5 +1,20 @@
import NodeCache from "node-cache";
import logger from "@server/logger";

export const cache = new NodeCache({ stdTTL: 3600, checkperiod: 120 });
// Create cache with maxKeys limit to prevent memory leaks
// With ~10k requests/day and 5min TTL, 10k keys should be more than sufficient
export const cache = new NodeCache({
    stdTTL: 3600,
    checkperiod: 120,
    maxKeys: 10000
});

// Log cache statistics periodically for monitoring
setInterval(() => {
    const stats = cache.getStats();
    logger.debug(
        `Cache stats - Keys: ${stats.keys}, Hits: ${stats.hits}, Misses: ${stats.misses}, Hit rate: ${stats.hits > 0 ? ((stats.hits / (stats.hits + stats.misses)) * 100).toFixed(2) : 0}%`
    );
}, 300000); // Every 5 minutes

export default cache;
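A short usage sketch (an assumption): a per-key TTL passed to set() overrides stdTTL, which is how the geo-IP lookups later in this diff cache results for five minutes; the key format is illustrative.

import cache from "@server/lib/cache";

cache.set("geoip:203.0.113.7", "US", 300); // 300 s TTL for this key only
const code = cache.get<string>("geoip:203.0.113.7"); // "US" until it expires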
@@ -84,6 +84,10 @@ export class Config {
            ?.disable_basic_wireguard_sites
            ? "true"
            : "false";
        process.env.FLAGS_DISABLE_PRODUCT_HELP_BANNERS = parsedConfig.flags
            ?.disable_product_help_banners
            ? "true"
            : "false";

        process.env.PRODUCT_UPDATES_NOTIFICATION_ENABLED = parsedConfig.app
            .notifications.product_updates
@@ -99,6 +103,10 @@ export class Config {
            process.env.MAXMIND_DB_PATH = parsedConfig.server.maxmind_db_path;
        }

        if (parsedConfig.server.maxmind_asn_path) {
            process.env.MAXMIND_ASN_PATH = parsedConfig.server.maxmind_asn_path;
        }

        this.rawConfig = parsedConfig;
    }

@@ -2,7 +2,7 @@ import path from "path";
import { fileURLToPath } from "url";

// This is a placeholder value replaced by the build process
export const APP_VERSION = "1.13.1";
export const APP_VERSION = "1.14.0";

export const __FILENAME = fileURLToPath(import.meta.url);
export const __DIRNAME = path.dirname(__FILENAME);
@@ -4,6 +4,7 @@ import { and, eq, isNotNull } from "drizzle-orm";
import config from "@server/lib/config";
import z from "zod";
import logger from "@server/logger";
import semver from "semver";

interface IPRange {
    start: bigint;
@@ -301,6 +302,26 @@ export function isIpInCidr(ip: string, cidr: string): boolean {
    return ipBigInt >= range.start && ipBigInt <= range.end;
}

/**
 * Checks if two CIDR ranges overlap
 * @param cidr1 First CIDR string
 * @param cidr2 Second CIDR string
 * @returns boolean indicating if the two CIDRs overlap
 */
export function doCidrsOverlap(cidr1: string, cidr2: string): boolean {
    const version1 = detectIpVersion(cidr1.split("/")[0]);
    const version2 = detectIpVersion(cidr2.split("/")[0]);
    if (version1 !== version2) {
        // Different IP versions cannot overlap
        return false;
    }
    const range1 = cidrToRange(cidr1);
    const range2 = cidrToRange(cidr2);

    // Overlap if the ranges intersect
    return range1.start <= range2.end && range2.start <= range1.end;
}

export async function getNextAvailableClientSubnet(
    orgId: string,
    transaction: Transaction | typeof db = db
@@ -663,3 +684,35 @@ export function parsePortRangeString(

    return result;
}

export function stripPortFromHost(ip: string, badgerVersion?: string): string {
    const isNewerBadger =
        badgerVersion &&
        semver.valid(badgerVersion) &&
        semver.gte(badgerVersion, "1.3.1");

    if (isNewerBadger) {
        return ip;
    }

    if (ip.startsWith("[") && ip.includes("]")) {
        // if brackets are found, extract the IPv6 address from between the brackets
        const ipv6Match = ip.match(/\[(.*?)\]/);
        if (ipv6Match) {
            return ipv6Match[1];
        }
    }

    // Check if it looks like IPv4 (contains dots and matches IPv4 pattern)
    // IPv4 format: x.x.x.x where x is 0-255
    const ipv4Pattern = /^(\d{1,3}\.){3}\d{1,3}/;
    if (ipv4Pattern.test(ip)) {
        const lastColonIndex = ip.lastIndexOf(":");
        if (lastColonIndex !== -1) {
            return ip.substring(0, lastColonIndex);
        }
    }

    // Return as is
    return ip;
}
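Expected behaviour of the two new helpers, as a sketch (sample values are illustrative):

import { doCidrsOverlap, stripPortFromHost } from "@server/lib/ip";

doCidrsOverlap("10.0.0.0/24", "10.0.0.128/25"); // true: second range sits inside the first
doCidrsOverlap("10.0.0.0/8", "fd00::/8");       // false: mixed IP versions never overlap

stripPortFromHost("203.0.113.7:51820");          // "203.0.113.7"
stripPortFromHost("[2001:db8::1]:443");          // "2001:db8::1"
stripPortFromHost("2001:db8::1");                // unchanged (no brackets, not IPv4)
stripPortFromHost("203.0.113.7:51820", "1.3.1"); // unchanged: newer Badger already strips ports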
17
server/lib/isLicencedOrSubscribed.ts
Normal file
@@ -0,0 +1,17 @@
import { build } from "@server/build";
import license from "#dynamic/license/license";
import { getOrgTierData } from "#dynamic/lib/billing";
import { TierId } from "@server/lib/billing/tiers";

export async function isLicensedOrSubscribed(orgId: string): Promise<boolean> {
    if (build === "enterprise") {
        return await license.isUnlocked();
    }

    if (build === "saas") {
        const { tier } = await getOrgTierData(orgId);
        return tier === TierId.STANDARD;
    }

    return true;
}
@@ -133,7 +133,8 @@ export const configSchema = z
            .optional(),
        trust_proxy: z.int().gte(0).optional().default(1),
        secret: z.string().pipe(z.string().min(8)).optional(),
        maxmind_db_path: z.string().optional()
        maxmind_db_path: z.string().optional(),
        maxmind_asn_path: z.string().optional()
    })
    .optional()
    .default({
@@ -255,11 +256,11 @@ export const configSchema = z
    orgs: z
        .object({
            block_size: z.number().positive().gt(0).optional().default(24),
            subnet_group: z.string().optional().default("100.90.128.0/24"),
            subnet_group: z.string().optional().default("100.90.128.0/20"),
            utility_subnet_group: z
                .string()
                .optional()
                .default("100.96.128.0/24") //just hardcode this for now as well
                .default("100.96.128.0/20") //just hardcode this for now as well
        })
        .optional()
        .default({
@@ -329,7 +330,8 @@ export const configSchema = z
        enable_integration_api: z.boolean().optional(),
        disable_local_sites: z.boolean().optional(),
        disable_basic_wireguard_sites: z.boolean().optional(),
        disable_config_managed_domains: z.boolean().optional()
        disable_config_managed_domains: z.boolean().optional(),
        disable_product_help_banners: z.boolean().optional()
    })
    .optional(),
    dns: z
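An illustrative fragment (the config is normally YAML; the paths are assumptions) showing where the new key lives in the server section:

const server = {
    maxmind_db_path: "/var/config/geoip.mmdb",        // existing country lookups
    maxmind_asn_path: "/var/config/GeoLite2-ASN.mmdb" // new: enables ASN rule lookups
};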
@@ -19,24 +19,33 @@ import { sanitize, validatePathRewriteConfig } from "./utils";
|
||||
const redirectHttpsMiddlewareName = "redirect-to-https";
|
||||
const badgerMiddlewareName = "badger";
|
||||
|
||||
// Define extended target type with site information
|
||||
type TargetWithSite = Target & {
|
||||
resourceId: number;
|
||||
targetId: number;
|
||||
ip: string | null;
|
||||
method: string | null;
|
||||
port: number | null;
|
||||
internalPort: number | null;
|
||||
enabled: boolean;
|
||||
health: string | null;
|
||||
site: {
|
||||
siteId: number;
|
||||
type: string;
|
||||
subnet: string | null;
|
||||
exitNodeId: number | null;
|
||||
online: boolean;
|
||||
};
|
||||
};
|
||||
|
||||
export async function getTraefikConfig(
|
||||
exitNodeId: number,
|
||||
siteTypes: string[],
|
||||
filterOutNamespaceDomains = false,
|
||||
generateLoginPageRouters = false,
|
||||
allowRawResources = true
|
||||
filterOutNamespaceDomains = false, // UNUSED BUT USED IN PRIVATE
|
||||
generateLoginPageRouters = false, // UNUSED BUT USED IN PRIVATE
|
||||
allowRawResources = true,
|
||||
allowMaintenancePage = true, // UNUSED BUT USED IN PRIVATE
|
||||
): Promise<any> {
|
||||
// Define extended target type with site information
|
||||
type TargetWithSite = Target & {
|
||||
site: {
|
||||
siteId: number;
|
||||
type: string;
|
||||
subnet: string | null;
|
||||
exitNodeId: number | null;
|
||||
online: boolean;
|
||||
};
|
||||
};
|
||||
|
||||
// Get resources with their targets and sites in a single optimized query
|
||||
// Start from sites on this exit node, then join to targets and resources
|
||||
const resourcesWithTargetsAndSites = await db
|
||||
@@ -59,6 +68,7 @@ export async function getTraefikConfig(
|
||||
headers: resources.headers,
|
||||
proxyProtocol: resources.proxyProtocol,
|
||||
proxyProtocolVersion: resources.proxyProtocolVersion,
|
||||
|
||||
// Target fields
|
||||
targetId: targets.targetId,
|
||||
targetEnabled: targets.enabled,
|
||||
@@ -103,10 +113,6 @@ export async function getTraefikConfig(
|
||||
eq(sites.type, "local")
|
||||
)
|
||||
),
|
||||
or(
|
||||
ne(targetHealthCheck.hcHealth, "unhealthy"), // Exclude unhealthy targets
|
||||
isNull(targetHealthCheck.hcHealth) // Include targets with no health check record
|
||||
),
|
||||
inArray(sites.type, siteTypes),
|
||||
allowRawResources
|
||||
? isNotNull(resources.http) // ignore the http check if allow_raw_resources is true
|
||||
@@ -184,7 +190,6 @@ export async function getTraefikConfig(
|
||||
});
|
||||
}
|
||||
|
||||
// Add target with its associated site data
|
||||
resourcesMap.get(key).targets.push({
|
||||
resourceId: row.resourceId,
|
||||
targetId: row.targetId,
|
||||
@@ -193,6 +198,7 @@ export async function getTraefikConfig(
|
||||
port: row.port,
|
||||
internalPort: row.internalPort,
|
||||
enabled: row.targetEnabled,
|
||||
health: row.hcHealth,
|
||||
site: {
|
||||
siteId: row.siteId,
|
||||
type: row.siteType,
|
||||
@@ -222,7 +228,7 @@ export async function getTraefikConfig(
|
||||
|
||||
// get the key and the resource
|
||||
for (const [key, resource] of resourcesMap.entries()) {
|
||||
const targets = resource.targets;
|
||||
const targets = resource.targets as TargetWithSite[];
|
||||
|
||||
const routerName = `${key}-${resource.name}-router`;
|
||||
const serviceName = `${key}-${resource.name}-service`;
|
||||
@@ -470,17 +476,21 @@ export async function getTraefikConfig(
|
||||
// RECEIVE BANDWIDTH ENDPOINT.
|
||||
|
||||
// TODO: HOW TO HANDLE ^^^^^^ BETTER
|
||||
const anySitesOnline = (
|
||||
targets as TargetWithSite[]
|
||||
).some((target: TargetWithSite) => target.site.online);
|
||||
const anySitesOnline = targets.some(
|
||||
(target) => target.site.online
|
||||
);
|
||||
|
||||
return (
|
||||
(targets as TargetWithSite[])
|
||||
.filter((target: TargetWithSite) => {
|
||||
targets
|
||||
.filter((target) => {
|
||||
if (!target.enabled) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (target.health == "unhealthy") {
|
||||
return false;
|
||||
}
|
||||
|
||||
// If any sites are online, exclude offline sites
|
||||
if (anySitesOnline && !target.site.online) {
|
||||
return false;
|
||||
@@ -508,7 +518,7 @@ export async function getTraefikConfig(
|
||||
}
|
||||
return true;
|
||||
})
|
||||
.map((target: TargetWithSite) => {
|
||||
.map((target) => {
|
||||
if (
|
||||
target.site.type === "local" ||
|
||||
target.site.type === "wireguard"
|
||||
@@ -594,12 +604,12 @@ export async function getTraefikConfig(
|
||||
loadBalancer: {
|
||||
servers: (() => {
|
||||
// Check if any sites are online
|
||||
const anySitesOnline = (
|
||||
targets as TargetWithSite[]
|
||||
).some((target: TargetWithSite) => target.site.online);
|
||||
const anySitesOnline = targets.some(
|
||||
(target) => target.site.online
|
||||
);
|
||||
|
||||
return (targets as TargetWithSite[])
|
||||
.filter((target: TargetWithSite) => {
|
||||
return targets
|
||||
.filter((target) => {
|
||||
if (!target.enabled) {
|
||||
return false;
|
||||
}
|
||||
@@ -626,7 +636,7 @@ export async function getTraefikConfig(
|
||||
}
|
||||
return true;
|
||||
})
|
||||
.map((target: TargetWithSite) => {
|
||||
.map((target) => {
|
||||
if (
|
||||
target.site.type === "local" ||
|
||||
target.site.type === "wireguard"
|
||||
|
||||
22
server/private/lib/blueprints/MaintenanceSchema.ts
Normal file
@@ -0,0 +1,22 @@
/*
 * This file is part of a proprietary work.
 *
 * Copyright (c) 2025 Fossorial, Inc.
 * All rights reserved.
 *
 * This file is licensed under the Fossorial Commercial License.
 * You may not use this file except in compliance with the License.
 * Unauthorized use, copying, modification, or distribution is strictly prohibited.
 *
 * This file is not licensed under the AGPLv3.
 */

import { z } from "zod";

export const MaintenanceSchema = z.object({
    enabled: z.boolean().optional(),
    type: z.enum(["forced", "automatic"]).optional(),
    title: z.string().max(255).nullable().optional(),
    message: z.string().max(2000).nullable().optional(),
    "estimated-time": z.string().max(100).nullable().optional()
});
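An illustrative maintenance block (values invented) as it would be validated by this schema when a blueprint is applied:

const maintenance = MaintenanceSchema.parse({
    enabled: true,
    type: "automatic", // only shown once no healthy targets remain
    title: "Scheduled maintenance",
    message: "We are upgrading the database.",
    "estimated-time": "30 minutes"
});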
@@ -23,10 +23,10 @@ import {
} from "@server/lib/checkOrgAccessPolicy";
import { UserType } from "@server/types/UserTypes";

export async function enforceResourceSessionLength(
export function enforceResourceSessionLength(
    resourceSession: ResourceSession,
    org: Org
): Promise<{ valid: boolean; error?: string }> {
): { valid: boolean; error?: string } {
    if (org.maxSessionLengthHours) {
        const sessionIssuedAt = resourceSession.issuedAt; // may be null
        const maxSessionLengthHours = org.maxSessionLengthHours;
@@ -17,6 +17,7 @@ import logger from "@server/logger";
import { and, eq, lt } from "drizzle-orm";
import cache from "@server/lib/cache";
import { calculateCutoffTimestamp } from "@server/lib/cleanupLogs";
import { stripPortFromHost } from "@server/lib/ip";

async function getAccessDays(orgId: string): Promise<number> {
    // check cache first
@@ -116,19 +117,7 @@ export async function logAccessAudit(data: {
    }

    const clientIp = data.requestIp
        ? (() => {
              if (
                  data.requestIp.startsWith("[") &&
                  data.requestIp.includes("]")
              ) {
                  // if brackets are found, extract the IPv6 address from between the brackets
                  const ipv6Match = data.requestIp.match(/\[(.*?)\]/);
                  if (ipv6Match) {
                      return ipv6Match[1];
                  }
              }
              return data.requestIp;
          })()
        ? stripPortFromHost(data.requestIp)
        : undefined;

    const countryCode = data.requestIp
@@ -161,8 +150,11 @@ async function getCountryCodeFromIp(ip: string): Promise<string | undefined> {

    if (!cachedCountryCode) {
        cachedCountryCode = await getCountryCodeForIp(ip); // do it locally
        // Cache for longer since IP geolocation doesn't change frequently
        cache.set(geoIpCacheKey, cachedCountryCode, 300); // 5 minutes
        // Only cache successful lookups to avoid filling cache with undefined values
        if (cachedCountryCode) {
            // Cache for longer since IP geolocation doesn't change frequently
            cache.set(geoIpCacheKey, cachedCountryCode, 300); // 5 minutes
        }
    }

    return cachedCountryCode;
@@ -47,24 +47,33 @@ const redirectHttpsMiddlewareName = "redirect-to-https";
|
||||
const redirectToRootMiddlewareName = "redirect-to-root";
|
||||
const badgerMiddlewareName = "badger";
|
||||
|
||||
// Define extended target type with site information
|
||||
type TargetWithSite = Target & {
|
||||
resourceId: number;
|
||||
targetId: number;
|
||||
ip: string | null;
|
||||
method: string | null;
|
||||
port: number | null;
|
||||
internalPort: number | null;
|
||||
enabled: boolean;
|
||||
health: string | null;
|
||||
site: {
|
||||
siteId: number;
|
||||
type: string;
|
||||
subnet: string | null;
|
||||
exitNodeId: number | null;
|
||||
online: boolean;
|
||||
};
|
||||
};
|
||||
|
||||
export async function getTraefikConfig(
|
||||
exitNodeId: number,
|
||||
siteTypes: string[],
|
||||
filterOutNamespaceDomains = false,
|
||||
generateLoginPageRouters = false,
|
||||
allowRawResources = true
|
||||
allowRawResources = true,
|
||||
allowMaintenancePage = true
|
||||
): Promise<any> {
|
||||
// Define extended target type with site information
|
||||
type TargetWithSite = Target & {
|
||||
site: {
|
||||
siteId: number;
|
||||
type: string;
|
||||
subnet: string | null;
|
||||
exitNodeId: number | null;
|
||||
online: boolean;
|
||||
};
|
||||
};
|
||||
|
||||
// Get resources with their targets and sites in a single optimized query
|
||||
// Start from sites on this exit node, then join to targets and resources
|
||||
const resourcesWithTargetsAndSites = await db
|
||||
@@ -87,6 +96,13 @@ export async function getTraefikConfig(
|
||||
headers: resources.headers,
|
||||
proxyProtocol: resources.proxyProtocol,
|
||||
proxyProtocolVersion: resources.proxyProtocolVersion,
|
||||
|
||||
maintenanceModeEnabled: resources.maintenanceModeEnabled,
|
||||
maintenanceModeType: resources.maintenanceModeType,
|
||||
maintenanceTitle: resources.maintenanceTitle,
|
||||
maintenanceMessage: resources.maintenanceMessage,
|
||||
maintenanceEstimatedTime: resources.maintenanceEstimatedTime,
|
||||
|
||||
// Target fields
|
||||
targetId: targets.targetId,
|
||||
targetEnabled: targets.enabled,
|
||||
@@ -140,10 +156,6 @@ export async function getTraefikConfig(
|
||||
sql`(${build != "saas" ? 1 : 0} = 1)` // Dont allow undefined local sites in cloud
|
||||
)
|
||||
),
|
||||
or(
|
||||
ne(targetHealthCheck.hcHealth, "unhealthy"), // Exclude unhealthy targets
|
||||
isNull(targetHealthCheck.hcHealth) // Include targets with no health check record
|
||||
),
|
||||
inArray(sites.type, siteTypes),
|
||||
allowRawResources
|
||||
? isNotNull(resources.http) // ignore the http check if allow_raw_resources is true
|
||||
@@ -220,7 +232,13 @@ export async function getTraefikConfig(
|
||||
rewritePathType: row.rewritePathType,
|
||||
priority: priority, // may be null, we fallback later
|
||||
domainCertResolver: row.domainCertResolver,
|
||||
preferWildcardCert: row.preferWildcardCert
|
||||
preferWildcardCert: row.preferWildcardCert,
|
||||
|
||||
maintenanceModeEnabled: row.maintenanceModeEnabled,
|
||||
maintenanceModeType: row.maintenanceModeType,
|
||||
maintenanceTitle: row.maintenanceTitle,
|
||||
maintenanceMessage: row.maintenanceMessage,
|
||||
maintenanceEstimatedTime: row.maintenanceEstimatedTime
|
||||
});
|
||||
}
|
||||
|
||||
@@ -233,6 +251,7 @@ export async function getTraefikConfig(
|
||||
port: row.port,
|
||||
internalPort: row.internalPort,
|
||||
enabled: row.targetEnabled,
|
||||
health: row.hcHealth,
|
||||
site: {
|
||||
siteId: row.siteId,
|
||||
type: row.siteType,
|
||||
@@ -278,7 +297,7 @@ export async function getTraefikConfig(
|
||||
|
||||
// get the key and the resource
|
||||
for (const [key, resource] of resourcesMap.entries()) {
|
||||
const targets = resource.targets;
|
||||
const targets = resource.targets as TargetWithSite[];
|
||||
|
||||
const routerName = `${key}-${resource.name}-router`;
|
||||
const serviceName = `${key}-${resource.name}-service`;
|
||||
@@ -308,20 +327,37 @@ export async function getTraefikConfig(
|
||||
config_output.http.services = {};
|
||||
}
|
||||
|
||||
const domainParts = fullDomain.split(".");
|
||||
let wildCard;
|
||||
if (domainParts.length <= 2) {
|
||||
wildCard = `*.${domainParts.join(".")}`;
|
||||
const additionalMiddlewares =
|
||||
config.getRawConfig().traefik.additional_middlewares || [];
|
||||
|
||||
const routerMiddlewares = [
|
||||
badgerMiddlewareName,
|
||||
...additionalMiddlewares
|
||||
];
|
||||
|
||||
let rule = `Host(\`${fullDomain}\`)`;
|
||||
|
||||
// priority logic
|
||||
let priority: number;
|
||||
if (resource.priority && resource.priority != 100) {
|
||||
priority = resource.priority;
|
||||
} else {
|
||||
wildCard = `*.${domainParts.slice(1).join(".")}`;
|
||||
priority = 100;
|
||||
if (resource.path && resource.pathMatchType) {
|
||||
priority += 10;
|
||||
if (resource.pathMatchType === "exact") {
|
||||
priority += 5;
|
||||
} else if (resource.pathMatchType === "prefix") {
|
||||
priority += 3;
|
||||
} else if (resource.pathMatchType === "regex") {
|
||||
priority += 2;
|
||||
}
|
||||
if (resource.path === "/") {
|
||||
priority = 1; // lowest for catch-all
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (!resource.subdomain) {
|
||||
wildCard = resource.fullDomain;
|
||||
}
|
||||
|
||||
const configDomain = config.getDomain(resource.domainId);
|
||||
|
||||
let tls = {};
|
||||
if (!privateConfig.getRawPrivateConfig().flags.use_pangolin_dns) {
|
||||
const domainParts = fullDomain.split(".");
|
||||
@@ -387,13 +423,117 @@ export async function getTraefikConfig(
|
||||
}
|
||||
}
|
||||
|
||||
const additionalMiddlewares =
|
||||
config.getRawConfig().traefik.additional_middlewares || [];
|
||||
if (resource.ssl) {
|
||||
config_output.http.routers![routerName + "-redirect"] = {
|
||||
entryPoints: [
|
||||
config.getRawConfig().traefik.http_entrypoint
|
||||
],
|
||||
middlewares: [redirectHttpsMiddlewareName],
|
||||
service: serviceName,
|
||||
rule: rule,
|
||||
priority: priority
|
||||
};
|
||||
}
|
||||
|
||||
const routerMiddlewares = [
|
||||
badgerMiddlewareName,
|
||||
...additionalMiddlewares
|
||||
];
|
||||
const availableServers = targets.filter((target) => {
|
||||
if (!target.enabled) return false;
|
||||
|
||||
if (!target.site.online) return false;
|
||||
|
||||
if (target.health == "unhealthy") return false;
|
||||
|
||||
return true;
|
||||
});
|
||||
|
||||
const hasHealthyServers = availableServers.length > 0;
|
||||
|
||||
let showMaintenancePage = false;
|
||||
if (resource.maintenanceModeEnabled) {
|
||||
if (resource.maintenanceModeType === "forced") {
|
||||
showMaintenancePage = true;
|
||||
// logger.debug(
|
||||
// `Resource ${resource.name} (${fullDomain}) is in FORCED maintenance mode`
|
||||
// );
|
||||
} else if (resource.maintenanceModeType === "automatic") {
|
||||
showMaintenancePage = !hasHealthyServers;
|
||||
if (showMaintenancePage) {
|
||||
logger.warn(
|
||||
`Resource ${resource.name} (${fullDomain}) has no healthy servers - showing maintenance page (AUTOMATIC mode)`
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (showMaintenancePage && allowMaintenancePage) {
|
||||
const maintenanceServiceName = `${key}-maintenance-service`;
|
||||
const maintenanceRouterName = `${key}-maintenance-router`;
|
||||
const rewriteMiddlewareName = `${key}-maintenance-rewrite`;
|
||||
|
||||
const entrypointHttp =
|
||||
config.getRawConfig().traefik.http_entrypoint;
|
||||
const entrypointHttps =
|
||||
config.getRawConfig().traefik.https_entrypoint;
|
||||
|
||||
const fullDomain = resource.fullDomain;
|
||||
const domainParts = fullDomain.split(".");
|
||||
const wildCard = resource.subdomain
|
||||
? `*.${domainParts.slice(1).join(".")}`
|
||||
: fullDomain;
|
||||
|
||||
const maintenancePort = config.getRawConfig().server.next_port;
|
||||
const maintenanceHost =
|
||||
config.getRawConfig().server.internal_hostname;
|
||||
|
||||
config_output.http.services[maintenanceServiceName] = {
|
||||
loadBalancer: {
|
||||
servers: [
|
||||
{
|
||||
url: `http://${maintenanceHost}:${maintenancePort}`
|
||||
}
|
||||
],
|
||||
passHostHeader: true
|
||||
}
|
||||
};
|
||||
|
||||
// middleware to rewrite path to /maintenance-screen
|
||||
if (!config_output.http.middlewares) {
|
||||
config_output.http.middlewares = {};
|
||||
}
|
||||
|
||||
config_output.http.middlewares[rewriteMiddlewareName] = {
|
||||
replacePathRegex: {
|
||||
regex: "^/(.*)",
|
||||
replacement: "/maintenance-screen"
|
||||
}
|
||||
};
|
||||
|
||||
config_output.http.routers[maintenanceRouterName] = {
|
||||
entryPoints: [
|
||||
resource.ssl ? entrypointHttps : entrypointHttp
|
||||
],
|
||||
service: maintenanceServiceName,
|
||||
middlewares: [rewriteMiddlewareName],
|
||||
rule: rule,
|
||||
priority: 2000,
|
||||
...(resource.ssl ? { tls } : {})
|
||||
};
|
||||
|
||||
// Router to allow Next.js assets to load without rewrite
|
||||
config_output.http.routers[`${maintenanceRouterName}-assets`] =
|
||||
{
|
||||
entryPoints: [
|
||||
resource.ssl ? entrypointHttps : entrypointHttp
|
||||
],
|
||||
service: maintenanceServiceName,
|
||||
rule: `Host(\`${fullDomain}\`) && (PathPrefix(\`/_next\`) || PathRegexp(\`^/__nextjs*\`))`,
|
||||
priority: 2001,
|
||||
...(resource.ssl ? { tls } : {})
|
||||
};
|
||||
|
||||
// logger.info(`Maintenance mode active for ${fullDomain}`);
|
||||
|
||||
continue;
|
||||
}
|
||||
|
||||
// Handle path rewriting middleware
if (
@@ -485,29 +625,6 @@ export async function getTraefikConfig(
}
}

let rule = `Host(\`${fullDomain}\`)`;

// priority logic
let priority: number;
if (resource.priority && resource.priority != 100) {
priority = resource.priority;
} else {
priority = 100;
if (resource.path && resource.pathMatchType) {
priority += 10;
if (resource.pathMatchType === "exact") {
priority += 5;
} else if (resource.pathMatchType === "prefix") {
priority += 3;
} else if (resource.pathMatchType === "regex") {
priority += 2;
}
if (resource.path === "/") {
priority = 1; // lowest for catch-all
}
}
}
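Restating the priority branch above as a standalone helper makes the resulting values easy to eyeball; this is an illustrative sketch, not code from the repository:

type PathMatchType = "exact" | "prefix" | "regex";

function computePriority(resource: {
    priority?: number | null;
    path?: string | null;
    pathMatchType?: PathMatchType | null;
}): number {
    // Explicit non-default priority wins outright.
    if (resource.priority && resource.priority != 100) {
        return resource.priority;
    }
    let priority = 100;
    if (resource.path && resource.pathMatchType) {
        priority += 10;
        if (resource.pathMatchType === "exact") priority += 5;
        else if (resource.pathMatchType === "prefix") priority += 3;
        else if (resource.pathMatchType === "regex") priority += 2;
        if (resource.path === "/") priority = 1; // lowest for catch-all
    }
    return priority;
}

computePriority({});                                        // 100 (plain host rule)
computePriority({ path: "/api", pathMatchType: "exact" });  // 115
computePriority({ path: "/api", pathMatchType: "prefix" }); // 113
computePriority({ path: "/", pathMatchType: "prefix" });    // 1 (catch-all)
computePriority({ priority: 250 });                         // 250 (explicit override)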
|
||||
|
||||
if (resource.path && resource.pathMatchType) {
|
||||
//priority += 1;
|
||||
// add path to rule based on match type
|
||||
@@ -538,18 +655,6 @@ export async function getTraefikConfig(
|
||||
...(resource.ssl ? { tls } : {})
|
||||
};
|
||||
|
||||
if (resource.ssl) {
|
||||
config_output.http.routers![routerName + "-redirect"] = {
|
||||
entryPoints: [
|
||||
config.getRawConfig().traefik.http_entrypoint
|
||||
],
|
||||
middlewares: [redirectHttpsMiddlewareName],
|
||||
service: serviceName,
|
||||
rule: rule,
|
||||
priority: priority
|
||||
};
|
||||
}
|
||||
|
||||
config_output.http.services![serviceName] = {
|
||||
loadBalancer: {
|
||||
servers: (() => {
|
||||
@@ -559,17 +664,21 @@ export async function getTraefikConfig(
|
||||
// RECEIVE BANDWIDTH ENDPOINT.
|
||||
|
||||
// TODO: HOW TO HANDLE ^^^^^^ BETTER
|
||||
const anySitesOnline = (
|
||||
targets as TargetWithSite[]
|
||||
).some((target: TargetWithSite) => target.site.online);
|
||||
const anySitesOnline = targets.some(
|
||||
(target) => target.site.online
|
||||
);
|
||||
|
||||
return (
|
||||
(targets as TargetWithSite[])
|
||||
.filter((target: TargetWithSite) => {
|
||||
targets
|
||||
.filter((target) => {
|
||||
if (!target.enabled) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (target.health == "unhealthy") {
|
||||
return false;
|
||||
}
|
||||
|
||||
// If any sites are online, exclude offline sites
|
||||
if (anySitesOnline && !target.site.online) {
|
||||
return false;
|
||||
@@ -597,7 +706,7 @@ export async function getTraefikConfig(
|
||||
}
|
||||
return true;
|
||||
})
|
||||
.map((target: TargetWithSite) => {
|
||||
.map((target) => {
|
||||
if (
|
||||
target.site.type === "local" ||
|
||||
target.site.type === "wireguard"
|
||||
@@ -683,12 +792,12 @@ export async function getTraefikConfig(
|
||||
loadBalancer: {
|
||||
servers: (() => {
|
||||
// Check if any sites are online
|
||||
const anySitesOnline = (
|
||||
targets as TargetWithSite[]
|
||||
).some((target: TargetWithSite) => target.site.online);
|
||||
const anySitesOnline = targets.some(
|
||||
(target) => target.site.online
|
||||
);
|
||||
|
||||
return (targets as TargetWithSite[])
|
||||
.filter((target: TargetWithSite) => {
|
||||
return targets
|
||||
.filter((target) => {
|
||||
if (!target.enabled) {
|
||||
return false;
|
||||
}
|
||||
@@ -715,7 +824,7 @@ export async function getTraefikConfig(
|
||||
}
|
||||
return true;
|
||||
})
|
||||
.map((target: TargetWithSite) => {
|
||||
.map((target) => {
|
||||
if (
|
||||
target.site.type === "local" ||
|
||||
target.site.type === "wireguard"
|
||||
|
||||
@@ -36,8 +36,11 @@ import {
|
||||
LoginPage,
|
||||
resourceHeaderAuth,
|
||||
ResourceHeaderAuth,
|
||||
resourceHeaderAuthExtendedCompatibility,
|
||||
ResourceHeaderAuthExtendedCompatibility,
|
||||
orgs,
|
||||
requestAuditLog
|
||||
requestAuditLog,
|
||||
Org
|
||||
} from "@server/db";
|
||||
import {
|
||||
resources,
|
||||
@@ -76,6 +79,8 @@ import { checkExitNodeOrg, resolveExitNodes } from "#private/lib/exitNodes";
|
||||
import { maxmindLookup } from "@server/db/maxmind";
|
||||
import { verifyResourceAccessToken } from "@server/auth/verifyResourceAccessToken";
|
||||
import semver from "semver";
|
||||
import { maxmindAsnLookup } from "@server/db/maxmindAsn";
|
||||
import { checkOrgAccessPolicy } from "@server/lib/checkOrgAccessPolicy";
|
||||
|
||||
// Zod schemas for request validation
|
||||
const getResourceByDomainParamsSchema = z.strictObject({
|
||||
@@ -91,6 +96,12 @@ const getUserOrgRoleParamsSchema = z.strictObject({
|
||||
orgId: z.string().min(1, "Organization ID is required")
|
||||
});
|
||||
|
||||
const getUserOrgSessionVerifySchema = z.strictObject({
|
||||
userId: z.string().min(1, "User ID is required"),
|
||||
orgId: z.string().min(1, "Organization ID is required"),
|
||||
sessionId: z.string().min(1, "Session ID is required")
|
||||
});
|
||||
|
||||
const getRoleResourceAccessParamsSchema = z.strictObject({
|
||||
roleId: z
|
||||
.string()
|
||||
@@ -174,6 +185,8 @@ export type ResourceWithAuth = {
|
||||
pincode: ResourcePincode | null;
|
||||
password: ResourcePassword | null;
|
||||
headerAuth: ResourceHeaderAuth | null;
|
||||
headerAuthExtendedCompatibility: ResourceHeaderAuthExtendedCompatibility | null;
|
||||
org: Org
|
||||
};
|
||||
|
||||
export type UserSessionWithUser = {
|
||||
@@ -234,7 +247,8 @@ hybridRouter.get(
|
||||
["newt", "local", "wireguard"], // Allow them to use all the site types
|
||||
true, // But don't allow domain namespace resources
|
||||
false, // Don't include login pages,
|
||||
true // allow raw resources
|
||||
true, // allow raw resources
|
||||
false // don't generate maintenance page
|
||||
);
|
||||
|
||||
return response(res, {
|
||||
@@ -497,6 +511,14 @@ hybridRouter.get(
|
||||
resourceHeaderAuth,
|
||||
eq(resourceHeaderAuth.resourceId, resources.resourceId)
|
||||
)
|
||||
.leftJoin(
|
||||
resourceHeaderAuthExtendedCompatibility,
|
||||
eq(
|
||||
resourceHeaderAuthExtendedCompatibility.resourceId,
|
||||
resources.resourceId
|
||||
)
|
||||
)
|
||||
.innerJoin(orgs, eq(orgs.orgId, resources.orgId))
|
||||
.where(eq(resources.fullDomain, domain))
|
||||
.limit(1);
|
||||
|
||||
@@ -529,7 +551,10 @@ hybridRouter.get(
|
||||
resource: result.resources,
|
||||
pincode: result.resourcePincode,
|
||||
password: result.resourcePassword,
|
||||
headerAuth: result.resourceHeaderAuth
|
||||
headerAuth: result.resourceHeaderAuth,
|
||||
headerAuthExtendedCompatibility:
|
||||
result.resourceHeaderAuthExtendedCompatibility,
|
||||
org: result.orgs
|
||||
};
|
||||
|
||||
return response<ResourceWithAuth>(res, {
|
||||
@@ -593,6 +618,16 @@ hybridRouter.get(
|
||||
)
|
||||
.limit(1);
|
||||
|
||||
if (!result) {
|
||||
return response<LoginPage | null>(res, {
|
||||
data: null,
|
||||
success: true,
|
||||
error: false,
|
||||
message: "Login page not found",
|
||||
status: HttpCode.OK
|
||||
});
|
||||
}
|
||||
|
||||
if (
|
||||
await checkExitNodeOrg(
|
||||
remoteExitNode.exitNodeId,
|
||||
@@ -608,16 +643,6 @@ hybridRouter.get(
|
||||
);
|
||||
}
|
||||
|
||||
if (!result) {
|
||||
return response<LoginPage | null>(res, {
|
||||
data: null,
|
||||
success: true,
|
||||
error: false,
|
||||
message: "Login page not found",
|
||||
status: HttpCode.OK
|
||||
});
|
||||
}
|
||||
|
||||
return response<LoginPage>(res, {
|
||||
data: result.loginPage,
|
||||
success: true,
|
||||
@@ -809,6 +834,69 @@ hybridRouter.get(
|
||||
}
|
||||
);
|
||||
|
||||
// Verify a user's org session against the org access policy
|
||||
hybridRouter.get(
|
||||
"/user/:userId/org/:orgId/session/:sessionId/verify",
|
||||
async (req: Request, res: Response, next: NextFunction) => {
|
||||
try {
|
||||
const parsedParams = getUserOrgSessionVerifySchema.safeParse(
|
||||
req.params
|
||||
);
|
||||
if (!parsedParams.success) {
|
||||
return next(
|
||||
createHttpError(
|
||||
HttpCode.BAD_REQUEST,
|
||||
fromError(parsedParams.error).toString()
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
const { userId, orgId, sessionId } = parsedParams.data;
|
||||
const remoteExitNode = req.remoteExitNode;
|
||||
|
||||
if (!remoteExitNode || !remoteExitNode.exitNodeId) {
|
||||
return next(
|
||||
createHttpError(
|
||||
HttpCode.BAD_REQUEST,
|
||||
"Remote exit node not found"
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
if (await checkExitNodeOrg(remoteExitNode.exitNodeId, orgId)) {
|
||||
return next(
|
||||
createHttpError(
|
||||
HttpCode.UNAUTHORIZED,
|
||||
"User is not authorized to access this organization"
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
const accessPolicy = await checkOrgAccessPolicy({
|
||||
orgId,
|
||||
userId,
|
||||
sessionId
|
||||
});
|
||||
|
||||
return response(res, {
|
||||
data: accessPolicy,
|
||||
success: true,
|
||||
error: false,
|
||||
message: "User org access policy retrieved successfully",
|
||||
status: HttpCode.OK
|
||||
});
|
||||
} catch (error) {
|
||||
logger.error(error);
|
||||
return next(
|
||||
createHttpError(
|
||||
HttpCode.INTERNAL_SERVER_ERROR,
|
||||
"Failed to get user org role"
|
||||
)
|
||||
);
|
||||
}
|
||||
}
|
||||
);
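A remote exit node would exercise this route roughly as follows. The host, mount path, and auth header are placeholders, and the returned `data` is whatever checkOrgAccessPolicy produces:

const verifyOrgSession = async (
    userId: string,
    orgId: string,
    sessionId: string
) => {
    // Placeholder host and mount path; real deployments differ.
    const res = await fetch(
        `https://pangolin.example.com/hybrid/user/${userId}/org/${orgId}/session/${sessionId}/verify`,
        { headers: { Authorization: "Bearer <exit-node-token>" } }
    );
    if (!res.ok) throw new Error(`Session verify failed: ${res.status}`);
    const body = await res.json();
    return body.data; // access policy object from checkOrgAccessPolicy
};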
|
||||
|
||||
// Check if role has access to resource
|
||||
hybridRouter.get(
|
||||
"/role/:roleId/resource/:resourceId/access",
|
||||
@@ -1238,6 +1326,70 @@ hybridRouter.get(
|
||||
}
|
||||
);
|
||||
|
||||
const asnIpLookupParamsSchema = z.object({
|
||||
ip: z.union([z.ipv4(), z.ipv6()])
|
||||
});
|
||||
hybridRouter.get(
|
||||
"/asnip/:ip",
|
||||
async (req: Request, res: Response, next: NextFunction) => {
|
||||
try {
|
||||
const parsedParams = asnIpLookupParamsSchema.safeParse(req.params);
|
||||
if (!parsedParams.success) {
|
||||
return next(
|
||||
createHttpError(
|
||||
HttpCode.BAD_REQUEST,
|
||||
fromError(parsedParams.error).toString()
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
const { ip } = parsedParams.data;
|
||||
|
||||
if (!maxmindAsnLookup) {
|
||||
return next(
|
||||
createHttpError(
|
||||
HttpCode.SERVICE_UNAVAILABLE,
|
||||
"ASNIP service is not available"
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
const result = maxmindAsnLookup.get(ip);
|
||||
|
||||
if (!result || !result.autonomous_system_number) {
|
||||
return next(
|
||||
createHttpError(
|
||||
HttpCode.NOT_FOUND,
|
||||
"ASNIP information not found"
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
const { autonomous_system_number } = result;
|
||||
|
||||
logger.debug(
|
||||
`ASNIP lookup successful for IP ${ip}: ${autonomous_system_number}`
|
||||
);
|
||||
|
||||
return response(res, {
|
||||
data: { asn: autonomous_system_number },
|
||||
success: true,
|
||||
error: false,
|
||||
message: "GeoIP lookup successful",
|
||||
status: HttpCode.OK
|
||||
});
|
||||
} catch (error) {
|
||||
logger.error(error);
|
||||
return next(
|
||||
createHttpError(
|
||||
HttpCode.INTERNAL_SERVER_ERROR,
|
||||
"Failed to validate resource session token"
|
||||
)
|
||||
);
|
||||
}
|
||||
}
|
||||
);
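Exercising the lookup route is a one-liner; the host and mount path below are placeholders:

// Illustrative call; responds with { asn: <number> } on success.
const asnRes = await fetch("https://pangolin.example.com/hybrid/asnip/8.8.8.8");
if (asnRes.ok) {
    const { data } = await asnRes.json();
    console.log(data.asn); // e.g. 15169 for an address announced by Google
}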
|
||||
|
||||
// GERBIL ROUTERS
|
||||
const getConfigSchema = z.object({
|
||||
publicKey: z.string(),
|
||||
|
||||
@@ -16,6 +16,7 @@ import * as auth from "#private/routers/auth";
|
||||
import * as orgIdp from "#private/routers/orgIdp";
|
||||
import * as billing from "#private/routers/billing";
|
||||
import * as license from "#private/routers/license";
|
||||
import * as resource from "#private/routers/resource";
|
||||
|
||||
import { verifySessionUserMiddleware } from "@server/middlewares";
|
||||
|
||||
@@ -37,3 +38,5 @@ internalRouter.post(
|
||||
);
|
||||
|
||||
internalRouter.get(`/license/status`, license.getLicenseStatus);
|
||||
|
||||
internalRouter.get("/maintenance/info", resource.getMaintenanceInfo);
|
||||
|
||||
@@ -40,6 +40,11 @@ async function query(orgId: string | undefined, fullDomain: string) {
|
||||
eq(loginPage.loginPageId, loginPageOrg.loginPageId)
|
||||
)
|
||||
.limit(1);
|
||||
|
||||
if (!res) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return {
|
||||
...res.loginPage,
|
||||
orgId: res.loginPageOrg.orgId
|
||||
@@ -65,6 +70,11 @@ async function query(orgId: string | undefined, fullDomain: string) {
|
||||
)
|
||||
)
|
||||
.limit(1);
|
||||
|
||||
if (!res) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return {
|
||||
...res,
|
||||
orgId: orgLink.orgId
|
||||
|
||||
@@ -48,6 +48,11 @@ async function query(orgId: string) {
|
||||
)
|
||||
)
|
||||
.limit(1);
|
||||
|
||||
if (!res) {
|
||||
return null;
|
||||
}
|
||||
|
||||
return {
|
||||
...res,
|
||||
orgId: orgLink.orgs.orgId,
|
||||
|
||||
113
server/private/routers/resource/getMaintenanceInfo.ts
Normal file
@@ -0,0 +1,113 @@
|
||||
/*
|
||||
* This file is part of a proprietary work.
|
||||
*
|
||||
* Copyright (c) 2025 Fossorial, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This file is licensed under the Fossorial Commercial License.
|
||||
* You may not use this file except in compliance with the License.
|
||||
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
|
||||
*
|
||||
* This file is not licensed under the AGPLv3.
|
||||
*/
|
||||
|
||||
import { Request, Response, NextFunction } from "express";
|
||||
import { z } from "zod";
|
||||
import { db } from "@server/db";
|
||||
import { resources } from "@server/db";
|
||||
import { eq } from "drizzle-orm";
|
||||
import response from "@server/lib/response";
|
||||
import HttpCode from "@server/types/HttpCode";
|
||||
import createHttpError from "http-errors";
|
||||
import { fromError } from "zod-validation-error";
|
||||
import logger from "@server/logger";
|
||||
import { OpenAPITags, registry } from "@server/openApi";
|
||||
import { GetMaintenanceInfoResponse } from "@server/routers/resource/types";
|
||||
|
||||
const getMaintenanceInfoSchema = z
|
||||
.object({
|
||||
fullDomain: z.string().min(1, "Domain is required")
|
||||
})
|
||||
.strict();
|
||||
|
||||
async function query(fullDomain: string) {
|
||||
const [res] = await db
|
||||
.select({
|
||||
resourceId: resources.resourceId,
|
||||
name: resources.name,
|
||||
fullDomain: resources.fullDomain,
|
||||
maintenanceModeEnabled: resources.maintenanceModeEnabled,
|
||||
maintenanceModeType: resources.maintenanceModeType,
|
||||
maintenanceTitle: resources.maintenanceTitle,
|
||||
maintenanceMessage: resources.maintenanceMessage,
|
||||
maintenanceEstimatedTime: resources.maintenanceEstimatedTime
|
||||
})
|
||||
.from(resources)
|
||||
.where(eq(resources.fullDomain, fullDomain))
|
||||
.limit(1);
|
||||
return res;
|
||||
}
|
||||
|
||||
registry.registerPath({
|
||||
method: "get",
|
||||
path: "/maintenance/info",
|
||||
description: "Get maintenance information for a resource by domain.",
|
||||
tags: [OpenAPITags.Resource],
|
||||
request: {
|
||||
query: z.object({
|
||||
fullDomain: z.string()
|
||||
})
|
||||
},
|
||||
responses: {
|
||||
200: {
|
||||
description: "Maintenance information retrieved successfully"
|
||||
},
|
||||
404: {
|
||||
description: "Resource not found"
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
export async function getMaintenanceInfo(
|
||||
req: Request,
|
||||
res: Response,
|
||||
next: NextFunction
|
||||
): Promise<any> {
|
||||
try {
|
||||
const parsedQuery = getMaintenanceInfoSchema.safeParse(req.query);
|
||||
if (!parsedQuery.success) {
|
||||
return next(
|
||||
createHttpError(
|
||||
HttpCode.BAD_REQUEST,
|
||||
fromError(parsedQuery.error).toString()
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
const { fullDomain } = parsedQuery.data;
|
||||
|
||||
const maintenanceInfo = await query(fullDomain);
|
||||
|
||||
if (!maintenanceInfo) {
|
||||
return next(
|
||||
createHttpError(HttpCode.NOT_FOUND, "Resource not found")
|
||||
);
|
||||
}
|
||||
|
||||
return response<GetMaintenanceInfoResponse>(res, {
|
||||
data: maintenanceInfo,
|
||||
success: true,
|
||||
error: false,
|
||||
message: "Maintenance information retrieved successfully",
|
||||
status: HttpCode.OK
|
||||
});
|
||||
} catch (error) {
|
||||
logger.error(error);
|
||||
return next(
|
||||
createHttpError(
|
||||
HttpCode.INTERNAL_SERVER_ERROR,
|
||||
"An error occurred while retrieving maintenance information"
|
||||
)
|
||||
);
|
||||
}
|
||||
}
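Consumed through the internal route registered earlier (internalRouter.get("/maintenance/info", ...)), a call looks roughly like this; the host and port are placeholders and the fields mirror the select in query():

// Illustrative request against the internal API.
const maintRes = await fetch(
    "http://localhost:3003/maintenance/info?fullDomain=app.example.com"
);
const { data: maintenanceInfo } = await maintRes.json();
// maintenanceInfo: { resourceId, name, fullDomain, maintenanceModeEnabled,
//                    maintenanceModeType, maintenanceTitle, maintenanceMessage,
//                    maintenanceEstimatedTime }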
|
||||
14
server/private/routers/resource/index.ts
Normal file
@@ -0,0 +1,14 @@
|
||||
/*
|
||||
* This file is part of a proprietary work.
|
||||
*
|
||||
* Copyright (c) 2025 Fossorial, Inc.
|
||||
* All rights reserved.
|
||||
*
|
||||
* This file is licensed under the Fossorial Commercial License.
|
||||
* You may not use this file except in compliance with the License.
|
||||
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
|
||||
*
|
||||
* This file is not licensed under the AGPLv3.
|
||||
*/
|
||||
|
||||
export * from "./getMaintenanceInfo";
|
||||
@@ -99,12 +99,13 @@ async function query(query: Q) {
|
||||
.where(and(baseConditions, not(isNull(requestAuditLog.location))))
|
||||
.groupBy(requestAuditLog.location)
|
||||
.orderBy(desc(totalQ))
|
||||
.limit(DISTINCT_LIMIT+1);
|
||||
.limit(DISTINCT_LIMIT + 1);
|
||||
|
||||
if (requestsPerCountry.length > DISTINCT_LIMIT) {
|
||||
// throw an error
|
||||
throw createHttpError(
|
||||
HttpCode.BAD_REQUEST,
|
||||
// todo: is this even possible?
|
||||
`Too many distinct countries. Please narrow your query.`
|
||||
);
|
||||
}
|
||||
|
||||
@@ -189,22 +189,22 @@ async function queryUniqueFilterAttributes(
|
||||
.selectDistinct({ actor: requestAuditLog.actor })
|
||||
.from(requestAuditLog)
|
||||
.where(baseConditions)
|
||||
.limit(DISTINCT_LIMIT+1),
|
||||
.limit(DISTINCT_LIMIT + 1),
|
||||
primaryDb
|
||||
.selectDistinct({ locations: requestAuditLog.location })
|
||||
.from(requestAuditLog)
|
||||
.where(baseConditions)
|
||||
.limit(DISTINCT_LIMIT+1),
|
||||
.limit(DISTINCT_LIMIT + 1),
|
||||
primaryDb
|
||||
.selectDistinct({ hosts: requestAuditLog.host })
|
||||
.from(requestAuditLog)
|
||||
.where(baseConditions)
|
||||
.limit(DISTINCT_LIMIT+1),
|
||||
.limit(DISTINCT_LIMIT + 1),
|
||||
primaryDb
|
||||
.selectDistinct({ paths: requestAuditLog.path })
|
||||
.from(requestAuditLog)
|
||||
.where(baseConditions)
|
||||
.limit(DISTINCT_LIMIT+1),
|
||||
.limit(DISTINCT_LIMIT + 1),
|
||||
primaryDb
|
||||
.selectDistinct({
|
||||
id: requestAuditLog.resourceId,
|
||||
@@ -216,18 +216,20 @@ async function queryUniqueFilterAttributes(
|
||||
eq(requestAuditLog.resourceId, resources.resourceId)
|
||||
)
|
||||
.where(baseConditions)
|
||||
.limit(DISTINCT_LIMIT+1)
|
||||
.limit(DISTINCT_LIMIT + 1)
|
||||
]);
|
||||
|
||||
if (
|
||||
uniqueActors.length > DISTINCT_LIMIT ||
|
||||
uniqueLocations.length > DISTINCT_LIMIT ||
|
||||
uniqueHosts.length > DISTINCT_LIMIT ||
|
||||
uniquePaths.length > DISTINCT_LIMIT ||
|
||||
uniqueResources.length > DISTINCT_LIMIT
|
||||
) {
|
||||
throw new Error("Too many distinct filter attributes to retrieve. Please refine your time range.");
|
||||
}
|
||||
// TODO: for stuff like the paths this is too restrictive so lets just show some of the paths and the user needs to
|
||||
// refine the time range to see what they need to see
|
||||
// if (
|
||||
// uniqueActors.length > DISTINCT_LIMIT ||
|
||||
// uniqueLocations.length > DISTINCT_LIMIT ||
|
||||
// uniqueHosts.length > DISTINCT_LIMIT ||
|
||||
// uniquePaths.length > DISTINCT_LIMIT ||
|
||||
// uniqueResources.length > DISTINCT_LIMIT
|
||||
// ) {
|
||||
// throw new Error("Too many distinct filter attributes to retrieve. Please refine your time range.");
|
||||
// }
|
||||
|
||||
return {
|
||||
actors: uniqueActors
|
||||
@@ -307,10 +309,12 @@ export async function queryRequestAuditLogs(
|
||||
} catch (error) {
|
||||
logger.error(error);
|
||||
// if the message is "Too many distinct filter attributes to retrieve. Please refine your time range.", return a 400 and the message
|
||||
if (error instanceof Error && error.message === "Too many distinct filter attributes to retrieve. Please refine your time range.") {
|
||||
return next(
|
||||
createHttpError(HttpCode.BAD_REQUEST, error.message)
|
||||
);
|
||||
if (
|
||||
error instanceof Error &&
|
||||
error.message ===
|
||||
"Too many distinct filter attributes to retrieve. Please refine your time range."
|
||||
) {
|
||||
return next(createHttpError(HttpCode.BAD_REQUEST, error.message));
|
||||
}
|
||||
return next(
|
||||
createHttpError(HttpCode.INTERNAL_SERVER_ERROR, "An error occurred")
|
||||
|
||||
@@ -10,6 +10,7 @@ import { eq, and, gt } from "drizzle-orm";
|
||||
import { createSession, generateSessionToken } from "@server/auth/sessions/app";
|
||||
import { encodeHexLowerCase } from "@oslojs/encoding";
|
||||
import { sha256 } from "@oslojs/crypto/sha2";
|
||||
import { stripPortFromHost } from "@server/lib/ip";
|
||||
|
||||
const paramsSchema = z.object({
|
||||
code: z.string().min(1, "Code is required")
|
||||
@@ -27,30 +28,6 @@ export type PollDeviceWebAuthResponse = {
|
||||
token?: string;
|
||||
};
|
||||
|
||||
// Helper function to extract IP from request (same as in startDeviceWebAuth)
|
||||
function extractIpFromRequest(req: Request): string | undefined {
|
||||
const ip = req.ip || req.socket.remoteAddress;
|
||||
if (!ip) {
|
||||
return undefined;
|
||||
}
|
||||
|
||||
// Handle IPv6 format [::1] or IPv4 format
|
||||
if (ip.startsWith("[") && ip.includes("]")) {
|
||||
const ipv6Match = ip.match(/\[(.*?)\]/);
|
||||
if (ipv6Match) {
|
||||
return ipv6Match[1];
|
||||
}
|
||||
}
|
||||
|
||||
// Handle IPv4 with port (split at last colon)
|
||||
const lastColonIndex = ip.lastIndexOf(":");
|
||||
if (lastColonIndex !== -1) {
|
||||
return ip.substring(0, lastColonIndex);
|
||||
}
|
||||
|
||||
return ip;
|
||||
}
|
||||
|
||||
export async function pollDeviceWebAuth(
|
||||
req: Request,
|
||||
res: Response,
|
||||
@@ -70,7 +47,7 @@ export async function pollDeviceWebAuth(
|
||||
try {
|
||||
const { code } = parsedParams.data;
|
||||
const now = Date.now();
|
||||
const requestIp = extractIpFromRequest(req);
|
||||
const requestIp = req.ip ? stripPortFromHost(req.ip) : undefined;
|
||||
|
||||
// Hash the code before querying
|
||||
const hashedCode = hashDeviceCode(code);
|
||||
|
||||
@@ -12,6 +12,7 @@ import { TimeSpan } from "oslo";
|
||||
import { maxmindLookup } from "@server/db/maxmind";
|
||||
import { encodeHexLowerCase } from "@oslojs/encoding";
|
||||
import { sha256 } from "@oslojs/crypto/sha2";
|
||||
import { stripPortFromHost } from "@server/lib/ip";
|
||||
|
||||
const bodySchema = z
|
||||
.object({
|
||||
@@ -39,30 +40,6 @@ function hashDeviceCode(code: string): string {
|
||||
return encodeHexLowerCase(sha256(new TextEncoder().encode(code)));
|
||||
}
|
||||
|
||||
// Helper function to extract IP from request
|
||||
function extractIpFromRequest(req: Request): string | undefined {
|
||||
const ip = req.ip;
|
||||
if (!ip) {
|
||||
return undefined;
|
||||
}
|
||||
|
||||
// Handle IPv6 format [::1] or IPv4 format
|
||||
if (ip.startsWith("[") && ip.includes("]")) {
|
||||
const ipv6Match = ip.match(/\[(.*?)\]/);
|
||||
if (ipv6Match) {
|
||||
return ipv6Match[1];
|
||||
}
|
||||
}
|
||||
|
||||
// Handle IPv4 with port (split at last colon)
|
||||
const lastColonIndex = ip.lastIndexOf(":");
|
||||
if (lastColonIndex !== -1) {
|
||||
return ip.substring(0, lastColonIndex);
|
||||
}
|
||||
|
||||
return ip;
|
||||
}
|
||||
|
||||
// Helper function to get city from IP (if available)
|
||||
async function getCityFromIp(ip: string): Promise<string | undefined> {
|
||||
try {
|
||||
@@ -112,7 +89,7 @@ export async function startDeviceWebAuth(
|
||||
const hashedCode = hashDeviceCode(code);
|
||||
|
||||
// Extract IP from request
|
||||
const ip = extractIpFromRequest(req);
|
||||
const ip = req.ip ? stripPortFromHost(req.ip) : undefined;
|
||||
|
||||
// Get city (optional, may return undefined)
|
||||
const city = ip ? await getCityFromIp(ip) : undefined;
|
||||
|
||||
@@ -19,6 +19,7 @@ import {
|
||||
import { SESSION_COOKIE_EXPIRES as RESOURCE_SESSION_COOKIE_EXPIRES } from "@server/auth/sessions/resource";
|
||||
import config from "@server/lib/config";
|
||||
import { response } from "@server/lib/response";
|
||||
import { stripPortFromHost } from "@server/lib/ip";
|
||||
|
||||
const exchangeSessionBodySchema = z.object({
|
||||
requestToken: z.string(),
|
||||
@@ -62,7 +63,7 @@ export async function exchangeSession(
|
||||
cleanHost = cleanHost.slice(0, -1 * matched.length);
|
||||
}
|
||||
|
||||
const clientIp = requestIp?.split(":")[0];
|
||||
const clientIp = requestIp ? stripPortFromHost(requestIp) : undefined;
|
||||
|
||||
const [resource] = await db
|
||||
.select()
|
||||
|
||||
@@ -3,6 +3,7 @@ import logger from "@server/logger";
|
||||
import { and, eq, lt } from "drizzle-orm";
|
||||
import cache from "@server/lib/cache";
|
||||
import { calculateCutoffTimestamp } from "@server/lib/cleanupLogs";
|
||||
import { stripPortFromHost } from "@server/lib/ip";
|
||||
|
||||
/**
|
||||
|
||||
@@ -10,7 +11,7 @@ Reasons:
|
||||
100 - Allowed by Rule
|
||||
101 - Allowed No Auth
|
||||
102 - Valid Access Token
|
||||
103 - Valid header auth
|
||||
103 - Valid Header Auth (HTTP Basic Auth)
|
||||
104 - Valid Pincode
|
||||
105 - Valid Password
|
||||
106 - Valid email
|
||||
@@ -48,27 +49,43 @@ const auditLogBuffer: Array<{
|
||||
|
||||
const BATCH_SIZE = 100; // Write to DB every 100 logs
|
||||
const BATCH_INTERVAL_MS = 5000; // Or every 5 seconds, whichever comes first
|
||||
const MAX_BUFFER_SIZE = 10000; // Prevent unbounded memory growth
|
||||
let flushTimer: NodeJS.Timeout | null = null;
|
||||
let isFlushInProgress = false;
|
||||
|
||||
/**
|
||||
* Flush buffered logs to database
|
||||
*/
|
||||
async function flushAuditLogs() {
|
||||
if (auditLogBuffer.length === 0) {
|
||||
if (auditLogBuffer.length === 0 || isFlushInProgress) {
|
||||
return;
|
||||
}
|
||||
|
||||
isFlushInProgress = true;
|
||||
|
||||
// Take all current logs and clear buffer
|
||||
const logsToWrite = auditLogBuffer.splice(0, auditLogBuffer.length);
|
||||
|
||||
try {
|
||||
// Batch insert all logs at once
|
||||
await db.insert(requestAuditLog).values(logsToWrite);
|
||||
// Batch insert logs in groups of 25 to avoid overwhelming the database
|
||||
const BATCH_DB_SIZE = 25;
|
||||
for (let i = 0; i < logsToWrite.length; i += BATCH_DB_SIZE) {
|
||||
const batch = logsToWrite.slice(i, i + BATCH_DB_SIZE);
|
||||
await db.insert(requestAuditLog).values(batch);
|
||||
}
|
||||
logger.debug(`Flushed ${logsToWrite.length} audit logs to database`);
|
||||
} catch (error) {
|
||||
logger.error("Error flushing audit logs:", error);
|
||||
// On error, we lose these logs - consider a fallback strategy if needed
|
||||
// (e.g., write to file, or put back in buffer with retry limit)
|
||||
} finally {
|
||||
isFlushInProgress = false;
|
||||
// If buffer filled up while we were flushing, flush again
|
||||
if (auditLogBuffer.length >= BATCH_SIZE) {
|
||||
flushAuditLogs().catch((err) =>
|
||||
logger.error("Error in follow-up flush:", err)
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -94,6 +111,10 @@ export async function shutdownAuditLogger() {
|
||||
clearTimeout(flushTimer);
|
||||
flushTimer = null;
|
||||
}
|
||||
// Force flush even if one is in progress by waiting and retrying
|
||||
while (isFlushInProgress) {
|
||||
await new Promise((resolve) => setTimeout(resolve, 100));
|
||||
}
|
||||
await flushAuditLogs();
|
||||
}
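The buffering scheme above — append in memory, flush when the batch size or interval is hit, guard against overlapping flushes, and insert in small chunks — can be distilled into a generic sketch (illustrative only, not the repository's implementation):

class BatchBuffer<T> {
    private buffer: T[] = [];
    private flushing = false;
    private timer: NodeJS.Timeout | null = null;

    constructor(
        private write: (chunk: T[]) => Promise<void>,
        private batchSize = 100,
        private intervalMs = 5000,
        private chunkSize = 25
    ) {}

    push(item: T) {
        this.buffer.push(item);
        if (this.buffer.length >= this.batchSize) {
            void this.flush();
        } else if (!this.timer) {
            this.timer = setTimeout(() => void this.flush(), this.intervalMs);
        }
    }

    async flush() {
        if (this.flushing || this.buffer.length === 0) return;
        this.flushing = true;
        if (this.timer) {
            clearTimeout(this.timer);
            this.timer = null;
        }
        // Take everything currently buffered and write it in small chunks.
        const pending = this.buffer.splice(0, this.buffer.length);
        try {
            for (let i = 0; i < pending.length; i += this.chunkSize) {
                await this.write(pending.slice(i, i + this.chunkSize));
            }
        } finally {
            this.flushing = false;
        }
    }
}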
|
||||
|
||||
@@ -208,28 +229,17 @@ export async function logRequestAudit(
|
||||
}
|
||||
|
||||
const clientIp = body.requestIp
|
||||
? (() => {
|
||||
if (
|
||||
body.requestIp.startsWith("[") &&
|
||||
body.requestIp.includes("]")
|
||||
) {
|
||||
// if brackets are found, extract the IPv6 address from between the brackets
|
||||
const ipv6Match = body.requestIp.match(/\[(.*?)\]/);
|
||||
if (ipv6Match) {
|
||||
return ipv6Match[1];
|
||||
}
|
||||
}
|
||||
|
||||
// ivp4
|
||||
// split at last colon
|
||||
const lastColonIndex = body.requestIp.lastIndexOf(":");
|
||||
if (lastColonIndex !== -1) {
|
||||
return body.requestIp.substring(0, lastColonIndex);
|
||||
}
|
||||
return body.requestIp;
|
||||
})()
|
||||
? stripPortFromHost(body.requestIp)
|
||||
: undefined;
|
||||
|
||||
// Prevent unbounded buffer growth - drop oldest entries if buffer is too large
|
||||
if (auditLogBuffer.length >= MAX_BUFFER_SIZE) {
|
||||
const dropped = auditLogBuffer.splice(0, BATCH_SIZE);
|
||||
logger.warn(
|
||||
`Audit log buffer exceeded max size (${MAX_BUFFER_SIZE}), dropped ${dropped.length} oldest entries`
|
||||
);
|
||||
}
|
||||
|
||||
// Add to buffer instead of writing directly to DB
|
||||
auditLogBuffer.push({
|
||||
timestamp,
|
||||
|
||||
@@ -14,13 +14,14 @@ import {
|
||||
Org,
|
||||
Resource,
|
||||
ResourceHeaderAuth,
|
||||
ResourceHeaderAuthExtendedCompatibility,
|
||||
ResourcePassword,
|
||||
ResourcePincode,
|
||||
ResourceRule,
|
||||
resourceSessions
|
||||
} from "@server/db";
|
||||
import config from "@server/lib/config";
|
||||
import { isIpInCidr } from "@server/lib/ip";
|
||||
import { isIpInCidr, stripPortFromHost } from "@server/lib/ip";
|
||||
import { response } from "@server/lib/response";
|
||||
import logger from "@server/logger";
|
||||
import HttpCode from "@server/types/HttpCode";
|
||||
@@ -29,6 +30,7 @@ import createHttpError from "http-errors";
|
||||
import { z } from "zod";
|
||||
import { fromError } from "zod-validation-error";
|
||||
import { getCountryCodeForIp } from "@server/lib/geoip";
|
||||
import { getAsnForIp } from "@server/lib/asn";
|
||||
import { getOrgTierData } from "#dynamic/lib/billing";
|
||||
import { TierId } from "@server/lib/billing/tiers";
|
||||
import { verifyPassword } from "@server/auth/password";
|
||||
@@ -38,6 +40,8 @@ import {
|
||||
} from "#dynamic/lib/checkOrgAccessPolicy";
|
||||
import { logRequestAudit } from "./logRequestAudit";
|
||||
import cache from "@server/lib/cache";
|
||||
import semver from "semver";
|
||||
import { APP_VERSION } from "@server/lib/consts";
|
||||
|
||||
const verifyResourceSessionSchema = z.object({
|
||||
sessions: z.record(z.string(), z.string()).optional(),
|
||||
@@ -49,7 +53,8 @@ const verifyResourceSessionSchema = z.object({
|
||||
path: z.string(),
|
||||
method: z.string(),
|
||||
tls: z.boolean(),
|
||||
requestIp: z.string().optional()
|
||||
requestIp: z.string().optional(),
|
||||
badgerVersion: z.string().optional()
|
||||
});
|
||||
|
||||
export type VerifyResourceSessionSchema = z.infer<
|
||||
@@ -65,8 +70,10 @@ type BasicUserData = {
|
||||
|
||||
export type VerifyUserResponse = {
|
||||
valid: boolean;
|
||||
headerAuthChallenged?: boolean;
|
||||
redirectUrl?: string;
|
||||
userData?: BasicUserData;
|
||||
pangolinVersion?: string;
|
||||
};
|
||||
|
||||
export async function verifyResourceSession(
|
||||
@@ -95,31 +102,15 @@ export async function verifyResourceSession(
|
||||
requestIp,
|
||||
path,
|
||||
headers,
|
||||
query
|
||||
query,
|
||||
badgerVersion
|
||||
} = parsedBody.data;
|
||||
|
||||
// Extract HTTP Basic Auth credentials if present
|
||||
const clientHeaderAuth = extractBasicAuth(headers);
|
||||
|
||||
const clientIp = requestIp
|
||||
? (() => {
|
||||
logger.debug("Request IP:", { requestIp });
|
||||
if (requestIp.startsWith("[") && requestIp.includes("]")) {
|
||||
// if brackets are found, extract the IPv6 address from between the brackets
|
||||
const ipv6Match = requestIp.match(/\[(.*?)\]/);
|
||||
if (ipv6Match) {
|
||||
return ipv6Match[1];
|
||||
}
|
||||
}
|
||||
|
||||
// ivp4
|
||||
// split at last colon
|
||||
const lastColonIndex = requestIp.lastIndexOf(":");
|
||||
if (lastColonIndex !== -1) {
|
||||
return requestIp.substring(0, lastColonIndex);
|
||||
}
|
||||
return requestIp;
|
||||
})()
|
||||
? stripPortFromHost(requestIp, badgerVersion)
|
||||
: undefined;
|
||||
|
||||
logger.debug("Client IP:", { clientIp });
|
||||
@@ -128,6 +119,8 @@ export async function verifyResourceSession(
|
||||
? await getCountryCodeFromIp(clientIp)
|
||||
: undefined;
|
||||
|
||||
const ipAsn = clientIp ? await getAsnFromIp(clientIp) : undefined;
|
||||
|
||||
let cleanHost = host;
|
||||
// if the host ends with :port, strip it
|
||||
if (cleanHost.match(/:[0-9]{1,5}$/)) {
|
||||
@@ -142,6 +135,7 @@ export async function verifyResourceSession(
|
||||
pincode: ResourcePincode | null;
|
||||
password: ResourcePassword | null;
|
||||
headerAuth: ResourceHeaderAuth | null;
|
||||
headerAuthExtendedCompatibility: ResourceHeaderAuthExtendedCompatibility | null;
|
||||
org: Org;
|
||||
}
|
||||
| undefined = cache.get(resourceCacheKey);
|
||||
@@ -171,7 +165,13 @@ export async function verifyResourceSession(
|
||||
cache.set(resourceCacheKey, resourceData, 5);
|
||||
}
|
||||
|
||||
const { resource, pincode, password, headerAuth } = resourceData;
|
||||
const {
|
||||
resource,
|
||||
pincode,
|
||||
password,
|
||||
headerAuth,
|
||||
headerAuthExtendedCompatibility
|
||||
} = resourceData;
|
||||
|
||||
if (!resource) {
|
||||
logger.debug(`Resource not found ${cleanHost}`);
|
||||
@@ -216,7 +216,8 @@ export async function verifyResourceSession(
|
||||
resource.resourceId,
|
||||
clientIp,
|
||||
path,
|
||||
ipCC
|
||||
ipCC,
|
||||
ipAsn
|
||||
);
|
||||
|
||||
if (action == "ACCEPT") {
|
||||
@@ -450,7 +451,8 @@ export async function verifyResourceSession(
|
||||
!sso &&
|
||||
!pincode &&
|
||||
!password &&
|
||||
!resource.emailWhitelistEnabled
|
||||
!resource.emailWhitelistEnabled &&
|
||||
!headerAuthExtendedCompatibility?.extendedCompatibilityIsActivated
|
||||
) {
|
||||
logRequestAudit(
|
||||
{
|
||||
@@ -471,7 +473,8 @@ export async function verifyResourceSession(
|
||||
!sso &&
|
||||
!pincode &&
|
||||
!password &&
|
||||
!resource.emailWhitelistEnabled
|
||||
!resource.emailWhitelistEnabled &&
|
||||
!headerAuthExtendedCompatibility?.extendedCompatibilityIsActivated
|
||||
) {
|
||||
logRequestAudit(
|
||||
{
|
||||
@@ -557,7 +560,7 @@ export async function verifyResourceSession(
|
||||
}
|
||||
|
||||
if (resourceSession) {
|
||||
// only run this check if not SSO sesion; SSO session length is checked later
|
||||
// only run this check if not SSO session; SSO session length is checked later
|
||||
const accessPolicy = await enforceResourceSessionLength(
|
||||
resourceSession,
|
||||
resourceData.org
|
||||
@@ -701,6 +704,15 @@ export async function verifyResourceSession(
|
||||
}
|
||||
}
|
||||
|
||||
// If headerAuthExtendedCompatibility is activated but no clientHeaderAuth provided, force client to challenge
|
||||
if (
|
||||
headerAuthExtendedCompatibility &&
|
||||
headerAuthExtendedCompatibility.extendedCompatibilityIsActivated &&
|
||||
!clientHeaderAuth
|
||||
) {
|
||||
return headerAuthChallenged(res, redirectPath, resource.orgId);
|
||||
}
|
||||
|
||||
logger.debug("No more auth to check, resource not allowed");
|
||||
|
||||
if (config.getRawConfig().app.log_failed_attempts) {
|
||||
@@ -809,7 +821,7 @@ async function notAllowed(
|
||||
}
|
||||
|
||||
const data = {
|
||||
data: { valid: false, redirectUrl },
|
||||
data: { valid: false, redirectUrl, pangolinVersion: APP_VERSION },
|
||||
success: true,
|
||||
error: false,
|
||||
message: "Access denied",
|
||||
@@ -823,8 +835,8 @@ function allowed(res: Response, userData?: BasicUserData) {
|
||||
const data = {
|
||||
data:
|
||||
userData !== undefined && userData !== null
|
||||
? { valid: true, ...userData }
|
||||
: { valid: true },
|
||||
? { valid: true, ...userData, pangolinVersion: APP_VERSION }
|
||||
: { valid: true, pangolinVersion: APP_VERSION },
|
||||
success: true,
|
||||
error: false,
|
||||
message: "Access allowed",
|
||||
@@ -833,6 +845,51 @@ function allowed(res: Response, userData?: BasicUserData) {
|
||||
return response<VerifyUserResponse>(res, data);
|
||||
}
|
||||
|
||||
async function headerAuthChallenged(
|
||||
res: Response,
|
||||
redirectPath?: string,
|
||||
orgId?: string
|
||||
) {
|
||||
let loginPage: LoginPage | null = null;
|
||||
if (orgId) {
|
||||
const { tier } = await getOrgTierData(orgId); // returns null in oss
|
||||
if (tier === TierId.STANDARD) {
|
||||
loginPage = await getOrgLoginPage(orgId);
|
||||
}
|
||||
}
|
||||
|
||||
let redirectUrl: string | undefined = undefined;
|
||||
if (redirectPath) {
|
||||
let endpoint: string;
|
||||
|
||||
if (loginPage && loginPage.domainId && loginPage.fullDomain) {
|
||||
const secure = config
|
||||
.getRawConfig()
|
||||
.app.dashboard_url?.startsWith("https");
|
||||
const method = secure ? "https" : "http";
|
||||
endpoint = `${method}://${loginPage.fullDomain}`;
|
||||
} else {
|
||||
endpoint = config.getRawConfig().app.dashboard_url!;
|
||||
}
|
||||
redirectUrl = `${endpoint}${redirectPath}`;
|
||||
}
|
||||
|
||||
const data = {
|
||||
data: {
|
||||
headerAuthChallenged: true,
|
||||
valid: false,
|
||||
redirectUrl,
|
||||
pangolinVersion: APP_VERSION
|
||||
},
|
||||
success: true,
|
||||
error: false,
|
||||
message: "Access denied",
|
||||
status: HttpCode.OK
|
||||
};
|
||||
logger.debug(JSON.stringify(data));
|
||||
return response<VerifyUserResponse>(res, data);
|
||||
}
|
||||
|
||||
async function isUserAllowedToAccessResource(
|
||||
userSessionId: string,
|
||||
resource: Resource,
|
||||
@@ -910,7 +967,8 @@ async function checkRules(
|
||||
resourceId: number,
|
||||
clientIp: string | undefined,
|
||||
path: string | undefined,
|
||||
ipCC?: string
|
||||
ipCC?: string,
|
||||
ipAsn?: number
|
||||
): Promise<"ACCEPT" | "DROP" | "PASS" | undefined> {
|
||||
const ruleCacheKey = `rules:${resourceId}`;
|
||||
|
||||
@@ -954,6 +1012,12 @@ async function checkRules(
|
||||
(await isIpInGeoIP(ipCC, rule.value))
|
||||
) {
|
||||
return rule.action as any;
|
||||
} else if (
|
||||
clientIp &&
|
||||
rule.match == "ASN" &&
|
||||
(await isIpInAsn(ipAsn, rule.value))
|
||||
) {
|
||||
return rule.action as any;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -971,14 +1035,25 @@ export function isPathAllowed(pattern: string, path: string): boolean {
|
||||
logger.debug(`Normalized pattern parts: [${patternParts.join(", ")}]`);
|
||||
logger.debug(`Normalized path parts: [${pathParts.join(", ")}]`);
|
||||
|
||||
// Maximum recursion depth to prevent stack overflow and memory issues
|
||||
const MAX_RECURSION_DEPTH = 100;
|
||||
|
||||
// Recursive function to try different wildcard matches
|
||||
function matchSegments(patternIndex: number, pathIndex: number): boolean {
|
||||
const indent = " ".repeat(pathIndex); // Indent based on recursion depth
|
||||
function matchSegments(patternIndex: number, pathIndex: number, depth: number = 0): boolean {
|
||||
// Check recursion depth limit
|
||||
if (depth > MAX_RECURSION_DEPTH) {
|
||||
logger.warn(
|
||||
`Path matching exceeded maximum recursion depth (${MAX_RECURSION_DEPTH}) for pattern "${pattern}" and path "${path}"`
|
||||
);
|
||||
return false;
|
||||
}
|
||||
|
||||
const indent = " ".repeat(depth); // Indent based on recursion depth
|
||||
const currentPatternPart = patternParts[patternIndex];
|
||||
const currentPathPart = pathParts[pathIndex];
|
||||
|
||||
logger.debug(
|
||||
`${indent}Checking patternIndex=${patternIndex} (${currentPatternPart || "END"}) vs pathIndex=${pathIndex} (${currentPathPart || "END"})`
|
||||
`${indent}Checking patternIndex=${patternIndex} (${currentPatternPart || "END"}) vs pathIndex=${pathIndex} (${currentPathPart || "END"}) [depth=${depth}]`
|
||||
);
|
||||
|
||||
// If we've consumed all pattern parts, we should have consumed all path parts
|
||||
@@ -1011,7 +1086,7 @@ export function isPathAllowed(pattern: string, path: string): boolean {
|
||||
logger.debug(
|
||||
`${indent}Trying to skip wildcard (consume 0 segments)`
|
||||
);
|
||||
if (matchSegments(patternIndex + 1, pathIndex)) {
|
||||
if (matchSegments(patternIndex + 1, pathIndex, depth + 1)) {
|
||||
logger.debug(
|
||||
`${indent}Successfully matched by skipping wildcard`
|
||||
);
|
||||
@@ -1022,7 +1097,7 @@ export function isPathAllowed(pattern: string, path: string): boolean {
|
||||
logger.debug(
|
||||
`${indent}Trying to consume segment "${currentPathPart}" for wildcard`
|
||||
);
|
||||
if (matchSegments(patternIndex, pathIndex + 1)) {
|
||||
if (matchSegments(patternIndex, pathIndex + 1, depth + 1)) {
|
||||
logger.debug(
|
||||
`${indent}Successfully matched by consuming segment for wildcard`
|
||||
);
|
||||
@@ -1050,7 +1125,7 @@ export function isPathAllowed(pattern: string, path: string): boolean {
|
||||
logger.debug(
|
||||
`${indent}Segment with wildcard matches: "${currentPatternPart}" matches "${currentPathPart}"`
|
||||
);
|
||||
return matchSegments(patternIndex + 1, pathIndex + 1);
|
||||
return matchSegments(patternIndex + 1, pathIndex + 1, depth + 1);
|
||||
}
|
||||
|
||||
logger.debug(
|
||||
@@ -1071,10 +1146,10 @@ export function isPathAllowed(pattern: string, path: string): boolean {
|
||||
`${indent}Segments match: "${currentPatternPart}" = "${currentPathPart}"`
|
||||
);
|
||||
// Move to next segments in both pattern and path
|
||||
return matchSegments(patternIndex + 1, pathIndex + 1);
|
||||
return matchSegments(patternIndex + 1, pathIndex + 1, depth + 1);
|
||||
}
|
||||
|
||||
const result = matchSegments(0, 0);
|
||||
const result = matchSegments(0, 0, 0);
|
||||
logger.debug(`Final result: ${result}`);
|
||||
return result;
|
||||
}
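A few illustrative calls show how the segment matcher treats wildcards; the import path is an assumption, and the expected results follow from the algorithm above:

import { isPathAllowed } from "./verifyResourceSession"; // path is an assumption

isPathAllowed("api/*/users", "api/v1/users"); // true  - "*" consumes exactly one segment here
isPathAllowed("api/*", "api/v1/users/42");    // true  - "*" can consume all remaining segments
isPathAllowed("api/users", "api/admin");      // false - literal segments differ
isPathAllowed("health", "health");            // true  - exact segment match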
|
||||
@@ -1090,6 +1165,52 @@ async function isIpInGeoIP(
|
||||
return ipCountryCode?.toUpperCase() === checkCountryCode.toUpperCase();
|
||||
}
|
||||
|
||||
async function isIpInAsn(
ipAsn: number | undefined,
checkAsn: string
): Promise<boolean> {
// Handle "ALL" special case
if (checkAsn === "ALL" || checkAsn === "AS0") {
return true;
}

if (!ipAsn) {
return false;
}

// Normalize the check ASN - remove "AS" prefix if present and convert to number
const normalizedCheckAsn = checkAsn.toUpperCase().replace(/^AS/, "");
const checkAsnNumber = parseInt(normalizedCheckAsn, 10);

if (isNaN(checkAsnNumber)) {
logger.warn(`Invalid ASN format in rule: ${checkAsn}`);
return false;
}

const match = ipAsn === checkAsnNumber;
logger.debug(
`ASN check: IP ASN ${ipAsn} ${match ? "matches" : "does not match"} rule ASN ${checkAsnNumber}`
);

return match;
}

async function getAsnFromIp(ip: string): Promise<number | undefined> {
const asnCacheKey = `asn:${ip}`;

let cachedAsn: number | undefined = cache.get(asnCacheKey);

if (!cachedAsn) {
cachedAsn = await getAsnForIp(ip); // do it locally
// Cache for longer since IP ASN doesn't change frequently
if (cachedAsn) {
cache.set(asnCacheKey, cachedAsn, 300); // 5 minutes
}
}

return cachedAsn;
}
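Taken together, an ASN rule check behaves like this sketch, written as if it ran inside the same module (the helpers are not exported); the IP and ASNs are examples:

async function exampleAsnRuleCheck() {
    const asn = await getAsnFromIp("8.8.8.8"); // e.g. 15169, cached for 5 minutes
    console.log(await isIpInAsn(asn, "AS15169"));   // true  - "AS" prefix is stripped before comparing
    console.log(await isIpInAsn(asn, "15169"));     // true  - a bare number is accepted too
    console.log(await isIpInAsn(asn, "AS13335"));   // false - different network
    console.log(await isIpInAsn(undefined, "ALL")); // true  - "ALL" / "AS0" match everything
}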
|
||||
|
||||
async function getCountryCodeFromIp(ip: string): Promise<string | undefined> {
|
||||
const geoIpCacheKey = `geoip:${ip}`;
|
||||
|
||||
@@ -1097,8 +1218,11 @@ async function getCountryCodeFromIp(ip: string): Promise<string | undefined> {
|
||||
|
||||
if (!cachedCountryCode) {
|
||||
cachedCountryCode = await getCountryCodeForIp(ip); // do it locally
|
||||
// Cache for longer since IP geolocation doesn't change frequently
|
||||
cache.set(geoIpCacheKey, cachedCountryCode, 300); // 5 minutes
|
||||
// Only cache successful lookups to avoid filling cache with undefined values
|
||||
if (cachedCountryCode) {
|
||||
// Cache for longer since IP geolocation doesn't change frequently
|
||||
cache.set(geoIpCacheKey, cachedCountryCode, 300); // 5 minutes
|
||||
}
|
||||
}
|
||||
|
||||
return cachedCountryCode;
|
||||
|
||||
@@ -36,7 +36,7 @@ async function query(clientId?: number, niceId?: string, orgId?: string) {
|
||||
.select()
|
||||
.from(clients)
|
||||
.where(and(eq(clients.niceId, niceId), eq(clients.orgId, orgId)))
|
||||
.leftJoin(olms, eq(olms.clientId, olms.clientId))
|
||||
.leftJoin(olms, eq(clients.clientId, olms.clientId))
|
||||
.limit(1);
|
||||
return res;
|
||||
}
|
||||
|
||||
@@ -56,12 +56,12 @@ async function getLatestOlmVersion(): Promise<string | null> {
|
||||
return null;
|
||||
}
|
||||
|
||||
const tags = await response.json();
|
||||
let tags = await response.json();
|
||||
if (!Array.isArray(tags) || tags.length === 0) {
|
||||
logger.warn("No tags found for Olm repository");
|
||||
return null;
|
||||
}
|
||||
|
||||
tags = tags.filter((version) => !version.name.includes("rc"));
|
||||
const latestVersion = tags[0].name;
|
||||
|
||||
olmVersionCache.set("latestOlmVersion", latestVersion);
|
||||
|
||||
@@ -48,7 +48,6 @@ import createHttpError from "http-errors";
|
||||
import { build } from "@server/build";
|
||||
import { createStore } from "#dynamic/lib/rateLimitStore";
|
||||
import { logActionAudit } from "#dynamic/middlewares";
|
||||
import { log } from "console";
|
||||
|
||||
// Root routes
|
||||
export const unauthenticated = Router();
|
||||
|
||||
@@ -52,7 +52,7 @@ export async function getConfig(
|
||||
}
|
||||
|
||||
// clean up the public key - keep only valid base64 characters (A-Z, a-z, 0-9, +, /, =)
|
||||
const cleanedPublicKey = publicKey.replace(/[^A-Za-z0-9+/=]/g, '');
|
||||
const cleanedPublicKey = publicKey.replace(/[^A-Za-z0-9+/=]/g, "");
|
||||
|
||||
const exitNode = await createExitNode(cleanedPublicKey, reachableAt);
|
||||
|
||||
|
||||
@@ -605,9 +605,18 @@ export async function validateOidcCallback(
|
||||
|
||||
res.appendHeader("Set-Cookie", cookie);
|
||||
|
||||
let finalRedirectUrl = postAuthRedirectUrl;
|
||||
if (loginPageId) {
|
||||
finalRedirectUrl = `/auth/org/?redirect=${encodeURIComponent(
|
||||
postAuthRedirectUrl
|
||||
)}`;
|
||||
}
|
||||
|
||||
logger.debug("Final redirect URL", { finalRedirectUrl });
|
||||
|
||||
return response<ValidateOidcUrlCallbackResponse>(res, {
|
||||
data: {
|
||||
redirectUrl: postAuthRedirectUrl
|
||||
redirectUrl: finalRedirectUrl
|
||||
},
|
||||
success: true,
|
||||
error: false,
|
||||
|
||||
@@ -858,6 +858,20 @@ authenticated.put(
|
||||
blueprints.applyJSONBlueprint
|
||||
);
|
||||
|
||||
authenticated.get(
|
||||
"/org/:orgId/blueprint/:blueprintId",
|
||||
verifyApiKeyOrgAccess,
|
||||
verifyApiKeyHasAction(ActionsEnum.getBlueprint),
|
||||
blueprints.getBlueprint
|
||||
);
|
||||
|
||||
authenticated.get(
|
||||
"/org/:orgId/blueprints",
|
||||
verifyApiKeyOrgAccess,
|
||||
verifyApiKeyHasAction(ActionsEnum.listBlueprints),
|
||||
blueprints.listBlueprints
|
||||
);
|
||||
|
||||
authenticated.get(
|
||||
"/org/:orgId/logs/request",
|
||||
verifyApiKeyOrgAccess,
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import { db } from "@server/db";
|
||||
import { MessageHandler } from "@server/routers/ws";
|
||||
import { clients, Newt } from "@server/db";
|
||||
import { eq } from "drizzle-orm";
|
||||
import { clients } from "@server/db";
|
||||
import { eq, sql } from "drizzle-orm";
|
||||
import logger from "@server/logger";
|
||||
|
||||
interface PeerBandwidth {
|
||||
@@ -10,13 +10,57 @@ interface PeerBandwidth {
|
||||
bytesOut: number;
|
||||
}
|
||||
|
||||
// Retry configuration for deadlock handling
const MAX_RETRIES = 3;
const BASE_DELAY_MS = 50;

/**
* Check if an error is a deadlock error
*/
function isDeadlockError(error: any): boolean {
return (
error?.code === "40P01" ||
error?.cause?.code === "40P01" ||
(error?.message && error.message.includes("deadlock"))
);
}

/**
* Execute a function with retry logic for deadlock handling
*/
async function withDeadlockRetry<T>(
operation: () => Promise<T>,
context: string
): Promise<T> {
let attempt = 0;
while (true) {
try {
return await operation();
} catch (error: any) {
if (isDeadlockError(error) && attempt < MAX_RETRIES) {
attempt++;
const baseDelay = Math.pow(2, attempt - 1) * BASE_DELAY_MS;
const jitter = Math.random() * baseDelay;
const delay = baseDelay + jitter;
logger.warn(
`Deadlock detected in ${context}, retrying attempt ${attempt}/${MAX_RETRIES} after ${delay.toFixed(0)}ms`
);
await new Promise((resolve) => setTimeout(resolve, delay));
continue;
}
throw error;
}
}
}
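Any short write can be wrapped the same way; a minimal illustrative usage (the client id is a placeholder):

await withDeadlockRetry(async () => {
    await db
        .update(clients)
        .set({ lastBandwidthUpdate: new Date().toISOString() })
        .where(eq(clients.clientId, 42));
}, "touch client 42");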
|
||||
|
||||
export const handleReceiveBandwidthMessage: MessageHandler = async (
|
||||
context
|
||||
) => {
|
||||
const { message, client, sendToClient } = context;
|
||||
const { message } = context;
|
||||
|
||||
if (!message.data.bandwidthData) {
|
||||
logger.warn("No bandwidth data provided");
|
||||
return;
|
||||
}
|
||||
|
||||
const bandwidthData: PeerBandwidth[] = message.data.bandwidthData;
|
||||
@@ -25,30 +69,40 @@ export const handleReceiveBandwidthMessage: MessageHandler = async (
|
||||
throw new Error("Invalid bandwidth data");
|
||||
}
|
||||
|
||||
await db.transaction(async (trx) => {
|
||||
for (const peer of bandwidthData) {
|
||||
const { publicKey, bytesIn, bytesOut } = peer;
|
||||
// Sort bandwidth data by publicKey to ensure consistent lock ordering across all instances
|
||||
// This is critical for preventing deadlocks when multiple instances update the same clients
|
||||
const sortedBandwidthData = [...bandwidthData].sort((a, b) =>
|
||||
a.publicKey.localeCompare(b.publicKey)
|
||||
);
|
||||
|
||||
// Find the client by public key
|
||||
const [client] = await trx
|
||||
.select()
|
||||
.from(clients)
|
||||
.where(eq(clients.pubKey, publicKey))
|
||||
.limit(1);
|
||||
const currentTime = new Date().toISOString();
|
||||
|
||||
if (!client) {
|
||||
continue;
|
||||
}
|
||||
// Update each client individually with retry logic
|
||||
// This reduces transaction scope and allows retries per-client
|
||||
for (const peer of sortedBandwidthData) {
|
||||
const { publicKey, bytesIn, bytesOut } = peer;
|
||||
|
||||
// Update the client's bandwidth usage
|
||||
await trx
|
||||
.update(clients)
|
||||
.set({
|
||||
megabytesOut: (client.megabytesIn || 0) + bytesIn,
|
||||
megabytesIn: (client.megabytesOut || 0) + bytesOut,
|
||||
lastBandwidthUpdate: new Date().toISOString()
|
||||
})
|
||||
.where(eq(clients.clientId, client.clientId));
|
||||
try {
|
||||
await withDeadlockRetry(async () => {
|
||||
// Use atomic SQL increment to avoid SELECT then UPDATE pattern
|
||||
// This eliminates the need to read the current value first
|
||||
await db
|
||||
.update(clients)
|
||||
.set({
|
||||
// Note: bytesIn from peer goes to megabytesOut (data sent to client)
|
||||
// and bytesOut from peer goes to megabytesIn (data received from client)
|
||||
megabytesOut: sql`COALESCE(${clients.megabytesOut}, 0) + ${bytesIn}`,
|
||||
megabytesIn: sql`COALESCE(${clients.megabytesIn}, 0) + ${bytesOut}`,
|
||||
lastBandwidthUpdate: currentTime
|
||||
})
|
||||
.where(eq(clients.pubKey, publicKey));
|
||||
}, `update client bandwidth ${publicKey}`);
|
||||
} catch (error) {
|
||||
logger.error(
|
||||
`Failed to update bandwidth for client ${publicKey}:`,
|
||||
error
|
||||
);
|
||||
// Continue with other clients even if one fails
|
||||
}
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
@@ -27,6 +27,7 @@ import { usageService } from "@server/lib/billing/usageService";
|
||||
import { FeatureId } from "@server/lib/billing";
|
||||
import { build } from "@server/build";
|
||||
import { calculateUserClientsForOrgs } from "@server/lib/calculateUserClientsForOrgs";
|
||||
import { doCidrsOverlap } from "@server/lib/ip";
|
||||
|
||||
const createOrgSchema = z.strictObject({
|
||||
orgId: z.string(),
|
||||
@@ -36,6 +37,11 @@ const createOrgSchema = z.strictObject({
|
||||
.union([z.cidrv4()]) // for now lets just do ipv4 until we verify ipv6 works everywhere
|
||||
.refine((val) => isValidCIDR(val), {
|
||||
message: "Invalid subnet CIDR"
|
||||
}),
|
||||
utilitySubnet: z
|
||||
.union([z.cidrv4()]) // for now lets just do ipv4 until we verify ipv6 works everywhere
|
||||
.refine((val) => isValidCIDR(val), {
|
||||
message: "Invalid utility subnet CIDR"
|
||||
})
|
||||
});
|
||||
|
||||
@@ -84,7 +90,7 @@ export async function createOrg(
|
||||
);
|
||||
}
|
||||
|
||||
const { orgId, name, subnet } = parsedBody.data;
|
||||
const { orgId, name, subnet, utilitySubnet } = parsedBody.data;
|
||||
|
||||
// TODO: for now we are making all of the orgs the same subnet
|
||||
// make sure the subnet is unique
|
||||
@@ -119,6 +125,15 @@ export async function createOrg(
|
||||
);
|
||||
}
|
||||
|
||||
if (doCidrsOverlap(subnet, utilitySubnet)) {
|
||||
return next(
|
||||
createHttpError(
|
||||
HttpCode.BAD_REQUEST,
|
||||
`Subnet ${subnet} overlaps with utility subnet ${utilitySubnet}`
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
let error = "";
|
||||
let org: Org | null = null;
|
||||
|
||||
@@ -128,9 +143,6 @@ export async function createOrg(
|
||||
.from(domains)
|
||||
.where(eq(domains.configManaged, true));
|
||||
|
||||
const utilitySubnet =
|
||||
config.getRawConfig().orgs.utility_subnet_group;
|
||||
|
||||
const newOrg = await trx
|
||||
.insert(orgs)
|
||||
.values({
|
||||
|
||||
@@ -8,6 +8,7 @@ import config from "@server/lib/config";
|
||||
|
||||
export type PickOrgDefaultsResponse = {
|
||||
subnet: string;
|
||||
utilitySubnet: string;
|
||||
};
|
||||
|
||||
export async function pickOrgDefaults(
|
||||
@@ -20,10 +21,12 @@ export async function pickOrgDefaults(
|
||||
// const subnet = await getNextAvailableOrgSubnet();
|
||||
// Just hard code the subnet for now for everyone
|
||||
const subnet = config.getRawConfig().orgs.subnet_group;
|
||||
const utilitySubnet = config.getRawConfig().orgs.utility_subnet_group;
|
||||
|
||||
return response<PickOrgDefaultsResponse>(res, {
|
||||
data: {
|
||||
subnet: subnet
|
||||
subnet: subnet,
|
||||
utilitySubnet: utilitySubnet
|
||||
},
|
||||
success: true,
|
||||
error: false,
|
||||
|
||||
@@ -10,10 +10,10 @@ import logger from "@server/logger";
|
||||
import { fromError } from "zod-validation-error";
|
||||
import { OpenAPITags, registry } from "@server/openApi";
|
||||
import { build } from "@server/build";
|
||||
import license from "#dynamic/license/license";
|
||||
import { getOrgTierData } from "#dynamic/lib/billing";
|
||||
import { TierId } from "@server/lib/billing/tiers";
|
||||
import { cache } from "@server/lib/cache";
|
||||
import { isLicensedOrSubscribed } from "@server/lib/isLicencedOrSubscribed";
|
||||
|
||||
const updateOrgParamsSchema = z.strictObject({
|
||||
orgId: z.string()
|
||||
@@ -155,22 +155,3 @@ export async function updateOrg(
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
async function isLicensedOrSubscribed(orgId: string): Promise<boolean> {
|
||||
if (build === "enterprise") {
|
||||
const isUnlocked = await license.isUnlocked();
|
||||
if (!isUnlocked) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
if (build === "saas") {
|
||||
const { tier } = await getOrgTierData(orgId);
|
||||
const subscribed = tier === TierId.STANDARD;
|
||||
if (!subscribed) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
@@ -17,7 +17,7 @@ import { OpenAPITags, registry } from "@server/openApi";
|
||||
|
||||
const createResourceRuleSchema = z.strictObject({
|
||||
action: z.enum(["ACCEPT", "DROP", "PASS"]),
|
||||
match: z.enum(["CIDR", "IP", "PATH", "COUNTRY"]),
|
||||
match: z.enum(["CIDR", "IP", "PATH", "COUNTRY", "ASN"]),
|
||||
value: z.string().min(1),
|
||||
priority: z.int(),
|
||||
enabled: z.boolean().optional()
|
||||
|
||||
@@ -3,6 +3,7 @@ import { z } from "zod";
import {
db,
resourceHeaderAuth,
resourceHeaderAuthExtendedCompatibility,
resourcePassword,
resourcePincode,
resources
@@ -27,6 +28,7 @@ export type GetResourceAuthInfoResponse = {
password: boolean;
pincode: boolean;
headerAuth: boolean;
headerAuthExtendedCompatibility: boolean;
sso: boolean;
blockAccess: boolean;
url: string;
@@ -76,6 +78,13 @@ export async function getResourceAuthInfo(
resources.resourceId
)
)
.leftJoin(
resourceHeaderAuthExtendedCompatibility,
eq(
resourceHeaderAuthExtendedCompatibility.resourceId,
resources.resourceId
)
)
.where(eq(resources.resourceId, Number(resourceGuid)))
.limit(1)
: await db
@@ -89,6 +98,7 @@ export async function getResourceAuthInfo(
resourcePassword,
eq(resourcePassword.resourceId, resources.resourceId)
)

.leftJoin(
resourceHeaderAuth,
eq(
@@ -96,6 +106,13 @@ export async function getResourceAuthInfo(
resources.resourceId
)
)
.leftJoin(
resourceHeaderAuthExtendedCompatibility,
eq(
resourceHeaderAuthExtendedCompatibility.resourceId,
resources.resourceId
)
)
.where(eq(resources.resourceGuid, resourceGuid))
.limit(1);

@@ -109,6 +126,8 @@ export async function getResourceAuthInfo(
const pincode = result?.resourcePincode;
const password = result?.resourcePassword;
const headerAuth = result?.resourceHeaderAuth;
const headerAuthExtendedCompatibility =
result?.resourceHeaderAuthExtendedCompatibility;

const url = `${resource.ssl ? "https" : "http"}://${resource.fullDomain}`;

@@ -121,6 +140,8 @@ export async function getResourceAuthInfo(
password: password !== null,
pincode: pincode !== null,
headerAuth: headerAuth !== null,
headerAuthExtendedCompatibility:
headerAuthExtendedCompatibility !== null,
sso: resource.sso,
blockAccess: resource.blockAccess,
url,

@@ -30,3 +30,4 @@ export * from "./removeRoleFromResource";
export * from "./addUserToResource";
export * from "./removeUserFromResource";
export * from "./listAllResourceNames";
export * from "./removeEmailFromResourceWhitelist";

@@ -1,6 +1,10 @@
import { Request, Response, NextFunction } from "express";
import { z } from "zod";
import { db, resourceHeaderAuth } from "@server/db";
import {
db,
resourceHeaderAuth,
resourceHeaderAuthExtendedCompatibility
} from "@server/db";
import {
resources,
userResources,
@@ -109,7 +113,8 @@ function queryResources(accessibleResourceIds: number[], orgId: string) {
domainId: resources.domainId,
niceId: resources.niceId,
headerAuthId: resourceHeaderAuth.headerAuthId,

headerAuthExtendedCompatibilityId:
resourceHeaderAuthExtendedCompatibility.headerAuthExtendedCompatibilityId,
targetId: targets.targetId,
targetIp: targets.ip,
targetPort: targets.port,
@@ -131,6 +136,13 @@ function queryResources(accessibleResourceIds: number[], orgId: string) {
resourceHeaderAuth,
eq(resourceHeaderAuth.resourceId, resources.resourceId)
)
.leftJoin(
resourceHeaderAuthExtendedCompatibility,
eq(
resourceHeaderAuthExtendedCompatibility.resourceId,
resources.resourceId
)
)
.leftJoin(targets, eq(targets.resourceId, resources.resourceId))
.leftJoin(
targetHealthCheck,

@@ -1,6 +1,10 @@
import { Request, Response, NextFunction } from "express";
import { z } from "zod";
import { db, resourceHeaderAuth } from "@server/db";
import {
db,
resourceHeaderAuth,
resourceHeaderAuthExtendedCompatibility
} from "@server/db";
import { eq } from "drizzle-orm";
import HttpCode from "@server/types/HttpCode";
import createHttpError from "http-errors";
@@ -16,7 +20,8 @@ const setResourceAuthMethodsParamsSchema = z.object({

const setResourceAuthMethodsBodySchema = z.strictObject({
user: z.string().min(4).max(100).nullable(),
password: z.string().min(4).max(100).nullable()
password: z.string().min(4).max(100).nullable(),
extendedCompatibility: z.boolean().nullable()
});

registry.registerPath({
@@ -67,21 +72,38 @@ export async function setResourceHeaderAuth(
}

const { resourceId } = parsedParams.data;
const { user, password } = parsedBody.data;
const { user, password, extendedCompatibility } = parsedBody.data;

await db.transaction(async (trx) => {
await trx
.delete(resourceHeaderAuth)
.where(eq(resourceHeaderAuth.resourceId, resourceId));
await trx
.delete(resourceHeaderAuthExtendedCompatibility)
.where(
eq(
resourceHeaderAuthExtendedCompatibility.resourceId,
resourceId
)
);

if (user && password) {
if (user && password && extendedCompatibility !== null) {
const headerAuthHash = await hashPassword(
Buffer.from(`${user}:${password}`).toString("base64")
);

await trx
.insert(resourceHeaderAuth)
.values({ resourceId, headerAuthHash });
await Promise.all([
trx
.insert(resourceHeaderAuth)
.values({ resourceId, headerAuthHash }),
trx
.insert(resourceHeaderAuthExtendedCompatibility)
.values({
resourceId,
extendedCompatibilityIsActivated:
extendedCompatibility
})
]);
}
});
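[Editor's note] In the hunk above, the stored headerAuthHash is a hash of base64("user:password"), i.e. the same credential shape as HTTP Basic auth, and the new resourceHeaderAuthExtendedCompatibility row records whether extended compatibility is enabled for the resource. A small sketch of the credential derivation only; the sample values are invented and the header a client actually sends is not shown in this diff:

// Illustrative only: mirrors the credential built before hashPassword() above.
const user = "svc-account";        // assumed example value
const password = "s3cret-value";   // assumed example value

const headerAuthCredential = Buffer.from(`${user}:${password}`).toString("base64");
// Only hashPassword(headerAuthCredential) is persisted in
// resourceHeaderAuth.headerAuthHash; the plaintext credential is never stored.
console.log(headerAuthCredential);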
10
server/routers/resource/types.ts
Normal file
@@ -0,0 +1,10 @@
export type GetMaintenanceInfoResponse = {
    resourceId: number;
    name: string;
    fullDomain: string | null;
    maintenanceModeEnabled: boolean;
    maintenanceModeType: "forced" | "automatic" | null;
    maintenanceTitle: string | null;
    maintenanceMessage: string | null;
    maintenanceEstimatedTime: string | null;
};
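[Editor's note] A value satisfying the new GetMaintenanceInfoResponse type might look like the sketch below; the field values are invented and the import path assumes the repo's existing @server path alias resolves this new file:

import type { GetMaintenanceInfoResponse } from "@server/routers/resource/types";

const example: GetMaintenanceInfoResponse = {
    resourceId: 42,
    name: "internal-wiki",
    fullDomain: "wiki.example.com",
    maintenanceModeEnabled: true,
    maintenanceModeType: "forced",
    maintenanceTitle: "Scheduled maintenance",
    maintenanceMessage: "We are upgrading the database.",
    maintenanceEstimatedTime: "about 30 minutes"
};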
@@ -22,8 +22,8 @@ import { registry } from "@server/openApi";
import { OpenAPITags } from "@server/openApi";
import { createCertificate } from "#dynamic/routers/certificates/createCertificate";
import { validateAndConstructDomain } from "@server/lib/domainUtils";
import { validateHeaders } from "@server/lib/validators";
import { build } from "@server/build";
import { isLicensedOrSubscribed } from "@server/lib/isLicencedOrSubscribed";

const updateResourceParamsSchema = z.strictObject({
resourceId: z.string().transform(Number).pipe(z.int().positive())
@@ -48,7 +48,13 @@ const updateHttpResourceBodySchema = z
headers: z
.array(z.strictObject({ name: z.string(), value: z.string() }))
.nullable()
.optional()
.optional(),
// Maintenance mode fields
maintenanceModeEnabled: z.boolean().optional(),
maintenanceModeType: z.enum(["forced", "automatic"]).optional(),
maintenanceTitle: z.string().max(255).nullable().optional(),
maintenanceMessage: z.string().max(2000).nullable().optional(),
maintenanceEstimatedTime: z.string().max(100).nullable().optional()
})
.refine((data) => Object.keys(data).length > 0, {
error: "At least one field must be provided for update"
@@ -335,6 +341,19 @@ async function updateHttpResource(
headers = null;
}

const isLicensed = await isLicensedOrSubscribed(resource.orgId);
if (build == "enterprise" && !isLicensed) {
logger.warn(
"Server is not licensed! Clearing set maintenance screen values"
);
// null the maintenance mode fields if not licensed
updateData.maintenanceModeEnabled = undefined;
updateData.maintenanceModeType = undefined;
updateData.maintenanceTitle = undefined;
updateData.maintenanceMessage = undefined;
updateData.maintenanceEstimatedTime = undefined;
}

const updatedResource = await db
.update(resources)
.set({ ...updateData, headers })

@@ -25,7 +25,7 @@ const updateResourceRuleParamsSchema = z.strictObject({
const updateResourceRuleSchema = z
.strictObject({
action: z.enum(["ACCEPT", "DROP", "PASS"]).optional(),
match: z.enum(["CIDR", "IP", "PATH", "COUNTRY"]).optional(),
match: z.enum(["CIDR", "IP", "PATH", "COUNTRY", "ASN"]).optional(),
value: z.string().min(1).optional(),
priority: z.int(),
enabled: z.boolean().optional()

@@ -39,12 +39,12 @@ async function getLatestNewtVersion(): Promise<string | null> {
return null;
}

const tags = await response.json();
let tags = await response.json();
if (!Array.isArray(tags) || tags.length === 0) {
logger.warn("No tags found for Newt repository");
return null;
}

tags = tags.filter((version) => !version.name.includes("rc"));
const latestVersion = tags[0].name;

cache.set("latestNewtVersion", latestVersion);
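[Editor's note] The getLatestNewtVersion hunk above now skips release-candidate tags before caching the newest version. The filtering step in isolation; the tag objects mimic the { name } shape the route reads, and, as in the route itself, the assumption is that the tag list arrives newest-first:

type Tag = { name: string };

function pickLatestStableTag(tags: Tag[]): string | null {
    // Drop release candidates; the first remaining entry is treated as latest.
    const stable = tags.filter((tag) => !tag.name.includes("rc"));
    return stable.length > 0 ? stable[0].name : null;
}

console.log(
    pickLatestStableTag([{ name: "1.6.0-rc1" }, { name: "1.5.2" }, { name: "1.5.1" }])
); // "1.5.2"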
@@ -11,7 +11,11 @@ import {
userSiteResources
} from "@server/db";
import { getUniqueSiteResourceName } from "@server/db/names";
import { getNextAvailableAliasAddress, isIpInCidr, portRangeStringSchema } from "@server/lib/ip";
import {
getNextAvailableAliasAddress,
isIpInCidr,
portRangeStringSchema
} from "@server/lib/ip";
import { rebuildClientAssociationsFromSiteResource } from "@server/lib/rebuildClientAssociations";
import response from "@server/lib/response";
import logger from "@server/logger";
@@ -69,7 +73,10 @@ const createSiteResourceSchema = z
const domainRegex =
/^(?:[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?\.)*[a-zA-Z0-9](?:[a-zA-Z0-9-]{0,61}[a-zA-Z0-9])?$/;
const isValidDomain = domainRegex.test(data.destination);
const isValidAlias = data.alias !== undefined && data.alias !== null && data.alias.trim() !== "";
const isValidAlias =
data.alias !== undefined &&
data.alias !== null &&
data.alias.trim() !== "";

return isValidDomain && isValidAlias; // require the alias to be set in the case of domain
}
@@ -182,7 +189,9 @@ export async function createSiteResource(
.limit(1);

if (!org) {
return next(createHttpError(HttpCode.NOT_FOUND, "Organization not found"));
return next(
createHttpError(HttpCode.NOT_FOUND, "Organization not found")
);
}

if (!org.subnet || !org.utilitySubnet) {
@@ -195,10 +204,13 @@ export async function createSiteResource(
}

// Only check if destination is an IP address
const isIp = z.union([z.ipv4(), z.ipv6()]).safeParse(destination).success;
const isIp = z
.union([z.ipv4(), z.ipv6()])
.safeParse(destination).success;
if (
isIp &&
(isIpInCidr(destination, org.subnet) || isIpInCidr(destination, org.utilitySubnet))
(isIpInCidr(destination, org.subnet) ||
isIpInCidr(destination, org.utilitySubnet))
) {
return next(
createHttpError(
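[Editor's note] createSiteResource (and updateSiteResource below) rejects IP destinations that fall inside the org's subnet or utilitySubnet. The guard in isolation; the subnet values are invented examples and isIpInCidr is the same helper the route already imports from @server/lib/ip:

import { isIpInCidr } from "@server/lib/ip";

const orgSubnet = "100.90.128.0/24";     // assumed example value
const utilitySubnet = "100.90.129.0/24"; // assumed example value

// A destination is rejected when it lands inside either org-managed subnet.
function isReservedDestination(destination: string): boolean {
    return (
        isIpInCidr(destination, orgSubnet) ||
        isIpInCidr(destination, utilitySubnet)
    );
}

console.log(isReservedDestination("100.90.128.10")); // true
console.log(isReservedDestination("10.0.0.5"));      // false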
@@ -88,9 +88,7 @@ export async function deleteSiteResource(
);
});

logger.info(
`Deleted site resource ${siteResourceId}`
);
logger.info(`Deleted site resource ${siteResourceId}`);

return response(res, {
data: { message: "Site resource deleted successfully" },

@@ -204,7 +204,9 @@ export async function updateSiteResource(
.limit(1);

if (!org) {
return next(createHttpError(HttpCode.NOT_FOUND, "Organization not found"));
return next(
createHttpError(HttpCode.NOT_FOUND, "Organization not found")
);
}

if (!org.subnet || !org.utilitySubnet) {
@@ -217,10 +219,13 @@ export async function updateSiteResource(
}

// Only check if destination is an IP address
const isIp = z.union([z.ipv4(), z.ipv6()]).safeParse(destination).success;
const isIp = z
.union([z.ipv4(), z.ipv6()])
.safeParse(destination).success;
if (
isIp &&
(isIpInCidr(destination!, org.subnet) || isIpInCidr(destination!, org.utilitySubnet))
(isIpInCidr(destination!, org.subnet) ||
isIpInCidr(destination!, org.utilitySubnet))
) {
return next(
createHttpError(
@@ -295,7 +300,7 @@ export async function updateSiteResource(
const [insertedSiteResource] = await trx
.insert(siteResources)
.values({
...existingSiteResource,
...existingSiteResource
})
.returning();

@@ -517,9 +522,14 @@ export async function handleMessagingForUpdatedSiteResource(
site: { siteId: number; orgId: string },
trx: Transaction
) {

logger.debug("handleMessagingForUpdatedSiteResource: existingSiteResource is: ", existingSiteResource);
logger.debug("handleMessagingForUpdatedSiteResource: updatedSiteResource is: ", updatedSiteResource);
logger.debug(
"handleMessagingForUpdatedSiteResource: existingSiteResource is: ",
existingSiteResource
);
logger.debug(
"handleMessagingForUpdatedSiteResource: updatedSiteResource is: ",
updatedSiteResource
);

const { mergedAllClients } =
await rebuildClientAssociationsFromSiteResource(

@@ -213,9 +213,11 @@ export async function updateTarget(

// When health check is disabled, reset hcHealth to "unknown"
// to prevent previously unhealthy targets from being excluded
// Also when the site is not a newt, set hcHealth to "unknown"
const hcHealthValue =
parsedBody.data.hcEnabled === false ||
parsedBody.data.hcEnabled === null
parsedBody.data.hcEnabled === null ||
site.type !== "newt"
? "unknown"
: undefined;

@@ -56,7 +56,7 @@ export async function ensureSetupToken() {
.where(eq(setupTokens.used, false));

const envSetupToken = process.env.PANGOLIN_SETUP_TOKEN;
console.debug("PANGOLIN_SETUP_TOKEN:", envSetupToken);
// console.debug("PANGOLIN_SETUP_TOKEN:", envSetupToken);
if (envSetupToken) {
if (!validateToken(envSetupToken)) {
throw new Error(

@@ -15,6 +15,7 @@ import m7 from "./scriptsPg/1.11.0";
import m8 from "./scriptsPg/1.11.1";
import m9 from "./scriptsPg/1.12.0";
import m10 from "./scriptsPg/1.13.0";
import m11 from "./scriptsPg/1.14.0";

// THIS CANNOT IMPORT ANYTHING FROM THE SERVER
// EXCEPT FOR THE DATABASE AND THE SCHEMA
@@ -30,7 +31,8 @@ const migrations = [
{ version: "1.11.0", run: m7 },
{ version: "1.11.1", run: m8 },
{ version: "1.12.0", run: m9 },
{ version: "1.13.0", run: m10 }
{ version: "1.13.0", run: m10 },
{ version: "1.14.0", run: m11 }
// Add new migrations here as they are created
] as {
version: string;

@@ -33,6 +33,7 @@ import m28 from "./scriptsSqlite/1.11.0";
import m29 from "./scriptsSqlite/1.11.1";
import m30 from "./scriptsSqlite/1.12.0";
import m31 from "./scriptsSqlite/1.13.0";
import m32 from "./scriptsSqlite/1.14.0";

// THIS CANNOT IMPORT ANYTHING FROM THE SERVER
// EXCEPT FOR THE DATABASE AND THE SCHEMA
@@ -64,7 +65,8 @@ const migrations = [
{ version: "1.11.0", run: m28 },
{ version: "1.11.1", run: m29 },
{ version: "1.12.0", run: m30 },
{ version: "1.13.0", run: m31 }
{ version: "1.13.0", run: m31 },
{ version: "1.14.0", run: m32 }
// Add new migrations here as they are created
] as const;
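[Editor's note] Both migration registries above gain a "1.14.0" entry that points at the new setup scripts added below. The pattern implies a runner that executes every registered script newer than the installed version, roughly like this sketch; the version comparison and how the current version is obtained are assumptions, not the project's actual runner:

type Migration = { version: string; run: () => Promise<void> };

function compareVersions(a: string, b: string): number {
    const pa = a.split(".").map(Number);
    const pb = b.split(".").map(Number);
    for (let i = 0; i < Math.max(pa.length, pb.length); i++) {
        const diff = (pa[i] ?? 0) - (pb[i] ?? 0);
        if (diff !== 0) return diff;
    }
    return 0;
}

async function runPendingMigrations(
    migrations: Migration[],
    currentVersion: string
): Promise<void> {
    for (const migration of migrations) {
        // Run only the scripts registered after the installed version,
        // in the order they appear in the registry.
        if (compareVersions(migration.version, currentVersion) > 0) {
            await migration.run();
        }
    }
}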
96
server/setup/scriptsPg/1.14.0.ts
Normal file
@@ -0,0 +1,96 @@
import { db } from "@server/db/pg/driver";
import { sql } from "drizzle-orm";
import { __DIRNAME } from "@server/lib/consts";

const version = "1.14.0";

export default async function migration() {
    console.log(`Running setup script ${version}...`);

    try {
        await db.execute(sql`BEGIN`);

        await db.execute(sql`
            CREATE TABLE "loginPageBranding" (
                "loginPageBrandingId" serial PRIMARY KEY NOT NULL,
                "logoUrl" text NOT NULL,
                "logoWidth" integer NOT NULL,
                "logoHeight" integer NOT NULL,
                "primaryColor" text,
                "resourceTitle" text NOT NULL,
                "resourceSubtitle" text,
                "orgTitle" text,
                "orgSubtitle" text
            );
        `);

        await db.execute(sql`
            CREATE TABLE "loginPageBrandingOrg" (
                "loginPageBrandingId" integer NOT NULL,
                "orgId" varchar NOT NULL
            );
        `);

        await db.execute(sql`
            CREATE TABLE "resourceHeaderAuthExtendedCompatibility" (
                "headerAuthExtendedCompatibilityId" serial PRIMARY KEY NOT NULL,
                "resourceId" integer NOT NULL,
                "extendedCompatibilityIsActivated" boolean DEFAULT false NOT NULL
            );
        `);

        await db.execute(
            sql`ALTER TABLE "resources" ADD COLUMN "maintenanceModeEnabled" boolean DEFAULT false NOT NULL;`
        );

        await db.execute(
            sql`ALTER TABLE "resources" ADD COLUMN "maintenanceModeType" text DEFAULT 'forced';`
        );

        await db.execute(
            sql`ALTER TABLE "resources" ADD COLUMN "maintenanceTitle" text;`
        );

        await db.execute(
            sql`ALTER TABLE "resources" ADD COLUMN "maintenanceMessage" text;`
        );

        await db.execute(
            sql`ALTER TABLE "resources" ADD COLUMN "maintenanceEstimatedTime" text;`
        );

        await db.execute(
            sql`ALTER TABLE "siteResources" ADD COLUMN "tcpPortRangeString" varchar NOT NULL DEFAULT '*';`
        );

        await db.execute(
            sql`ALTER TABLE "siteResources" ADD COLUMN "udpPortRangeString" varchar NOT NULL DEFAULT '*';`
        );

        await db.execute(
            sql`ALTER TABLE "siteResources" ADD COLUMN "disableIcmp" boolean DEFAULT false NOT NULL;`
        );

        await db.execute(
            sql`ALTER TABLE "loginPageBrandingOrg" ADD CONSTRAINT "loginPageBrandingOrg_loginPageBrandingId_loginPageBranding_loginPageBrandingId_fk" FOREIGN KEY ("loginPageBrandingId") REFERENCES "public"."loginPageBranding"("loginPageBrandingId") ON DELETE cascade ON UPDATE no action;`
        );

        await db.execute(
            sql`ALTER TABLE "loginPageBrandingOrg" ADD CONSTRAINT "loginPageBrandingOrg_orgId_orgs_orgId_fk" FOREIGN KEY ("orgId") REFERENCES "public"."orgs"("orgId") ON DELETE cascade ON UPDATE no action;`
        );

        await db.execute(
            sql`ALTER TABLE "resourceHeaderAuthExtendedCompatibility" ADD CONSTRAINT "resourceHeaderAuthExtendedCompatibility_resourceId_resources_resourceId_fk" FOREIGN KEY ("resourceId") REFERENCES "public"."resources"("resourceId") ON DELETE cascade ON UPDATE no action;`
        );

        await db.execute(sql`COMMIT`);
        console.log("Migrated database");
    } catch (e) {
        await db.execute(sql`ROLLBACK`);
        console.log("Unable to migrate database");
        console.log(e);
        throw e;
    }

    console.log(`${version} migration complete`);
}

99
server/setup/scriptsSqlite/1.14.0.ts
Normal file
@@ -0,0 +1,99 @@
import { __DIRNAME, APP_PATH } from "@server/lib/consts";
import Database from "better-sqlite3";
import path from "path";

const version = "1.14.0";

export default async function migration() {
    console.log(`Running setup script ${version}...`);

    const location = path.join(APP_PATH, "db", "db.sqlite");
    const db = new Database(location);

    try {
        db.pragma("foreign_keys = OFF");

        db.transaction(() => {
            db.prepare(
                `
                CREATE TABLE 'loginPageBranding' (
                    'loginPageBrandingId' integer PRIMARY KEY AUTOINCREMENT NOT NULL,
                    'logoUrl' text NOT NULL,
                    'logoWidth' integer NOT NULL,
                    'logoHeight' integer NOT NULL,
                    'primaryColor' text,
                    'resourceTitle' text NOT NULL,
                    'resourceSubtitle' text,
                    'orgTitle' text,
                    'orgSubtitle' text
                );
                `
            ).run();

            db.prepare(
                `
                CREATE TABLE 'loginPageBrandingOrg' (
                    'loginPageBrandingId' integer NOT NULL,
                    'orgId' text NOT NULL,
                    FOREIGN KEY ('loginPageBrandingId') REFERENCES 'loginPageBranding'('loginPageBrandingId') ON UPDATE no action ON DELETE cascade,
                    FOREIGN KEY ('orgId') REFERENCES 'orgs'('orgId') ON UPDATE no action ON DELETE cascade
                );
                `
            ).run();

            db.prepare(
                `
                CREATE TABLE 'resourceHeaderAuthExtendedCompatibility' (
                    'headerAuthExtendedCompatibilityId' integer PRIMARY KEY AUTOINCREMENT NOT NULL,
                    'resourceId' integer NOT NULL,
                    'extendedCompatibilityIsActivated' integer NOT NULL,
                    FOREIGN KEY ('resourceId') REFERENCES 'resources'('resourceId') ON UPDATE no action ON DELETE cascade
                );
                `
            ).run();

            db.prepare(
                `ALTER TABLE 'resources' ADD 'maintenanceModeEnabled' integer DEFAULT false NOT NULL;`
            ).run();

            db.prepare(
                `ALTER TABLE 'resources' ADD 'maintenanceModeType' text DEFAULT 'forced';`
            ).run();

            db.prepare(
                `ALTER TABLE 'resources' ADD 'maintenanceTitle' text;`
            ).run();

            db.prepare(
                `ALTER TABLE 'resources' ADD 'maintenanceMessage' text;`
            ).run();

            db.prepare(
                `ALTER TABLE 'resources' ADD 'maintenanceEstimatedTime' text;`
            ).run();

            db.prepare(
                `ALTER TABLE 'siteResources' ADD 'tcpPortRangeString' text DEFAULT '*' NOT NULL;`
            ).run();

            db.prepare(
                `ALTER TABLE 'siteResources' ADD 'udpPortRangeString' text DEFAULT '*' NOT NULL;`
            ).run();

            db.prepare(
                `ALTER TABLE 'siteResources' ADD 'disableIcmp' integer NOT NULL DEFAULT false;`
            ).run();
        })();

        db.pragma("foreign_keys = ON");

        console.log(`Migrated database`);
    } catch (e) {
        console.log("Failed to migrate db:", e);
        throw e;
    }

    console.log(`${version} migration complete`);
}