Merge branch 'dev' into resource-policies

Owen
2026-05-12 21:12:40 -07:00
100 changed files with 4604 additions and 1452 deletions

View File

@@ -87,7 +87,7 @@ function createDb() {
export const db = createDb();
export default db;
export const primaryDb = db.$primary;
export const primaryDb = db.$primary as typeof db; // is this typeof a problem? technically they are different types
export type Transaction = Parameters<
Parameters<(typeof db)["transaction"]>[0]
>[0];

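For reference, a minimal sketch of what the `as typeof db` cast in question does, using hypothetical stand-in types rather than the project's real Drizzle types:

```typescript
// Hypothetical stand-ins: a replica-aware wrapper whose $primary property is
// declared with a different structural type than the wrapper itself.
interface PrimaryDb {
    query(sql: string): Promise<unknown[]>;
}
interface ReplicaAwareDb {
    query(sql: string): Promise<unknown[]>;
    $primary: PrimaryDb;
}

declare const db: ReplicaAwareDb;

// Without the cast, primaryDb is typed PrimaryDb and cannot be passed where
// `typeof db` is expected (e.g. a `Transaction | typeof db` parameter).
// The cast is only sound if $primary really supports every member of db.
export const primaryDb = db.$primary as typeof db;
```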
View File

@@ -332,6 +332,7 @@ export const connectionAuditLog = pgTable(
clientId: integer("clientId").references(() => clients.clientId, {
onDelete: "cascade"
}),
clientEndpoint: text("clientEndpoint"),
userId: text("userId").references(() => users.userId, {
onDelete: "cascade"
}),
@@ -439,6 +440,8 @@ export const eventStreamingDestinations = pgTable(
type: varchar("type", { length: 50 }).notNull(), // e.g. "http", "kafka", etc.
config: text("config").notNull(), // JSON string with the configuration for the destination
enabled: boolean("enabled").notNull().default(true),
lastError: text("lastError"), // last send error message, null if healthy
lastErrorAt: bigint("lastErrorAt", { mode: "number" }), // epoch ms of last error, null if healthy
createdAt: bigint("createdAt", { mode: "number" }).notNull(),
updatedAt: bigint("updatedAt", { mode: "number" }).notNull()
}

View File

@@ -332,6 +332,7 @@ export const connectionAuditLog = sqliteTable(
clientId: integer("clientId").references(() => clients.clientId, {
onDelete: "cascade"
}),
clientEndpoint: text("clientEndpoint"),
userId: text("userId").references(() => users.userId, {
onDelete: "cascade"
}),
@@ -445,6 +446,8 @@ export const eventStreamingDestinations = sqliteTable(
enabled: integer("enabled", { mode: "boolean" })
.notNull()
.default(true),
lastError: text("lastError"), // last send error message, null if healthy
lastErrorAt: integer("lastErrorAt"), // epoch ms of last error, null if healthy
createdAt: integer("createdAt").notNull(),
updatedAt: integer("updatedAt").notNull()
}

View File

@@ -361,7 +361,7 @@ export async function updateClientResources(
} else {
let aliasAddress: string | null = null;
if (resourceData.mode === "host" || resourceData.mode === "http") {
aliasAddress = await getNextAvailableAliasAddress(orgId);
aliasAddress = await getNextAvailableAliasAddress(orgId, trx);
}
let domainInfo:

View File

@@ -1871,7 +1871,11 @@ async function getDomainId(
return null;
}
const domainSelection = validDomains[0].domains;
// Pick the most specific (longest baseDomain) valid domain so that, e.g.,
// *.test.dev.example.com is assigned to *.dev.example.com rather than *.example.com.
const domainSelection = validDomains.sort(
(a, b) => b.domains.baseDomain.length - a.domains.baseDomain.length
)[0].domains;
const baseDomain = domainSelection.baseDomain;
// Wildcard full-domains are not allowed on namespace (provided/free) domains

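A quick worked example of the longest-baseDomain selection above (domain values are hypothetical):

```typescript
// Candidates that both match the requested domain *.test.dev.example.com.
const validDomains = [
    { domains: { baseDomain: "example.com" } },
    { domains: { baseDomain: "dev.example.com" } }
];

// Sorting by descending baseDomain length picks the most specific match,
// "dev.example.com", mirroring the selection in getDomainId.
const selected = [...validDomains].sort(
    (a, b) => b.domains.baseDomain.length - a.domains.baseDomain.length
)[0].domains;

console.log(selected.baseDomain); // "dev.example.com"
```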
View File

@@ -25,9 +25,9 @@ import { tierMatrix } from "./billing/tierMatrix";
export async function calculateUserClientsForOrgs(
userId: string,
trx?: Transaction
trx: Transaction | typeof db = db
): Promise<void> {
const execute = async (transaction: Transaction) => {
const execute = async (transaction: Transaction | typeof db) => {
const orgCache = new Map<string, typeof orgs.$inferSelect | null>();
const adminRoleCache = new Map<
string,
@@ -437,7 +437,7 @@ export async function calculateUserClientsForOrgs(
async function cleanupOrphanedClients(
userId: string,
trx: Transaction,
trx: Transaction | typeof db,
userOrgIds: string[] = []
): Promise<void> {
// Find all OLM clients for this user that should be deleted

View File

@@ -2,7 +2,7 @@ import path from "path";
import { fileURLToPath } from "url";
// This is a placeholder value replaced by the build process
export const APP_VERSION = "1.18.2";
export const APP_VERSION = "1.18.4";
export const __FILENAME = fileURLToPath(import.meta.url);
export const __DIRNAME = path.dirname(__FILENAME);

View File

@@ -6,6 +6,7 @@ import z from "zod";
import logger from "@server/logger";
import semver from "semver";
import { getValidCertificatesForDomains } from "#dynamic/lib/certificates";
import { lockManager } from "#dynamic/lib/lock";
interface IPRange {
start: bigint;
@@ -327,120 +328,146 @@ export async function getNextAvailableClientSubnet(
orgId: string,
transaction: Transaction | typeof db = db
): Promise<string> {
const [org] = await transaction
.select()
.from(orgs)
.where(eq(orgs.orgId, orgId));
return await lockManager.withLock(
`client-subnet-allocation:${orgId}`,
async () => {
const [org] = await transaction
.select()
.from(orgs)
.where(eq(orgs.orgId, orgId));
if (!org) {
throw new Error(`Organization with ID ${orgId} not found`);
}
if (!org) {
throw new Error(`Organization with ID ${orgId} not found`);
}
if (!org.subnet) {
throw new Error(`Organization with ID ${orgId} has no subnet defined`);
}
if (!org.subnet) {
throw new Error(
`Organization with ID ${orgId} has no subnet defined`
);
}
const existingAddressesSites = await transaction
.select({
address: sites.address
})
.from(sites)
.where(and(isNotNull(sites.address), eq(sites.orgId, orgId)));
const existingAddressesSites = await transaction
.select({
address: sites.address
})
.from(sites)
.where(and(isNotNull(sites.address), eq(sites.orgId, orgId)));
const existingAddressesClients = await transaction
.select({
address: clients.subnet
})
.from(clients)
.where(and(isNotNull(clients.subnet), eq(clients.orgId, orgId)));
const existingAddressesClients = await transaction
.select({
address: clients.subnet
})
.from(clients)
.where(
and(isNotNull(clients.subnet), eq(clients.orgId, orgId))
);
const addresses = [
...existingAddressesSites.map(
(site) => `${site.address?.split("/")[0]}/32`
), // we are overriding the 32 so that we pick individual addresses in the subnet of the org for the site and the client even though they are stored with the /block_size of the org
...existingAddressesClients.map(
(client) => `${client.address.split("/")}/32`
)
].filter((address) => address !== null) as string[];
const addresses = [
...existingAddressesSites.map(
(site) => `${site.address?.split("/")[0]}/32`
                ), // force /32 so we pick individual addresses within the org's subnet for both sites and clients, even though they are stored with the org's /block_size
...existingAddressesClients.map(
(client) => `${client.address.split("/")[0]}/32`
)
].filter((address) => address !== null) as string[];
const subnet = findNextAvailableCidr(addresses, 32, org.subnet); // pick the sites address in the org
if (!subnet) {
throw new Error("No available subnets remaining in space");
}
const subnet = findNextAvailableCidr(addresses, 32, org.subnet); // pick the sites address in the org
if (!subnet) {
throw new Error("No available subnets remaining in space");
}
return subnet;
return subnet;
}
);
}
export async function getNextAvailableAliasAddress(
orgId: string
orgId: string,
trx: Transaction | typeof db = db
): Promise<string> {
const [org] = await db.select().from(orgs).where(eq(orgs.orgId, orgId));
return await lockManager.withLock(
`alias-address-allocation:${orgId}`,
async () => {
const [org] = await trx
.select()
.from(orgs)
.where(eq(orgs.orgId, orgId));
if (!org) {
throw new Error(`Organization with ID ${orgId} not found`);
}
if (!org) {
throw new Error(`Organization with ID ${orgId} not found`);
}
if (!org.subnet) {
throw new Error(`Organization with ID ${orgId} has no subnet defined`);
}
if (!org.subnet) {
throw new Error(
`Organization with ID ${orgId} has no subnet defined`
);
}
if (!org.utilitySubnet) {
throw new Error(
`Organization with ID ${orgId} has no utility subnet defined`
);
}
if (!org.utilitySubnet) {
throw new Error(
`Organization with ID ${orgId} has no utility subnet defined`
);
}
const existingAddresses = await db
.select({
aliasAddress: siteResources.aliasAddress
})
.from(siteResources)
.where(
and(
isNotNull(siteResources.aliasAddress),
eq(siteResources.orgId, orgId)
)
);
const existingAddresses = await trx
.select({
aliasAddress: siteResources.aliasAddress
})
.from(siteResources)
.where(
and(
isNotNull(siteResources.aliasAddress),
eq(siteResources.orgId, orgId)
)
);
const addresses = [
...existingAddresses.map(
(site) => `${site.aliasAddress?.split("/")[0]}/32`
),
// reserve a /29 for the dns server and other stuff
`${org.utilitySubnet.split("/")[0]}/29`
].filter((address) => address !== null) as string[];
const addresses = [
...existingAddresses.map(
(site) => `${site.aliasAddress?.split("/")[0]}/32`
),
// reserve a /29 for the DNS server and other utility addresses
`${org.utilitySubnet.split("/")[0]}/29`
].filter((address) => address !== null) as string[];
let subnet = findNextAvailableCidr(addresses, 32, org.utilitySubnet);
if (!subnet) {
throw new Error("No available subnets remaining in space");
}
let subnet = findNextAvailableCidr(
addresses,
32,
org.utilitySubnet
);
if (!subnet) {
throw new Error("No available subnets remaining in space");
}
// remove the cidr
subnet = subnet.split("/")[0];
// remove the cidr
subnet = subnet.split("/")[0];
return subnet;
return subnet;
}
);
}
export async function getNextAvailableOrgSubnet(): Promise<string> {
const existingAddresses = await db
.select({
subnet: orgs.subnet
})
.from(orgs)
.where(isNotNull(orgs.subnet));
return await lockManager.withLock("org-subnet-allocation", async () => {
const existingAddresses = await db
.select({
subnet: orgs.subnet
})
.from(orgs)
.where(isNotNull(orgs.subnet));
const addresses = existingAddresses.map((org) => org.subnet!);
const addresses = existingAddresses.map((org) => org.subnet!);
const subnet = findNextAvailableCidr(
addresses,
config.getRawConfig().orgs.block_size,
config.getRawConfig().orgs.subnet_group
);
if (!subnet) {
throw new Error("No available subnets remaining in space");
}
const subnet = findNextAvailableCidr(
addresses,
config.getRawConfig().orgs.block_size,
config.getRawConfig().orgs.subnet_group
);
if (!subnet) {
throw new Error("No available subnets remaining in space");
}
return subnet;
return subnet;
});
}
export function generateRemoteSubnets(
@@ -478,7 +505,12 @@ export type Alias = { alias: string | null; aliasAddress: string | null };
export function generateAliasConfig(allSiteResources: SiteResource[]): Alias[] {
return allSiteResources
.filter((sr) => sr.aliasAddress && ((sr.alias && sr.mode == "host") || (sr.fullDomain && sr.mode == "http")))
.filter(
(sr) =>
sr.aliasAddress &&
((sr.alias && sr.mode == "host") ||
(sr.fullDomain && sr.mode == "http"))
)
.map((sr) => ({
alias: sr.alias || sr.fullDomain,
aliasAddress: sr.aliasAddress

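All three allocators above now share one shape: take a named lock, read the addresses already in use, pick the next free block, return it. A condensed sketch of that pattern, with an in-memory stand-in for `lockManager` (only the `withLock(key, fn)` shape is taken from this diff):

```typescript
// In-memory stand-in: serialises callbacks per key so two concurrent
// allocations cannot both read the same "used" set and pick the same /32.
const chains = new Map<string, Promise<unknown>>();
async function withLock<T>(key: string, fn: () => Promise<T>): Promise<T> {
    const prev = chains.get(key) ?? Promise.resolve();
    const run = prev.then(fn, fn);
    chains.set(key, run.catch(() => undefined));
    return run;
}

// Hypothetical allocator over a toy address space.
async function allocate(orgId: string, used: Set<string>): Promise<string> {
    return withLock(`client-subnet-allocation:${orgId}`, async () => {
        for (let host = 1; host < 255; host++) {
            const candidate = `100.89.128.${host}/32`;
            if (!used.has(candidate)) {
                used.add(candidate);
                return candidate;
            }
        }
        throw new Error("No available subnets remaining in space");
    });
}
```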
View File

@@ -24,8 +24,11 @@ export async function getCachedStatusHistory(
return cached;
}
const nowSec = Math.floor(Date.now() / 1000);
const startSec = nowSec - days * 86400;
// Anchor to UTC midnight so the query window aligns with stable calendar days
const utcToday = new Date();
utcToday.setUTCHours(0, 0, 0, 0);
const todayMidnightSec = Math.floor(utcToday.getTime() / 1000);
const startSec = todayMidnightSec - days * 86400;
const events = await logsDb
.select()
@@ -110,11 +113,18 @@ export function computeBuckets(
days: number
): { buckets: StatusHistoryDayBucket[]; totalDowntime: number } {
const nowSec = Math.floor(Date.now() / 1000);
// Anchor bucket boundaries to UTC midnight so dates are stable calendar days
// and don't drift as the cache expires and is recomputed
const utcToday = new Date();
utcToday.setUTCHours(0, 0, 0, 0);
const todayMidnightSec = Math.floor(utcToday.getTime() / 1000);
const buckets: StatusHistoryDayBucket[] = [];
let totalDowntime = 0;
for (let d = 0; d < days; d++) {
const dayStartSec = nowSec - (days - d) * 86400;
const dayStartSec = todayMidnightSec - (days - 1 - d) * 86400;
const dayEndSec = dayStartSec + 86400;
const dayEvents = events.filter(

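A worked check of the new bucket arithmetic for `days = 3`:

```typescript
// With t0 = UTC midnight today (seconds), the loop yields:
//   d = 0 -> t0 - 2 * 86400  (two days ago)
//   d = 1 -> t0 - 1 * 86400  (yesterday)
//   d = 2 -> t0              (today, still accumulating)
const utcToday = new Date();
utcToday.setUTCHours(0, 0, 0, 0);
const t0 = Math.floor(utcToday.getTime() / 1000);

const days = 3;
for (let d = 0; d < days; d++) {
    const dayStartSec = t0 - (days - 1 - d) * 86400;
    const dayEndSec = dayStartSec + 86400;
    console.log(
        new Date(dayStartSec * 1000).toISOString(),
        "->",
        new Date(dayEndSec * 1000).toISOString()
    );
}
```

Because t0 is a calendar boundary rather than `Date.now()`, a given day keeps the same `[dayStartSec, dayEndSec)` window no matter when the cache is recomputed.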
View File

@@ -485,6 +485,133 @@ async function syncAcmeCertsFromHttp(endpoint: string): Promise<void> {
}
}
async function storeCertForDomain(
domain: string,
certPem: string,
keyPem: string,
validatedX509: crypto.X509Certificate
): Promise<void> {
const wildcard = domain.startsWith("*.");
const existing = await db
.select()
.from(certificates)
.where(eq(certificates.domain, domain))
.limit(1);
let oldCertPem: string | null = null;
let oldKeyPem: string | null = null;
if (existing.length > 0 && existing[0].certFile) {
try {
const storedCertPem = decrypt(
existing[0].certFile,
config.getRawConfig().server.secret!
);
const wildcardUnchanged = existing[0].wildcard === wildcard;
if (storedCertPem === certPem && wildcardUnchanged) {
return;
}
oldCertPem = storedCertPem;
if (existing[0].keyFile) {
try {
oldKeyPem = decrypt(
existing[0].keyFile,
config.getRawConfig().server.secret!
);
} catch (keyErr) {
logger.debug(
`acmeCertSync: could not decrypt stored key for ${domain}: ${keyErr}`
);
}
}
} catch (err) {
logger.debug(
`acmeCertSync: could not decrypt stored cert for ${domain}, will update: ${err}`
);
}
}
let expiresAt: number | null = null;
try {
expiresAt = Math.floor(
new Date(validatedX509.validTo).getTime() / 1000
);
} catch (err) {
logger.debug(
`acmeCertSync: could not parse cert expiry for ${domain}: ${err}`
);
}
const encryptedCert = encrypt(
certPem,
config.getRawConfig().server.secret!
);
const encryptedKey = encrypt(keyPem, config.getRawConfig().server.secret!);
const now = Math.floor(Date.now() / 1000);
const domainId = await findDomainId(domain);
if (domainId) {
logger.debug(
`acmeCertSync: resolved domainId "${domainId}" for cert domain "${domain}"`
);
} else {
logger.debug(
`acmeCertSync: no matching domain record found for cert domain "${domain}"`
);
}
if (existing.length > 0) {
logger.debug(
`acmeCertSync: updating existing certificate for ${domain} (expires ${expiresAt ? new Date(expiresAt * 1000).toISOString() : "unknown"})`
);
await db
.update(certificates)
.set({
certFile: encryptedCert,
keyFile: encryptedKey,
status: "valid",
expiresAt,
updatedAt: now,
wildcard,
...(domainId !== null && { domainId })
})
.where(eq(certificates.domain, domain));
logger.debug(
`acmeCertSync: updated certificate for ${domain} (expires ${expiresAt ? new Date(expiresAt * 1000).toISOString() : "unknown"})`
);
await pushCertUpdateToAffectedNewts(
domain,
domainId,
oldCertPem,
oldKeyPem
);
} else {
logger.debug(
`acmeCertSync: inserting new certificate for ${domain} (expires ${expiresAt ? new Date(expiresAt * 1000).toISOString() : "unknown"})`
);
await db.insert(certificates).values({
domain,
domainId,
certFile: encryptedCert,
keyFile: encryptedKey,
status: "valid",
expiresAt,
createdAt: now,
updatedAt: now,
wildcard
});
logger.debug(
`acmeCertSync: inserted new certificate for ${domain} (expires ${expiresAt ? new Date(expiresAt * 1000).toISOString() : "unknown"})`
);
await pushCertUpdateToAffectedNewts(domain, domainId, null, null);
}
}
function findAcmeJsonFiles(dirPath: string): string[] {
const results: string[] = [];
let entries: fs.Dirent[];
@@ -575,18 +702,16 @@ async function syncAcmeCerts(acmeJsonPath: string): Promise<void> {
}
for (const cert of allCerts) {
const domain = cert?.domain?.main;
const mainDomain = cert?.domain?.main;
if (!domain || typeof domain !== "string") {
if (!mainDomain || typeof mainDomain !== "string") {
logger.debug(`acmeCertSync: skipping cert with missing domain`);
continue;
}
const { wildcard } = detectWildcard(domain, cert.domain?.sans);
if (!cert.certificate || !cert.key) {
logger.debug(
`acmeCertSync: skipping cert for ${domain} - empty certificate or key field`
`acmeCertSync: skipping cert for ${mainDomain} - empty certificate or key field`
);
continue;
}
@@ -598,14 +723,14 @@ async function syncAcmeCerts(acmeJsonPath: string): Promise<void> {
keyPem = Buffer.from(cert.key, "base64").toString("utf8");
} catch (err) {
logger.debug(
`acmeCertSync: skipping cert for ${domain} - failed to base64-decode cert/key: ${err}`
`acmeCertSync: skipping cert for ${mainDomain} - failed to base64-decode cert/key: ${err}`
);
continue;
}
if (!certPem.trim() || !keyPem.trim()) {
logger.debug(
`acmeCertSync: skipping cert for ${domain} - blank PEM after base64 decode`
`acmeCertSync: skipping cert for ${mainDomain} - blank PEM after base64 decode`
);
continue;
}
@@ -616,7 +741,7 @@ async function syncAcmeCerts(acmeJsonPath: string): Promise<void> {
const firstCertPemForValidation = extractFirstCert(certPem);
if (!firstCertPemForValidation) {
logger.debug(
`acmeCertSync: skipping cert for ${domain} - no PEM certificate block found`
`acmeCertSync: skipping cert for ${mainDomain} - no PEM certificate block found`
);
continue;
}
@@ -628,7 +753,7 @@ async function syncAcmeCerts(acmeJsonPath: string): Promise<void> {
);
} catch (err) {
logger.debug(
`acmeCertSync: skipping cert for ${domain} - invalid X.509 certificate: ${err}`
`acmeCertSync: skipping cert for ${mainDomain} - invalid X.509 certificate: ${err}`
);
continue;
}
@@ -638,139 +763,40 @@ async function syncAcmeCerts(acmeJsonPath: string): Promise<void> {
crypto.createPrivateKey(keyPem);
} catch (err) {
logger.debug(
`acmeCertSync: skipping cert for ${domain} - invalid private key: ${err}`
`acmeCertSync: skipping cert for ${mainDomain} - invalid private key: ${err}`
);
continue;
}
// Check if cert already exists in DB
const existing = await db
.select()
.from(certificates)
.where(and(eq(certificates.domain, domain)))
.limit(1);
let oldCertPem: string | null = null;
let oldKeyPem: string | null = null;
if (existing.length > 0 && existing[0].certFile) {
try {
const storedCertPem = decrypt(
existing[0].certFile,
config.getRawConfig().server.secret!
);
const wildcardUnchanged = existing[0].wildcard === wildcard;
if (storedCertPem === certPem && wildcardUnchanged) {
// logger.debug(
// `acmeCertSync: cert for ${domain} is unchanged, skipping`
// );
continue;
// Collect all domains covered by this cert: main + every SAN.
// Each domain gets its own row in the certificates table so that
// lookups by any hostname on the cert succeed independently.
const allDomains = new Set<string>([mainDomain]);
if (Array.isArray(cert.domain?.sans)) {
for (const san of cert.domain.sans) {
if (typeof san === "string" && san.trim()) {
allDomains.add(san.trim());
}
// Cert has changed; capture old values so we can send a correct
// update message to the newt after the DB write.
oldCertPem = storedCertPem;
if (existing[0].keyFile) {
try {
oldKeyPem = decrypt(
existing[0].keyFile,
config.getRawConfig().server.secret!
);
} catch (keyErr) {
logger.debug(
`acmeCertSync: could not decrypt stored key for ${domain}: ${keyErr}`
);
}
}
} catch (err) {
// Decryption failure means we should proceed with the update
logger.debug(
`acmeCertSync: could not decrypt stored cert for ${domain}, will update: ${err}`
);
}
}
// Parse cert expiry from the validated X.509 certificate
let expiresAt: number | null = null;
try {
expiresAt = Math.floor(
new Date(validatedX509.validTo).getTime() / 1000
);
} catch (err) {
logger.debug(
`acmeCertSync: could not parse cert expiry for ${domain}: ${err}`
);
}
const encryptedCert = encrypt(
certPem,
config.getRawConfig().server.secret!
logger.debug(
`acmeCertSync: cert for ${mainDomain} covers ${allDomains.size} domain(s): ${[...allDomains].join(", ")}`
);
const encryptedKey = encrypt(
keyPem,
config.getRawConfig().server.secret!
);
const now = Math.floor(Date.now() / 1000);
const domainId = await findDomainId(domain);
if (domainId) {
logger.debug(
`acmeCertSync: resolved domainId "${domainId}" for cert domain "${domain}"`
);
} else {
logger.debug(
`acmeCertSync: no matching domain record found for cert domain "${domain}"`
);
}
if (existing.length > 0) {
logger.debug(
`acmeCertSync: updating existing certificate for ${domain} (expires ${expiresAt ? new Date(expiresAt * 1000).toISOString() : "unknown"})`
);
await db
.update(certificates)
.set({
certFile: encryptedCert,
keyFile: encryptedKey,
status: "valid",
expiresAt,
updatedAt: now,
wildcard,
...(domainId !== null && { domainId })
})
.where(eq(certificates.domain, domain));
logger.debug(
`acmeCertSync: updated certificate for ${domain} (expires ${expiresAt ? new Date(expiresAt * 1000).toISOString() : "unknown"})`
);
await pushCertUpdateToAffectedNewts(
domain,
domainId,
oldCertPem,
oldKeyPem
);
} else {
logger.debug(
`acmeCertSync: inserting new certificate for ${domain} (expires ${expiresAt ? new Date(expiresAt * 1000).toISOString() : "unknown"})`
);
await db.insert(certificates).values({
domain,
domainId,
certFile: encryptedCert,
keyFile: encryptedKey,
status: "valid",
expiresAt,
createdAt: now,
updatedAt: now,
wildcard
});
logger.debug(
`acmeCertSync: inserted new certificate for ${domain} (expires ${expiresAt ? new Date(expiresAt * 1000).toISOString() : "unknown"})`
);
// For a brand-new cert, push to any SSL resources that were waiting for it
await pushCertUpdateToAffectedNewts(domain, domainId, null, null);
for (const domain of allDomains) {
try {
await storeCertForDomain(
domain,
certPem,
keyPem,
validatedX509
);
} catch (err) {
logger.error(
`acmeCertSync: error storing cert for domain "${domain}": ${err}`
);
}
}
}
}

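The refactor above, in miniature: collect the main domain plus every SAN into a set, then store one certificates row per entry. A sketch (the cert shape follows the acme.json parsing in this file; values are placeholders):

```typescript
// Hypothetical parsed acme.json entry.
const cert = {
    domain: { main: "example.com", sans: ["www.example.com", "example.com"] }
};

const allDomains = new Set<string>([cert.domain.main]);
for (const san of cert.domain.sans ?? []) {
    if (typeof san === "string" && san.trim()) {
        allDomains.add(san.trim());
    }
}

// The Set dedupes the repeated "example.com"; each remaining domain then
// gets its own row via storeCertForDomain(domain, certPem, keyPem, x509).
console.log([...allDomains]); // ["example.com", "www.example.com"]
```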
View File

@@ -29,7 +29,10 @@ import { decrypt } from "@server/lib/crypto";
import logger from "@server/logger";
import { sendAlertWebhook } from "./sendAlertWebhook";
import { sendAlertEmail } from "./sendAlertEmail";
import { AlertContext, WebhookAlertConfig } from "@server/routers/alertRule/types";
import {
AlertContext,
WebhookAlertConfig
} from "@server/routers/alertRule/types";
/**
* Core alert processing pipeline.
@@ -99,7 +102,10 @@ export async function processAlerts(context: AlertContext): Promise<void> {
baseConditions,
or(
eq(alertRules.allHealthChecks, true),
eq(alertHealthChecks.healthCheckId, context.healthCheckId)
eq(
alertHealthChecks.healthCheckId,
context.healthCheckId
)
)
)
);
@@ -208,14 +214,19 @@ async function processRule(
for (const action of emailActions) {
try {
const recipients = await resolveEmailRecipients(action.emailActionId);
const recipients = await resolveEmailRecipients(
action.emailActionId
);
if (recipients.length > 0) {
await sendAlertEmail(recipients, context);
await db
.update(alertEmailActions)
.set({ lastSentAt: now })
.where(
eq(alertEmailActions.emailActionId, action.emailActionId)
eq(
alertEmailActions.emailActionId,
action.emailActionId
)
);
}
} catch (err) {
@@ -269,7 +280,7 @@ async function processRule(
)
);
} catch (err) {
logger.error(
logger.warn(
`processAlerts: failed to send alert webhook for action ${action.webhookActionId}`,
err
);
@@ -289,7 +300,9 @@ async function processRule(
* - All users in a role (by `roleId`, resolved via `userOrgRoles`)
* - Direct external email addresses
*/
async function resolveEmailRecipients(emailActionId: number): Promise<string[]> {
async function resolveEmailRecipients(
emailActionId: number
): Promise<string[]> {
const rows = await db
.select()
.from(alertEmailRecipients)

View File

@@ -236,15 +236,43 @@ interface TemplateContext {
}
/**
* Render a body template with {{event}}, {{timestamp}}, {{status}}, and
* {{data}} placeholders, mirroring the logic in HttpLogDestination.
* Render a body template with {{event}}, {{timestamp}}, {{status}}, {{data}},
* and individual data-field placeholders (e.g. {{orgId}}, {{siteId}}, …).
*
* {{data}} is replaced first (as raw JSON) so that any literal "{{…}}"
* strings inside data values are not re-expanded.
* Replacement order:
* 1. {{data}} → raw JSON of the full data object (prevents re-expansion of
* nested values that might look like placeholders).
* 2. Top-level scalar fields from data (string values are JSON-escaped;
* numbers and booleans are rendered as-is). Unknown placeholders are
* left untouched.
* 3. The fixed top-level keys: event, timestamp, status.
*/
function renderTemplate(template: string, ctx: TemplateContext): string {
const rendered = template
.replace(/\{\{data\}\}/g, JSON.stringify(ctx.data))
    // Step 1: expand {{data}} first so its contents are already serialised
// and won't be touched by later passes.
let rendered = template.replace(/\{\{data\}\}/g, JSON.stringify(ctx.data));
    // Step 2: expand individual data fields. Only replace placeholders whose
// key actually exists in ctx.data; leave everything else as-is.
for (const [key, value] of Object.entries(ctx.data)) {
if (value === null || value === undefined) continue;
const placeholder = new RegExp(
`\\{\\{${key.replace(/[.*+?^${}()|[\]\\]/g, "\\$&")}\\}\\}`,
"g"
);
let serialised: string;
if (typeof value === "string") {
serialised = escapeJsonString(value);
} else if (typeof value === "number" || typeof value === "boolean") {
serialised = String(value);
} else {
serialised = escapeJsonString(JSON.stringify(value));
}
rendered = rendered.replace(placeholder, serialised);
}
    // Step 3: expand the fixed top-level keys.
rendered = rendered
.replace(/\{\{event\}\}/g, escapeJsonString(ctx.event))
.replace(/\{\{timestamp\}\}/g, escapeJsonString(ctx.timestamp))
.replace(/\{\{status\}\}/g, escapeJsonString(ctx.status));

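An illustrative render, assuming a destination body template like the one below (template text and field values are hypothetical):

```typescript
const template =
    '{"type":"{{event}}","at":"{{timestamp}}","org":"{{orgId}}","raw":{{data}}}';
const ctx = {
    event: "request",
    timestamp: "2026-05-12T04:12:40.000Z",
    status: "ok",
    data: { orgId: "org_123", siteId: 7 }
};

// Step 1 replaces {{data}} with {"orgId":"org_123","siteId":7}. Step 2 fills
// {{orgId}} -> org_123 and would fill {{siteId}} -> 7 if present; the JSON
// emitted by step 1 contains no "{{...}}" so it is never re-expanded.
// Step 3 fills {{event}} and {{timestamp}}; an unknown placeholder such as
// {{missing}} would be left untouched. Expected output:
// {"type":"request","at":"2026-05-12T04:12:40.000Z","org":"org_123",
//  "raw":{"orgId":"org_123","siteId":7}}
```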
View File

@@ -97,6 +97,13 @@ export class PrivateConfig {
);
}
process.env.BRANDING_HIDE_POWERED_BY =
this.rawPrivateConfig.branding?.hide_powered_by === true ||
this.rawPrivateConfig.branding?.resource_auth_page
?.hide_powered_by === true
? "true"
: "false";
process.env.LOGIN_PAGE_SUBTITLE_TEXT =
this.rawPrivateConfig.branding?.login_page?.subtitle_text || "";

View File

@@ -46,6 +46,7 @@ export interface ConnectionLogRecord {
orgId: string;
siteId: number;
clientId: number | null;
clientEndpoint: string | null;
userId: string | null;
sourceAddr: string;
destAddr: string;

View File

@@ -30,10 +30,12 @@ import {
LOG_TYPES,
LogEvent,
DestinationFailureState,
HttpConfig
HttpConfig,
S3Config
} from "./types";
import { LogDestinationProvider } from "./providers/LogDestinationProvider";
import { HttpLogDestination } from "./providers/HttpLogDestination";
import { S3LogDestination } from "./providers/S3LogDestination";
import type { EventStreamingDestination } from "@server/db";
// ---------------------------------------------------------------------------
@@ -72,11 +74,11 @@ const MAX_CATCHUP_BATCHES = 20;
* After the last entry the max value is re-used.
*/
const BACKOFF_SCHEDULE_MS = [
60_000, // 1 min (failure 1)
2 * 60_000, // 2 min (failure 2)
5 * 60_000, // 5 min (failure 3)
10 * 60_000, // 10 min (failure 4)
30 * 60_000 // 30 min (failure 5+)
60_000, // 1 min (failure 1)
2 * 60_000, // 2 min (failure 2)
5 * 60_000, // 5 min (failure 3)
10 * 60_000, // 10 min (failure 4)
30 * 60_000 // 30 min (failure 5+)
];
/**
@@ -204,7 +206,10 @@ export class LogStreamingManager {
this.pollTimer = null;
this.runPoll()
.catch((err) =>
logger.error("LogStreamingManager: unexpected poll error", err)
logger.error(
"LogStreamingManager: unexpected poll error",
err
)
)
.finally(() => {
if (this.isRunning) {
@@ -275,10 +280,13 @@ export class LogStreamingManager {
}
// Decrypt and parse config; skip the destination if either step fails
let configFromDb: HttpConfig;
let configFromDb: unknown;
try {
const decryptedConfig = decrypt(dest.config, config.getRawConfig().server.secret!);
configFromDb = JSON.parse(decryptedConfig) as HttpConfig;
const decryptedConfig = decrypt(
dest.config,
config.getRawConfig().server.secret!
);
configFromDb = JSON.parse(decryptedConfig);
} catch (err) {
logger.error(
`LogStreamingManager: destination ${dest.destinationId} has invalid or undecryptable config`,
@@ -305,6 +313,7 @@ export class LogStreamingManager {
if (enabledTypes.length === 0) return;
let anyFailure = false;
let firstError: string | null = null;
for (const logType of enabledTypes) {
if (!this.isRunning) break;
@@ -312,6 +321,10 @@ export class LogStreamingManager {
await this.processLogType(dest, provider, logType);
} catch (err) {
anyFailure = true;
if (firstError === null) {
firstError =
err instanceof Error ? err.message : String(err);
}
logger.error(
`LogStreamingManager: failed to process "${logType}" logs ` +
`for destination ${dest.destinationId}`,
@@ -322,6 +335,10 @@ export class LogStreamingManager {
if (anyFailure) {
this.recordFailure(dest.destinationId);
await this.setDestinationError(
dest.destinationId,
firstError ?? "Unknown error"
);
} else {
// Any success resets the failure/back-off state
if (this.failures.has(dest.destinationId)) {
@@ -330,6 +347,7 @@ export class LogStreamingManager {
`LogStreamingManager: destination ${dest.destinationId} recovered`
);
}
await this.clearDestinationError(dest.destinationId);
}
}
@@ -362,7 +380,10 @@ export class LogStreamingManager {
.from(eventStreamingCursors)
.where(
and(
eq(eventStreamingCursors.destinationId, dest.destinationId),
eq(
eventStreamingCursors.destinationId,
dest.destinationId
),
eq(eventStreamingCursors.logType, logType)
)
)
@@ -431,9 +452,7 @@ export class LogStreamingManager {
if (rows.length === 0) break;
const events = rows.map((row) =>
this.rowToLogEvent(logType, row)
);
const events = rows.map((row) => this.rowToLogEvent(logType, row));
// Throws on failure; caught by the caller, which applies back-off
await provider.send(events);
@@ -677,8 +696,7 @@ export class LogStreamingManager {
break;
}
const orgId =
typeof row.orgId === "string" ? row.orgId : "";
const orgId = typeof row.orgId === "string" ? row.orgId : "";
return {
id: row.id,
@@ -708,6 +726,8 @@ export class LogStreamingManager {
switch (type) {
case "http":
return new HttpLogDestination(config as HttpConfig);
case "s3":
return new S3LogDestination(config as S3Config);
// Future providers:
// case "datadog": return new DatadogLogDestination(config as DatadogConfig);
default:
@@ -749,6 +769,45 @@ export class LogStreamingManager {
// DB helpers
// -------------------------------------------------------------------------
private async setDestinationError(
destinationId: number,
errorMessage: string
): Promise<void> {
// Truncate to 1000 chars so it fits comfortably in the text column.
const truncated = errorMessage.slice(0, 1000);
try {
await db
.update(eventStreamingDestinations)
.set({ lastError: truncated, lastErrorAt: Date.now() })
.where(
eq(eventStreamingDestinations.destinationId, destinationId)
);
} catch (err) {
logger.warn(
`LogStreamingManager: could not persist error status for destination ${destinationId}`,
err
);
}
}
private async clearDestinationError(destinationId: number): Promise<void> {
try {
// Only update if there is actually an error stored, to avoid
// unnecessary writes on every successful poll cycle.
await db
.update(eventStreamingDestinations)
.set({ lastError: null, lastErrorAt: null })
.where(
eq(eventStreamingDestinations.destinationId, destinationId)
);
} catch (err) {
logger.warn(
`LogStreamingManager: could not clear error status for destination ${destinationId}`,
err
);
}
}
private async loadEnabledDestinations(): Promise<
EventStreamingDestination[]
> {

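For orientation, the way a schedule like `BACKOFF_SCHEDULE_MS` is indexed (a sketch; the manager's actual lookup sits outside the hunks shown here):

```typescript
const BACKOFF_SCHEDULE_MS = [
    60_000, // 1 min (failure 1)
    2 * 60_000, // 2 min (failure 2)
    5 * 60_000, // 5 min (failure 3)
    10 * 60_000, // 10 min (failure 4)
    30 * 60_000 // 30 min (failure 5+)
];

// failureCount is 1-based; past the end of the schedule the max is re-used.
function backoffDelayMs(failureCount: number): number {
    const idx = Math.min(failureCount, BACKOFF_SCHEDULE_MS.length) - 1;
    return BACKOFF_SCHEDULE_MS[Math.max(idx, 0)];
}

backoffDelayMs(1); // 60_000
backoffDelayMs(7); // 1_800_000 (clamped to the last entry)
```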
View File

@@ -0,0 +1,279 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import { S3Client, PutObjectCommand } from "@aws-sdk/client-s3";
import { gzip as gzipCallback } from "zlib";
import { promisify } from "util";
import { randomUUID } from "crypto";
import logger from "@server/logger";
import { LogEvent, S3Config, S3PayloadFormat } from "../types";
import { LogDestinationProvider } from "./LogDestinationProvider";
const gzipAsync = promisify(gzipCallback);
// ---------------------------------------------------------------------------
// Constants
// ---------------------------------------------------------------------------
/** Maximum time (ms) to wait for a single S3 PutObject response. */
const REQUEST_TIMEOUT_MS = 60_000;
/** Default payload format when none is specified in the config. */
const DEFAULT_FORMAT: S3PayloadFormat = "json_array";
// ---------------------------------------------------------------------------
// S3LogDestination
// ---------------------------------------------------------------------------
/**
* Forwards a batch of log events to an S3-compatible object store by
* uploading a single object per `send()` call.
*
* **Object key layout**
* ```
* {prefix}/{logType}/{YYYY}/{MM}/{DD}/{HH}-{mm}-{ss}-{uuid}.{ext}[.gz]
* ```
* - `prefix`: from `config.prefix` (default: empty, so keys start at `logType`)
* - `logType`: one of "request", "action", "access", "connection"
* - Date components are derived from the upload time (UTC)
* - `ext`: `json` | `ndjson` | `csv`
* - `.gz`: appended when `config.gzip` is true
*
* **Payload formats** (controlled by `config.format`):
* - `json_array` (default): body is a JSON array of event objects.
* - `ndjson`: one JSON object per line (newline-delimited).
* - `csv`: RFC-4180 CSV with a header row; columns are the
* union of all field names in the batch's event data.
*
* **Compression**: when `config.gzip` is `true` the body is gzip-compressed
* before upload and `Content-Encoding: gzip` is set on the object.
*
* **Custom endpoint**: set `config.endpoint` to target any S3-compatible
* storage service (e.g. MinIO, Cloudflare R2).
*/
export class S3LogDestination implements LogDestinationProvider {
readonly type = "s3";
private readonly config: S3Config;
constructor(config: S3Config) {
this.config = config;
}
// -----------------------------------------------------------------------
// LogDestinationProvider implementation
// -----------------------------------------------------------------------
async send(events: LogEvent[]): Promise<void> {
if (events.length === 0) return;
const format = this.config.format ?? DEFAULT_FORMAT;
const useGzip = this.config.gzip ?? false;
const logType = events[0].logType;
const rawBody = this.serialize(events, format);
const bodyBuffer = Buffer.from(rawBody, "utf-8");
let uploadBody: Buffer;
let contentEncoding: string | undefined;
if (useGzip) {
uploadBody = (await gzipAsync(bodyBuffer)) as Buffer;
contentEncoding = "gzip";
} else {
uploadBody = bodyBuffer;
}
const key = this.buildObjectKey(logType, format, useGzip);
const contentType = this.contentType(format);
const clientConfig: ConstructorParameters<typeof S3Client>[0] = {
region: this.config.region,
credentials: {
accessKeyId: this.config.accessKeyId,
secretAccessKey: this.config.secretAccessKey
},
requestHandler: {
requestTimeout: REQUEST_TIMEOUT_MS
}
};
if (this.config.endpoint?.trim()) {
clientConfig.endpoint = this.config.endpoint.trim();
}
const client = new S3Client(clientConfig);
try {
await client.send(
new PutObjectCommand({
Bucket: this.config.bucket,
Key: key,
Body: uploadBody,
ContentType: contentType,
...(contentEncoding
? { ContentEncoding: contentEncoding }
: {})
})
);
} catch (err: unknown) {
const msg = err instanceof Error ? err.message : String(err);
throw new Error(
`S3LogDestination: failed to upload object "${key}" ` +
`to bucket "${this.config.bucket}": ${msg}`
);
}
}
// -----------------------------------------------------------------------
// Internal helpers
// -----------------------------------------------------------------------
/**
* Construct a unique S3 object key for the given log type and format.
* Keys are partitioned by logType and date so they can be queried or
* lifecycle-managed independently.
*/
private buildObjectKey(
logType: string,
format: S3PayloadFormat,
gzip: boolean
): string {
const now = new Date();
const year = now.getUTCFullYear();
const month = String(now.getUTCMonth() + 1).padStart(2, "0");
const day = String(now.getUTCDate()).padStart(2, "0");
const hh = String(now.getUTCHours()).padStart(2, "0");
const mm = String(now.getUTCMinutes()).padStart(2, "0");
const ss = String(now.getUTCSeconds()).padStart(2, "0");
const uid = randomUUID();
const ext =
format === "csv" ? "csv" : format === "ndjson" ? "ndjson" : "json";
const fileName = `${hh}-${mm}-${ss}-${uid}.${ext}${gzip ? ".gz" : ""}`;
const rawPrefix = (this.config.prefix ?? "").trim().replace(/\/+$/, "");
const parts = [
rawPrefix,
logType,
`${year}/${month}/${day}`,
fileName
].filter((p) => p !== "");
return parts.join("/");
}
private contentType(format: S3PayloadFormat): string {
switch (format) {
case "csv":
return "text/csv; charset=utf-8";
case "ndjson":
return "application/x-ndjson";
default:
return "application/json";
}
}
private serialize(events: LogEvent[], format: S3PayloadFormat): string {
switch (format) {
case "json_array":
return JSON.stringify(events.map(toPayload));
case "ndjson":
return events
.map((e) => JSON.stringify(toPayload(e)))
.join("\n");
case "csv":
return toCsv(events);
}
}
}
// ---------------------------------------------------------------------------
// Payload helpers
// ---------------------------------------------------------------------------
function toPayload(event: LogEvent): unknown {
return {
event: event.logType,
timestamp: new Date(event.timestamp * 1000).toISOString(),
data: event.data
};
}
/**
* Convert a batch of events to RFC-4180 CSV.
*
* The column set is the union of `event`, `timestamp`, and all keys present in
* `event.data` across the batch, preserving insertion order. Values that
* contain commas, double-quotes, or newlines are quoted and escaped.
*/
function toCsv(events: LogEvent[]): string {
if (events.length === 0) return "";
// Collect all unique data keys in stable order
const keySet = new LinkedSet<string>();
keySet.add("event");
keySet.add("timestamp");
for (const e of events) {
for (const k of Object.keys(e.data)) {
keySet.add(k);
}
}
const headers = keySet.toArray();
const rows: string[] = [headers.map(csvEscape).join(",")];
for (const e of events) {
const flat: Record<string, unknown> = {
event: e.logType,
timestamp: new Date(e.timestamp * 1000).toISOString(),
...e.data
};
rows.push(
headers.map((h) => csvEscape(flattenValue(flat[h]))).join(",")
);
}
return rows.join("\n");
}
/** Flatten a value to a plain string suitable for a CSV cell. */
function flattenValue(value: unknown): string {
if (value === null || value === undefined) return "";
if (typeof value === "object") return JSON.stringify(value);
return String(value);
}
/** RFC-4180 CSV escaping. */
function csvEscape(value: string): string {
if (/[",\n\r]/.test(value)) {
return `"${value.replace(/"/g, '""')}"`;
}
return value;
}
// ---------------------------------------------------------------------------
// Minimal ordered set (preserves insertion order, deduplicates)
// ---------------------------------------------------------------------------
class LinkedSet<T> {
private readonly map = new Map<T, true>();
add(value: T): void {
this.map.set(value, true);
}
toArray(): T[] {
return Array.from(this.map.keys());
}
}

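A usage sketch of the new provider against an S3-compatible endpoint (bucket, credentials, and endpoint are placeholders; the event is trimmed to the fields this class reads):

```typescript
const dest = new S3LogDestination({
    name: "audit-archive",
    accessKeyId: "AKIAEXAMPLE",
    secretAccessKey: "example-secret",
    region: "us-east-1",
    bucket: "example-logs",
    prefix: "pangolin",
    endpoint: "https://minio.internal.example:9000", // omit for AWS S3
    format: "ndjson",
    gzip: true
});

// Uploads one gzipped NDJSON object, e.g.
// pangolin/request/2026/05/12/21-12-40-<uuid>.ndjson.gz
await dest.send([
    { logType: "request", timestamp: 1778000000, data: { orgId: "org_123" } }
] as unknown as LogEvent[]); // LogEvent may carry more fields than shown
```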
View File

@@ -107,6 +107,40 @@ export interface HttpConfig {
bodyTemplate?: string;
}
// ---------------------------------------------------------------------------
// S3 destination configuration
// ---------------------------------------------------------------------------
/**
* Controls how the batch of events is serialised into each S3 object.
*
* - `json_array`: `[{…}, {…}]`; the default. Each uploaded object's body is a JSON array.
* - `ndjson`: `{…}\n{…}`; newline-delimited JSON, one event per line.
* - `csv`: RFC-4180 CSV with a header row derived from the event fields.
*/
export type S3PayloadFormat = "json_array" | "ndjson" | "csv";
export interface S3Config {
/** Human-readable label for the destination */
name: string;
/** AWS Access Key ID */
accessKeyId: string;
/** AWS Secret Access Key */
secretAccessKey: string;
/** AWS region (e.g. "us-east-1") */
region: string;
/** Target S3 bucket name */
bucket: string;
/** Optional key prefix appended before the auto-generated path */
prefix?: string;
/** Override the S3 endpoint for S3-compatible storage (e.g. MinIO, R2) */
endpoint?: string;
/** How events are serialised into each object. Defaults to "json_array". */
format?: S3PayloadFormat;
/** Whether to gzip-compress the object before upload. Defaults to false. */
gzip?: boolean;
}
// ---------------------------------------------------------------------------
// Per-destination per-log-type cursor (reflects the DB table)
// ---------------------------------------------------------------------------

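The destination's `config` column stores this shape as an encrypted JSON string; after `LogStreamingManager` decrypts and parses it, an `"s3"` destination's config might look like (placeholder values):

```typescript
const decoded = JSON.parse(`{
    "name": "minio-archive",
    "accessKeyId": "AKIAEXAMPLE",
    "secretAccessKey": "example-secret",
    "region": "us-east-1",
    "bucket": "event-logs",
    "prefix": "org-a",
    "endpoint": "http://minio:9000",
    "format": "csv",
    "gzip": false
}`) as S3Config;
```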
View File

@@ -141,6 +141,7 @@ export const privateConfigSchema = z
)
.optional(),
hide_auth_layout_footer: z.boolean().optional().default(false),
hide_powered_by: z.boolean().optional(),
login_page: z
.object({
subtitle_text: z.string().optional()

View File

@@ -124,15 +124,11 @@ function getWhere(data: Q) {
data.clientId
? eq(connectionAuditLog.clientId, data.clientId)
: undefined,
data.siteId
? eq(connectionAuditLog.siteId, data.siteId)
: undefined,
data.siteId ? eq(connectionAuditLog.siteId, data.siteId) : undefined,
data.siteResourceId
? eq(connectionAuditLog.siteResourceId, data.siteResourceId)
: undefined,
data.userId
? eq(connectionAuditLog.userId, data.userId)
: undefined
data.userId ? eq(connectionAuditLog.userId, data.userId) : undefined
);
}
@@ -144,6 +140,7 @@ export function queryConnection(data: Q) {
orgId: connectionAuditLog.orgId,
siteId: connectionAuditLog.siteId,
clientId: connectionAuditLog.clientId,
clientEndpoint: connectionAuditLog.clientEndpoint,
userId: connectionAuditLog.userId,
sourceAddr: connectionAuditLog.sourceAddr,
destAddr: connectionAuditLog.destAddr,
@@ -203,10 +200,7 @@ async function enrichWithDetails(
];
// Fetch resource details from main database
const resourceMap = new Map<
number,
{ name: string; niceId: string }
>();
const resourceMap = new Map<number, { name: string; niceId: string }>();
if (siteResourceIds.length > 0) {
const resourceDetails = await primaryDb
.select({
@@ -268,10 +262,7 @@ async function enrichWithDetails(
}
// Fetch user details from main database
const userMap = new Map<
string,
{ email: string | null }
>();
const userMap = new Map<string, { email: string | null }>();
if (userIds.length > 0) {
const userDetails = await primaryDb
.select({
@@ -290,29 +281,25 @@ async function enrichWithDetails(
return logs.map((log) => ({
...log,
resourceName: log.siteResourceId
? resourceMap.get(log.siteResourceId)?.name ?? null
? (resourceMap.get(log.siteResourceId)?.name ?? null)
: null,
resourceNiceId: log.siteResourceId
? resourceMap.get(log.siteResourceId)?.niceId ?? null
: null,
siteName: log.siteId
? siteMap.get(log.siteId)?.name ?? null
? (resourceMap.get(log.siteResourceId)?.niceId ?? null)
: null,
siteName: log.siteId ? (siteMap.get(log.siteId)?.name ?? null) : null,
siteNiceId: log.siteId
? siteMap.get(log.siteId)?.niceId ?? null
? (siteMap.get(log.siteId)?.niceId ?? null)
: null,
clientName: log.clientId
? clientMap.get(log.clientId)?.name ?? null
? (clientMap.get(log.clientId)?.name ?? null)
: null,
clientNiceId: log.clientId
? clientMap.get(log.clientId)?.niceId ?? null
? (clientMap.get(log.clientId)?.niceId ?? null)
: null,
clientType: log.clientId
? clientMap.get(log.clientId)?.type ?? null
? (clientMap.get(log.clientId)?.type ?? null)
: null,
userEmail: log.userId
? userMap.get(log.userId)?.email ?? null
: null
userEmail: log.userId ? (userMap.get(log.userId)?.email ?? null) : null
}));
}
@@ -521,4 +508,4 @@ export async function queryConnectionAuditLogs(
createHttpError(HttpCode.INTERNAL_SERVER_ERROR, "An error occurred")
);
}
}
}

View File

@@ -51,6 +51,8 @@ export type ListEventStreamingDestinationsResponse = {
type: string;
config: string;
enabled: boolean;
lastError: string | null;
lastErrorAt: number | null;
createdAt: number;
updatedAt: number;
sendConnectionLogs: boolean;
@@ -79,7 +81,8 @@ async function query(orgId: string, limit: number, offset: number) {
registry.registerPath({
method: "get",
path: "/org/{orgId}/event-streaming-destination",
description: "List all event streaming destinations for a specific organization.",
description:
"List all event streaming destinations for a specific organization.",
tags: [OpenAPITags.Org],
request: {
query: querySchema,

View File

@@ -11,7 +11,7 @@
* This file is not licensed under the AGPLv3.
*/
import { db } from "@server/db";
import { clientSitesAssociationsCache, db } from "@server/db";
import { MessageHandler } from "@server/routers/ws";
import { sites, Newt, clients, orgs } from "@server/db";
import { and, eq, inArray } from "drizzle-orm";
@@ -146,7 +146,11 @@ export const handleConnectionLogMessage: MessageHandler = async (context) => {
// each unique sourceAddr + the org's CIDR suffix and do a targeted IN query.
const ipToClient = new Map<
string,
{ clientId: number; userId: string | null }
{
clientId: number;
userId: string | null;
clientEndpoint: string | null;
}
>();
if (cidrSuffix) {
@@ -172,9 +176,21 @@ export const handleConnectionLogMessage: MessageHandler = async (context) => {
.select({
clientId: clients.clientId,
userId: clients.userId,
subnet: clients.subnet
subnet: clients.subnet,
clientEndpoint: clientSitesAssociationsCache.endpoint
})
.from(clients)
.leftJoin(
// this join should be one-to-one
clientSitesAssociationsCache,
and(
eq(
clients.clientId,
clientSitesAssociationsCache.clientId
),
eq(clientSitesAssociationsCache.siteId, newt.siteId)
)
)
.where(
and(
eq(clients.orgId, orgId),
@@ -189,7 +205,8 @@ export const handleConnectionLogMessage: MessageHandler = async (context) => {
);
ipToClient.set(ip, {
clientId: c.clientId,
userId: c.userId
userId: c.userId,
clientEndpoint: c.clientEndpoint
});
}
}
@@ -234,6 +251,7 @@ export const handleConnectionLogMessage: MessageHandler = async (context) => {
orgId,
siteId: newt.siteId,
clientId: clientInfo?.clientId ?? null,
clientEndpoint: clientInfo?.clientEndpoint ?? null,
userId: clientInfo?.userId ?? null,
sourceAddr: session.sourceAddr,
destAddr: session.destAddr,

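The lookup introduced above, in miniature: the handler keys a Map by the bare source IP, matches clients whose stored subnet corresponds to `${ip}${cidrSuffix}`, and left-joins the per-site association cache to recover the client's current endpoint. A sketch of just the map-building step (row shape simplified; the `split("/")` step is an assumption about how the stored subnet is reduced to an IP):

```typescript
interface ClientRow {
    clientId: number;
    userId: string | null;
    subnet: string; // e.g. "100.89.128.5/24"
    clientEndpoint: string | null; // from clientSitesAssociationsCache
}

const ipToClient = new Map<
    string,
    { clientId: number; userId: string | null; clientEndpoint: string | null }
>();

function indexClients(rows: ClientRow[]): void {
    for (const c of rows) {
        const ip = c.subnet.split("/")[0]; // drop the org's CIDR suffix
        ipToClient.set(ip, {
            clientId: c.clientId,
            userId: c.userId,
            clientEndpoint: c.clientEndpoint
        });
    }
}
```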
View File

@@ -14,7 +14,7 @@
import { Request, Response, NextFunction } from "express";
import { z } from "zod";
import stoi from "@server/lib/stoi";
import { clients, db } from "@server/db";
import { clients, db, primaryDb, Client } from "@server/db";
import { userOrgRoles, userOrgs, roles } from "@server/db";
import { eq, and } from "drizzle-orm";
import response from "@server/lib/response";
@@ -98,15 +98,6 @@ export async function addUserRole(
);
}
if (existingUser[0].isOwner) {
return next(
createHttpError(
HttpCode.FORBIDDEN,
"Cannot change the role of the owner of the organization"
)
);
}
const roleExists = await db
.select()
.from(roles)
@@ -122,8 +113,12 @@ export async function addUserRole(
);
}
let newUserRole: { userId: string; orgId: string; roleId: number } | null =
null;
let newUserRole: {
userId: string;
orgId: string;
roleId: number;
} | null = null;
let orgClientsToRebuild: Client[] = [];
await db.transaction(async (trx) => {
const inserted = await trx
.insert(userOrgRoles)
@@ -149,11 +144,19 @@ export async function addUserRole(
)
);
for (const orgClient of orgClients) {
await rebuildClientAssociationsFromClient(orgClient, trx);
}
orgClientsToRebuild = orgClients;
});
for (const orgClient of orgClientsToRebuild) {
rebuildClientAssociationsFromClient(orgClient, primaryDb).catch(
(e) => {
logger.error(
`Failed to rebuild client associations for client ${orgClient.clientId} after adding role: ${e}`
);
}
);
}
return response(res, {
data: newUserRole ?? { userId, orgId: role.orgId, roleId },
success: true,

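The same post-commit pattern recurs in the role and client handlers below: collect the affected rows inside the transaction, then rebuild associations after commit, fire-and-forget against `primaryDb`, logging failures rather than rolling back. Condensed (identifiers are the handler's own, not a self-contained module):

```typescript
let orgClientsToRebuild: Client[] = [];
await db.transaction(async (trx) => {
    // ...mutate userOrgRoles inside the transaction...
    orgClientsToRebuild = await trx
        .select()
        .from(clients)
        .where(and(eq(clients.userId, userId), eq(clients.orgId, orgId)));
});

// After commit: a slow or failing rebuild can no longer hold the
// transaction open or roll back the role change itself.
for (const orgClient of orgClientsToRebuild) {
    rebuildClientAssociationsFromClient(orgClient, primaryDb).catch((e) => {
        logger.error(
            `Failed to rebuild client associations for client ${orgClient.clientId}: ${e}`
        );
    });
}
```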
View File

@@ -14,7 +14,7 @@
import { Request, Response, NextFunction } from "express";
import { z } from "zod";
import stoi from "@server/lib/stoi";
import { db } from "@server/db";
import { db, primaryDb, Client } from "@server/db";
import { userOrgRoles, userOrgs, roles, clients } from "@server/db";
import { eq, and } from "drizzle-orm";
import response from "@server/lib/response";
@@ -98,11 +98,11 @@ export async function removeUserRole(
);
}
if (existingUser.isOwner) {
if (existingUser.isOwner && role.isAdmin === true) {
return next(
createHttpError(
HttpCode.FORBIDDEN,
"Cannot change the roles of the owner of the organization"
"Cannot remove the administrator role from the organization owner"
)
);
}
@@ -129,6 +129,7 @@ export async function removeUserRole(
}
}
let orgClientsToRebuild: Client[] = [];
await db.transaction(async (trx) => {
await trx
.delete(userOrgRoles)
@@ -150,11 +151,19 @@ export async function removeUserRole(
)
);
for (const orgClient of orgClients) {
await rebuildClientAssociationsFromClient(orgClient, trx);
}
orgClientsToRebuild = orgClients;
});
for (const orgClient of orgClientsToRebuild) {
rebuildClientAssociationsFromClient(orgClient, primaryDb).catch(
(e) => {
logger.error(
`Failed to rebuild client associations for client ${orgClient.clientId} after removing role: ${e}`
);
}
);
}
return response(res, {
data: { userId, orgId: role.orgId, roleId },
success: true,

View File

@@ -13,7 +13,7 @@
import { Request, Response, NextFunction } from "express";
import { z } from "zod";
import { clients, db } from "@server/db";
import { clients, db, primaryDb, Client } from "@server/db";
import { userOrgRoles, userOrgs, roles } from "@server/db";
import { eq, and, inArray } from "drizzle-orm";
import response from "@server/lib/response";
@@ -87,17 +87,8 @@ export async function setUserOrgRoles(
);
}
if (existingUser.isOwner) {
return next(
createHttpError(
HttpCode.FORBIDDEN,
"Cannot change the roles of the owner of the organization"
)
);
}
const orgRoles = await db
.select({ roleId: roles.roleId })
.select({ roleId: roles.roleId, isAdmin: roles.isAdmin })
.from(roles)
.where(
and(
@@ -115,6 +106,19 @@ export async function setUserOrgRoles(
);
}
if (existingUser.isOwner) {
const hasAdminRole = orgRoles.some((r) => r.isAdmin === true);
if (!hasAdminRole) {
return next(
createHttpError(
HttpCode.FORBIDDEN,
"The organization owner must retain an administrator role"
)
);
}
}
let orgClientsToRebuild: Client[] = [];
await db.transaction(async (trx) => {
await trx
.delete(userOrgRoles)
@@ -142,11 +146,19 @@ export async function setUserOrgRoles(
and(eq(clients.userId, userId), eq(clients.orgId, orgId))
);
for (const orgClient of orgClients) {
await rebuildClientAssociationsFromClient(orgClient, trx);
}
orgClientsToRebuild = orgClients;
});
for (const orgClient of orgClientsToRebuild) {
rebuildClientAssociationsFromClient(orgClient, primaryDb).catch(
(e) => {
logger.error(
`Failed to rebuild client associations for client ${orgClient.clientId} after setting roles: ${e}`
);
}
);
}
return response(res, {
data: { userId, orgId, roleIds: uniqueRoleIds },
success: true,

View File

@@ -100,6 +100,7 @@ export type QueryConnectionAuditLogResponse = {
orgId: string | null;
siteId: number | null;
clientId: number | null;
clientEndpoint: string | null;
userId: string | null;
sourceAddr: string;
destAddr: string;

View File

@@ -1,6 +1,6 @@
import { Request, Response, NextFunction } from "express";
import { z } from "zod";
import { db, orgs, userOrgs, users } from "@server/db";
import { db, orgs, userOrgs, users, primaryDb } from "@server/db";
import { eq, and, inArray, not } from "drizzle-orm";
import response from "@server/lib/response";
import HttpCode from "@server/types/HttpCode";
@@ -218,13 +218,18 @@ export async function deleteMyAccount(
await db.transaction(async (trx) => {
await trx.delete(users).where(eq(users.userId, userId));
await calculateUserClientsForOrgs(userId, trx);
// loop through the other orgs and decrement the count
for (const userOrg of otherOrgsTheUserWasIn) {
await usageService.add(userOrg.orgId, FeatureId.USERS, -1, trx);
}
});
calculateUserClientsForOrgs(userId, primaryDb).catch((e) => {
logger.error(
`Failed to calculate user clients after deleting account for user ${userId}: ${e}`
);
});
try {
await invalidateSession(session.sessionId);
} catch (error) {

View File

@@ -1,6 +1,6 @@
import { Request, Response, NextFunction } from "express";
import { z } from "zod";
import { db } from "@server/db";
import { db, primaryDb } from "@server/db";
import {
roles,
Client,
@@ -92,7 +92,10 @@ export async function createClient(
const { orgId } = parsedParams.data;
if (req.user && (!req.userOrgRoleIds || req.userOrgRoleIds.length === 0)) {
if (
req.user &&
(!req.userOrgRoleIds || req.userOrgRoleIds.length === 0)
) {
return next(
createHttpError(HttpCode.FORBIDDEN, "User does not have a role")
);
@@ -198,7 +201,10 @@ export async function createClient(
if (!randomExitNode) {
return next(
createHttpError(HttpCode.NOT_FOUND, `No exit nodes available. ${build == "saas" ? "Please contact support." : "You need to install gerbil to use the clients."}`)
createHttpError(
HttpCode.NOT_FOUND,
`No exit nodes available. ${build == "saas" ? "Please contact support." : "You need to install gerbil to use the clients."}`
)
);
}
@@ -256,10 +262,18 @@ export async function createClient(
clientId: newClient.clientId,
dateCreated: moment().toISOString()
});
await rebuildClientAssociationsFromClient(newClient, trx);
});
if (newClient) {
rebuildClientAssociationsFromClient(newClient, primaryDb).catch(
(e) => {
logger.error(
`Failed to rebuild client associations after creating client: ${e}`
);
}
);
}
return response<CreateClientResponse>(res, {
data: newClient,
success: true,

View File

@@ -1,6 +1,6 @@
import { Request, Response, NextFunction } from "express";
import { z } from "zod";
import { db } from "@server/db";
import { db, primaryDb } from "@server/db";
import {
roles,
Client,
@@ -237,10 +237,18 @@ export async function createUserClient(
userId,
clientId: newClient.clientId
});
await rebuildClientAssociationsFromClient(newClient, trx);
});
if (newClient) {
rebuildClientAssociationsFromClient(newClient, primaryDb).catch(
(e) => {
logger.error(
`Failed to rebuild client associations after creating user client: ${e}`
);
}
);
}
return response<CreateClientAndOlmResponse>(res, {
data: newClient,
success: true,

View File

@@ -1,6 +1,6 @@
import { Request, Response, NextFunction } from "express";
import { z } from "zod";
import { db, olms } from "@server/db";
import { db, olms, primaryDb, Client, Olm } from "@server/db";
import { clients, clientSitesAssociationsCache } from "@server/db";
import { eq } from "drizzle-orm";
import response from "@server/lib/response";
@@ -71,14 +71,17 @@ export async function deleteClient(
);
}
let deletedClient: Client | undefined;
let olm: Olm | undefined;
await db.transaction(async (trx) => {
// Then delete the client itself
const [deletedClient] = await trx
[deletedClient] = await trx
.delete(clients)
.where(eq(clients.clientId, clientId))
.returning();
const [olm] = await trx
[olm] = await trx
.select()
.from(olms)
.where(eq(olms.clientId, clientId))
@@ -88,14 +91,29 @@ export async function deleteClient(
if (!client.userId && client.olmId) {
await trx.delete(olms).where(eq(olms.olmId, client.olmId));
}
await rebuildClientAssociationsFromClient(deletedClient, trx);
if (olm) {
await sendTerminateClient(deletedClient.clientId, OlmErrorCodes.TERMINATED_DELETED, olm.olmId); // the olmId must be provided because it can't be looked up after deletion
}
});
if (deletedClient) {
rebuildClientAssociationsFromClient(deletedClient, primaryDb).catch(
(e) => {
logger.error(
`Failed to rebuild client associations after deleting client ${clientId}: ${e}`
);
}
);
if (olm) {
sendTerminateClient(
deletedClient.clientId,
OlmErrorCodes.TERMINATED_DELETED,
olm.olmId
).catch((e) => {
logger.error(
`Failed to send terminate message for client ${deletedClient?.clientId} after deleting client ${clientId}: ${e}`
);
});
}
}
return response(res, {
data: null,
success: true,

View File

@@ -1,5 +1,5 @@
import { NextFunction, Request, Response } from "express";
import { db, olms } from "@server/db";
import { db, olms, primaryDb } from "@server/db";
import HttpCode from "@server/types/HttpCode";
import { z } from "zod";
import createHttpError from "http-errors";
@@ -81,16 +81,19 @@ export async function createUserOlm(
const secretHash = await hashPassword(secret);
await db.transaction(async (trx) => {
await trx.insert(olms).values({
olmId: olmId,
userId,
name,
secretHash,
dateCreated: moment().toISOString()
});
await db.insert(olms).values({
olmId: olmId,
userId,
name,
secretHash,
dateCreated: moment().toISOString()
});
await calculateUserClientsForOrgs(userId, trx);
calculateUserClientsForOrgs(userId, primaryDb).catch((e) => {
console.error(
"Error calculating user clients after creating olm:",
e
);
});
return response<CreateOlmResponse>(res, {

View File

@@ -1,5 +1,5 @@
import { NextFunction, Request, Response } from "express";
import { Client, db } from "@server/db";
import { Client, db, Olm, primaryDb } from "@server/db";
import { olms, clients, clientSitesAssociationsCache } from "@server/db";
import { eq } from "drizzle-orm";
import HttpCode from "@server/types/HttpCode";
@@ -49,6 +49,7 @@ export async function deleteUserOlm(
const { olmId } = parsedParams.data;
let deletedClient: Client | undefined;
// Delete associated clients and the OLM in a transaction
await db.transaction(async (trx) => {
// Find all clients associated with this OLM
@@ -57,7 +58,6 @@ export async function deleteUserOlm(
.from(clients)
.where(eq(clients.olmId, olmId));
let deletedClient: Client | null = null;
// Delete all associated clients
if (associatedClients.length > 0) {
[deletedClient] = await trx
@@ -67,23 +67,28 @@ export async function deleteUserOlm(
}
// Finally, delete the OLM itself
const [olm] = await trx
.delete(olms)
.where(eq(olms.olmId, olmId))
.returning();
if (deletedClient) {
await rebuildClientAssociationsFromClient(deletedClient, trx);
if (olm) {
await sendTerminateClient(
deletedClient.clientId,
OlmErrorCodes.TERMINATED_DELETED,
olm.olmId
); // the olmId needs to be provided because it can't look it up after deletion
}
}
await trx.delete(olms).where(eq(olms.olmId, olmId)).returning();
});
if (deletedClient) {
rebuildClientAssociationsFromClient(deletedClient, primaryDb).catch(
(e) => {
logger.error(
`Failed to rebuild client-site associations after deleting OLM ${olmId}: ${e}`
);
}
);
sendTerminateClient(
deletedClient.clientId,
OlmErrorCodes.TERMINATED_DELETED,
olmId
).catch((e) => {
logger.error(
`Failed to send terminate message for client ${deletedClient?.clientId} after deleting OLM ${olmId}: ${e}`
);
});
}
return response(res, {
data: null,
success: true,

View File

@@ -22,14 +22,14 @@ import { canCompress } from "@server/lib/clientVersionChecks";
import config from "@server/lib/config";
export const handleOlmRegisterMessage: MessageHandler = async (context) => {
logger.info("Handling register olm message!");
logger.info("[handleOlmRegisterMessage] Handling register olm message");
const { message, client: c, sendToClient } = context;
const olm = c as Olm;
const now = Math.floor(Date.now() / 1000);
if (!olm) {
logger.warn("Olm not found");
logger.warn("[handleOlmRegisterMessage] Olm not found");
return;
}
@@ -46,16 +46,19 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
} = message.data;
if (!olm.clientId) {
logger.warn("Olm client ID not found");
logger.warn("[handleOlmRegisterMessage] Olm client ID not found");
sendOlmError(OlmErrorCodes.CLIENT_ID_NOT_FOUND, olm.olmId);
return;
}
logger.debug("Handling fingerprint insertion for olm register...", {
olmId: olm.olmId,
fingerprint,
postures
});
logger.debug(
"[handleOlmRegisterMessage] Handling fingerprint insertion for olm register...",
{
olmId: olm.olmId,
fingerprint,
postures
}
);
const isUserDevice = olm.userId !== null && olm.userId !== undefined;
@@ -85,14 +88,17 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
.limit(1);
if (!client) {
logger.warn("Client ID not found");
logger.warn("[handleOlmRegisterMessage] Client not found", {
clientId: olm.clientId
});
sendOlmError(OlmErrorCodes.CLIENT_NOT_FOUND, olm.olmId);
return;
}
if (client.blocked) {
logger.debug(
`Client ${client.clientId} is blocked. Ignoring register.`
`[handleOlmRegisterMessage] Client ${client.clientId} is blocked. Ignoring register.`,
{ orgId: client.orgId }
);
sendOlmError(OlmErrorCodes.CLIENT_BLOCKED, olm.olmId);
return;
@@ -100,7 +106,8 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
if (client.approvalState == "pending") {
logger.debug(
`Client ${client.clientId} approval is pending. Ignoring register.`
`[handleOlmRegisterMessage] Client ${client.clientId} approval is pending. Ignoring register.`,
{ orgId: client.orgId }
);
sendOlmError(OlmErrorCodes.CLIENT_PENDING, olm.olmId);
return;
@@ -128,14 +135,18 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
.limit(1);
if (!org) {
logger.warn("Org not found");
logger.warn("[handleOlmRegisterMessage] Org not found", {
orgId: client.orgId
});
sendOlmError(OlmErrorCodes.ORG_NOT_FOUND, olm.olmId);
return;
}
if (orgId) {
if (!olm.userId) {
logger.warn("Olm has no user ID");
logger.warn("[handleOlmRegisterMessage] Olm has no user ID", {
orgId: client.orgId
});
sendOlmError(OlmErrorCodes.USER_ID_NOT_FOUND, olm.olmId);
return;
}
@@ -143,12 +154,18 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
const { session: userSession, user } =
await validateSessionToken(userToken);
if (!userSession || !user) {
logger.warn("Invalid user session for olm register");
logger.warn(
"[handleOlmRegisterMessage] Invalid user session for olm register",
{ orgId: client.orgId }
);
sendOlmError(OlmErrorCodes.INVALID_USER_SESSION, olm.olmId);
return;
}
if (user.userId !== olm.userId) {
logger.warn("User ID mismatch for olm register");
logger.warn(
"[handleOlmRegisterMessage] User ID mismatch for olm register",
{ orgId: client.orgId }
);
sendOlmError(OlmErrorCodes.USER_ID_MISMATCH, olm.olmId);
return;
}
@@ -163,11 +180,15 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
sessionId // this is the user token passed in the message
});
logger.debug("Policy check result:", policyCheck);
logger.debug("[handleOlmRegisterMessage] Policy check result", {
orgId: client.orgId,
policyCheck
});
if (policyCheck?.error) {
logger.error(
`Error checking access policies for olm user ${olm.userId} in org ${orgId}: ${policyCheck?.error}`
`[handleOlmRegisterMessage] Error checking access policies for olm user ${olm.userId} in org ${orgId}: ${policyCheck?.error}`,
{ orgId: client.orgId }
);
sendOlmError(OlmErrorCodes.ORG_ACCESS_POLICY_DENIED, olm.olmId);
return;
@@ -175,7 +196,8 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
if (policyCheck.policies?.passwordAge?.compliant === false) {
logger.warn(
`Olm user ${olm.userId} has non-compliant password age for org ${orgId}`
`[handleOlmRegisterMessage] Olm user ${olm.userId} has non-compliant password age for org ${orgId}`,
{ orgId: client.orgId }
);
sendOlmError(
OlmErrorCodes.ORG_ACCESS_POLICY_PASSWORD_EXPIRED,
@@ -186,7 +208,8 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
policyCheck.policies?.maxSessionLength?.compliant === false
) {
logger.warn(
`Olm user ${olm.userId} has non-compliant session length for org ${orgId}`
`[handleOlmRegisterMessage] Olm user ${olm.userId} has non-compliant session length for org ${orgId}`,
{ orgId: client.orgId }
);
sendOlmError(
OlmErrorCodes.ORG_ACCESS_POLICY_SESSION_EXPIRED,
@@ -195,7 +218,8 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
return;
} else if (policyCheck.policies?.requiredTwoFactor === false) {
logger.warn(
`Olm user ${olm.userId} does not have 2FA enabled for org ${orgId}`
`[handleOlmRegisterMessage] Olm user ${olm.userId} does not have 2FA enabled for org ${orgId}`,
{ orgId: client.orgId }
);
sendOlmError(
OlmErrorCodes.ORG_ACCESS_POLICY_2FA_REQUIRED,
@@ -204,7 +228,8 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
return;
} else if (!policyCheck.allowed) {
logger.warn(
`Olm user ${olm.userId} does not pass access policies for org ${orgId}: ${policyCheck.error}`
`[handleOlmRegisterMessage] Olm user ${olm.userId} does not pass access policies for org ${orgId}: ${policyCheck.error}`,
{ orgId: client.orgId }
);
sendOlmError(OlmErrorCodes.ORG_ACCESS_POLICY_DENIED, olm.olmId);
return;
@@ -226,29 +251,39 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
sitesCountResult.length > 0 ? sitesCountResult[0].count : 0;
// Prepare an array to store site configurations
logger.debug(`Found ${sitesCount} sites for client ${client.clientId}`);
logger.debug(
`[handleOlmRegisterMessage] Found ${sitesCount} sites for client ${client.clientId}`,
{ orgId: client.orgId }
);
let jitMode = false;
if (sitesCount > 250 && build == "saas") {
// THIS IS THE MAX ON THE BUSINESS TIER
// we have too many sites
// If we have too many sites we need to drop into fully JIT mode by not sending any of the sites
logger.info("Too many sites (%d), dropping into JIT mode", sitesCount);
logger.info(
`[handleOlmRegisterMessage] Too many sites (${sitesCount}), dropping into JIT mode`,
{ orgId: client.orgId }
);
jitMode = true;
}
logger.debug(
`Olm client ID: ${client.clientId}, Public Key: ${publicKey}, Relay: ${relay}`
`[handleOlmRegisterMessage] Olm client ID: ${client.clientId}, Public Key: ${publicKey}, Relay: ${relay}`,
{ orgId: client.orgId }
);
if (!publicKey) {
logger.warn("Public key not provided");
logger.warn("[handleOlmRegisterMessage] Public key not provided", {
orgId: client.orgId
});
return;
}
if (client.pubKey !== publicKey || client.archived) {
logger.info(
"Public key mismatch. Updating public key and clearing session info..."
"[handleOlmRegisterMessage] Public key mismatch. Updating public key and clearing session info...",
{ orgId: client.orgId }
);
// Update the client's public key
await db
@@ -274,12 +309,13 @@ export const handleOlmRegisterMessage: MessageHandler = async (context) => {
// TODO: I still think there is a better way to do this rather than locking it out here but ???
if (now - (client.lastHolePunch || 0) > 5 && sitesCount > 0) {
logger.warn(
`Client last hole punch is too old and we have sites to send; skipping this register. The client is failing to hole punch and identify its network address with the server. Can the client reach the server on UDP port ${config.getRawConfig().gerbil.clients_start_port}?`
`[handleOlmRegisterMessage] Client last hole punch is too old and we have sites to send; skipping this register. The client is failing to hole punch and identify its network address with the server. Can the client reach the server on UDP port ${config.getRawConfig().gerbil.clients_start_port}?`,
{ orgId: client.orgId }
);
return;
}
// NOTE: it's important that the client here is the old client and the public key is the new key
const siteConfigurations = await buildSiteConfigurationForOlmClient(
client,
publicKey,

View File

@@ -151,6 +151,8 @@ export async function getUserResources(
destination: string;
mode: string;
scheme: string | null;
ssl: boolean;
fullDomain: string | null;
enabled: boolean;
alias: string | null;
aliasAddress: string | null;
@@ -164,6 +166,8 @@ export async function getUserResources(
destination: siteResources.destination,
mode: siteResources.mode,
scheme: siteResources.scheme,
ssl: siteResources.ssl,
fullDomain: siteResources.fullDomain,
enabled: siteResources.enabled,
alias: siteResources.alias,
aliasAddress: siteResources.aliasAddress
@@ -251,6 +255,8 @@ export async function getUserResources(
destination: siteResource.destination,
mode: siteResource.mode,
protocol: siteResource.scheme,
ssl: siteResource.ssl,
fullDomain: siteResource.fullDomain,
enabled: siteResource.enabled,
alias: siteResource.alias,
aliasAddress: siteResource.aliasAddress,
@@ -296,6 +302,8 @@ export type GetUserResourcesResponse = {
destination: string;
mode: string;
protocol: string | null;
ssl: boolean;
fullDomain: string | null;
enabled: boolean;
alias: string | null;
aliasAddress: string | null;

View File

@@ -5,7 +5,8 @@ import {
clients,
clientSiteResources,
siteResources,
apiKeyOrg
apiKeyOrg,
primaryDb
} from "@server/db";
import response from "@server/lib/response";
import HttpCode from "@server/types/HttpCode";
@@ -220,8 +221,12 @@ export async function batchAddClientToSiteResources(
siteResourceId: siteResource.siteResourceId
});
}
});
await rebuildClientAssociationsFromClient(client, trx);
rebuildClientAssociationsFromClient(client, primaryDb).catch((e) => {
logger.error(
`Failed to rebuild client associations after batch adding site resources for client ${clientId}: ${e}`
);
});
return response(res, {

View File

@@ -10,7 +10,8 @@ import {
SiteResource,
siteResources,
sites,
userSiteResources
userSiteResources,
primaryDb
} from "@server/db";
import { getUniqueSiteResourceName } from "@server/db/names";
import {
@@ -74,16 +75,14 @@ const createSiteResourceSchema = z
.refine(
(data) => {
if (data.mode === "host") {
if (data.mode == "host") {
// Check if it's a valid IP address using zod (v4 or v6)
const isValidIP = z
// .union([z.ipv4(), z.ipv6()])
.union([z.ipv4()]) // for now lets just do ipv4 until we verify ipv6 works everywhere
.safeParse(data.destination).success;
// Check if it's a valid IP address using zod (v4 or v6)
const isValidIP = z
// .union([z.ipv4(), z.ipv6()])
.union([z.ipv4()]) // for now lets just do ipv4 until we verify ipv6 works everywhere
.safeParse(data.destination).success;
if (isValidIP) {
return true;
}
if (isValidIP) {
return true;
}
// Check if it's a valid domain (hostname pattern, TLD not required)
@@ -96,17 +95,12 @@ const createSiteResourceSchema = z
data.alias.trim() !== "";
return isValidDomain && isValidAlias; // require the alias to be set in the case of domain
}
return true;
},
{
message:
"Destination must be a valid IPV4 address or valid domain AND alias is required"
}
)
.refine(
(data) => {
if (data.mode === "cidr") {
} else if (data.mode === "http") {
// we have to have a domainId defined
if (!data.domainId) {
return false;
}
} else if (data.mode === "cidr") {
// Check if it's a valid CIDR (v4 or v6)
const isValidCIDR = z
.union([z.cidrv4(), z.cidrv6()])
@@ -116,7 +110,8 @@ const createSiteResourceSchema = z
return true;
},
{
message: "Destination must be a valid CIDR notation for cidr mode"
message:
"Destination must be a valid IPV4 address or valid domain AND alias is required"
}
)
.refine(
@@ -525,12 +520,10 @@ export async function createSiteResource(
// own transaction so it always executes on the primary — avoiding any
// replica-lag issues while still allowing the HTTP response to return
// early.
db.transaction(async (trx) => {
await rebuildClientAssociationsFromSiteResource(
newSiteResource!,
trx
);
}).catch((err) => {
rebuildClientAssociationsFromSiteResource(
newSiteResource!,
primaryDb
).catch((err) => {
logger.error(
`Error rebuilding client associations for site resource ${newSiteResource!.siteResourceId}:`,
err
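
The hunks above all repeat one shape: association rebuilds move out of the request transaction and run against the primary connection without being awaited, with a `.catch` attached so a failure is logged rather than becoming an unhandled rejection. A minimal sketch of that pattern, assuming the `primaryDb`, `logger`, and rebuild helpers from this repo; the `fireAndForget` wrapper itself is hypothetical and only names the pattern:

import logger from "@server/logger";

// Hypothetical helper: kick off background work after the transaction
// commits, logging failures instead of crashing the process.
function fireAndForget(label: string, work: () => Promise<unknown>): void {
    // Deliberately not awaited: the HTTP response returns before the
    // rebuild finishes, and errors are only logged.
    work().catch((e) => {
        logger.error(`${label}: ${e}`);
    });
}

// Shape of the call sites above (names from this repo):
// fireAndForget(
//     "Failed to rebuild client associations",
//     () => rebuildClientAssociationsFromClient(deletedClient, primaryDb)
// );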

View File

@@ -1,6 +1,6 @@
import { Request, Response, NextFunction } from "express";
import { z } from "zod";
import { db, newts, sites } from "@server/db";
import { db, newts, primaryDb, sites } from "@server/db";
import { siteResources } from "@server/db";
import response from "@server/lib/response";
import HttpCode from "@server/types/HttpCode";
@@ -73,12 +73,10 @@ export async function deleteSiteResource(
// own transaction so it always executes on the primary — avoiding any
// replica-lag issues while still allowing the HTTP response to return
// early.
db.transaction(async (trx) => {
await rebuildClientAssociationsFromSiteResource(
removedSiteResource,
trx
);
}).catch((err) => {
rebuildClientAssociationsFromSiteResource(
removedSiteResource,
primaryDb
).catch((err) => {
logger.error(
`Error rebuilding client associations for site resource ${removedSiteResource!.siteResourceId}:`,
err

View File

@@ -104,6 +104,17 @@ const updateSiteResourceSchema = z
data.alias.trim() !== "";
return isValidDomain && isValidAlias; // require the alias to be set in the case of domain
} else if (data.mode === "cidr" && data.destination) {
// Check if it's a valid CIDR (v4 or v6)
const isValidCIDR = z
.union([z.cidrv4(), z.cidrv6()])
.safeParse(data.destination).success;
return isValidCIDR;
} else if (data.mode === "http") {
// we have to have a domainId defined
if (!data.domainId) {
return false;
}
}
return true;
},
@@ -112,21 +123,6 @@ const updateSiteResourceSchema = z
"Destination must be a valid IP address or valid domain AND alias is required"
}
)
.refine(
(data) => {
if (data.mode === "cidr" && data.destination) {
// Check if it's a valid CIDR (v4 or v6)
const isValidCIDR = z
.union([z.cidrv4(), z.cidrv6()])
.safeParse(data.destination).success;
return isValidCIDR;
}
return true;
},
{
message: "Destination must be a valid CIDR notation for cidr mode"
}
)
.refine(
(data) => {
if (data.mode !== "http") return true;

View File

@@ -1,7 +1,13 @@
import { Request, Response, NextFunction } from "express";
import { z } from "zod";
import { db, orgs } from "@server/db";
import { roles, userInviteRoles, userInvites, userOrgs, users } from "@server/db";
import { db, orgs, primaryDb } from "@server/db";
import {
roles,
userInviteRoles,
userInvites,
userOrgs,
users
} from "@server/db";
import { eq, and, inArray } from "drizzle-orm";
import response from "@server/lib/response";
import HttpCode from "@server/types/HttpCode";
@@ -146,9 +152,7 @@ export async function acceptInvite(
.from(userInviteRoles)
.where(eq(userInviteRoles.inviteId, inviteId));
const inviteRoleIds = [
...new Set(inviteRoleRows.map((r) => r.roleId))
];
const inviteRoleIds = [...new Set(inviteRoleRows.map((r) => r.roleId))];
if (inviteRoleIds.length === 0) {
return next(
createHttpError(
@@ -193,13 +197,19 @@ export async function acceptInvite(
.delete(userInvites)
.where(eq(userInvites.inviteId, inviteId));
await calculateUserClientsForOrgs(existingUser[0].userId, trx);
logger.debug(
`User ${existingUser[0].userId} accepted invite to org ${existingInvite.orgId}`
);
});
calculateUserClientsForOrgs(existingUser[0].userId, primaryDb).catch(
(e) => {
logger.error(
`Failed to calculate user clients after accepting invite for user ${existingUser[0].userId}: ${e}`
);
}
);
return response<AcceptInviteResponse>(res, {
data: { accepted: true, orgId: existingInvite.orgId },
success: true,

View File

@@ -1,7 +1,7 @@
import { Request, Response, NextFunction } from "express";
import { z } from "zod";
import stoi from "@server/lib/stoi";
import { clients, db } from "@server/db";
import { clients, db, primaryDb, Client } from "@server/db";
import { userOrgRoles, userOrgs, roles } from "@server/db";
import { eq, and } from "drizzle-orm";
import response from "@server/lib/response";
@@ -88,11 +88,11 @@ export async function addUserRoleLegacy(
);
}
if (existingUser.isOwner) {
if (existingUser.isOwner && role.isAdmin !== true) {
return next(
createHttpError(
HttpCode.FORBIDDEN,
"Cannot change the role of the owner of the organization"
"The organization owner must retain an administrator role"
)
);
}
@@ -112,6 +112,8 @@ export async function addUserRoleLegacy(
);
}
let orgClientsToRebuild: Client[] = [];
await db.transaction(async (trx) => {
await trx
.delete(userOrgRoles)
@@ -138,11 +140,19 @@ export async function addUserRoleLegacy(
)
);
for (const orgClient of orgClients) {
await rebuildClientAssociationsFromClient(orgClient, trx);
}
orgClientsToRebuild = orgClients;
});
for (const orgClient of orgClientsToRebuild) {
rebuildClientAssociationsFromClient(orgClient, primaryDb).catch(
(e) => {
logger.error(
`Failed to rebuild client associations for client ${orgClient.clientId} after adding role: ${e}`
);
}
);
}
return response(res, {
data: { ...existingUser, roleId },
success: true,

View File

@@ -1,6 +1,6 @@
import { Request, Response, NextFunction } from "express";
import { z } from "zod";
import { db } from "@server/db";
import { db, primaryDb } from "@server/db";
import { users } from "@server/db";
import { eq } from "drizzle-orm";
import response from "@server/lib/response";
@@ -53,8 +53,12 @@ export async function adminRemoveUser(
await db.transaction(async (trx) => {
await trx.delete(users).where(eq(users.userId, userId));
});
await calculateUserClientsForOrgs(userId, trx);
calculateUserClientsForOrgs(userId, primaryDb).catch((e) => {
logger.error(
`Failed to calculate user clients after removing user ${userId}: ${e}`
);
});
return response(res, {

View File

@@ -6,7 +6,7 @@ import createHttpError from "http-errors";
import logger from "@server/logger";
import { fromError } from "zod-validation-error";
import { OpenAPITags, registry } from "@server/openApi";
import { db, orgs } from "@server/db";
import { db, orgs, primaryDb } from "@server/db";
import { and, eq, inArray } from "drizzle-orm";
import { idp, idpOidcConfig, roles, userOrgs, users } from "@server/db";
import { generateId } from "@server/auth/sessions/app";
@@ -34,8 +34,7 @@ const bodySchema = z
roleId: z.number().int().positive().optional()
})
.refine(
(d) =>
(d.roleIds != null && d.roleIds.length > 0) || d.roleId != null,
(d) => (d.roleIds != null && d.roleIds.length > 0) || d.roleId != null,
{ message: "roleIds or roleId is required", path: ["roleIds"] }
)
.transform((data) => ({
@@ -100,8 +99,14 @@ export async function createOrgUser(
}
const { orgId } = parsedParams.data;
const { username, email, name, type, idpId, roleIds: uniqueRoleIds } =
parsedBody.data;
const {
username,
email,
name,
type,
idpId,
roleIds: uniqueRoleIds
} = parsedBody.data;
if (build == "saas") {
const usage = await usageService.getUsage(orgId, FeatureId.USERS);
@@ -232,6 +237,7 @@ export async function createOrgUser(
);
}
let userIdForClients: string | undefined;
await db.transaction(async (trx) => {
const [existingUser] = await trx
.select()
@@ -270,7 +276,7 @@ export async function createOrgUser(
{
orgId,
userId: existingUser.userId,
autoProvisioned: false,
autoProvisioned: false
},
uniqueRoleIds,
trx
@@ -292,20 +298,30 @@ export async function createOrgUser(
})
.returning();
await assignUserToOrg(
org,
{
orgId,
userId: newUser.userId,
autoProvisioned: false,
},
uniqueRoleIds,
trx
);
await assignUserToOrg(
org,
{
orgId,
userId: newUser.userId,
autoProvisioned: false
},
uniqueRoleIds,
trx
);
}
await calculateUserClientsForOrgs(userId, trx);
userIdForClients = userId;
});
if (userIdForClients) {
calculateUserClientsForOrgs(userIdForClients, primaryDb).catch(
(e) => {
logger.error(
`Failed to calculate user clients after creating org user: ${e}`
);
}
);
}
} else {
return next(
createHttpError(HttpCode.BAD_REQUEST, "User type is required")

View File

@@ -58,7 +58,8 @@ export async function queryUser(orgId: string, userId: string) {
roleIds: roleRows.map((r) => r.roleId),
roles: roleRows.map((r) => ({
roleId: r.roleId,
name: r.roleName ?? ""
name: r.roleName ?? "",
isAdmin: r.isAdmin === true
}))
};
}

View File

@@ -7,7 +7,8 @@ import {
siteResources,
sites,
UserOrg,
userSiteResources
userSiteResources,
primaryDb
} from "@server/db";
import { userOrgs, userResources, users, userSites } from "@server/db";
import { and, count, eq, exists, inArray } from "drizzle-orm";
@@ -91,25 +92,12 @@ export async function removeUserOrg(
await db.transaction(async (trx) => {
await removeUserFromOrg(org, userId, trx);
});
// if (build === "saas") {
// const [rootUser] = await trx
// .select()
// .from(users)
// .where(eq(users.userId, userId));
//
// const [leftInOrgs] = await trx
// .select({ count: count() })
// .from(userOrgs)
// .where(eq(userOrgs.userId, userId));
//
// // if the user is not an internal user and does not belong to any org, delete the entire user
// if (rootUser?.type !== UserType.Internal && !leftInOrgs.count) {
// await trx.delete(users).where(eq(users.userId, userId));
// }
// }
await calculateUserClientsForOrgs(userId, trx);
calculateUserClientsForOrgs(userId, primaryDb).catch((e) => {
logger.error(
`Failed to calculate user clients after removing user ${userId} from org ${orgId}: ${e}`
);
});
return response(res, {

View File

@@ -24,6 +24,7 @@ import m15 from "./scriptsPg/1.16.0";
import m16 from "./scriptsPg/1.17.0";
import m17 from "./scriptsPg/1.18.0";
import m18 from "./scriptsPg/1.18.3";
import m19 from "./scriptsPg/1.18.4";
// THIS CANNOT IMPORT ANYTHING FROM THE SERVER
// EXCEPT FOR THE DATABASE AND THE SCHEMA
@@ -47,7 +48,8 @@ const migrations = [
{ version: "1.16.0", run: m15 },
{ version: "1.17.0", run: m16 },
{ version: "1.18.0", run: m17 },
{ version: "1.18.3", run: m18 }
{ version: "1.18.3", run: m18 },
{ version: "1.18.4", run: m19 }
// Add new migrations here as they are created
] as {
version: string;

View File

@@ -42,6 +42,7 @@ import m36 from "./scriptsSqlite/1.16.0";
import m37 from "./scriptsSqlite/1.17.0";
import m38 from "./scriptsSqlite/1.18.0";
import m39 from "./scriptsSqlite/1.18.3";
import m40 from "./scriptsSqlite/1.18.4";
// THIS CANNOT IMPORT ANYTHING FROM THE SERVER
// EXCEPT FOR THE DATABASE AND THE SCHEMA
@@ -81,7 +82,8 @@ const migrations = [
{ version: "1.16.0", run: m36 },
{ version: "1.17.0", run: m37 },
{ version: "1.18.0", run: m38 },
{ version: "1.18.3", run: m39 }
{ version: "1.18.3", run: m39 },
{ version: "1.18.4", run: m40 }
// Add new migrations here as they are created
] as const;

View File

@@ -44,7 +44,7 @@ export default async function migration() {
await db.execute(sql`BEGIN`);
await db.execute(sql`
CREATE TABLE "trialNotifications" (
CREATE TABLE IF NOT EXISTS "trialNotifications" (
"notificationId" serial PRIMARY KEY NOT NULL,
"subscriptionId" varchar(255) NOT NULL,
"notificationType" varchar(50) NOT NULL,
@@ -52,10 +52,6 @@ export default async function migration() {
);
`);
await db.execute(sql`
ALTER TABLE "trialNotifications" ADD CONSTRAINT "trialNotifications_subscriptionId_subscriptions_subscriptionId_fk" FOREIGN KEY ("subscriptionId") REFERENCES "public"."subscriptions"("subscriptionId") ON DELETE cascade ON UPDATE no action;
`);
await db.execute(sql`COMMIT`);
console.log("Migrated database");
} catch (e) {
@@ -77,7 +73,7 @@ export default async function migration() {
}
console.log(
`Migrated ${existingHealthChecks.length} targetHealthCheck row(s) with corrected IDs`
`Updated names for ${existingHealthChecks.length} existing targetHealthCheck row(s)`
);
} catch (e) {
console.error("Error while migrating targetHealthCheck rows:", e);

View File

@@ -0,0 +1,34 @@
import { db } from "@server/db/pg/driver";
import { sql } from "drizzle-orm";
const version = "1.18.4";
export default async function migration() {
console.log(`Running setup script ${version}...`);
try {
await db.execute(sql`BEGIN`);
await db.execute(sql`
ALTER TABLE "connectionAuditLog" ADD COLUMN "clientEndpoint" text;
`);
await db.execute(sql`
ALTER TABLE "eventStreamingDestinations" ADD COLUMN "lastError" text;
`);
await db.execute(sql`
ALTER TABLE "eventStreamingDestinations" ADD COLUMN "lastErrorAt" bigint;
`);
await db.execute(sql`COMMIT`);
console.log("Migrated database");
} catch (e) {
await db.execute(sql`ROLLBACK`);
console.log("Unable to migrate database");
console.log(e);
throw e;
}
console.log(`${version} migration complete`);
}
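
For context, the two new columns track destination health (null when healthy). A sketch of how the event-streaming sender might record a failure, assuming the drizzle table object from the schema; the destinationId key and the recordDestinationError helper are assumptions, not code from this diff:

import { db, eventStreamingDestinations } from "@server/db";
import { eq } from "drizzle-orm";

// Hypothetical helper: persist the most recent send failure so unhealthy
// destinations can be surfaced. Clearing on success would set both back
// to null.
async function recordDestinationError(destinationId: number, error: unknown) {
    await db
        .update(eventStreamingDestinations)
        .set({
            lastError: String(error),
            lastErrorAt: Date.now() // epoch ms, matching the column comment
        })
        .where(eq(eventStreamingDestinations.destinationId, destinationId));
}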

View File

@@ -16,7 +16,7 @@ export default async function migration() {
db.transaction(() => {
db.prepare(
`
CREATE TABLE 'trialNotifications' (
CREATE TABLE IF NOT EXISTS 'trialNotifications' (
'notificationId' integer PRIMARY KEY AUTOINCREMENT NOT NULL,
'subscriptionId' text NOT NULL,
'notificationType' text NOT NULL,

View File

@@ -0,0 +1,43 @@
import { APP_PATH } from "@server/lib/consts";
import Database from "better-sqlite3";
import path from "path";
const version = "1.18.4";
export default async function migration() {
console.log(`Running setup script ${version}...`);
const location = path.join(APP_PATH, "db", "db.sqlite");
const db = new Database(location);
try {
db.pragma("foreign_keys = OFF");
db.transaction(() => {
db.prepare(
`
ALTER TABLE 'connectionAuditLog' ADD 'clientEndpoint' text;
`
).run();
db.prepare(
`
ALTER TABLE 'eventStreamingDestinations' ADD 'lastError' text;
`
).run();
db.prepare(
`
ALTER TABLE 'eventStreamingDestinations' ADD 'lastErrorAt' integer;
`
).run();
})();
db.pragma("foreign_keys = ON");
console.log("Migrated database");
} catch (e) {
console.log("Failed to migrate db:", e);
throw e;
}
console.log(`${version} migration complete`);
}
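
One note on the SQLite variant: better-sqlite3 transactions are synchronous functions returned by db.transaction(), which is why the block above ends with `})();` to actually invoke it. A minimal standalone sketch of that API, independent of this migration:

import Database from "better-sqlite3";

const db = new Database(":memory:");
db.exec(`CREATE TABLE t (id INTEGER PRIMARY KEY, name TEXT)`);

// db.transaction() wraps a function; calling the wrapper runs everything
// inside one transaction and rolls back automatically if it throws.
const insertMany = db.transaction((names: string[]) => {
    const stmt = db.prepare(`INSERT INTO t (name) VALUES (?)`);
    for (const name of names) stmt.run(name);
});

insertMany(["a", "b", "c"]); // commits once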