Compare commits

..

13 Commits

Author SHA1 Message Date
Owen Schwartz
6d7a19b0a0 Merge pull request #2716 from fosrl/patch-1
Add typecasts
2026-03-25 22:12:59 -07:00
Owen
6b3a6fa380 Add typecasts 2026-03-25 22:11:56 -07:00
Owen Schwartz
e2a65b4b74 Merge pull request #2715 from fosrl/batch-band
Batch set bandwidth
2026-03-25 21:54:44 -07:00
Owen
1f01108b62 Batch set bandwidth 2026-03-25 21:53:20 -07:00
Owen Schwartz
62c63ddcaa Merge pull request #2710 from fosrl/thundering-herd
thundering herd
2026-03-24 20:29:01 -07:00
Owen
dfd604c781 Fix import problems 2026-03-24 20:27:34 -07:00
Owen
c96c5e8ae8 Cache token for thundering hurd 2026-03-24 18:12:51 -07:00
Owen
6f71e9f0f2 Clean up 2026-03-24 17:55:14 -07:00
Owen
d17ec6dc1f Try to solve th problem 2026-03-24 17:39:43 -07:00
Owen Schwartz
c36a019f5d Merge pull request #2709 from fosrl/pool-update
Update pool and disable idp
2026-03-24 16:48:28 -07:00
Owen
cf2dfdea5b Add better pooling controls 2026-03-24 16:38:50 -07:00
Owen
985e1bb9ab Disable everything if not paid 2026-03-24 16:38:46 -07:00
Owen Schwartz
85335bfecc Merge pull request #2685 from fosrl/dev
1.16.2-s.16
2026-03-21 10:47:18 -07:00
79 changed files with 1180 additions and 1202 deletions

View File

@@ -1,7 +1,7 @@
import { Request } from "express";
import { db } from "@server/db";
import { userActions, roleActions } from "@server/db";
import { and, eq, inArray } from "drizzle-orm";
import { userActions, roleActions, userOrgs } from "@server/db";
import { and, eq } from "drizzle-orm";
import createHttpError from "http-errors";
import HttpCode from "@server/types/HttpCode";
@@ -53,7 +53,6 @@ export enum ActionsEnum {
listRoleResources = "listRoleResources",
// listRoleActions = "listRoleActions",
addUserRole = "addUserRole",
removeUserRole = "removeUserRole",
// addUserSite = "addUserSite",
// addUserAction = "addUserAction",
// removeUserAction = "removeUserAction",
@@ -155,19 +154,29 @@ export async function checkUserActionPermission(
}
try {
let userOrgRoleIds = req.userOrgRoleIds;
let userOrgRoleId = req.userOrgRoleId;
if (userOrgRoleIds === undefined) {
const { getUserOrgRoleIds } = await import(
"@server/lib/userOrgRoles"
);
userOrgRoleIds = await getUserOrgRoleIds(userId, req.userOrgId!);
if (userOrgRoleIds.length === 0) {
// If userOrgRoleId is not available on the request, fetch it
if (userOrgRoleId === undefined) {
const userOrgRole = await db
.select()
.from(userOrgs)
.where(
and(
eq(userOrgs.userId, userId),
eq(userOrgs.orgId, req.userOrgId!)
)
)
.limit(1);
if (userOrgRole.length === 0) {
throw createHttpError(
HttpCode.FORBIDDEN,
"User does not have access to this organization"
);
}
userOrgRoleId = userOrgRole[0].roleId;
}
// Check if the user has direct permission for the action in the current org
@@ -178,7 +187,7 @@ export async function checkUserActionPermission(
and(
eq(userActions.userId, userId),
eq(userActions.actionId, actionId),
eq(userActions.orgId, req.userOrgId!)
eq(userActions.orgId, req.userOrgId!) // TODO: we cant pass the org id if we are not checking the org
)
)
.limit(1);
@@ -187,14 +196,14 @@ export async function checkUserActionPermission(
return true;
}
// If no direct permission, check role-based permission (any of user's roles)
// If no direct permission, check role-based permission
const roleActionPermission = await db
.select()
.from(roleActions)
.where(
and(
eq(roleActions.actionId, actionId),
inArray(roleActions.roleId, userOrgRoleIds),
eq(roleActions.roleId, userOrgRoleId!),
eq(roleActions.orgId, req.userOrgId!)
)
)

View File

@@ -1,29 +1,26 @@
import { db } from "@server/db";
import { and, eq, inArray } from "drizzle-orm";
import { and, eq } from "drizzle-orm";
import { roleResources, userResources } from "@server/db";
export async function canUserAccessResource({
userId,
resourceId,
roleIds
roleId
}: {
userId: string;
resourceId: number;
roleIds: number[];
roleId: number;
}): Promise<boolean> {
const roleResourceAccess =
roleIds.length > 0
? await db
.select()
.from(roleResources)
.where(
and(
eq(roleResources.resourceId, resourceId),
inArray(roleResources.roleId, roleIds)
)
)
.limit(1)
: [];
const roleResourceAccess = await db
.select()
.from(roleResources)
.where(
and(
eq(roleResources.resourceId, resourceId),
eq(roleResources.roleId, roleId)
)
)
.limit(1);
if (roleResourceAccess.length > 0) {
return true;

View File

@@ -1,29 +1,26 @@
import { db } from "@server/db";
import { and, eq, inArray } from "drizzle-orm";
import { and, eq } from "drizzle-orm";
import { roleSiteResources, userSiteResources } from "@server/db";
export async function canUserAccessSiteResource({
userId,
resourceId,
roleIds
roleId
}: {
userId: string;
resourceId: number;
roleIds: number[];
roleId: number;
}): Promise<boolean> {
const roleResourceAccess =
roleIds.length > 0
? await db
.select()
.from(roleSiteResources)
.where(
and(
eq(roleSiteResources.siteResourceId, resourceId),
inArray(roleSiteResources.roleId, roleIds)
)
)
.limit(1)
: [];
const roleResourceAccess = await db
.select()
.from(roleSiteResources)
.where(
and(
eq(roleSiteResources.siteResourceId, resourceId),
eq(roleSiteResources.roleId, roleId)
)
)
.limit(1);
if (roleResourceAccess.length > 0) {
return true;

View File

@@ -1,8 +1,10 @@
import { flushBandwidthToDb } from "@server/routers/newt/handleReceiveBandwidthMessage";
import { flushSiteBandwidthToDb } from "@server/routers/gerbil/receiveBandwidth";
import { stopPingAccumulator } from "@server/routers/newt/pingAccumulator";
import { cleanup as wsCleanup } from "#dynamic/routers/ws";
async function cleanup() {
await stopPingAccumulator();
await flushBandwidthToDb();
await flushSiteBandwidthToDb();
await wsCleanup();

View File

@@ -1,7 +1,7 @@
import { drizzle as DrizzlePostgres } from "drizzle-orm/node-postgres";
import { Pool } from "pg";
import { readConfigFile } from "@server/lib/readConfigFile";
import { withReplicas } from "drizzle-orm/pg-core";
import { createPool } from "./poolConfig";
function createDb() {
const config = readConfigFile();
@@ -39,12 +39,17 @@ function createDb() {
// Create connection pools instead of individual connections
const poolConfig = config.postgres.pool;
const primaryPool = new Pool({
const maxConnections = poolConfig?.max_connections || 20;
const idleTimeoutMs = poolConfig?.idle_timeout_ms || 30000;
const connectionTimeoutMs = poolConfig?.connection_timeout_ms || 5000;
const primaryPool = createPool(
connectionString,
max: poolConfig?.max_connections || 20,
idleTimeoutMillis: poolConfig?.idle_timeout_ms || 30000,
connectionTimeoutMillis: poolConfig?.connection_timeout_ms || 5000
});
maxConnections,
idleTimeoutMs,
connectionTimeoutMs,
"primary"
);
const replicas = [];
@@ -55,14 +60,16 @@ function createDb() {
})
);
} else {
const maxReplicaConnections =
poolConfig?.max_replica_connections || 20;
for (const conn of replicaConnections) {
const replicaPool = new Pool({
connectionString: conn.connection_string,
max: poolConfig?.max_replica_connections || 20,
idleTimeoutMillis: poolConfig?.idle_timeout_ms || 30000,
connectionTimeoutMillis:
poolConfig?.connection_timeout_ms || 5000
});
const replicaPool = createPool(
conn.connection_string,
maxReplicaConnections,
idleTimeoutMs,
connectionTimeoutMs,
"replica"
);
replicas.push(
DrizzlePostgres(replicaPool, {
logger: process.env.QUERY_LOGGING == "true"
@@ -84,4 +91,4 @@ export default db;
export const primaryDb = db.$primary;
export type Transaction = Parameters<
Parameters<(typeof db)["transaction"]>[0]
>[0];
>[0];

View File

@@ -1,9 +1,9 @@
import { drizzle as DrizzlePostgres } from "drizzle-orm/node-postgres";
import { Pool } from "pg";
import { readConfigFile } from "@server/lib/readConfigFile";
import { withReplicas } from "drizzle-orm/pg-core";
import { build } from "@server/build";
import { db as mainDb, primaryDb as mainPrimaryDb } from "./driver";
import { createPool } from "./poolConfig";
function createLogsDb() {
// Only use separate logs database in SaaS builds
@@ -42,12 +42,17 @@ function createLogsDb() {
// Create separate connection pool for logs database
const poolConfig = logsConfig?.pool || config.postgres?.pool;
const primaryPool = new Pool({
const maxConnections = poolConfig?.max_connections || 20;
const idleTimeoutMs = poolConfig?.idle_timeout_ms || 30000;
const connectionTimeoutMs = poolConfig?.connection_timeout_ms || 5000;
const primaryPool = createPool(
connectionString,
max: poolConfig?.max_connections || 20,
idleTimeoutMillis: poolConfig?.idle_timeout_ms || 30000,
connectionTimeoutMillis: poolConfig?.connection_timeout_ms || 5000
});
maxConnections,
idleTimeoutMs,
connectionTimeoutMs,
"logs-primary"
);
const replicas = [];
@@ -58,14 +63,16 @@ function createLogsDb() {
})
);
} else {
const maxReplicaConnections =
poolConfig?.max_replica_connections || 20;
for (const conn of replicaConnections) {
const replicaPool = new Pool({
connectionString: conn.connection_string,
max: poolConfig?.max_replica_connections || 20,
idleTimeoutMillis: poolConfig?.idle_timeout_ms || 30000,
connectionTimeoutMillis:
poolConfig?.connection_timeout_ms || 5000
});
const replicaPool = createPool(
conn.connection_string,
maxReplicaConnections,
idleTimeoutMs,
connectionTimeoutMs,
"logs-replica"
);
replicas.push(
DrizzlePostgres(replicaPool, {
logger: process.env.QUERY_LOGGING == "true"
@@ -84,4 +91,4 @@ function createLogsDb() {
export const logsDb = createLogsDb();
export default logsDb;
export const primaryLogsDb = logsDb.$primary;
export const primaryLogsDb = logsDb.$primary;

View File

@@ -0,0 +1,63 @@
import { Pool, PoolConfig } from "pg";
import logger from "@server/logger";
/**
 * Builds a pg `PoolConfig` from the given sizing/timeout knobs.
 *
 * @param connectionString Postgres connection string for the pool
 * @param maxConnections Upper bound on concurrently open clients (`max`)
 * @param idleTimeoutMs How long an idle client may sit before being reaped
 * @param connectionTimeoutMs How long to wait when acquiring a new connection
 * @returns A config object ready to be passed to `new Pool(...)`
 */
export function createPoolConfig(
    connectionString: string,
    maxConnections: number,
    idleTimeoutMs: number,
    connectionTimeoutMs: number
): PoolConfig {
    return {
        connectionString,
        max: maxConnections,
        idleTimeoutMillis: idleTimeoutMs,
        connectionTimeoutMillis: connectionTimeoutMs,
        // TCP keepalive to prevent silent connection drops by NAT gateways,
        // load balancers, and other intermediate network devices (e.g. AWS
        // NAT Gateway drops idle TCP connections after ~350s)
        keepAlive: true,
        keepAliveInitialDelayMillis: 10000, // send first keepalive after 10s of idle
        // Keep the pool's hold on the event loop even when every client is
        // idle; idle clients are still reaped via idleTimeoutMillis above.
        // NOTE(review): the previous comment claimed this setting made
        // connection recycling "more aggressive" — it does not. In pg,
        // allowExitOnIdle only controls whether an all-idle pool lets the
        // Node process exit.
        allowExitOnIdle: false
    };
}
/**
 * Wires defensive event handlers onto a pg pool.
 *
 * @param pool The pool to instrument
 * @param label Human-readable pool name used in log lines (e.g. "primary")
 */
export function attachPoolErrorHandlers(pool: Pool, label: string): void {
    // Idle clients can emit 'error' (e.g. the server or a middlebox closed
    // the socket). An unhandled 'error' event would crash the whole
    // process, so log it and let the pool replace the client.
    pool.on("error", (e) =>
        logger.error(
            `Unexpected error on idle ${label} database client: ${e.message}`
        )
    );

    // Every freshly established connection gets a statement timeout so a
    // single slow query cannot occupy a pooled client forever. Best-effort:
    // failure to apply the timeout is logged but does not kill the client.
    pool.on("connect", (c) => {
        void c
            .query("SET statement_timeout = '30s'")
            .catch((e: Error) =>
                logger.warn(
                    `Failed to set statement_timeout on ${label} client: ${e.message}`
                )
            );
    });
}
/**
 * Creates a fully configured pg pool: builds the config, constructs the
 * pool, and attaches the standard error/connect handlers.
 *
 * @param connectionString Postgres connection string
 * @param maxConnections Upper bound on concurrently open clients
 * @param idleTimeoutMs Idle-client reap timeout in milliseconds
 * @param connectionTimeoutMs Connection-acquisition timeout in milliseconds
 * @param label Human-readable pool name used in log lines
 * @returns The instrumented pool, ready for use
 */
export function createPool(
    connectionString: string,
    maxConnections: number,
    idleTimeoutMs: number,
    connectionTimeoutMs: number,
    label: string
): Pool {
    const config = createPoolConfig(
        connectionString,
        maxConnections,
        idleTimeoutMs,
        connectionTimeoutMs
    );
    const instance = new Pool(config);
    attachPoolErrorHandlers(instance, label);
    return instance;
}

View File

@@ -9,7 +9,6 @@ import {
real,
serial,
text,
unique,
varchar
} from "drizzle-orm/pg-core";
@@ -336,6 +335,9 @@ export const userOrgs = pgTable("userOrgs", {
onDelete: "cascade"
})
.notNull(),
roleId: integer("roleId")
.notNull()
.references(() => roles.roleId),
isOwner: boolean("isOwner").notNull().default(false),
autoProvisioned: boolean("autoProvisioned").default(false),
pamUsername: varchar("pamUsername") // cleaned username for ssh and such
@@ -384,22 +386,6 @@ export const roles = pgTable("roles", {
sshUnixGroups: text("sshUnixGroups").default("[]")
});
export const userOrgRoles = pgTable(
"userOrgRoles",
{
userId: varchar("userId")
.notNull()
.references(() => users.userId, { onDelete: "cascade" }),
orgId: varchar("orgId")
.notNull()
.references(() => orgs.orgId, { onDelete: "cascade" }),
roleId: integer("roleId")
.notNull()
.references(() => roles.roleId, { onDelete: "cascade" })
},
(t) => [unique().on(t.userId, t.orgId, t.roleId)]
);
export const roleActions = pgTable("roleActions", {
roleId: integer("roleId")
.notNull()
@@ -1049,7 +1035,6 @@ export type RoleResource = InferSelectModel<typeof roleResources>;
export type UserResource = InferSelectModel<typeof userResources>;
export type UserInvite = InferSelectModel<typeof userInvites>;
export type UserOrg = InferSelectModel<typeof userOrgs>;
export type UserOrgRole = InferSelectModel<typeof userOrgRoles>;
export type ResourceSession = InferSelectModel<typeof resourceSessions>;
export type ResourcePincode = InferSelectModel<typeof resourcePincode>;
export type ResourcePassword = InferSelectModel<typeof resourcePassword>;

View File

@@ -12,7 +12,6 @@ import {
resources,
roleResources,
sessions,
userOrgRoles,
userOrgs,
userResources,
users,
@@ -105,57 +104,24 @@ export async function getUserSessionWithUser(
}
/**
* Get user organization role (single role; prefer getUserOrgRoleIds + roles for multi-role).
* @deprecated Use userOrgRoles table and getUserOrgRoleIds for multi-role support.
* Get user organization role
*/
export async function getUserOrgRole(userId: string, orgId: string) {
const userOrg = await db
const userOrgRole = await db
.select({
userId: userOrgs.userId,
orgId: userOrgs.orgId,
roleId: userOrgs.roleId,
isOwner: userOrgs.isOwner,
autoProvisioned: userOrgs.autoProvisioned
autoProvisioned: userOrgs.autoProvisioned,
roleName: roles.name
})
.from(userOrgs)
.where(and(eq(userOrgs.userId, userId), eq(userOrgs.orgId, orgId)))
.leftJoin(roles, eq(userOrgs.roleId, roles.roleId))
.limit(1);
if (userOrg.length === 0) return null;
const [firstRole] = await db
.select({
roleId: userOrgRoles.roleId,
roleName: roles.name
})
.from(userOrgRoles)
.leftJoin(roles, eq(userOrgRoles.roleId, roles.roleId))
.where(
and(
eq(userOrgRoles.userId, userId),
eq(userOrgRoles.orgId, orgId)
)
)
.limit(1);
return firstRole
? {
...userOrg[0],
roleId: firstRole.roleId,
roleName: firstRole.roleName
}
: { ...userOrg[0], roleId: null, roleName: null };
}
/**
* Get role name by role ID (for display).
*/
export async function getRoleName(roleId: number): Promise<string | null> {
const [row] = await db
.select({ name: roles.name })
.from(roles)
.where(eq(roles.roleId, roleId))
.limit(1);
return row?.name ?? null;
return userOrgRole.length > 0 ? userOrgRole[0] : null;
}
/**

View File

@@ -1,12 +1,6 @@
import { randomUUID } from "crypto";
import { InferSelectModel } from "drizzle-orm";
import {
index,
integer,
sqliteTable,
text,
unique
} from "drizzle-orm/sqlite-core";
import { index, integer, sqliteTable, text } from "drizzle-orm/sqlite-core";
export const domains = sqliteTable("domains", {
domainId: text("domainId").primaryKey(),
@@ -649,6 +643,9 @@ export const userOrgs = sqliteTable("userOrgs", {
onDelete: "cascade"
})
.notNull(),
roleId: integer("roleId")
.notNull()
.references(() => roles.roleId),
isOwner: integer("isOwner", { mode: "boolean" }).notNull().default(false),
autoProvisioned: integer("autoProvisioned", {
mode: "boolean"
@@ -703,22 +700,6 @@ export const roles = sqliteTable("roles", {
sshUnixGroups: text("sshUnixGroups").default("[]")
});
export const userOrgRoles = sqliteTable(
"userOrgRoles",
{
userId: text("userId")
.notNull()
.references(() => users.userId, { onDelete: "cascade" }),
orgId: text("orgId")
.notNull()
.references(() => orgs.orgId, { onDelete: "cascade" }),
roleId: integer("roleId")
.notNull()
.references(() => roles.roleId, { onDelete: "cascade" })
},
(t) => [unique().on(t.userId, t.orgId, t.roleId)]
);
export const roleActions = sqliteTable("roleActions", {
roleId: integer("roleId")
.notNull()
@@ -1153,7 +1134,6 @@ export type RoleResource = InferSelectModel<typeof roleResources>;
export type UserResource = InferSelectModel<typeof userResources>;
export type UserInvite = InferSelectModel<typeof userInvites>;
export type UserOrg = InferSelectModel<typeof userOrgs>;
export type UserOrgRole = InferSelectModel<typeof userOrgRoles>;
export type ResourceSession = InferSelectModel<typeof resourceSessions>;
export type ResourcePincode = InferSelectModel<typeof resourcePincode>;
export type ResourcePassword = InferSelectModel<typeof resourcePassword>;

View File

@@ -74,7 +74,7 @@ declare global {
session: Session;
userOrg?: UserOrg;
apiKeyOrg?: ApiKeyOrg;
userOrgRoleIds?: number[];
userOrgRoleId?: number;
userOrgId?: string;
userOrgIds?: string[];
remoteExitNode?: RemoteExitNode;

View File

@@ -10,7 +10,6 @@ import {
roles,
Transaction,
userClients,
userOrgRoles,
userOrgs
} from "@server/db";
import { getUniqueClientName } from "@server/db/names";
@@ -40,36 +39,20 @@ export async function calculateUserClientsForOrgs(
return;
}
// Get all user orgs with all roles (for org list and role-based logic)
const userOrgRoleRows = await transaction
// Get all user orgs
const allUserOrgs = await transaction
.select()
.from(userOrgs)
.innerJoin(
userOrgRoles,
and(
eq(userOrgs.userId, userOrgRoles.userId),
eq(userOrgs.orgId, userOrgRoles.orgId)
)
)
.innerJoin(roles, eq(userOrgRoles.roleId, roles.roleId))
.innerJoin(roles, eq(roles.roleId, userOrgs.roleId))
.where(eq(userOrgs.userId, userId));
const userOrgIds = [...new Set(userOrgRoleRows.map((r) => r.userOrgs.orgId))];
const orgIdToRoleRows = new Map<
string,
(typeof userOrgRoleRows)[0][]
>();
for (const r of userOrgRoleRows) {
const list = orgIdToRoleRows.get(r.userOrgs.orgId) ?? [];
list.push(r);
orgIdToRoleRows.set(r.userOrgs.orgId, list);
}
const userOrgIds = allUserOrgs.map(({ userOrgs: uo }) => uo.orgId);
// For each OLM, ensure there's a client in each org the user is in
for (const olm of userOlms) {
for (const orgId of orgIdToRoleRows.keys()) {
const roleRowsForOrg = orgIdToRoleRows.get(orgId)!;
const userOrg = roleRowsForOrg[0].userOrgs;
for (const userRoleOrg of allUserOrgs) {
const { userOrgs: userOrg, roles: role } = userRoleOrg;
const orgId = userOrg.orgId;
const [org] = await transaction
.select()
@@ -213,7 +196,7 @@ export async function calculateUserClientsForOrgs(
const requireApproval =
build !== "oss" &&
isOrgLicensed &&
roleRowsForOrg.some((r) => r.roles.requireDeviceApproval);
role.requireDeviceApproval;
const newClientData: InferInsertModel<typeof clients> = {
userId,

View File

@@ -14,7 +14,6 @@ import {
siteResources,
sites,
Transaction,
userOrgRoles,
userOrgs,
userSiteResources
} from "@server/db";
@@ -78,10 +77,10 @@ export async function getClientSiteResourceAccess(
// get all of the users in these roles
const userIdsFromRoles = await trx
.select({
userId: userOrgRoles.userId
userId: userOrgs.userId
})
.from(userOrgRoles)
.where(inArray(userOrgRoles.roleId, roleIds))
.from(userOrgs)
.where(inArray(userOrgs.roleId, roleIds))
.then((rows) => rows.map((row) => row.userId));
const newAllUserIds = Array.from(
@@ -821,12 +820,12 @@ export async function rebuildClientAssociationsFromClient(
// Role-based access
const roleIds = await trx
.select({ roleId: userOrgRoles.roleId })
.from(userOrgRoles)
.select({ roleId: userOrgs.roleId })
.from(userOrgs)
.where(
and(
eq(userOrgRoles.userId, client.userId),
eq(userOrgRoles.orgId, client.orgId)
eq(userOrgs.userId, client.userId),
eq(userOrgs.orgId, client.orgId)
)
) // this needs to be locked onto this org or else cross-org access could happen
.then((rows) => rows.map((row) => row.roleId));

22
server/lib/tokenCache.ts Normal file
View File

@@ -0,0 +1,22 @@
/**
 * Returns a cached plaintext token for `cacheKey` if one exists and has not
 * expired, otherwise calls `createSession` to mint a fresh token, caches it
 * for `ttlSeconds`, and returns it. Exists to stop a thundering herd of
 * concurrent session creation.
 *
 * NOTE(review): the header originally promised a Redis-backed cache with
 * AES encryption at rest, but the body ignored every parameter except
 * `createSession` and always minted a fresh token — i.e. no caching
 * happened at all. This version implements a process-local in-memory
 * cache keyed by `cacheKey`; wiring it to Redis (using `secret` for
 * encryption) is still TODO, so the cache is per-process only.
 *
 * @param cacheKey Unique cache key, e.g. `"newt:token_cache:abc123"`
 * @param secret Server secret reserved for AES encryption once the cache
 *               is backed by Redis (currently unused)
 * @param ttlSeconds Cache TTL in seconds (should match session expiry);
 *                   a value <= 0 disables caching for this call
 * @param createSession Factory that mints a new session and returns its raw token
 */

// Process-local cache: cacheKey -> freshly minted token + absolute expiry
// (milliseconds since epoch). Entries are overwritten on refresh.
const tokenCache = new Map<string, { token: string; expiresAt: number }>();

export async function getOrCreateCachedToken(
    cacheKey: string,
    secret: string,
    ttlSeconds: number,
    createSession: () => Promise<string>
): Promise<string> {
    const now = Date.now();

    // Serve from cache while the entry is still fresh.
    const cached = tokenCache.get(cacheKey);
    if (cached !== undefined && cached.expiresAt > now) {
        return cached.token;
    }

    // Miss or expired: mint a fresh token. If minting throws, nothing is
    // cached and the error propagates to the caller.
    const token = await createSession();
    if (ttlSeconds > 0) {
        tokenCache.set(cacheKey, {
            token,
            expiresAt: now + ttlSeconds * 1000
        });
    } else {
        // Non-positive TTL means "do not cache"; drop any stale entry.
        tokenCache.delete(cacheKey);
    }
    return token;
}

View File

@@ -6,7 +6,7 @@ import {
siteResources,
sites,
Transaction,
userOrgRoles,
UserOrg,
userOrgs,
userResources,
userSiteResources,
@@ -19,15 +19,9 @@ import { FeatureId } from "@server/lib/billing";
export async function assignUserToOrg(
org: Org,
values: typeof userOrgs.$inferInsert,
roleId: number,
trx: Transaction | typeof db = db
) {
const [userOrg] = await trx.insert(userOrgs).values(values).returning();
await trx.insert(userOrgRoles).values({
userId: userOrg.userId,
orgId: userOrg.orgId,
roleId
});
// calculate if the user is in any other of the orgs before we count it as an add to the billing org
if (org.billingOrgId) {
@@ -64,14 +58,6 @@ export async function removeUserFromOrg(
userId: string,
trx: Transaction | typeof db = db
) {
await trx
.delete(userOrgRoles)
.where(
and(
eq(userOrgRoles.userId, userId),
eq(userOrgRoles.orgId, org.orgId)
)
);
await trx
.delete(userOrgs)
.where(and(eq(userOrgs.userId, userId), eq(userOrgs.orgId, org.orgId)));

View File

@@ -1,22 +0,0 @@
import { db, userOrgRoles } from "@server/db";
import { and, eq } from "drizzle-orm";
/**
 * Looks up every role ID a user holds within a given organization.
 *
 * An empty array means the user has no roles in that org; callers must
 * interpret that as "no access".
 *
 * @param userId The user to look up
 * @param orgId The organization scope for the lookup
 * @returns All role IDs assigned to the user in that org
 */
export async function getUserOrgRoleIds(
    userId: string,
    orgId: string
): Promise<number[]> {
    // Restrict to this user's memberships within this single org.
    const membershipFilter = and(
        eq(userOrgRoles.userId, userId),
        eq(userOrgRoles.orgId, orgId)
    );

    const memberships = await db
        .select({ roleId: userOrgRoles.roleId })
        .from(userOrgRoles)
        .where(membershipFilter);

    const roleIds: number[] = [];
    for (const { roleId } of memberships) {
        roleIds.push(roleId);
    }
    return roleIds;
}

View File

@@ -21,7 +21,8 @@ export async function getUserOrgs(
try {
const userOrganizations = await db
.select({
orgId: userOrgs.orgId
orgId: userOrgs.orgId,
roleId: userOrgs.roleId
})
.from(userOrgs)
.where(eq(userOrgs.userId, userId));

View File

@@ -6,7 +6,6 @@ import createHttpError from "http-errors";
import HttpCode from "@server/types/HttpCode";
import { canUserAccessResource } from "@server/auth/canUserAccessResource";
import { checkOrgAccessPolicy } from "#dynamic/lib/checkOrgAccessPolicy";
import { getUserOrgRoleIds } from "@server/lib/userOrgRoles";
export async function verifyAccessTokenAccess(
req: Request,
@@ -94,10 +93,7 @@ export async function verifyAccessTokenAccess(
)
);
} else {
req.userOrgRoleIds = await getUserOrgRoleIds(
req.userOrg.userId,
resource[0].orgId!
);
req.userOrgRoleId = req.userOrg.roleId;
req.userOrgId = resource[0].orgId!;
}
@@ -122,7 +118,7 @@ export async function verifyAccessTokenAccess(
const resourceAllowed = await canUserAccessResource({
userId,
resourceId,
roleIds: req.userOrgRoleIds ?? []
roleId: req.userOrgRoleId!
});
if (!resourceAllowed) {

View File

@@ -1,11 +1,10 @@
import { Request, Response, NextFunction } from "express";
import { db } from "@server/db";
import { roles, userOrgs } from "@server/db";
import { and, eq, inArray } from "drizzle-orm";
import { and, eq } from "drizzle-orm";
import createHttpError from "http-errors";
import HttpCode from "@server/types/HttpCode";
import { checkOrgAccessPolicy } from "#dynamic/lib/checkOrgAccessPolicy";
import { getUserOrgRoleIds } from "@server/lib/userOrgRoles";
export async function verifyAdmin(
req: Request,
@@ -63,29 +62,13 @@ export async function verifyAdmin(
}
}
req.userOrgRoleIds = await getUserOrgRoleIds(req.userOrg.userId, orgId!);
if (req.userOrgRoleIds.length === 0) {
return next(
createHttpError(
HttpCode.FORBIDDEN,
"User does not have Admin access"
)
);
}
const userAdminRoles = await db
const userRole = await db
.select()
.from(roles)
.where(
and(
inArray(roles.roleId, req.userOrgRoleIds),
eq(roles.isAdmin, true)
)
)
.where(eq(roles.roleId, req.userOrg.roleId))
.limit(1);
if (userAdminRoles.length === 0) {
if (userRole.length === 0 || !userRole[0].isAdmin) {
return next(
createHttpError(
HttpCode.FORBIDDEN,

View File

@@ -1,11 +1,10 @@
import { Request, Response, NextFunction } from "express";
import { db } from "@server/db";
import { userOrgs, apiKeys, apiKeyOrg } from "@server/db";
import { and, eq } from "drizzle-orm";
import { and, eq, or } from "drizzle-orm";
import createHttpError from "http-errors";
import HttpCode from "@server/types/HttpCode";
import { checkOrgAccessPolicy } from "#dynamic/lib/checkOrgAccessPolicy";
import { getUserOrgRoleIds } from "@server/lib/userOrgRoles";
export async function verifyApiKeyAccess(
req: Request,
@@ -104,10 +103,8 @@ export async function verifyApiKeyAccess(
}
}
req.userOrgRoleIds = await getUserOrgRoleIds(
req.userOrg.userId,
orgId
);
const userOrgRoleId = req.userOrg.roleId;
req.userOrgRoleId = userOrgRoleId;
return next();
} catch (error) {

View File

@@ -1,12 +1,11 @@
import { Request, Response, NextFunction } from "express";
import { Client, db } from "@server/db";
import { userOrgs, clients, roleClients, userClients } from "@server/db";
import { and, eq, inArray } from "drizzle-orm";
import { and, eq } from "drizzle-orm";
import createHttpError from "http-errors";
import HttpCode from "@server/types/HttpCode";
import { checkOrgAccessPolicy } from "#dynamic/lib/checkOrgAccessPolicy";
import logger from "@server/logger";
import { getUserOrgRoleIds } from "@server/lib/userOrgRoles";
export async function verifyClientAccess(
req: Request,
@@ -114,30 +113,21 @@ export async function verifyClientAccess(
}
}
req.userOrgRoleIds = await getUserOrgRoleIds(
req.userOrg.userId,
client.orgId
);
const userOrgRoleId = req.userOrg.roleId;
req.userOrgRoleId = userOrgRoleId;
req.userOrgId = client.orgId;
// Check role-based client access (any of user's roles)
const roleClientAccessList =
(req.userOrgRoleIds?.length ?? 0) > 0
? await db
.select()
.from(roleClients)
.where(
and(
eq(roleClients.clientId, client.clientId),
inArray(
roleClients.roleId,
req.userOrgRoleIds!
)
)
)
.limit(1)
: [];
const [roleClientAccess] = roleClientAccessList;
// Check role-based site access first
const [roleClientAccess] = await db
.select()
.from(roleClients)
.where(
and(
eq(roleClients.clientId, client.clientId),
eq(roleClients.roleId, userOrgRoleId)
)
)
.limit(1);
if (roleClientAccess) {
// User has access to the site through their role

View File

@@ -1,11 +1,10 @@
import { Request, Response, NextFunction } from "express";
import { db, domains, orgDomains } from "@server/db";
import { userOrgs } from "@server/db";
import { userOrgs, apiKeyOrg } from "@server/db";
import { and, eq } from "drizzle-orm";
import createHttpError from "http-errors";
import HttpCode from "@server/types/HttpCode";
import { checkOrgAccessPolicy } from "#dynamic/lib/checkOrgAccessPolicy";
import { getUserOrgRoleIds } from "@server/lib/userOrgRoles";
export async function verifyDomainAccess(
req: Request,
@@ -64,7 +63,7 @@ export async function verifyDomainAccess(
.where(
and(
eq(userOrgs.userId, userId),
eq(userOrgs.orgId, orgId)
eq(userOrgs.orgId, apiKeyOrg.orgId)
)
)
.limit(1);
@@ -98,7 +97,8 @@ export async function verifyDomainAccess(
}
}
req.userOrgRoleIds = await getUserOrgRoleIds(req.userOrg.userId, orgId);
const userOrgRoleId = req.userOrg.roleId;
req.userOrgRoleId = userOrgRoleId;
return next();
} catch (error) {

View File

@@ -1,11 +1,10 @@
import { Request, Response, NextFunction } from "express";
import { db } from "@server/db";
import { db, orgs } from "@server/db";
import { userOrgs } from "@server/db";
import { and, eq } from "drizzle-orm";
import createHttpError from "http-errors";
import HttpCode from "@server/types/HttpCode";
import { checkOrgAccessPolicy } from "#dynamic/lib/checkOrgAccessPolicy";
import { getUserOrgRoleIds } from "@server/lib/userOrgRoles";
export async function verifyOrgAccess(
req: Request,
@@ -65,8 +64,8 @@ export async function verifyOrgAccess(
}
}
// User has access, attach the user's role(s) to the request for potential future use
req.userOrgRoleIds = await getUserOrgRoleIds(req.userOrg.userId, orgId);
// User has access, attach the user's role to the request for potential future use
req.userOrgRoleId = req.userOrg.roleId;
req.userOrgId = orgId;
return next();

View File

@@ -1,11 +1,10 @@
import { Request, Response, NextFunction } from "express";
import { db, Resource } from "@server/db";
import { resources, userOrgs, userResources, roleResources } from "@server/db";
import { and, eq, inArray } from "drizzle-orm";
import { and, eq } from "drizzle-orm";
import createHttpError from "http-errors";
import HttpCode from "@server/types/HttpCode";
import { checkOrgAccessPolicy } from "#dynamic/lib/checkOrgAccessPolicy";
import { getUserOrgRoleIds } from "@server/lib/userOrgRoles";
export async function verifyResourceAccess(
req: Request,
@@ -108,28 +107,20 @@ export async function verifyResourceAccess(
}
}
req.userOrgRoleIds = await getUserOrgRoleIds(
req.userOrg.userId,
resource.orgId
);
const userOrgRoleId = req.userOrg.roleId;
req.userOrgRoleId = userOrgRoleId;
req.userOrgId = resource.orgId;
const roleResourceAccess =
(req.userOrgRoleIds?.length ?? 0) > 0
? await db
.select()
.from(roleResources)
.where(
and(
eq(roleResources.resourceId, resource.resourceId),
inArray(
roleResources.roleId,
req.userOrgRoleIds!
)
)
)
.limit(1)
: [];
const roleResourceAccess = await db
.select()
.from(roleResources)
.where(
and(
eq(roleResources.resourceId, resource.resourceId),
eq(roleResources.roleId, userOrgRoleId)
)
)
.limit(1);
if (roleResourceAccess.length > 0) {
return next();

View File

@@ -6,7 +6,6 @@ import createHttpError from "http-errors";
import HttpCode from "@server/types/HttpCode";
import logger from "@server/logger";
import { checkOrgAccessPolicy } from "#dynamic/lib/checkOrgAccessPolicy";
import { getUserOrgRoleIds } from "@server/lib/userOrgRoles";
export async function verifyRoleAccess(
req: Request,
@@ -100,6 +99,7 @@ export async function verifyRoleAccess(
}
if (!req.userOrg) {
// get the userORg
const userOrg = await db
.select()
.from(userOrgs)
@@ -109,7 +109,7 @@ export async function verifyRoleAccess(
.limit(1);
req.userOrg = userOrg[0];
req.userOrgRoleIds = await getUserOrgRoleIds(userId, orgId!);
req.userOrgRoleId = userOrg[0].roleId;
}
if (!req.userOrg) {

View File

@@ -1,11 +1,10 @@
import { Request, Response, NextFunction } from "express";
import { db } from "@server/db";
import { sites, Site, userOrgs, userSites, roleSites, roles } from "@server/db";
import { and, eq, inArray, or } from "drizzle-orm";
import { and, eq, or } from "drizzle-orm";
import createHttpError from "http-errors";
import HttpCode from "@server/types/HttpCode";
import { checkOrgAccessPolicy } from "#dynamic/lib/checkOrgAccessPolicy";
import { getUserOrgRoleIds } from "@server/lib/userOrgRoles";
export async function verifySiteAccess(
req: Request,
@@ -113,29 +112,21 @@ export async function verifySiteAccess(
}
}
req.userOrgRoleIds = await getUserOrgRoleIds(
req.userOrg.userId,
site.orgId
);
const userOrgRoleId = req.userOrg.roleId;
req.userOrgRoleId = userOrgRoleId;
req.userOrgId = site.orgId;
// Check role-based site access first (any of user's roles)
const roleSiteAccess =
(req.userOrgRoleIds?.length ?? 0) > 0
? await db
.select()
.from(roleSites)
.where(
and(
eq(roleSites.siteId, site.siteId),
inArray(
roleSites.roleId,
req.userOrgRoleIds!
)
)
)
.limit(1)
: [];
// Check role-based site access first
const roleSiteAccess = await db
.select()
.from(roleSites)
.where(
and(
eq(roleSites.siteId, site.siteId),
eq(roleSites.roleId, userOrgRoleId)
)
)
.limit(1);
if (roleSiteAccess.length > 0) {
// User's role has access to the site

View File

@@ -1,12 +1,11 @@
import { Request, Response, NextFunction } from "express";
import { db, roleSiteResources, userOrgs, userSiteResources } from "@server/db";
import { siteResources } from "@server/db";
import { eq, and, inArray } from "drizzle-orm";
import { eq, and } from "drizzle-orm";
import createHttpError from "http-errors";
import HttpCode from "@server/types/HttpCode";
import logger from "@server/logger";
import { checkOrgAccessPolicy } from "#dynamic/lib/checkOrgAccessPolicy";
import { getUserOrgRoleIds } from "@server/lib/userOrgRoles";
export async function verifySiteResourceAccess(
req: Request,
@@ -110,34 +109,23 @@ export async function verifySiteResourceAccess(
}
}
req.userOrgRoleIds = await getUserOrgRoleIds(
req.userOrg.userId,
siteResource.orgId
);
const userOrgRoleId = req.userOrg.roleId;
req.userOrgRoleId = userOrgRoleId;
req.userOrgId = siteResource.orgId;
// Attach the siteResource to the request for use in the next middleware/route
req.siteResource = siteResource;
const roleResourceAccess =
(req.userOrgRoleIds?.length ?? 0) > 0
? await db
.select()
.from(roleSiteResources)
.where(
and(
eq(
roleSiteResources.siteResourceId,
siteResourceIdNum
),
inArray(
roleSiteResources.roleId,
req.userOrgRoleIds!
)
)
)
.limit(1)
: [];
const roleResourceAccess = await db
.select()
.from(roleSiteResources)
.where(
and(
eq(roleSiteResources.siteResourceId, siteResourceIdNum),
eq(roleSiteResources.roleId, userOrgRoleId)
)
)
.limit(1);
if (roleResourceAccess.length > 0) {
return next();

View File

@@ -6,7 +6,6 @@ import createHttpError from "http-errors";
import HttpCode from "@server/types/HttpCode";
import { canUserAccessResource } from "../auth/canUserAccessResource";
import { checkOrgAccessPolicy } from "#dynamic/lib/checkOrgAccessPolicy";
import { getUserOrgRoleIds } from "@server/lib/userOrgRoles";
export async function verifyTargetAccess(
req: Request,
@@ -100,10 +99,7 @@ export async function verifyTargetAccess(
)
);
} else {
req.userOrgRoleIds = await getUserOrgRoleIds(
req.userOrg.userId,
resource[0].orgId!
);
req.userOrgRoleId = req.userOrg.roleId;
req.userOrgId = resource[0].orgId!;
}
@@ -130,7 +126,7 @@ export async function verifyTargetAccess(
const resourceAllowed = await canUserAccessResource({
userId,
resourceId,
roleIds: req.userOrgRoleIds ?? []
roleId: req.userOrgRoleId!
});
if (!resourceAllowed) {

View File

@@ -12,7 +12,7 @@ export async function verifyUserInRole(
const roleId = parseInt(
req.params.roleId || req.body.roleId || req.query.roleId
);
const userOrgRoleIds = req.userOrgRoleIds ?? [];
const userRoleId = req.userOrgRoleId;
if (isNaN(roleId)) {
return next(
@@ -20,7 +20,7 @@ export async function verifyUserInRole(
);
}
if (userOrgRoleIds.length === 0) {
if (!userRoleId) {
return next(
createHttpError(
HttpCode.FORBIDDEN,
@@ -29,7 +29,7 @@ export async function verifyUserInRole(
);
}
if (!userOrgRoleIds.includes(roleId)) {
if (userRoleId !== roleId) {
return next(
createHttpError(
HttpCode.FORBIDDEN,

View File

@@ -15,8 +15,10 @@ import { rateLimitService } from "#private/lib/rateLimit";
import { cleanup as wsCleanup } from "#private/routers/ws";
import { flushBandwidthToDb } from "@server/routers/newt/handleReceiveBandwidthMessage";
import { flushSiteBandwidthToDb } from "@server/routers/gerbil/receiveBandwidth";
import { stopPingAccumulator } from "@server/routers/newt/pingAccumulator";
async function cleanup() {
await stopPingAccumulator();
await flushBandwidthToDb();
await flushSiteBandwidthToDb();
await rateLimitService.cleanup();

View File

@@ -1,3 +1,16 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import NodeCache from "node-cache";
import logger from "@server/logger";
import { redisManager } from "@server/private/lib/redis";

View File

@@ -0,0 +1,77 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import redisManager from "#private/lib/redis";
import { encrypt, decrypt } from "@server/lib/crypto";
import logger from "@server/logger";
/**
 * Returns a cached plaintext token from Redis if one exists and decrypts
 * cleanly, otherwise calls `createSession` to mint a fresh token, stores the
 * encrypted value in Redis with the given TTL, and returns it.
 *
 * Failures at the Redis layer are non-fatal — the function always falls
 * through to session creation, so the caller is never blocked by a Redis
 * outage. Tokens are stored encrypted with `secret` so a Redis compromise
 * does not leak raw session tokens.
 *
 * @param cacheKey Unique Redis key, e.g. `"newt:token_cache:abc123"`
 * @param secret Server secret used for AES encryption/decryption
 * @param ttlSeconds Cache TTL in seconds (should match session expiry)
 * @param createSession Factory that mints a new session and returns its raw token
 * @returns The cached token on a cache hit, otherwise a freshly minted one
 */
export async function getOrCreateCachedToken(
    cacheKey: string,
    secret: string,
    ttlSeconds: number,
    createSession: () => Promise<string>
): Promise<string> {
    // Fast path: try to serve an existing token from the cache.
    if (redisManager.isRedisEnabled()) {
        try {
            const cached = await redisManager.get(cacheKey);
            if (cached) {
                const token = decrypt(cached, secret);
                if (token) {
                    logger.debug(`Token cache hit for key: ${cacheKey}`);
                    return token;
                }
                // Decryption produced an empty string — treat as a miss
                // (e.g. the server secret rotated since the value was cached).
                logger.warn(
                    `Token cache decryption returned empty string for key: ${cacheKey}, treating as miss`
                );
            }
        } catch (e) {
            // Read/decrypt errors are deliberately swallowed: a Redis outage
            // must not prevent the caller from getting a session.
            logger.warn(
                `Token cache read/decrypt failed for key ${cacheKey}, falling through to session creation:`,
                e
            );
        }
    }
    // Cache miss (or Redis disabled/unavailable): mint a fresh session.
    const token = await createSession();
    // Best-effort write-back so subsequent callers hit the cache.
    if (redisManager.isRedisEnabled()) {
        try {
            const encrypted = encrypt(token, secret);
            await redisManager.set(cacheKey, encrypted, ttlSeconds);
            logger.debug(
                `Token cached in Redis for key: ${cacheKey} (TTL ${ttlSeconds}s)`
            );
        } catch (e) {
            // Write failures are non-fatal — the session already exists,
            // so the caller still gets a valid token.
            logger.warn(
                `Token cache write failed for key ${cacheKey} (session was still created):`,
                e
            );
        }
    }
    return token;
}

View File

@@ -13,10 +13,9 @@
import { Request, Response, NextFunction } from "express";
import { userOrgs, db, idp, idpOrg } from "@server/db";
import { and, eq } from "drizzle-orm";
import { and, eq, or } from "drizzle-orm";
import createHttpError from "http-errors";
import HttpCode from "@server/types/HttpCode";
import { getUserOrgRoleIds } from "@server/lib/userOrgRoles";
export async function verifyIdpAccess(
req: Request,
@@ -85,10 +84,8 @@ export async function verifyIdpAccess(
);
}
req.userOrgRoleIds = await getUserOrgRoleIds(
req.userOrg.userId,
idpRes.idpOrg.orgId
);
const userOrgRoleId = req.userOrg.roleId;
req.userOrgRoleId = userOrgRoleId;
return next();
} catch (error) {

View File

@@ -12,12 +12,11 @@
*/
import { Request, Response, NextFunction } from "express";
import { db, exitNodeOrgs, remoteExitNodes } from "@server/db";
import { userOrgs } from "@server/db";
import { and, eq } from "drizzle-orm";
import { db, exitNodeOrgs, exitNodes, remoteExitNodes } from "@server/db";
import { sites, userOrgs, userSites, roleSites, roles } from "@server/db";
import { and, eq, or } from "drizzle-orm";
import createHttpError from "http-errors";
import HttpCode from "@server/types/HttpCode";
import { getUserOrgRoleIds } from "@server/lib/userOrgRoles";
export async function verifyRemoteExitNodeAccess(
req: Request,
@@ -104,10 +103,8 @@ export async function verifyRemoteExitNodeAccess(
);
}
req.userOrgRoleIds = await getUserOrgRoleIds(
req.userOrg.userId,
exitNodeOrg.orgId
);
const userOrgRoleId = req.userOrg.roleId;
req.userOrgRoleId = userOrgRoleId;
return next();
} catch (error) {

View File

@@ -14,7 +14,7 @@
import { Request, Response, NextFunction } from "express";
import { z } from "zod";
import { db } from "@server/db";
import { userOrgs, userOrgRoles, users, roles, orgs } from "@server/db";
import { userOrgs, users, roles, orgs } from "@server/db";
import { eq, and, or } from "drizzle-orm";
import response from "@server/lib/response";
import HttpCode from "@server/types/HttpCode";
@@ -95,14 +95,7 @@ async function getOrgAdmins(orgId: string) {
})
.from(userOrgs)
.innerJoin(users, eq(userOrgs.userId, users.userId))
.leftJoin(
userOrgRoles,
and(
eq(userOrgs.userId, userOrgRoles.userId),
eq(userOrgs.orgId, userOrgRoles.orgId)
)
)
.leftJoin(roles, eq(userOrgRoles.roleId, roles.roleId))
.leftJoin(roles, eq(userOrgs.roleId, roles.roleId))
.where(
and(
eq(userOrgs.orgId, orgId),
@@ -110,11 +103,8 @@ async function getOrgAdmins(orgId: string) {
)
);
// Dedupe by userId (user may have multiple roles)
const byUserId = new Map(
admins.map((a) => [a.userId, a])
);
const orgAdmins = Array.from(byUserId.values()).filter(
// Filter to only include users with verified emails
const orgAdmins = admins.filter(
(admin) => admin.email && admin.email.length > 0
);

View File

@@ -79,7 +79,7 @@ export async function createRemoteExitNode(
const { remoteExitNodeId, secret } = parsedBody.data;
if (req.user && (!req.userOrgRoleIds || req.userOrgRoleIds.length === 0)) {
if (req.user && !req.userOrgRoleId) {
return next(
createHttpError(HttpCode.FORBIDDEN, "User does not have a role")
);

View File

@@ -23,8 +23,10 @@ import { z } from "zod";
import { fromError } from "zod-validation-error";
import {
createRemoteExitNodeSession,
validateRemoteExitNodeSessionToken
validateRemoteExitNodeSessionToken,
EXPIRES
} from "#private/auth/sessions/remoteExitNode";
import { getOrCreateCachedToken } from "@server/private/lib/tokenCache";
import { verifyPassword } from "@server/auth/password";
import logger from "@server/logger";
import config from "@server/lib/config";
@@ -103,14 +105,23 @@ export async function getRemoteExitNodeToken(
);
}
const resToken = generateSessionToken();
await createRemoteExitNodeSession(
resToken,
existingRemoteExitNode.remoteExitNodeId
// Return a cached token if one exists to prevent thundering herd on
// simultaneous restarts; falls back to creating a fresh session when
// Redis is unavailable or the cache has expired.
const resToken = await getOrCreateCachedToken(
`remote_exit_node:token_cache:${existingRemoteExitNode.remoteExitNodeId}`,
config.getRawConfig().server.secret!,
Math.floor(EXPIRES / 1000),
async () => {
const token = generateSessionToken();
await createRemoteExitNodeSession(
token,
existingRemoteExitNode.remoteExitNodeId
);
return token;
}
);
// logger.debug(`Created RemoteExitNode token response: ${JSON.stringify(resToken)}`);
return response<{ token: string }>(res, {
data: {
token: resToken

View File

@@ -31,7 +31,7 @@ import HttpCode from "@server/types/HttpCode";
import createHttpError from "http-errors";
import logger from "@server/logger";
import { fromError } from "zod-validation-error";
import { and, eq, inArray, or } from "drizzle-orm";
import { eq, or, and } from "drizzle-orm";
import { canUserAccessSiteResource } from "@server/auth/canUserAccessSiteResource";
import { signPublicKey, getOrgCAKeys } from "@server/lib/sshCA";
import config from "@server/lib/config";
@@ -125,7 +125,7 @@ export async function signSshKey(
resource: resourceQueryString
} = parsedBody.data;
const userId = req.user?.userId;
const roleIds = req.userOrgRoleIds ?? [];
const roleId = req.userOrgRoleId!;
if (!userId) {
return next(
@@ -133,15 +133,6 @@ export async function signSshKey(
);
}
if (roleIds.length === 0) {
return next(
createHttpError(
HttpCode.FORBIDDEN,
"User has no role in organization"
)
);
}
const [userOrg] = await db
.select()
.from(userOrgs)
@@ -348,7 +339,7 @@ export async function signSshKey(
const hasAccess = await canUserAccessSiteResource({
userId: userId,
resourceId: resource.siteResourceId,
roleIds
roleId: roleId
});
if (!hasAccess) {
@@ -360,39 +351,28 @@ export async function signSshKey(
);
}
const roleRows = await db
const [roleRow] = await db
.select()
.from(roles)
.where(inArray(roles.roleId, roleIds));
.where(eq(roles.roleId, roleId))
.limit(1);
const parsedSudoCommands: string[] = [];
const parsedGroupsSet = new Set<string>();
let homedir: boolean | null = null;
const sudoModeOrder = { none: 0, commands: 1, all: 2 };
let sudoMode: "none" | "commands" | "all" = "none";
for (const roleRow of roleRows) {
try {
const cmds = JSON.parse(roleRow?.sshSudoCommands ?? "[]");
if (Array.isArray(cmds)) parsedSudoCommands.push(...cmds);
} catch {
// skip
}
try {
const grps = JSON.parse(roleRow?.sshUnixGroups ?? "[]");
if (Array.isArray(grps)) grps.forEach((g: string) => parsedGroupsSet.add(g));
} catch {
// skip
}
if (roleRow?.sshCreateHomeDir === true) homedir = true;
const m = roleRow?.sshSudoMode ?? "none";
if (sudoModeOrder[m as keyof typeof sudoModeOrder] > sudoModeOrder[sudoMode]) {
sudoMode = m as "none" | "commands" | "all";
}
let parsedSudoCommands: string[] = [];
let parsedGroups: string[] = [];
try {
parsedSudoCommands = JSON.parse(roleRow?.sshSudoCommands ?? "[]");
if (!Array.isArray(parsedSudoCommands)) parsedSudoCommands = [];
} catch {
parsedSudoCommands = [];
}
const parsedGroups = Array.from(parsedGroupsSet);
if (homedir === null && roleRows.length > 0) {
homedir = roleRows[0].sshCreateHomeDir ?? null;
try {
parsedGroups = JSON.parse(roleRow?.sshUnixGroups ?? "[]");
if (!Array.isArray(parsedGroups)) parsedGroups = [];
} catch {
parsedGroups = [];
}
const homedir = roleRow?.sshCreateHomeDir ?? null;
const sudoMode = roleRow?.sshSudoMode ?? "none";
// get the site
const [newt] = await db

View File

@@ -19,17 +19,14 @@ import { Socket } from "net";
import {
Newt,
newts,
NewtSession,
olms,
Olm,
OlmSession,
olms,
RemoteExitNode,
RemoteExitNodeSession,
remoteExitNodes,
sites
} from "@server/db";
import { eq } from "drizzle-orm";
import { db } from "@server/db";
import { recordPing } from "@server/routers/newt/pingAccumulator";
import { validateNewtSessionToken } from "@server/auth/sessions/newt";
import { validateOlmSessionToken } from "@server/auth/sessions/olm";
import logger from "@server/logger";
@@ -197,11 +194,7 @@ const connectedClients: Map<string, AuthenticatedWebSocket[]> = new Map();
// Config version tracking map (local to this node, resets on server restart)
const clientConfigVersions: Map<string, number> = new Map();
// Tracks the last Unix timestamp (seconds) at which a ping was flushed to the
// DB for a given siteId. Resets on server restart — which is fine; the first
// ping after startup will always write, re-establishing the online state.
const lastPingDbWrite: Map<number, number> = new Map();
const PING_DB_WRITE_INTERVAL = 45; // seconds
// Recovery tracking
let isRedisRecoveryInProgress = false;
@@ -853,32 +846,16 @@ const setupConnection = async (
);
});
// Handle WebSocket protocol-level pings from older newt clients that do
// not send application-level "newt/ping" messages. Update the site's
// online state and lastPing timestamp so the offline checker treats them
// the same as modern newt clients.
if (clientType === "newt") {
const newtClient = client as Newt;
ws.on("ping", async () => {
ws.on("ping", () => {
if (!newtClient.siteId) return;
const now = Math.floor(Date.now() / 1000);
const lastWrite = lastPingDbWrite.get(newtClient.siteId) ?? 0;
if (now - lastWrite < PING_DB_WRITE_INTERVAL) return;
lastPingDbWrite.set(newtClient.siteId, now);
try {
await db
.update(sites)
.set({
online: true,
lastPing: now
})
.where(eq(sites.siteId, newtClient.siteId));
} catch (error) {
logger.error(
"Error updating newt site online state on WS ping",
{ error }
);
}
// Record the ping in the accumulator instead of writing to the
// database on every WS ping frame. The accumulator flushes all
// pending pings in a single batched UPDATE every ~10s, which
// prevents connection pool exhaustion under load (especially
// with cross-region latency to the database).
recordPing(newtClient.siteId);
});
}

View File

@@ -208,7 +208,7 @@ export async function listAccessTokens(
.where(
or(
eq(userResources.userId, req.user!.userId),
inArray(roleResources.roleId, req.userOrgRoleIds!)
eq(roleResources.roleId, req.userOrgRoleId!)
)
);
} else {

View File

@@ -3,13 +3,12 @@ import { verifyResourceAccessToken } from "@server/auth/verifyResourceAccessToke
import {
getResourceByDomain,
getResourceRules,
getRoleName,
getRoleResourceAccess,
getUserOrgRole,
getUserResourceAccess,
getOrgLoginPage,
getUserSessionWithUser
} from "@server/db/queries/verifySessionQueries";
import { getUserOrgRoleIds } from "@server/lib/userOrgRoles";
import {
LoginPage,
Org,
@@ -917,9 +916,9 @@ async function isUserAllowedToAccessResource(
return null;
}
const userOrgRoleIds = await getUserOrgRoleIds(user.userId, resource.orgId);
const userOrgRole = await getUserOrgRole(user.userId, resource.orgId);
if (!userOrgRoleIds.length) {
if (!userOrgRole) {
return null;
}
@@ -935,23 +934,17 @@ async function isUserAllowedToAccessResource(
return null;
}
const roleNames: string[] = [];
for (const roleId of userOrgRoleIds) {
const roleResourceAccess = await getRoleResourceAccess(
resource.resourceId,
roleId
);
if (roleResourceAccess) {
const roleName = await getRoleName(roleId);
if (roleName) roleNames.push(roleName);
}
}
if (roleNames.length > 0) {
const roleResourceAccess = await getRoleResourceAccess(
resource.resourceId,
userOrgRole.roleId
);
if (roleResourceAccess) {
return {
username: user.username,
email: user.email,
name: user.name,
role: roleNames.join(", ")
role: userOrgRole.roleName
};
}
@@ -961,15 +954,11 @@ async function isUserAllowedToAccessResource(
);
if (userResourceAccess) {
const names = await Promise.all(
userOrgRoleIds.map((id) => getRoleName(id))
);
const role = names.filter(Boolean).join(", ") || "";
return {
username: user.username,
email: user.email,
name: user.name,
role
role: userOrgRole.roleName
};
}

View File

@@ -92,7 +92,7 @@ export async function createClient(
const { orgId } = parsedParams.data;
if (req.user && (!req.userOrgRoleIds || req.userOrgRoleIds.length === 0)) {
if (req.user && !req.userOrgRoleId) {
return next(
createHttpError(HttpCode.FORBIDDEN, "User does not have a role")
);
@@ -234,7 +234,7 @@ export async function createClient(
clientId: newClient.clientId
});
if (req.user && !req.userOrgRoleIds?.includes(adminRole.roleId)) {
if (req.user && req.userOrgRoleId != adminRole.roleId) {
// make sure the user can access the client
trx.insert(userClients).values({
userId: req.user.userId,

View File

@@ -297,7 +297,7 @@ export async function listClients(
.where(
or(
eq(userClients.userId, req.user!.userId),
inArray(roleClients.roleId, req.userOrgRoleIds!)
eq(roleClients.roleId, req.userOrgRoleId!)
)
);
} else {

View File

@@ -316,7 +316,7 @@ export async function listUserDevices(
.where(
or(
eq(userClients.userId, req.user!.userId),
inArray(roleClients.roleId, req.userOrgRoleIds!)
eq(roleClients.roleId, req.userOrgRoleId!)
)
);
} else {

View File

@@ -654,16 +654,6 @@ authenticated.post(
user.addUserRole
);
authenticated.delete(
"/role/:roleId/remove/:userId",
verifyRoleAccess,
verifyUserAccess,
verifyLimits,
verifyUserHasAction(ActionsEnum.removeUserRole),
logActionAudit(ActionsEnum.removeUserRole),
user.removeUserRole
);
authenticated.post(
"/resource/:resourceId/roles",
verifyResourceAccess,

View File

@@ -1,6 +1,5 @@
import { Request, Response, NextFunction } from "express";
import { eq, sql } from "drizzle-orm";
import { sites } from "@server/db";
import { sql } from "drizzle-orm";
import { db } from "@server/db";
import logger from "@server/logger";
import createHttpError from "http-errors";
@@ -31,7 +30,10 @@ const MAX_RETRIES = 3;
const BASE_DELAY_MS = 50;
// How often to flush accumulated bandwidth data to the database
const FLUSH_INTERVAL_MS = 30_000; // 30 seconds
const FLUSH_INTERVAL_MS = 300_000; // 300 seconds
// Maximum number of sites to include in a single batch UPDATE statement
const BATCH_CHUNK_SIZE = 250;
// In-memory accumulator: publicKey -> AccumulatorEntry
let accumulator = new Map<string, AccumulatorEntry>();
@@ -75,13 +77,33 @@ async function withDeadlockRetry<T>(
}
}
/**
 * Execute a raw SQL query that returns rows, in a way that works across both
 * the PostgreSQL driver (which exposes `execute`) and the SQLite driver (which
 * exposes `all`). Drizzle's typed query builder doesn't support bulk
 * UPDATE … FROM (VALUES …) natively, so we drop to raw SQL here.
 */
async function dbQueryRows<T extends Record<string, unknown>>(
    query: Parameters<(typeof sql)["join"]>[0][number]
): Promise<T[]> {
    const driver = db as any;
    if (typeof driver.execute !== "function") {
        // SQLite (better-sqlite3 via Drizzle) — rows come back as an array
        return (await driver.all(query)) as T[];
    }
    // PostgreSQL (node-postgres via Drizzle) — may return { rows: [...] }
    // or a bare array depending on driver version.
    const result = await driver.execute(query);
    if (Array.isArray(result)) {
        return result as T[];
    }
    return (result.rows ?? []) as T[];
}
/**
* Flush all accumulated site bandwidth data to the database.
*
* Swaps out the accumulator before writing so that any bandwidth messages
* received during the flush are captured in the new accumulator rather than
* being lost or causing contention. Entries that fail to write are re-queued
* back into the accumulator so they will be retried on the next flush.
* being lost or causing contention. Sites are updated in chunks via a single
* batch UPDATE per chunk. Failed chunks are discarded — exact per-flush
* accuracy is not critical and re-queuing is not worth the added complexity.
*
* This function is exported so that the application's graceful-shutdown
* cleanup handler can call it before the process exits.
@@ -108,76 +130,76 @@ export async function flushSiteBandwidthToDb(): Promise<void> {
`Flushing accumulated bandwidth data for ${sortedEntries.length} site(s) to the database`
);
// Aggregate billing usage by org, collected during the DB update loop.
// Build a lookup so post-processing can reach each entry by publicKey.
const snapshotMap = new Map(sortedEntries);
// Aggregate billing usage by org across all chunks.
const orgUsageMap = new Map<string, number>();
for (const [publicKey, { bytesIn, bytesOut, exitNodeId, calcUsage }] of sortedEntries) {
// Process in chunks so individual queries stay at a reasonable size.
for (let i = 0; i < sortedEntries.length; i += BATCH_CHUNK_SIZE) {
const chunk = sortedEntries.slice(i, i + BATCH_CHUNK_SIZE);
const chunkEnd = i + chunk.length - 1;
// Build a parameterised VALUES list: (pubKey, bytesIn, bytesOut), ...
// Both PostgreSQL and SQLite (≥ 3.33.0, which better-sqlite3 bundles)
// support UPDATE … FROM (VALUES …), letting us update the whole chunk
// in a single query instead of N individual round-trips.
const valuesList = chunk.map(([publicKey, { bytesIn, bytesOut }]) =>
sql`(${publicKey}::text, ${bytesIn}::real, ${bytesOut}::real)`
);
const valuesClause = sql.join(valuesList, sql`, `);
let rows: { orgId: string; pubKey: string }[] = [];
try {
const updatedSite = await withDeadlockRetry(async () => {
const [result] = await db
.update(sites)
.set({
megabytesOut: sql`COALESCE(${sites.megabytesOut}, 0) + ${bytesIn}`,
megabytesIn: sql`COALESCE(${sites.megabytesIn}, 0) + ${bytesOut}`,
lastBandwidthUpdate: currentTime,
})
.where(eq(sites.pubKey, publicKey))
.returning({
orgId: sites.orgId,
siteId: sites.siteId
});
return result;
}, `flush bandwidth for site ${publicKey}`);
if (updatedSite) {
if (exitNodeId) {
const notAllowed = await checkExitNodeOrg(
exitNodeId,
updatedSite.orgId
);
if (notAllowed) {
logger.warn(
`Exit node ${exitNodeId} is not allowed for org ${updatedSite.orgId}`
);
// Skip usage tracking for this site but continue
// processing the rest.
continue;
}
}
if (calcUsage) {
const totalBandwidth = bytesIn + bytesOut;
const current = orgUsageMap.get(updatedSite.orgId) ?? 0;
orgUsageMap.set(updatedSite.orgId, current + totalBandwidth);
}
}
rows = await withDeadlockRetry(async () => {
return dbQueryRows<{ orgId: string; pubKey: string }>(sql`
UPDATE sites
SET
"bytesOut" = COALESCE("bytesOut", 0) + v.bytes_in,
"bytesIn" = COALESCE("bytesIn", 0) + v.bytes_out,
"lastBandwidthUpdate" = ${currentTime}
FROM (VALUES ${valuesClause}) AS v(pub_key, bytes_in, bytes_out)
WHERE sites."pubKey" = v.pub_key
RETURNING sites."orgId" AS "orgId", sites."pubKey" AS "pubKey"
`);
}, `flush bandwidth chunk [${i}${chunkEnd}]`);
} catch (error) {
logger.error(
`Failed to flush bandwidth for site ${publicKey}:`,
`Failed to flush bandwidth chunk [${i}${chunkEnd}], discarding ${chunk.length} site(s):`,
error
);
// Discard the chunk — exact per-flush accuracy is not critical.
continue;
}
// Re-queue the failed entry so it is retried on the next flush
// rather than silently dropped.
const existing = accumulator.get(publicKey);
if (existing) {
existing.bytesIn += bytesIn;
existing.bytesOut += bytesOut;
} else {
accumulator.set(publicKey, {
bytesIn,
bytesOut,
exitNodeId,
calcUsage
});
// Collect billing usage from the returned rows.
for (const { orgId, pubKey } of rows) {
const entry = snapshotMap.get(pubKey);
if (!entry) continue;
const { bytesIn, bytesOut, exitNodeId, calcUsage } = entry;
if (exitNodeId) {
const notAllowed = await checkExitNodeOrg(exitNodeId, orgId);
if (notAllowed) {
logger.warn(
`Exit node ${exitNodeId} is not allowed for org ${orgId}`
);
continue;
}
}
if (calcUsage) {
const current = orgUsageMap.get(orgId) ?? 0;
orgUsageMap.set(orgId, current + bytesIn + bytesOut);
}
}
}
// Process billing usage updates outside the site-update loop to keep
// lock scope small and concerns separated.
// Process billing usage updates after all chunks are written.
if (orgUsageMap.size > 0) {
// Sort org IDs for consistent lock ordering.
const sortedOrgIds = [...orgUsageMap.keys()].sort();
for (const orgId of sortedOrgIds) {

View File

@@ -13,7 +13,6 @@ import {
orgs,
Role,
roles,
userOrgRoles,
userOrgs,
users
} from "@server/db";
@@ -571,28 +570,32 @@ export async function validateOidcCallback(
}
}
// Ensure IDP-provided role exists for existing auto-provisioned orgs (add only; never delete other roles)
const userRolesInOrgs = await trx
.select()
.from(userOrgRoles)
.where(eq(userOrgRoles.userId, userId!));
for (const currentOrg of autoProvisionedOrgs) {
const newRole = userOrgInfo.find(
(newOrg) => newOrg.orgId === currentOrg.orgId
);
if (!newRole) continue;
const currentRolesInOrg = userRolesInOrgs.filter(
(r) => r.orgId === currentOrg.orgId
);
const hasIdpRole = currentRolesInOrg.some(
(r) => r.roleId === newRole.roleId
);
if (!hasIdpRole) {
await trx.insert(userOrgRoles).values({
userId: userId!,
orgId: currentOrg.orgId,
roleId: newRole.roleId
});
// Update roles for existing auto-provisioned orgs where the role has changed
const orgsToUpdate = autoProvisionedOrgs.filter(
(currentOrg) => {
const newOrg = userOrgInfo.find(
(newOrg) => newOrg.orgId === currentOrg.orgId
);
return newOrg && newOrg.roleId !== currentOrg.roleId;
}
);
if (orgsToUpdate.length > 0) {
for (const org of orgsToUpdate) {
const newRole = userOrgInfo.find(
(newOrg) => newOrg.orgId === org.orgId
);
if (newRole) {
await trx
.update(userOrgs)
.set({ roleId: newRole.roleId })
.where(
and(
eq(userOrgs.userId, userId!),
eq(userOrgs.orgId, org.orgId)
)
);
}
}
}
@@ -616,9 +619,9 @@ export async function validateOidcCallback(
{
orgId: org.orgId,
userId: userId!,
roleId: org.roleId,
autoProvisioned: true,
},
org.roleId,
trx
);
}

View File

@@ -598,16 +598,6 @@ authenticated.post(
user.addUserRole
);
authenticated.delete(
"/role/:roleId/remove/:userId",
verifyApiKeyRoleAccess,
verifyApiKeyUserAccess,
verifyLimits,
verifyApiKeyHasAction(ActionsEnum.removeUserRole),
logActionAudit(ActionsEnum.removeUserRole),
user.removeUserRole
);
authenticated.post(
"/resource/:resourceId/roles",
verifyApiKeyResourceAccess,

View File

@@ -46,7 +46,7 @@ export async function createNewt(
const { newtId, secret } = parsedBody.data;
if (req.user && (!req.userOrgRoleIds || req.userOrgRoleIds.length === 0)) {
if (req.user && !req.userOrgRoleId) {
return next(
createHttpError(HttpCode.FORBIDDEN, "User does not have a role")
);

View File

@@ -1,6 +1,8 @@
import { generateSessionToken } from "@server/auth/sessions/app";
import { db } from "@server/db";
import { db, newtSessions } from "@server/db";
import { newts } from "@server/db";
import { getOrCreateCachedToken } from "#dynamic/lib/tokenCache";
import { EXPIRES } from "@server/auth/sessions/newt";
import HttpCode from "@server/types/HttpCode";
import response from "@server/lib/response";
import { eq } from "drizzle-orm";
@@ -92,8 +94,19 @@ export async function getNewtToken(
);
}
const resToken = generateSessionToken();
await createNewtSession(resToken, existingNewt.newtId);
// Return a cached token if one exists to prevent thundering herd on
// simultaneous restarts; falls back to creating a fresh session when
// Redis is unavailable or the cache has expired.
const resToken = await getOrCreateCachedToken(
`newt:token_cache:${existingNewt.newtId}`,
config.getRawConfig().server.secret!,
Math.floor(EXPIRES / 1000),
async () => {
const token = generateSessionToken();
await createNewtSession(token, existingNewt.newtId);
return token;
}
);
return response<{ token: string; serverVersion: string }>(res, {
data: {

View File

@@ -5,6 +5,7 @@ import { Newt } from "@server/db";
import { eq, lt, isNull, and, or } from "drizzle-orm";
import logger from "@server/logger";
import { sendNewtSyncMessage } from "./sync";
import { recordPing } from "./pingAccumulator";
// Track if the offline checker interval is running
let offlineCheckerInterval: NodeJS.Timeout | null = null;
@@ -114,18 +115,12 @@ export const handleNewtPingMessage: MessageHandler = async (context) => {
return;
}
try {
// Mark the site as online and record the ping timestamp.
await db
.update(sites)
.set({
online: true,
lastPing: Math.floor(Date.now() / 1000)
})
.where(eq(sites.siteId, newt.siteId));
} catch (error) {
logger.error("Error updating online state on newt ping", { error });
}
// Record the ping in memory; it will be flushed to the database
// periodically by the ping accumulator (every ~10s) in a single
// batched UPDATE instead of one query per ping. This prevents
// connection pool exhaustion under load, especially with
// cross-region latency to the database.
recordPing(newt.siteId);
// Check config version and sync if stale.
const configVersion = await getClientConfigVersion(newt.newtId);

View File

@@ -0,0 +1,382 @@
import { db } from "@server/db";
import { sites, clients, olms } from "@server/db";
import { eq, inArray } from "drizzle-orm";
import logger from "@server/logger";
/**
* Ping Accumulator
*
* Instead of writing to the database on every single newt/olm ping (which
* causes pool exhaustion under load, especially with cross-region latency),
* we accumulate pings in memory and flush them to the database periodically
* in a single batch.
*
* This is the same pattern used for bandwidth flushing in
* receiveBandwidth.ts and handleReceiveBandwidthMessage.ts.
*
* Supports two kinds of pings:
* - **Site pings** (from newts): update `sites.online` and `sites.lastPing`
* - **Client pings** (from OLMs): update `clients.online`, `clients.lastPing`,
* `clients.archived`, and optionally reset `olms.archived`
*/
const FLUSH_INTERVAL_MS = 10_000; // Flush every 10 seconds
const MAX_RETRIES = 2;
const BASE_DELAY_MS = 50;
// ── Site (newt) pings ──────────────────────────────────────────────────
// Map of siteId -> latest ping timestamp (unix seconds)
const pendingSitePings: Map<number, number> = new Map();
// ── Client (OLM) pings ────────────────────────────────────────────────
// Map of clientId -> latest ping timestamp (unix seconds)
const pendingClientPings: Map<number, number> = new Map();
// Set of olmIds whose `archived` flag should be reset to false
const pendingOlmArchiveResets: Set<string> = new Set();
let flushTimer: NodeJS.Timeout | null = null;
// ── Public API ─────────────────────────────────────────────────────────
/**
 * Record a ping for a newt site. Nothing is written to the database here:
 * only the latest ping timestamp is kept in memory, and the background
 * flush timer persists it periodically.
 */
export function recordSitePing(siteId: number): void {
    pendingSitePings.set(siteId, Math.floor(Date.now() / 1000));
}
/** @deprecated Use `recordSitePing` instead. Alias kept for existing call-sites. */
export const recordPing = recordSitePing;
/**
 * Queue a ping for an OLM client. The `clients` row update (`online`,
 * `lastPing`, `archived`) is batched; when `olmArchived` is set, an
 * `olms` row update clearing its archived flag is queued as well.
 */
export function recordClientPing(
    clientId: number,
    olmId: string,
    olmArchived: boolean
): void {
    const nowSeconds = Math.floor(Date.now() / 1000);
    pendingClientPings.set(clientId, nowSeconds);
    if (!olmArchived) {
        return;
    }
    pendingOlmArchiveResets.add(olmId);
}
// ── Flush Logic ────────────────────────────────────────────────────────
/**
 * Flush all accumulated site pings to the database.
 *
 * Snapshots the pending map, groups site IDs by ping timestamp, and issues
 * bulk UPDATEs in batches of 50 (sorted by siteId). A failed batch is
 * re-queued for the next cycle, never overwriting a newer in-memory ping.
 */
async function flushSitePingsToDb(): Promise<void> {
    if (pendingSitePings.size === 0) {
        return;
    }
    // Snapshot and clear so new pings arriving during the flush go into a
    // fresh map for the next cycle.
    const pingsToFlush = new Map(pendingSitePings);
    pendingSitePings.clear();
    // Sort by siteId for consistent lock ordering (prevents deadlocks)
    const sortedEntries = Array.from(pingsToFlush.entries()).sort(
        ([a], [b]) => a - b
    );
    const BATCH_SIZE = 50;
    for (let i = 0; i < sortedEntries.length; i += BATCH_SIZE) {
        const batch = sortedEntries.slice(i, i + BATCH_SIZE);
        try {
            await withRetry(async () => {
                // Group by timestamp for efficient bulk updates
                const byTimestamp = new Map<number, number[]>();
                for (const [siteId, timestamp] of batch) {
                    const group = byTimestamp.get(timestamp) || [];
                    group.push(siteId);
                    byTimestamp.set(timestamp, group);
                }
                if (byTimestamp.size === 1) {
                    // Common case: every site in the batch shares one
                    // timestamp -> a single UPDATE, no transaction needed.
                    const [timestamp, siteIds] = Array.from(
                        byTimestamp.entries()
                    )[0];
                    await db
                        .update(sites)
                        .set({
                            online: true,
                            lastPing: timestamp
                        })
                        .where(inArray(sites.siteId, siteIds));
                } else {
                    // Multiple timestamps -> one UPDATE per timestamp group,
                    // wrapped in a transaction so the batch applies atomically.
                    await db.transaction(async (tx) => {
                        for (const [timestamp, siteIds] of byTimestamp) {
                            await tx
                                .update(sites)
                                .set({
                                    online: true,
                                    lastPing: timestamp
                                })
                                .where(inArray(sites.siteId, siteIds));
                        }
                    });
                }
            }, "flushSitePingsToDb");
        } catch (error) {
            logger.error(
                `Failed to flush site ping batch (${batch.length} sites), re-queuing for next cycle`,
                { error }
            );
            // Re-queue the failed batch, but keep any newer timestamp that
            // was recorded for a site while this flush was running.
            for (const [siteId, timestamp] of batch) {
                const existing = pendingSitePings.get(siteId);
                if (!existing || existing < timestamp) {
                    pendingSitePings.set(siteId, timestamp);
                }
            }
        }
    }
}
/**
 * Flush all accumulated client (OLM) pings to the database.
 *
 * Two phases: (1) batched `clients` updates (online/lastPing/archived),
 * grouped by timestamp like the site flush; (2) batched `olms` updates
 * clearing the archived flag. Each failed batch is re-queued for the
 * next cycle.
 */
async function flushClientPingsToDb(): Promise<void> {
    if (pendingClientPings.size === 0 && pendingOlmArchiveResets.size === 0) {
        return;
    }
    // Snapshot and clear
    const pingsToFlush = new Map(pendingClientPings);
    pendingClientPings.clear();
    const olmResetsToFlush = new Set(pendingOlmArchiveResets);
    pendingOlmArchiveResets.clear();
    // ── Flush client pings ─────────────────────────────────────────────
    if (pingsToFlush.size > 0) {
        // Sort by clientId for consistent lock ordering (prevents deadlocks)
        const sortedEntries = Array.from(pingsToFlush.entries()).sort(
            ([a], [b]) => a - b
        );
        const BATCH_SIZE = 50;
        for (let i = 0; i < sortedEntries.length; i += BATCH_SIZE) {
            const batch = sortedEntries.slice(i, i + BATCH_SIZE);
            try {
                await withRetry(async () => {
                    // Group by timestamp for efficient bulk updates
                    const byTimestamp = new Map<number, number[]>();
                    for (const [clientId, timestamp] of batch) {
                        const group = byTimestamp.get(timestamp) || [];
                        group.push(clientId);
                        byTimestamp.set(timestamp, group);
                    }
                    if (byTimestamp.size === 1) {
                        // Single shared timestamp -> one UPDATE, no transaction.
                        const [timestamp, clientIds] = Array.from(
                            byTimestamp.entries()
                        )[0];
                        await db
                            .update(clients)
                            .set({
                                lastPing: timestamp,
                                online: true,
                                archived: false
                            })
                            .where(inArray(clients.clientId, clientIds));
                    } else {
                        // One UPDATE per timestamp group, applied atomically.
                        await db.transaction(async (tx) => {
                            for (const [timestamp, clientIds] of byTimestamp) {
                                await tx
                                    .update(clients)
                                    .set({
                                        lastPing: timestamp,
                                        online: true,
                                        archived: false
                                    })
                                    .where(
                                        inArray(clients.clientId, clientIds)
                                    );
                            }
                        });
                    }
                }, "flushClientPingsToDb");
            } catch (error) {
                logger.error(
                    `Failed to flush client ping batch (${batch.length} clients), re-queuing for next cycle`,
                    { error }
                );
                // Re-queue, keeping any newer timestamp recorded mid-flush.
                for (const [clientId, timestamp] of batch) {
                    const existing = pendingClientPings.get(clientId);
                    if (!existing || existing < timestamp) {
                        pendingClientPings.set(clientId, timestamp);
                    }
                }
            }
        }
    }
    // ── Flush OLM archive resets ───────────────────────────────────────
    if (olmResetsToFlush.size > 0) {
        // Sorted for deterministic batch composition across cycles.
        const olmIds = Array.from(olmResetsToFlush).sort();
        const BATCH_SIZE = 50;
        for (let i = 0; i < olmIds.length; i += BATCH_SIZE) {
            const batch = olmIds.slice(i, i + BATCH_SIZE);
            try {
                await withRetry(async () => {
                    await db
                        .update(olms)
                        .set({ archived: false })
                        .where(inArray(olms.olmId, batch));
                }, "flushOlmArchiveResets");
            } catch (error) {
                logger.error(
                    `Failed to flush OLM archive reset batch (${batch.length} olms), re-queuing for next cycle`,
                    { error }
                );
                for (const olmId of batch) {
                    pendingOlmArchiveResets.add(olmId);
                }
            }
        }
    }
}
/**
 * Flush every pending ping category to the database: site pings first,
 * then client pings. Called by the interval timer and during shutdown.
 */
export async function flushPingsToDb(): Promise<void> {
    // Run the flushes one after another, preserving site-then-client order.
    for (const flush of [flushSitePingsToDb, flushClientPingsToDb]) {
        await flush();
    }
}
// ── Retry / Error Helpers ──────────────────────────────────────────────
/**
 * Simple retry wrapper with exponential backoff for transient errors
 * (connection timeouts, unexpected disconnects).
 *
 * Non-transient errors, and transient errors after MAX_RETRIES attempts,
 * are rethrown to the caller.
 */
async function withRetry<T>(
    operation: () => Promise<T>,
    context: string
): Promise<T> {
    for (let attempt = 1; ; attempt++) {
        try {
            return await operation();
        } catch (error: any) {
            // Give up immediately on non-transient errors or once the
            // retry budget is exhausted.
            if (!isTransientError(error) || attempt > MAX_RETRIES) {
                throw error;
            }
            // Exponential backoff (BASE, 2*BASE, ...) plus up to 100% jitter.
            const baseDelay = 2 ** (attempt - 1) * BASE_DELAY_MS;
            const delay = baseDelay + Math.random() * baseDelay;
            logger.warn(
                `Transient DB error in ${context}, retrying attempt ${attempt}/${MAX_RETRIES} after ${delay.toFixed(0)}ms`
            );
            await new Promise((resolve) => setTimeout(resolve, delay));
        }
    }
}
/**
 * Detect transient connection errors that are safe to retry: connection
 * timeouts/terminations, PostgreSQL deadlocks (40P01), and common socket
 * error codes.
 */
function isTransientError(error: any): boolean {
    if (!error) return false;
    const message = (error.message || "").toLowerCase();
    const causeMessage = (error.cause?.message || "").toLowerCase();
    const code = error.code || "";

    // Connection timeout / terminated (top-level message)
    const messageMarkers = [
        "connection timeout",
        "connection terminated",
        "timeout exceeded when trying to connect"
    ];
    if (messageMarkers.some((marker) => message.includes(marker))) {
        return true;
    }

    // Same signals surfaced on the wrapped cause
    const causeMarkers = [
        "connection terminated unexpectedly",
        "connection timeout"
    ];
    if (causeMarkers.some((marker) => causeMessage.includes(marker))) {
        return true;
    }

    // PostgreSQL deadlock
    if (code === "40P01" || message.includes("deadlock")) {
        return true;
    }

    // Retriable socket-level error codes
    return ["ECONNRESET", "ECONNREFUSED", "EPIPE", "ETIMEDOUT"].includes(code);
}
// ── Lifecycle ──────────────────────────────────────────────────────────
/**
 * Start the background flush timer. Safe to call repeatedly: only the
 * first call arms the interval. Call once at server startup.
 */
export function startPingAccumulator(): void {
    if (flushTimer) {
        return; // Already running
    }
    const tick = async () => {
        try {
            await flushPingsToDb();
        } catch (error) {
            logger.error("Unhandled error in ping accumulator flush", {
                error
            });
        }
    };
    flushTimer = setInterval(tick, FLUSH_INTERVAL_MS);
    // Don't prevent the process from exiting
    flushTimer.unref();
    logger.info(
        `Ping accumulator started (flush interval: ${FLUSH_INTERVAL_MS}ms)`
    );
}
/**
 * Stop the background flush timer and perform one last flush so pending
 * pings are persisted. Call this during graceful shutdown.
 */
export async function stopPingAccumulator(): Promise<void> {
    if (flushTimer !== null) {
        clearInterval(flushTimer);
        flushTimer = null;
    }
    try {
        // Final flush to persist any remaining pings
        await flushPingsToDb();
    } catch (error) {
        logger.error("Error during final ping accumulator flush", { error });
    }
    logger.info("Ping accumulator stopped");
}
/**
 * Number of pings (site + client) still waiting to be flushed.
 * Useful for monitoring.
 */
export function getPendingPingCount(): number {
    const queuedSitePings = pendingSitePings.size;
    const queuedClientPings = pendingClientPings.size;
    return queuedSitePings + queuedClientPings;
}

View File

@@ -46,7 +46,7 @@ export async function createNewt(
const { newtId, secret } = parsedBody.data;
if (req.user && (!req.userOrgRoleIds || req.userOrgRoleIds.length === 0)) {
if (req.user && !req.userOrgRoleId) {
return next(
createHttpError(HttpCode.FORBIDDEN, "User does not have a role")
);

View File

@@ -8,7 +8,7 @@ import {
ExitNode,
exitNodes,
sites,
clientSitesAssociationsCache
clientSitesAssociationsCache,
} from "@server/db";
import { olms } from "@server/db";
import HttpCode from "@server/types/HttpCode";
@@ -20,8 +20,10 @@ import { z } from "zod";
import { fromError } from "zod-validation-error";
import {
createOlmSession,
validateOlmSessionToken
validateOlmSessionToken,
EXPIRES
} from "@server/auth/sessions/olm";
import { getOrCreateCachedToken } from "#dynamic/lib/tokenCache";
import { verifyPassword } from "@server/auth/password";
import logger from "@server/logger";
import config from "@server/lib/config";
@@ -132,8 +134,19 @@ export async function getOlmToken(
logger.debug("Creating new olm session token");
const resToken = generateSessionToken();
await createOlmSession(resToken, existingOlm.olmId);
// Return a cached token if one exists to prevent thundering herd on
// simultaneous restarts; falls back to creating a fresh session when
// Redis is unavailable or the cache has expired.
const resToken = await getOrCreateCachedToken(
`olm:token_cache:${existingOlm.olmId}`,
config.getRawConfig().server.secret!,
Math.floor(EXPIRES / 1000),
async () => {
const token = generateSessionToken();
await createOlmSession(token, existingOlm.olmId);
return token;
}
);
let clientIdToUse;
if (orgId) {

View File

@@ -3,6 +3,7 @@ import { db } from "@server/db";
import { MessageHandler } from "@server/routers/ws";
import { clients, olms, Olm } from "@server/db";
import { eq, lt, isNull, and, or } from "drizzle-orm";
import { recordClientPing } from "@server/routers/newt/pingAccumulator";
import logger from "@server/logger";
import { validateSessionToken } from "@server/auth/sessions/app";
import { checkOrgAccessPolicy } from "#dynamic/lib/checkOrgAccessPolicy";
@@ -201,22 +202,12 @@ export const handleOlmPingMessage: MessageHandler = async (context) => {
await sendOlmSyncMessage(olm, client);
}
// Update the client's last ping timestamp
await db
.update(clients)
.set({
lastPing: Math.floor(Date.now() / 1000),
online: true,
archived: false
})
.where(eq(clients.clientId, olm.clientId));
if (olm.archived) {
await db
.update(olms)
.set({ archived: false })
.where(eq(olms.olmId, olm.olmId));
}
// Record the ping in memory; it will be flushed to the database
// periodically by the ping accumulator (every ~10s) in a single
// batched UPDATE instead of one query per ping. This prevents
// connection pool exhaustion under load, especially with
// cross-region latency to the database.
recordClientPing(olm.clientId, olm.olmId, !!olm.archived);
} catch (error) {
logger.error("Error handling ping message", { error });
}

View File

@@ -1,7 +1,7 @@
import { Request, Response, NextFunction } from "express";
import { z } from "zod";
import { db, idp, idpOidcConfig } from "@server/db";
import { roles, userOrgRoles, userOrgs, users } from "@server/db";
import { roles, userOrgs, users } from "@server/db";
import { and, eq } from "drizzle-orm";
import response from "@server/lib/response";
import HttpCode from "@server/types/HttpCode";
@@ -14,7 +14,7 @@ import { checkOrgAccessPolicy } from "#dynamic/lib/checkOrgAccessPolicy";
import { CheckOrgAccessPolicyResult } from "@server/lib/checkOrgAccessPolicy";
async function queryUser(orgId: string, userId: string) {
const [userRow] = await db
const [user] = await db
.select({
orgId: userOrgs.orgId,
userId: users.userId,
@@ -22,7 +22,10 @@ async function queryUser(orgId: string, userId: string) {
username: users.username,
name: users.name,
type: users.type,
roleId: userOrgs.roleId,
roleName: roles.name,
isOwner: userOrgs.isOwner,
isAdmin: roles.isAdmin,
twoFactorEnabled: users.twoFactorEnabled,
autoProvisioned: userOrgs.autoProvisioned,
idpId: users.idpId,
@@ -32,40 +35,13 @@ async function queryUser(orgId: string, userId: string) {
idpAutoProvision: idp.autoProvision
})
.from(userOrgs)
.leftJoin(roles, eq(userOrgs.roleId, roles.roleId))
.leftJoin(users, eq(userOrgs.userId, users.userId))
.leftJoin(idp, eq(users.idpId, idp.idpId))
.leftJoin(idpOidcConfig, eq(idp.idpId, idpOidcConfig.idpId))
.where(and(eq(userOrgs.userId, userId), eq(userOrgs.orgId, orgId)))
.limit(1);
if (!userRow) return undefined;
const roleRows = await db
.select({
roleId: userOrgRoles.roleId,
roleName: roles.name,
isAdmin: roles.isAdmin
})
.from(userOrgRoles)
.leftJoin(roles, eq(userOrgRoles.roleId, roles.roleId))
.where(
and(
eq(userOrgRoles.userId, userId),
eq(userOrgRoles.orgId, orgId)
)
);
const isAdmin = roleRows.some((r) => r.isAdmin);
return {
...userRow,
isAdmin,
roleIds: roleRows.map((r) => r.roleId),
roles: roleRows.map((r) => ({
roleId: r.roleId,
name: r.roleName ?? ""
}))
};
return user;
}
export type CheckOrgUserAccessResponse = CheckOrgAccessPolicyResult;

View File

@@ -9,7 +9,6 @@ import {
orgs,
roleActions,
roles,
userOrgRoles,
userOrgs,
users,
actions
@@ -313,13 +312,9 @@ export async function createOrg(
await trx.insert(userOrgs).values({
userId: req.user!.userId,
orgId: newOrg[0].orgId,
roleId: roleId,
isOwner: true
});
await trx.insert(userOrgRoles).values({
userId: req.user!.userId,
orgId: newOrg[0].orgId,
roleId
});
ownerUserId = req.user!.userId;
} else {
// if org created by root api key, set the server admin as the owner
@@ -337,13 +332,9 @@ export async function createOrg(
await trx.insert(userOrgs).values({
userId: serverAdmin.userId,
orgId: newOrg[0].orgId,
roleId: roleId,
isOwner: true
});
await trx.insert(userOrgRoles).values({
userId: serverAdmin.userId,
orgId: newOrg[0].orgId,
roleId
});
ownerUserId = serverAdmin.userId;
}

View File

@@ -117,26 +117,20 @@ export async function getOrgOverview(
.from(userOrgs)
.where(eq(userOrgs.orgId, orgId));
const roleIds = req.userOrgRoleIds ?? [];
const roleRows =
roleIds.length > 0
? await db
.select({ name: roles.name, isAdmin: roles.isAdmin })
.from(roles)
.where(inArray(roles.roleId, roleIds))
: [];
const userRoleName = roleRows.map((r) => r.name ?? "").join(", ") ?? "";
const isAdmin = roleRows.some((r) => r.isAdmin === true);
const [role] = await db
.select()
.from(roles)
.where(eq(roles.roleId, req.userOrg.roleId));
return response<GetOrgOverviewResponse>(res, {
data: {
orgName: org[0].name,
orgId: org[0].orgId,
userRoleName,
userRoleName: role.name,
numSites,
numUsers,
numResources,
isAdmin,
isAdmin: role.isAdmin || false,
isOwner: req.userOrg?.isOwner || false
},
success: true,

View File

@@ -1,7 +1,7 @@
import { Request, Response, NextFunction } from "express";
import { z } from "zod";
import { db, roles } from "@server/db";
import { Org, orgs, userOrgRoles, userOrgs } from "@server/db";
import { Org, orgs, userOrgs } from "@server/db";
import response from "@server/lib/response";
import HttpCode from "@server/types/HttpCode";
import createHttpError from "http-errors";
@@ -82,7 +82,10 @@ export async function listUserOrgs(
const { userId } = parsedParams.data;
const userOrganizations = await db
.select({ orgId: userOrgs.orgId })
.select({
orgId: userOrgs.orgId,
roleId: userOrgs.roleId
})
.from(userOrgs)
.where(eq(userOrgs.userId, userId));
@@ -113,27 +116,10 @@ export async function listUserOrgs(
userOrgs,
and(eq(userOrgs.orgId, orgs.orgId), eq(userOrgs.userId, userId))
)
.leftJoin(roles, eq(userOrgs.roleId, roles.roleId))
.limit(limit)
.offset(offset);
const roleRows = await db
.select({
orgId: userOrgRoles.orgId,
isAdmin: roles.isAdmin
})
.from(userOrgRoles)
.leftJoin(roles, eq(userOrgRoles.roleId, roles.roleId))
.where(
and(
eq(userOrgRoles.userId, userId),
inArray(userOrgRoles.orgId, userOrgIds)
)
);
const orgHasAdmin = new Set(
roleRows.filter((r) => r.isAdmin).map((r) => r.orgId)
);
const totalCountResult = await db
.select({ count: sql<number>`cast(count(*) as integer)` })
.from(orgs)
@@ -147,8 +133,8 @@ export async function listUserOrgs(
if (val.userOrgs && val.userOrgs.isOwner) {
res.isOwner = val.userOrgs.isOwner;
}
if (val.orgs && orgHasAdmin.has(val.orgs.orgId)) {
res.isAdmin = true;
if (val.roles && val.roles.isAdmin) {
res.isAdmin = val.roles.isAdmin;
}
if (val.userOrgs?.isOwner && val.orgs?.isBillingOrg) {
res.isPrimaryOrg = val.orgs.isBillingOrg;

View File

@@ -112,7 +112,7 @@ export async function createResource(
const { orgId } = parsedParams.data;
if (req.user && (!req.userOrgRoleIds || req.userOrgRoleIds.length === 0)) {
if (req.user && !req.userOrgRoleId) {
return next(
createHttpError(HttpCode.FORBIDDEN, "User does not have a role")
);
@@ -292,7 +292,7 @@ async function createHttpResource(
resourceId: newResource[0].resourceId
});
if (req.user && !req.userOrgRoleIds?.includes(adminRole[0].roleId)) {
if (req.user && req.userOrgRoleId != adminRole[0].roleId) {
// make sure the user can access the resource
await trx.insert(userResources).values({
userId: req.user?.userId!,
@@ -385,7 +385,7 @@ async function createRawResource(
resourceId: newResource[0].resourceId
});
if (req.user && !req.userOrgRoleIds?.includes(adminRole[0].roleId)) {
if (req.user && req.userOrgRoleId != adminRole[0].roleId) {
// make sure the user can access the resource
await trx.insert(userResources).values({
userId: req.user?.userId!,

View File

@@ -5,7 +5,6 @@ import {
resources,
userResources,
roleResources,
userOrgRoles,
userOrgs,
resourcePassword,
resourcePincode,
@@ -33,29 +32,22 @@ export async function getUserResources(
);
}
// Check user is in organization and get their role IDs
const [userOrg] = await db
.select()
// First get the user's role in the organization
const userOrgResult = await db
.select({
roleId: userOrgs.roleId
})
.from(userOrgs)
.where(and(eq(userOrgs.userId, userId), eq(userOrgs.orgId, orgId)))
.limit(1);
if (!userOrg) {
if (userOrgResult.length === 0) {
return next(
createHttpError(HttpCode.FORBIDDEN, "User not in organization")
);
}
const userRoleIds = await db
.select({ roleId: userOrgRoles.roleId })
.from(userOrgRoles)
.where(
and(
eq(userOrgRoles.userId, userId),
eq(userOrgRoles.orgId, orgId)
)
)
.then((rows) => rows.map((r) => r.roleId));
const userRoleId = userOrgResult[0].roleId;
// Get resources accessible through direct assignment or role assignment
const directResourcesQuery = db
@@ -63,28 +55,20 @@ export async function getUserResources(
.from(userResources)
.where(eq(userResources.userId, userId));
const roleResourcesQuery =
userRoleIds.length > 0
? db
.select({ resourceId: roleResources.resourceId })
.from(roleResources)
.where(inArray(roleResources.roleId, userRoleIds))
: Promise.resolve([]);
const roleResourcesQuery = db
.select({ resourceId: roleResources.resourceId })
.from(roleResources)
.where(eq(roleResources.roleId, userRoleId));
const directSiteResourcesQuery = db
.select({ siteResourceId: userSiteResources.siteResourceId })
.from(userSiteResources)
.where(eq(userSiteResources.userId, userId));
const roleSiteResourcesQuery =
userRoleIds.length > 0
? db
.select({
siteResourceId: roleSiteResources.siteResourceId
})
.from(roleSiteResources)
.where(inArray(roleSiteResources.roleId, userRoleIds))
: Promise.resolve([]);
const roleSiteResourcesQuery = db
.select({ siteResourceId: roleSiteResources.siteResourceId })
.from(roleSiteResources)
.where(eq(roleSiteResources.roleId, userRoleId));
const [directResources, roleResourceResults, directSiteResourceResults, roleSiteResourceResults] = await Promise.all([
directResourcesQuery,

View File

@@ -305,7 +305,7 @@ export async function listResources(
.where(
or(
eq(userResources.userId, req.user!.userId),
inArray(roleResources.roleId, req.userOrgRoleIds!)
eq(roleResources.roleId, req.userOrgRoleId!)
)
);
} else {

View File

@@ -1,7 +1,7 @@
import { Request, Response, NextFunction } from "express";
import { z } from "zod";
import { db } from "@server/db";
import { roles, userOrgRoles } from "@server/db";
import { roles, userOrgs } from "@server/db";
import { eq } from "drizzle-orm";
import response from "@server/lib/response";
import HttpCode from "@server/types/HttpCode";
@@ -114,11 +114,11 @@ export async function deleteRole(
}
await db.transaction(async (trx) => {
// move all users from userOrgRoles with roleId to newRoleId
// move all users from the userOrgs table with roleId to newRoleId
await trx
.update(userOrgRoles)
.update(userOrgs)
.set({ roleId: newRoleId })
.where(eq(userOrgRoles.roleId, roleId));
.where(eq(userOrgs.roleId, roleId));
// delete the old role
await trx.delete(roles).where(eq(roles.roleId, roleId));

View File

@@ -111,7 +111,7 @@ export async function createSite(
const { orgId } = parsedParams.data;
if (req.user && (!req.userOrgRoleIds || req.userOrgRoleIds.length === 0)) {
if (req.user && !req.userOrgRoleId) {
return next(
createHttpError(HttpCode.FORBIDDEN, "User does not have a role")
);
@@ -399,7 +399,7 @@ export async function createSite(
siteId: newSite.siteId
});
if (req.user && !req.userOrgRoleIds?.includes(adminRole[0].roleId)) {
if (req.user && req.userOrgRoleId != adminRole[0].roleId) {
// make sure the user can access the site
trx.insert(userSites).values({
userId: req.user?.userId!,

View File

@@ -235,7 +235,7 @@ export async function listSites(
.where(
or(
eq(userSites.userId, req.user!.userId),
inArray(roleSites.roleId, req.userOrgRoleIds!)
eq(roleSites.roleId, req.userOrgRoleId!)
)
);
} else {

View File

@@ -165,9 +165,9 @@ export async function acceptInvite(
org,
{
userId: existingUser[0].userId,
orgId: existingInvite.orgId
orgId: existingInvite.orgId,
roleId: existingInvite.roleId
},
existingInvite.roleId,
trx
);

View File

@@ -1,7 +1,7 @@
import { Request, Response, NextFunction } from "express";
import { z } from "zod";
import { clients, db } from "@server/db";
import { userOrgRoles, userOrgs, roles } from "@server/db";
import { clients, db, UserOrg } from "@server/db";
import { userOrgs, roles } from "@server/db";
import { eq, and } from "drizzle-orm";
import response from "@server/lib/response";
import HttpCode from "@server/types/HttpCode";
@@ -111,23 +111,20 @@ export async function addUserRole(
);
}
let newUserRole: { userId: string; orgId: string; roleId: number } | null =
null;
let newUserRole: UserOrg | null = null;
await db.transaction(async (trx) => {
const inserted = await trx
.insert(userOrgRoles)
.values({
userId,
orgId: role.orgId,
roleId
})
.onConflictDoNothing()
[newUserRole] = await trx
.update(userOrgs)
.set({ roleId })
.where(
and(
eq(userOrgs.userId, userId),
eq(userOrgs.orgId, role.orgId)
)
)
.returning();
if (inserted.length > 0) {
newUserRole = inserted[0];
}
// get the client associated with this user in this org
const orgClients = await trx
.select()
.from(clients)
@@ -136,15 +133,17 @@ export async function addUserRole(
eq(clients.userId, userId),
eq(clients.orgId, role.orgId)
)
);
)
.limit(1);
for (const orgClient of orgClients) {
// we just changed the user's role, so we need to rebuild client associations and what they have access to
await rebuildClientAssociationsFromClient(orgClient, trx);
}
});
return response(res, {
data: newUserRole ?? { userId, orgId: role.orgId, roleId },
data: newUserRole,
success: true,
error: false,
message: "Role added to user successfully",

View File

@@ -221,16 +221,12 @@ export async function createOrgUser(
);
}
await assignUserToOrg(
org,
{
orgId,
userId: existingUser.userId,
autoProvisioned: false,
},
role.roleId,
trx
);
await assignUserToOrg(org, {
orgId,
userId: existingUser.userId,
roleId: role.roleId,
autoProvisioned: false
}, trx);
} else {
userId = generateId(15);
@@ -248,16 +244,12 @@ export async function createOrgUser(
})
.returning();
await assignUserToOrg(
org,
{
orgId,
userId: newUser.userId,
autoProvisioned: false,
},
role.roleId,
trx
);
await assignUserToOrg(org, {
orgId,
userId: newUser.userId,
roleId: role.roleId,
autoProvisioned: false
}, trx);
}
await calculateUserClientsForOrgs(userId, trx);

View File

@@ -1,7 +1,7 @@
import { Request, Response, NextFunction } from "express";
import { z } from "zod";
import { db, idp, idpOidcConfig } from "@server/db";
import { roles, userOrgRoles, userOrgs, users } from "@server/db";
import { roles, userOrgs, users } from "@server/db";
import { and, eq } from "drizzle-orm";
import response from "@server/lib/response";
import HttpCode from "@server/types/HttpCode";
@@ -12,7 +12,7 @@ import { ActionsEnum, checkUserActionPermission } from "@server/auth/actions";
import { OpenAPITags, registry } from "@server/openApi";
export async function queryUser(orgId: string, userId: string) {
const [userRow] = await db
const [user] = await db
.select({
orgId: userOrgs.orgId,
userId: users.userId,
@@ -20,7 +20,10 @@ export async function queryUser(orgId: string, userId: string) {
username: users.username,
name: users.name,
type: users.type,
roleId: userOrgs.roleId,
roleName: roles.name,
isOwner: userOrgs.isOwner,
isAdmin: roles.isAdmin,
twoFactorEnabled: users.twoFactorEnabled,
autoProvisioned: userOrgs.autoProvisioned,
idpId: users.idpId,
@@ -30,40 +33,13 @@ export async function queryUser(orgId: string, userId: string) {
idpAutoProvision: idp.autoProvision
})
.from(userOrgs)
.leftJoin(roles, eq(userOrgs.roleId, roles.roleId))
.leftJoin(users, eq(userOrgs.userId, users.userId))
.leftJoin(idp, eq(users.idpId, idp.idpId))
.leftJoin(idpOidcConfig, eq(idp.idpId, idpOidcConfig.idpId))
.where(and(eq(userOrgs.userId, userId), eq(userOrgs.orgId, orgId)))
.limit(1);
if (!userRow) return undefined;
const roleRows = await db
.select({
roleId: userOrgRoles.roleId,
roleName: roles.name,
isAdmin: roles.isAdmin
})
.from(userOrgRoles)
.leftJoin(roles, eq(userOrgRoles.roleId, roles.roleId))
.where(
and(
eq(userOrgRoles.userId, userId),
eq(userOrgRoles.orgId, orgId)
)
);
const isAdmin = roleRows.some((r) => r.isAdmin);
return {
...userRow,
isAdmin,
roleIds: roleRows.map((r) => r.roleId),
roles: roleRows.map((r) => ({
roleId: r.roleId,
name: r.roleName ?? ""
}))
};
return user;
}
export type GetOrgUserResponse = NonNullable<

View File

@@ -2,7 +2,6 @@ export * from "./getUser";
export * from "./removeUserOrg";
export * from "./listUsers";
export * from "./addUserRole";
export * from "./removeUserRole";
export * from "./inviteUser";
export * from "./acceptInvite";
export * from "./getOrgUser";

View File

@@ -1,11 +1,11 @@
import { Request, Response, NextFunction } from "express";
import { z } from "zod";
import { db, idpOidcConfig } from "@server/db";
import { idp, roles, userOrgRoles, userOrgs, users } from "@server/db";
import { idp, roles, userOrgs, users } from "@server/db";
import response from "@server/lib/response";
import HttpCode from "@server/types/HttpCode";
import createHttpError from "http-errors";
import { sql } from "drizzle-orm";
import { and, sql } from "drizzle-orm";
import logger from "@server/logger";
import { fromZodError } from "zod-validation-error";
import { OpenAPITags, registry } from "@server/openApi";
@@ -31,7 +31,7 @@ const listUsersSchema = z.strictObject({
});
async function queryUsers(orgId: string, limit: number, offset: number) {
const rows = await db
return await db
.select({
id: users.userId,
email: users.email,
@@ -41,6 +41,8 @@ async function queryUsers(orgId: string, limit: number, offset: number) {
username: users.username,
name: users.name,
type: users.type,
roleId: userOrgs.roleId,
roleName: roles.name,
isOwner: userOrgs.isOwner,
idpName: idp.name,
idpId: users.idpId,
@@ -50,39 +52,12 @@ async function queryUsers(orgId: string, limit: number, offset: number) {
})
.from(users)
.leftJoin(userOrgs, eq(users.userId, userOrgs.userId))
.leftJoin(roles, eq(userOrgs.roleId, roles.roleId))
.leftJoin(idp, eq(users.idpId, idp.idpId))
.leftJoin(idpOidcConfig, eq(idpOidcConfig.idpId, idp.idpId))
.where(eq(userOrgs.orgId, orgId))
.limit(limit)
.offset(offset);
const roleRows = await db
.select({
userId: userOrgRoles.userId,
roleId: userOrgRoles.roleId,
roleName: roles.name
})
.from(userOrgRoles)
.leftJoin(roles, eq(userOrgRoles.roleId, roles.roleId))
.where(eq(userOrgRoles.orgId, orgId));
const rolesByUser = new Map<
string,
{ roleId: number; roleName: string }[]
>();
for (const r of roleRows) {
const list = rolesByUser.get(r.userId) ?? [];
list.push({ roleId: r.roleId, roleName: r.roleName ?? "" });
rolesByUser.set(r.userId, list);
}
return rows.map((row) => {
const userRoles = rolesByUser.get(row.id) ?? [];
return {
...row,
roles: userRoles
};
});
}
export type ListUsersResponse = {

View File

@@ -1,5 +1,5 @@
import { Request, Response, NextFunction } from "express";
import { db, Olm, olms, orgs, userOrgRoles, userOrgs } from "@server/db";
import { db, Olm, olms, orgs, userOrgs } from "@server/db";
import { idp, users } from "@server/db";
import { and, eq } from "drizzle-orm";
import response from "@server/lib/response";
@@ -84,31 +84,16 @@ export async function myDevice(
.from(olms)
.where(and(eq(olms.userId, userId), eq(olms.olmId, olmId)));
const userOrgRows = await db
const userOrganizations = await db
.select({
orgId: userOrgs.orgId,
orgName: orgs.name
orgName: orgs.name,
roleId: userOrgs.roleId
})
.from(userOrgs)
.where(eq(userOrgs.userId, userId))
.innerJoin(orgs, eq(userOrgs.orgId, orgs.orgId));
const roleRows = await db
.select({
orgId: userOrgRoles.orgId,
roleId: userOrgRoles.roleId
})
.from(userOrgRoles)
.where(eq(userOrgRoles.userId, userId));
const roleByOrg = new Map(
roleRows.map((r) => [r.orgId, r.roleId])
);
const userOrganizations = userOrgRows.map((row) => ({
...row,
roleId: roleByOrg.get(row.orgId) ?? 0
}));
return response<MyDeviceResponse>(res, {
data: {
user,

View File

@@ -1,157 +0,0 @@
import { Request, Response, NextFunction } from "express";
import { z } from "zod";
import { db } from "@server/db";
import { userOrgRoles, userOrgs, roles, clients } from "@server/db";
import { eq, and } from "drizzle-orm";
import response from "@server/lib/response";
import HttpCode from "@server/types/HttpCode";
import createHttpError from "http-errors";
import logger from "@server/logger";
import { fromError } from "zod-validation-error";
import stoi from "@server/lib/stoi";
import { OpenAPITags, registry } from "@server/openApi";
import { rebuildClientAssociationsFromClient } from "@server/lib/rebuildClientAssociations";
// Path params arrive as strings; roleId is coerced to a number via stoi.
const removeUserRoleParamsSchema = z.strictObject({
    userId: z.string(),
    roleId: z.string().transform(stoi).pipe(z.number())
});
// OpenAPI registration for DELETE /role/{roleId}/remove/{userId}.
registry.registerPath({
    method: "delete",
    path: "/role/{roleId}/remove/{userId}",
    description: "Remove a role from a user. User must have at least one role left in the org.",
    tags: [OpenAPITags.Role, OpenAPITags.User],
    request: {
        params: removeUserRoleParamsSchema
    },
    responses: {}
});
/**
 * Express handler: remove a single role from a user within the role's org.
 *
 * Guards: rejects invalid params (400), missing org access (403), unknown
 * role (400), user not in the role's org (404), org owner (403), and
 * removing the user's last role in the org (403). On success, deletes the
 * userOrgRoles row and rebuilds client associations for the user's clients
 * in that org inside one transaction.
 */
export async function removeUserRole(
    req: Request,
    res: Response,
    next: NextFunction
): Promise<any> {
    try {
        const parsedParams = removeUserRoleParamsSchema.safeParse(req.params);
        if (!parsedParams.success) {
            return next(
                createHttpError(
                    HttpCode.BAD_REQUEST,
                    fromError(parsedParams.error).toString()
                )
            );
        }
        const { userId, roleId } = parsedParams.data;
        // A logged-in user must have an org context on the request.
        if (req.user && !req.userOrg) {
            return next(
                createHttpError(
                    HttpCode.FORBIDDEN,
                    "You do not have access to this organization"
                )
            );
        }
        // The role row also supplies the orgId used for all checks below.
        const [role] = await db
            .select()
            .from(roles)
            .where(eq(roles.roleId, roleId))
            .limit(1);
        if (!role) {
            return next(
                createHttpError(HttpCode.BAD_REQUEST, "Invalid role ID")
            );
        }
        // Verify the target user is a member of the role's org.
        const [existingUser] = await db
            .select()
            .from(userOrgs)
            .where(
                and(eq(userOrgs.userId, userId), eq(userOrgs.orgId, role.orgId))
            )
            .limit(1);
        if (!existingUser) {
            return next(
                createHttpError(
                    HttpCode.NOT_FOUND,
                    "User not found or does not belong to the specified organization"
                )
            );
        }
        // Org owners' roles may not be modified through this endpoint.
        if (existingUser.isOwner) {
            return next(
                createHttpError(
                    HttpCode.FORBIDDEN,
                    "Cannot change the roles of the owner of the organization"
                )
            );
        }
        // A user must keep at least one role in the org; only block the
        // request when the role being removed is the user's last one.
        const remainingRoles = await db
            .select({ roleId: userOrgRoles.roleId })
            .from(userOrgRoles)
            .where(
                and(
                    eq(userOrgRoles.userId, userId),
                    eq(userOrgRoles.orgId, role.orgId)
                )
            );
        if (remainingRoles.length <= 1) {
            const hasThisRole = remainingRoles.some((r) => r.roleId === roleId);
            if (hasThisRole) {
                return next(
                    createHttpError(
                        HttpCode.FORBIDDEN,
                        "User must have at least one role in the organization. Remove the last role is not allowed."
                    )
                );
            }
        }
        await db.transaction(async (trx) => {
            // Delete the (userId, orgId, roleId) assignment row.
            await trx
                .delete(userOrgRoles)
                .where(
                    and(
                        eq(userOrgRoles.userId, userId),
                        eq(userOrgRoles.orgId, role.orgId),
                        eq(userOrgRoles.roleId, roleId)
                    )
                );
            // The role change affects what the user's clients can reach, so
            // rebuild associations for each of their clients in this org.
            const orgClients = await trx
                .select()
                .from(clients)
                .where(
                    and(
                        eq(clients.userId, userId),
                        eq(clients.orgId, role.orgId)
                    )
                );
            for (const orgClient of orgClients) {
                await rebuildClientAssociationsFromClient(orgClient, trx);
            }
        });
        return response(res, {
            data: { userId, orgId: role.orgId, roleId },
            success: true,
            error: false,
            message: "Role removed from user successfully",
            status: HttpCode.OK
        });
    } catch (error) {
        logger.error(error);
        return next(
            createHttpError(HttpCode.INTERNAL_SERVER_ERROR, "An error occurred")
        );
    }
}

View File

@@ -11,6 +11,7 @@ import {
startNewtOfflineChecker,
handleNewtDisconnectingMessage
} from "../newt";
import { startPingAccumulator } from "../newt/pingAccumulator";
import {
handleOlmRegisterMessage,
handleOlmRelayMessage,
@@ -46,6 +47,10 @@ export const messageHandlers: Record<string, MessageHandler> = {
"ws/round-trip/complete": handleRoundTripMessage
};
// Start the ping accumulator for all builds — it batches per-site online/lastPing
// updates into periodic bulk writes, preventing connection pool exhaustion.
startPingAccumulator();
if (build != "saas") {
startOlmOfflineChecker(); // this is to handle the offline check for olms
startNewtOfflineChecker(); // this is to handle the offline check for newts

View File

@@ -6,6 +6,7 @@ import { Socket } from "net";
import { Newt, newts, NewtSession, olms, Olm, OlmSession, sites } from "@server/db";
import { eq } from "drizzle-orm";
import { db } from "@server/db";
import { recordPing } from "@server/routers/newt/pingAccumulator";
import { validateNewtSessionToken } from "@server/auth/sessions/newt";
import { validateOlmSessionToken } from "@server/auth/sessions/olm";
import { messageHandlers } from "./messageHandlers";
@@ -386,22 +387,14 @@ const setupConnection = async (
// the same as modern newt clients.
if (clientType === "newt") {
const newtClient = client as Newt;
ws.on("ping", async () => {
ws.on("ping", () => {
if (!newtClient.siteId) return;
try {
await db
.update(sites)
.set({
online: true,
lastPing: Math.floor(Date.now() / 1000)
})
.where(eq(sites.siteId, newtClient.siteId));
} catch (error) {
logger.error(
"Error updating newt site online state on WS ping",
{ error }
);
}
// Record the ping in the accumulator instead of writing to the
// database on every WS ping frame. The accumulator flushes all
// pending pings in a single batched UPDATE every ~10s, which
// prevents connection pool exhaustion under load (especially
// with cross-region latency to the database).
recordPing(newtClient.siteId);
});
}

View File

@@ -5,5 +5,5 @@ import { Session } from "@server/db";
export interface AuthenticatedRequest extends Request {
user: User;
session: Session;
userOrgRoleIds?: number[];
userOrgRoleId?: number;
}

View File

@@ -275,6 +275,8 @@ export default function Page() {
}
}
const disabled = !isPaidUser(tierMatrix.orgOidc);
return (
<>
<div className="flex justify-between">
@@ -292,6 +294,9 @@ export default function Page() {
</Button>
</div>
<PaidFeaturesAlert tiers={tierMatrix.orgOidc} />
<fieldset disabled={disabled} className={disabled ? "opacity-50 pointer-events-none" : ""}>
<SettingsContainer>
<SettingsSection>
<SettingsSectionHeader>
@@ -812,9 +817,10 @@ export default function Page() {
</Button>
<Button
type="submit"
disabled={createLoading || !isPaidUser(tierMatrix.orgOidc)}
disabled={createLoading || disabled}
loading={createLoading}
onClick={() => {
if (disabled) return;
// log any issues with the form
console.log(form.formState.errors);
form.handleSubmit(onSubmit)();
@@ -823,6 +829,7 @@ export default function Page() {
{t("idpSubmit")}
</Button>
</div>
</fieldset>
</>
);
}

View File

@@ -8,6 +8,7 @@ import {
FormLabel,
FormMessage
} from "@app/components/ui/form";
import { Input } from "@app/components/ui/input";
import {
Select,
SelectContent,
@@ -18,6 +19,7 @@ import {
import { Checkbox } from "@app/components/ui/checkbox";
import { toast } from "@app/hooks/useToast";
import { zodResolver } from "@hookform/resolvers/zod";
import { InviteUserResponse } from "@server/routers/user";
import { AxiosResponse } from "axios";
import { useEffect, useState } from "react";
import { useForm } from "react-hook-form";
@@ -42,9 +44,6 @@ import { useEnvContext } from "@app/hooks/useEnvContext";
import { useTranslations } from "next-intl";
import IdpTypeBadge from "@app/components/IdpTypeBadge";
import { UserType } from "@server/types/UserTypes";
import { Badge } from "@app/components/ui/badge";
type UserRole = { roleId: number; name: string };
export default function AccessControlsPage() {
const { orgUser: user } = userOrgUserContext();
@@ -55,12 +54,12 @@ export default function AccessControlsPage() {
const [loading, setLoading] = useState(false);
const [roles, setRoles] = useState<{ roleId: number; name: string }[]>([]);
const [userRoles, setUserRoles] = useState<UserRole[]>([]);
const t = useTranslations();
const formSchema = z.object({
username: z.string(),
roleId: z.string().min(1, { message: t("accessRoleSelectPlease") }),
autoProvisioned: z.boolean()
});
@@ -68,17 +67,11 @@ export default function AccessControlsPage() {
resolver: zodResolver(formSchema),
defaultValues: {
username: user.username!,
roleId: user.roleId?.toString(),
autoProvisioned: user.autoProvisioned || false
}
});
const currentRoleIds = user.roleIds ?? [];
const currentRoles: UserRole[] = user.roles ?? [];
useEffect(() => {
setUserRoles(currentRoles);
}, [user.userId, currentRoleIds.join(",")]);
useEffect(() => {
async function fetchRoles() {
const res = await api
@@ -101,67 +94,32 @@ export default function AccessControlsPage() {
}
fetchRoles();
form.setValue("roleId", user.roleId.toString());
form.setValue("autoProvisioned", user.autoProvisioned || false);
}, []);
async function handleAddRole(roleId: number) {
setLoading(true);
try {
await api.post(`/role/${roleId}/add/${user.userId}`);
toast({
variant: "default",
title: t("userSaved"),
description: t("userSavedDescription")
});
const role = roles.find((r) => r.roleId === roleId);
if (role) setUserRoles((prev) => [...prev, role]);
} catch (e) {
toast({
variant: "destructive",
title: t("accessRoleErrorAdd"),
description: formatAxiosError(
e,
t("accessRoleErrorAddDescription")
)
});
}
setLoading(false);
}
async function handleRemoveRole(roleId: number) {
setLoading(true);
try {
await api.delete(`/role/${roleId}/remove/${user.userId}`);
toast({
variant: "default",
title: t("userSaved"),
description: t("userSavedDescription")
});
setUserRoles((prev) => prev.filter((r) => r.roleId !== roleId));
} catch (e) {
toast({
variant: "destructive",
title: t("accessRoleErrorAdd"),
description: formatAxiosError(
e,
t("accessRoleErrorAddDescription")
)
});
}
setLoading(false);
}
async function onSubmit(values: z.infer<typeof formSchema>) {
setLoading(true);
try {
await api.post(`/org/${orgId}/user/${user.userId}`, {
autoProvisioned: values.autoProvisioned
});
toast({
variant: "default",
title: t("userSaved"),
description: t("userSavedDescription")
});
// Execute both API calls simultaneously
const [roleRes, userRes] = await Promise.all([
api.post<AxiosResponse<InviteUserResponse>>(
`/role/${values.roleId}/add/${user.userId}`
),
api.post(`/org/${orgId}/user/${user.userId}`, {
autoProvisioned: values.autoProvisioned
})
]);
if (roleRes.status === 200 && userRes.status === 200) {
toast({
variant: "default",
title: t("userSaved"),
description: t("userSavedDescription")
});
}
} catch (e) {
toast({
variant: "destructive",
@@ -172,14 +130,10 @@ export default function AccessControlsPage() {
)
});
}
setLoading(false);
}
const availableRolesToAdd = roles.filter(
(r) => !userRoles.some((ur) => ur.roleId === r.roleId)
);
const canRemoveRole = userRoles.length > 1;
return (
<SettingsContainer>
<SettingsSection>
@@ -200,6 +154,7 @@ export default function AccessControlsPage() {
className="space-y-4"
id="access-controls-form"
>
{/* IDP Type Display */}
{user.type !== UserType.Internal &&
user.idpType && (
<div className="flex items-center space-x-2 mb-4">
@@ -216,72 +171,49 @@ export default function AccessControlsPage() {
</div>
)}
<FormItem>
<FormLabel>{t("role")}</FormLabel>
<div className="flex flex-wrap gap-2 items-center">
{userRoles.map((r) => (
<Badge
key={r.roleId}
variant="secondary"
className="flex items-center gap-1"
>
{r.name}
{canRemoveRole && (
<button
type="button"
onClick={() =>
handleRemoveRole(
r.roleId
)
}
disabled={loading}
className="ml-1 rounded hover:bg-muted"
aria-label={`Remove ${r.name}`}
>
×
</button>
)}
</Badge>
))}
{availableRolesToAdd.length > 0 && (
<FormField
control={form.control}
name="roleId"
render={({ field }) => (
<FormItem>
<FormLabel>{t("role")}</FormLabel>
<Select
onValueChange={(value) => {
handleAddRole(
parseInt(value, 10)
);
field.onChange(value);
// If auto provision is enabled, set it to false when role changes
if (user.idpAutoProvision) {
form.setValue(
"autoProvisioned",
false
);
}
}}
disabled={loading}
value={field.value}
>
<SelectTrigger className="w-[180px]">
<SelectValue
placeholder={t(
"accessRoleSelect"
)}
/>
</SelectTrigger>
<FormControl>
<SelectTrigger>
<SelectValue
placeholder={t(
"accessRoleSelect"
)}
/>
</SelectTrigger>
</FormControl>
<SelectContent>
{availableRolesToAdd.map(
(role) => (
<SelectItem
key={
role.roleId
}
value={role.roleId.toString()}
>
{role.name}
</SelectItem>
)
)}
{roles.map((role) => (
<SelectItem
key={role.roleId}
value={role.roleId.toString()}
>
{role.name}
</SelectItem>
))}
</SelectContent>
</Select>
)}
</div>
{userRoles.length === 0 && (
<p className="text-sm text-muted-foreground">
{t("accessRoleSelectPlease")}
</p>
<FormMessage />
</FormItem>
)}
</FormItem>
/>
{user.idpAutoProvision && (
<FormField
@@ -299,9 +231,7 @@ export default function AccessControlsPage() {
</FormControl>
<div className="space-y-1 leading-none">
<FormLabel>
{t(
"autoProvisioned"
)}
{t("autoProvisioned")}
</FormLabel>
<p className="text-sm text-muted-foreground">
{t(

View File

@@ -88,9 +88,7 @@ export default async function UsersPage(props: UsersPageProps) {
status: t("userConfirmed"),
role: user.isOwner
? t("accessRoleOwner")
: user.roles?.length
? user.roles.map((r) => r.roleName).join(", ")
: t("accessRoleMember"),
: user.roleName || t("accessRoleMember"),
isOwner: user.isOwner || false
};
});