Merge branch 'logging-provision' of https://github.com/fosrl/pangolin into logging-provision
@@ -1,9 +1,11 @@
import { flushBandwidthToDb } from "@server/routers/newt/handleReceiveBandwidthMessage";
import { flushConnectionLogToDb } from "#dynamic/routers/newt";
import { flushSiteBandwidthToDb } from "@server/routers/gerbil/receiveBandwidth";
import { cleanup as wsCleanup } from "#dynamic/routers/ws";

async function cleanup() {
    await flushBandwidthToDb();
    await flushConnectionLogToDb();
    await flushSiteBandwidthToDb();
    await wsCleanup();

@@ -14,4 +16,4 @@ export async function initCleanup() {
    // Handle process termination
    process.on("SIGTERM", () => cleanup());
    process.on("SIGINT", () => cleanup());
}
}

@@ -18,7 +18,9 @@ import {
    users,
    exitNodes,
    sessions,
    clients
    clients,
    siteResources,
    sites
} from "./schema";

export const certificates = pgTable("certificates", {
@@ -305,6 +307,45 @@ export const accessAuditLog = pgTable(
    ]
);

export const connectionAuditLog = pgTable(
    "connectionAuditLog",
    {
        id: serial("id").primaryKey(),
        sessionId: text("sessionId").notNull(),
        siteResourceId: integer("siteResourceId").references(
            () => siteResources.siteResourceId,
            { onDelete: "cascade" }
        ),
        orgId: text("orgId").references(() => orgs.orgId, {
            onDelete: "cascade"
        }),
        siteId: integer("siteId").references(() => sites.siteId, {
            onDelete: "cascade"
        }),
        clientId: integer("clientId").references(() => clients.clientId, {
            onDelete: "cascade"
        }),
        userId: text("userId").references(() => users.userId, {
            onDelete: "cascade"
        }),
        sourceAddr: text("sourceAddr").notNull(),
        destAddr: text("destAddr").notNull(),
        protocol: text("protocol").notNull(),
        startedAt: integer("startedAt").notNull(),
        endedAt: integer("endedAt"),
        bytesTx: integer("bytesTx"),
        bytesRx: integer("bytesRx")
    },
    (table) => [
        index("idx_connectionAuditLog_startedAt").on(table.startedAt),
        index("idx_connectionAuditLog_org_startedAt").on(
            table.orgId,
            table.startedAt
        ),
        index("idx_connectionAuditLog_siteResourceId").on(table.siteResourceId)
    ]
);

export const approvals = pgTable("approvals", {
    approvalId: serial("approvalId").primaryKey(),
    timestamp: integer("timestamp").notNull(), // this is EPOCH time in seconds
@@ -391,3 +432,4 @@ export type LoginPage = InferSelectModel<typeof loginPage>;
export type LoginPageBranding = InferSelectModel<typeof loginPageBranding>;
export type ActionAuditLog = InferSelectModel<typeof actionAuditLog>;
export type AccessAuditLog = InferSelectModel<typeof accessAuditLog>;
export type ConnectionAuditLog = InferSelectModel<typeof connectionAuditLog>;

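// Editor sketch (not part of the commit): the shape of a row written to the new
// table, assuming drizzle's insert API on the logs database; all values are
// invented, and timestamps are epoch seconds per the schema above.
// await logsDb.insert(connectionAuditLog).values({
//     sessionId: "abc123",
//     siteResourceId: 42,
//     orgId: "org_1",
//     siteId: 7,
//     clientId: null,
//     userId: null,
//     sourceAddr: "100.90.128.5:51820",
//     destAddr: "10.0.0.10:443",
//     protocol: "tcp",
//     startedAt: Math.floor(Date.now() / 1000),
//     endedAt: null,
//     bytesTx: null,
//     bytesRx: null
// });
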
@@ -55,6 +55,9 @@ export const orgs = pgTable("orgs", {
    settingsLogRetentionDaysAction: integer("settingsLogRetentionDaysAction") // where 0 = don't keep logs, -1 = keep forever, and 9001 = end of the following year
        .notNull()
        .default(0),
    settingsLogRetentionDaysConnection: integer("settingsLogRetentionDaysConnection") // where 0 = don't keep logs, -1 = keep forever, and 9001 = end of the following year
        .notNull()
        .default(0),
    sshCaPrivateKey: text("sshCaPrivateKey"), // Encrypted SSH CA private key (PEM format)
    sshCaPublicKey: text("sshCaPublicKey"), // SSH CA public key (OpenSSH format)
    isBillingOrg: boolean("isBillingOrg"),

@@ -7,7 +7,7 @@ import {
    sqliteTable,
    text
} from "drizzle-orm/sqlite-core";
import { clients, domains, exitNodes, orgs, sessions, users } from "./schema";
import { clients, domains, exitNodes, orgs, sessions, siteResources, sites, users } from "./schema";

export const certificates = sqliteTable("certificates", {
    certId: integer("certId").primaryKey({ autoIncrement: true }),
@@ -295,6 +295,45 @@ export const accessAuditLog = sqliteTable(
    ]
);

export const connectionAuditLog = sqliteTable(
    "connectionAuditLog",
    {
        id: integer("id").primaryKey({ autoIncrement: true }),
        sessionId: text("sessionId").notNull(),
        siteResourceId: integer("siteResourceId").references(
            () => siteResources.siteResourceId,
            { onDelete: "cascade" }
        ),
        orgId: text("orgId").references(() => orgs.orgId, {
            onDelete: "cascade"
        }),
        siteId: integer("siteId").references(() => sites.siteId, {
            onDelete: "cascade"
        }),
        clientId: integer("clientId").references(() => clients.clientId, {
            onDelete: "cascade"
        }),
        userId: text("userId").references(() => users.userId, {
            onDelete: "cascade"
        }),
        sourceAddr: text("sourceAddr").notNull(),
        destAddr: text("destAddr").notNull(),
        protocol: text("protocol").notNull(),
        startedAt: integer("startedAt").notNull(),
        endedAt: integer("endedAt"),
        bytesTx: integer("bytesTx"),
        bytesRx: integer("bytesRx")
    },
    (table) => [
        index("idx_connectionAuditLog_startedAt").on(table.startedAt),
        index("idx_connectionAuditLog_org_startedAt").on(
            table.orgId,
            table.startedAt
        ),
        index("idx_connectionAuditLog_siteResourceId").on(table.siteResourceId)
    ]
);

export const approvals = sqliteTable("approvals", {
    approvalId: integer("approvalId").primaryKey({ autoIncrement: true }),
    timestamp: integer("timestamp").notNull(), // this is EPOCH time in seconds
@@ -375,3 +414,4 @@ export type LoginPage = InferSelectModel<typeof loginPage>;
export type LoginPageBranding = InferSelectModel<typeof loginPageBranding>;
export type ActionAuditLog = InferSelectModel<typeof actionAuditLog>;
export type AccessAuditLog = InferSelectModel<typeof accessAuditLog>;
export type ConnectionAuditLog = InferSelectModel<typeof connectionAuditLog>;

@@ -47,6 +47,9 @@ export const orgs = sqliteTable("orgs", {
    settingsLogRetentionDaysAction: integer("settingsLogRetentionDaysAction") // where 0 = don't keep logs, -1 = keep forever, and 9001 = end of the following year
        .notNull()
        .default(0),
    settingsLogRetentionDaysConnection: integer("settingsLogRetentionDaysConnection") // where 0 = don't keep logs, -1 = keep forever, and 9001 = end of the following year
        .notNull()
        .default(0),
    sshCaPrivateKey: text("sshCaPrivateKey"), // Encrypted SSH CA private key (PEM format)
    sshCaPublicKey: text("sshCaPublicKey"), // SSH CA public key (OpenSSH format)
    isBillingOrg: integer("isBillingOrg", { mode: "boolean" }),

@@ -8,6 +8,7 @@ export enum TierFeature {
    LogExport = "logExport",
    AccessLogs = "accessLogs", // set the retention period to none on downgrade
    ActionLogs = "actionLogs", // set the retention period to none on downgrade
    ConnectionLogs = "connectionLogs",
    RotateCredentials = "rotateCredentials",
    MaintencePage = "maintencePage", // handle downgrade
    DevicePosture = "devicePosture",
@@ -26,6 +27,7 @@ export const tierMatrix: Record<TierFeature, Tier[]> = {
    [TierFeature.LogExport]: ["tier3", "enterprise"],
    [TierFeature.AccessLogs]: ["tier2", "tier3", "enterprise"],
    [TierFeature.ActionLogs]: ["tier2", "tier3", "enterprise"],
    [TierFeature.ConnectionLogs]: ["tier2", "tier3", "enterprise"],
    [TierFeature.RotateCredentials]: ["tier1", "tier2", "tier3", "enterprise"],
    [TierFeature.MaintencePage]: ["tier1", "tier2", "tier3", "enterprise"],
    [TierFeature.DevicePosture]: ["tier2", "tier3", "enterprise"],

@@ -2,6 +2,7 @@ import { db, orgs } from "@server/db";
import { cleanUpOldLogs as cleanUpOldAccessLogs } from "#dynamic/lib/logAccessAudit";
import { cleanUpOldLogs as cleanUpOldActionLogs } from "#dynamic/middlewares/logActionAudit";
import { cleanUpOldLogs as cleanUpOldRequestLogs } from "@server/routers/badger/logRequestAudit";
import { cleanUpOldLogs as cleanUpOldConnectionLogs } from "#dynamic/routers/newt";
import { gt, or } from "drizzle-orm";
import { cleanUpOldFingerprintSnapshots } from "@server/routers/olm/fingerprintingUtils";
import { build } from "@server/build";
@@ -20,14 +21,17 @@ export function initLogCleanupInterval() {
                settingsLogRetentionDaysAccess:
                    orgs.settingsLogRetentionDaysAccess,
                settingsLogRetentionDaysRequest:
                    orgs.settingsLogRetentionDaysRequest
                    orgs.settingsLogRetentionDaysRequest,
                settingsLogRetentionDaysConnection:
                    orgs.settingsLogRetentionDaysConnection
            })
            .from(orgs)
            .where(
                or(
                    gt(orgs.settingsLogRetentionDaysAction, 0),
                    gt(orgs.settingsLogRetentionDaysAccess, 0),
                    gt(orgs.settingsLogRetentionDaysRequest, 0)
                    gt(orgs.settingsLogRetentionDaysRequest, 0),
                    gt(orgs.settingsLogRetentionDaysConnection, 0)
                )
            );

@@ -37,7 +41,8 @@ export function initLogCleanupInterval() {
            orgId,
            settingsLogRetentionDaysAction,
            settingsLogRetentionDaysAccess,
            settingsLogRetentionDaysRequest
            settingsLogRetentionDaysRequest,
            settingsLogRetentionDaysConnection
        } = org;

        if (settingsLogRetentionDaysAction > 0) {
@@ -60,6 +65,13 @@ export function initLogCleanupInterval() {
                settingsLogRetentionDaysRequest
            );
        }

        if (settingsLogRetentionDaysConnection > 0) {
            await cleanUpOldConnectionLogs(
                orgId,
                settingsLogRetentionDaysConnection
            );
        }
    }

    await cleanUpOldFingerprintSnapshots(365);

127 server/lib/ip.ts
@@ -571,6 +571,133 @@ export function generateSubnetProxyTargets(
    return targets;
}

export type SubnetProxyTargetV2 = {
    sourcePrefixes: string[]; // must be cidrs
    destPrefix: string; // must be a cidr
    disableIcmp?: boolean;
    rewriteTo?: string; // must be a cidr
    portRange?: {
        min: number;
        max: number;
        protocol: "tcp" | "udp";
    }[];
    resourceId?: number;
};

export function generateSubnetProxyTargetV2(
    siteResource: SiteResource,
    clients: {
        clientId: number;
        pubKey: string | null;
        subnet: string | null;
    }[]
): SubnetProxyTargetV2 | undefined {
    if (clients.length === 0) {
        logger.debug(
            `No clients have access to site resource ${siteResource.siteResourceId}, skipping target generation.`
        );
        return;
    }

    let target: SubnetProxyTargetV2 | null = null;

    const portRange = [
        ...parsePortRangeString(siteResource.tcpPortRangeString, "tcp"),
        ...parsePortRangeString(siteResource.udpPortRangeString, "udp")
    ];
    const disableIcmp = siteResource.disableIcmp ?? false;

    if (siteResource.mode == "host") {
        let destination = siteResource.destination;
        // check if this is a valid ip
        const ipSchema = z.union([z.ipv4(), z.ipv6()]);
        if (ipSchema.safeParse(destination).success) {
            destination = `${destination}/32`;

            target = {
                sourcePrefixes: [],
                destPrefix: destination,
                portRange,
                disableIcmp,
                resourceId: siteResource.siteResourceId,
            };
        }

        if (siteResource.alias && siteResource.aliasAddress) {
            // also push a match for the alias address
            target = {
                sourcePrefixes: [],
                destPrefix: `${siteResource.aliasAddress}/32`,
                rewriteTo: destination,
                portRange,
                disableIcmp,
                resourceId: siteResource.siteResourceId,
            };
        }
    } else if (siteResource.mode == "cidr") {
        target = {
            sourcePrefixes: [],
            destPrefix: siteResource.destination,
            portRange,
            disableIcmp,
            resourceId: siteResource.siteResourceId,
        };
    }

    if (!target) {
        return;
    }

    for (const clientSite of clients) {
        if (!clientSite.subnet) {
            logger.debug(
                `Client ${clientSite.clientId} has no subnet, skipping for site resource ${siteResource.siteResourceId}.`
            );
            continue;
        }

        const clientPrefix = `${clientSite.subnet.split("/")[0]}/32`;

        // add client prefix to source prefixes
        target.sourcePrefixes.push(clientPrefix);
    }

    // print a nice representation of the targets
    // logger.debug(
    //     `Generated subnet proxy targets for: ${JSON.stringify(targets, null, 2)}`
    // );

    return target;
}

/**
 * Converts a SubnetProxyTargetV2 to an array of SubnetProxyTarget (v1)
 * by expanding each source prefix into its own target entry.
 * @param targetV2 - The v2 target to convert
 * @returns Array of v1 SubnetProxyTarget objects
 */
export function convertSubnetProxyTargetsV2ToV1(
    targetsV2: SubnetProxyTargetV2[]
): SubnetProxyTarget[] {
    return targetsV2.flatMap((targetV2) =>
        targetV2.sourcePrefixes.map((sourcePrefix) => ({
            sourcePrefix,
            destPrefix: targetV2.destPrefix,
            ...(targetV2.disableIcmp !== undefined && {
                disableIcmp: targetV2.disableIcmp
            }),
            ...(targetV2.rewriteTo !== undefined && {
                rewriteTo: targetV2.rewriteTo
            }),
            ...(targetV2.portRange !== undefined && {
                portRange: targetV2.portRange
            })
        }))
    );
}

// Custom schema for validating port range strings
// Format: "80,443,8000-9000" or "*" for all ports, or empty string
export const portRangeStringSchema = z

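// Editor sketch (not part of the commit): how convertSubnetProxyTargetsV2ToV1
// fans a single v2 target out into one v1 target per source prefix. All values
// below are invented for illustration.
// const v2: SubnetProxyTargetV2 = {
//     sourcePrefixes: ["100.90.128.5/32", "100.90.128.9/32"],
//     destPrefix: "10.0.0.10/32",
//     portRange: [{ min: 443, max: 443, protocol: "tcp" }]
// };
// convertSubnetProxyTargetsV2ToV1([v2]) yields two v1 targets, one per source
// prefix, each repeating the shared destPrefix and portRange.
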
@@ -302,8 +302,8 @@ export const configSchema = z
        .optional()
        .default({
            block_size: 24,
            subnet_group: "100.90.128.0/24",
            utility_subnet_group: "100.96.128.0/24"
            subnet_group: "100.90.128.0/20",
            utility_subnet_group: "100.96.128.0/20"
        }),
    rate_limits: z
        .object({

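// Editor note (not part of the commit): widening these defaults from /24 to /20
// grows each pool from 2^(32-24) = 256 to 2^(32-20) = 4096 addresses.
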
@@ -32,7 +32,7 @@ import logger from "@server/logger";
import {
    generateAliasConfig,
    generateRemoteSubnets,
    generateSubnetProxyTargets,
    generateSubnetProxyTargetV2,
    parseEndpoint,
    formatEndpoint
} from "@server/lib/ip";
@@ -660,19 +660,16 @@ async function handleSubnetProxyTargetUpdates(
    );

    if (addedClients.length > 0) {
        const targetsToAdd = generateSubnetProxyTargets(
        const targetToAdd = generateSubnetProxyTargetV2(
            siteResource,
            addedClients
        );

        if (targetsToAdd.length > 0) {
            logger.info(
                `Adding ${targetsToAdd.length} subnet proxy targets for siteResource ${siteResource.siteResourceId}`
            );
        if (targetToAdd) {
            proxyJobs.push(
                addSubnetProxyTargets(
                    newt.newtId,
                    targetsToAdd,
                    [targetToAdd],
                    newt.version
                )
            );
@@ -700,19 +697,16 @@ async function handleSubnetProxyTargetUpdates(
    );

    if (removedClients.length > 0) {
        const targetsToRemove = generateSubnetProxyTargets(
        const targetToRemove = generateSubnetProxyTargetV2(
            siteResource,
            removedClients
        );

        if (targetsToRemove.length > 0) {
            logger.info(
                `Removing ${targetsToRemove.length} subnet proxy targets for siteResource ${siteResource.siteResourceId}`
            );
        if (targetToRemove) {
            proxyJobs.push(
                removeSubnetProxyTargets(
                    newt.newtId,
                    targetsToRemove,
                    [targetToRemove],
                    newt.version
                )
            );
@@ -1169,7 +1163,7 @@ async function handleMessagesForClientResources(
    }

    for (const resource of resources) {
        const targets = generateSubnetProxyTargets(resource, [
        const target = generateSubnetProxyTargetV2(resource, [
            {
                clientId: client.clientId,
                pubKey: client.pubKey,
@@ -1177,11 +1171,11 @@ async function handleMessagesForClientResources(
            }
        ]);

        if (targets.length > 0) {
        if (target) {
            proxyJobs.push(
                addSubnetProxyTargets(
                    newt.newtId,
                    targets,
                    [target],
                    newt.version
                )
            );
@@ -1246,7 +1240,7 @@ async function handleMessagesForClientResources(
    }

    for (const resource of resources) {
        const targets = generateSubnetProxyTargets(resource, [
        const target = generateSubnetProxyTargetV2(resource, [
            {
                clientId: client.clientId,
                pubKey: client.pubKey,
@@ -1254,11 +1248,11 @@ async function handleMessagesForClientResources(
            }
        ]);

        if (targets.length > 0) {
        if (target) {
            proxyJobs.push(
                removeSubnetProxyTargets(
                    newt.newtId,
                    targets,
                    [target],
                    newt.version
                )
            );

@@ -14,10 +14,12 @@
import { rateLimitService } from "#private/lib/rateLimit";
import { cleanup as wsCleanup } from "#private/routers/ws";
import { flushBandwidthToDb } from "@server/routers/newt/handleReceiveBandwidthMessage";
import { flushConnectionLogToDb } from "#dynamic/routers/newt";
import { flushSiteBandwidthToDb } from "@server/routers/gerbil/receiveBandwidth";

async function cleanup() {
    await flushBandwidthToDb();
    await flushConnectionLogToDb();
    await flushSiteBandwidthToDb();
    await rateLimitService.cleanup();
    await wsCleanup();
@@ -29,4 +31,4 @@ export async function initCleanup() {
    // Handle process termination
    process.on("SIGTERM", () => cleanup());
    process.on("SIGINT", () => cleanup());
}
}

@@ -57,7 +57,10 @@ export const privateConfigSchema = z.object({
    .object({
        host: z.string(),
        port: portSchema,
        password: z.string().optional(),
        password: z
            .string()
            .optional()
            .transform(getEnvOrYaml("REDIS_PASSWORD")),
        db: z.int().nonnegative().optional().default(0),
        replicas: z
            .array(

99 server/private/routers/auditLogs/exportConnectionAuditLog.ts (Normal file)
@@ -0,0 +1,99 @@
/*
 * This file is part of a proprietary work.
 *
 * Copyright (c) 2025 Fossorial, Inc.
 * All rights reserved.
 *
 * This file is licensed under the Fossorial Commercial License.
 * You may not use this file except in compliance with the License.
 * Unauthorized use, copying, modification, or distribution is strictly prohibited.
 *
 * This file is not licensed under the AGPLv3.
 */

import { registry } from "@server/openApi";
import { NextFunction } from "express";
import { Request, Response } from "express";
import { OpenAPITags } from "@server/openApi";
import createHttpError from "http-errors";
import HttpCode from "@server/types/HttpCode";
import { fromError } from "zod-validation-error";
import logger from "@server/logger";
import {
    queryConnectionAuditLogsParams,
    queryConnectionAuditLogsQuery,
    queryConnection,
    countConnectionQuery
} from "./queryConnectionAuditLog";
import { generateCSV } from "@server/routers/auditLogs/generateCSV";
import { MAX_EXPORT_LIMIT } from "@server/routers/auditLogs";

registry.registerPath({
    method: "get",
    path: "/org/{orgId}/logs/connection/export",
    description: "Export the connection audit log for an organization as CSV",
    tags: [OpenAPITags.Logs],
    request: {
        query: queryConnectionAuditLogsQuery,
        params: queryConnectionAuditLogsParams
    },
    responses: {}
});

export async function exportConnectionAuditLogs(
    req: Request,
    res: Response,
    next: NextFunction
): Promise<any> {
    try {
        const parsedQuery = queryConnectionAuditLogsQuery.safeParse(req.query);
        if (!parsedQuery.success) {
            return next(
                createHttpError(
                    HttpCode.BAD_REQUEST,
                    fromError(parsedQuery.error)
                )
            );
        }

        const parsedParams = queryConnectionAuditLogsParams.safeParse(req.params);
        if (!parsedParams.success) {
            return next(
                createHttpError(
                    HttpCode.BAD_REQUEST,
                    fromError(parsedParams.error)
                )
            );
        }

        const data = { ...parsedQuery.data, ...parsedParams.data };
        const [{ count }] = await countConnectionQuery(data);
        if (count > MAX_EXPORT_LIMIT) {
            return next(
                createHttpError(
                    HttpCode.BAD_REQUEST,
                    `Export limit exceeded. Your selection contains ${count} rows, but the maximum is ${MAX_EXPORT_LIMIT} rows. Please select a shorter time range to reduce the data.`
                )
            );
        }

        const baseQuery = queryConnection(data);

        const log = await baseQuery.limit(data.limit).offset(data.offset);

        const csvData = generateCSV(log);

        res.setHeader("Content-Type", "text/csv");
        res.setHeader(
            "Content-Disposition",
            `attachment; filename="connection-audit-logs-${data.orgId}-${Date.now()}.csv"`
        );

        return res.send(csvData);
    } catch (error) {
        logger.error(error);
        return next(
            createHttpError(HttpCode.INTERNAL_SERVER_ERROR, "An error occurred")
        );
    }
}

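// Editor sketch (not part of the commit): fetching the CSV export over HTTP.
// The base URL, API prefix, org id, and session cookie are invented; only the
// route path comes from the registration above.
// const res = await fetch(
//     "https://pangolin.example.com/api/v1/org/org_1/logs/connection/export" +
//         "?timeStart=2025-01-01T00:00:00Z",
//     { headers: { Cookie: "session=..." } }
// );
// const csv = await res.text(); // served as a text/csv attachment
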
@@ -15,3 +15,5 @@ export * from "./queryActionAuditLog";
export * from "./exportActionAuditLog";
export * from "./queryAccessAuditLog";
export * from "./exportAccessAuditLog";
export * from "./queryConnectionAuditLog";
export * from "./exportConnectionAuditLog";

524 server/private/routers/auditLogs/queryConnectionAuditLog.ts (Normal file)
@@ -0,0 +1,524 @@
/*
 * This file is part of a proprietary work.
 *
 * Copyright (c) 2025 Fossorial, Inc.
 * All rights reserved.
 *
 * This file is licensed under the Fossorial Commercial License.
 * You may not use this file except in compliance with the License.
 * Unauthorized use, copying, modification, or distribution is strictly prohibited.
 *
 * This file is not licensed under the AGPLv3.
 */

import {
    connectionAuditLog,
    logsDb,
    siteResources,
    sites,
    clients,
    users,
    primaryDb
} from "@server/db";
import { registry } from "@server/openApi";
import { NextFunction } from "express";
import { Request, Response } from "express";
import { eq, gt, lt, and, count, desc, inArray } from "drizzle-orm";
import { OpenAPITags } from "@server/openApi";
import { z } from "zod";
import createHttpError from "http-errors";
import HttpCode from "@server/types/HttpCode";
import { fromError } from "zod-validation-error";
import { QueryConnectionAuditLogResponse } from "@server/routers/auditLogs/types";
import response from "@server/lib/response";
import logger from "@server/logger";
import { getSevenDaysAgo } from "@app/lib/getSevenDaysAgo";

export const queryConnectionAuditLogsQuery = z.object({
    // ISO string; just validate that it's a parseable date
    timeStart: z
        .string()
        .refine((val) => !isNaN(Date.parse(val)), {
            error: "timeStart must be a valid ISO date string"
        })
        .transform((val) => Math.floor(new Date(val).getTime() / 1000))
        .prefault(() => getSevenDaysAgo().toISOString())
        .openapi({
            type: "string",
            format: "date-time",
            description:
                "Start time as ISO date string (defaults to 7 days ago)"
        }),
    timeEnd: z
        .string()
        .refine((val) => !isNaN(Date.parse(val)), {
            error: "timeEnd must be a valid ISO date string"
        })
        .transform((val) => Math.floor(new Date(val).getTime() / 1000))
        .optional()
        .prefault(() => new Date().toISOString())
        .openapi({
            type: "string",
            format: "date-time",
            description:
                "End time as ISO date string (defaults to current time)"
        }),
    protocol: z.string().optional(),
    sourceAddr: z.string().optional(),
    destAddr: z.string().optional(),
    clientId: z
        .string()
        .optional()
        .transform(Number)
        .pipe(z.int().positive())
        .optional(),
    siteId: z
        .string()
        .optional()
        .transform(Number)
        .pipe(z.int().positive())
        .optional(),
    siteResourceId: z
        .string()
        .optional()
        .transform(Number)
        .pipe(z.int().positive())
        .optional(),
    userId: z.string().optional(),
    limit: z
        .string()
        .optional()
        .default("1000")
        .transform(Number)
        .pipe(z.int().positive()),
    offset: z
        .string()
        .optional()
        .default("0")
        .transform(Number)
        .pipe(z.int().nonnegative())
});

export const queryConnectionAuditLogsParams = z.object({
    orgId: z.string()
});

export const queryConnectionAuditLogsCombined =
    queryConnectionAuditLogsQuery.merge(queryConnectionAuditLogsParams);
type Q = z.infer<typeof queryConnectionAuditLogsCombined>;

function getWhere(data: Q) {
    return and(
        gt(connectionAuditLog.startedAt, data.timeStart),
        lt(connectionAuditLog.startedAt, data.timeEnd),
        eq(connectionAuditLog.orgId, data.orgId),
        data.protocol
            ? eq(connectionAuditLog.protocol, data.protocol)
            : undefined,
        data.sourceAddr
            ? eq(connectionAuditLog.sourceAddr, data.sourceAddr)
            : undefined,
        data.destAddr
            ? eq(connectionAuditLog.destAddr, data.destAddr)
            : undefined,
        data.clientId
            ? eq(connectionAuditLog.clientId, data.clientId)
            : undefined,
        data.siteId
            ? eq(connectionAuditLog.siteId, data.siteId)
            : undefined,
        data.siteResourceId
            ? eq(connectionAuditLog.siteResourceId, data.siteResourceId)
            : undefined,
        data.userId
            ? eq(connectionAuditLog.userId, data.userId)
            : undefined
    );
}

export function queryConnection(data: Q) {
    return logsDb
        .select({
            sessionId: connectionAuditLog.sessionId,
            siteResourceId: connectionAuditLog.siteResourceId,
            orgId: connectionAuditLog.orgId,
            siteId: connectionAuditLog.siteId,
            clientId: connectionAuditLog.clientId,
            userId: connectionAuditLog.userId,
            sourceAddr: connectionAuditLog.sourceAddr,
            destAddr: connectionAuditLog.destAddr,
            protocol: connectionAuditLog.protocol,
            startedAt: connectionAuditLog.startedAt,
            endedAt: connectionAuditLog.endedAt,
            bytesTx: connectionAuditLog.bytesTx,
            bytesRx: connectionAuditLog.bytesRx
        })
        .from(connectionAuditLog)
        .where(getWhere(data))
        .orderBy(
            desc(connectionAuditLog.startedAt),
            desc(connectionAuditLog.id)
        );
}

export function countConnectionQuery(data: Q) {
    const countQuery = logsDb
        .select({ count: count() })
        .from(connectionAuditLog)
        .where(getWhere(data));
    return countQuery;
}

async function enrichWithDetails(
    logs: Awaited<ReturnType<typeof queryConnection>>
) {
    // Collect unique IDs from logs
    const siteResourceIds = [
        ...new Set(
            logs
                .map((log) => log.siteResourceId)
                .filter((id): id is number => id !== null && id !== undefined)
        )
    ];
    const siteIds = [
        ...new Set(
            logs
                .map((log) => log.siteId)
                .filter((id): id is number => id !== null && id !== undefined)
        )
    ];
    const clientIds = [
        ...new Set(
            logs
                .map((log) => log.clientId)
                .filter((id): id is number => id !== null && id !== undefined)
        )
    ];
    const userIds = [
        ...new Set(
            logs
                .map((log) => log.userId)
                .filter((id): id is string => id !== null && id !== undefined)
        )
    ];

    // Fetch resource details from main database
    const resourceMap = new Map<
        number,
        { name: string; niceId: string }
    >();
    if (siteResourceIds.length > 0) {
        const resourceDetails = await primaryDb
            .select({
                siteResourceId: siteResources.siteResourceId,
                name: siteResources.name,
                niceId: siteResources.niceId
            })
            .from(siteResources)
            .where(inArray(siteResources.siteResourceId, siteResourceIds));

        for (const r of resourceDetails) {
            resourceMap.set(r.siteResourceId, {
                name: r.name,
                niceId: r.niceId
            });
        }
    }

    // Fetch site details from main database
    const siteMap = new Map<number, { name: string; niceId: string }>();
    if (siteIds.length > 0) {
        const siteDetails = await primaryDb
            .select({
                siteId: sites.siteId,
                name: sites.name,
                niceId: sites.niceId
            })
            .from(sites)
            .where(inArray(sites.siteId, siteIds));

        for (const s of siteDetails) {
            siteMap.set(s.siteId, { name: s.name, niceId: s.niceId });
        }
    }

    // Fetch client details from main database
    const clientMap = new Map<
        number,
        { name: string; niceId: string; type: string }
    >();
    if (clientIds.length > 0) {
        const clientDetails = await primaryDb
            .select({
                clientId: clients.clientId,
                name: clients.name,
                niceId: clients.niceId,
                type: clients.type
            })
            .from(clients)
            .where(inArray(clients.clientId, clientIds));

        for (const c of clientDetails) {
            clientMap.set(c.clientId, {
                name: c.name,
                niceId: c.niceId,
                type: c.type
            });
        }
    }

    // Fetch user details from main database
    const userMap = new Map<
        string,
        { email: string | null }
    >();
    if (userIds.length > 0) {
        const userDetails = await primaryDb
            .select({
                userId: users.userId,
                email: users.email
            })
            .from(users)
            .where(inArray(users.userId, userIds));

        for (const u of userDetails) {
            userMap.set(u.userId, { email: u.email });
        }
    }

    // Enrich logs with details
    return logs.map((log) => ({
        ...log,
        resourceName: log.siteResourceId
            ? resourceMap.get(log.siteResourceId)?.name ?? null
            : null,
        resourceNiceId: log.siteResourceId
            ? resourceMap.get(log.siteResourceId)?.niceId ?? null
            : null,
        siteName: log.siteId
            ? siteMap.get(log.siteId)?.name ?? null
            : null,
        siteNiceId: log.siteId
            ? siteMap.get(log.siteId)?.niceId ?? null
            : null,
        clientName: log.clientId
            ? clientMap.get(log.clientId)?.name ?? null
            : null,
        clientNiceId: log.clientId
            ? clientMap.get(log.clientId)?.niceId ?? null
            : null,
        clientType: log.clientId
            ? clientMap.get(log.clientId)?.type ?? null
            : null,
        userEmail: log.userId
            ? userMap.get(log.userId)?.email ?? null
            : null
    }));
}

async function queryUniqueFilterAttributes(
    timeStart: number,
    timeEnd: number,
    orgId: string
) {
    const baseConditions = and(
        gt(connectionAuditLog.startedAt, timeStart),
        lt(connectionAuditLog.startedAt, timeEnd),
        eq(connectionAuditLog.orgId, orgId)
    );

    // Get unique protocols
    const uniqueProtocols = await logsDb
        .selectDistinct({
            protocol: connectionAuditLog.protocol
        })
        .from(connectionAuditLog)
        .where(baseConditions);

    // Get unique destination addresses
    const uniqueDestAddrs = await logsDb
        .selectDistinct({
            destAddr: connectionAuditLog.destAddr
        })
        .from(connectionAuditLog)
        .where(baseConditions);

    // Get unique client IDs
    const uniqueClients = await logsDb
        .selectDistinct({
            clientId: connectionAuditLog.clientId
        })
        .from(connectionAuditLog)
        .where(baseConditions);

    // Get unique resource IDs
    const uniqueResources = await logsDb
        .selectDistinct({
            siteResourceId: connectionAuditLog.siteResourceId
        })
        .from(connectionAuditLog)
        .where(baseConditions);

    // Get unique user IDs
    const uniqueUsers = await logsDb
        .selectDistinct({
            userId: connectionAuditLog.userId
        })
        .from(connectionAuditLog)
        .where(baseConditions);

    // Enrich client IDs with names from main database
    const clientIds = uniqueClients
        .map((row) => row.clientId)
        .filter((id): id is number => id !== null);

    let clientsWithNames: Array<{ id: number; name: string }> = [];
    if (clientIds.length > 0) {
        const clientDetails = await primaryDb
            .select({
                clientId: clients.clientId,
                name: clients.name
            })
            .from(clients)
            .where(inArray(clients.clientId, clientIds));

        clientsWithNames = clientDetails.map((c) => ({
            id: c.clientId,
            name: c.name
        }));
    }

    // Enrich resource IDs with names from main database
    const resourceIds = uniqueResources
        .map((row) => row.siteResourceId)
        .filter((id): id is number => id !== null);

    let resourcesWithNames: Array<{ id: number; name: string | null }> = [];
    if (resourceIds.length > 0) {
        const resourceDetails = await primaryDb
            .select({
                siteResourceId: siteResources.siteResourceId,
                name: siteResources.name
            })
            .from(siteResources)
            .where(inArray(siteResources.siteResourceId, resourceIds));

        resourcesWithNames = resourceDetails.map((r) => ({
            id: r.siteResourceId,
            name: r.name
        }));
    }

    // Enrich user IDs with emails from main database
    const userIdsList = uniqueUsers
        .map((row) => row.userId)
        .filter((id): id is string => id !== null);

    let usersWithEmails: Array<{ id: string; email: string | null }> = [];
    if (userIdsList.length > 0) {
        const userDetails = await primaryDb
            .select({
                userId: users.userId,
                email: users.email
            })
            .from(users)
            .where(inArray(users.userId, userIdsList));

        usersWithEmails = userDetails.map((u) => ({
            id: u.userId,
            email: u.email
        }));
    }

    return {
        protocols: uniqueProtocols
            .map((row) => row.protocol)
            .filter((protocol): protocol is string => protocol !== null),
        destAddrs: uniqueDestAddrs
            .map((row) => row.destAddr)
            .filter((addr): addr is string => addr !== null),
        clients: clientsWithNames,
        resources: resourcesWithNames,
        users: usersWithEmails
    };
}

registry.registerPath({
    method: "get",
    path: "/org/{orgId}/logs/connection",
    description: "Query the connection audit log for an organization",
    tags: [OpenAPITags.Logs],
    request: {
        query: queryConnectionAuditLogsQuery,
        params: queryConnectionAuditLogsParams
    },
    responses: {}
});

export async function queryConnectionAuditLogs(
    req: Request,
    res: Response,
    next: NextFunction
): Promise<any> {
    try {
        const parsedQuery = queryConnectionAuditLogsQuery.safeParse(req.query);
        if (!parsedQuery.success) {
            return next(
                createHttpError(
                    HttpCode.BAD_REQUEST,
                    fromError(parsedQuery.error)
                )
            );
        }
        const parsedParams = queryConnectionAuditLogsParams.safeParse(
            req.params
        );
        if (!parsedParams.success) {
            return next(
                createHttpError(
                    HttpCode.BAD_REQUEST,
                    fromError(parsedParams.error)
                )
            );
        }

        const data = { ...parsedQuery.data, ...parsedParams.data };

        const baseQuery = queryConnection(data);

        const logsRaw = await baseQuery.limit(data.limit).offset(data.offset);

        // Enrich with resource, site, client, and user details
        const log = await enrichWithDetails(logsRaw);

        const totalCountResult = await countConnectionQuery(data);
        const totalCount = totalCountResult[0].count;

        const filterAttributes = await queryUniqueFilterAttributes(
            data.timeStart,
            data.timeEnd,
            data.orgId
        );

        return response<QueryConnectionAuditLogResponse>(res, {
            data: {
                log: log,
                pagination: {
                    total: totalCount,
                    limit: data.limit,
                    offset: data.offset
                },
                filterAttributes
            },
            success: true,
            error: false,
            message: "Connection audit logs retrieved successfully",
            status: HttpCode.OK
        });
    } catch (error) {
        logger.error(error);
        return next(
            createHttpError(HttpCode.INTERNAL_SERVER_ERROR, "An error occurred")
        );
    }
}
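// Editor sketch (not part of the commit): querying the JSON endpoint with
// filters. Parameter names come from queryConnectionAuditLogsQuery above; the
// base URL, API prefix, and session cookie are invented.
// const res = await fetch(
//     "https://pangolin.example.com/api/v1/org/org_1/logs/connection" +
//         "?protocol=tcp&siteId=7&limit=100&offset=0",
//     { headers: { Cookie: "session=..." } }
// );
// const { data } = await res.json(); // { log, pagination, filterAttributes }
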
@@ -478,6 +478,25 @@ authenticated.get(
    logs.exportAccessAuditLogs
);

authenticated.get(
    "/org/:orgId/logs/connection",
    verifyValidLicense,
    verifyValidSubscription(tierMatrix.connectionLogs),
    verifyOrgAccess,
    verifyUserHasAction(ActionsEnum.exportLogs),
    logs.queryConnectionAuditLogs
);

authenticated.get(
    "/org/:orgId/logs/connection/export",
    verifyValidLicense,
    verifyValidSubscription(tierMatrix.logExport),
    verifyOrgAccess,
    verifyUserHasAction(ActionsEnum.exportLogs),
    logActionAudit(ActionsEnum.exportLogs),
    logs.exportConnectionAuditLogs
);

authenticated.post(
    "/re-key/:clientId/regenerate-client-secret",
    verifyClientAccess, // this is first to set the org id

@@ -91,6 +91,25 @@ authenticated.get(
    logs.exportAccessAuditLogs
);

authenticated.get(
    "/org/:orgId/logs/connection",
    verifyValidLicense,
    verifyValidSubscription(tierMatrix.connectionLogs),
    verifyApiKeyOrgAccess,
    verifyApiKeyHasAction(ActionsEnum.exportLogs),
    logs.queryConnectionAuditLogs
);

authenticated.get(
    "/org/:orgId/logs/connection/export",
    verifyValidLicense,
    verifyValidSubscription(tierMatrix.logExport),
    verifyApiKeyOrgAccess,
    verifyApiKeyHasAction(ActionsEnum.exportLogs),
    logActionAudit(ActionsEnum.exportLogs),
    logs.exportConnectionAuditLogs
);

authenticated.put(
    "/org/:orgId/idp/oidc",
    verifyValidLicense,

394 server/private/routers/newt/handleConnectionLogMessage.ts (Normal file)
@@ -0,0 +1,394 @@
|
||||
import { db, logsDb } from "@server/db";
|
||||
import { MessageHandler } from "@server/routers/ws";
|
||||
import { connectionAuditLog, sites, Newt, clients, orgs } from "@server/db";
|
||||
import { and, eq, lt, inArray } from "drizzle-orm";
|
||||
import logger from "@server/logger";
|
||||
import { inflate } from "zlib";
|
||||
import { promisify } from "util";
|
||||
import { calculateCutoffTimestamp } from "@server/lib/cleanupLogs";
|
||||
|
||||
const zlibInflate = promisify(inflate);
|
||||
|
||||
// Retry configuration for deadlock handling
|
||||
const MAX_RETRIES = 3;
|
||||
const BASE_DELAY_MS = 50;
|
||||
|
||||
// How often to flush accumulated connection log data to the database
|
||||
const FLUSH_INTERVAL_MS = 30_000; // 30 seconds
|
||||
|
||||
// Maximum number of records to buffer before forcing a flush
|
||||
const MAX_BUFFERED_RECORDS = 500;
|
||||
|
||||
// Maximum number of records to insert in a single batch
|
||||
const INSERT_BATCH_SIZE = 100;
|
||||
|
||||
interface ConnectionSessionData {
|
||||
sessionId: string;
|
||||
resourceId: number;
|
||||
sourceAddr: string;
|
||||
destAddr: string;
|
||||
protocol: string;
|
||||
startedAt: string; // ISO 8601 timestamp
|
||||
endedAt?: string; // ISO 8601 timestamp
|
||||
bytesTx?: number;
|
||||
bytesRx?: number;
|
||||
}
|
||||
|
||||
interface ConnectionLogRecord {
|
||||
sessionId: string;
|
||||
siteResourceId: number;
|
||||
orgId: string;
|
||||
siteId: number;
|
||||
clientId: number | null;
|
||||
userId: string | null;
|
||||
sourceAddr: string;
|
||||
destAddr: string;
|
||||
protocol: string;
|
||||
startedAt: number; // epoch seconds
|
||||
endedAt: number | null;
|
||||
bytesTx: number | null;
|
||||
bytesRx: number | null;
|
||||
}
|
||||
|
||||
// In-memory buffer of records waiting to be flushed
|
||||
let buffer: ConnectionLogRecord[] = [];
|
||||
|
||||
/**
|
||||
* Check if an error is a deadlock error
|
||||
*/
|
||||
function isDeadlockError(error: any): boolean {
|
||||
return (
|
||||
error?.code === "40P01" ||
|
||||
error?.cause?.code === "40P01" ||
|
||||
(error?.message && error.message.includes("deadlock"))
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Execute a function with retry logic for deadlock handling
|
||||
*/
|
||||
async function withDeadlockRetry<T>(
|
||||
operation: () => Promise<T>,
|
||||
context: string
|
||||
): Promise<T> {
|
||||
let attempt = 0;
|
||||
while (true) {
|
||||
try {
|
||||
return await operation();
|
||||
} catch (error: any) {
|
||||
if (isDeadlockError(error) && attempt < MAX_RETRIES) {
|
||||
attempt++;
|
||||
const baseDelay = Math.pow(2, attempt - 1) * BASE_DELAY_MS;
|
||||
const jitter = Math.random() * baseDelay;
|
||||
const delay = baseDelay + jitter;
|
||||
logger.warn(
|
||||
`Deadlock detected in ${context}, retrying attempt ${attempt}/${MAX_RETRIES} after ${delay.toFixed(0)}ms`
|
||||
);
|
||||
await new Promise((resolve) => setTimeout(resolve, delay));
|
||||
continue;
|
||||
}
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
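// Editor note (not part of the commit): with BASE_DELAY_MS = 50 and the jitter
// above, the retry waits fall in 50-100ms, then 100-200ms, then 200-400ms
// before the error is rethrown.
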
/**
 * Decompress a base64-encoded zlib-compressed string into parsed JSON.
 */
async function decompressConnectionLog(
    compressed: string
): Promise<ConnectionSessionData[]> {
    const compressedBuffer = Buffer.from(compressed, "base64");
    const decompressed = await zlibInflate(compressedBuffer);
    const jsonString = decompressed.toString("utf-8");
    const parsed = JSON.parse(jsonString);

    if (!Array.isArray(parsed)) {
        throw new Error("Decompressed connection log data is not an array");
    }

    return parsed;
}

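// Editor sketch (not part of the commit): producing a payload this function can
// decode, using Node's zlib deflate (the inverse of inflate) plus base64.
// import { deflateSync } from "zlib";
// const sessions: ConnectionSessionData[] = [/* ... */];
// const compressed = deflateSync(
//     Buffer.from(JSON.stringify(sessions), "utf-8")
// ).toString("base64");
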
/**
 * Convert an ISO 8601 timestamp string to epoch seconds.
 * Returns null if the input is falsy.
 */
function toEpochSeconds(isoString: string | undefined | null): number | null {
    if (!isoString) {
        return null;
    }
    const ms = new Date(isoString).getTime();
    if (isNaN(ms)) {
        return null;
    }
    return Math.floor(ms / 1000);
}

/**
 * Flush all buffered connection log records to the database.
 *
 * Swaps out the buffer before writing so that any records added during the
 * flush are captured in the new buffer rather than being lost. Entries that
 * fail to write are re-queued back into the buffer so they will be retried
 * on the next flush.
 *
 * This function is exported so that the application's graceful-shutdown
 * cleanup handler can call it before the process exits.
 */
export async function flushConnectionLogToDb(): Promise<void> {
    if (buffer.length === 0) {
        return;
    }

    // Atomically swap out the buffer so new data keeps flowing in
    const snapshot = buffer;
    buffer = [];

    logger.debug(
        `Flushing ${snapshot.length} connection log record(s) to the database`
    );

    // Insert in batches to avoid overly large SQL statements
    for (let i = 0; i < snapshot.length; i += INSERT_BATCH_SIZE) {
        const batch = snapshot.slice(i, i + INSERT_BATCH_SIZE);

        try {
            await withDeadlockRetry(async () => {
                await logsDb.insert(connectionAuditLog).values(batch);
            }, `flush connection log batch (${batch.length} records)`);
        } catch (error) {
            logger.error(
                `Failed to flush connection log batch of ${batch.length} records:`,
                error
            );

            // Re-queue the failed batch so it is retried on the next flush
            buffer = [...batch, ...buffer];

            // Cap buffer to prevent unbounded growth if DB is unreachable
            if (buffer.length > MAX_BUFFERED_RECORDS * 5) {
                const dropped = buffer.length - MAX_BUFFERED_RECORDS * 5;
                buffer = buffer.slice(0, MAX_BUFFERED_RECORDS * 5);
                logger.warn(
                    `Connection log buffer overflow, dropped ${dropped} oldest records`
                );
            }

            // Stop trying further batches from this snapshot — they'll be
            // picked up by the next flush via the re-queued records above
            const remaining = snapshot.slice(i + INSERT_BATCH_SIZE);
            if (remaining.length > 0) {
                buffer = [...remaining, ...buffer];
            }
            break;
        }
    }
}

const flushTimer = setInterval(async () => {
    try {
        await flushConnectionLogToDb();
    } catch (error) {
        logger.error(
            "Unexpected error during periodic connection log flush:",
            error
        );
    }
}, FLUSH_INTERVAL_MS);

// Calling unref() means this timer will not keep the Node.js event loop alive
// on its own — the process can still exit normally when there is no other work
// left. The graceful-shutdown path will call flushConnectionLogToDb() explicitly
// before process.exit(), so no data is lost.
flushTimer.unref();

export async function cleanUpOldLogs(orgId: string, retentionDays: number) {
    const cutoffTimestamp = calculateCutoffTimestamp(retentionDays);

    try {
        await logsDb
            .delete(connectionAuditLog)
            .where(
                and(
                    lt(connectionAuditLog.startedAt, cutoffTimestamp),
                    eq(connectionAuditLog.orgId, orgId)
                )
            );

        // logger.debug(
        //     `Cleaned up connection audit logs older than ${retentionDays} days`
        // );
    } catch (error) {
        logger.error("Error cleaning up old connection audit logs:", error);
    }
}

export const handleConnectionLogMessage: MessageHandler = async (context) => {
    const { message, client } = context;
    const newt = client as Newt;

    if (!newt) {
        logger.warn("Connection log received but no newt client in context");
        return;
    }

    if (!newt.siteId) {
        logger.warn("Connection log received but newt has no siteId");
        return;
    }

    if (!message.data?.compressed) {
        logger.warn("Connection log message missing compressed data");
        return;
    }

    // Look up the org for this site
    const [site] = await db
        .select({ orgId: sites.orgId, orgSubnet: orgs.subnet })
        .from(sites)
        .innerJoin(orgs, eq(sites.orgId, orgs.orgId))
        .where(eq(sites.siteId, newt.siteId));

    if (!site) {
        logger.warn(
            `Connection log received but site ${newt.siteId} not found in database`
        );
        return;
    }

    const orgId = site.orgId;

    // Extract the CIDR suffix (e.g. "/16") from the org subnet so we can
    // reconstruct the exact subnet string stored on each client record.
    const cidrSuffix = site.orgSubnet?.includes("/")
        ? site.orgSubnet.substring(site.orgSubnet.indexOf("/"))
        : null;

    let sessions: ConnectionSessionData[];
    try {
        sessions = await decompressConnectionLog(message.data.compressed);
    } catch (error) {
        logger.error("Failed to decompress connection log data:", error);
        return;
    }

    if (sessions.length === 0) {
        return;
    }

    logger.debug(`Sessions: ${JSON.stringify(sessions)}`);

    // Build a map from sourceAddr → { clientId, userId } by querying clients
    // whose subnet field matches exactly. Client subnets are stored with the
    // org's CIDR suffix (e.g. "100.90.128.5/16"), so we reconstruct that from
    // each unique sourceAddr + the org's CIDR suffix and do a targeted IN query.
    const ipToClient = new Map<string, { clientId: number; userId: string | null }>();

    if (cidrSuffix) {
        // Collect unique source addresses so we only query for what we need
        const uniqueSourceAddrs = new Set<string>();
        for (const session of sessions) {
            if (session.sourceAddr) {
                uniqueSourceAddrs.add(session.sourceAddr);
            }
        }

        if (uniqueSourceAddrs.size > 0) {
            // Construct the exact subnet strings as stored in the DB
            const subnetQueries = Array.from(uniqueSourceAddrs).map(
                (addr) => {
                    // Strip port if present (e.g. "100.90.128.1:38004" → "100.90.128.1")
                    const ip = addr.includes(":") ? addr.split(":")[0] : addr;
                    return `${ip}${cidrSuffix}`;
                }
            );

            logger.debug(`Subnet queries: ${JSON.stringify(subnetQueries)}`);

            const matchedClients = await db
                .select({
                    clientId: clients.clientId,
                    userId: clients.userId,
                    subnet: clients.subnet
                })
                .from(clients)
                .where(
                    and(
                        eq(clients.orgId, orgId),
                        inArray(clients.subnet, subnetQueries)
                    )
                );

            for (const c of matchedClients) {
                const ip = c.subnet.split("/")[0];
                logger.debug(`Client ${c.clientId} subnet ${c.subnet} matches ${ip}`);
                ipToClient.set(ip, { clientId: c.clientId, userId: c.userId });
            }
        }
    }

    // Convert to DB records and add to the buffer
    for (const session of sessions) {
        // Validate required fields
        if (
            !session.sessionId ||
            !session.resourceId ||
            !session.sourceAddr ||
            !session.destAddr ||
            !session.protocol
        ) {
            logger.debug(
                `Skipping connection log session with missing required fields: ${JSON.stringify(session)}`
            );
            continue;
        }

        const startedAt = toEpochSeconds(session.startedAt);
        if (startedAt === null) {
            logger.debug(
                `Skipping connection log session with invalid startedAt: ${session.startedAt}`
            );
            continue;
        }

        // Match the source address to a client. The sourceAddr is the
        // client's IP on the WireGuard network, which corresponds to the IP
        // portion of the client's subnet CIDR (e.g. "100.90.128.5/24").
        // Strip port if present (e.g. "100.90.128.1:38004" → "100.90.128.1")
        const sourceIp = session.sourceAddr.includes(":")
            ? session.sourceAddr.split(":")[0]
            : session.sourceAddr;
        const clientInfo = ipToClient.get(sourceIp) ?? null;

        buffer.push({
            sessionId: session.sessionId,
            siteResourceId: session.resourceId,
            orgId,
            siteId: newt.siteId,
            clientId: clientInfo?.clientId ?? null,
            userId: clientInfo?.userId ?? null,
            sourceAddr: session.sourceAddr,
            destAddr: session.destAddr,
            protocol: session.protocol,
            startedAt,
            endedAt: toEpochSeconds(session.endedAt),
            bytesTx: session.bytesTx ?? null,
            bytesRx: session.bytesRx ?? null
        });
    }

    logger.debug(
        `Buffered ${sessions.length} connection log session(s) from newt ${newt.newtId} (site ${newt.siteId})`
    );

    // If the buffer has grown large enough, trigger an immediate flush
    if (buffer.length >= MAX_BUFFERED_RECORDS) {
        // Fire and forget — errors are handled inside flushConnectionLogToDb
        flushConnectionLogToDb().catch((error) => {
            logger.error(
                "Unexpected error during size-triggered connection log flush:",
                error
            );
        });
    }
};
1 server/private/routers/newt/index.ts (Normal file)
@@ -0,0 +1 @@
export * from "./handleConnectionLogMessage";

@@ -18,10 +18,12 @@ import {
} from "#private/routers/remoteExitNode";
import { MessageHandler } from "@server/routers/ws";
import { build } from "@server/build";
import { handleConnectionLogMessage } from "#dynamic/routers/newt";

export const messageHandlers: Record<string, MessageHandler> = {
    "remoteExitNode/register": handleRemoteExitNodeRegisterMessage,
    "remoteExitNode/ping": handleRemoteExitNodePingMessage
    "remoteExitNode/ping": handleRemoteExitNodePingMessage,
    "newt/access-log": handleConnectionLogMessage,
};

if (build != "saas") {

@@ -91,3 +91,50 @@ export type QueryAccessAuditLogResponse = {
        locations: string[];
    };
};

export type QueryConnectionAuditLogResponse = {
    log: {
        sessionId: string;
        siteResourceId: number | null;
        orgId: string | null;
        siteId: number | null;
        clientId: number | null;
        userId: string | null;
        sourceAddr: string;
        destAddr: string;
        protocol: string;
        startedAt: number;
        endedAt: number | null;
        bytesTx: number | null;
        bytesRx: number | null;
        resourceName: string | null;
        resourceNiceId: string | null;
        siteName: string | null;
        siteNiceId: string | null;
        clientName: string | null;
        clientNiceId: string | null;
        clientType: string | null;
        userEmail: string | null;
    }[];
    pagination: {
        total: number;
        limit: number;
        offset: number;
    };
    filterAttributes: {
        protocols: string[];
        destAddrs: string[];
        clients: {
            id: number;
            name: string;
        }[];
        resources: {
            id: number;
            name: string | null;
        }[];
        users: {
            id: string;
            email: string | null;
        }[];
    };
};

@@ -1,15 +1,54 @@
import { sendToClient } from "#dynamic/routers/ws";
import { db, olms, Transaction } from "@server/db";
import { db, newts, olms } from "@server/db";
import {
    Alias,
    convertSubnetProxyTargetsV2ToV1,
    SubnetProxyTarget,
    SubnetProxyTargetV2
} from "@server/lib/ip";
import { canCompress } from "@server/lib/clientVersionChecks";
import { Alias, SubnetProxyTarget } from "@server/lib/ip";
import logger from "@server/logger";
import { eq } from "drizzle-orm";
import semver from "semver";

const NEWT_V2_TARGETS_VERSION = ">=1.10.3";

export async function convertTargetsIfNessicary(
    newtId: string,
    targets: SubnetProxyTarget[] | SubnetProxyTargetV2[]
) {
    // get the newt
    const [newt] = await db
        .select()
        .from(newts)
        .where(eq(newts.newtId, newtId));
    if (!newt) {
        throw new Error(`No newt found for id: ${newtId}`);
    }

    // check the semver
    if (
        newt.version &&
        !semver.satisfies(newt.version, NEWT_V2_TARGETS_VERSION)
    ) {
        logger.debug(
            `convertTargetsIfNessicary: Newt version ${newt.version} does not support targets v2, falling back`
        );
        targets = convertSubnetProxyTargetsV2ToV1(
            targets as SubnetProxyTargetV2[]
        );
    }

    return targets;
}

export async function addTargets(
    newtId: string,
    targets: SubnetProxyTarget[],
    targets: SubnetProxyTarget[] | SubnetProxyTargetV2[],
    version?: string | null
) {
    targets = await convertTargetsIfNessicary(newtId, targets);

    await sendToClient(
        newtId,
        {
@@ -22,9 +61,11 @@ export async function addTargets(

export async function removeTargets(
    newtId: string,
    targets: SubnetProxyTarget[],
    targets: SubnetProxyTarget[] | SubnetProxyTargetV2[],
    version?: string | null
) {
    targets = await convertTargetsIfNessicary(newtId, targets);

    await sendToClient(
        newtId,
        {
@@ -38,11 +79,39 @@ export async function removeTargets(
export async function updateTargets(
    newtId: string,
    targets: {
        oldTargets: SubnetProxyTarget[];
        newTargets: SubnetProxyTarget[];
        oldTargets: SubnetProxyTarget[] | SubnetProxyTargetV2[];
        newTargets: SubnetProxyTarget[] | SubnetProxyTargetV2[];
    },
    version?: string | null
) {
    // get the newt
    const [newt] = await db
        .select()
        .from(newts)
        .where(eq(newts.newtId, newtId));
    if (!newt) {
        logger.error(`updateTargets: No newt found for id: ${newtId}`);
        return;
    }

    // check the semver
    if (
        newt.version &&
        !semver.satisfies(newt.version, NEWT_V2_TARGETS_VERSION)
    ) {
        logger.debug(
            `updateTargets: Newt version ${newt.version} does not support targets v2, falling back`
        );
        targets = {
            oldTargets: convertSubnetProxyTargetsV2ToV1(
                targets.oldTargets as SubnetProxyTargetV2[]
            ),
            newTargets: convertSubnetProxyTargetsV2ToV1(
                targets.newTargets as SubnetProxyTargetV2[]
            )
        };
    }

    await sendToClient(
        newtId,
        {
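The version gate above reduces to one semver.satisfies check against NEWT_V2_TARGETS_VERSION. A standalone illustration of the decision (the version strings are arbitrary examples):

import semver from "semver";

const NEWT_V2_TARGETS_VERSION = ">=1.10.3";

for (const version of ["1.10.2", "1.10.3", "1.11.0"]) {
    // 1.10.2 fails the range, so its targets get converted down to V1
    const supportsV2 = semver.satisfies(version, NEWT_V2_TARGETS_VERSION);
    console.log(`${version}: ${supportsV2 ? "send V2 targets" : "convert to V1"}`);
}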
@@ -16,8 +16,8 @@ import { eq, and } from "drizzle-orm";
import config from "@server/lib/config";
import {
    formatEndpoint,
    generateSubnetProxyTargets,
    SubnetProxyTarget
    generateSubnetProxyTargetV2,
    SubnetProxyTargetV2
} from "@server/lib/ip";

export async function buildClientConfigurationForNewtClient(
@@ -143,7 +143,7 @@ export async function buildClientConfigurationForNewtClient(
        .from(siteResources)
        .where(eq(siteResources.siteId, siteId));

    const targetsToSend: SubnetProxyTarget[] = [];
    const targetsToSend: SubnetProxyTargetV2[] = [];

    for (const resource of allSiteResources) {
        // Get clients associated with this specific resource
@@ -168,12 +168,14 @@ export async function buildClientConfigurationForNewtClient(
            )
        );

        const resourceTargets = generateSubnetProxyTargets(
        const resourceTarget = generateSubnetProxyTargetV2(
            resource,
            resourceClients
        );

        targetsToSend.push(...resourceTargets);
        if (resourceTarget) {
            targetsToSend.push(resourceTarget);
        }
    }

    return {
13
server/routers/newt/handleConnectionLogMessage.ts
Normal file
@@ -0,0 +1,13 @@
import { MessageHandler } from "@server/routers/ws";

export async function flushConnectionLogToDb(): Promise<void> {
    return;
}

export async function cleanUpOldLogs(orgId: string, retentionDays: number) {
    return;
}

export const handleConnectionLogMessage: MessageHandler = async (context) => {
    return;
};
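All three exports are deliberate no-ops in this build; the #dynamic alias resolves to the private implementation where one ships. Purely for orientation, a hypothetical shape for cleanUpOldLogs; the connectionAuditLog table export, the epoch-second timestamps, and the retention semantics are assumptions, not the private code:

import { db, connectionAuditLog } from "@server/db"; // table export is assumed
import { and, eq, lt } from "drizzle-orm";

// Hypothetical retention sweep: drop an org's connection logs older than the cutoff.
async function cleanUpOldLogsSketch(orgId: string, retentionDays: number) {
    if (retentionDays <= 0) return; // assume non-positive settings are handled elsewhere
    const cutoff = Math.floor(Date.now() / 1000) - retentionDays * 24 * 60 * 60;
    await db
        .delete(connectionAuditLog)
        .where(
            and(
                eq(connectionAuditLog.orgId, orgId),
                lt(connectionAuditLog.startedAt, cutoff)
            )
        );
}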
@@ -6,6 +6,7 @@ import { db, ExitNode, exitNodes, Newt, sites } from "@server/db";
import { eq } from "drizzle-orm";
import { sendToExitNode } from "#dynamic/lib/exitNodes";
import { buildClientConfigurationForNewtClient } from "./buildConfiguration";
import { convertTargetsIfNessicary } from "../client/targets";
import { canCompress } from "@server/lib/clientVersionChecks";

const inputSchema = z.object({
@@ -127,13 +128,15 @@ export const handleGetConfigMessage: MessageHandler = async (context) => {
        exitNode
    );

    const targetsToSend = await convertTargetsIfNessicary(newt.newtId, targets);

    return {
        message: {
            type: "newt/wg/receive-config",
            data: {
                ipAddress: site.address,
                peers,
                targets
                targets: targetsToSend
            }
        },
        options: {
@@ -8,3 +8,4 @@ export * from "./handleNewtPingRequestMessage";
export * from "./handleApplyBlueprintMessage";
export * from "./handleNewtPingMessage";
export * from "./handleNewtDisconnectingMessage";
export * from "./handleConnectionLogMessage";
@@ -88,7 +88,7 @@ const createSiteResourceSchema = z
        },
        {
            message:
                "Destination must be a valid IP address or valid domain AND alias is required"
                "Destination must be a valid IPv4 address or valid domain AND alias is required"
        }
    )
    .refine(
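For context, this message belongs to a zod .refine(...) with a custom error. The pattern in isolation, with a deliberately simplified predicate (the real check is stricter):

import { z } from "zod";

// Illustrative only: accept a dotted-quad IPv4 or a bare domain name.
const destinationSchema = z.string().refine(
    (val) =>
        /^(\d{1,3}\.){3}\d{1,3}$/.test(val) ||
        /^[a-z0-9-]+(\.[a-z0-9-]+)+$/i.test(val),
    { message: "Destination must be a valid IPv4 address or valid domain" }
);

destinationSchema.parse("10.0.0.1"); // passes
destinationSchema.parse("example.internal"); // passes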
@@ -24,7 +24,7 @@ import { updatePeerData, updateTargets } from "@server/routers/client/targets";
import {
    generateAliasConfig,
    generateRemoteSubnets,
    generateSubnetProxyTargets,
    generateSubnetProxyTargetV2,
    isIpInCidr,
    portRangeStringSchema
} from "@server/lib/ip";
@@ -608,18 +608,18 @@ export async function handleMessagingForUpdatedSiteResource(

    // Only update targets on newt if destination or port ranges changed
    if (destinationChanged || portRangesChanged) {
        const oldTargets = generateSubnetProxyTargets(
        const oldTarget = generateSubnetProxyTargetV2(
            existingSiteResource,
            mergedAllClients
        );
        const newTargets = generateSubnetProxyTargets(
        const newTarget = generateSubnetProxyTargetV2(
            updatedSiteResource,
            mergedAllClients
        );

        await updateTargets(newt.newtId, {
            oldTargets: oldTargets,
            newTargets: newTargets
            oldTargets: oldTarget ? [oldTarget] : [],
            newTargets: newTarget ? [newTarget] : []
        }, newt.version);
    }