Add resource column to hc and remove —

This commit is contained in:
Owen
2026-04-16 17:42:30 -07:00
parent b958537f3e
commit d6c15c8b81
60 changed files with 257 additions and 211 deletions

View File

@@ -144,7 +144,7 @@ async function pushCertUpdateToAffectedNewts(
await cache.del(`cert:${resource.fullDomain}`);
}
// Generate target once — same cert applies to all sites for this resource
// Generate target once - same cert applies to all sites for this resource
const newTargets = await generateSubnetProxyTargetV2(
resource,
resourceClients
@@ -157,7 +157,7 @@ async function pushCertUpdateToAffectedNewts(
continue;
}
// Construct the old targets — same routing shape but with the previous cert/key.
// Construct the old targets - same routing shape but with the previous cert/key.
// The newt only uses destPrefix/sourcePrefixes for removal, but we keep the
// semantics correct so the update message accurately reflects what changed.
const oldTargets: SubnetProxyTargetV2[] = newTargets.map((t) => ({

View File

@@ -153,7 +153,7 @@ export async function flushConnectionLogToDb(): Promise<void> {
);
}
// Stop processing further batches from this snapshot — they will
// Stop processing further batches from this snapshot - they will
// be picked up via the re-queued records on the next flush.
const remaining = snapshot.slice(i + INSERT_BATCH_SIZE);
if (remaining.length > 0) {
@@ -180,7 +180,7 @@ const flushTimer = setInterval(async () => {
}, FLUSH_INTERVAL_MS);
// Calling unref() means this timer will not keep the Node.js event loop alive
// on its own — the process can still exit normally when there is no other work
// on its own - the process can still exit normally when there is no other work
// left. The graceful-shutdown path will call flushConnectionLogToDb() explicitly
// before process.exit(), so no data is lost.
flushTimer.unref();
@@ -223,7 +223,7 @@ export function logConnectionAudit(record: ConnectionLogRecord): void {
buffer.push(record);
if (buffer.length >= MAX_BUFFERED_RECORDS) {
// Fire and forget — errors are handled inside flushConnectionLogToDb
// Fire and forget - errors are handled inside flushConnectionLogToDb
flushConnectionLogToDb().catch((error) => {
logger.error(
"Unexpected error during size-triggered connection log flush:",
@@ -231,4 +231,4 @@ export function logConnectionAudit(record: ConnectionLogRecord): void {
);
});
}
}
}

View File

@@ -37,7 +37,7 @@ const DEFAULT_FORMAT: PayloadFormat = "json_array";
*
* **Payload formats** (controlled by `config.format`):
*
* - `json_array` (default) — one POST per batch, body is a JSON array:
* - `json_array` (default) - one POST per batch, body is a JSON array:
* ```json
* [
* { "event": "request", "timestamp": "2024-01-01T00:00:00.000Z", "data": { … } },
@@ -46,7 +46,7 @@ const DEFAULT_FORMAT: PayloadFormat = "json_array";
* ```
* `Content-Type: application/json`
*
* - `ndjson` — one POST per batch, body is newline-delimited JSON (one object
* - `ndjson` - one POST per batch, body is newline-delimited JSON (one object
* per line, no outer array). Required by Splunk HEC, Elastic/OpenSearch,
* and Grafana Loki:
* ```
@@ -55,7 +55,7 @@ const DEFAULT_FORMAT: PayloadFormat = "json_array";
* ```
* `Content-Type: application/x-ndjson`
*
* - `json_single` — one POST **per event**, body is a plain JSON object.
* - `json_single` - one POST **per event**, body is a plain JSON object.
* Use only for endpoints that cannot handle batches at all.
*
* With a body template each event is rendered through the template before
@@ -319,4 +319,4 @@ function epochSecondsToIso(epochSeconds: number): string {
function escapeJsonString(value: string): string {
// JSON.stringify produces `"<escaped>"` — strip the outer quotes.
return JSON.stringify(value).slice(1, -1);
}
}

View File

@@ -60,9 +60,9 @@ export type AuthType = "none" | "bearer" | "basic" | "custom";
/**
* Controls how the batch of events is serialised into the HTTP request body.
*
* - `json_array` `[{…}, {…}]` — default; one POST per batch wrapped in a
* - `json_array` `[{…}, {…}]` - default; one POST per batch wrapped in a
* JSON array. Works with most generic webhooks and Datadog.
* - `ndjson` `{…}\n{…}` — newline-delimited JSON, one object per
* - `ndjson` `{…}\n{…}` - newline-delimited JSON, one object per
* line. Required by Splunk HEC, Elastic/OpenSearch, Loki.
* - `json_single` — one HTTP POST per event, body is a plain JSON object.
* Use only for endpoints that cannot handle batches at all.
@@ -131,4 +131,4 @@ export interface DestinationFailureState {
nextRetryAt: number;
/** Date.now() value of the very first failure in the current streak */
firstFailedAt: number;
}
}

View File

@@ -267,7 +267,7 @@ export async function getTraefikConfig(
});
});
// Query siteResources in HTTP mode with SSL enabled and aliases — cert generation / HTTPS edge
// Query siteResources in HTTP mode with SSL enabled and aliases - cert generation / HTTPS edge
const siteResourcesWithFullDomain = await db
.select({
siteResourceId: siteResources.siteResourceId,
@@ -1010,7 +1010,7 @@ export async function getTraefikConfig(
}
}
// HTTPS router — presence of this entry triggers cert generation
// HTTPS router - presence of this entry triggers cert generation
config_output.http.routers[siteResourceRouterName] = {
entryPoints: [
config.getRawConfig().traefik.https_entrypoint
@@ -1022,7 +1022,7 @@ export async function getTraefikConfig(
tls
};
// Assets bypass router — lets Next.js static files load without rewrite
// Assets bypass router - lets Next.js static files load without rewrite
config_output.http.routers[`${siteResourceRouterName}-assets`] = {
entryPoints: [
config.getRawConfig().traefik.https_entrypoint

View File

@@ -11,7 +11,7 @@
* This file is not licensed under the AGPLv3.
*/
import { db, targetHealthCheck } from "@server/db";
import { db, targetHealthCheck, targets, resources } from "@server/db";
import response from "@server/lib/response";
import HttpCode from "@server/types/HttpCode";
import createHttpError from "http-errors";
@@ -84,12 +84,36 @@ export async function listHealthChecks(
const whereClause = and(
eq(targetHealthCheck.orgId, orgId),
isNull(targetHealthCheck.targetId)
);
const list = await db
.select()
.select({
targetHealthCheckId: targetHealthCheck.targetHealthCheckId,
name: targetHealthCheck.name,
hcEnabled: targetHealthCheck.hcEnabled,
hcHealth: targetHealthCheck.hcHealth,
hcMode: targetHealthCheck.hcMode,
hcHostname: targetHealthCheck.hcHostname,
hcPort: targetHealthCheck.hcPort,
hcPath: targetHealthCheck.hcPath,
hcScheme: targetHealthCheck.hcScheme,
hcMethod: targetHealthCheck.hcMethod,
hcInterval: targetHealthCheck.hcInterval,
hcUnhealthyInterval: targetHealthCheck.hcUnhealthyInterval,
hcTimeout: targetHealthCheck.hcTimeout,
hcHeaders: targetHealthCheck.hcHeaders,
hcFollowRedirects: targetHealthCheck.hcFollowRedirects,
hcStatus: targetHealthCheck.hcStatus,
hcTlsServerName: targetHealthCheck.hcTlsServerName,
hcHealthyThreshold: targetHealthCheck.hcHealthyThreshold,
hcUnhealthyThreshold: targetHealthCheck.hcUnhealthyThreshold,
resourceId: resources.resourceId,
resourceName: resources.name,
resourceNiceId: resources.niceId
})
.from(targetHealthCheck)
.leftJoin(targets, eq(targetHealthCheck.targetId, targets.targetId))
.leftJoin(resources, eq(targets.resourceId, resources.resourceId))
.where(whereClause)
.orderBy(sql`${targetHealthCheck.targetHealthCheckId} DESC`)
.limit(limit)
@@ -124,7 +148,10 @@ export async function listHealthChecks(
hcStatus: row.hcStatus ?? null,
hcTlsServerName: row.hcTlsServerName ?? null,
hcHealthyThreshold: row.hcHealthyThreshold ?? null,
hcUnhealthyThreshold: row.hcUnhealthyThreshold ?? null
hcUnhealthyThreshold: row.hcUnhealthyThreshold ?? null,
resourceId: row.resourceId ?? null,
resourceName: row.resourceName ?? null,
resourceNiceId: row.resourceNiceId ?? null
})),
pagination: {
total: count,