Compare commits

..

4 Commits

Author SHA1 Message Date
dependabot[bot]
1c95d46eaa Bump next from 15.5.14 to 15.5.15
Bumps [next](https://github.com/vercel/next.js) from 15.5.14 to 15.5.15.
- [Release notes](https://github.com/vercel/next.js/releases)
- [Changelog](https://github.com/vercel/next.js/blob/canary/release.js)
- [Commits](https://github.com/vercel/next.js/compare/v15.5.14...v15.5.15)

---
updated-dependencies:
- dependency-name: next
  dependency-version: 15.5.15
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
2026-04-14 00:05:43 +00:00
Owen Schwartz
72bc125f84 Merge pull request #2854 from fosrl/dev
Rename script
2026-04-13 16:23:39 -07:00
Owen Schwartz
b18ea66def Merge pull request #2853 from fosrl/dev
1.17.1-s.1
2026-04-13 12:28:08 -07:00
Owen Schwartz
41f541a531 Merge pull request #2852 from fosrl/dev
1.17.1-s.0
2026-04-13 11:36:36 -07:00
336 changed files with 3296 additions and 15641 deletions

View File

@@ -53,9 +53,9 @@ Pangolin is an open-source, identity-based remote access platform built on WireG
## Deployment Options
- **Pangolin Cloud** - Fully managed service - no infrastructure required.
- **Self-Host: Community Edition** - Free, open source, and licensed under AGPL-3.
- **Self-Host: Enterprise Edition** - Licensed under Fossorial Commercial License. Free for personal and hobbyist use, and for businesses making less than \$100K USD gross annual revenue.
- **Pangolin Cloud** Fully managed service - no infrastructure required.
- **Self-Host: Community Edition** Free, open source, and licensed under AGPL-3.
- **Self-Host: Enterprise Edition** Licensed under Fossorial Commercial License. Free for personal and hobbyist use, and for businesses making less than \$100K USD gross annual revenue.
## Key Features

View File

@@ -1 +0,0 @@
*-journal

View File

@@ -6,7 +6,7 @@ import sys
HEADER_TEXT = """/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
@@ -17,109 +17,87 @@ HEADER_TEXT = """/*
*/
"""
HEADER_NORMALIZED = HEADER_TEXT.strip()
def extract_leading_block_comment(content):
    """
    If the file content begins with a /* ... */ block comment, return the
    full text of that comment (including the delimiters) and the index at
    which the rest of the file starts (after any trailing newlines).

    Returns (None, 0) when no such comment is found.
    """
    # Ignore leading whitespace when deciding whether a comment opens the file.
    if not content.lstrip().startswith('/*'):
        return None, 0
    start = content.index('/*')
    close = content.find('*/', start + 2)
    if close == -1:
        # Opened but never terminated — treat as "no comment".
        return None, 0
    after = close + 2  # index just past the closing '*/'
    comment = content[start:after].strip()
    # Step over the newline characters that separate the comment from the body.
    while after < len(content) and content[after] in '\n\r':
        after += 1
    return comment, after
def should_add_header(file_path):
    """
    Return True when *file_path* should carry the commercial license header.

    A file qualifies when 'server/private' appears anywhere in its path;
    the match is case-insensitive. Content-based matching was considered
    and deliberately not enabled.
    """
    return 'server/private' in file_path.lower()
def process_directory(root_dir):
    """
    Recursively scan *root_dir* and reconcile license headers in every
    .ts / .tsx file, skipping any 'node_modules' directories.

    Files matched by should_add_header() get the canonical header added
    (or an outdated leading block comment replaced); files that should
    not carry the header have their leading block comment removed.
    Prints a per-file action log and a final summary.

    NOTE(review): ANY leading /* ... */ comment counts as "the header"
    here, so an ordinary file-top comment in a non-qualifying file will
    be stripped — confirm this is intended.
    """
    print(f"Scanning directory: {root_dir}")
    files_processed = 0
    files_modified = 0
    for root, dirs, files in os.walk(root_dir):
        # Prune 'node_modules' in place so os.walk never descends into it.
        if 'node_modules' in dirs:
            dirs.remove('node_modules')
        for file in files:
            if not (file.endswith('.ts') or file.endswith('.tsx')):
                continue
            file_path = os.path.join(root, file)
            files_processed += 1
            try:
                with open(file_path, 'r', encoding='utf-8') as f:
                    original_content = f.read()
                existing_comment, body_start = extract_leading_block_comment(
                    original_content
                )
                has_any_header = existing_comment is not None
                has_correct_header = existing_comment == HEADER_NORMALIZED
                # Body of the file with any leading block comment removed.
                body = (
                    original_content[body_start:]
                    if has_any_header
                    else original_content
                )
                if should_add_header(file_path):
                    if has_correct_header:
                        print(f"Header up-to-date: {file_path}")
                    else:
                        # Either no header exists or it is outdated — write
                        # the canonical header followed by the original body.
                        action = (
                            "Replaced header in"
                            if has_any_header
                            else "Added header to"
                        )
                        with open(file_path, 'w', encoding='utf-8') as f:
                            f.write(HEADER_NORMALIZED + '\n\n' + body)
                        print(f"{action}: {file_path}")
                        files_modified += 1
                elif has_any_header:
                    # The file must not carry a header; strip the stale one.
                    with open(file_path, 'w', encoding='utf-8') as f:
                        f.write(body)
                    print(f"Removed header from: {file_path}")
                    files_modified += 1
                else:
                    print(f"No header needed: {file_path}")
            except Exception as e:
                # Keep scanning even if one file is unreadable or unwritable.
                print(f"Error processing file {file_path}: {e}")
    print("\n--- Scan Complete ---")
    print(f"Total .ts or .tsx files found: {files_processed}")
    print(f"Files modified (added/replaced/removed): {files_modified}")
if __name__ == "__main__":
@@ -128,7 +106,7 @@ if __name__ == "__main__":
if len(sys.argv) > 1:
target_directory = sys.argv[1]
else:
target_directory = '.' # Default to current directory
target_directory = '.' # Default to current directory
if not os.path.isdir(target_directory):
print(f"Error: Directory '{target_directory}' not found.")

View File

@@ -1993,7 +1993,7 @@
"description": "По-надежден и по-нисък поддръжка на Самостоятелно-хостван Панголиин сървър с допълнителни екстри",
"introTitle": "Управлявано Самостоятелно-хостван Панголиин",
"introDescription": "е опция за внедряване, предназначена за хора, които искат простота и допълнителна надеждност, като същевременно запазят данните си частни и самостоятелно-хоствани.",
"introDetail": "С тази опция все още управлявате свой собствен Панголиин възел - вашите тунели, SSL терминатора и трафик остават на вашия сървър. Разликата е, че управлението и мониторингът се обработват чрез нашия облачен панел за контрол, който отключва редица предимства:",
"introDetail": "С тази опция все още управлявате свой собствен Панголиин възел вашите тунели, SSL терминатора и трафик остават на вашия сървър. Разликата е, че управлението и мониторингът се обработват чрез нашия облачен панел за контрол, който отключва редица предимства:",
"benefitSimplerOperations": {
"title": "По-прости операции",
"description": "Няма нужда да управлявате свой собствен имейл сървър или да настройвате сложни аларми. Ще получите проверки и предупреждения при прекъсване от самото начало."
@@ -2296,7 +2296,7 @@
"alerts": {
"commercialUseDisclosure": {
"title": "Разкриване на употреба",
"description": "Изберете лицензионен клас, който точно отразява вашата целена употреба. Персоналният лиценз позволява безплатно ползване на софтуера за индивидуална, некомерсиална или маломащабна комерсиална дейност с годишен брутен приход под 100,000 USD. Всяко ползване извън тези граници - включително ползване във фирма, организация или друга доходоносна среда - изисква валиден корпоративен лиценз и плащане на съответната лицензионна такса. Всички потребители, независимо дали са лични или корпоративни, трябва да спазват Условията на Fossorial Commercial License."
"description": "Изберете лицензионен клас, който точно отразява вашата целена употреба. Персоналният лиценз позволява безплатно ползване на софтуера за индивидуална, некомерсиална или маломащабна комерсиална дейност с годишен брутен приход под 100,000 USD. Всяко ползване извън тези граници включително ползване във фирма, организация или друга доходоносна среда изисква валиден корпоративен лиценз и плащане на съответната лицензионна такса. Всички потребители, независимо дали са лични или корпоративни, трябва да спазват Условията на Fossorial Commercial License."
},
"trialPeriodInformation": {
"title": "Информация за пробен период",
@@ -2351,7 +2351,7 @@
},
"scale": {
"title": "Скала",
"description": "Функции за корпоративни клиенти, 50 потребители, 100 сайта и приоритетна поддръжка."
"description": "Предприятие, 50 потребители, 50 сайта и приоритетна поддръжка."
}
},
"personalUseOnly": "Само за лична употреба (безплатен лиценз - без проверка)",
@@ -2881,7 +2881,7 @@
"httpDestFormatJsonArrayTitle": "JSON масив",
"httpDestFormatJsonArrayDescription": "Една заявка на партида, тялото е JSON масив. Съвместим с повечето общи уеб куки и Datadog.",
"httpDestFormatNdjsonTitle": "NDJSON",
"httpDestFormatNdjsonDescription": "Една заявка на партида, тялото е ново линии отделени JSON - един обект на ред, няма външен масив. Изисквано от Splunk HEC, Elastic / OpenSearch и Grafana.",
"httpDestFormatNdjsonDescription": "Една заявка на партида, тялото е ново линии отделени JSON един обект на ред, няма външен масив. Изисквано от Splunk HEC, Elastic / OpenSearch и Grafana.",
"httpDestFormatSingleTitle": "Едно събитие на заявка",
"httpDestFormatSingleDescription": "Изпращат се отделни HTTP POST за всяко индивидуално събитие. Използвайте само за крайни точки, които не могат да обработват партиди.",
"httpDestLogTypesTitle": "Видове логове",

View File

@@ -1993,7 +1993,7 @@
"description": "Spolehlivější a nízko udržovaný Pangolinův server s dalšími zvony a bičkami",
"introTitle": "Spravovaný Pangolin",
"introDescription": "je možnost nasazení určená pro lidi, kteří chtějí jednoduchost a spolehlivost při zachování soukromých a samoobslužných dat.",
"introDetail": "Pomocí této volby stále provozujete vlastní uzel Pangolin - tunely, SSL terminály a provoz všech pobytů na vašem serveru. Rozdíl spočívá v tom, že řízení a monitorování se řeší prostřednictvím našeho cloudového panelu, který odemkne řadu výhod:",
"introDetail": "Pomocí této volby stále provozujete vlastní uzel Pangolin tunely, SSL terminály a provoz všech pobytů na vašem serveru. Rozdíl spočívá v tom, že řízení a monitorování se řeší prostřednictvím našeho cloudového panelu, který odemkne řadu výhod:",
"benefitSimplerOperations": {
"title": "Jednoduchý provoz",
"description": "Není třeba spouštět svůj vlastní poštovní server nebo nastavit komplexní upozornění. Ze schránky dostanete upozornění na zdravotní kontrolu a výpadek."
@@ -2351,7 +2351,7 @@
},
"scale": {
"title": "Měřítko",
"description": "Podnikové funkce, 50 uživatelů, 100 stránek a prioritní podpora."
"description": "Podnikové funkce, 50 uživatelů, 50 míst a prioritní podpory."
}
},
"personalUseOnly": "Pouze pro osobní použití (zdarma licence - bez ověření)",

View File

@@ -2296,7 +2296,7 @@
"alerts": {
"commercialUseDisclosure": {
"title": "Verwendungsanzeige",
"description": "Wählen Sie die Lizenz-Ebene, die Ihre beabsichtigte Nutzung genau widerspiegelt. Die Persönliche Lizenz erlaubt die freie Nutzung der Software für individuelle, nicht-kommerzielle oder kleine kommerzielle Aktivitäten mit jährlichen Brutto-Einnahmen von 100.000 USD. Über diese Grenzen hinausgehende Verwendungszwecke einschließlich der Verwendung innerhalb eines Unternehmens, einer Organisation, oder eine andere umsatzgenerierende Umgebung - erfordert eine gültige Enterprise-Lizenz und die Zahlung der Lizenzgebühr. Alle Benutzer, ob Personal oder Enterprise, müssen die Fossorial Commercial License Bedingungen einhalten."
"description": "Wählen Sie die Lizenz-Ebene, die Ihre beabsichtigte Nutzung genau widerspiegelt. Die Persönliche Lizenz erlaubt die freie Nutzung der Software für individuelle, nicht-kommerzielle oder kleine kommerzielle Aktivitäten mit jährlichen Brutto-Einnahmen von 100.000 USD. Über diese Grenzen hinausgehende Verwendungszwecke einschließlich der Verwendung innerhalb eines Unternehmens, einer Organisation, oder eine andere umsatzgenerierende Umgebung erfordert eine gültige Enterprise-Lizenz und die Zahlung der Lizenzgebühr. Alle Benutzer, ob Personal oder Enterprise, müssen die Fossorial Commercial License Bedingungen einhalten."
},
"trialPeriodInformation": {
"title": "Testperiode Information",
@@ -2351,7 +2351,7 @@
},
"scale": {
"title": "Maßstab",
"description": "Unternehmensmerkmale, 50 Benutzer, 100 Standorte und prioritärer Support."
"description": "Enterprise Features, 50 Benutzer, 50 Sites und Prioritätsunterstützung."
}
},
"personalUseOnly": "Nur persönliche Nutzung (kostenlose Lizenz - kein Checkout)",
@@ -2881,7 +2881,7 @@
"httpDestFormatJsonArrayTitle": "JSON Array",
"httpDestFormatJsonArrayDescription": "Eine Anfrage pro Stapel ist ein JSON-Array. Kompatibel mit den meisten generischen Webhooks und Datadog.",
"httpDestFormatNdjsonTitle": "NDJSON",
"httpDestFormatNdjsonDescription": "Eine Anfrage pro Batch, der Körper ist newline-getrenntes JSON - ein Objekt pro Zeile, kein äußeres Array. Benötigt von Splunk HEC, Elastic / OpenSearch, und Grafana Loki.",
"httpDestFormatNdjsonDescription": "Eine Anfrage pro Batch, der Körper ist newline-getrenntes JSON ein Objekt pro Zeile, kein äußeres Array. Benötigt von Splunk HEC, Elastic / OpenSearch, und Grafana Loki.",
"httpDestFormatSingleTitle": "Ein Ereignis pro Anfrage",
"httpDestFormatSingleDescription": "Sendet eine separate HTTP-POST für jedes einzelne Ereignis. Nur für Endpunkte, die Batches nicht handhaben können.",
"httpDestLogTypesTitle": "Log-Typen",

View File

@@ -1,8 +1,4 @@
{
"contactSalesEnable": "Contact sales to enable this feature.",
"contactSalesBookDemo": "Book a demo",
"contactSalesOr": "or",
"contactSalesContactUs": "contact us",
"setupCreate": "Create the organization, site, and resources",
"headerAuthCompatibilityInfo": "Enable this to force a 401 Unauthorized response when an authentication token is missing. This is required for browsers or specific HTTP libraries that do not send credentials without a server challenge.",
"headerAuthCompatibility": "Extended compatibility",
@@ -1260,7 +1256,6 @@
"actionViewLogs": "View Logs",
"noneSelected": "None selected",
"orgNotFound2": "No organizations found.",
"search": "Search…",
"searchPlaceholder": "Search...",
"emptySearchOptions": "No options found",
"create": "Create",
@@ -1345,108 +1340,10 @@
"sidebarGeneral": "Manage",
"sidebarLogAndAnalytics": "Log & Analytics",
"sidebarBluePrints": "Blueprints",
"sidebarAlerting": "Alerting",
"sidebarOrganization": "Organization",
"sidebarManagement": "Management",
"sidebarBillingAndLicenses": "Billing & Licenses",
"sidebarLogsAnalytics": "Analytics",
"alertingTitle": "Alerting",
"alertingDescription": "Define sources, triggers, and actions for notifications.",
"alertingRules": "Alert rules",
"alertingSearchRules": "Search rules…",
"alertingAddRule": "Create Rule",
"alertingColumnSource": "Source",
"alertingColumnTrigger": "Trigger",
"alertingColumnActions": "Actions",
"alertingColumnEnabled": "Enabled",
"alertingDeleteQuestion": "Delete this alert rule? This cannot be undone.",
"alertingDeleteRule": "Delete alert rule",
"alertingRuleDeleted": "Alert rule deleted",
"alertingRuleSaved": "Alert rule saved",
"alertingEditRule": "Edit alert rule",
"alertingCreateRule": "Create alert rule",
"alertingRuleCredenzaDescription": "Choose what to watch, when to fire, and how to notify your team.",
"alertingRuleNamePlaceholder": "Production site down",
"alertingRuleEnabled": "Rule enabled",
"alertingSectionSource": "Source",
"alertingSourceType": "Source type",
"alertingSourceSite": "Site",
"alertingSourceHealthCheck": "Health check",
"alertingPickSites": "Sites",
"alertingPickHealthChecks": "Health checks",
"alertingPickResources": "Resources",
"alertingSelectResources": "Select resources…",
"alertingResourcesSelected": "{count} resources selected",
"alertingResourcesEmpty": "No resources with targets in the first 10 results.",
"alertingSectionTrigger": "Trigger",
"alertingTrigger": "When to alert",
"alertingTriggerSiteOnline": "Site online",
"alertingTriggerSiteOffline": "Site offline",
"alertingTriggerHcHealthy": "Health check healthy",
"alertingTriggerHcUnhealthy": "Health check unhealthy",
"alertingSectionActions": "Actions",
"alertingAddAction": "Add action",
"alertingActionNotify": "Email",
"alertingActionWebhook": "Webhook",
"alertingActionType": "Action type",
"alertingNotifyUsers": "Users",
"alertingNotifyRoles": "Roles",
"alertingNotifyEmails": "Email addresses",
"alertingEmailPlaceholder": "Add email and press Enter",
"alertingWebhookMethod": "HTTP method",
"alertingWebhookSecret": "Signing secret (optional)",
"alertingWebhookSecretPlaceholder": "HMAC secret",
"alertingWebhookHeaders": "Headers",
"alertingAddHeader": "Add header",
"alertingSelectSites": "Select sites…",
"alertingSitesSelected": "{count} sites selected",
"alertingSelectHealthChecks": "Select health checks…",
"alertingHealthChecksSelected": "{count} health checks selected",
"alertingNoHealthChecks": "No targets with health checks enabled",
"alertingHealthCheckStub": "Health check source selection is not wired up yet - you can still configure triggers and actions.",
"alertingSelectUsers": "Select users…",
"alertingUsersSelected": "{count} users selected",
"alertingSelectRoles": "Select roles…",
"alertingRolesSelected": "{count} roles selected",
"alertingSummarySites": "Sites ({count})",
"alertingSummaryHealthChecks": "Health checks ({count})",
"alertingErrorNameRequired": "Enter a name",
"alertingErrorActionsMin": "Add at least one action",
"alertingErrorPickSites": "Select at least one site",
"alertingErrorPickHealthChecks": "Select at least one health check",
"alertingErrorTriggerSite": "Choose a site trigger",
"alertingErrorTriggerHealth": "Choose a health check trigger",
"alertingErrorNotifyRecipients": "Pick users, roles, or at least one email",
"alertingConfigureSource": "Configure Source",
"alertingConfigureTrigger": "Configure Trigger",
"alertingConfigureActions": "Configure Actions",
"alertingBackToRules": "Back to Rules",
"alertingDraftBadge": "Draft - save to store this rule",
"alertingSidebarHint": "Click a step on the canvas to edit it here.",
"alertingGraphCanvasTitle": "Rule Flow",
"alertingGraphCanvasDescription": "Visual overview of source, trigger, and actions. Select a node to edit it in the panel.",
"alertingNodeNotConfigured": "Not configured yet",
"alertingNodeActionsCount": "{count, plural, one {# action} other {# actions}}",
"alertingNodeRoleSource": "Source",
"alertingNodeRoleTrigger": "Trigger",
"alertingNodeRoleAction": "Action",
"alertingTabRules": "Alert Rules",
"alertingTabHealthChecks": "Health Checks",
"standaloneHcTableTitle": "Health Checks",
"standaloneHcSearchPlaceholder": "Search health checks…",
"standaloneHcAddButton": "Create Health Check",
"standaloneHcCreateTitle": "Create Health Check",
"standaloneHcEditTitle": "Edit Health Check",
"standaloneHcDescription": "Configure a HTTP or TCP health check for use in alert rules.",
"standaloneHcNameLabel": "Name",
"standaloneHcNamePlaceholder": "My HTTP Monitor",
"standaloneHcDeleteTitle": "Delete health check",
"standaloneHcDeleteQuestion": "Delete this health check? This cannot be undone.",
"standaloneHcDeleted": "Health check deleted",
"standaloneHcSaved": "Health check saved",
"standaloneHcColumnHealth": "Health",
"standaloneHcColumnMode": "Mode",
"standaloneHcColumnTarget": "Target",
"blueprints": "Blueprints",
"blueprintsDescription": "Apply declarative configurations and view previous runs",
"blueprintAdd": "Add Blueprint",
@@ -1852,8 +1749,8 @@
"retryAttempts": "Retry Attempts",
"expectedResponseCodes": "Expected Response Codes",
"expectedResponseCodesDescription": "HTTP status code that indicates healthy status. If left blank, 200-300 is considered healthy.",
"customHeaders": "Custom Request Headers",
"customHeadersDescription": "Request headers sent to the downstream targets. Headers new line separated: Header-Name: value",
"customHeaders": "Custom Headers",
"customHeadersDescription": "Headers new line separated: Header-Name: value",
"headersValidationError": "Headers must be in the format: Header-Name: value",
"saveHealthCheck": "Save Health Check",
"healthCheckSaved": "Health Check Saved",
@@ -1865,17 +1762,8 @@
"healthCheckIntervalMin": "Check interval must be at least 5 seconds",
"healthCheckTimeoutMin": "Timeout must be at least 1 second",
"healthCheckRetryMin": "Retry attempts must be at least 1",
"healthCheckMode": "Check Mode",
"healthCheckStrategy": "Strategy",
"healthCheckModeDescription": "TCP mode verifies connectivity only. HTTP mode validates the HTTP response.",
"healthyThreshold": "Healthy Threshold",
"healthyThresholdDescription": "Consecutive successes required before marking as healthy.",
"unhealthyThreshold": "Unhealthy Threshold",
"unhealthyThresholdDescription": "Consecutive failures required before marking as unhealthy.",
"healthCheckHealthyThresholdMin": "Healthy threshold must be at least 1",
"healthCheckUnhealthyThresholdMin": "Unhealthy threshold must be at least 1",
"httpMethod": "Scheme",
"selectHttpMethod": "Select scheme",
"httpMethod": "HTTP Method",
"selectHttpMethod": "Select HTTP method",
"domainPickerSubdomainLabel": "Subdomain",
"domainPickerBaseDomainLabel": "Base Domain",
"domainPickerSearchDomains": "Search domains...",
@@ -1933,11 +1821,6 @@
"editInternalResourceDialogModePort": "Port",
"editInternalResourceDialogModeHost": "Host",
"editInternalResourceDialogModeCidr": "CIDR",
"editInternalResourceDialogModeHttp": "HTTP",
"editInternalResourceDialogModeHttps": "HTTPS",
"editInternalResourceDialogScheme": "Scheme",
"editInternalResourceDialogEnableSsl": "Enable SSL",
"editInternalResourceDialogEnableSslDescription": "Enable SSL/TLS encryption for secure HTTPS connections to the destination.",
"editInternalResourceDialogDestination": "Destination",
"editInternalResourceDialogDestinationHostDescription": "The IP address or hostname of the resource on the site's network.",
"editInternalResourceDialogDestinationIPDescription": "The IP or hostname address of the resource on the site's network.",
@@ -1953,7 +1836,6 @@
"createInternalResourceDialogName": "Name",
"createInternalResourceDialogSite": "Site",
"selectSite": "Select site...",
"multiSitesSelectorSitesCount": "{count, plural, one {# site} other {# sites}}",
"noSitesFound": "No sites found.",
"createInternalResourceDialogProtocol": "Protocol",
"createInternalResourceDialogTcp": "TCP",
@@ -1982,19 +1864,11 @@
"createInternalResourceDialogModePort": "Port",
"createInternalResourceDialogModeHost": "Host",
"createInternalResourceDialogModeCidr": "CIDR",
"createInternalResourceDialogModeHttp": "HTTP",
"createInternalResourceDialogModeHttps": "HTTPS",
"scheme": "Scheme",
"createInternalResourceDialogScheme": "Scheme",
"createInternalResourceDialogEnableSsl": "Enable SSL",
"createInternalResourceDialogEnableSslDescription": "Enable SSL/TLS encryption for secure HTTPS connections to the destination.",
"createInternalResourceDialogDestination": "Destination",
"createInternalResourceDialogDestinationHostDescription": "The IP address or hostname of the resource on the site's network.",
"createInternalResourceDialogDestinationCidrDescription": "The CIDR range of the resource on the site's network.",
"createInternalResourceDialogAlias": "Alias",
"createInternalResourceDialogAliasDescription": "An optional internal DNS alias for this resource.",
"internalResourceDownstreamSchemeRequired": "Scheme is required for HTTP resources",
"internalResourceHttpPortRequired": "Destination port is required for HTTP resources",
"siteConfiguration": "Configuration",
"siteAcceptClientConnections": "Accept Client Connections",
"siteAcceptClientConnectionsDescription": "Allow user devices and clients to access resources on this site. This can be changed later.",
@@ -2119,7 +1993,7 @@
"description": "More reliable and low-maintenance self-hosted Pangolin server with extra bells and whistles",
"introTitle": "Managed Self-Hosted Pangolin",
"introDescription": "is a deployment option designed for people who want simplicity and extra reliability while still keeping their data private and self-hosted.",
"introDetail": "With this option, you still run your own Pangolin node - your tunnels, SSL termination, and traffic all stay on your server. The difference is that management and monitoring are handled through our cloud dashboard, which unlocks a number of benefits:",
"introDetail": "With this option, you still run your own Pangolin node your tunnels, SSL termination, and traffic all stay on your server. The difference is that management and monitoring are handled through our cloud dashboard, which unlocks a number of benefits:",
"benefitSimplerOperations": {
"title": "Simpler operations",
"description": "No need to run your own mail server or set up complex alerting. You'll get health checks and downtime alerts out of the box."
@@ -2244,7 +2118,7 @@
"selectDomainForOrgAuthPage": "Select a domain for the organization's authentication page",
"domainPickerProvidedDomain": "Provided Domain",
"domainPickerFreeProvidedDomain": "Provided Domain",
"domainPickerFreeDomainsPaidFeature": "Provided domains are a paid feature. Subscribe to get a domain included with your plan - no need to bring your own.",
"domainPickerFreeDomainsPaidFeature": "Provided domains are a paid feature. Subscribe to get a domain included with your plan no need to bring your own.",
"domainPickerVerified": "Verified",
"domainPickerUnverified": "Unverified",
"domainPickerManual": "Manual",
@@ -2422,7 +2296,7 @@
"alerts": {
"commercialUseDisclosure": {
"title": "Usage Disclosure",
"description": "Select the license tier that accurately reflects your intended use. The Personal License permits free use of the Software for individual, non-commercial or small-scale commercial activities with annual gross revenue under $100,000 USD. Any use beyond these limits - including use within a business, organization, or other revenue-generating environment - requires a valid Enterprise License and payment of the applicable licensing fee. All users, whether Personal or Enterprise, must comply with the Fossorial Commercial License Terms."
"description": "Select the license tier that accurately reflects your intended use. The Personal License permits free use of the Software for individual, non-commercial or small-scale commercial activities with annual gross revenue under $100,000 USD. Any use beyond these limits including use within a business, organization, or other revenue-generating environment requires a valid Enterprise License and payment of the applicable licensing fee. All users, whether Personal or Enterprise, must comply with the Fossorial Commercial License Terms."
},
"trialPeriodInformation": {
"title": "Trial Period Information",
@@ -2477,7 +2351,7 @@
},
"scale": {
"title": "Scale",
"description": "Enterprise features, 50 users, 100 sites, and priority support."
"description": "Enterprise features, 50 users, 50 sites, and priority support."
}
},
"personalUseOnly": "Personal use only (free license - no checkout)",
@@ -2554,7 +2428,6 @@
"validPassword": "Valid Password",
"validEmail": "Valid email",
"validSSO": "Valid SSO",
"connectedClient": "Connected Client",
"resourceBlocked": "Resource Blocked",
"droppedByRule": "Dropped by Rule",
"noSessions": "No Sessions",
@@ -2792,12 +2665,8 @@
"editInternalResourceDialogAddUsers": "Add Users",
"editInternalResourceDialogAddClients": "Add Clients",
"editInternalResourceDialogDestinationLabel": "Destination",
"editInternalResourceDialogDestinationDescription": "Choose where this resource runs and how clients reach it. Selecting multiple sites will create a high availability resource that can be accessed from any of the selected sites.",
"editInternalResourceDialogDestinationDescription": "Specify the destination address for the internal resource. This can be a hostname, IP address, or CIDR range depending on the selected mode. Optionally set an internal DNS alias for easier identification.",
"editInternalResourceDialogPortRestrictionsDescription": "Restrict access to specific TCP/UDP ports or allow/block all ports.",
"createInternalResourceDialogHttpConfiguration": "HTTP configuration",
"createInternalResourceDialogHttpConfigurationDescription": "Choose the domain clients will use to reach this resource over HTTP or HTTPS.",
"editInternalResourceDialogHttpConfiguration": "HTTP configuration",
"editInternalResourceDialogHttpConfigurationDescription": "Choose the domain clients will use to reach this resource over HTTP or HTTPS.",
"editInternalResourceDialogTcp": "TCP",
"editInternalResourceDialogUdp": "UDP",
"editInternalResourceDialogIcmp": "ICMP",
@@ -2836,8 +2705,6 @@
"maintenancePageMessagePlaceholder": "We'll be back soon! Our site is currently undergoing scheduled maintenance.",
"maintenancePageMessageDescription": "Detailed message explaining the maintenance",
"maintenancePageTimeTitle": "Estimated Completion Time (Optional)",
"privateMaintenanceScreenTitle": "Private Placeholder Screen",
"privateMaintenanceScreenMessage": "This domain is being used on a private resource. Please connect using the Pangolin client to access this resource.",
"maintenanceTime": "e.g., 2 hours, Nov 1 at 5:00 PM",
"maintenanceEstimatedTimeDescription": "When you expect maintenance to be completed",
"editDomain": "Edit Domain",
@@ -2957,9 +2824,9 @@
"streamingHttpWebhookTitle": "HTTP Webhook",
"streamingHttpWebhookDescription": "Send events to any HTTP endpoint with flexible authentication and templating.",
"streamingS3Title": "Amazon S3",
"streamingS3Description": "Stream events to an S3-compatible object storage bucket.",
"streamingS3Description": "Stream events to an S3-compatible object storage bucket. Coming soon.",
"streamingDatadogTitle": "Datadog",
"streamingDatadogDescription": "Forward events directly to your Datadog account.",
"streamingDatadogDescription": "Forward events directly to your Datadog account. Coming soon.",
"streamingTypePickerDescription": "Choose a destination type to get started.",
"streamingFailedToLoad": "Failed to load destinations",
"streamingUnexpectedError": "An unexpected error occurred.",
@@ -2975,14 +2842,6 @@
"httpDestAddTitle": "Add HTTP Destination",
"httpDestEditDescription": "Update the configuration for this HTTP event streaming destination.",
"httpDestAddDescription": "Configure a new HTTP endpoint to receive your organization's events.",
"S3DestEditTitle": "Edit Destination",
"S3DestAddTitle": "Add S3 Destination",
"S3DestEditDescription": "Update the configuration for this S3 event streaming destination.",
"S3DestAddDescription": "Configure a new S3 endpoint to receive your organization's events.",
"datadogDestEditTitle": "Edit Destination",
"datadogDestAddTitle": "Add Datadog Destination",
"datadogDestEditDescription": "Update the configuration for this Datadog event streaming destination.",
"datadogDestAddDescription": "Configure a new Datadog endpoint to receive your organization's events.",
"httpDestTabSettings": "Settings",
"httpDestTabHeaders": "Headers",
"httpDestTabBody": "Body",
@@ -2990,7 +2849,7 @@
"httpDestNamePlaceholder": "My HTTP destination",
"httpDestUrlLabel": "Destination URL",
"httpDestUrlErrorHttpRequired": "URL must use http or https",
"httpDestUrlErrorHttpsRequired": "HTTPS is required",
"httpDestUrlErrorHttpsRequired": "HTTPS is required on cloud deployments",
"httpDestUrlErrorInvalid": "Enter a valid URL (e.g. https://example.com/webhook)",
"httpDestAuthTitle": "Authentication",
"httpDestAuthDescription": "Choose how requests to your endpoint are authenticated.",
@@ -3022,7 +2881,7 @@
"httpDestFormatJsonArrayTitle": "JSON Array",
"httpDestFormatJsonArrayDescription": "One request per batch, body is a JSON array. Compatible with most generic webhooks and Datadog.",
"httpDestFormatNdjsonTitle": "NDJSON",
"httpDestFormatNdjsonDescription": "One request per batch, body is newline-delimited JSON - one object per line, no outer array. Required by Splunk HEC, Elastic / OpenSearch, and Grafana Loki.",
"httpDestFormatNdjsonDescription": "One request per batch, body is newline-delimited JSON one object per line, no outer array. Required by Splunk HEC, Elastic / OpenSearch, and Grafana Loki.",
"httpDestFormatSingleTitle": "One Event Per Request",
"httpDestFormatSingleDescription": "Sends a separate HTTP POST for each individual event. Use only for endpoints that cannot handle batches.",
"httpDestLogTypesTitle": "Log Types",
@@ -3040,17 +2899,5 @@
"httpDestUpdatedSuccess": "Destination updated successfully",
"httpDestCreatedSuccess": "Destination created successfully",
"httpDestUpdateFailed": "Failed to update destination",
"httpDestCreateFailed": "Failed to create destination",
"followRedirects": "Follow Redirects",
"followRedirectsDescription": "Automatically follow HTTP redirects for requests.",
"alertingErrorWebhookUrl": "Please enter a valid URL for the webhook.",
"healthCheckStrategyHttp": "Validates connectivity and checks the HTTP response status.",
"healthCheckStrategyTcp": "Verifies TCP connectivity only, without inspecting the response.",
"healthCheckStrategySnmp": "Makes an SNMP get request to check the health of network devices and infrastructure.",
"healthCheckStrategyIcmp": "Uses ICMP echo requests (pings) to check if a resource is reachable and responsive.",
"healthCheckTabStrategy": "Strategy",
"healthCheckTabConnection": "Connection",
"healthCheckTabAdvanced": "Advanced",
"healthCheckStrategyNotAvailable": "This strategy is not available. Please contact sales to enable this feature.",
"uptime30d": "Uptime (30d)"
"httpDestCreateFailed": "Failed to create destination"
}

View File

@@ -2118,7 +2118,7 @@
"selectDomainForOrgAuthPage": "Seleccione un dominio para la página de autenticación de la organización",
"domainPickerProvidedDomain": "Dominio proporcionado",
"domainPickerFreeProvidedDomain": "Dominio proporcionado gratis",
"domainPickerFreeDomainsPaidFeature": "Los dominios proporcionados son una función de pago. Suscríbete para obtener un dominio incluido con tu plan - no necesitas traer el tuyo propio.",
"domainPickerFreeDomainsPaidFeature": "Los dominios proporcionados son una función de pago. Suscríbete para obtener un dominio incluido con tu plan no necesitas traer el tuyo propio.",
"domainPickerVerified": "Verificado",
"domainPickerUnverified": "Sin verificar",
"domainPickerManual": "Manual",
@@ -2296,7 +2296,7 @@
"alerts": {
"commercialUseDisclosure": {
"title": "Divulgación de uso",
"description": "Seleccione el nivel de licencia que refleje con precisión su uso previsto. La Licencia Personal permite el uso libre del Software para actividades comerciales individuales, no comerciales o de pequeña escala con ingresos brutos anuales inferiores a $100,000 USD. Cualquier uso más allá de estos límites - incluyendo el uso dentro de una empresa, organización, u otro entorno de generación de ingresos - requiere una Licencia Empresarial válida y el pago de la cuota de licencia aplicable. Todos los usuarios, ya sean personales o empresariales, deben cumplir con las Condiciones de Licencia Comercial Fossorial."
"description": "Seleccione el nivel de licencia que refleje con precisión su uso previsto. La Licencia Personal permite el uso libre del Software para actividades comerciales individuales, no comerciales o de pequeña escala con ingresos brutos anuales inferiores a $100,000 USD. Cualquier uso más allá de estos límites incluyendo el uso dentro de una empresa, organización, u otro entorno de generación de ingresos requiere una Licencia Empresarial válida y el pago de la cuota de licencia aplicable. Todos los usuarios, ya sean personales o empresariales, deben cumplir con las Condiciones de Licencia Comercial Fossorial."
},
"trialPeriodInformation": {
"title": "Información del período de prueba",
@@ -2351,7 +2351,7 @@
},
"scale": {
"title": "Escala",
"description": "Funcionalidades empresariales, 50 usuarios, 100 sitios y soporte prioritario."
"description": "Características de la empresa, 50 usuarios, 50 sitios y soporte prioritario."
}
},
"personalUseOnly": "Solo uso personal (licencia gratuita - sin salida)",
@@ -2881,7 +2881,7 @@
"httpDestFormatJsonArrayTitle": "Matriz JSON",
"httpDestFormatJsonArrayDescription": "Una petición por lote, cuerpo es una matriz JSON. Compatible con la mayoría de los webhooks y Datadog.",
"httpDestFormatNdjsonTitle": "NDJSON",
"httpDestFormatNdjsonDescription": "Una petición por lote, el cuerpo es JSON delimitado por línea - un objeto por línea, sin arrays externos. Requerido por Splunk HEC, Elastic / OpenSearch, y Grafana Loki.",
"httpDestFormatNdjsonDescription": "Una petición por lote, el cuerpo es JSON delimitado por línea un objeto por línea, sin arrays externos. Requerido por Splunk HEC, Elastic / OpenSearch, y Grafana Loki.",
"httpDestFormatSingleTitle": "Un evento por solicitud",
"httpDestFormatSingleDescription": "Envía un HTTP POST separado para cada evento individual. Úsalo sólo para los extremos que no pueden manejar lotes.",
"httpDestLogTypesTitle": "Tipos de Log",

View File

@@ -1993,7 +1993,7 @@
"description": "Serveur Pangolin auto-hébergé avec des cloches et des sifflets supplémentaires",
"introTitle": "Pangolin auto-hébergé géré",
"introDescription": "est une option de déploiement conçue pour les personnes qui veulent de la simplicité et de la fiabilité tout en gardant leurs données privées et auto-hébergées.",
"introDetail": "Avec cette option, vous exécutez toujours votre propre nœud Pangolin - vos tunnels, la terminaison SSL et le trafic restent sur votre serveur. La différence est que la gestion et la surveillance sont gérées via notre tableau de bord du cloud, qui déverrouille un certain nombre d'avantages :",
"introDetail": "Avec cette option, vous exécutez toujours votre propre nœud Pangolin vos tunnels, la terminaison SSL et le trafic restent sur votre serveur. La différence est que la gestion et la surveillance sont gérées via notre tableau de bord du cloud, qui déverrouille un certain nombre d'avantages :",
"benefitSimplerOperations": {
"title": "Opérations plus simples",
"description": "Pas besoin de faire tourner votre propre serveur de messagerie ou de configurer des alertes complexes. Vous obtiendrez des contrôles de santé et des alertes de temps d'arrêt par la suite."
@@ -2118,7 +2118,7 @@
"selectDomainForOrgAuthPage": "Sélectionnez un domaine pour la page d'authentification de l'organisation",
"domainPickerProvidedDomain": "Domaine fourni",
"domainPickerFreeProvidedDomain": "Domaine fourni gratuitement",
"domainPickerFreeDomainsPaidFeature": "Les domaines fournis sont une fonctionnalité payante. Abonnez-vous pour obtenir un domaine inclus avec votre plan - plus besoin de fournir le vôtre.",
"domainPickerFreeDomainsPaidFeature": "Les domaines fournis sont une fonctionnalité payante. Abonnez-vous pour obtenir un domaine inclus avec votre plan plus besoin de fournir le vôtre.",
"domainPickerVerified": "Vérifié",
"domainPickerUnverified": "Non vérifié",
"domainPickerManual": "Manuel",
@@ -2296,7 +2296,7 @@
"alerts": {
"commercialUseDisclosure": {
"title": "Divulgation d'utilisation",
"description": "Sélectionnez le niveau de licence qui correspond exactement à votre utilisation prévue. La Licence Personnelle autorise l'utilisation libre du Logiciel pour des activités commerciales individuelles, non commerciales ou à petite échelle avec un revenu annuel brut inférieur à 100 000 USD. Toute utilisation au-delà de ces limites - y compris l'utilisation au sein d'une entreprise, d'une organisation, ou tout autre environnement générateur de revenus - nécessite une licence dentreprise valide et le paiement des droits de licence applicables. Tous les utilisateurs, qu'ils soient personnels ou d'entreprise, doivent se conformer aux conditions de licence commerciale Fossorial."
"description": "Sélectionnez le niveau de licence qui correspond exactement à votre utilisation prévue. La Licence Personnelle autorise l'utilisation libre du Logiciel pour des activités commerciales individuelles, non commerciales ou à petite échelle avec un revenu annuel brut inférieur à 100 000 USD. Toute utilisation au-delà de ces limites y compris l'utilisation au sein d'une entreprise, d'une organisation, ou tout autre environnement générateur de revenus nécessite une licence dentreprise valide et le paiement des droits de licence applicables. Tous les utilisateurs, qu'ils soient personnels ou d'entreprise, doivent se conformer aux conditions de licence commerciale Fossorial."
},
"trialPeriodInformation": {
"title": "Informations sur la période d'essai",
@@ -2351,7 +2351,7 @@
},
"scale": {
"title": "Échelle",
"description": "Fonctionnalités d'entreprise, 50 utilisateurs, 100 sites et support prioritaire."
"description": "Fonctionnalités d'entreprise, 50 utilisateurs, 50 sites et une prise en charge prioritaire."
}
},
"personalUseOnly": "Usage personnel uniquement (licence gratuite - pas de validation)",
@@ -2881,7 +2881,7 @@
"httpDestFormatJsonArrayTitle": "Tableau JSON",
"httpDestFormatJsonArrayDescription": "Une requête par lot, le corps est un tableau JSON. Compatible avec la plupart des webhooks génériques et des datadog.",
"httpDestFormatNdjsonTitle": "NDJSON",
"httpDestFormatNdjsonDescription": "Une requête par lot, body est un JSON délimité par une nouvelle ligne - un objet par ligne, pas de tableau extérieur. Requis par Splunk HEC, Elastic / OpenSearch, et Grafana Loki.",
"httpDestFormatNdjsonDescription": "Une requête par lot, body est un JSON délimité par une nouvelle ligne un objet par ligne, pas de tableau extérieur. Requis par Splunk HEC, Elastic / OpenSearch, et Grafana Loki.",
"httpDestFormatSingleTitle": "Un événement par demande",
"httpDestFormatSingleDescription": "Envoie un POST HTTP séparé pour chaque événement individuel. Utilisé uniquement pour les terminaux qui ne peuvent pas gérer des lots.",
"httpDestLogTypesTitle": "Types de logs",

View File

@@ -1993,7 +1993,7 @@
"description": "Server Pangolin self-hosted più affidabile e a bassa manutenzione con campanelli e fischietti extra",
"introTitle": "Managed Self-Hosted Pangolin",
"introDescription": "è un'opzione di distribuzione progettata per le persone che vogliono la semplicità e l'affidabilità extra mantenendo i loro dati privati e self-hosted.",
"introDetail": "Con questa opzione, esegui ancora il tuo nodo Pangolin - i tunnel, la terminazione SSL e il traffico rimangono tutti sul tuo server. La differenza è che la gestione e il monitoraggio sono gestiti attraverso il nostro cruscotto cloud, che sblocca una serie di vantaggi:",
"introDetail": "Con questa opzione, esegui ancora il tuo nodo Pangolin i tunnel, la terminazione SSL e il traffico rimangono tutti sul tuo server. La differenza è che la gestione e il monitoraggio sono gestiti attraverso il nostro cruscotto cloud, che sblocca una serie di vantaggi:",
"benefitSimplerOperations": {
"title": "Operazioni più semplici",
"description": "Non è necessario eseguire il proprio server di posta o impostare un avviso complesso. Otterrai controlli di salute e avvisi di inattività fuori dalla casella."
@@ -2118,7 +2118,7 @@
"selectDomainForOrgAuthPage": "Seleziona un dominio per la pagina di autenticazione dell'organizzazione",
"domainPickerProvidedDomain": "Dominio Fornito",
"domainPickerFreeProvidedDomain": "Dominio Fornito Gratuito",
"domainPickerFreeDomainsPaidFeature": "I domini forniti sono una funzionalità a pagamento. Abbonati per ricevere un dominio incluso con il tuo piano - non è necessario portare il proprio.",
"domainPickerFreeDomainsPaidFeature": "I domini forniti sono una funzionalità a pagamento. Abbonati per ricevere un dominio incluso con il tuo piano non è necessario portare il proprio.",
"domainPickerVerified": "Verificato",
"domainPickerUnverified": "Non Verificato",
"domainPickerManual": "Manuale",
@@ -2296,7 +2296,7 @@
"alerts": {
"commercialUseDisclosure": {
"title": "Trasparenza Di Utilizzo",
"description": "Seleziona il livello di licenza che rispecchia accuratamente il tuo utilizzo previsto. La Licenza Personale consente l'uso gratuito del Software per le attività commerciali individuali, non commerciali o su piccola scala con entrate lorde annue inferiori a $100.000 USD. Qualsiasi uso oltre questi limiti - compreso l'uso all'interno di un'azienda, organizzazione, o altro ambiente generatore di entrate - richiede una licenza Enterprise valida e il pagamento della tassa di licenza applicabile. Tutti gli utenti, siano essi personali o aziendali, devono rispettare i termini di licenza commerciale Fossorial."
"description": "Seleziona il livello di licenza che rispecchia accuratamente il tuo utilizzo previsto. La Licenza Personale consente l'uso gratuito del Software per le attività commerciali individuali, non commerciali o su piccola scala con entrate lorde annue inferiori a $100.000 USD. Qualsiasi uso oltre questi limiti compreso l'uso all'interno di un'azienda, organizzazione, o altro ambiente generatore di entrate richiede una licenza Enterprise valida e il pagamento della tassa di licenza applicabile. Tutti gli utenti, siano essi personali o aziendali, devono rispettare i termini di licenza commerciale Fossorial."
},
"trialPeriodInformation": {
"title": "Informazioni Periodo Di Prova",
@@ -2351,7 +2351,7 @@
},
"scale": {
"title": "Scala",
"description": "Funzionalità aziendali, 50 utenti, 100 siti e supporto prioritario."
"description": "Funzionalità aziendali, 50 utenti, 50 siti e supporto prioritario."
}
},
"personalUseOnly": "Uso personale esclusivo (licenza gratuita - nessun pagamento)",
@@ -2881,7 +2881,7 @@
"httpDestFormatJsonArrayTitle": "JSON Array",
"httpDestFormatJsonArrayDescription": "Una richiesta per lotto, corpo è un array JSON. Compatibile con la maggior parte dei webhooks generici e Datadog.",
"httpDestFormatNdjsonTitle": "NDJSON",
"httpDestFormatNdjsonDescription": "Una richiesta per lotto, corpo è newline-delimited JSON - un oggetto per linea, nessun array esterno. Richiesto da Splunk HEC, Elastic / OpenSearch, e Grafana Loki.",
"httpDestFormatNdjsonDescription": "Una richiesta per lotto, corpo è newline-delimited JSON un oggetto per linea, nessun array esterno. Richiesto da Splunk HEC, Elastic / OpenSearch, e Grafana Loki.",
"httpDestFormatSingleTitle": "Un Evento Per Richiesta",
"httpDestFormatSingleDescription": "Invia un HTTP POST separato per ogni singolo evento. Usa solo per gli endpoint che non possono gestire i batch.",
"httpDestLogTypesTitle": "Tipi Di Log",

View File

@@ -2118,7 +2118,7 @@
"selectDomainForOrgAuthPage": "조직 인증 페이지에 대한 도메인을 선택하세요.",
"domainPickerProvidedDomain": "제공된 도메인",
"domainPickerFreeProvidedDomain": "무료 제공된 도메인",
"domainPickerFreeDomainsPaidFeature": "제공된 도메인은 유료 기능입니다. 요금제에 도메인이 포함되도록 구독하세요. - 별도로 도메인을 준비할 필요 없습니다.",
"domainPickerFreeDomainsPaidFeature": "제공된 도메인은 유료 기능입니다. 요금제에 도메인이 포함되도록 구독하세요. 별도로 도메인을 준비할 필요 없습니다.",
"domainPickerVerified": "검증됨",
"domainPickerUnverified": "검증되지 않음",
"domainPickerManual": "수동",
@@ -2296,7 +2296,7 @@
"alerts": {
"commercialUseDisclosure": {
"title": "사용 공개",
"description": "당신의 의도된 사용에 정확히 맞는 라이선스 등급을 선택하세요. 개인 라이선스는 연간 총 수익 100,000 USD 이하의 개인, 비상업적 또는 소규모 상업 활동을 위한 소프트웨어의 무료 사용을 허용합니다. 이러한 제한을 넘는 모든 사용 - 비즈니스, 조직 또는 기타 수익 창출 환경 내에서의 사용 - 은 유효한 엔터프라이즈 라이선스 및 해당 라이선스 수수료의 지불이 필요합니다. 개인 또는 기업 사용자는 모두 Fossorial 상용 라이선스 조건을 준수해야 합니다."
"description": "당신의 의도된 사용에 정확히 맞는 라이선스 등급을 선택하세요. 개인 라이선스는 연간 총 수익 100,000 USD 이하의 개인, 비상업적 또는 소규모 상업 활동을 위한 소프트웨어의 무료 사용을 허용합니다. 이러한 제한을 넘는 모든 사용 비즈니스, 조직 또는 기타 수익 창출 환경 내에서의 사용 은 유효한 엔터프라이즈 라이선스 및 해당 라이선스 수수료의 지불이 필요합니다. 개인 또는 기업 사용자는 모두 Fossorial 상용 라이선스 조건을 준수해야 합니다."
},
"trialPeriodInformation": {
"title": "시험 기간 정보",
@@ -2351,7 +2351,7 @@
},
"scale": {
"title": "스케일",
"description": "기업 기능, 50명의 사용자, 100개의 사이트, 그리고 우선 지원."
"description": "기업 기능, 50명의 사용자, 50개의 사이트, 우선 지원."
}
},
"personalUseOnly": "개인용으로만 사용 (무료 라이선스 - 결제 없음)",
@@ -2881,7 +2881,7 @@
"httpDestFormatJsonArrayTitle": "JSON 배열",
"httpDestFormatJsonArrayDescription": "각 배치마다 요청 하나씩, 본문은 JSON 배열입니다. 대부분의 일반 웹훅 및 Datadog과 호환됩니다.",
"httpDestFormatNdjsonTitle": "NDJSON",
"httpDestFormatNdjsonDescription": "각 배치마다 요청 하나씩, 본문은 줄 구분 JSON - 한 라인에 하나의 객체가 있으며 외부 배열이 없습니다. Splunk HEC, Elastic / OpenSearch, Grafana Loki에 필요합니다.",
"httpDestFormatNdjsonDescription": "각 배치마다 요청 하나씩, 본문은 줄 구분 JSON 한 라인에 하나의 객체가 있으며 외부 배열이 없습니다. Splunk HEC, Elastic / OpenSearch, Grafana Loki에 필요합니다.",
"httpDestFormatSingleTitle": "각 요청 당 하나의 이벤트",
"httpDestFormatSingleDescription": "각 개별 이벤트에 대해 별도의 HTTP POST를 전송합니다. 배치를 처리할 수 없는 엔드포인트에만 사용하세요.",
"httpDestLogTypesTitle": "로그 유형",

View File

@@ -2351,7 +2351,7 @@
},
"scale": {
"title": "Skala",
"description": "Funksjoner for bedrifter, 50 brukere, 100 nettsteder og prioritert support."
"description": "Enterprise features, 50 brukere, 50 nettsteder og prioritetsstøtte."
}
},
"personalUseOnly": "Kun personlig bruk (gratis lisens - ingen kasse)",
@@ -2881,7 +2881,7 @@
"httpDestFormatJsonArrayTitle": "JSON liste",
"httpDestFormatJsonArrayDescription": "Én forespørsel per batch, innholdet er en JSON-liste. Kompatibel med de mest generiske webhooks og Datadog.",
"httpDestFormatNdjsonTitle": "NDJSON",
"httpDestFormatNdjsonDescription": "Én forespørsel per sats, innholdet er nytt avgrenset JSON - et objekt per linje, ingen ytterarray. Kreves av Splunk HEC, Elastisk/OpenSearch, og Grafana Loki.",
"httpDestFormatNdjsonDescription": "Én forespørsel per sats, innholdet er nytt avgrenset JSON et objekt per linje, ingen ytterarray. Kreves av Splunk HEC, Elastisk/OpenSearch, og Grafana Loki.",
"httpDestFormatSingleTitle": "En hendelse per forespørsel",
"httpDestFormatSingleDescription": "Sender en separat HTTP POST for hver enkelt hendelse. Bruk bare for endepunkter som ikke kan håndtere batcher.",
"httpDestLogTypesTitle": "Logg typer",

View File

@@ -2118,7 +2118,7 @@
"selectDomainForOrgAuthPage": "Selecteer een domein voor de authenticatiepagina van de organisatie",
"domainPickerProvidedDomain": "Opgegeven domein",
"domainPickerFreeProvidedDomain": "Gratis verstrekt domein",
"domainPickerFreeDomainsPaidFeature": "Geleverde domeinen zijn een betaalde functie. Abonneer je om een domein bij je plan te krijgen - je hoeft er zelf geen mee te brengen.",
"domainPickerFreeDomainsPaidFeature": "Geleverde domeinen zijn een betaalde functie. Abonneer je om een domein bij je plan te krijgen je hoeft er zelf geen mee te brengen.",
"domainPickerVerified": "Geverifieerd",
"domainPickerUnverified": "Ongeverifieerd",
"domainPickerManual": "Handleiding",
@@ -2351,7 +2351,7 @@
},
"scale": {
"title": "Schaal",
"description": "Enterprise-functies, 50 gebruikers, 100 sites en prioritaire ondersteuning."
"description": "Enterprise functies, 50 gebruikers, 50 sites en prioriteit ondersteuning."
}
},
"personalUseOnly": "Alleen voor persoonlijk gebruik (gratis licentie - geen afrekening)",

View File

@@ -1993,7 +1993,7 @@
"description": "Większa niezawodność i niska konserwacja serwera Pangolin z dodatkowymi dzwonkami i sygnałami",
"introTitle": "Zarządzany samowystarczalny Pangolin",
"introDescription": "jest opcją wdrażania zaprojektowaną dla osób, które chcą prostoty i dodatkowej niezawodności, przy jednoczesnym utrzymaniu swoich danych prywatnych i samodzielnych.",
"introDetail": "Z tą opcją nadal obsługujesz swój własny węzeł Pangolin - tunele, zakończenie SSL i ruch na Twoim serwerze. Różnica polega na tym, że zarządzanie i monitorowanie odbywa się za pomocą naszej tablicy rozdzielczej, która odblokowuje szereg korzyści:",
"introDetail": "Z tą opcją nadal obsługujesz swój własny węzeł Pangolin tunele, zakończenie SSL i ruch na Twoim serwerze. Różnica polega na tym, że zarządzanie i monitorowanie odbywa się za pomocą naszej tablicy rozdzielczej, która odblokowuje szereg korzyści:",
"benefitSimplerOperations": {
"title": "Uproszczone operacje",
"description": "Nie ma potrzeby uruchamiania własnego serwera pocztowego lub ustawiania skomplikowanych powiadomień. Będziesz mieć kontrolę zdrowia i powiadomienia o przestoju."
@@ -2118,7 +2118,7 @@
"selectDomainForOrgAuthPage": "Wybierz domenę dla strony uwierzytelniania organizacji",
"domainPickerProvidedDomain": "Dostarczona domena",
"domainPickerFreeProvidedDomain": "Darmowa oferowana domena",
"domainPickerFreeDomainsPaidFeature": "Dostarczane domeny to funkcja płatna. Subskrybuj, aby uzyskać domenę w ramach swojego planu - nie ma potrzeby przynoszenia własnej.",
"domainPickerFreeDomainsPaidFeature": "Dostarczane domeny to funkcja płatna. Subskrybuj, aby uzyskać domenę w ramach swojego planu nie ma potrzeby przynoszenia własnej.",
"domainPickerVerified": "Zweryfikowano",
"domainPickerUnverified": "Niezweryfikowane",
"domainPickerManual": "Podręcznik",
@@ -2351,7 +2351,7 @@
},
"scale": {
"title": "Skala",
"description": "Funkcje dla przedsiębiorstw, 50 użytkowników, 100 witryn i priorytetowe wsparcie."
"description": "Cechy przedsiębiorstw, 50 użytkowników, 50 obiektów i wsparcie priorytetowe."
}
},
"personalUseOnly": "Tylko do użytku osobistego (darmowa licencja - bez płatności)",
@@ -2881,7 +2881,7 @@
"httpDestFormatJsonArrayTitle": "Tablica JSON",
"httpDestFormatJsonArrayDescription": "Jedna prośba na partię, treść jest tablicą JSON. Kompatybilna z najbardziej ogólnymi webhookami i Datadog.",
"httpDestFormatNdjsonTitle": "NDJSON",
"httpDestFormatNdjsonDescription": "Jedno żądanie na partię, ciałem jest plik JSON rozdzielony na newline-delimited - jeden obiekt na wiersz, bez tablicy zewnętrznej. Wymagane przez Splunk HEC, Elastic / OpenSesearch i Grafana Loki.",
"httpDestFormatNdjsonDescription": "Jedno żądanie na partię, ciałem jest plik JSON rozdzielony na newline-delimited jeden obiekt na wiersz, bez tablicy zewnętrznej. Wymagane przez Splunk HEC, Elastic / OpenSesearch i Grafana Loki.",
"httpDestFormatSingleTitle": "Jedno wydarzenie na żądanie",
"httpDestFormatSingleDescription": "Wysyła oddzielny POST HTTP dla każdego zdarzenia. Użyj tylko dla punktów końcowych, które nie mogą obsługiwać partii.",
"httpDestLogTypesTitle": "Typy logów",

View File

@@ -1993,7 +1993,7 @@
"description": "Servidor Pangolin auto-hospedado mais confiável e com baixa manutenção com sinos extras e assobiamentos",
"introTitle": "Pangolin Auto-Hospedado Gerenciado",
"introDescription": "é uma opção de implantação projetada para pessoas que querem simplicidade e confiança adicional, mantendo os seus dados privados e auto-hospedados.",
"introDetail": "Com esta opção, você ainda roda seu próprio nó Pangolin - seus túneis, terminação SSL e tráfego todos permanecem no seu servidor. A diferença é que a gestão e a monitorização são geridos através do nosso painel de nuvem, que desbloqueia vários benefícios:",
"introDetail": "Com esta opção, você ainda roda seu próprio nó Pangolin seus túneis, terminação SSL e tráfego todos permanecem no seu servidor. A diferença é que a gestão e a monitorização são geridos através do nosso painel de nuvem, que desbloqueia vários benefícios:",
"benefitSimplerOperations": {
"title": "Operações simples",
"description": "Não é necessário executar o seu próprio servidor de e-mail ou configurar um alerta complexo. Você receberá fora de caixa verificações de saúde e alertas de tempo de inatividade."
@@ -2118,7 +2118,7 @@
"selectDomainForOrgAuthPage": "Selecione um domínio para a página de autenticação da organização",
"domainPickerProvidedDomain": "Domínio fornecido",
"domainPickerFreeProvidedDomain": "Domínio fornecido grátis",
"domainPickerFreeDomainsPaidFeature": "Os domínios fornecidos são um recurso pago. Assine para obter um domínio incluído no seu plano - não há necessidade de trazer o seu próprio.",
"domainPickerFreeDomainsPaidFeature": "Os domínios fornecidos são um recurso pago. Assine para obter um domínio incluído no seu plano não há necessidade de trazer o seu próprio.",
"domainPickerVerified": "Verificada",
"domainPickerUnverified": "Não verificado",
"domainPickerManual": "Manual",
@@ -2296,7 +2296,7 @@
"alerts": {
"commercialUseDisclosure": {
"title": "Divulgação de uso",
"description": "Selecione o nível de licença que reflete corretamente seu uso pretendido. A Licença Pessoal permite o uso livre do Software para atividades comerciais individuais, não comerciais ou em pequena escala com rendimento bruto anual inferior a 100.000 USD. Qualquer uso além destes limites - incluindo uso dentro de um negócio, organização, ou outro ambiente gerador de receitas - requer uma Licença Enterprise válida e o pagamento da taxa aplicável de licenciamento. Todos os usuários, pessoais ou empresariais, devem cumprir os Termos da Licença Comercial Fossorial."
"description": "Selecione o nível de licença que reflete corretamente seu uso pretendido. A Licença Pessoal permite o uso livre do Software para atividades comerciais individuais, não comerciais ou em pequena escala com rendimento bruto anual inferior a 100.000 USD. Qualquer uso além destes limites incluindo uso dentro de um negócio, organização, ou outro ambiente gerador de receitas requer uma Licença Enterprise válida e o pagamento da taxa aplicável de licenciamento. Todos os usuários, pessoais ou empresariais, devem cumprir os Termos da Licença Comercial Fossorial."
},
"trialPeriodInformation": {
"title": "Informações do Período de Avaliação",
@@ -2351,7 +2351,7 @@
},
"scale": {
"title": "Escala",
"description": "Recursos empresariais, 50 usuários, 100 sites, e suporte prioritário."
"description": "Recursos de empresa, 50 usuários, 50 sites e apoio prioritário."
}
},
"personalUseOnly": "Uso pessoal apenas (licença gratuita - sem checkout)",
@@ -2881,7 +2881,7 @@
"httpDestFormatJsonArrayTitle": "Matriz JSON",
"httpDestFormatJsonArrayDescription": "Um pedido por lote, o corpo é um array JSON. Compatível com a maioria dos webhooks genéricos e Datadog.",
"httpDestFormatNdjsonTitle": "NDJSON",
"httpDestFormatNdjsonDescription": "Um pedido por lote, o corpo é um JSON delimitado por nova-linha - um objeto por linha, sem array exterior. Requerido pelo Splunk HEC, Elástico / OpenSearch, e Grafana Loki.",
"httpDestFormatNdjsonDescription": "Um pedido por lote, o corpo é um JSON delimitado por nova-linha um objeto por linha, sem array exterior. Requerido pelo Splunk HEC, Elástico / OpenSearch, e Grafana Loki.",
"httpDestFormatSingleTitle": "Um Evento por Requisição",
"httpDestFormatSingleDescription": "Envia um POST HTTP separado para cada evento. Utilize apenas para endpoints que não podem manipular lotes.",
"httpDestLogTypesTitle": "Tipos de log",

View File

@@ -56,7 +56,7 @@
"siteManageSites": "Управление сайтами",
"siteDescription": "Создание и управление сайтами, чтобы включить подключение к приватным сетям",
"sitesBannerTitle": "Подключить любую сеть",
"sitesBannerDescription": "Сайт - это соединение с удаленной сетью, которое позволяет Pangolin предоставлять доступ к ресурсам, будь они общедоступными или частными, пользователям в любом месте. Установите сетевой коннектор сайта (Newt) там, где можно запустить исполняемый файл или контейнер, чтобы установить соединение.",
"sitesBannerDescription": "Сайт это соединение с удаленной сетью, которое позволяет Pangolin предоставлять доступ к ресурсам, будь они общедоступными или частными, пользователям в любом месте. Установите сетевой коннектор сайта (Newt) там, где можно запустить исполняемый файл или контейнер, чтобы установить соединение.",
"sitesBannerButtonText": "Установить сайт",
"approvalsBannerTitle": "Одобрить или запретить доступ к устройству",
"approvalsBannerDescription": "Просмотрите и подтвердите или отклоните запросы на доступ к устройству от пользователей. Когда требуется подтверждение устройства, пользователи должны получить одобрение администратора, прежде чем их устройства смогут подключиться к ресурсам вашей организации.",
@@ -163,7 +163,7 @@
"proxyResourceTitle": "Управление публичными ресурсами",
"proxyResourceDescription": "Создание и управление ресурсами, которые доступны через веб-браузер",
"proxyResourcesBannerTitle": "Общедоступный доступ через веб",
"proxyResourcesBannerDescription": "Общедоступные ресурсы - это прокси-по HTTPS или TCP/UDP, доступные любому пользователю в Интернете через веб-браузер. В отличие от частных ресурсов, они не требуют программного обеспечения на стороне клиента и могут включать политики доступа на основе идентификации и контекста.",
"proxyResourcesBannerDescription": "Общедоступные ресурсы это прокси-по HTTPS или TCP/UDP, доступные любому пользователю в Интернете через веб-браузер. В отличие от частных ресурсов, они не требуют программного обеспечения на стороне клиента и могут включать политики доступа на основе идентификации и контекста.",
"clientResourceTitle": "Управление приватными ресурсами",
"clientResourceDescription": "Создание и управление ресурсами, которые доступны только через подключенный клиент",
"privateResourcesBannerTitle": "Частный доступ с нулевым доверием",
@@ -371,7 +371,7 @@
"provisioningKeysUpdated": "Ключ подготовки обновлен",
"provisioningKeysUpdatedDescription": "Ваши изменения были сохранены.",
"provisioningKeysBannerTitle": "Ключи подготовки сайта",
"provisioningKeysBannerDescription": "Создайте ключ настройки и используйте его с соединителем Newt для автоматического создания сайтов при первом запуске - нет необходимости настраивать отдельные учетные данные для каждого сайта.",
"provisioningKeysBannerDescription": "Создайте ключ настройки и используйте его с соединителем Newt для автоматического создания сайтов при первом запуске нет необходимости настраивать отдельные учетные данные для каждого сайта.",
"provisioningKeysBannerButtonText": "Узнать больше",
"pendingSitesBannerTitle": "Ожидающие сайты",
"pendingSitesBannerDescription": "Сайты, подключающиеся с помощью ключа настройки, отображаются здесь для проверки.",
@@ -1993,7 +1993,7 @@
"description": "Более надежный и низко обслуживаемый сервер Pangolin с дополнительными колокольнями и свистками",
"introTitle": "Управляемый Само-Хост Панголина",
"introDescription": "- это вариант развертывания, предназначенный для людей, которые хотят простоты и надёжности, сохраняя при этом свои данные конфиденциальными и самостоятельными.",
"introDetail": "С помощью этой опции вы по-прежнему используете узел Pangolin - туннели, SSL, и весь остающийся на вашем сервере. Разница заключается в том, что управление и мониторинг осуществляются через нашу панель инструментов из облака, которая открывает ряд преимуществ:",
"introDetail": "С помощью этой опции вы по-прежнему используете узел Pangolin туннели, SSL, и весь остающийся на вашем сервере. Разница заключается в том, что управление и мониторинг осуществляются через нашу панель инструментов из облака, которая открывает ряд преимуществ:",
"benefitSimplerOperations": {
"title": "Более простые операции",
"description": "Не нужно запускать свой собственный почтовый сервер или настроить комплексное оповещение. Вы будете получать проверки состояния здоровья и оповещения о неисправностях из коробки."
@@ -2118,7 +2118,7 @@
"selectDomainForOrgAuthPage": "Выберите домен для страницы аутентификации организации",
"domainPickerProvidedDomain": "Домен предоставлен",
"domainPickerFreeProvidedDomain": "Бесплатный домен",
"domainPickerFreeDomainsPaidFeature": "Предоставленные домены являются платной функцией. Подпишитесь, чтобы получить домен, включенный в ваш план - не нужно приносить свой собственный.",
"domainPickerFreeDomainsPaidFeature": "Предоставленные домены являются платной функцией. Подпишитесь, чтобы получить домен, включенный в ваш план не нужно приносить свой собственный.",
"domainPickerVerified": "Подтверждено",
"domainPickerUnverified": "Не подтверждено",
"domainPickerManual": "Ручной",
@@ -2296,7 +2296,7 @@
"alerts": {
"commercialUseDisclosure": {
"title": "Раскрытие",
"description": "Выберите уровень лицензии, который точно отражает ваше предполагаемое использование. Личная Лицензия разрешает свободное использование Программного Обеспечения для частной, некоммерческой или малой коммерческой деятельности с годовым валовым доходом до $100 000 USD. Любое использование сверх этих пределов - включая использование в бизнесе, организацию, или другой приносящей доход среде - требует действительной лицензии предприятия и уплаты соответствующей лицензионной платы. Все пользователи, будь то Личные или Предприятия, обязаны соблюдать условия коммерческой лицензии Fossoral."
"description": "Выберите уровень лицензии, который точно отражает ваше предполагаемое использование. Личная Лицензия разрешает свободное использование Программного Обеспечения для частной, некоммерческой или малой коммерческой деятельности с годовым валовым доходом до $100 000 USD. Любое использование сверх этих пределов включая использование в бизнесе, организацию, или другой приносящей доход среде требует действительной лицензии предприятия и уплаты соответствующей лицензионной платы. Все пользователи, будь то Личные или Предприятия, обязаны соблюдать условия коммерческой лицензии Fossoral."
},
"trialPeriodInformation": {
"title": "Информация о пробном периоде",
@@ -2351,7 +2351,7 @@
},
"scale": {
"title": "Масштаб",
"description": "Функции корпоративного уровня, 50 пользователей, 100 сайтов и приоритетная поддержка."
"description": "Функции предприятия, 50 пользователей, 50 сайтов, а также приоритетная поддержка."
}
},
"personalUseOnly": "Только для личного использования (бесплатная лицензия - без оформления на кассе)",

View File

@@ -1993,7 +1993,7 @@
"description": "Daha güvenilir ve düşük bakım gerektiren, ekstra özelliklere sahip kendi kendine barındırabileceğiniz Pangolin sunucusu",
"introTitle": "Yönetilen Kendi Kendine Barındırılan Pangolin",
"introDescription": "Bu, basitlik ve ekstra güvenilirlik arayan, ancak verilerini gizli tutmak ve kendi sunucularında barındırmak isteyen kişiler için tasarlanmış bir dağıtım seçeneğidir.",
"introDetail": "Bu seçenekle, kendi Pangolin düğümünüzü çalıştırmaya devam edersiniz - tünelleriniz, SSL bitişiniz ve trafiğiniz tamamen sunucunuzda kalır. Fark, yönetim ve izlemeyi bulut panomuz üzerinden gerçekleştiririz, bu da bir dizi avantaj sağlar:",
"introDetail": "Bu seçenekle, kendi Pangolin düğümünüzü çalıştırmaya devam edersiniz tünelleriniz, SSL bitişiniz ve trafiğiniz tamamen sunucunuzda kalır. Fark, yönetim ve izlemeyi bulut panomuz üzerinden gerçekleştiririz, bu da bir dizi avantaj sağlar:",
"benefitSimplerOperations": {
"title": "Daha basit işlemler",
"description": "Kendi e-posta sunucunuzu çalıştırmanıza veya karmaşık uyarılar kurmanıza gerek yok. Sağlık kontrolleri ve kesinti uyarılarını kutudan çıktığı gibi alırsınız."
@@ -2296,7 +2296,7 @@
"alerts": {
"commercialUseDisclosure": {
"title": "Kullanım Açıklaması",
"description": "Kullanım amacınızı doğru bir şekilde yansıtan lisans seviyesini seçin. Kişisel Lisans, yazılımın bireysel, ticari olmayan veya yıllık geliri 100,000 ABD Dolarının altında olan küçük ölçekli ticari faaliyetlerde ücretsiz kullanılmasına izin verir. Bu sınırların ötesinde kullanım - bir işletme, organizasyon veya diğer gelir getirici ortamlarda kullanım dahil olmak üzere - geçerli bir Kurumsal Lisans ve ilgili lisans ücretinin ödenmesini gerektirir. Tüm kullanıcılar, ister Kişisel ister Kurumsal, Fossorial Ticari Lisans Şartlarına uymalıdır."
"description": "Kullanım amacınızı doğru bir şekilde yansıtan lisans seviyesini seçin. Kişisel Lisans, yazılımın bireysel, ticari olmayan veya yıllık geliri 100,000 ABD Dolarının altında olan küçük ölçekli ticari faaliyetlerde ücretsiz kullanılmasına izin verir. Bu sınırların ötesinde kullanım bir işletme, organizasyon veya diğer gelir getirici ortamlarda kullanım dahil olmak üzere geçerli bir Kurumsal Lisans ve ilgili lisans ücretinin ödenmesini gerektirir. Tüm kullanıcılar, ister Kişisel ister Kurumsal, Fossorial Ticari Lisans Şartlarına uymalıdır."
},
"trialPeriodInformation": {
"title": "Deneme Süresi Bilgileri",
@@ -2351,7 +2351,7 @@
},
"scale": {
"title": "Ölçek",
"description": "Kurumsal özellikler, 50 kullanıcı, 100 site ve öncelikli destek."
"description": "Kurumsal özellikler, 50 kullanıcı, 50 site ve öncelikli destek."
}
},
"personalUseOnly": "Kişisel kullanım için (ücretsiz lisans - ödeme yok)",

View File

@@ -1993,7 +1993,7 @@
"description": "更可靠和低维护自我托管的 Pangolin 服务器,带有额外的铃声和告密器",
"introTitle": "托管自托管的潘戈林公司",
"introDescription": "这是一种部署选择,为那些希望简洁和额外可靠的人设计,同时仍然保持他们的数据的私密性和自我托管性。",
"introDetail": "通过此选项,您仍然运行您自己的 Pangolin 节点 - - 您的隧道、SSL 终止,并且流量在您的服务器上保持所有状态。 不同之处在于,管理和监测是通过我们的云层仪表板进行的,该仪表板开启了一些好处:",
"introDetail": "通过此选项,您仍然运行您自己的 Pangolin 节点 — — 您的隧道、SSL 终止,并且流量在您的服务器上保持所有状态。 不同之处在于,管理和监测是通过我们的云层仪表板进行的,该仪表板开启了一些好处:",
"benefitSimplerOperations": {
"title": "简单的操作",
"description": "无需运行您自己的邮件服务器或设置复杂的警报。您将从方框中获得健康检查和下限提醒。"
@@ -2118,7 +2118,7 @@
"selectDomainForOrgAuthPage": "选择组织认证页面的域",
"domainPickerProvidedDomain": "提供的域",
"domainPickerFreeProvidedDomain": "免费提供的域",
"domainPickerFreeDomainsPaidFeature": "提供的域名是付费功能。订阅即可将域名包含在您的计划中-无需自带域名。",
"domainPickerFreeDomainsPaidFeature": "提供的域名是付费功能。订阅即可将域名包含在您的计划中无需自带域名。",
"domainPickerVerified": "已验证",
"domainPickerUnverified": "未验证",
"domainPickerManual": "手动",
@@ -2296,7 +2296,7 @@
"alerts": {
"commercialUseDisclosure": {
"title": "使用情况披露",
"description": "选择能准确反映您预定用途的许可等级。 个人许可证允许对个人、非商业性或小型商业活动免费使用软件年收入毛额不到100 000美元。 超出这些限度的任何用途,包括在企业、组织内的用途。 或其他创收环境--需要有效的企业许可证和支付适用的许可证费用。 所有用户,不论是个人还是企业,都必须遵守寄养商业许可证条款。"
"description": "选择能准确反映您预定用途的许可等级。 个人许可证允许对个人、非商业性或小型商业活动免费使用软件年收入毛额不到100 000美元。 超出这些限度的任何用途,包括在企业、组织内的用途。 或其他创收环境——需要有效的企业许可证和支付适用的许可证费用。 所有用户,不论是个人还是企业,都必须遵守寄养商业许可证条款。"
},
"trialPeriodInformation": {
"title": "试用期信息",
@@ -2351,7 +2351,7 @@
},
"scale": {
"title": "缩放比例",
"description": "企业功能,50个用户100个站点,以及优先支持。"
"description": "企业特征、50个用户、50个站点优先支持。"
}
},
"personalUseOnly": "仅限个人使用(免费许可 - 无需结账)",
@@ -2881,7 +2881,7 @@
"httpDestFormatJsonArrayTitle": "JSON 数组",
"httpDestFormatJsonArrayDescription": "每批一个请求,实体是一个 JSON 数组。与大多数通用的 Web 钩子和数据兼容。",
"httpDestFormatNdjsonTitle": "NDJSON",
"httpDestFormatNdjsonDescription": "每批有一个请求,物体是换行符限制的 JSON --每行一个对象,不是外部数组。 Sluk HEC、Elastic / OpenSearch和Grafana Loki所需。",
"httpDestFormatNdjsonDescription": "每批有一个请求,物体是换行符限制的 JSON ——每行一个对象,不是外部数组。 Sluk HEC、Elastic / OpenSearch和Grafana Loki所需。",
"httpDestFormatSingleTitle": "每个请求一个事件",
"httpDestFormatSingleDescription": "为每个事件单独发送一个 HTTP POST。仅用于无法处理批量的端点。",
"httpDestLogTypesTitle": "日志类型",

View File

@@ -1763,7 +1763,7 @@
"description": "更可靠、維護成本更低的自架 Pangolin 伺服器,並附帶額外的附加功能",
"introTitle": "託管式自架 Pangolin",
"introDescription": "這是一種部署選擇,為那些希望簡潔和額外可靠的人設計,同時仍然保持他們的數據的私密性和自我託管性。",
"introDetail": "通過此選項,您仍然運行您自己的 Pangolin 節點 - - 您的隧道、SSL 終止,並且流量在您的伺服器上保持所有狀態。 不同之處在於,管理和監測是通過我們的雲層儀錶板進行的,該儀錶板開啟了一些好處:",
"introDetail": "通過此選項,您仍然運行您自己的 Pangolin 節點 — — 您的隧道、SSL 終止,並且流量在您的伺服器上保持所有狀態。 不同之處在於,管理和監測是通過我們的雲層儀錶板進行的,該儀錶板開啟了一些好處:",
"benefitSimplerOperations": {
"title": "簡單的操作",
"description": "無需運行您自己的郵件伺服器或設置複雜的警報。您將從方框中獲得健康檢查和下限提醒。"
@@ -2035,7 +2035,7 @@
"alerts": {
"commercialUseDisclosure": {
"title": "使用情況披露",
"description": "選擇能準確反映您預定用途的許可等級。 個人許可證允許對個人、非商業性或小型商業活動免費使用軟體,年收入毛額不到 100,000 美元。 超出這些限度的任何用途,包括在企業、組織內的用途。 或其他創收環境--需要有效的企業許可證和支付適用的許可證費用。 所有用戶,不論是個人還是企業,都必須遵守寄養商業許可證條款。"
"description": "選擇能準確反映您預定用途的許可等級。 個人許可證允許對個人、非商業性或小型商業活動免費使用軟體,年收入毛額不到 100,000 美元。 超出這些限度的任何用途,包括在企業、組織內的用途。 或其他創收環境——需要有效的企業許可證和支付適用的許可證費用。 所有用戶,不論是個人還是企業,都必須遵守寄養商業許可證條款。"
},
"trialPeriodInformation": {
"title": "試用期資訊",

210
package-lock.json generated
View File

@@ -44,7 +44,6 @@
"@tailwindcss/forms": "0.5.11",
"@tanstack/react-query": "5.90.21",
"@tanstack/react-table": "8.21.3",
"@xyflow/react": "^12.8.4",
"arctic": "3.7.0",
"axios": "1.13.5",
"better-sqlite3": "11.9.1",
@@ -70,7 +69,7 @@
"lucide-react": "0.577.0",
"maxmind": "5.0.5",
"moment": "2.30.1",
"next": "15.5.14",
"next": "15.5.15",
"next-intl": "4.8.3",
"next-themes": "0.4.6",
"nextjs-toploader": "3.9.17",
@@ -1059,7 +1058,6 @@
"integrity": "sha512-CGOfOJqWjg2qW/Mb6zNsDm+u5vFQ8DxXfbM09z69p5Z6+mE1ikP2jUXw+j42Pf1XTYED2Rni5f95npYeuwMDQA==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@babel/code-frame": "^7.29.0",
"@babel/generator": "^7.29.0",
@@ -2355,7 +2353,6 @@
"cpu": [
"arm64"
],
"dev": true,
"license": "Apache-2.0",
"optional": true,
"os": [
@@ -2378,7 +2375,6 @@
"cpu": [
"x64"
],
"dev": true,
"license": "Apache-2.0",
"optional": true,
"os": [
@@ -2401,7 +2397,6 @@
"cpu": [
"arm64"
],
"dev": true,
"license": "LGPL-3.0-or-later",
"optional": true,
"os": [
@@ -2418,7 +2413,6 @@
"cpu": [
"x64"
],
"dev": true,
"license": "LGPL-3.0-or-later",
"optional": true,
"os": [
@@ -2435,7 +2429,6 @@
"cpu": [
"arm"
],
"dev": true,
"license": "LGPL-3.0-or-later",
"optional": true,
"os": [
@@ -2452,7 +2445,6 @@
"cpu": [
"arm64"
],
"dev": true,
"license": "LGPL-3.0-or-later",
"optional": true,
"os": [
@@ -2469,7 +2461,6 @@
"cpu": [
"ppc64"
],
"dev": true,
"license": "LGPL-3.0-or-later",
"optional": true,
"os": [
@@ -2486,7 +2477,6 @@
"cpu": [
"s390x"
],
"dev": true,
"license": "LGPL-3.0-or-later",
"optional": true,
"os": [
@@ -2503,7 +2493,6 @@
"cpu": [
"x64"
],
"dev": true,
"license": "LGPL-3.0-or-later",
"optional": true,
"os": [
@@ -2520,7 +2509,6 @@
"cpu": [
"arm64"
],
"dev": true,
"license": "LGPL-3.0-or-later",
"optional": true,
"os": [
@@ -2537,7 +2525,6 @@
"cpu": [
"x64"
],
"dev": true,
"license": "LGPL-3.0-or-later",
"optional": true,
"os": [
@@ -2554,7 +2541,6 @@
"cpu": [
"arm"
],
"dev": true,
"license": "Apache-2.0",
"optional": true,
"os": [
@@ -2577,7 +2563,6 @@
"cpu": [
"arm64"
],
"dev": true,
"license": "Apache-2.0",
"optional": true,
"os": [
@@ -2600,7 +2585,6 @@
"cpu": [
"ppc64"
],
"dev": true,
"license": "Apache-2.0",
"optional": true,
"os": [
@@ -2623,7 +2607,6 @@
"cpu": [
"s390x"
],
"dev": true,
"license": "Apache-2.0",
"optional": true,
"os": [
@@ -2646,7 +2629,6 @@
"cpu": [
"x64"
],
"dev": true,
"license": "Apache-2.0",
"optional": true,
"os": [
@@ -2669,7 +2651,6 @@
"cpu": [
"arm64"
],
"dev": true,
"license": "Apache-2.0",
"optional": true,
"os": [
@@ -2692,7 +2673,6 @@
"cpu": [
"x64"
],
"dev": true,
"license": "Apache-2.0",
"optional": true,
"os": [
@@ -2715,7 +2695,6 @@
"cpu": [
"wasm32"
],
"dev": true,
"license": "Apache-2.0 AND LGPL-3.0-or-later AND MIT",
"optional": true,
"dependencies": {
@@ -2735,7 +2714,6 @@
"cpu": [
"arm64"
],
"dev": true,
"license": "Apache-2.0 AND LGPL-3.0-or-later",
"optional": true,
"os": [
@@ -2755,7 +2733,6 @@
"cpu": [
"ia32"
],
"dev": true,
"license": "Apache-2.0 AND LGPL-3.0-or-later",
"optional": true,
"os": [
@@ -2775,7 +2752,6 @@
"cpu": [
"x64"
],
"dev": true,
"license": "Apache-2.0 AND LGPL-3.0-or-later",
"optional": true,
"os": [
@@ -2886,9 +2862,9 @@
}
},
"node_modules/@next/env": {
"version": "15.5.14",
"resolved": "https://registry.npmjs.org/@next/env/-/env-15.5.14.tgz",
"integrity": "sha512-aXeirLYuASxEgi4X4WhfXsShCFxWDfNn/8ZeC5YXAS2BB4A8FJi1kwwGL6nvMVboE7fZCzmJPNdMvVHc8JpaiA==",
"version": "15.5.15",
"resolved": "https://registry.npmjs.org/@next/env/-/env-15.5.15.tgz",
"integrity": "sha512-vcmyu5/MyFzN7CdqRHO3uHO44p/QPCZkuTUXroeUmhNP8bL5PHFEhik22JUazt+CDDoD6EpBYRCaS2pISL+/hg==",
"license": "MIT"
},
"node_modules/@next/eslint-plugin-next": {
@@ -2902,9 +2878,9 @@
}
},
"node_modules/@next/swc-darwin-arm64": {
"version": "15.5.14",
"resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-15.5.14.tgz",
"integrity": "sha512-Y9K6SPzobnZvrRDPO2s0grgzC+Egf0CqfbdvYmQVaztV890zicw8Z8+4Vqw8oPck8r1TjUHxVh8299Cg4TrxXg==",
"version": "15.5.15",
"resolved": "https://registry.npmjs.org/@next/swc-darwin-arm64/-/swc-darwin-arm64-15.5.15.tgz",
"integrity": "sha512-6PvFO2Tzt10GFK2Ro9tAVEtacMqRmTarYMFKAnV2vYMdwWc73xzmDQyAV7SwEdMhzmiRoo7+m88DuiXlJlGeaw==",
"cpu": [
"arm64"
],
@@ -2918,9 +2894,9 @@
}
},
"node_modules/@next/swc-darwin-x64": {
"version": "15.5.14",
"resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-15.5.14.tgz",
"integrity": "sha512-aNnkSMjSFRTOmkd7qoNI2/rETQm/vKD6c/Ac9BZGa9CtoOzy3c2njgz7LvebQJ8iPxdeTuGnAjagyis8a9ifBw==",
"version": "15.5.15",
"resolved": "https://registry.npmjs.org/@next/swc-darwin-x64/-/swc-darwin-x64-15.5.15.tgz",
"integrity": "sha512-G+YNV+z6FDZTp/+IdGyIMFqalBTaQSnvAA+X/hrt+eaTRFSznRMz9K7rTmzvM6tDmKegNtyzgufZW0HwVzEqaQ==",
"cpu": [
"x64"
],
@@ -2934,9 +2910,9 @@
}
},
"node_modules/@next/swc-linux-arm64-gnu": {
"version": "15.5.14",
"resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-15.5.14.tgz",
"integrity": "sha512-tjlpia+yStPRS//6sdmlVwuO1Rioern4u2onafa5n+h2hCS9MAvMXqpVbSrjgiEOoCs0nJy7oPOmWgtRRNSM5Q==",
"version": "15.5.15",
"resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-gnu/-/swc-linux-arm64-gnu-15.5.15.tgz",
"integrity": "sha512-eVkrMcVIBqGfXB+QUC7jjZ94Z6uX/dNStbQFabewAnk13Uy18Igd1YZ/GtPRzdhtm7QwC0e6o7zOQecul4iC1w==",
"cpu": [
"arm64"
],
@@ -2950,9 +2926,9 @@
}
},
"node_modules/@next/swc-linux-arm64-musl": {
"version": "15.5.14",
"resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-15.5.14.tgz",
"integrity": "sha512-8B8cngBaLadl5lbDRdxGCP1Lef8ipD6KlxS3v0ElDAGil6lafrAM3B258p1KJOglInCVFUjk751IXMr2ixeQOQ==",
"version": "15.5.15",
"resolved": "https://registry.npmjs.org/@next/swc-linux-arm64-musl/-/swc-linux-arm64-musl-15.5.15.tgz",
"integrity": "sha512-RwSHKMQ7InLy5GfkY2/n5PcFycKA08qI1VST78n09nN36nUPqCvGSMiLXlfUmzmpQpF6XeBYP2KRWHi0UW3uNg==",
"cpu": [
"arm64"
],
@@ -2966,9 +2942,9 @@
}
},
"node_modules/@next/swc-linux-x64-gnu": {
"version": "15.5.14",
"resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-15.5.14.tgz",
"integrity": "sha512-bAS6tIAg8u4Gn3Nz7fCPpSoKAexEt2d5vn1mzokcqdqyov6ZJ6gu6GdF9l8ORFrBuRHgv3go/RfzYz5BkZ6YSQ==",
"version": "15.5.15",
"resolved": "https://registry.npmjs.org/@next/swc-linux-x64-gnu/-/swc-linux-x64-gnu-15.5.15.tgz",
"integrity": "sha512-nplqvY86LakS+eeiuWsNWvfmK8pFcOEW7ZtVRt4QH70lL+0x6LG/m1OpJ/tvrbwjmR8HH9/fH2jzW1GlL03TIg==",
"cpu": [
"x64"
],
@@ -2982,9 +2958,9 @@
}
},
"node_modules/@next/swc-linux-x64-musl": {
"version": "15.5.14",
"resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-15.5.14.tgz",
"integrity": "sha512-mMxv/FcrT7Gfaq4tsR22l17oKWXZmH/lVqcvjX0kfp5I0lKodHYLICKPoX1KRnnE+ci6oIUdriUhuA3rBCDiSw==",
"version": "15.5.15",
"resolved": "https://registry.npmjs.org/@next/swc-linux-x64-musl/-/swc-linux-x64-musl-15.5.15.tgz",
"integrity": "sha512-eAgl9NKQ84/sww0v81DQINl/vL2IBxD7sMybd0cWRw6wqgouVI53brVRBrggqBRP/NWeIAE1dm5cbKYoiMlqDQ==",
"cpu": [
"x64"
],
@@ -2998,9 +2974,9 @@
}
},
"node_modules/@next/swc-win32-arm64-msvc": {
"version": "15.5.14",
"resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-15.5.14.tgz",
"integrity": "sha512-OTmiBlYThppnvnsqx0rBqjDRemlmIeZ8/o4zI7veaXoeO1PVHoyj2lfTfXTiiGjCyRDhA10y4h6ZvZvBiynr2g==",
"version": "15.5.15",
"resolved": "https://registry.npmjs.org/@next/swc-win32-arm64-msvc/-/swc-win32-arm64-msvc-15.5.15.tgz",
"integrity": "sha512-GJVZC86lzSquh0MtvZT+L7G8+jMnJcldloOjA8Kf3wXvBrvb6OGe2MzPuALxFshSm/IpwUtD2mIoof39ymf52A==",
"cpu": [
"arm64"
],
@@ -3014,9 +2990,9 @@
}
},
"node_modules/@next/swc-win32-x64-msvc": {
"version": "15.5.14",
"resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-15.5.14.tgz",
"integrity": "sha512-+W7eFf3RS7m4G6tppVTOSyP9Y6FsJXfOuKzav1qKniiFm3KFByQfPEcouHdjlZmysl4zJGuGLQ/M9XyVeyeNEg==",
"version": "15.5.15",
"resolved": "https://registry.npmjs.org/@next/swc-win32-x64-msvc/-/swc-win32-x64-msvc-15.5.15.tgz",
"integrity": "sha512-nFucjVdwlFqxh/JG3hWSJ4p8+YJV7Ii8aPDuBQULB6DzUF4UNZETXLfEUk+oI2zEznWWULPt7MeuTE6xtK1HSA==",
"cpu": [
"x64"
],
@@ -3035,7 +3011,6 @@
"integrity": "sha512-2I0gnIVPtfnMw9ee9h1dJG7tp81+8Ob3OJb3Mv37rx5L40/b0i7djjCVvGOVqc9AEIQyvyu1i6ypKdFw8R8gQw==",
"dev": true,
"license": "MIT",
"peer": true,
"engines": {
"node": "^14.21.3 || >=16"
},
@@ -6982,7 +6957,6 @@
"resolved": "https://registry.npmjs.org/@react-email/text/-/text-0.1.6.tgz",
"integrity": "sha512-TYqkioRS45wTR5il3dYk/SbUjjEdhSwh9BtRNB99qNH1pXAwA45H7rAuxehiu8iJQJH0IyIr+6n62gBz9ezmsw==",
"license": "MIT",
"peer": true,
"engines": {
"node": ">=20.0.0"
},
@@ -8443,7 +8417,6 @@
"version": "5.90.21",
"resolved": "https://registry.npmjs.org/@tanstack/react-query/-/react-query-5.90.21.tgz",
"integrity": "sha512-0Lu6y5t+tvlTJMTO7oh5NSpJfpg/5D41LlThfepTixPYkJ0sE2Jj0m0f6yYqujBwIXlId87e234+MxG3D3g7kg==",
"peer": true,
"dependencies": {
"@tanstack/query-core": "5.90.20"
},
@@ -8559,7 +8532,6 @@
"integrity": "sha512-NMv9ASNARoKksWtsq/SHakpYAYnhBrQgGD8zkLYk/jaK8jUGn08CfEdTRgYhMypUQAfzSP8W6gNLe0q19/t4VA==",
"devOptional": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@types/node": "*"
}
@@ -8719,6 +8691,7 @@
"version": "3.0.7",
"resolved": "https://registry.npmjs.org/@types/d3-drag/-/d3-drag-3.0.7.tgz",
"integrity": "sha512-HE3jVKlzU9AaMazNufooRJ5ZpWmLIoc90A37WU2JMmeq28w1FQqCZswHZ3xR+SuxYftzHq6WU6KJHvqxKzTxxQ==",
"dev": true,
"license": "MIT",
"dependencies": {
"@types/d3-selection": "*"
@@ -8834,6 +8807,7 @@
"version": "3.0.11",
"resolved": "https://registry.npmjs.org/@types/d3-selection/-/d3-selection-3.0.11.tgz",
"integrity": "sha512-bhAXu23DJWsrI45xafYpkQ4NtcKMwWnAC/vKrd2l+nxMFuvOT3XMYTIj2opv8vq8AO5Yh7Qac/nSeP/3zjTK0w==",
"dev": true,
"license": "MIT"
},
"node_modules/@types/d3-shape": {
@@ -8868,6 +8842,7 @@
"version": "3.0.9",
"resolved": "https://registry.npmjs.org/@types/d3-transition/-/d3-transition-3.0.9.tgz",
"integrity": "sha512-uZS5shfxzO3rGlu0cC3bjmMFKsXv+SmZZcgp0KD22ts4uGXp5EVYGzu/0YdwZeKmddhcAccYtREJKkPfXkZuCg==",
"dev": true,
"license": "MIT",
"dependencies": {
"@types/d3-selection": "*"
@@ -8877,6 +8852,7 @@
"version": "3.0.8",
"resolved": "https://registry.npmjs.org/@types/d3-zoom/-/d3-zoom-3.0.8.tgz",
"integrity": "sha512-iqMC4/YlFCSlO8+2Ii1GGGliCAY4XdeG748w5vQUbevlbDu0zSjH/+jojorQVBK/se0j6DUFNPBGSqD3YWYnDw==",
"dev": true,
"license": "MIT",
"dependencies": {
"@types/d3-interpolate": "*",
@@ -8903,7 +8879,6 @@
"integrity": "sha512-sKYVuV7Sv9fbPIt/442koC7+IIwK5olP1KWeD88e/idgoJqDm3JV/YUiPwkoKK92ylff2MGxSz1CSjsXelx0YA==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@types/body-parser": "*",
"@types/express-serve-static-core": "^5.0.0",
@@ -8999,7 +8974,6 @@
"integrity": "sha512-oX8xrhvpiyRCQkG1MFchB09f+cXftgIXb3a7UUa4Y3wpmZPw5tyZGTLWhlESOLq1Rq6oDlc8npVU2/9xiCuXMA==",
"devOptional": true,
"license": "MIT",
"peer": true,
"dependencies": {
"undici-types": "~7.18.0"
}
@@ -9027,7 +9001,6 @@
"integrity": "sha512-gT+oueVQkqnj6ajGJXblFR4iavIXWsGAFCk3dP4Kki5+a9R4NMt0JARdk6s8cUKcfUoqP5dAtDSLU8xYUTFV+Q==",
"devOptional": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@types/node": "*",
"pg-protocol": "*",
@@ -9053,7 +9026,6 @@
"resolved": "https://registry.npmjs.org/@types/react/-/react-19.2.14.tgz",
"integrity": "sha512-ilcTH/UniCkMdtexkoCN0bI7pMcJDvmQFPvuPvmEaYA/NSfFTAgdUSLAoVjaRJm7+6PvcM+q1zYOwS4wTYMF9w==",
"devOptional": true,
"peer": true,
"dependencies": {
"csstype": "^3.2.2"
}
@@ -9064,7 +9036,6 @@
"integrity": "sha512-jp2L/eY6fn+KgVVQAOqYItbF0VY/YApe5Mz2F0aykSO8gx31bYCZyvSeYxCHKvzHG5eZjc+zyaS5BrBWya2+kQ==",
"devOptional": true,
"license": "MIT",
"peer": true,
"peerDependencies": {
"@types/react": "^19.2.0"
}
@@ -9151,7 +9122,8 @@
"resolved": "https://registry.npmjs.org/@types/trusted-types/-/trusted-types-2.0.7.tgz",
"integrity": "sha512-ScaPdn1dQczgbl0QFTeTOmVHFULt394XJgOQNoyVhZ6r2vLnMLJfBPd53SB52T/3G36VI1/g2MZaX0cwDuXsfw==",
"license": "MIT",
"optional": true
"optional": true,
"peer": true
},
"node_modules/@types/ws": {
"version": "8.18.1",
@@ -9225,7 +9197,6 @@
"integrity": "sha512-klQbnPAAiGYFyI02+znpBRLyjL4/BrBd0nyWkdC0s/6xFLkXYQ8OoRrSkqacS1ddVxf/LDyODIKbQ5TgKAf/Fg==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@typescript-eslint/scope-manager": "8.56.1",
"@typescript-eslint/types": "8.56.1",
@@ -9680,38 +9651,6 @@
"win32"
]
},
"node_modules/@xyflow/react": {
"version": "12.8.4",
"resolved": "https://registry.npmjs.org/@xyflow/react/-/react-12.8.4.tgz",
"integrity": "sha512-bqUu4T5QSHiCFPkoH+b+LROKwQJdLvcjhGbNW9c1dLafCBRjmH1IYz0zPE+lRDXCtQ9kRyFxz3tG19+8VORJ1w==",
"license": "MIT",
"dependencies": {
"@xyflow/system": "0.0.68",
"classcat": "^5.0.3",
"zustand": "^4.4.0"
},
"peerDependencies": {
"react": ">=17",
"react-dom": ">=17"
}
},
"node_modules/@xyflow/system": {
"version": "0.0.68",
"resolved": "https://registry.npmjs.org/@xyflow/system/-/system-0.0.68.tgz",
"integrity": "sha512-QDG2wxIG4qX+uF8yzm1ULVZrcXX3MxPBoxv7O52FWsX87qIImOqifUhfa/TwsvLdzn7ic2DDBH1uI8TKbdNTYA==",
"license": "MIT",
"dependencies": {
"@types/d3-drag": "^3.0.7",
"@types/d3-interpolate": "^3.0.4",
"@types/d3-selection": "^3.0.10",
"@types/d3-transition": "^3.0.8",
"@types/d3-zoom": "^3.0.8",
"d3-drag": "^3.0.0",
"d3-interpolate": "^3.0.1",
"d3-selection": "^3.0.0",
"d3-zoom": "^3.0.0"
}
},
"node_modules/accepts": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/accepts/-/accepts-2.0.0.tgz",
@@ -9731,7 +9670,6 @@
"integrity": "sha512-UVJyE9MttOsBQIDKw1skb9nAwQuR5wuGD3+82K6JgJlm/Y+KI92oNsMNGZCYdDsVtRHSak0pcV5Dno5+4jh9sw==",
"dev": true,
"license": "MIT",
"peer": true,
"bin": {
"acorn": "bin/acorn"
},
@@ -10180,7 +10118,6 @@
"integrity": "sha512-Ixm8tFfoKKIPYdCCKYTsqv+Fd4IJ0DQqMyEimo+pxUOMUR9cVPlwTrFt9Avu+3cb6Zp3mAzl+t1MrG2fxxKsxw==",
"devOptional": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@babel/types": "^7.26.0"
}
@@ -10252,7 +10189,6 @@
"integrity": "sha512-Ba0KR+Fzxh2jDRhdg6TSH0SJGzb8C0aBY4hR8w8madIdIzzC6Y1+kx5qR6eS1Z+Gy20h6ZU28aeyg0z1VIrShQ==",
"hasInstallScript": true,
"license": "MIT",
"peer": true,
"dependencies": {
"bindings": "^1.5.0",
"prebuild-install": "^7.1.1"
@@ -10381,7 +10317,6 @@
}
],
"license": "MIT",
"peer": true,
"dependencies": {
"baseline-browser-mapping": "^2.9.0",
"caniuse-lite": "^1.0.30001759",
@@ -10564,12 +10499,6 @@
"url": "https://polar.sh/cva"
}
},
"node_modules/classcat": {
"version": "5.0.5",
"resolved": "https://registry.npmjs.org/classcat/-/classcat-5.0.5.tgz",
"integrity": "sha512-JhZUT7JFcQy/EzW605k/ktHtncoo9vnyW/2GspNYwFlN1C/WmjuV/xtS04e9SOkL2sTdw0VAZ2UGCcQ9lR6p6w==",
"license": "MIT"
},
"node_modules/cli-spinners": {
"version": "2.9.2",
"resolved": "https://registry.npmjs.org/cli-spinners/-/cli-spinners-2.9.2.tgz",
@@ -11294,7 +11223,6 @@
"resolved": "https://registry.npmjs.org/d3-selection/-/d3-selection-3.0.0.tgz",
"integrity": "sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ==",
"license": "ISC",
"peer": true,
"engines": {
"node": ">=12"
}
@@ -11735,6 +11663,7 @@
"resolved": "https://registry.npmjs.org/dompurify/-/dompurify-3.3.2.tgz",
"integrity": "sha512-6obghkliLdmKa56xdbLOpUZ43pAR6xFy1uOrxBaIDjT+yaRuuybLjGS9eVBoSR/UPU5fq3OXClEHLJNGvbxKpQ==",
"license": "(MPL-2.0 OR Apache-2.0)",
"peer": true,
"engines": {
"node": ">=20"
},
@@ -12369,7 +12298,6 @@
"dev": true,
"hasInstallScript": true,
"license": "MIT",
"peer": true,
"bin": {
"esbuild": "bin/esbuild"
},
@@ -12455,7 +12383,6 @@
"integrity": "sha512-COV33RzXZkqhG9P2rZCFl9ZmJ7WL+gQSCRzE7RhkbclbQPtLAWReL7ysA0Sh4c8Im2U9ynybdR56PV0XcKvqaQ==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@eslint-community/eslint-utils": "^4.8.0",
"@eslint-community/regexpp": "^4.12.2",
@@ -12592,7 +12519,6 @@
"integrity": "sha512-whOE1HFo/qJDyX4SnXzP4N6zOWn79WhnCUY/iDR0mPfQZO8wcYE4JClzI2oZrhBnnMUCBCHZhO6VQyoBU95mZA==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@rtsao/scc": "^1.1.0",
"array-includes": "^3.1.9",
@@ -12986,7 +12912,6 @@
"resolved": "https://registry.npmjs.org/express/-/express-5.2.1.tgz",
"integrity": "sha512-hIS4idWWai69NezIdRt2xFVofaF4j+6INOpJlVOLDO8zXGpUVEVzIYk12UUi2JzjEzWL3IOAxcTubgz9Po0yXw==",
"license": "MIT",
"peer": true,
"dependencies": {
"accepts": "^2.0.0",
"body-parser": "^2.2.1",
@@ -15404,6 +15329,7 @@
"resolved": "https://registry.npmjs.org/monaco-editor/-/monaco-editor-0.55.1.tgz",
"integrity": "sha512-jz4x+TJNFHwHtwuV9vA9rMujcZRb0CEilTEwG2rRSpe/A7Jdkuj8xPKttCgOh+v/lkHy7HsZ64oj+q3xoAFl9A==",
"license": "MIT",
"peer": true,
"dependencies": {
"dompurify": "3.2.7",
"marked": "14.0.0"
@@ -15414,6 +15340,7 @@
"resolved": "https://registry.npmjs.org/marked/-/marked-14.0.0.tgz",
"integrity": "sha512-uIj4+faQ+MgHgwUW1l2PsPglZLOLOT1uErt06dAPtx2kjteLAkbsd/0FiYg/MGS+i7ZKLb7w2WClxHkzOOuryQ==",
"license": "MIT",
"peer": true,
"bin": {
"marked": "bin/marked.js"
},
@@ -15498,13 +15425,12 @@
}
},
"node_modules/next": {
"version": "15.5.14",
"resolved": "https://registry.npmjs.org/next/-/next-15.5.14.tgz",
"integrity": "sha512-M6S+4JyRjmKic2Ssm7jHUPkE6YUJ6lv4507jprsSZLulubz0ihO2E+S4zmQK3JZ2ov81JrugukKU4Tz0ivgqqQ==",
"version": "15.5.15",
"resolved": "https://registry.npmjs.org/next/-/next-15.5.15.tgz",
"integrity": "sha512-VSqCrJwtLVGwAVE0Sb/yikrQfkwkZW9p+lL/J4+xe+G3ZA+QnWPqgcfH1tDUEuk9y+pthzzVFp4L/U8JerMfMQ==",
"license": "MIT",
"peer": true,
"dependencies": {
"@next/env": "15.5.14",
"@next/env": "15.5.15",
"@swc/helpers": "0.5.15",
"caniuse-lite": "^1.0.30001579",
"postcss": "8.4.31",
@@ -15517,14 +15443,14 @@
"node": "^18.18.0 || ^19.8.0 || >= 20.0.0"
},
"optionalDependencies": {
"@next/swc-darwin-arm64": "15.5.14",
"@next/swc-darwin-x64": "15.5.14",
"@next/swc-linux-arm64-gnu": "15.5.14",
"@next/swc-linux-arm64-musl": "15.5.14",
"@next/swc-linux-x64-gnu": "15.5.14",
"@next/swc-linux-x64-musl": "15.5.14",
"@next/swc-win32-arm64-msvc": "15.5.14",
"@next/swc-win32-x64-msvc": "15.5.14",
"@next/swc-darwin-arm64": "15.5.15",
"@next/swc-darwin-x64": "15.5.15",
"@next/swc-linux-arm64-gnu": "15.5.15",
"@next/swc-linux-arm64-musl": "15.5.15",
"@next/swc-linux-x64-gnu": "15.5.15",
"@next/swc-linux-x64-musl": "15.5.15",
"@next/swc-win32-arm64-msvc": "15.5.15",
"@next/swc-win32-x64-msvc": "15.5.15",
"sharp": "^0.34.3"
},
"peerDependencies": {
@@ -16462,7 +16388,6 @@
"resolved": "https://registry.npmjs.org/pg/-/pg-8.20.0.tgz",
"integrity": "sha512-ldhMxz2r8fl/6QkXnBD3CR9/xg694oT6DZQ2s6c/RI28OjtSOpxnPrUCGOBJ46RCUxcWdx3p6kw/xnDHjKvaRA==",
"license": "MIT",
"peer": true,
"dependencies": {
"pg-connection-string": "^2.12.0",
"pg-pool": "^3.13.0",
@@ -16967,7 +16892,6 @@
"resolved": "https://registry.npmjs.org/react/-/react-19.2.4.tgz",
"integrity": "sha512-9nfp2hYpCwOjAN+8TZFGhtWEwgvWHXqESH8qT89AT/lWklpLON22Lc8pEtnpsZz7VmawabSU0gCjnj8aC0euHQ==",
"license": "MIT",
"peer": true,
"engines": {
"node": ">=0.10.0"
}
@@ -16999,7 +16923,6 @@
"resolved": "https://registry.npmjs.org/react-dom/-/react-dom-19.2.4.tgz",
"integrity": "sha512-AXJdLo8kgMbimY95O2aKQqsz2iWi9jMgKJhRBAxECE4IFxfcazB2LmzloIoibJI3C12IlY20+KFaLv+71bUJeQ==",
"license": "MIT",
"peer": true,
"dependencies": {
"scheduler": "^0.27.0"
},
@@ -17292,7 +17215,6 @@
"resolved": "https://registry.npmjs.org/react-hook-form/-/react-hook-form-7.71.2.tgz",
"integrity": "sha512-1CHvcDYzuRUNOflt4MOq3ZM46AronNJtQ1S7tnX6YN4y72qhgiUItpacZUAQ0TyWYci3yz1X+rXaSxiuEm86PA==",
"license": "MIT",
"peer": true,
"engines": {
"node": ">=18.0.0"
},
@@ -18754,8 +18676,7 @@
"version": "4.2.2",
"resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-4.2.2.tgz",
"integrity": "sha512-KWBIxs1Xb6NoLdMVqhbhgwZf2PGBpPEiwOqgI4pFIYbNTfBXiKYyWoTsXgBQ9WFg/OlhnvHaY+AEpW7wSmFo2Q==",
"license": "MIT",
"peer": true
"license": "MIT"
},
"node_modules/tapable": {
"version": "2.3.2",
@@ -19230,7 +19151,6 @@
"integrity": "sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==",
"devOptional": true,
"license": "Apache-2.0",
"peer": true,
"bin": {
"tsc": "bin/tsc",
"tsserver": "bin/tsserver"
@@ -19658,7 +19578,6 @@
"resolved": "https://registry.npmjs.org/winston/-/winston-3.19.0.tgz",
"integrity": "sha512-LZNJgPzfKR+/J3cHkxcpHKpKKvGfDZVPS4hfJCc4cCG0CgYzvlD6yE/S3CIL/Yt91ak327YCpiF/0MyeZHEHKA==",
"license": "MIT",
"peer": true,
"dependencies": {
"@colors/colors": "^1.6.0",
"@dabh/diagnostics": "^2.0.8",
@@ -19865,7 +19784,6 @@
"resolved": "https://registry.npmjs.org/zod/-/zod-4.3.6.tgz",
"integrity": "sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg==",
"license": "MIT",
"peer": true,
"funding": {
"url": "https://github.com/sponsors/colinhacks"
}
@@ -19881,34 +19799,6 @@
"peerDependencies": {
"zod": "^3.25.0 || ^4.0.0"
}
},
"node_modules/zustand": {
"version": "4.5.7",
"resolved": "https://registry.npmjs.org/zustand/-/zustand-4.5.7.tgz",
"integrity": "sha512-CHOUy7mu3lbD6o6LJLfllpjkzhHXSBlX8B9+qPddUsIfeF5S/UZ5q0kmCsnRqT1UHFQZchNFDDzMbQsuesHWlw==",
"license": "MIT",
"dependencies": {
"use-sync-external-store": "^1.2.2"
},
"engines": {
"node": ">=12.7.0"
},
"peerDependencies": {
"@types/react": ">=16.8",
"immer": ">=9.0.6",
"react": ">=16.8"
},
"peerDependenciesMeta": {
"@types/react": {
"optional": true
},
"immer": {
"optional": true
},
"react": {
"optional": true
}
}
}
}
}

View File

@@ -67,7 +67,6 @@
"@tailwindcss/forms": "0.5.11",
"@tanstack/react-query": "5.90.21",
"@tanstack/react-table": "8.21.3",
"@xyflow/react": "^12.8.4",
"arctic": "3.7.0",
"axios": "1.13.5",
"better-sqlite3": "11.9.1",
@@ -93,7 +92,7 @@
"lucide-react": "0.577.0",
"maxmind": "5.0.5",
"moment": "2.30.1",
"next": "15.5.14",
"next": "15.5.15",
"next-intl": "4.8.3",
"next-themes": "0.4.6",
"nextjs-toploader": "3.9.17",

View File

@@ -144,16 +144,7 @@ export enum ActionsEnum {
createEventStreamingDestination = "createEventStreamingDestination",
updateEventStreamingDestination = "updateEventStreamingDestination",
deleteEventStreamingDestination = "deleteEventStreamingDestination",
listEventStreamingDestinations = "listEventStreamingDestinations",
createAlertRule = "createAlertRule",
updateAlertRule = "updateAlertRule",
deleteAlertRule = "deleteAlertRule",
listAlertRules = "listAlertRules",
getAlertRule = "getAlertRule",
createHealthCheck = "createHealthCheck",
updateHealthCheck = "updateHealthCheck",
deleteHealthCheck = "deleteHealthCheck",
listHealthChecks = "listHealthChecks"
listEventStreamingDestinations = "listEventStreamingDestinations"
}
export async function checkUserActionPermission(

View File

@@ -16,13 +16,11 @@ import {
domains,
orgs,
targets,
roles,
users,
exitNodes,
sessions,
clients,
siteResources,
targetHealthCheck,
sites
} from "./schema";
@@ -427,9 +425,7 @@ export const eventStreamingDestinations = pgTable(
orgId: varchar("orgId", { length: 255 })
.notNull()
.references(() => orgs.orgId, { onDelete: "cascade" }),
sendConnectionLogs: boolean("sendConnectionLogs")
.notNull()
.default(false),
sendConnectionLogs: boolean("sendConnectionLogs").notNull().default(false),
sendRequestLogs: boolean("sendRequestLogs").notNull().default(false),
sendActionLogs: boolean("sendActionLogs").notNull().default(false),
sendAccessLogs: boolean("sendAccessLogs").notNull().default(false),
@@ -451,9 +447,7 @@ export const eventStreamingCursors = pgTable(
onDelete: "cascade"
}),
logType: varchar("logType", { length: 50 }).notNull(), // "request" | "action" | "access" | "connection"
lastSentId: bigint("lastSentId", { mode: "number" })
.notNull()
.default(0),
lastSentId: bigint("lastSentId", { mode: "number" }).notNull().default(0),
lastSentAt: bigint("lastSentAt", { mode: "number" }) // epoch milliseconds, null if never sent
},
(table) => [
@@ -464,87 +458,6 @@ export const eventStreamingCursors = pgTable(
]
);
export const alertRules = pgTable("alertRules", {
alertRuleId: serial("alertRuleId").primaryKey(),
orgId: varchar("orgId", { length: 255 })
.notNull()
.references(() => orgs.orgId, { onDelete: "cascade" }),
name: varchar("name", { length: 255 }).notNull(),
// Single field encodes both source and trigger - no redundancy
eventType: varchar("eventType", { length: 100 })
.$type<
| "site_online"
| "site_offline"
| "health_check_healthy"
| "health_check_not_healthy"
>()
.notNull(),
// Nullable depending on eventType
enabled: boolean("enabled").notNull().default(true),
cooldownSeconds: integer("cooldownSeconds").notNull().default(300),
lastTriggeredAt: bigint("lastTriggeredAt", { mode: "number" }), // nullable
createdAt: bigint("createdAt", { mode: "number" }).notNull(),
updatedAt: bigint("updatedAt", { mode: "number" }).notNull()
});
export const alertSites = pgTable("alertSites", {
alertRuleId: integer("alertRuleId")
.notNull()
.references(() => alertRules.alertRuleId, { onDelete: "cascade" }),
siteId: integer("siteId")
.notNull()
.references(() => sites.siteId, { onDelete: "cascade" })
});
export const alertHealthChecks = pgTable("alertHealthChecks", {
alertRuleId: integer("alertRuleId")
.notNull()
.references(() => alertRules.alertRuleId, { onDelete: "cascade" }),
healthCheckId: integer("healthCheckId")
.notNull()
.references(() => targetHealthCheck.targetHealthCheckId, {
onDelete: "cascade"
})
});
// Separating channels by type avoids the mixed-shape problem entirely
export const alertEmailActions = pgTable("alertEmailActions", {
emailActionId: serial("emailActionId").primaryKey(),
alertRuleId: integer("alertRuleId")
.notNull()
.references(() => alertRules.alertRuleId, { onDelete: "cascade" }),
enabled: boolean("enabled").notNull().default(true),
lastSentAt: bigint("lastSentAt", { mode: "number" }) // nullable
});
export const alertEmailRecipients = pgTable("alertEmailRecipients", {
recipientId: serial("recipientId").primaryKey(),
emailActionId: integer("emailActionId")
.notNull()
.references(() => alertEmailActions.emailActionId, {
onDelete: "cascade"
}),
// At least one of these should be set - enforced at app level
userId: varchar("userId").references(() => users.userId, {
onDelete: "cascade"
}),
roleId: varchar("roleId").references(() => roles.roleId, {
onDelete: "cascade"
}),
email: varchar("email", { length: 255 }) // external emails not tied to a user
});
export const alertWebhookActions = pgTable("alertWebhookActions", {
webhookActionId: serial("webhookActionId").primaryKey(),
alertRuleId: integer("alertRuleId")
.notNull()
.references(() => alertRules.alertRuleId, { onDelete: "cascade" }),
webhookUrl: text("webhookUrl").notNull(),
config: text("config"), // encrypted JSON with auth config (authType, credentials)
enabled: boolean("enabled").notNull().default(true),
lastSentAt: bigint("lastSentAt", { mode: "number" }) // nullable
});
export type Approval = InferSelectModel<typeof approvals>;
export type Limit = InferSelectModel<typeof limits>;
export type Account = InferSelectModel<typeof account>;

View File

@@ -57,9 +57,7 @@ export const orgs = pgTable("orgs", {
settingsLogRetentionDaysAction: integer("settingsLogRetentionDaysAction") // where 0 = dont keep logs and -1 = keep forever and 9001 = end of the following year
.notNull()
.default(0),
settingsLogRetentionDaysConnection: integer(
"settingsLogRetentionDaysConnection"
) // where 0 = dont keep logs and -1 = keep forever and 9001 = end of the following year
settingsLogRetentionDaysConnection: integer("settingsLogRetentionDaysConnection") // where 0 = dont keep logs and -1 = keep forever and 9001 = end of the following year
.notNull()
.default(0),
sshCaPrivateKey: text("sshCaPrivateKey"), // Encrypted SSH CA private key (PEM format)
@@ -103,9 +101,7 @@ export const sites = pgTable("sites", {
lastHolePunch: bigint("lastHolePunch", { mode: "number" }),
listenPort: integer("listenPort"),
dockerSocketEnabled: boolean("dockerSocketEnabled").notNull().default(true),
status: varchar("status")
.$type<"pending" | "approved">()
.default("approved")
status: varchar("status").$type<"pending" | "approved">().default("approved")
});
export const resources = pgTable("resources", {
@@ -186,15 +182,9 @@ export const targets = pgTable("targets", {
export const targetHealthCheck = pgTable("targetHealthCheck", {
targetHealthCheckId: serial("targetHealthCheckId").primaryKey(),
targetId: integer("targetId").references(() => targets.targetId, {
onDelete: "cascade"
}),
orgId: varchar("orgId")
.references(() => orgs.orgId, {
onDelete: "cascade"
})
.notNull(),
name: varchar("name"),
targetId: integer("targetId")
.notNull()
.references(() => targets.targetId, { onDelete: "cascade" }),
hcEnabled: boolean("hcEnabled").notNull().default(false),
hcPath: varchar("hcPath"),
hcScheme: varchar("hcScheme"),
@@ -211,9 +201,7 @@ export const targetHealthCheck = pgTable("targetHealthCheck", {
hcHealth: text("hcHealth")
.$type<"unknown" | "healthy" | "unhealthy">()
.default("unknown"), // "unknown", "healthy", "unhealthy"
hcTlsServerName: text("hcTlsServerName"),
hcHealthyThreshold: integer("hcHealthyThreshold").default(1),
hcUnhealthyThreshold: integer("hcUnhealthyThreshold").default(1)
hcTlsServerName: text("hcTlsServerName")
});
export const exitNodes = pgTable("exitNodes", {
@@ -234,23 +222,16 @@ export const exitNodes = pgTable("exitNodes", {
export const siteResources = pgTable("siteResources", {
// this is for the clients
siteResourceId: serial("siteResourceId").primaryKey(),
siteId: integer("siteId")
.notNull()
.references(() => sites.siteId, { onDelete: "cascade" }),
orgId: varchar("orgId")
.notNull()
.references(() => orgs.orgId, { onDelete: "cascade" }),
networkId: integer("networkId").references(() => networks.networkId, {
onDelete: "set null"
}),
defaultNetworkId: integer("defaultNetworkId").references(
() => networks.networkId,
{
onDelete: "restrict"
}
),
niceId: varchar("niceId").notNull(),
name: varchar("name").notNull(),
ssl: boolean("ssl").notNull().default(false),
mode: varchar("mode").$type<"host" | "cidr" | "http">().notNull(), // "host" | "cidr" | "http"
scheme: varchar("scheme").$type<"http" | "https">(), // only for when we are doing https or http mode
mode: varchar("mode").$type<"host" | "cidr">().notNull(), // "host" | "cidr" | "port"
protocol: varchar("protocol"), // only for port mode
proxyPort: integer("proxyPort"), // only for port mode
destinationPort: integer("destinationPort"), // only for port mode
destination: varchar("destination").notNull(), // ip, cidr, hostname; validate against the mode
@@ -263,38 +244,7 @@ export const siteResources = pgTable("siteResources", {
authDaemonPort: integer("authDaemonPort").default(22123),
authDaemonMode: varchar("authDaemonMode", { length: 32 })
.$type<"site" | "remote">()
.default("site"),
domainId: varchar("domainId").references(() => domains.domainId, {
onDelete: "set null"
}),
subdomain: varchar("subdomain"),
fullDomain: varchar("fullDomain")
});
export const networks = pgTable("networks", {
networkId: serial("networkId").primaryKey(),
niceId: text("niceId"),
name: text("name"),
scope: varchar("scope")
.$type<"global" | "resource">()
.notNull()
.default("global"),
orgId: varchar("orgId")
.references(() => orgs.orgId, {
onDelete: "cascade"
})
.notNull()
});
export const siteNetworks = pgTable("siteNetworks", {
siteId: integer("siteId")
.notNull()
.references(() => sites.siteId, {
onDelete: "cascade"
}),
networkId: integer("networkId")
.notNull()
.references(() => networks.networkId, { onDelete: "cascade" })
.default("site")
});
export const clientSiteResources = pgTable("clientSiteResources", {
@@ -1044,7 +994,6 @@ export const requestAuditLog = pgTable(
actor: text("actor"),
actorId: text("actorId"),
resourceId: integer("resourceId"),
siteResourceId: integer("siteResourceId"),
ip: text("ip"),
location: text("location"),
userAgent: text("userAgent"),
@@ -1092,20 +1041,6 @@ export const roundTripMessageTracker = pgTable("roundTripMessageTracker", {
complete: boolean("complete").notNull().default(false)
});
export const statusHistory = pgTable("statusHistory", {
id: serial("id").primaryKey(),
entityType: varchar("entityType").notNull(),
entityId: integer("entityId").notNull(),
orgId: varchar("orgId")
.notNull()
.references(() => orgs.orgId, { onDelete: "cascade" }),
status: varchar("status").notNull(),
timestamp: integer("timestamp").notNull(),
}, (table) => [
index("idx_statusHistory_entity").on(table.entityType, table.entityId, table.timestamp),
index("idx_statusHistory_org_timestamp").on(table.orgId, table.timestamp),
]);
export type Org = InferSelectModel<typeof orgs>;
export type User = InferSelectModel<typeof users>;
export type Site = InferSelectModel<typeof sites>;
@@ -1145,7 +1080,6 @@ export type ResourceWhitelist = InferSelectModel<typeof resourceWhitelist>;
export type VersionMigration = InferSelectModel<typeof versionMigrations>;
export type ResourceRule = InferSelectModel<typeof resourceRules>;
export type Domain = InferSelectModel<typeof domains>;
export type DnsRecord = InferSelectModel<typeof dnsRecords>;
export type SupporterKey = InferSelectModel<typeof supporterKey>;
export type Idp = InferSelectModel<typeof idp>;
export type ApiKey = InferSelectModel<typeof apiKeys>;
@@ -1172,5 +1106,3 @@ export type RequestAuditLog = InferSelectModel<typeof requestAuditLog>;
export type RoundTripMessageTracker = InferSelectModel<
typeof roundTripMessageTracker
>;
export type Network = InferSelectModel<typeof networks>;
export type StatusHistory = InferSelectModel<typeof statusHistory>;

View File

@@ -13,11 +13,9 @@ import {
domains,
exitNodes,
orgs,
roles,
sessions,
siteResources,
sites,
targetHealthCheck,
users
} from "./schema";
@@ -457,77 +455,6 @@ export const eventStreamingCursors = sqliteTable(
]
);
export const alertRules = sqliteTable("alertRules", {
alertRuleId: integer("alertRuleId").primaryKey({ autoIncrement: true }),
orgId: text("orgId")
.notNull()
.references(() => orgs.orgId, { onDelete: "cascade" }),
name: text("name").notNull(),
eventType: text("eventType")
.$type<
| "site_online"
| "site_offline"
| "health_check_healthy"
| "health_check_not_healthy"
>()
.notNull(),
enabled: integer("enabled", { mode: "boolean" }).notNull().default(true),
cooldownSeconds: integer("cooldownSeconds").notNull().default(300),
lastTriggeredAt: integer("lastTriggeredAt"),
createdAt: integer("createdAt").notNull(),
updatedAt: integer("updatedAt").notNull()
});
export const alertSites = sqliteTable("alertSites", {
alertRuleId: integer("alertRuleId")
.notNull()
.references(() => alertRules.alertRuleId, { onDelete: "cascade" }),
siteId: integer("siteId")
.notNull()
.references(() => sites.siteId, { onDelete: "cascade" })
});
export const alertHealthChecks = sqliteTable("alertHealthChecks", {
alertRuleId: integer("alertRuleId")
.notNull()
.references(() => alertRules.alertRuleId, { onDelete: "cascade" }),
healthCheckId: integer("healthCheckId")
.notNull()
.references(() => targetHealthCheck.targetHealthCheckId, {
onDelete: "cascade"
})
});
export const alertEmailActions = sqliteTable("alertEmailActions", {
emailActionId: integer("emailActionId").primaryKey({ autoIncrement: true }),
alertRuleId: integer("alertRuleId")
.notNull()
.references(() => alertRules.alertRuleId, { onDelete: "cascade" }),
enabled: integer("enabled", { mode: "boolean" }).notNull().default(true),
lastSentAt: integer("lastSentAt")
});
export const alertEmailRecipients = sqliteTable("alertEmailRecipients", {
recipientId: integer("recipientId").primaryKey({ autoIncrement: true }),
emailActionId: integer("emailActionId")
.notNull()
.references(() => alertEmailActions.emailActionId, { onDelete: "cascade" }),
userId: text("userId").references(() => users.userId, { onDelete: "cascade" }),
roleId: text("roleId").references(() => roles.roleId, { onDelete: "cascade" }),
email: text("email")
});
export const alertWebhookActions = sqliteTable("alertWebhookActions", {
webhookActionId: integer("webhookActionId").primaryKey({ autoIncrement: true }),
alertRuleId: integer("alertRuleId")
.notNull()
.references(() => alertRules.alertRuleId, { onDelete: "cascade" }),
webhookUrl: text("webhookUrl").notNull(),
config: text("config"), // encrypted JSON with auth config (authType, credentials)
enabled: integer("enabled", { mode: "boolean" }).notNull().default(true),
lastSentAt: integer("lastSentAt")
});
export type Approval = InferSelectModel<typeof approvals>;
export type Limit = InferSelectModel<typeof limits>;
export type Account = InferSelectModel<typeof account>;

View File

@@ -54,9 +54,7 @@ export const orgs = sqliteTable("orgs", {
settingsLogRetentionDaysAction: integer("settingsLogRetentionDaysAction") // where 0 = dont keep logs and -1 = keep forever and 9001 = end of the following year
.notNull()
.default(0),
settingsLogRetentionDaysConnection: integer(
"settingsLogRetentionDaysConnection"
) // where 0 = dont keep logs and -1 = keep forever and 9001 = end of the following year
settingsLogRetentionDaysConnection: integer("settingsLogRetentionDaysConnection") // where 0 = dont keep logs and -1 = keep forever and 9001 = end of the following year
.notNull()
.default(0),
sshCaPrivateKey: text("sshCaPrivateKey"), // Encrypted SSH CA private key (PEM format)
@@ -94,9 +92,6 @@ export const sites = sqliteTable("sites", {
exitNodeId: integer("exitNode").references(() => exitNodes.exitNodeId, {
onDelete: "set null"
}),
networkId: integer("networkId").references(() => networks.networkId, {
onDelete: "set null"
}),
name: text("name").notNull(),
pubKey: text("pubKey"),
subnet: text("subnet"),
@@ -209,15 +204,9 @@ export const targetHealthCheck = sqliteTable("targetHealthCheck", {
targetHealthCheckId: integer("targetHealthCheckId").primaryKey({
autoIncrement: true
}),
targetId: integer("targetId").references(() => targets.targetId, {
onDelete: "cascade"
}),
orgId: text("orgId")
.references(() => orgs.orgId, {
onDelete: "cascade"
})
.notNull(),
name: text("name"),
targetId: integer("targetId")
.notNull()
.references(() => targets.targetId, { onDelete: "cascade" }),
hcEnabled: integer("hcEnabled", { mode: "boolean" })
.notNull()
.default(false),
@@ -238,9 +227,7 @@ export const targetHealthCheck = sqliteTable("targetHealthCheck", {
hcHealth: text("hcHealth")
.$type<"unknown" | "healthy" | "unhealthy">()
.default("unknown"), // "unknown", "healthy", "unhealthy"
hcTlsServerName: text("hcTlsServerName"),
hcHealthyThreshold: integer("hcHealthyThreshold").default(1),
hcUnhealthyThreshold: integer("hcUnhealthyThreshold").default(1)
hcTlsServerName: text("hcTlsServerName")
});
export const exitNodes = sqliteTable("exitNodes", {
@@ -263,21 +250,16 @@ export const siteResources = sqliteTable("siteResources", {
siteResourceId: integer("siteResourceId").primaryKey({
autoIncrement: true
}),
siteId: integer("siteId")
.notNull()
.references(() => sites.siteId, { onDelete: "cascade" }),
orgId: text("orgId")
.notNull()
.references(() => orgs.orgId, { onDelete: "cascade" }),
networkId: integer("networkId").references(() => networks.networkId, {
onDelete: "set null"
}),
defaultNetworkId: integer("defaultNetworkId").references(
() => networks.networkId,
{ onDelete: "restrict" }
),
niceId: text("niceId").notNull(),
name: text("name").notNull(),
ssl: integer("ssl", { mode: "boolean" }).notNull().default(false),
mode: text("mode").$type<"host" | "cidr" | "http">().notNull(), // "host" | "cidr" | "http"
scheme: text("scheme").$type<"http" | "https">(), // only for when we are doing https or http mode
mode: text("mode").$type<"host" | "cidr">().notNull(), // "host" | "cidr" | "port"
protocol: text("protocol"), // only for port mode
proxyPort: integer("proxyPort"), // only for port mode
destinationPort: integer("destinationPort"), // only for port mode
destination: text("destination").notNull(), // ip, cidr, hostname
@@ -292,36 +274,7 @@ export const siteResources = sqliteTable("siteResources", {
authDaemonPort: integer("authDaemonPort").default(22123),
authDaemonMode: text("authDaemonMode")
.$type<"site" | "remote">()
.default("site"),
domainId: text("domainId").references(() => domains.domainId, {
onDelete: "set null"
}),
subdomain: text("subdomain"),
fullDomain: text("fullDomain")
});
export const networks = sqliteTable("networks", {
networkId: integer("networkId").primaryKey({ autoIncrement: true }),
niceId: text("niceId"),
name: text("name"),
scope: text("scope")
.$type<"global" | "resource">()
.notNull()
.default("global"),
orgId: text("orgId")
.notNull()
.references(() => orgs.orgId, { onDelete: "cascade" })
});
export const siteNetworks = sqliteTable("siteNetworks", {
siteId: integer("siteId")
.notNull()
.references(() => sites.siteId, {
onDelete: "cascade"
}),
networkId: integer("networkId")
.notNull()
.references(() => networks.networkId, { onDelete: "cascade" })
.default("site")
});
export const clientSiteResources = sqliteTable("clientSiteResources", {
@@ -1143,7 +1096,6 @@ export const requestAuditLog = sqliteTable(
actor: text("actor"),
actorId: text("actorId"),
resourceId: integer("resourceId"),
siteResourceId: integer("siteResourceId"),
ip: text("ip"),
location: text("location"),
userAgent: text("userAgent"),
@@ -1191,20 +1143,6 @@ export const roundTripMessageTracker = sqliteTable("roundTripMessageTracker", {
complete: integer("complete", { mode: "boolean" }).notNull().default(false)
});
export const statusHistory = sqliteTable("statusHistory", {
id: integer("id").primaryKey({ autoIncrement: true }),
entityType: text("entityType").notNull(), // "site" | "healthCheck"
entityId: integer("entityId").notNull(), // siteId or targetHealthCheckId
orgId: text("orgId")
.notNull()
.references(() => orgs.orgId, { onDelete: "cascade" }),
status: text("status").notNull(), // "online"/"offline" for sites; "healthy"/"unhealthy"/"unknown" for healthChecks
timestamp: integer("timestamp").notNull(), // unix epoch seconds
}, (table) => [
index("idx_statusHistory_entity").on(table.entityType, table.entityId, table.timestamp),
index("idx_statusHistory_org_timestamp").on(table.orgId, table.timestamp),
]);
export type Org = InferSelectModel<typeof orgs>;
export type User = InferSelectModel<typeof users>;
export type Site = InferSelectModel<typeof sites>;
@@ -1257,7 +1195,6 @@ export type ApiKey = InferSelectModel<typeof apiKeys>;
export type ApiKeyAction = InferSelectModel<typeof apiKeyActions>;
export type ApiKeyOrg = InferSelectModel<typeof apiKeyOrg>;
export type SiteResource = InferSelectModel<typeof siteResources>;
export type Network = InferSelectModel<typeof networks>;
export type OrgDomains = InferSelectModel<typeof orgDomains>;
export type SetupToken = InferSelectModel<typeof setupTokens>;
export type HostMeta = InferSelectModel<typeof hostMeta>;
@@ -1272,4 +1209,3 @@ export type DeviceWebAuthCode = InferSelectModel<typeof deviceWebAuthCodes>;
export type RoundTripMessageTracker = InferSelectModel<
typeof roundTripMessageTracker
>;
export type StatusHistory = InferSelectModel<typeof statusHistory>;

View File

@@ -1,140 +0,0 @@
import React from "react";
import { Body, Head, Html, Preview, Tailwind } from "@react-email/components";
import { themeColors } from "./lib/theme";
import {
EmailContainer,
EmailFooter,
EmailGreeting,
EmailHeading,
EmailInfoSection,
EmailLetterHead,
EmailSignature,
EmailText
} from "./components/Email";
export type AlertEventType =
| "site_online"
| "site_offline"
| "health_check_healthy"
| "health_check_not_healthy";
interface Props {
eventType: AlertEventType;
orgId: string;
data: Record<string, unknown>;
}
function getEventMeta(eventType: AlertEventType): {
heading: string;
previewText: string;
summary: string;
statusLabel: string;
statusColor: string;
} {
switch (eventType) {
case "site_online":
return {
heading: "Site Back Online",
previewText: "A site in your organization is back online.",
summary:
"Good news a site in your organization has come back online and is now reachable.",
statusLabel: "Online",
statusColor: "#16a34a"
};
case "site_offline":
return {
heading: "Site Offline",
previewText: "A site in your organization has gone offline.",
summary:
"A site in your organization has gone offline and is no longer reachable. Please investigate as soon as possible.",
statusLabel: "Offline",
statusColor: "#dc2626"
};
case "health_check_healthy":
return {
heading: "Health Check Recovered",
previewText:
"A health check in your organization is now healthy.",
summary:
"A health check in your organization has recovered and is now reporting a healthy status.",
statusLabel: "Healthy",
statusColor: "#16a34a"
};
case "health_check_not_healthy":
return {
heading: "Health Check Failing",
previewText:
"A health check in your organization is not healthy.",
summary:
"A health check in your organization is currently failing. Please review the details below and take action if needed.",
statusLabel: "Not Healthy",
statusColor: "#dc2626"
};
}
}
function formatDataItems(
data: Record<string, unknown>
): { label: string; value: React.ReactNode }[] {
return Object.entries(data)
.filter(([key]) => key !== "orgId")
.map(([key, value]) => ({
label: key
.replace(/([A-Z])/g, " $1")
.replace(/^./, (s) => s.toUpperCase())
.trim(),
value: String(value ?? "-")
}));
}
export const AlertNotification = ({ eventType, orgId, data }: Props) => {
const meta = getEventMeta(eventType);
const dataItems = formatDataItems(data);
const allItems: { label: string; value: React.ReactNode }[] = [
{ label: "Organization", value: orgId },
{ label: "Status", value: (
<span style={{ color: meta.statusColor, fontWeight: 600 }}>
{meta.statusLabel}
</span>
)},
{ label: "Time", value: new Date().toUTCString() },
...dataItems
];
return (
<Html>
<Head />
<Preview>{meta.previewText}</Preview>
<Tailwind config={themeColors}>
<Body className="font-sans bg-gray-50">
<EmailContainer>
<EmailLetterHead />
<EmailHeading>{meta.heading}</EmailHeading>
<EmailGreeting>Hi there,</EmailGreeting>
<EmailText>{meta.summary}</EmailText>
<EmailInfoSection
title="Event Details"
items={allItems}
/>
<EmailText>
Log in to your dashboard to view more details and
manage your alert rules.
</EmailText>
<EmailFooter>
<EmailSignature />
</EmailFooter>
</EmailContainer>
</Body>
</Tailwind>
</Html>
);
};
export default AlertNotification;

View File

@@ -32,7 +32,7 @@ export const EnterpriseEditionKeyGenerated = ({
}: EnterpriseEditionKeyGeneratedProps) => {
const previewText = personalUseOnly
? "Your Enterprise Edition key for personal use is ready"
: "Thank you for your purchase - your Enterprise Edition key is ready";
: "Thank you for your purchase your Enterprise Edition key is ready";
return (
<Html>

View File

@@ -22,7 +22,6 @@ import { TraefikConfigManager } from "@server/lib/traefik/TraefikConfigManager";
import { initCleanup } from "#dynamic/cleanup";
import license from "#dynamic/license/license";
import { initLogCleanupInterval } from "@server/lib/cleanupLogs";
import { initAcmeCertSync } from "#dynamic/lib/acmeCertSync";
import { fetchServerIp } from "@server/lib/serverIpService";
async function startServers() {
@@ -40,7 +39,6 @@ async function startServers() {
initTelemetryClient();
initLogCleanupInterval();
initAcmeCertSync();
// Start all servers
const apiServer = createApiServer();

View File

@@ -1,3 +0,0 @@
export function initAcmeCertSync(): void {
// stub
}

View File

@@ -1,19 +0,0 @@
// stub
export async function fireHealthCheckHealthyAlert(
orgId: string,
healthCheckId: number,
healthCheckName?: string,
extra?: Record<string, unknown>
): Promise<void> {
return;
}
export async function fireHealthCheckNotHealthyAlert(
orgId: string,
healthCheckId: number,
healthCheckName?: string,
extra?: Record<string, unknown>
): Promise<void> {
return;
}

View File

@@ -1,19 +0,0 @@
// stub
export async function fireSiteOnlineAlert(
orgId: string,
siteId: number,
siteName?: string,
extra?: Record<string, unknown>
): Promise<void> {
return;
}
export async function fireSiteOfflineAlert(
orgId: string,
siteId: number,
siteName?: string,
extra?: Record<string, unknown>
): Promise<void> {
return;
}

View File

@@ -1,2 +0,0 @@
export * from "./events/siteEvents";
export * from "./events/healthCheckEvents";

View File

@@ -9,8 +9,8 @@ export type LicensePriceSet = {
export const licensePriceSet: LicensePriceSet = {
// Free license matches the freeLimitSet
[LicenseId.SMALL_LICENSE]: "price_1TMJzmD3Ee2Ir7Wm05NlGImT",
[LicenseId.BIG_LICENSE]: "price_1TMJzzD3Ee2Ir7WmzJw9TerS"
[LicenseId.SMALL_LICENSE]: "price_1SxKHiD3Ee2Ir7WmvtEh17A8",
[LicenseId.BIG_LICENSE]: "price_1SxKHiD3Ee2Ir7WmMUiP0H6Y"
};
export const licensePriceSetSandbox: LicensePriceSet = {

View File

@@ -20,10 +20,7 @@ export enum TierFeature {
FullRbac = "fullRbac",
SiteProvisioningKeys = "siteProvisioningKeys", // handle downgrade by revoking keys if needed
SIEM = "siem", // handle downgrade by disabling SIEM integrations
HTTPPrivateResources = "httpPrivateResources", // handle downgrade by disabling HTTP private resources
DomainNamespaces = "domainNamespaces", // handle downgrade by removing custom domain namespaces
StandaloneHealthChecks = "standaloneHealthChecks",
AlertingRules = "alertingRules"
DomainNamespaces = "domainNamespaces" // handle downgrade by removing custom domain namespaces
}
export const tierMatrix: Record<TierFeature, Tier[]> = {
@@ -61,8 +58,5 @@ export const tierMatrix: Record<TierFeature, Tier[]> = {
[TierFeature.FullRbac]: ["tier1", "tier2", "tier3", "enterprise"],
[TierFeature.SiteProvisioningKeys]: ["tier3", "enterprise"],
[TierFeature.SIEM]: ["enterprise"],
[TierFeature.HTTPPrivateResources]: ["tier3", "enterprise"],
[TierFeature.DomainNamespaces]: ["tier1", "tier2", "tier3", "enterprise"],
[TierFeature.StandaloneHealthChecks]: ["tier2", "tier3", "enterprise"],
[TierFeature.AlertingRules]: ["tier2", "tier3", "enterprise"]
[TierFeature.DomainNamespaces]: ["tier1", "tier2", "tier3", "enterprise"]
};

View File

@@ -121,8 +121,8 @@ export async function applyBlueprint({
for (const result of clientResourcesResults) {
if (
result.oldSiteResource &&
JSON.stringify(result.newSites?.sort()) !==
JSON.stringify(result.oldSites?.sort())
result.oldSiteResource.siteId !=
result.newSiteResource.siteId
) {
// query existing associations
const existingRoleIds = await trx
@@ -222,46 +222,38 @@ export async function applyBlueprint({
trx
);
} else {
let good = true;
for (const newSite of result.newSites) {
const [site] = await trx
.select()
.from(sites)
.innerJoin(newts, eq(sites.siteId, newts.siteId))
.where(
and(
eq(sites.siteId, newSite.siteId),
eq(sites.orgId, orgId),
eq(sites.type, "newt"),
isNotNull(sites.pubKey)
)
const [newSite] = await trx
.select()
.from(sites)
.innerJoin(newts, eq(sites.siteId, newts.siteId))
.where(
and(
eq(sites.siteId, result.newSiteResource.siteId),
eq(sites.orgId, orgId),
eq(sites.type, "newt"),
isNotNull(sites.pubKey)
)
.limit(1);
if (!site) {
logger.debug(
`No newt sites found for client resource ${result.newSiteResource.siteResourceId}, skipping target update`
);
good = false;
break;
}
)
.limit(1);
if (!newSite) {
logger.debug(
`Updating client resource ${result.newSiteResource.siteResourceId} on site ${newSite.siteId}`
`No newt site found for client resource ${result.newSiteResource.siteResourceId}, skipping target update`
);
}
if (!good) {
continue;
}
logger.debug(
`Updating client resource ${result.newSiteResource.siteResourceId} on site ${newSite.sites.siteId}`
);
await handleMessagingForUpdatedSiteResource(
result.oldSiteResource,
result.newSiteResource,
result.newSites.map((site) => ({
siteId: site.siteId,
orgId: result.newSiteResource.orgId
})),
{
siteId: newSite.sites.siteId,
orgId: newSite.sites.orgId
},
trx
);
}

View File

@@ -1,104 +1,24 @@
import {
clients,
clientSiteResources,
domains,
orgDomains,
roles,
roleSiteResources,
Site,
SiteResource,
siteNetworks,
siteResources,
Transaction,
userOrgs,
users,
userSiteResources,
networks
userSiteResources
} from "@server/db";
import { sites } from "@server/db";
import { eq, and, ne, inArray, or, isNotNull } from "drizzle-orm";
import { eq, and, ne, inArray, or } from "drizzle-orm";
import { Config } from "./types";
import logger from "@server/logger";
import { getNextAvailableAliasAddress } from "../ip";
import { createCertificate } from "#dynamic/routers/certificates/createCertificate";
async function getDomainForSiteResource(
siteResourceId: number | undefined,
fullDomain: string,
orgId: string,
trx: Transaction
): Promise<{ subdomain: string | null; domainId: string }> {
const [fullDomainExists] = await trx
.select({ siteResourceId: siteResources.siteResourceId })
.from(siteResources)
.where(
and(
eq(siteResources.fullDomain, fullDomain),
eq(siteResources.orgId, orgId),
siteResourceId
? ne(siteResources.siteResourceId, siteResourceId)
: isNotNull(siteResources.siteResourceId)
)
)
.limit(1);
if (fullDomainExists) {
throw new Error(
`Site resource already exists with domain: ${fullDomain} in org ${orgId}`
);
}
const possibleDomains = await trx
.select()
.from(domains)
.innerJoin(orgDomains, eq(domains.domainId, orgDomains.domainId))
.where(and(eq(orgDomains.orgId, orgId), eq(domains.verified, true)))
.execute();
if (possibleDomains.length === 0) {
throw new Error(
`Domain not found for full-domain: ${fullDomain} in org ${orgId}`
);
}
const validDomains = possibleDomains.filter((domain) => {
if (domain.domains.type == "ns" || domain.domains.type == "wildcard") {
return (
fullDomain === domain.domains.baseDomain ||
fullDomain.endsWith(`.${domain.domains.baseDomain}`)
);
} else if (domain.domains.type == "cname") {
return fullDomain === domain.domains.baseDomain;
}
});
if (validDomains.length === 0) {
throw new Error(
`Domain not found for full-domain: ${fullDomain} in org ${orgId}`
);
}
const domainSelection = validDomains[0].domains;
const baseDomain = domainSelection.baseDomain;
let subdomain: string | null = null;
if (fullDomain !== baseDomain) {
subdomain = fullDomain.replace(`.${baseDomain}`, "");
}
await createCertificate(domainSelection.domainId, fullDomain, trx);
return {
subdomain,
domainId: domainSelection.domainId
};
}
export type ClientResourcesResults = {
newSiteResource: SiteResource;
oldSiteResource?: SiteResource;
newSites: { siteId: number }[];
oldSites: { siteId: number }[];
}[];
export async function updateClientResources(
@@ -123,104 +43,53 @@ export async function updateClientResources(
)
.limit(1);
const existingSiteIds = existingResource?.networkId
? await trx
.select({ siteId: sites.siteId })
.from(siteNetworks)
.where(eq(siteNetworks.networkId, existingResource.networkId))
: [];
const resourceSiteId = resourceData.site;
let site;
let allSites: { siteId: number }[] = [];
if (resourceData.site) {
let siteSingle;
const resourceSiteId = resourceData.site;
if (resourceSiteId) {
// Look up site by niceId
[siteSingle] = await trx
.select({ siteId: sites.siteId })
.from(sites)
.where(
and(
eq(sites.niceId, resourceSiteId),
eq(sites.orgId, orgId)
)
if (resourceSiteId) {
// Look up site by niceId
[site] = await trx
.select({ siteId: sites.siteId })
.from(sites)
.where(
and(
eq(sites.niceId, resourceSiteId),
eq(sites.orgId, orgId)
)
.limit(1);
} else if (siteId) {
// Use the provided siteId directly, but verify it belongs to the org
[siteSingle] = await trx
.select({ siteId: sites.siteId })
.from(sites)
.where(
and(eq(sites.siteId, siteId), eq(sites.orgId, orgId))
)
.limit(1);
} else {
throw new Error(`Target site is required`);
}
if (!siteSingle) {
throw new Error(
`Site not found: ${resourceSiteId} in org ${orgId}`
);
}
allSites.push(siteSingle);
)
.limit(1);
} else if (siteId) {
// Use the provided siteId directly, but verify it belongs to the org
[site] = await trx
.select({ siteId: sites.siteId })
.from(sites)
.where(and(eq(sites.siteId, siteId), eq(sites.orgId, orgId)))
.limit(1);
} else {
throw new Error(`Target site is required`);
}
if (resourceData.sites) {
for (const siteNiceId of resourceData.sites) {
const [site] = await trx
.select({ siteId: sites.siteId })
.from(sites)
.where(
and(
eq(sites.niceId, siteNiceId),
eq(sites.orgId, orgId)
)
)
.limit(1);
if (!site) {
throw new Error(
`Site not found: ${siteId} in org ${orgId}`
);
}
allSites.push(site);
}
if (!site) {
throw new Error(
`Site not found: ${resourceSiteId} in org ${orgId}`
);
}
if (existingResource) {
let domainInfo:
| { subdomain: string | null; domainId: string }
| undefined;
if (resourceData["full-domain"] && resourceData.mode === "http") {
domainInfo = await getDomainForSiteResource(
existingResource.siteResourceId,
resourceData["full-domain"],
orgId,
trx
);
}
// Update existing resource
const [updatedResource] = await trx
.update(siteResources)
.set({
name: resourceData.name || resourceNiceId,
siteId: site.siteId,
mode: resourceData.mode,
ssl: resourceData.ssl,
scheme: resourceData.scheme,
destination: resourceData.destination,
destinationPort: resourceData["destination-port"],
enabled: true, // hardcoded for now
// enabled: resourceData.enabled ?? true,
alias: resourceData.alias || null,
disableIcmp: resourceData["disable-icmp"],
tcpPortRangeString: resourceData["tcp-ports"],
udpPortRangeString: resourceData["udp-ports"],
fullDomain: resourceData["full-domain"] || null,
subdomain: domainInfo ? domainInfo.subdomain : null,
domainId: domainInfo ? domainInfo.domainId : null
udpPortRangeString: resourceData["udp-ports"]
})
.where(
eq(
@@ -231,21 +100,7 @@ export async function updateClientResources(
.returning();
const siteResourceId = existingResource.siteResourceId;
if (updatedResource.networkId) {
await trx
.delete(siteNetworks)
.where(
eq(siteNetworks.networkId, updatedResource.networkId)
);
for (const site of allSites) {
await trx.insert(siteNetworks).values({
siteId: site.siteId,
networkId: updatedResource.networkId
});
}
}
const orgId = existingResource.orgId;
await trx
.delete(clientSiteResources)
@@ -349,72 +204,37 @@ export async function updateClientResources(
results.push({
newSiteResource: updatedResource,
oldSiteResource: existingResource,
newSites: allSites,
oldSites: existingSiteIds
oldSiteResource: existingResource
});
} else {
let aliasAddress: string | null = null;
if (resourceData.mode === "host" || resourceData.mode === "http") {
if (resourceData.mode == "host") {
// we can only have an alias on a host
aliasAddress = await getNextAvailableAliasAddress(orgId);
}
let domainInfo:
| { subdomain: string | null; domainId: string }
| undefined;
if (resourceData["full-domain"] && resourceData.mode === "http") {
domainInfo = await getDomainForSiteResource(
undefined,
resourceData["full-domain"],
orgId,
trx
);
}
const [network] = await trx
.insert(networks)
.values({
scope: "resource",
orgId: orgId
})
.returning();
// Create new resource
const [newResource] = await trx
.insert(siteResources)
.values({
orgId: orgId,
siteId: site.siteId,
niceId: resourceNiceId,
networkId: network.networkId,
defaultNetworkId: network.networkId,
name: resourceData.name || resourceNiceId,
mode: resourceData.mode,
ssl: resourceData.ssl,
scheme: resourceData.scheme,
destination: resourceData.destination,
destinationPort: resourceData["destination-port"],
enabled: true, // hardcoded for now
// enabled: resourceData.enabled ?? true,
alias: resourceData.alias || null,
aliasAddress: aliasAddress,
disableIcmp: resourceData["disable-icmp"],
tcpPortRangeString: resourceData["tcp-ports"],
udpPortRangeString: resourceData["udp-ports"],
fullDomain: resourceData["full-domain"] || null,
subdomain: domainInfo ? domainInfo.subdomain : null,
domainId: domainInfo ? domainInfo.domainId : null
udpPortRangeString: resourceData["udp-ports"]
})
.returning();
const siteResourceId = newResource.siteResourceId;
for (const site of allSites) {
await trx.insert(siteNetworks).values({
siteId: site.siteId,
networkId: network.networkId
});
}
const [adminRole] = await trx
.select()
.from(roles)
@@ -504,11 +324,7 @@ export async function updateClientResources(
`Created new client resource ${newResource.name} (${newResource.siteResourceId}) for org ${orgId}`
);
results.push({
newSiteResource: newResource,
newSites: allSites,
oldSites: existingSiteIds
});
results.push({ newSiteResource: newResource });
}
}

View File

@@ -140,7 +140,6 @@ export async function updateProxyResources(
const [newHealthcheck] = await trx
.insert(targetHealthCheck)
.values({
name: `${targetData.hostname}:${targetData.port}`,
targetId: newTarget.targetId,
hcEnabled: healthcheckData?.enabled || false,
hcPath: healthcheckData?.path,
@@ -159,9 +158,7 @@ export async function updateProxyResources(
healthcheckData?.["follow-redirects"],
hcMethod: healthcheckData?.method,
hcStatus: healthcheckData?.status,
hcHealth: "unknown",
hcHealthyThreshold: healthcheckData?.["healthy-threshold"],
hcUnhealthyThreshold: healthcheckData?.["unhealthy-threshold"]
hcHealth: "unknown"
})
.returning();
@@ -525,9 +522,7 @@ export async function updateProxyResources(
healthcheckData?.followRedirects ||
healthcheckData?.["follow-redirects"],
hcMethod: healthcheckData?.method,
hcStatus: healthcheckData?.status,
hcHealthyThreshold: healthcheckData?.["healthy-threshold"],
hcUnhealthyThreshold: healthcheckData?.["unhealthy-threshold"]
hcStatus: healthcheckData?.status
})
.where(
eq(
@@ -1086,8 +1081,6 @@ function checkIfHealthcheckChanged(
JSON.stringify(incoming.hcHeaders)
)
return true;
if (existing.hcHealthyThreshold !== incoming.hcHealthyThreshold) return true;
if (existing.hcUnhealthyThreshold !== incoming.hcUnhealthyThreshold) return true;
return false;
}
@@ -1107,7 +1100,7 @@ function checkIfTargetChanged(
return false;
}
export async function getDomain(
async function getDomain(
resourceId: number | undefined,
fullDomain: string,
orgId: string,

View File

@@ -12,7 +12,7 @@ export const TargetHealthCheckSchema = z.object({
hostname: z.string(),
port: z.int().min(1).max(65535),
enabled: z.boolean().optional().default(true),
path: z.string().optional(),
path: z.string().optional().default("/"),
scheme: z.string().optional(),
mode: z.string().default("http"),
interval: z.int().default(30),
@@ -26,10 +26,8 @@ export const TargetHealthCheckSchema = z.object({
.default(null),
"follow-redirects": z.boolean().default(true),
followRedirects: z.boolean().optional(), // deprecated alias
method: z.string().optional(),
status: z.int().optional(),
"healthy-threshold": z.int().min(1).optional().default(1),
"unhealthy-threshold": z.int().min(1).optional().default(1)
method: z.string().default("GET"),
status: z.int().optional()
});
// Schema for individual target within a resource
@@ -166,7 +164,6 @@ export const ResourceSchema = z
name: z.string().optional(),
protocol: z.enum(["http", "tcp", "udp"]).optional(),
ssl: z.boolean().optional(),
scheme: z.enum(["http", "https"]).optional(),
"full-domain": z.string().optional(),
"proxy-port": z.int().min(1).max(65535).optional(),
enabled: z.boolean().optional(),
@@ -328,20 +325,16 @@ export function isTargetsOnlyResource(resource: any): boolean {
export const ClientResourceSchema = z
.object({
name: z.string().min(1).max(255),
mode: z.enum(["host", "cidr", "http"]),
site: z.string(), // DEPRECATED IN FAVOR OF sites
sites: z.array(z.string()).optional().default([]),
mode: z.enum(["host", "cidr"]),
site: z.string(),
// protocol: z.enum(["tcp", "udp"]).optional(),
// proxyPort: z.int().positive().optional(),
"destination-port": z.int().positive().optional(),
// destinationPort: z.int().positive().optional(),
destination: z.string().min(1),
// enabled: z.boolean().default(true),
"tcp-ports": portRangeStringSchema.optional().default("*"),
"udp-ports": portRangeStringSchema.optional().default("*"),
"disable-icmp": z.boolean().optional().default(false),
"full-domain": z.string().optional(),
ssl: z.boolean().optional(),
scheme: z.enum(["http", "https"]).optional().nullable(),
alias: z
.string()
.regex(
@@ -484,39 +477,6 @@ export const ConfigSchema = z
});
}
// Enforce the full-domain uniqueness across client-resources in the same stack
const clientFullDomainMap = new Map<string, string[]>();
Object.entries(config["client-resources"]).forEach(
([resourceKey, resource]) => {
const fullDomain = resource["full-domain"];
if (fullDomain) {
if (!clientFullDomainMap.has(fullDomain)) {
clientFullDomainMap.set(fullDomain, []);
}
clientFullDomainMap.get(fullDomain)!.push(resourceKey);
}
}
);
const clientFullDomainDuplicates = Array.from(
clientFullDomainMap.entries()
)
.filter(([_, resourceKeys]) => resourceKeys.length > 1)
.map(
([fullDomain, resourceKeys]) =>
`'${fullDomain}' used by resources: ${resourceKeys.join(", ")}`
)
.join("; ");
if (clientFullDomainDuplicates.length !== 0) {
ctx.addIssue({
code: z.ZodIssueCode.custom,
path: ["client-resources"],
message: `Duplicate 'full-domain' values found: ${clientFullDomainDuplicates}`
});
}
// Enforce proxy-port uniqueness within proxy-resources per protocol
const protocolPortMap = new Map<string, string[]>();

39
server/lib/encryption.ts Normal file
View File

@@ -0,0 +1,39 @@
import crypto from "crypto";
/**
 * Encrypts a UTF-8 string with AES-256-GCM and a random 16-byte IV.
 * Returns "ivHex:authTagHex:ciphertextHex" so the IV and GCM auth tag
 * travel alongside the ciphertext for later decryption.
 */
export function encryptData(data: string, key: Buffer): string {
    // A fresh random IV per call keeps identical plaintexts from
    // producing identical ciphertexts.
    const iv = crypto.randomBytes(16);
    const cipher = crypto.createCipheriv("aes-256-gcm", key, iv);
    const ciphertext = Buffer.concat([
        cipher.update(data, "utf8"),
        cipher.final()
    ]);
    // Pack IV, auth tag, and ciphertext as colon-separated hex segments.
    return [
        iv.toString("hex"),
        cipher.getAuthTag().toString("hex"),
        ciphertext.toString("hex")
    ].join(":");
}
/**
 * Decrypts a payload produced by encryptData ("ivHex:authTagHex:ciphertextHex").
 * Throws if the payload does not have exactly three colon-separated segments,
 * or if GCM authentication fails during final().
 * (You'll need this to read certificates.)
 */
export function decryptData(encryptedData: string, key: Buffer): string {
    const segments = encryptedData.split(":");
    if (segments.length !== 3) {
        throw new Error("Invalid encrypted data format");
    }
    const [ivHex, tagHex, ciphertextHex] = segments;
    const decipher = crypto.createDecipheriv(
        "aes-256-gcm",
        key,
        Buffer.from(ivHex, "hex")
    );
    // The auth tag must be set before final() so GCM can verify integrity.
    decipher.setAuthTag(Buffer.from(tagHex, "hex"));
    const plaintext = Buffer.concat([
        decipher.update(Buffer.from(ciphertextHex, "hex")),
        decipher.final()
    ]);
    return plaintext.toString("utf8");
}
// openssl rand -hex 32 > config/encryption.key

View File

@@ -5,7 +5,6 @@ import config from "@server/lib/config";
import z from "zod";
import logger from "@server/logger";
import semver from "semver";
import { getValidCertificatesForDomains } from "#dynamic/lib/certificates";
interface IPRange {
start: bigint;
@@ -478,9 +477,9 @@ export type Alias = { alias: string | null; aliasAddress: string | null };
export function generateAliasConfig(allSiteResources: SiteResource[]): Alias[] {
return allSiteResources
.filter((sr) => sr.aliasAddress && ((sr.alias && sr.mode == "host") || (sr.fullDomain && sr.mode == "http")))
.filter((sr) => sr.alias && sr.aliasAddress && sr.mode == "host")
.map((sr) => ({
alias: sr.alias || sr.fullDomain,
alias: sr.alias,
aliasAddress: sr.aliasAddress
}));
}
@@ -583,26 +582,16 @@ export type SubnetProxyTargetV2 = {
protocol: "tcp" | "udp";
}[];
resourceId?: number;
protocol?: "http" | "https"; // if set, this target only applies to the specified protocol
httpTargets?: HTTPTarget[];
tlsCert?: string;
tlsKey?: string;
};
export type HTTPTarget = {
destAddr: string; // must be an IP or hostname
destPort: number;
scheme: "http" | "https";
};
export async function generateSubnetProxyTargetV2(
export function generateSubnetProxyTargetV2(
siteResource: SiteResource,
clients: {
clientId: number;
pubKey: string | null;
subnet: string | null;
}[]
): Promise<SubnetProxyTargetV2[] | undefined> {
): SubnetProxyTargetV2 | undefined {
if (clients.length === 0) {
logger.debug(
`No clients have access to site resource ${siteResource.siteResourceId}, skipping target generation.`
@@ -610,7 +599,7 @@ export async function generateSubnetProxyTargetV2(
return;
}
let targets: SubnetProxyTargetV2[] = [];
let target: SubnetProxyTargetV2 | null = null;
const portRange = [
...parsePortRangeString(siteResource.tcpPortRangeString, "tcp"),
@@ -625,115 +614,52 @@ export async function generateSubnetProxyTargetV2(
if (ipSchema.safeParse(destination).success) {
destination = `${destination}/32`;
targets.push({
target = {
sourcePrefixes: [],
destPrefix: destination,
portRange,
disableIcmp,
resourceId: siteResource.siteResourceId
});
resourceId: siteResource.siteResourceId,
};
}
if (siteResource.alias && siteResource.aliasAddress) {
// also push a match for the alias address
targets.push({
target = {
sourcePrefixes: [],
destPrefix: `${siteResource.aliasAddress}/32`,
rewriteTo: destination,
portRange,
disableIcmp,
resourceId: siteResource.siteResourceId
});
resourceId: siteResource.siteResourceId,
};
}
} else if (siteResource.mode == "cidr") {
targets.push({
target = {
sourcePrefixes: [],
destPrefix: siteResource.destination,
portRange,
disableIcmp,
resourceId: siteResource.siteResourceId
});
} else if (siteResource.mode == "http") {
let destination = siteResource.destination;
// check if this is a valid ip
const ipSchema = z.union([z.ipv4(), z.ipv6()]);
if (ipSchema.safeParse(destination).success) {
destination = `${destination}/32`;
}
if (
!siteResource.aliasAddress ||
!siteResource.destinationPort ||
!siteResource.scheme ||
!siteResource.fullDomain
) {
logger.debug(
`Site resource ${siteResource.siteResourceId} is in HTTP mode but is missing alias or alias address or destinationPort or scheme, skipping alias target generation.`
);
return;
}
// also push a match for the alias address
let tlsCert: string | undefined;
let tlsKey: string | undefined;
if (siteResource.ssl && siteResource.fullDomain) {
try {
const certs = await getValidCertificatesForDomains(
new Set([siteResource.fullDomain]),
true
);
if (certs.length > 0 && certs[0].certFile && certs[0].keyFile) {
tlsCert = certs[0].certFile;
tlsKey = certs[0].keyFile;
} else {
logger.warn(
`No valid certificate found for SSL site resource ${siteResource.siteResourceId} with domain ${siteResource.fullDomain}`
);
}
} catch (err) {
logger.error(
`Failed to retrieve certificate for site resource ${siteResource.siteResourceId} domain ${siteResource.fullDomain}: ${err}`
);
}
}
targets.push({
sourcePrefixes: [],
destPrefix: `${siteResource.aliasAddress}/32`,
rewriteTo: destination,
portRange,
disableIcmp,
resourceId: siteResource.siteResourceId,
protocol: siteResource.ssl ? "https" : "http",
httpTargets: [
{
destAddr: siteResource.destination,
destPort: siteResource.destinationPort,
scheme: siteResource.scheme
}
],
...(tlsCert && tlsKey ? { tlsCert, tlsKey } : {})
});
};
}
if (targets.length == 0) {
if (!target) {
return;
}
for (const target of targets) {
for (const clientSite of clients) {
if (!clientSite.subnet) {
logger.debug(
`Client ${clientSite.clientId} has no subnet, skipping for site resource ${siteResource.siteResourceId}.`
);
continue;
}
const clientPrefix = `${clientSite.subnet.split("/")[0]}/32`;
// add client prefix to source prefixes
target.sourcePrefixes.push(clientPrefix);
for (const clientSite of clients) {
if (!clientSite.subnet) {
logger.debug(
`Client ${clientSite.clientId} has no subnet, skipping for site resource ${siteResource.siteResourceId}.`
);
continue;
}
const clientPrefix = `${clientSite.subnet.split("/")[0]}/32`;
// add client prefix to source prefixes
target.sourcePrefixes.push(clientPrefix);
}
// print a nice representation of the targets
@@ -741,34 +667,36 @@ export async function generateSubnetProxyTargetV2(
// `Generated subnet proxy targets for: ${JSON.stringify(targets, null, 2)}`
// );
return targets;
return target;
}
/**
* Converts a SubnetProxyTargetV2 to an array of SubnetProxyTarget (v1)
* by expanding each source prefix into its own target entry.
* @param targetV2 - The v2 target to convert
* @returns Array of v1 SubnetProxyTarget objects
*/
export function convertSubnetProxyTargetsV2ToV1(
targetsV2: SubnetProxyTargetV2[]
): SubnetProxyTarget[] {
return targetsV2.flatMap((targetV2) =>
targetV2.sourcePrefixes.map((sourcePrefix) => ({
sourcePrefix,
destPrefix: targetV2.destPrefix,
...(targetV2.disableIcmp !== undefined && {
disableIcmp: targetV2.disableIcmp
}),
...(targetV2.rewriteTo !== undefined && {
rewriteTo: targetV2.rewriteTo
}),
...(targetV2.portRange !== undefined && {
portRange: targetV2.portRange
})
}))
);
}
export function convertSubnetProxyTargetsV2ToV1(
targetsV2: SubnetProxyTargetV2[]
): SubnetProxyTarget[] {
return targetsV2.flatMap((targetV2) =>
targetV2.sourcePrefixes.map((sourcePrefix) => ({
sourcePrefix,
destPrefix: targetV2.destPrefix,
...(targetV2.disableIcmp !== undefined && {
disableIcmp: targetV2.disableIcmp
}),
...(targetV2.rewriteTo !== undefined && {
rewriteTo: targetV2.rewriteTo
}),
...(targetV2.portRange !== undefined && {
portRange: targetV2.portRange
})
}))
);
}
// Custom schema for validating port range strings
// Format: "80,443,8000-9000" or "*" for all ports, or empty string

View File

@@ -1,5 +1,3 @@
// Normalizes
/**
* Normalizes a post-authentication path for safe use when building redirect URLs.
* Returns a path that starts with / and does not allow open redirects (no //, no :).

View File

@@ -11,16 +11,17 @@ import {
roleSiteResources,
Site,
SiteResource,
siteNetworks,
siteResources,
sites,
Transaction,
userOrgRoles,
userOrgs,
userSiteResources
} from "@server/db";
import { and, eq, inArray, ne } from "drizzle-orm";
import {
addPeer as newtAddPeer,
deletePeer as newtDeletePeer
} from "@server/routers/newt/peers";
import {
@@ -34,6 +35,7 @@ import {
generateRemoteSubnets,
generateSubnetProxyTargetV2,
parseEndpoint,
formatEndpoint
} from "@server/lib/ip";
import {
addPeerData,
@@ -46,27 +48,15 @@ export async function getClientSiteResourceAccess(
siteResource: SiteResource,
trx: Transaction | typeof db = db
) {
// get all sites associated with this siteResource via its network
const sitesList = siteResource.networkId
? await trx
.select()
.from(sites)
.innerJoin(
siteNetworks,
eq(siteNetworks.siteId, sites.siteId)
)
.where(eq(siteNetworks.networkId, siteResource.networkId))
.then((rows) => rows.map((row) => row.sites))
: [];
// get the site
const [site] = await trx
.select()
.from(sites)
.where(eq(sites.siteId, siteResource.siteId))
.limit(1);
logger.debug(
`rebuildClientAssociations: [getClientSiteResourceAccess] siteResourceId=${siteResource.siteResourceId} networkId=${siteResource.networkId} siteCount=${sitesList.length} siteIds=[${sitesList.map((s) => s.siteId).join(", ")}]`
);
if (sitesList.length === 0) {
logger.warn(
`No sites found for siteResource ${siteResource.siteResourceId} with networkId ${siteResource.networkId}`
);
if (!site) {
throw new Error(`Site with ID ${siteResource.siteId} not found`);
}
const roleIds = await trx
@@ -146,12 +136,8 @@ export async function getClientSiteResourceAccess(
const mergedAllClients = Array.from(allClientsMap.values());
const mergedAllClientIds = mergedAllClients.map((c) => c.clientId);
logger.debug(
`rebuildClientAssociations: [getClientSiteResourceAccess] siteResourceId=${siteResource.siteResourceId} mergedClientCount=${mergedAllClientIds.length} clientIds=[${mergedAllClientIds.join(", ")}] (userBased=${newAllClients.length} direct=${directClients.length})`
);
return {
sitesList,
site,
mergedAllClients,
mergedAllClientIds
};
@@ -167,59 +153,40 @@ export async function rebuildClientAssociationsFromSiteResource(
subnet: string | null;
}[];
}> {
logger.debug(
`rebuildClientAssociations: [rebuildClientAssociationsFromSiteResource] START siteResourceId=${siteResource.siteResourceId} networkId=${siteResource.networkId} orgId=${siteResource.orgId}`
);
const siteId = siteResource.siteId;
const { sitesList, mergedAllClients, mergedAllClientIds } =
const { site, mergedAllClients, mergedAllClientIds } =
await getClientSiteResourceAccess(siteResource, trx);
logger.debug(
`rebuildClientAssociations: [rebuildClientAssociationsFromSiteResource] access resolved siteResourceId=${siteResource.siteResourceId} siteCount=${sitesList.length} siteIds=[${sitesList.map((s) => s.siteId).join(", ")}] mergedClientCount=${mergedAllClients.length} clientIds=[${mergedAllClientIds.join(", ")}]`
);
/////////// process the client-siteResource associations ///////////
// get all of the clients associated with other resources in the same network,
// joined through siteNetworks so we know which siteId each client belongs to
const allUpdatedClientsFromOtherResourcesOnThisSite = siteResource.networkId
? await trx
.select({
clientId: clientSiteResourcesAssociationsCache.clientId,
siteId: siteNetworks.siteId
})
.from(clientSiteResourcesAssociationsCache)
.innerJoin(
siteResources,
eq(
clientSiteResourcesAssociationsCache.siteResourceId,
siteResources.siteResourceId
)
)
.innerJoin(
siteNetworks,
eq(siteNetworks.networkId, siteResources.networkId)
)
.where(
and(
eq(siteResources.networkId, siteResource.networkId),
ne(
siteResources.siteResourceId,
siteResource.siteResourceId
)
)
)
: [];
// get all of the clients associated with other resources on this site
const allUpdatedClientsFromOtherResourcesOnThisSite = await trx
.select({
clientId: clientSiteResourcesAssociationsCache.clientId
})
.from(clientSiteResourcesAssociationsCache)
.innerJoin(
siteResources,
eq(
clientSiteResourcesAssociationsCache.siteResourceId,
siteResources.siteResourceId
)
)
.where(
and(
eq(siteResources.siteId, siteId),
ne(siteResources.siteResourceId, siteResource.siteResourceId)
)
);
// Build a per-site map so the loop below can check by siteId rather than
// across the entire network.
const clientsFromOtherResourcesBySite = new Map<number, Set<number>>();
for (const row of allUpdatedClientsFromOtherResourcesOnThisSite) {
if (!clientsFromOtherResourcesBySite.has(row.siteId)) {
clientsFromOtherResourcesBySite.set(row.siteId, new Set());
}
clientsFromOtherResourcesBySite.get(row.siteId)!.add(row.clientId);
}
const allClientIdsFromOtherResourcesOnThisSite = Array.from(
new Set(
allUpdatedClientsFromOtherResourcesOnThisSite.map(
(row) => row.clientId
)
)
);
const existingClientSiteResources = await trx
.select({
@@ -237,10 +204,6 @@ export async function rebuildClientAssociationsFromSiteResource(
(row) => row.clientId
);
logger.debug(
`rebuildClientAssociations: [rebuildClientAssociationsFromSiteResource] siteResourceId=${siteResource.siteResourceId} existingResourceClientIds=[${existingClientSiteResourceIds.join(", ")}]`
);
// Get full client details for existing resource clients (needed for sending delete messages)
const existingResourceClients =
existingClientSiteResourceIds.length > 0
@@ -260,10 +223,6 @@ export async function rebuildClientAssociationsFromSiteResource(
(clientId) => !existingClientSiteResourceIds.includes(clientId)
);
logger.debug(
`rebuildClientAssociations: [rebuildClientAssociationsFromSiteResource] siteResourceId=${siteResource.siteResourceId} resourceClients toAdd=[${clientSiteResourcesToAdd.join(", ")}]`
);
const clientSiteResourcesToInsert = clientSiteResourcesToAdd.map(
(clientId) => ({
clientId,
@@ -272,34 +231,17 @@ export async function rebuildClientAssociationsFromSiteResource(
);
if (clientSiteResourcesToInsert.length > 0) {
logger.debug(
`rebuildClientAssociations: [rebuildClientAssociationsFromSiteResource] siteResourceId=${siteResource.siteResourceId} inserting ${clientSiteResourcesToInsert.length} clientSiteResource association(s)`
);
await trx
.insert(clientSiteResourcesAssociationsCache)
.values(clientSiteResourcesToInsert)
.returning();
logger.debug(
`rebuildClientAssociations: [rebuildClientAssociationsFromSiteResource] siteResourceId=${siteResource.siteResourceId} inserted clientSiteResource associations`
);
} else {
logger.debug(
`rebuildClientAssociations: [rebuildClientAssociationsFromSiteResource] siteResourceId=${siteResource.siteResourceId} no clientSiteResource associations to insert`
);
}
const clientSiteResourcesToRemove = existingClientSiteResourceIds.filter(
(clientId) => !mergedAllClientIds.includes(clientId)
);
logger.debug(
`rebuildClientAssociations: [rebuildClientAssociationsFromSiteResource] siteResourceId=${siteResource.siteResourceId} resourceClients toRemove=[${clientSiteResourcesToRemove.join(", ")}]`
);
if (clientSiteResourcesToRemove.length > 0) {
logger.debug(
`rebuildClientAssociations: [rebuildClientAssociationsFromSiteResource] siteResourceId=${siteResource.siteResourceId} deleting ${clientSiteResourcesToRemove.length} clientSiteResource association(s)`
);
await trx
.delete(clientSiteResourcesAssociationsCache)
.where(
@@ -318,127 +260,82 @@ export async function rebuildClientAssociationsFromSiteResource(
/////////// process the client-site associations ///////////
logger.debug(
`rebuildClientAssociations: [rebuildClientAssociationsFromSiteResource] siteResourceId=${siteResource.siteResourceId} beginning client-site association loop over ${sitesList.length} site(s)`
const existingClientSites = await trx
.select({
clientId: clientSitesAssociationsCache.clientId
})
.from(clientSitesAssociationsCache)
.where(eq(clientSitesAssociationsCache.siteId, siteResource.siteId));
const existingClientSiteIds = existingClientSites.map(
(row) => row.clientId
);
for (const site of sitesList) {
const siteId = site.siteId;
// Get full client details for existing clients (needed for sending delete messages)
const existingClients = await trx
.select({
clientId: clients.clientId,
pubKey: clients.pubKey,
subnet: clients.subnet
})
.from(clients)
.where(inArray(clients.clientId, existingClientSiteIds));
logger.debug(
`rebuildClientAssociations: [rebuildClientAssociationsFromSiteResource] processing siteId=${siteId} for siteResourceId=${siteResource.siteResourceId}`
);
const clientSitesToAdd = mergedAllClientIds.filter(
(clientId) =>
!existingClientSiteIds.includes(clientId) &&
!allClientIdsFromOtherResourcesOnThisSite.includes(clientId) // dont remove if there is still another connection for another site resource
);
const existingClientSites = await trx
.select({
clientId: clientSitesAssociationsCache.clientId
})
.from(clientSitesAssociationsCache)
.where(eq(clientSitesAssociationsCache.siteId, siteId));
const clientSitesToInsert = clientSitesToAdd.map((clientId) => ({
clientId,
siteId
}));
const existingClientSiteIds = existingClientSites.map(
(row) => row.clientId
);
logger.debug(
`rebuildClientAssociations: [rebuildClientAssociationsFromSiteResource] siteId=${siteId} existingClientSiteIds=[${existingClientSiteIds.join(", ")}]`
);
// Get full client details for existing clients (needed for sending delete messages)
const existingClients =
existingClientSiteIds.length > 0
? await trx
.select({
clientId: clients.clientId,
pubKey: clients.pubKey,
subnet: clients.subnet
})
.from(clients)
.where(inArray(clients.clientId, existingClientSiteIds))
: [];
const otherResourceClientIds = clientsFromOtherResourcesBySite.get(siteId) ?? new Set<number>();
logger.debug(
`rebuildClientAssociations: [rebuildClientAssociationsFromSiteResource] siteId=${siteId} otherResourceClientIds=[${[...otherResourceClientIds].join(", ")}] mergedAllClientIds=[${mergedAllClientIds.join(", ")}]`
);
const clientSitesToAdd = mergedAllClientIds.filter(
(clientId) =>
!existingClientSiteIds.includes(clientId) &&
!otherResourceClientIds.has(clientId) // dont add if already connected via another site resource
);
const clientSitesToInsert = clientSitesToAdd.map((clientId) => ({
clientId,
siteId
}));
logger.debug(
`rebuildClientAssociations: [rebuildClientAssociationsFromSiteResource] siteId=${siteId} clientSites toAdd=[${clientSitesToAdd.join(", ")}]`
);
if (clientSitesToInsert.length > 0) {
logger.debug(
`rebuildClientAssociations: [rebuildClientAssociationsFromSiteResource] siteId=${siteId} inserting ${clientSitesToInsert.length} clientSite association(s)`
);
await trx
.insert(clientSitesAssociationsCache)
.values(clientSitesToInsert)
.returning();
logger.debug(
`rebuildClientAssociations: [rebuildClientAssociationsFromSiteResource] siteId=${siteId} inserted clientSite associations`
);
} else {
logger.debug(
`rebuildClientAssociations: [rebuildClientAssociationsFromSiteResource] siteId=${siteId} no clientSite associations to insert`
);
}
// Now remove any client-site associations that should no longer exist
const clientSitesToRemove = existingClientSiteIds.filter(
(clientId) =>
!mergedAllClientIds.includes(clientId) &&
!otherResourceClientIds.has(clientId) // dont remove if there is still another connection for another site resource
);
logger.debug(
`rebuildClientAssociations: [rebuildClientAssociationsFromSiteResource] siteId=${siteId} clientSites toRemove=[${clientSitesToRemove.join(", ")}]`
);
if (clientSitesToRemove.length > 0) {
logger.debug(
`rebuildClientAssociations: [rebuildClientAssociationsFromSiteResource] siteId=${siteId} deleting ${clientSitesToRemove.length} clientSite association(s)`
);
await trx
.delete(clientSitesAssociationsCache)
.where(
and(
eq(clientSitesAssociationsCache.siteId, siteId),
inArray(
clientSitesAssociationsCache.clientId,
clientSitesToRemove
)
)
);
}
// Now handle the messages to add/remove peers on both the newt and olm sides
await handleMessagesForSiteClients(
site,
siteId,
mergedAllClients,
existingClients,
clientSitesToAdd,
clientSitesToRemove,
trx
);
if (clientSitesToInsert.length > 0) {
await trx
.insert(clientSitesAssociationsCache)
.values(clientSitesToInsert)
.returning();
}
// Now remove any client-site associations that should no longer exist
const clientSitesToRemove = existingClientSiteIds.filter(
(clientId) =>
!mergedAllClientIds.includes(clientId) &&
!allClientIdsFromOtherResourcesOnThisSite.includes(clientId) // dont remove if there is still another connection for another site resource
);
if (clientSitesToRemove.length > 0) {
await trx
.delete(clientSitesAssociationsCache)
.where(
and(
eq(clientSitesAssociationsCache.siteId, siteId),
inArray(
clientSitesAssociationsCache.clientId,
clientSitesToRemove
)
)
);
}
/////////// send the messages ///////////
// Now handle the messages to add/remove peers on both the newt and olm sides
await handleMessagesForSiteClients(
site,
siteId,
mergedAllClients,
existingClients,
clientSitesToAdd,
clientSitesToRemove,
trx
);
// Handle subnet proxy target updates for the resource associations
await handleSubnetProxyTargetUpdates(
siteResource,
sitesList,
mergedAllClients,
existingResourceClients,
clientSiteResourcesToAdd,
@@ -727,7 +624,6 @@ export async function updateClientSiteDestinations(
async function handleSubnetProxyTargetUpdates(
siteResource: SiteResource,
sitesList: Site[],
allClients: {
clientId: number;
pubKey: string | null;
@@ -742,138 +638,125 @@ async function handleSubnetProxyTargetUpdates(
clientSiteResourcesToRemove: number[],
trx: Transaction | typeof db = db
): Promise<void> {
const proxyJobs: Promise<any>[] = [];
const olmJobs: Promise<any>[] = [];
// Get the newt for this site
const [newt] = await trx
.select()
.from(newts)
.where(eq(newts.siteId, siteResource.siteId))
.limit(1);
for (const siteData of sitesList) {
const siteId = siteData.siteId;
if (!newt) {
logger.warn(
`Newt not found for site ${siteResource.siteId}, skipping subnet proxy target updates`
);
return;
}
// Get the newt for this site
const [newt] = await trx
.select()
.from(newts)
.where(eq(newts.siteId, siteId))
.limit(1);
const proxyJobs = [];
const olmJobs = [];
// Generate targets for added associations
if (clientSiteResourcesToAdd.length > 0) {
const addedClients = allClients.filter((client) =>
clientSiteResourcesToAdd.includes(client.clientId)
);
if (!newt) {
logger.warn(
`Newt not found for site ${siteId}, skipping subnet proxy target updates`
);
continue;
}
// Generate targets for added associations
if (clientSiteResourcesToAdd.length > 0) {
const addedClients = allClients.filter((client) =>
clientSiteResourcesToAdd.includes(client.clientId)
if (addedClients.length > 0) {
const targetToAdd = generateSubnetProxyTargetV2(
siteResource,
addedClients
);
if (addedClients.length > 0) {
const targetsToAdd = await generateSubnetProxyTargetV2(
siteResource,
addedClients
if (targetToAdd) {
proxyJobs.push(
addSubnetProxyTargets(
newt.newtId,
[targetToAdd],
newt.version
)
);
}
if (targetsToAdd) {
proxyJobs.push(
addSubnetProxyTargets(
newt.newtId,
targetsToAdd,
newt.version
)
);
}
for (const client of addedClients) {
olmJobs.push(
addPeerData(
client.clientId,
siteId,
generateRemoteSubnets([siteResource]),
generateAliasConfig([siteResource])
)
);
}
for (const client of addedClients) {
olmJobs.push(
addPeerData(
client.clientId,
siteResource.siteId,
generateRemoteSubnets([siteResource]),
generateAliasConfig([siteResource])
)
);
}
}
}
// here we use the existingSiteResource from BEFORE we updated the destination so we dont need to worry about updating destinations here
// here we use the existingSiteResource from BEFORE we updated the destination so we dont need to worry about updating destinations here
// Generate targets for removed associations
if (clientSiteResourcesToRemove.length > 0) {
const removedClients = existingClients.filter((client) =>
clientSiteResourcesToRemove.includes(client.clientId)
// Generate targets for removed associations
if (clientSiteResourcesToRemove.length > 0) {
const removedClients = existingClients.filter((client) =>
clientSiteResourcesToRemove.includes(client.clientId)
);
if (removedClients.length > 0) {
const targetToRemove = generateSubnetProxyTargetV2(
siteResource,
removedClients
);
if (removedClients.length > 0) {
const targetsToRemove = await generateSubnetProxyTargetV2(
siteResource,
removedClients
if (targetToRemove) {
proxyJobs.push(
removeSubnetProxyTargets(
newt.newtId,
[targetToRemove],
newt.version
)
);
}
if (targetsToRemove) {
proxyJobs.push(
removeSubnetProxyTargets(
newt.newtId,
targetsToRemove,
newt.version
for (const client of removedClients) {
// Check if this client still has access to another resource on this site with the same destination
const destinationStillInUse = await trx
.select()
.from(siteResources)
.innerJoin(
clientSiteResourcesAssociationsCache,
eq(
clientSiteResourcesAssociationsCache.siteResourceId,
siteResources.siteResourceId
)
);
}
for (const client of removedClients) {
// Check if this client still has access to another resource
// on this specific site with the same destination. We scope
// by siteId (via siteNetworks) rather than networkId because
// removePeerData operates per-site - a resource on a different
// site sharing the same network should not block removal here.
const destinationStillInUse = await trx
.select()
.from(siteResources)
.innerJoin(
clientSiteResourcesAssociationsCache,
)
.where(
and(
eq(
clientSiteResourcesAssociationsCache.siteResourceId,
siteResources.siteResourceId
clientSiteResourcesAssociationsCache.clientId,
client.clientId
),
eq(siteResources.siteId, siteResource.siteId),
eq(
siteResources.destination,
siteResource.destination
),
ne(
siteResources.siteResourceId,
siteResource.siteResourceId
)
)
.innerJoin(
siteNetworks,
eq(siteNetworks.networkId, siteResources.networkId)
)
.where(
and(
eq(
clientSiteResourcesAssociationsCache.clientId,
client.clientId
),
eq(siteNetworks.siteId, siteId),
eq(
siteResources.destination,
siteResource.destination
),
ne(
siteResources.siteResourceId,
siteResource.siteResourceId
)
)
);
// Only remove remote subnet if no other resource uses the same destination
const remoteSubnetsToRemove =
destinationStillInUse.length > 0
? []
: generateRemoteSubnets([siteResource]);
olmJobs.push(
removePeerData(
client.clientId,
siteId,
remoteSubnetsToRemove,
generateAliasConfig([siteResource])
)
);
}
// Only remove remote subnet if no other resource uses the same destination
const remoteSubnetsToRemove =
destinationStillInUse.length > 0
? []
: generateRemoteSubnets([siteResource]);
olmJobs.push(
removePeerData(
client.clientId,
siteResource.siteId,
remoteSubnetsToRemove,
generateAliasConfig([siteResource])
)
);
}
}
}
@@ -980,25 +863,10 @@ export async function rebuildClientAssociationsFromClient(
)
: [];
// Group by siteId for site-level associations - look up via siteNetworks since
// siteResources no longer carries a direct siteId column.
const networkIds = Array.from(
new Set(
newSiteResources
.map((sr) => sr.networkId)
.filter((id): id is number => id !== null)
)
// Group by siteId for site-level associations
const newSiteIds = Array.from(
new Set(newSiteResources.map((sr) => sr.siteId))
);
const newSiteIds =
networkIds.length > 0
? await trx
.select({ siteId: siteNetworks.siteId })
.from(siteNetworks)
.where(inArray(siteNetworks.networkId, networkIds))
.then((rows) =>
Array.from(new Set(rows.map((r) => r.siteId)))
)
: [];
/////////// Process client-siteResource associations ///////////
@@ -1271,45 +1139,13 @@ async function handleMessagesForClientResources(
resourcesToAdd.includes(r.siteResourceId)
);
// Build (resource, siteId) pairs by looking up siteNetworks for each resource's networkId
const addedNetworkIds = Array.from(
new Set(
addedResources
.map((r) => r.networkId)
.filter((id): id is number => id !== null)
)
);
const addedSiteNetworkRows =
addedNetworkIds.length > 0
? await trx
.select({
networkId: siteNetworks.networkId,
siteId: siteNetworks.siteId
})
.from(siteNetworks)
.where(inArray(siteNetworks.networkId, addedNetworkIds))
: [];
const addedNetworkToSites = new Map<number, number[]>();
for (const row of addedSiteNetworkRows) {
if (!addedNetworkToSites.has(row.networkId)) {
addedNetworkToSites.set(row.networkId, []);
}
addedNetworkToSites.get(row.networkId)!.push(row.siteId);
}
// Group by site for proxy updates
const addedBySite = new Map<number, SiteResource[]>();
for (const resource of addedResources) {
const siteIds =
resource.networkId != null
? (addedNetworkToSites.get(resource.networkId) ?? [])
: [];
for (const siteId of siteIds) {
if (!addedBySite.has(siteId)) {
addedBySite.set(siteId, []);
}
addedBySite.get(siteId)!.push(resource);
if (!addedBySite.has(resource.siteId)) {
addedBySite.set(resource.siteId, []);
}
addedBySite.get(resource.siteId)!.push(resource);
}
// Add subnet proxy targets for each site
@@ -1328,7 +1164,7 @@ async function handleMessagesForClientResources(
}
for (const resource of resources) {
const targets = await generateSubnetProxyTargetV2(resource, [
const target = generateSubnetProxyTargetV2(resource, [
{
clientId: client.clientId,
pubKey: client.pubKey,
@@ -1336,11 +1172,11 @@ async function handleMessagesForClientResources(
}
]);
if (targets) {
if (target) {
proxyJobs.push(
addSubnetProxyTargets(
newt.newtId,
targets,
[target],
newt.version
)
);
@@ -1351,7 +1187,7 @@ async function handleMessagesForClientResources(
olmJobs.push(
addPeerData(
client.clientId,
siteId,
resource.siteId,
generateRemoteSubnets([resource]),
generateAliasConfig([resource])
)
@@ -1363,7 +1199,7 @@ async function handleMessagesForClientResources(
error.message.includes("not found")
) {
logger.debug(
`Olm data not found for client ${client.clientId} and site ${siteId}, skipping addition`
`Olm data not found for client ${client.clientId} and site ${resource.siteId}, skipping removal`
);
} else {
throw error;
@@ -1380,45 +1216,13 @@ async function handleMessagesForClientResources(
.from(siteResources)
.where(inArray(siteResources.siteResourceId, resourcesToRemove));
// Build (resource, siteId) pairs via siteNetworks
const removedNetworkIds = Array.from(
new Set(
removedResources
.map((r) => r.networkId)
.filter((id): id is number => id !== null)
)
);
const removedSiteNetworkRows =
removedNetworkIds.length > 0
? await trx
.select({
networkId: siteNetworks.networkId,
siteId: siteNetworks.siteId
})
.from(siteNetworks)
.where(inArray(siteNetworks.networkId, removedNetworkIds))
: [];
const removedNetworkToSites = new Map<number, number[]>();
for (const row of removedSiteNetworkRows) {
if (!removedNetworkToSites.has(row.networkId)) {
removedNetworkToSites.set(row.networkId, []);
}
removedNetworkToSites.get(row.networkId)!.push(row.siteId);
}
// Group by site for proxy updates
const removedBySite = new Map<number, SiteResource[]>();
for (const resource of removedResources) {
const siteIds =
resource.networkId != null
? (removedNetworkToSites.get(resource.networkId) ?? [])
: [];
for (const siteId of siteIds) {
if (!removedBySite.has(siteId)) {
removedBySite.set(siteId, []);
}
removedBySite.get(siteId)!.push(resource);
if (!removedBySite.has(resource.siteId)) {
removedBySite.set(resource.siteId, []);
}
removedBySite.get(resource.siteId)!.push(resource);
}
// Remove subnet proxy targets for each site
@@ -1437,7 +1241,7 @@ async function handleMessagesForClientResources(
}
for (const resource of resources) {
const targets = await generateSubnetProxyTargetV2(resource, [
const target = generateSubnetProxyTargetV2(resource, [
{
clientId: client.clientId,
pubKey: client.pubKey,
@@ -1445,22 +1249,18 @@ async function handleMessagesForClientResources(
}
]);
if (targets) {
if (target) {
proxyJobs.push(
removeSubnetProxyTargets(
newt.newtId,
targets,
[target],
newt.version
)
);
}
try {
// Check if this client still has access to another resource
// on this specific site with the same destination. We scope
// by siteId (via siteNetworks) rather than networkId because
// removePeerData operates per-site - a resource on a different
// site sharing the same network should not block removal here.
// Check if this client still has access to another resource on this site with the same destination
const destinationStillInUse = await trx
.select()
.from(siteResources)
@@ -1471,17 +1271,13 @@ async function handleMessagesForClientResources(
siteResources.siteResourceId
)
)
.innerJoin(
siteNetworks,
eq(siteNetworks.networkId, siteResources.networkId)
)
.where(
and(
eq(
clientSiteResourcesAssociationsCache.clientId,
client.clientId
),
eq(siteNetworks.siteId, siteId),
eq(siteResources.siteId, resource.siteId),
eq(
siteResources.destination,
resource.destination
@@ -1503,7 +1299,7 @@ async function handleMessagesForClientResources(
olmJobs.push(
removePeerData(
client.clientId,
siteId,
resource.siteId,
remoteSubnetsToRemove,
generateAliasConfig([resource])
)
@@ -1515,7 +1311,7 @@ async function handleMessagesForClientResources(
error.message.includes("not found")
) {
logger.debug(
`Olm data not found for client ${client.clientId} and site ${siteId}, skipping removal`
`Olm data not found for client ${client.clientId} and site ${resource.siteId}, skipping removal`
);
} else {
throw error;

View File

@@ -1,5 +1,3 @@
// Sanitizes
/**
* Sanitize a string field before inserting into a database TEXT column.
*
@@ -39,4 +37,4 @@ export function sanitizeString(
// Strip null bytes, C0 control chars (except HT/LF/CR), and DEL.
.replace(/[\x00-\x08\x0B\x0C\x0E-\x1F\x7F]/g, "")
);
}
}

View File

@@ -1,133 +0,0 @@
import { z } from "zod";
// Query-string schema for the status-history endpoint. `days` arrives as a
// string (e.g. "?days=30"); it is parsed to an int, defaults to 90 when
// absent/empty, and is then constrained to the inclusive range [1, 365].
export const statusHistoryQuerySchema = z
    .object({
        days: z
            .string()
            .optional()
            .transform((v) => (v ? parseInt(v, 10) : 90)),
    })
    .pipe(
        z.object({
            days: z.number().int().min(1).max(365),
        })
    );
// One calendar-day bucket of uptime history for a monitored entity.
export interface StatusHistoryDayBucket {
    date: string; // ISO date "YYYY-MM-DD"
    uptimePercent: number; // 0-100
    totalDowntimeSeconds: number; // seconds spent in a "down" status this day
    // Closed downtime windows within the day (unix-second bounds).
    downtimeWindows: { start: number; end: number | null; status: string }[];
    // Coarse rollup of uptimePercent; "no_data" when nothing is known.
    status: "good" | "degraded" | "bad" | "no_data";
}

// Response payload: per-day buckets plus aggregate uptime/downtime totals.
export interface StatusHistoryResponse {
    entityType: string;
    entityId: number;
    days: StatusHistoryDayBucket[];
    overallUptimePercent: number;
    totalDowntimeSeconds: number;
}
/**
 * Fold a list of status-change events into per-day uptime buckets covering
 * the last `days` * 24h ending now.
 *
 * "offline", "unhealthy", and "unknown" count as down. Each bucket reports
 * the downtime windows, seconds of downtime, an uptime percentage (measured
 * only over the elapsed portion of a day still in progress), and a coarse
 * status: "good" >= 99%, "degraded" >= 50%, "bad" otherwise, or "no_data"
 * when nothing is known about the day.
 *
 * NOTE(review): assumes `events` is sorted ascending by timestamp — both the
 * day-start status lookup and the window accounting rely on it; confirm the
 * caller's query orders by timestamp.
 *
 * @param events status-change events for one entity, oldest first
 * @param days   number of day buckets to produce (1-365)
 * @returns the per-day buckets plus the total downtime in seconds
 */
export function computeBuckets(
    events: { entityType: string; entityId: number; orgId: string; status: string; timestamp: number; id: number }[],
    days: number
): { buckets: StatusHistoryDayBucket[]; totalDowntime: number } {
    const nowSec = Math.floor(Date.now() / 1000);
    const buckets: StatusHistoryDayBucket[] = [];
    let totalDowntime = 0;

    for (let d = 0; d < days; d++) {
        const dayStartSec = nowSec - (days - d) * 86400;
        const dayEndSec = dayStartSec + 86400;

        const dayEvents = events.filter(
            (e) => e.timestamp >= dayStartSec && e.timestamp < dayEndSec
        );

        // Status carried into this day: the last event strictly before it.
        // (filter() does not mutate, so no defensive copy is needed.)
        const lastBeforeDay = events
            .filter((e) => e.timestamp < dayStartSec)
            .at(-1);
        const currentStatus = lastBeforeDay?.status ?? null;

        const windows: { start: number; end: number | null; status: string }[] = [];
        let dayDowntime = 0;
        let windowStart = dayStartSec;
        let windowStatus = currentStatus;

        for (const evt of dayEvents) {
            // A repeated event with the same status does not end the window.
            // BUG FIX: the previous version reset windowStart on every event,
            // so consecutive duplicate "offline" events under-counted downtime
            // (only the span since the last duplicate was accrued).
            if (windowStatus === evt.status) {
                continue;
            }
            if (windowStatus !== null) {
                const windowEnd = evt.timestamp;
                const isDown =
                    windowStatus === "offline" ||
                    windowStatus === "unhealthy" ||
                    windowStatus === "unknown";
                if (isDown) {
                    dayDowntime += windowEnd - windowStart;
                    windows.push({
                        start: windowStart,
                        end: windowEnd,
                        status: windowStatus,
                    });
                }
            }
            windowStart = evt.timestamp;
            windowStatus = evt.status;
        }

        // Close the final window at the end of the day (or now if day hasn't ended)
        if (windowStatus !== null) {
            const finalEnd = Math.min(dayEndSec, nowSec);
            const isDown =
                windowStatus === "offline" ||
                windowStatus === "unhealthy" ||
                windowStatus === "unknown";
            if (isDown && finalEnd > windowStart) {
                dayDowntime += finalEnd - windowStart;
                windows.push({
                    start: windowStart,
                    end: finalEnd,
                    status: windowStatus,
                });
            }
        }

        totalDowntime += dayDowntime;

        // Uptime is measured over the elapsed part of the day only, so a day
        // still in progress is not penalized for hours that haven't happened.
        const effectiveDayLength = Math.max(
            0,
            Math.min(dayEndSec, nowSec) - dayStartSec
        );
        const uptimePct =
            effectiveDayLength > 0
                ? Math.max(
                      0,
                      ((effectiveDayLength - dayDowntime) /
                          effectiveDayLength) *
                          100
                  )
                : 100;

        const dateStr = new Date(dayStartSec * 1000).toISOString().slice(0, 10);

        let status: StatusHistoryDayBucket["status"] = "no_data";
        if (currentStatus !== null || dayEvents.length > 0) {
            if (uptimePct >= 99) status = "good";
            else if (uptimePct >= 50) status = "degraded";
            else status = "bad";
        }

        buckets.push({
            date: dateStr,
            uptimePercent: Math.round(uptimePct * 100) / 100,
            totalDowntimeSeconds: dayDowntime,
            downtimeWindows: windows,
            status,
        });
    }

    return { buckets, totalDowntime };
}

View File

@@ -1,5 +1,3 @@
// tokenCache
/**
* Returns a cached plaintext token from Redis if one exists and decrypts
* cleanly, otherwise calls `createSession` to mint a fresh token, stores the

View File

@@ -19,7 +19,6 @@ export class TraefikConfigManager {
private timeoutId: NodeJS.Timeout | null = null;
private lastCertificateFetch: Date | null = null;
private lastKnownDomains = new Set<string>();
private pendingDeletion = new Map<string, number>(); // domain -> cycles remaining before delete
private lastLocalCertificateState = new Map<
string,
{
@@ -1005,62 +1004,33 @@ export class TraefikConfigManager {
const dirName = dirent.name;
// Only delete if NO current domain is exactly the same or ends with `.${dirName}`
const isUnused = !Array.from(currentActiveDomains).some(
const shouldDelete = !Array.from(currentActiveDomains).some(
(domain) =>
domain === dirName || domain.endsWith(`.${dirName}`)
);
if (!isUnused) {
// Domain is still active - remove from pending deletion if it was queued
if (this.pendingDeletion.has(dirName)) {
logger.info(
`Certificate ${dirName} is active again, cancelling pending deletion`
);
this.pendingDeletion.delete(dirName);
}
continue;
}
// Domain is unused - add to pending deletion or decrement its counter
if (!this.pendingDeletion.has(dirName)) {
const graceCycles = 3;
if (shouldDelete) {
const domainDir = path.join(certsPath, dirName);
logger.info(
`Certificate ${dirName} is no longer in use. Will delete after ${graceCycles} more cycles.`
`Cleaning up unused certificate directory: ${dirName}`
);
this.pendingDeletion.set(dirName, graceCycles);
} else {
const remaining = this.pendingDeletion.get(dirName)! - 1;
if (remaining > 0) {
logger.info(
`Certificate ${dirName} pending deletion: ${remaining} cycle(s) remaining`
fs.rmSync(domainDir, { recursive: true, force: true });
// Remove from local state tracking
this.lastLocalCertificateState.delete(dirName);
// Remove from dynamic config
const certFilePath = path.join(domainDir, "cert.pem");
const keyFilePath = path.join(domainDir, "key.pem");
const before = dynamicConfig.tls.certificates.length;
dynamicConfig.tls.certificates =
dynamicConfig.tls.certificates.filter(
(entry: any) =>
entry.certFile !== certFilePath &&
entry.keyFile !== keyFilePath
);
this.pendingDeletion.set(dirName, remaining);
} else {
// Grace period expired - actually delete now
this.pendingDeletion.delete(dirName);
const domainDir = path.join(certsPath, dirName);
logger.info(
`Cleaning up unused certificate directory: ${dirName}`
);
fs.rmSync(domainDir, { recursive: true, force: true });
// Remove from local state tracking
this.lastLocalCertificateState.delete(dirName);
// Remove from dynamic config
const certFilePath = path.join(domainDir, "cert.pem");
const keyFilePath = path.join(domainDir, "key.pem");
const before = dynamicConfig.tls.certificates.length;
dynamicConfig.tls.certificates =
dynamicConfig.tls.certificates.filter(
(entry: any) =>
entry.certFile !== certFilePath &&
entry.keyFile !== keyFilePath
);
if (dynamicConfig.tls.certificates.length !== before) {
configChanged = true;
}
if (dynamicConfig.tls.certificates.length !== before) {
configChanged = true;
}
}
}

View File

@@ -24,7 +24,7 @@ function encodePath(path: string | null | undefined): string {
/**
* Exact replica of the OLD key computation from upstream main.
* Uses sanitize() for paths - this is what had the collision bug.
* Uses sanitize() for paths this is what had the collision bug.
*/
function oldKeyComputation(
resourceId: number,
@@ -44,7 +44,7 @@ function oldKeyComputation(
/**
* Replica of the NEW key computation from our fix.
* Uses encodePath() for paths - collision-free.
* Uses encodePath() for paths collision-free.
*/
function newKeyComputation(
resourceId: number,
@@ -195,11 +195,11 @@ function runTests() {
true,
"/a/b and /a-b MUST have different keys"
);
console.log(" PASS: collision fix - /a/b vs /a-b have different keys");
console.log(" PASS: collision fix /a/b vs /a-b have different keys");
passed++;
}
// Test 9: demonstrate the old bug - old code maps /a/b and /a-b to same key
// Test 9: demonstrate the old bug old code maps /a/b and /a-b to same key
{
const oldKeyAB = oldKeyComputation(1, "/a/b", "prefix", null, null);
const oldKeyDash = oldKeyComputation(1, "/a-b", "prefix", null, null);
@@ -208,11 +208,11 @@ function runTests() {
oldKeyDash,
"old code MUST have this collision (confirms the bug exists)"
);
console.log(" PASS: confirmed old code bug - /a/b and /a-b collided");
console.log(" PASS: confirmed old code bug /a/b and /a-b collided");
passed++;
}
// Test 10: /api/v1 and /api-v1 - old code collision, new code fixes it
// Test 10: /api/v1 and /api-v1 old code collision, new code fixes it
{
const oldKey1 = oldKeyComputation(1, "/api/v1", "prefix", null, null);
const oldKey2 = oldKeyComputation(1, "/api-v1", "prefix", null, null);
@@ -229,11 +229,11 @@ function runTests() {
true,
"new code must separate /api/v1 and /api-v1"
);
console.log(" PASS: collision fix - /api/v1 vs /api-v1");
console.log(" PASS: collision fix /api/v1 vs /api-v1");
passed++;
}
// Test 11: /app.v2 and /app/v2 and /app-v2 - three-way collision fixed
// Test 11: /app.v2 and /app/v2 and /app-v2 three-way collision fixed
{
const a = newKeyComputation(1, "/app.v2", "prefix", null, null);
const b = newKeyComputation(1, "/app/v2", "prefix", null, null);
@@ -245,14 +245,14 @@ function runTests() {
"three paths must produce three unique keys"
);
console.log(
" PASS: collision fix - three-way /app.v2, /app/v2, /app-v2"
" PASS: collision fix three-way /app.v2, /app/v2, /app-v2"
);
passed++;
}
// ── Edge cases ───────────────────────────────────────────────────
// Test 12: same path in different resources - always separate
// Test 12: same path in different resources always separate
{
const key1 = newKeyComputation(1, "/api", "prefix", null, null);
const key2 = newKeyComputation(2, "/api", "prefix", null, null);
@@ -261,11 +261,11 @@ function runTests() {
true,
"different resources with same path must have different keys"
);
console.log(" PASS: edge case - same path, different resources");
console.log(" PASS: edge case same path, different resources");
passed++;
}
// Test 13: same resource, different pathMatchType - separate keys
// Test 13: same resource, different pathMatchType separate keys
{
const exact = newKeyComputation(1, "/api", "exact", null, null);
const prefix = newKeyComputation(1, "/api", "prefix", null, null);
@@ -274,11 +274,11 @@ function runTests() {
true,
"exact vs prefix must have different keys"
);
console.log(" PASS: edge case - same path, different match types");
console.log(" PASS: edge case same path, different match types");
passed++;
}
// Test 14: same resource and path, different rewrite config - separate keys
// Test 14: same resource and path, different rewrite config separate keys
{
const noRewrite = newKeyComputation(1, "/api", "prefix", null, null);
const withRewrite = newKeyComputation(
@@ -293,7 +293,7 @@ function runTests() {
true,
"with vs without rewrite must have different keys"
);
console.log(" PASS: edge case - same path, different rewrite config");
console.log(" PASS: edge case same path, different rewrite config");
passed++;
}
@@ -308,7 +308,7 @@ function runTests() {
paths.length,
"special URL chars must produce unique keys"
);
console.log(" PASS: edge case - special URL characters in paths");
console.log(" PASS: edge case special URL characters in paths");
passed++;
}

View File

@@ -15,7 +15,7 @@ export async function verifyDomainAccess(
try {
const userId = req.user!.userId;
const domainId =
req.params.domainId;
req.params.domainId || req.body.apiKeyId || req.query.apiKeyId;
const orgId = req.params.orgId;
if (!userId) {

View File

@@ -1,7 +1,7 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.

View File

@@ -1,7 +1,7 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.

View File

@@ -1,478 +0,0 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import fs from "fs";
import crypto from "crypto";
import {
certificates,
clients,
clientSiteResourcesAssociationsCache,
db,
domains,
newts,
siteNetworks,
SiteResource,
siteResources
} from "@server/db";
import { and, eq } from "drizzle-orm";
import { encrypt, decrypt } from "@server/lib/crypto";
import logger from "@server/logger";
import privateConfig from "#private/lib/config";
import config from "@server/lib/config";
import {
generateSubnetProxyTargetV2,
SubnetProxyTargetV2
} from "@server/lib/ip";
import { updateTargets } from "@server/routers/client/targets";
import cache from "#private/lib/cache";
import { build } from "@server/build";
// Shape of one certificate entry inside Traefik's acme.json store.
interface AcmeCert {
    domain: { main: string; sans?: string[] };
    certificate: string; // base64-encoded PEM certificate chain
    key: string; // base64-encoded PEM private key
    Store: string;
}

// acme.json top level: one entry per configured ACME resolver.
interface AcmeJson {
    [resolver: string]: {
        Certificates: AcmeCert[];
    };
}
/**
 * Push a renewed certificate to every newt that serves an SSL-enabled site
 * resource on the cert's domain.
 *
 * Affected resources are found by domainId when available, otherwise by an
 * exact fullDomain match. For each resource this resolves the resource's
 * sites (via siteNetworks), gathers the clients with access, invalidates the
 * cached cert so target generation re-reads it, and sends each site's newt
 * an update pairing the old targets (previous cert/key) with the new ones.
 *
 * Per-resource failures are logged and swallowed so one bad resource does
 * not block updates for the rest.
 *
 * @param domain     cert domain as it appears in acme.json (may be "*.x.y")
 * @param domainId   resolved domain record id, or null if no record matched
 * @param oldCertPem previous PEM cert, or null for a brand-new cert
 * @param oldKeyPem  previous PEM key, or null for a brand-new cert
 */
async function pushCertUpdateToAffectedNewts(
    domain: string,
    domainId: string | null,
    oldCertPem: string | null,
    oldKeyPem: string | null
): Promise<void> {
    // Find all SSL-enabled HTTP site resources that use this cert's domain
    let affectedResources: SiteResource[] = [];
    if (domainId) {
        affectedResources = await db
            .select()
            .from(siteResources)
            .where(
                and(
                    eq(siteResources.domainId, domainId),
                    eq(siteResources.ssl, true)
                )
            );
    } else {
        // Fallback: match by exact fullDomain when no domainId is available
        affectedResources = await db
            .select()
            .from(siteResources)
            .where(
                and(
                    eq(siteResources.fullDomain, domain),
                    eq(siteResources.ssl, true)
                )
            );
    }

    if (affectedResources.length === 0) {
        logger.debug(
            `acmeCertSync: no affected site resources for cert domain "${domain}"`
        );
        return;
    }

    logger.info(
        `acmeCertSync: pushing cert update to ${affectedResources.length} affected site resource(s) for domain "${domain}"`
    );

    for (const resource of affectedResources) {
        try {
            // Get all sites for this resource via siteNetworks
            const resourceSiteRows = resource.networkId
                ? await db
                      .select({ siteId: siteNetworks.siteId })
                      .from(siteNetworks)
                      .where(eq(siteNetworks.networkId, resource.networkId))
                : [];

            if (resourceSiteRows.length === 0) {
                logger.debug(
                    `acmeCertSync: no sites for resource ${resource.siteResourceId}, skipping`
                );
                continue;
            }

            // Get all clients with access to this resource
            const resourceClients = await db
                .select({
                    clientId: clients.clientId,
                    pubKey: clients.pubKey,
                    subnet: clients.subnet
                })
                .from(clients)
                .innerJoin(
                    clientSiteResourcesAssociationsCache,
                    eq(
                        clients.clientId,
                        clientSiteResourcesAssociationsCache.clientId
                    )
                )
                .where(
                    eq(
                        clientSiteResourcesAssociationsCache.siteResourceId,
                        resource.siteResourceId
                    )
                );

            if (resourceClients.length === 0) {
                logger.debug(
                    `acmeCertSync: no clients for resource ${resource.siteResourceId}, skipping`
                );
                continue;
            }

            // Invalidate the cert cache so generateSubnetProxyTargetV2 fetches fresh data
            if (resource.fullDomain) {
                await cache.del(`cert:${resource.fullDomain}`);
            }

            // Generate target once - same cert applies to all sites for this resource
            const newTargets = await generateSubnetProxyTargetV2(
                resource,
                resourceClients
            );

            if (!newTargets) {
                logger.debug(
                    `acmeCertSync: could not generate target for resource ${resource.siteResourceId}, skipping`
                );
                continue;
            }

            // Construct the old targets - same routing shape but with the previous cert/key.
            // The newt only uses destPrefix/sourcePrefixes for removal, but we keep the
            // semantics correct so the update message accurately reflects what changed.
            const oldTargets: SubnetProxyTargetV2[] = newTargets.map((t) => ({
                ...t,
                tlsCert: oldCertPem ?? undefined,
                tlsKey: oldKeyPem ?? undefined
            }));

            // Push update to each site's newt
            for (const { siteId } of resourceSiteRows) {
                const [newt] = await db
                    .select()
                    .from(newts)
                    .where(eq(newts.siteId, siteId))
                    .limit(1);

                if (!newt) {
                    logger.debug(
                        `acmeCertSync: no newt found for site ${siteId}, skipping resource ${resource.siteResourceId}`
                    );
                    continue;
                }

                await updateTargets(
                    newt.newtId,
                    { oldTargets: oldTargets, newTargets: newTargets },
                    newt.version
                );

                logger.info(
                    `acmeCertSync: pushed cert update to newt for site ${siteId}, resource ${resource.siteResourceId}`
                );
            }
        } catch (err) {
            // Best-effort per resource: log and move on to the next one.
            logger.error(
                `acmeCertSync: error pushing cert update for resource ${resource?.siteResourceId}: ${err}`
            );
        }
    }
}
/**
 * Resolve a cert domain to a domain record id.
 *
 * A leading "*." is stripped first. An exact baseDomain match of any domain
 * type wins; failing that, each parent suffix of the domain is tried against
 * wildcard-type domain records (e.g. cert "sub.example.com" matches a
 * wildcard domain whose baseDomain is "example.com").
 *
 * @param certDomain domain as stored in acme.json (may be "*.example.com")
 * @returns the matching domainId, or null when nothing matches
 */
async function findDomainId(certDomain: string): Promise<string | null> {
    const bareDomain = certDomain.startsWith("*.")
        ? certDomain.slice(2)
        : certDomain;

    // 1. Exact baseDomain match (any domain type)
    const [direct] = await db
        .select({ domainId: domains.domainId })
        .from(domains)
        .where(eq(domains.baseDomain, bareDomain))
        .limit(1);
    if (direct) {
        return direct.domainId;
    }

    // 2. Walk up the label hierarchy, testing each suffix against
    // wildcard-type domain records.
    let suffix = bareDomain;
    for (
        let dotIdx = suffix.indexOf(".");
        dotIdx !== -1;
        dotIdx = suffix.indexOf(".")
    ) {
        suffix = suffix.slice(dotIdx + 1);
        if (!suffix) {
            break;
        }
        const [wildcardHit] = await db
            .select({ domainId: domains.domainId })
            .from(domains)
            .where(
                and(
                    eq(domains.baseDomain, suffix),
                    eq(domains.type, "wildcard")
                )
            )
            .limit(1);
        if (wildcardHit) {
            return wildcardHit.domainId;
        }
    }

    return null;
}
/**
 * Return only the first certificate of a (possibly chained) PEM bundle,
 * or null when the bundle contains no certificate block.
 */
function extractFirstCert(pemBundle: string): string | null {
    const certBlock =
        /-----BEGIN CERTIFICATE-----[\s\S]+?-----END CERTIFICATE-----/.exec(
            pemBundle
        );
    if (certBlock === null) {
        return null;
    }
    return certBlock[0];
}
/**
 * Sync certificates from Traefik's acme.json into the certificates table.
 *
 * Reads and parses acme.json, then for each cert under the given resolver:
 * decodes the base64 PEMs, skips unchanged certs (compared against the
 * decrypted stored copy), parses the expiry from the first cert in the
 * bundle, encrypts cert and key with the server secret, upserts the row,
 * and pushes the update to affected newts. Read/parse failures are logged
 * at debug level and abort the pass quietly — the next interval retries.
 *
 * @param acmeJsonPath filesystem path to Traefik's acme.json
 * @param resolver     ACME resolver name whose certificates to import
 */
async function syncAcmeCerts(
    acmeJsonPath: string,
    resolver: string
): Promise<void> {
    let raw: string;
    try {
        raw = fs.readFileSync(acmeJsonPath, "utf8");
    } catch (err) {
        logger.debug(`acmeCertSync: could not read ${acmeJsonPath}: ${err}`);
        return;
    }

    let acmeJson: AcmeJson;
    try {
        acmeJson = JSON.parse(raw);
    } catch (err) {
        logger.debug(`acmeCertSync: could not parse acme.json: ${err}`);
        return;
    }

    const resolverData = acmeJson[resolver];
    if (!resolverData || !Array.isArray(resolverData.Certificates)) {
        logger.debug(
            `acmeCertSync: no certificates found for resolver "${resolver}"`
        );
        return;
    }

    for (const cert of resolverData.Certificates) {
        const domain = cert.domain?.main;
        if (!domain) {
            logger.debug(`acmeCertSync: skipping cert with missing domain`);
            continue;
        }

        if (!cert.certificate || !cert.key) {
            logger.debug(
                `acmeCertSync: skipping cert for ${domain} - empty certificate or key field`
            );
            continue;
        }

        // acme.json stores PEMs base64-encoded; decode before use.
        const certPem = Buffer.from(cert.certificate, "base64").toString(
            "utf8"
        );
        const keyPem = Buffer.from(cert.key, "base64").toString("utf8");

        if (!certPem.trim() || !keyPem.trim()) {
            logger.debug(
                `acmeCertSync: skipping cert for ${domain} - blank PEM after base64 decode`
            );
            continue;
        }

        // Check if cert already exists in DB
        const existing = await db
            .select()
            .from(certificates)
            .where(eq(certificates.domain, domain))
            .limit(1);

        let oldCertPem: string | null = null;
        let oldKeyPem: string | null = null;

        if (existing.length > 0 && existing[0].certFile) {
            try {
                const storedCertPem = decrypt(
                    existing[0].certFile,
                    config.getRawConfig().server.secret!
                );
                if (storedCertPem === certPem) {
                    logger.debug(
                        `acmeCertSync: cert for ${domain} is unchanged, skipping`
                    );
                    continue;
                }
                // Cert has changed; capture old values so we can send a correct
                // update message to the newt after the DB write.
                oldCertPem = storedCertPem;
                if (existing[0].keyFile) {
                    try {
                        oldKeyPem = decrypt(
                            existing[0].keyFile,
                            config.getRawConfig().server.secret!
                        );
                    } catch (keyErr) {
                        logger.debug(
                            `acmeCertSync: could not decrypt stored key for ${domain}: ${keyErr}`
                        );
                    }
                }
            } catch (err) {
                // Decryption failure means we should proceed with the update
                logger.debug(
                    `acmeCertSync: could not decrypt stored cert for ${domain}, will update: ${err}`
                );
            }
        }

        // Parse cert expiry from the first cert in the PEM bundle
        let expiresAt: number | null = null;
        const firstCertPem = extractFirstCert(certPem);
        if (firstCertPem) {
            try {
                const x509 = new crypto.X509Certificate(firstCertPem);
                // validTo is a date string; store expiry as unix seconds.
                expiresAt = Math.floor(new Date(x509.validTo).getTime() / 1000);
            } catch (err) {
                logger.debug(
                    `acmeCertSync: could not parse cert expiry for ${domain}: ${err}`
                );
            }
        }

        const wildcard = domain.startsWith("*.");

        // Cert material is stored encrypted at rest with the server secret.
        const encryptedCert = encrypt(
            certPem,
            config.getRawConfig().server.secret!
        );
        const encryptedKey = encrypt(
            keyPem,
            config.getRawConfig().server.secret!
        );
        const now = Math.floor(Date.now() / 1000);

        const domainId = await findDomainId(domain);
        if (domainId) {
            logger.debug(
                `acmeCertSync: resolved domainId "${domainId}" for cert domain "${domain}"`
            );
        } else {
            logger.debug(
                `acmeCertSync: no matching domain record found for cert domain "${domain}"`
            );
        }

        if (existing.length > 0) {
            // Update path: only overwrite domainId when we actually resolved one.
            await db
                .update(certificates)
                .set({
                    certFile: encryptedCert,
                    keyFile: encryptedKey,
                    status: "valid",
                    expiresAt,
                    updatedAt: now,
                    wildcard,
                    ...(domainId !== null && { domainId })
                })
                .where(eq(certificates.domain, domain));
            logger.info(
                `acmeCertSync: updated certificate for ${domain} (expires ${expiresAt ? new Date(expiresAt * 1000).toISOString() : "unknown"})`
            );
            await pushCertUpdateToAffectedNewts(
                domain,
                domainId,
                oldCertPem,
                oldKeyPem
            );
        } else {
            await db.insert(certificates).values({
                domain,
                domainId,
                certFile: encryptedCert,
                keyFile: encryptedKey,
                status: "valid",
                expiresAt,
                createdAt: now,
                updatedAt: now,
                wildcard
            });
            logger.info(
                `acmeCertSync: inserted new certificate for ${domain} (expires ${expiresAt ? new Date(expiresAt * 1000).toISOString() : "unknown"})`
            );
            // For a brand-new cert, push to any SSL resources that were waiting for it
            await pushCertUpdateToAffectedNewts(domain, domainId, null, null);
        }
    }
}
export function initAcmeCertSync(): void {
if (build == "saas") {
logger.debug(`acmeCertSync: skipping ACME cert sync in SaaS build`);
return;
}
const privateConfigData = privateConfig.getRawPrivateConfig();
if (!privateConfigData.flags?.enable_acme_cert_sync) {
logger.debug(
`acmeCertSync: ACME cert sync is disabled by config flag, skipping`
);
return;
}
if (privateConfigData.flags.use_pangolin_dns) {
logger.debug(
`acmeCertSync: ACME cert sync requires use_pangolin_dns flag to be disabled, skipping`
);
return;
}
const acmeJsonPath =
privateConfigData.acme?.acme_json_path ??
"config/letsencrypt/acme.json";
const resolver = privateConfigData.acme?.resolver ?? "letsencrypt";
const intervalMs = privateConfigData.acme?.sync_interval_ms ?? 5000;
logger.info(
`acmeCertSync: starting ACME cert sync from "${acmeJsonPath}" using resolver "${resolver}" every ${intervalMs}ms`
);
// Run immediately on init, then on the configured interval
syncAcmeCerts(acmeJsonPath, resolver).catch((err) => {
logger.error(`acmeCertSync: error during initial sync: ${err}`);
});
setInterval(() => {
syncAcmeCerts(acmeJsonPath, resolver).catch((err) => {
logger.error(`acmeCertSync: error during sync: ${err}`);
});
}, intervalMs);
}

View File

@@ -1,91 +0,0 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import logger from "@server/logger";
import { processAlerts } from "../processAlerts";
// ---------------------------------------------------------------------------
// Public API
// ---------------------------------------------------------------------------
/**
* Fire a `health_check_healthy` alert for the given health check.
*
* Call this after a previously-failing health check has recovered so that any
* matching `alertRules` can dispatch their email and webhook actions.
*
* @param orgId - Organisation that owns the health check.
* @param healthCheckId - Numeric primary key of the health check.
* @param healthCheckName - Human-readable name shown in notifications (optional).
* @param extra - Any additional key/value pairs to include in the payload.
*/
export async function fireHealthCheckHealthyAlert(
    orgId: string,
    healthCheckId: number,
    healthCheckName?: string | null,
    extra?: Record<string, unknown>
): Promise<void> {
    // Build the payload up front: id, then optional name, then caller extras
    // (extras may deliberately override earlier keys).
    const payload: Record<string, unknown> = { healthCheckId };
    if (healthCheckName != null) {
        payload.healthCheckName = healthCheckName;
    }
    Object.assign(payload, extra);
    try {
        await processAlerts({
            eventType: "health_check_healthy",
            orgId,
            healthCheckId,
            data: payload
        });
    } catch (err) {
        // Never let alert dispatch failures propagate into the caller.
        logger.error(
            `fireHealthCheckHealthyAlert: unexpected error for healthCheckId ${healthCheckId}`,
            err
        );
    }
}
/**
* Fire a `health_check_not_healthy` alert for the given health check.
*
* Call this after a health check has been detected as failing so that any
* matching `alertRules` can dispatch their email and webhook actions.
*
* @param orgId - Organisation that owns the health check.
* @param healthCheckId - Numeric primary key of the health check.
* @param healthCheckName - Human-readable name shown in notifications (optional).
* @param extra - Any additional key/value pairs to include in the payload.
*/
export async function fireHealthCheckNotHealthyAlert(
    orgId: string,
    healthCheckId: number,
    healthCheckName?: string | null,
    extra?: Record<string, unknown>
): Promise<void> {
    // Assemble the notification payload: id, then optional name, then any
    // caller-supplied extras (extras may override earlier keys).
    const payload: Record<string, unknown> = { healthCheckId };
    if (healthCheckName != null) {
        payload.healthCheckName = healthCheckName;
    }
    Object.assign(payload, extra);
    try {
        await processAlerts({
            eventType: "health_check_not_healthy",
            orgId,
            healthCheckId,
            data: payload
        });
    } catch (err) {
        // Swallow dispatch failures; firing an alert must never crash callers.
        logger.error(
            `fireHealthCheckNotHealthyAlert: unexpected error for healthCheckId ${healthCheckId}`,
            err
        );
    }
}

View File

@@ -1,91 +0,0 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import logger from "@server/logger";
import { processAlerts } from "../processAlerts";
// ---------------------------------------------------------------------------
// Public API
// ---------------------------------------------------------------------------
/**
* Fire a `health_check_healthy` alert for the given health check.
*
* Call this after a previously-failing health check has recovered so that any
* matching `alertRules` can dispatch their email and webhook actions.
*
* @param orgId - Organisation that owns the health check.
* @param healthCheckId - Numeric primary key of the health check.
* @param healthCheckName - Human-readable name shown in notifications (optional).
* @param extra - Any additional key/value pairs to include in the payload.
*/
export async function fireHealthCheckHealthyAlert(
    orgId: string,
    healthCheckId: number,
    healthCheckName?: string | null,
    extra?: Record<string, unknown>
): Promise<void> {
    // Compose payload: id first, optional name, then caller extras
    // (extras may intentionally shadow earlier keys).
    const payload: Record<string, unknown> = { healthCheckId };
    if (healthCheckName != null) {
        payload.healthCheckName = healthCheckName;
    }
    Object.assign(payload, extra);
    try {
        await processAlerts({
            eventType: "health_check_healthy",
            orgId,
            healthCheckId,
            data: payload
        });
    } catch (err) {
        // Alert dispatch is best-effort; log and move on.
        logger.error(
            `fireHealthCheckHealthyAlert: unexpected error for healthCheckId ${healthCheckId}`,
            err
        );
    }
}
/**
* Fire a `health_check_not_healthy` alert for the given health check.
*
* Call this after a health check has been detected as failing so that any
* matching `alertRules` can dispatch their email and webhook actions.
*
* @param orgId - Organisation that owns the health check.
* @param healthCheckId - Numeric primary key of the health check.
* @param healthCheckName - Human-readable name shown in notifications (optional).
* @param extra - Any additional key/value pairs to include in the payload.
*/
export async function fireHealthCheckNotHealthyAlert(
    orgId: string,
    healthCheckId: number,
    healthCheckName?: string | null,
    extra?: Record<string, unknown>
): Promise<void> {
    // Compose payload: id first, optional name, then caller extras
    // (extras may intentionally shadow earlier keys).
    const payload: Record<string, unknown> = { healthCheckId };
    if (healthCheckName != null) {
        payload.healthCheckName = healthCheckName;
    }
    Object.assign(payload, extra);
    try {
        await processAlerts({
            eventType: "health_check_not_healthy",
            orgId,
            healthCheckId,
            data: payload
        });
    } catch (err) {
        // Alert dispatch is best-effort; log and move on.
        logger.error(
            `fireHealthCheckNotHealthyAlert: unexpected error for healthCheckId ${healthCheckId}`,
            err
        );
    }
}

View File

@@ -1,91 +0,0 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import logger from "@server/logger";
import { processAlerts } from "../processAlerts";
// ---------------------------------------------------------------------------
// Public API
// ---------------------------------------------------------------------------
/**
* Fire a `site_online` alert for the given site.
*
* Call this after the site has been confirmed reachable / connected so that
* any matching `alertRules` can dispatch their email and webhook actions.
*
* @param orgId - Organisation that owns the site.
* @param siteId - Numeric primary key of the site.
* @param siteName - Human-readable name shown in notifications (optional).
* @param extra - Any additional key/value pairs to include in the payload.
*/
export async function fireSiteOnlineAlert(
    orgId: string,
    siteId: number,
    siteName?: string,
    extra?: Record<string, unknown>
): Promise<void> {
    // Build the event payload: site id, optional display name, then any
    // caller-provided extras (extras may override earlier keys).
    const payload: Record<string, unknown> = { siteId };
    if (siteName != null) {
        payload.siteName = siteName;
    }
    Object.assign(payload, extra);
    try {
        await processAlerts({
            eventType: "site_online",
            orgId,
            siteId,
            data: payload
        });
    } catch (err) {
        // Firing an alert must never throw into the caller.
        logger.error(
            `fireSiteOnlineAlert: unexpected error for siteId ${siteId}`,
            err
        );
    }
}
/**
* Fire a `site_offline` alert for the given site.
*
* Call this after the site has been detected as unreachable / disconnected so
* that any matching `alertRules` can dispatch their email and webhook actions.
*
* @param orgId - Organisation that owns the site.
* @param siteId - Numeric primary key of the site.
* @param siteName - Human-readable name shown in notifications (optional).
* @param extra - Any additional key/value pairs to include in the payload.
*/
export async function fireSiteOfflineAlert(
    orgId: string,
    siteId: number,
    siteName?: string,
    extra?: Record<string, unknown>
): Promise<void> {
    // Build the event payload: site id, optional display name, then any
    // caller-provided extras (extras may override earlier keys).
    const payload: Record<string, unknown> = { siteId };
    if (siteName != null) {
        payload.siteName = siteName;
    }
    Object.assign(payload, extra);
    try {
        await processAlerts({
            eventType: "site_offline",
            orgId,
            siteId,
            data: payload
        });
    } catch (err) {
        // Firing an alert must never throw into the caller.
        logger.error(
            `fireSiteOfflineAlert: unexpected error for siteId ${siteId}`,
            err
        );
    }
}

View File

@@ -1,19 +0,0 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
// Barrel module for the alerting subsystem: re-exports the public types,
// the core processing pipeline, the webhook/email dispatchers, and the
// per-event fire* helpers so consumers can import from one path.
export * from "./types";
export * from "./processAlerts";
export * from "./sendAlertWebhook";
export * from "./sendAlertEmail";
export * from "./events/siteEvents";
export * from "./events/healthCheckEvents";

View File

@@ -1,294 +0,0 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import { and, eq, isNull, or } from "drizzle-orm";
import { db } from "@server/db";
import {
alertRules,
alertSites,
alertHealthChecks,
alertEmailActions,
alertEmailRecipients,
alertWebhookActions,
userOrgRoles,
users
} from "@server/db";
import config from "@server/lib/config";
import { decrypt } from "@server/lib/crypto";
import logger from "@server/logger";
import { AlertContext, WebhookAlertConfig } from "./types";
import { sendAlertWebhook } from "./sendAlertWebhook";
import { sendAlertEmail } from "./sendAlertEmail";
/**
* Core alert processing pipeline.
*
* Given an `AlertContext`, this function:
* 1. Finds all enabled `alertRules` whose `eventType` matches and whose
* `siteId` / `healthCheckId` is listed in the `alertSites` /
* `alertHealthChecks` junction tables (or has no junction entries,
* meaning "match all").
* 2. Applies per-rule cooldown gating.
* 3. Dispatches emails and webhook POSTs for every attached action.
* 4. Updates `lastTriggeredAt` and `lastSentAt` timestamps.
*/
export async function processAlerts(context: AlertContext): Promise<void> {
    // Capture one timestamp for the whole run so cooldown math and the
    // lastTriggeredAt / lastSentAt updates all agree.
    const now = Date.now();
    // ------------------------------------------------------------------
    // 1. Find matching alert rules
    // ------------------------------------------------------------------
    // Rules with no junction-table entries match ALL sites / health checks.
    // Rules with junction entries match only those specific IDs.
    // We implement this with a LEFT JOIN: a NULL join result means the rule
    // has no scope restrictions (match all); a non-NULL result that satisfies
    // the id equality filter means an explicit match.
    const baseConditions = and(
        eq(alertRules.orgId, context.orgId),
        eq(alertRules.eventType, context.eventType),
        eq(alertRules.enabled, true)
    );
    let rules: (typeof alertRules.$inferSelect)[];
    if (context.siteId != null) {
        const rows = await db
            .select()
            .from(alertRules)
            .leftJoin(
                alertSites,
                eq(alertSites.alertRuleId, alertRules.alertRuleId)
            )
            .where(
                and(
                    baseConditions,
                    or(
                        eq(alertSites.siteId, context.siteId),
                        isNull(alertSites.alertRuleId)
                    )
                )
            );
        // Each joined row nests the rule under its table key; keep just the rule.
        rules = rows.map((r) => r.alertRules);
    } else if (context.healthCheckId != null) {
        const rows = await db
            .select()
            .from(alertRules)
            .leftJoin(
                alertHealthChecks,
                eq(alertHealthChecks.alertRuleId, alertRules.alertRuleId)
            )
            .where(
                and(
                    baseConditions,
                    or(
                        eq(alertHealthChecks.healthCheckId, context.healthCheckId),
                        isNull(alertHealthChecks.alertRuleId)
                    )
                )
            );
        rules = rows.map((r) => r.alertRules);
    } else {
        // Context carries neither a siteId nor a healthCheckId, so there is
        // no scope to match rules against.
        rules = [];
    }
    if (rules.length === 0) {
        logger.debug(
            `processAlerts: no matching rules for event "${context.eventType}" in org "${context.orgId}"`
        );
        return;
    }
    // Process each rule independently so one failing rule cannot block the rest.
    for (const rule of rules) {
        try {
            await processRule(rule, context, now);
        } catch (err) {
            logger.error(
                `processAlerts: error processing rule ${rule.alertRuleId} for event "${context.eventType}"`,
                err
            );
        }
    }
}
// ---------------------------------------------------------------------------
// Per-rule processing
// ---------------------------------------------------------------------------
/**
 * Processes a single matched alert rule: applies cooldown gating, marks the
 * rule as triggered, then dispatches all of its enabled email and webhook
 * actions for the given alert context.
 *
 * @param rule - The matched `alertRules` row.
 * @param context - The alert event being processed.
 * @param now - Epoch milliseconds captured once by the caller so all
 *   timestamp updates in this run agree.
 */
async function processRule(
    rule: typeof alertRules.$inferSelect,
    context: AlertContext,
    now: number
): Promise<void> {
    // ------------------------------------------------------------------
    // 2. Cooldown check
    // ------------------------------------------------------------------
    if (
        rule.lastTriggeredAt != null &&
        now - rule.lastTriggeredAt < rule.cooldownSeconds * 1000
    ) {
        const remainingSeconds = Math.ceil(
            (rule.cooldownSeconds * 1000 - (now - rule.lastTriggeredAt)) / 1000
        );
        logger.debug(
            `processAlerts: rule ${rule.alertRuleId} is in cooldown ${remainingSeconds}s remaining`
        );
        return;
    }
    // ------------------------------------------------------------------
    // 3. Mark rule as triggered (optimistic update before sending so we
    // don't re-trigger if the send is slow)
    // ------------------------------------------------------------------
    await db
        .update(alertRules)
        .set({ lastTriggeredAt: now })
        .where(eq(alertRules.alertRuleId, rule.alertRuleId));
    // ------------------------------------------------------------------
    // 4. Process email actions
    // ------------------------------------------------------------------
    const emailActions = await db
        .select()
        .from(alertEmailActions)
        .where(
            and(
                eq(alertEmailActions.alertRuleId, rule.alertRuleId),
                eq(alertEmailActions.enabled, true)
            )
        );
    // Each action is isolated in its own try/catch so one failure does not
    // stop the remaining actions from firing.
    for (const action of emailActions) {
        try {
            const recipients = await resolveEmailRecipients(action.emailActionId);
            if (recipients.length > 0) {
                await sendAlertEmail(recipients, context);
                await db
                    .update(alertEmailActions)
                    .set({ lastSentAt: now })
                    .where(
                        eq(alertEmailActions.emailActionId, action.emailActionId)
                    );
            }
        } catch (err) {
            logger.error(
                `processAlerts: failed to send alert email for action ${action.emailActionId}`,
                err
            );
        }
    }
    // ------------------------------------------------------------------
    // 5. Process webhook actions
    // ------------------------------------------------------------------
    const webhookActions = await db
        .select()
        .from(alertWebhookActions)
        .where(
            and(
                eq(alertWebhookActions.alertRuleId, rule.alertRuleId),
                eq(alertWebhookActions.enabled, true)
            )
        );
    const serverSecret = config.getRawConfig().server.secret!;
    for (const action of webhookActions) {
        try {
            // Default to an unauthenticated webhook when no config blob exists.
            let webhookConfig: WebhookAlertConfig = { authType: "none" };
            if (action.config) {
                try {
                    // Config is stored encrypted; if it cannot be decrypted we
                    // skip this action rather than POST with wrong auth.
                    const decrypted = decrypt(action.config, serverSecret);
                    webhookConfig = JSON.parse(decrypted) as WebhookAlertConfig;
                } catch (err) {
                    logger.error(
                        `processAlerts: failed to decrypt webhook config for action ${action.webhookActionId}`,
                        err
                    );
                    continue;
                }
            }
            await sendAlertWebhook(action.webhookUrl, webhookConfig, context);
            await db
                .update(alertWebhookActions)
                .set({ lastSentAt: now })
                .where(
                    eq(
                        alertWebhookActions.webhookActionId,
                        action.webhookActionId
                    )
                );
        } catch (err) {
            logger.error(
                `processAlerts: failed to send alert webhook for action ${action.webhookActionId}`,
                err
            );
        }
    }
}
// ---------------------------------------------------------------------------
// Email recipient resolution
// ---------------------------------------------------------------------------
/**
* Resolves all email addresses for a given `emailActionId`.
*
* Recipients may be:
* - Direct users (by `userId`)
* - All users in a role (by `roleId`, resolved via `userOrgRoles`)
* - Direct external email addresses
*/
async function resolveEmailRecipients(emailActionId: number): Promise<string[]> {
    const recipientRows = await db
        .select()
        .from(alertEmailRecipients)
        .where(eq(alertEmailRecipients.emailActionId, emailActionId));
    // A Set deduplicates when the same address arrives via several paths
    // (direct email, user, and/or role membership).
    const addresses = new Set<string>();
    for (const recipient of recipientRows) {
        // Direct external email address.
        if (recipient.email) {
            addresses.add(recipient.email);
        }
        // Direct user: look up that user's email.
        if (recipient.userId) {
            const matches = await db
                .select({ email: users.email })
                .from(users)
                .where(eq(users.userId, recipient.userId))
                .limit(1);
            const user = matches[0];
            if (user?.email) {
                addresses.add(user.email);
            }
        }
        // Role: fan out to every user holding the role via userOrgRoles.
        if (recipient.roleId) {
            const members = await db
                .select({ email: users.email })
                .from(userOrgRoles)
                .innerJoin(users, eq(userOrgRoles.userId, users.userId))
                .where(eq(userOrgRoles.roleId, Number(recipient.roleId)));
            for (const member of members) {
                if (member.email) {
                    addresses.add(member.email);
                }
            }
        }
    }
    return [...addresses];
}

View File

@@ -1,87 +0,0 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import { sendEmail } from "@server/emails";
import AlertNotification from "@server/emails/templates/AlertNotification";
import config from "@server/lib/config";
import logger from "@server/logger";
import { AlertContext } from "./types";
/**
* Sends an alert notification email to every address in `recipients`.
*
* Each recipient receives an individual email (no BCC list) so that delivery
* failures for one address do not affect the others. Failures per recipient
 * are logged and swallowed; the caller only sees an error if something goes
* wrong before the send loop.
*/
export async function sendAlertEmail(
    recipients: string[],
    context: AlertContext
): Promise<void> {
    if (!recipients.length) {
        return;
    }
    // Resolve sender and subject once; they are identical for every recipient.
    const senderAddress = config.getNoReplyEmail();
    const subjectLine = buildSubject(context);
    for (const recipientAddress of recipients) {
        try {
            const template = AlertNotification({
                eventType: context.eventType,
                orgId: context.orgId,
                data: context.data
            });
            await sendEmail(template, {
                from: senderAddress,
                to: recipientAddress,
                subject: subjectLine
            });
            logger.debug(
                `Alert email sent to "${recipientAddress}" for event "${context.eventType}"`
            );
        } catch (err) {
            // Per-recipient failures are logged and swallowed so one bad
            // address cannot block delivery to the rest.
            logger.error(
                `sendAlertEmail: failed to send alert email to "${recipientAddress}" for event "${context.eventType}"`,
                err
            );
        }
    }
}
// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------
function buildSubject(context: AlertContext): string {
    // Lookup table mapping each alert event type to its email subject line.
    const subjectByEvent: Record<string, string> = {
        site_online: "[Alert] Site Back Online",
        site_offline: "[Alert] Site Offline",
        health_check_healthy: "[Alert] Health Check Recovered",
        health_check_not_healthy: "[Alert] Health Check Failing"
    };
    // The fallback should be unreachable with a well-typed caller, but it
    // keeps runtime behaviour predictable for unknown event types.
    return subjectByEvent[context.eventType] ?? "[Alert] Event Notification";
}

View File

@@ -1,140 +0,0 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import logger from "@server/logger";
import { AlertContext, WebhookAlertConfig } from "./types";
const REQUEST_TIMEOUT_MS = 15_000;
/**
* Sends a single webhook POST for an alert event.
*
* The payload shape is:
* ```json
* {
* "event": "site_online",
* "timestamp": "2024-01-01T00:00:00.000Z",
* "data": { ... }
* }
* ```
*
* Authentication headers are applied according to `config.authType`,
* mirroring the same strategies supported by HttpLogDestination:
* none | bearer | basic | custom.
*/
export async function sendAlertWebhook(
    url: string,
    webhookConfig: WebhookAlertConfig,
    context: AlertContext
): Promise<void> {
    // Envelope: { event, timestamp, data } where data merges orgId with the
    // event-specific payload fields.
    const payload = {
        event: context.eventType,
        timestamp: new Date().toISOString(),
        data: {
            orgId: context.orgId,
            ...context.data
        }
    };
    const body = JSON.stringify(payload);
    const headers = buildHeaders(webhookConfig);
    // Abort the request if it has not settled within REQUEST_TIMEOUT_MS.
    const controller = new AbortController();
    const timeoutHandle = setTimeout(() => controller.abort(), REQUEST_TIMEOUT_MS);
    let response: Response;
    try {
        response = await fetch(url, {
            method: webhookConfig.method ?? "POST",
            headers,
            body,
            signal: controller.signal
        });
    } catch (err: unknown) {
        // Distinguish our own timeout abort from other network failures so
        // the error message is actionable.
        const isAbort = err instanceof Error && err.name === "AbortError";
        if (isAbort) {
            throw new Error(
                `Alert webhook: request to "${url}" timed out after ${REQUEST_TIMEOUT_MS} ms`
            );
        }
        const msg = err instanceof Error ? err.message : String(err);
        throw new Error(`Alert webhook: request to "${url}" failed ${msg}`);
    } finally {
        // Always clear the timer so it cannot fire after the request settles.
        clearTimeout(timeoutHandle);
    }
    // Treat any non-2xx status as a failure and include a short response
    // snippet for diagnostics.
    if (!response.ok) {
        let snippet = "";
        try {
            const text = await response.text();
            snippet = text.slice(0, 300);
        } catch {
            // best-effort
        }
        throw new Error(
            `Alert webhook: server at "${url}" returned HTTP ${response.status} ${response.statusText}` +
            (snippet ? ` ${snippet}` : "")
        );
    }
    logger.debug(`Alert webhook sent successfully to "${url}" for event "${context.eventType}"`);
}
// ---------------------------------------------------------------------------
// Header construction (mirrors HttpLogDestination.buildHeaders)
// ---------------------------------------------------------------------------
function buildHeaders(webhookConfig: WebhookAlertConfig): Record<string, string> {
    // Every request is JSON; auth headers are layered on per strategy.
    const result: Record<string, string> = {
        "Content-Type": "application/json"
    };
    if (webhookConfig.authType === "bearer") {
        const token = webhookConfig.bearerToken?.trim();
        if (token) {
            result["Authorization"] = `Bearer ${token}`;
        }
    } else if (webhookConfig.authType === "basic") {
        const creds = webhookConfig.basicCredentials?.trim();
        if (creds) {
            result["Authorization"] = `Basic ${Buffer.from(creds).toString("base64")}`;
        }
    } else if (webhookConfig.authType === "custom") {
        const headerName = webhookConfig.customHeaderName?.trim();
        if (headerName) {
            result[headerName] = webhookConfig.customHeaderValue ?? "";
        }
    }
    // "none" (and any unknown auth type) adds no Authorization header.
    // Extra user-defined headers are applied last and may override the above.
    for (const { key, value } of webhookConfig.headers ?? []) {
        if (key.trim()) {
            result[key.trim()] = value;
        }
    }
    return result;
}

View File

@@ -1,63 +0,0 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
// ---------------------------------------------------------------------------
// Alert event types
// ---------------------------------------------------------------------------
/** Discriminant naming the kind of alert event being fired. */
export type AlertEventType =
    | "site_online"
    | "site_offline"
    | "health_check_healthy"
    | "health_check_not_healthy";
// ---------------------------------------------------------------------------
// Webhook authentication config (stored as encrypted JSON in the DB)
// ---------------------------------------------------------------------------
/** Authentication strategy applied when POSTing an alert webhook. */
export type WebhookAuthType = "none" | "bearer" | "basic" | "custom";
/**
 * Stored as an encrypted JSON blob in `alertWebhookActions.config`.
 */
export interface WebhookAlertConfig {
    /** Authentication strategy for the webhook endpoint */
    authType: WebhookAuthType;
    /** Bearer token used when authType === "bearer" */
    bearerToken?: string;
    /** Basic credentials "username:password" used when authType === "basic" */
    basicCredentials?: string;
    /** Custom header name used when authType === "custom" */
    customHeaderName?: string;
    /** Custom header value used when authType === "custom" */
    customHeaderValue?: string;
    /** Extra headers to send with every webhook request */
    headers?: Array<{ key: string; value: string }>;
    /** HTTP method (default POST) */
    method?: string;
}
// ---------------------------------------------------------------------------
// Internal alert event passed through the processing pipeline
// ---------------------------------------------------------------------------
/** Alert event handed from the fire* helpers into the processing pipeline. */
export interface AlertContext {
    eventType: AlertEventType;
    orgId: string;
    /** Set for site_online / site_offline events */
    siteId?: number;
    /** Set for health_check_* events */
    healthCheckId?: number;
    /** Human-readable context data included in emails and webhook payloads */
    data: Record<string, unknown>;
}

View File

@@ -1,7 +1,7 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.

View File

@@ -1,7 +1,7 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.

View File

@@ -1,7 +1,7 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.

View File

@@ -1,7 +1,7 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.

View File

@@ -1,7 +1,7 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.

View File

@@ -1,7 +1,7 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
@@ -11,15 +11,23 @@
* This file is not licensed under the AGPLv3.
*/
import privateConfig from "./config";
import config from "@server/lib/config";
import config from "./config";
import { certificates, db } from "@server/db";
import { and, eq, isNotNull, or, inArray, sql } from "drizzle-orm";
import { decrypt } from "@server/lib/crypto";
import { decryptData } from "@server/lib/encryption";
import logger from "@server/logger";
import cache from "#private/lib/cache";
let encryptionKeyHex = "";
let encryptionKey: Buffer;
function loadEncryptData() {
if (encryptionKey) {
return; // already loaded
}
encryptionKeyHex = config.getRawPrivateConfig().server.encryption_key;
encryptionKey = Buffer.from(encryptionKeyHex, "hex");
}
// Define the return type for clarity and type safety
export type CertificateResult = {
@@ -37,7 +45,7 @@ export async function getValidCertificatesForDomains(
domains: Set<string>,
useCache: boolean = true
): Promise<Array<CertificateResult>> {
loadEncryptData(); // Ensure encryption key is loaded
const finalResults: CertificateResult[] = [];
const domainsToQuery = new Set<string>();
@@ -60,7 +68,7 @@ export async function getValidCertificatesForDomains(
// 2. If all domains were resolved from the cache, return early
if (domainsToQuery.size === 0) {
const decryptedResults = decryptFinalResults(finalResults, config.getRawConfig().server.secret!);
const decryptedResults = decryptFinalResults(finalResults);
return decryptedResults;
}
@@ -165,23 +173,22 @@ export async function getValidCertificatesForDomains(
}
}
const decryptedResults = decryptFinalResults(finalResults, config.getRawConfig().server.secret!);
const decryptedResults = decryptFinalResults(finalResults);
return decryptedResults;
}
function decryptFinalResults(
finalResults: CertificateResult[],
secret: string
finalResults: CertificateResult[]
): CertificateResult[] {
const validCertsDecrypted = finalResults.map((cert) => {
// Decrypt and save certificate file
const decryptedCert = decrypt(
const decryptedCert = decryptData(
cert.certFile!, // is not null from query
secret
encryptionKey
);
// Decrypt and save key file
const decryptedKey = decrypt(cert.keyFile!, secret);
const decryptedKey = decryptData(cert.keyFile!, encryptionKey);
// Return only the certificate data without org information
return {

View File

@@ -1,7 +1,7 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.

View File

@@ -1,7 +1,7 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.

View File

@@ -1,7 +1,7 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.

View File

@@ -1,7 +1,7 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.

View File

@@ -1,7 +1,7 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.

View File

@@ -1,7 +1,7 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.

View File

@@ -1,7 +1,7 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.

View File

@@ -1,7 +1,7 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.

View File

@@ -1,7 +1,7 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.

View File

@@ -1,7 +1,7 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
@@ -153,7 +153,7 @@ export async function flushConnectionLogToDb(): Promise<void> {
);
}
// Stop processing further batches from this snapshot - they will
// Stop processing further batches from this snapshot they will
// be picked up via the re-queued records on the next flush.
const remaining = snapshot.slice(i + INSERT_BATCH_SIZE);
if (remaining.length > 0) {
@@ -180,7 +180,7 @@ const flushTimer = setInterval(async () => {
}, FLUSH_INTERVAL_MS);
// Calling unref() means this timer will not keep the Node.js event loop alive
// on its own - the process can still exit normally when there is no other work
// on its own the process can still exit normally when there is no other work
// left. The graceful-shutdown path will call flushConnectionLogToDb() explicitly
// before process.exit(), so no data is lost.
flushTimer.unref();
@@ -223,7 +223,7 @@ export function logConnectionAudit(record: ConnectionLogRecord): void {
buffer.push(record);
if (buffer.length >= MAX_BUFFERED_RECORDS) {
// Fire and forget - errors are handled inside flushConnectionLogToDb
// Fire and forget errors are handled inside flushConnectionLogToDb
flushConnectionLogToDb().catch((error) => {
logger.error(
"Unexpected error during size-triggered connection log flush:",
@@ -231,4 +231,4 @@ export function logConnectionAudit(record: ConnectionLogRecord): void {
);
});
}
}
}

View File

@@ -1,7 +1,7 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.

View File

@@ -1,7 +1,7 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.

View File

@@ -1,7 +1,7 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
@@ -37,7 +37,7 @@ const DEFAULT_FORMAT: PayloadFormat = "json_array";
*
* **Payload formats** (controlled by `config.format`):
*
* - `json_array` (default) - one POST per batch, body is a JSON array:
* - `json_array` (default) one POST per batch, body is a JSON array:
* ```json
* [
* { "event": "request", "timestamp": "2024-01-01T00:00:00.000Z", "data": { … } },
@@ -46,7 +46,7 @@ const DEFAULT_FORMAT: PayloadFormat = "json_array";
* ```
* `Content-Type: application/json`
*
* - `ndjson` - one POST per batch, body is newline-delimited JSON (one object
* - `ndjson` one POST per batch, body is newline-delimited JSON (one object
* per line, no outer array). Required by Splunk HEC, Elastic/OpenSearch,
* and Grafana Loki:
* ```
@@ -55,7 +55,7 @@ const DEFAULT_FORMAT: PayloadFormat = "json_array";
* ```
* `Content-Type: application/x-ndjson`
*
* - `json_single` - one POST **per event**, body is a plain JSON object.
* - `json_single` one POST **per event**, body is a plain JSON object.
* Use only for endpoints that cannot handle batches at all.
*
* With a body template each event is rendered through the template before
@@ -319,4 +319,4 @@ function epochSecondsToIso(epochSeconds: number): string {
function escapeJsonString(value: string): string {
// JSON.stringify produces `"<escaped>"` strip the outer quotes.
return JSON.stringify(value).slice(1, -1);
}
}

View File

@@ -1,7 +1,7 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.

View File

@@ -1,7 +1,7 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
@@ -60,9 +60,9 @@ export type AuthType = "none" | "bearer" | "basic" | "custom";
/**
* Controls how the batch of events is serialised into the HTTP request body.
*
* - `json_array` `[{…}, {…}]` - default; one POST per batch wrapped in a
* - `json_array` `[{…}, {…}]` default; one POST per batch wrapped in a
* JSON array. Works with most generic webhooks and Datadog.
* - `ndjson` `{…}\n{…}` - newline-delimited JSON, one object per
* - `ndjson` `{…}\n{…}` newline-delimited JSON, one object per
* line. Required by Splunk HEC, Elastic/OpenSearch, Loki.
* - `json_single` one HTTP POST per event, body is a plain JSON object.
* Use only for endpoints that cannot handle batches at all.
@@ -131,4 +131,4 @@ export interface DestinationFailureState {
nextRetryAt: number;
/** Date.now() value of the very first failure in the current streak */
firstFailedAt: number;
}
}

View File

@@ -1,7 +1,7 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.

View File

@@ -1,7 +1,7 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.

View File

@@ -1,7 +1,7 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.

View File

@@ -1,7 +1,7 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
@@ -34,6 +34,10 @@ export const privateConfigSchema = z.object({
}),
server: z
.object({
encryption_key: z
.string()
.optional()
.transform(getEnvOrYaml("SERVER_ENCRYPTION_KEY")),
reo_client_id: z
.string()
.optional()
@@ -91,21 +95,10 @@ export const privateConfigSchema = z.object({
.object({
enable_redis: z.boolean().optional().default(false),
use_pangolin_dns: z.boolean().optional().default(false),
use_org_only_idp: z.boolean().optional(),
enable_acme_cert_sync: z.boolean().optional().default(true)
use_org_only_idp: z.boolean().optional()
})
.optional()
.prefault({}),
acme: z
.object({
acme_json_path: z
.string()
.optional()
.default("config/letsencrypt/acme.json"),
resolver: z.string().optional().default("letsencrypt"),
sync_interval_ms: z.number().optional().default(5000)
})
.optional(),
branding: z
.object({
app_name: z.string().optional(),

View File

@@ -1,7 +1,7 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.

View File

@@ -1,7 +1,7 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.

View File

@@ -1,7 +1,7 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.

View File

@@ -1,7 +1,7 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.

View File

@@ -1,7 +1,7 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
@@ -33,7 +33,7 @@ import {
} from "drizzle-orm";
import logger from "@server/logger";
import config from "@server/lib/config";
import { orgs, resources, sites, siteNetworks, siteResources, Target, targets } from "@server/db";
import { orgs, resources, sites, Target, targets } from "@server/db";
import {
sanitize,
encodePath,
@@ -267,35 +267,6 @@ export async function getTraefikConfig(
});
});
// Query siteResources in HTTP mode with SSL enabled and aliases - cert generation / HTTPS edge
const siteResourcesWithFullDomain = await db
.select({
siteResourceId: siteResources.siteResourceId,
fullDomain: siteResources.fullDomain,
mode: siteResources.mode
})
.from(siteResources)
.innerJoin(siteNetworks, eq(siteResources.networkId, siteNetworks.networkId))
.innerJoin(sites, eq(siteNetworks.siteId, sites.siteId))
.where(
and(
eq(siteResources.enabled, true),
isNotNull(siteResources.fullDomain),
eq(siteResources.mode, "http"),
eq(siteResources.ssl, true),
or(
eq(sites.exitNodeId, exitNodeId),
and(
isNull(sites.exitNodeId),
sql`(${siteTypes.includes("local") ? 1 : 0} = 1)`,
eq(sites.type, "local"),
sql`(${build != "saas" ? 1 : 0} = 1)`
)
),
inArray(sites.type, siteTypes)
)
);
let validCerts: CertificateResult[] = [];
if (privateConfig.getRawPrivateConfig().flags.use_pangolin_dns) {
// create a list of all domains to get certs for
@@ -305,12 +276,6 @@ export async function getTraefikConfig(
domains.add(resource.fullDomain);
}
}
// Include siteResource aliases so pangolin-dns also fetches certs for them
for (const sr of siteResourcesWithFullDomain) {
if (sr.fullDomain) {
domains.add(sr.fullDomain);
}
}
// get the valid certs for these domains
validCerts = await getValidCertificatesForDomains(domains, true); // we are caching here because this is called often
// logger.debug(`Valid certs for domains: ${JSON.stringify(validCerts)}`);
@@ -902,139 +867,6 @@ export async function getTraefikConfig(
}
}
// Add Traefik routes for siteResource aliases (HTTP mode + SSL) so that
// Traefik generates TLS certificates for those domains even when no
// matching resource exists yet.
if (siteResourcesWithFullDomain.length > 0) {
// Build a set of domains already covered by normal resources
const existingFullDomains = new Set<string>();
for (const resource of resourcesMap.values()) {
if (resource.fullDomain) {
existingFullDomains.add(resource.fullDomain);
}
}
for (const sr of siteResourcesWithFullDomain) {
if (!sr.fullDomain) continue;
// Skip if this alias is already handled by a resource router
if (existingFullDomains.has(sr.fullDomain)) continue;
const fullDomain = sr.fullDomain;
const srKey = `site-resource-cert-${sr.siteResourceId}`;
const siteResourceServiceName = `${srKey}-service`;
const siteResourceRouterName = `${srKey}-router`;
const siteResourceRewriteMiddlewareName = `${srKey}-rewrite`;
const maintenancePort = config.getRawConfig().server.next_port;
const maintenanceHost =
config.getRawConfig().server.internal_hostname;
if (!config_output.http.routers) {
config_output.http.routers = {};
}
if (!config_output.http.services) {
config_output.http.services = {};
}
if (!config_output.http.middlewares) {
config_output.http.middlewares = {};
}
// Service pointing at the internal maintenance/Next.js page
config_output.http.services[siteResourceServiceName] = {
loadBalancer: {
servers: [
{
url: `http://${maintenanceHost}:${maintenancePort}`
}
],
passHostHeader: true
}
};
// Middleware that rewrites any path to /maintenance-screen
config_output.http.middlewares[
siteResourceRewriteMiddlewareName
] = {
replacePathRegex: {
regex: "^/(.*)",
replacement: "/private-maintenance-screen"
}
};
// HTTP -> HTTPS redirect so the ACME challenge can be served
config_output.http.routers[
`${siteResourceRouterName}-redirect`
] = {
entryPoints: [
config.getRawConfig().traefik.http_entrypoint
],
middlewares: [redirectHttpsMiddlewareName],
service: siteResourceServiceName,
rule: `Host(\`${fullDomain}\`)`,
priority: 100
};
// Determine TLS / cert-resolver configuration
let tls: any = {};
if (
!privateConfig.getRawPrivateConfig().flags.use_pangolin_dns
) {
const domainParts = fullDomain.split(".");
const wildCard =
domainParts.length <= 2
? `*.${domainParts.join(".")}`
: `*.${domainParts.slice(1).join(".")}`;
const globalDefaultResolver =
config.getRawConfig().traefik.cert_resolver;
const globalDefaultPreferWildcard =
config.getRawConfig().traefik.prefer_wildcard_cert;
tls = {
certResolver: globalDefaultResolver,
...(globalDefaultPreferWildcard
? { domains: [{ main: wildCard }] }
: {})
};
} else {
// pangolin-dns: only add route if we already have a valid cert
const matchingCert = validCerts.find(
(cert) => cert.queriedDomain === fullDomain
);
if (!matchingCert) {
logger.debug(
`No matching certificate found for siteResource alias: ${fullDomain}`
);
continue;
}
}
// HTTPS router - presence of this entry triggers cert generation
config_output.http.routers[siteResourceRouterName] = {
entryPoints: [
config.getRawConfig().traefik.https_entrypoint
],
service: siteResourceServiceName,
middlewares: [siteResourceRewriteMiddlewareName],
rule: `Host(\`${fullDomain}\`)`,
priority: 100,
tls
};
// Assets bypass router - lets Next.js static files load without rewrite
config_output.http.routers[`${siteResourceRouterName}-assets`] = {
entryPoints: [
config.getRawConfig().traefik.https_entrypoint
],
service: siteResourceServiceName,
rule: `Host(\`${fullDomain}\`) && (PathPrefix(\`/_next\`) || PathRegexp(\`^/__nextjs*\`))`,
priority: 101,
tls
};
}
}
if (generateLoginPageRouters) {
const exitNodeLoginPages = await db
.select({

View File

@@ -1,7 +1,7 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.

View File

@@ -1,7 +1,7 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.

View File

@@ -1,7 +1,7 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.

View File

@@ -1,7 +1,7 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.

View File

@@ -1,7 +1,7 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.

View File

@@ -1,7 +1,7 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.

View File

@@ -1,7 +1,7 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.

View File

@@ -1,7 +1,7 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.

View File

@@ -1,7 +1,7 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.

View File

@@ -1,7 +1,7 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.

Some files were not shown because too many files have changed in this diff Show More