Merge branch 'dev' into feat/login-page-customization

Fred KISSIE committed on 2025-12-05 22:38:07 +01:00
275 changed files with 21920 additions and 6990 deletions

.gitignore (vendored): 3 changed lines

@@ -49,4 +49,5 @@ postgres/
dynamic/
*.mmdb
scratch/
tsconfig.json
tsconfig.json
hydrateSaas.ts

.nvmrc: 2 changed lines

@@ -1 +1 @@
22
25


@@ -1,10 +1,12 @@
FROM node:22-alpine AS builder
FROM node:25-alpine AS builder
WORKDIR /app
ARG BUILD=oss
ARG DATABASE=sqlite
RUN apk add --no-cache curl tzdata python3 make g++
# COPY package.json package-lock.json ./
COPY package*.json ./
RUN npm ci
@@ -41,12 +43,13 @@ RUN test -f dist/server.mjs
RUN npm run build:cli
FROM node:22-alpine AS runner
FROM node:25-alpine AS runner
WORKDIR /app
# Curl used for the health checks
RUN apk add --no-cache curl tzdata
# Python and build tools needed for better-sqlite3 native compilation
RUN apk add --no-cache curl tzdata python3 make g++
# COPY package.json package-lock.json ./
COPY package*.json ./


@@ -31,6 +31,7 @@ proxy-resources:
# - owen@pangolin.net
# whitelist-users:
# - owen@pangolin.net
# auto-login-idp: 1
headers:
- name: X-Example-Header
value: example-value


@@ -5,14 +5,14 @@ meta {
}
post {
url: http://localhost:4000/api/v1/auth/login
url: http://localhost:3000/api/v1/auth/login
body: json
auth: none
}
body:json {
{
"email": "owen@pangolin.net",
"email": "admin@fosrl.io",
"password": "Password123!"
}
}

bruno/Olm/createOlm.bru (new file): 15 lines

@@ -0,0 +1,15 @@
meta {
name: createOlm
type: http
seq: 1
}
put {
url: http://localhost:3000/api/v1/olm
body: none
auth: inherit
}
settings {
encodeUrl: true
}
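
The updated login request and the new createOlm request above can be exercised together from a small script. Below is a minimal TypeScript sketch, not part of this commit: it assumes Node 18+ global fetch, the dev server on port 3000 as in the Bruno files, cookie-based session auth (what "auth: inherit" resolves to is an assumption), and a JSON response from the Olm endpoint.

// Hypothetical sketch combining the two Bruno requests above.
// Paths, verbs, and credentials come from the .bru files; the cookie handling
// and the response shape are assumptions, not confirmed by this diff.
const BASE = "http://localhost:3000";

async function loginThenCreateOlm() {
    // POST /api/v1/auth/login with the body shown in the login request
    const login = await fetch(`${BASE}/api/v1/auth/login`, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({ email: "admin@fosrl.io", password: "Password123!" })
    });
    if (!login.ok) throw new Error(`login failed: ${login.status}`);

    // "auth: inherit" in createOlm.bru reuses the collection's session, so we
    // forward the session cookie (name=value only) from the login response.
    const cookie = (login.headers.get("set-cookie") ?? "").split(";")[0];

    // PUT /api/v1/olm with no body, mirroring bruno/Olm/createOlm.bru
    const olm = await fetch(`${BASE}/api/v1/olm`, {
        method: "PUT",
        headers: { cookie }
    });
    if (!olm.ok) throw new Error(`createOlm failed: ${olm.status}`);
    return olm.json(); // assumed to contain the generated Olm credentials
}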

bruno/Olm/folder.bru (new file): 8 lines

@@ -0,0 +1,8 @@
meta {
name: Olm
seq: 15
}
auth {
mode: inherit
}


@@ -1,6 +1,6 @@
{
"version": "1",
"name": "Pangolin Saas",
"name": "Pangolin",
"type": "collection",
"ignore": [
"node_modules",


@@ -25,4 +25,3 @@ flags:
disable_user_create_org: true
allow_raw_resources: true
enable_integration_api: true
enable_clients: true


@@ -35,7 +35,7 @@ services:
- 80:80 # Port for traefik because of the network_mode
traefik:
image: traefik:v3.5
image: traefik:v3.6
container_name: traefik
restart: unless-stopped
network_mode: service:gerbil # Ports appear on the gerbil service
@@ -52,4 +52,4 @@ networks:
default:
driver: bridge
name: pangolin
enable_ipv6: true
enable_ipv6: true


@@ -35,7 +35,7 @@ services:
- 80:80
{{end}}
traefik:
image: docker.io/traefik:v3.5
image: docker.io/traefik:v3.6
container_name: traefik
restart: unless-stopped
{{if .InstallGerbil}}
@@ -59,4 +59,4 @@ networks:
default:
driver: bridge
name: pangolin
{{if .EnableIPv6}} enable_ipv6: true{{end}}
{{if .EnableIPv6}} enable_ipv6: true{{end}}


@@ -1080,11 +1080,11 @@
"actionDeleteIdpOrg": "IDP-Organisationsrichtlinie löschen",
"actionListIdpOrgs": "IDP-Organisationen auflisten",
"actionUpdateIdpOrg": "IDP-Organisation aktualisieren",
"actionCreateClient": "Client anlegen",
"actionDeleteClient": "Client löschen",
"actionUpdateClient": "Client aktualisieren",
"actionCreateClient": "Kunde erstellen",
"actionDeleteClient": "Kunde löschen",
"actionUpdateClient": "Kunde aktualisieren",
"actionListClients": "Clients auflisten",
"actionGetClient": "Clients abrufen",
"actionGetClient": "Kunde holen",
"actionCreateSiteResource": "Site-Ressource erstellen",
"actionDeleteSiteResource": "Site-Ressource löschen",
"actionGetSiteResource": "Site-Ressource abrufen",
@@ -1432,14 +1432,14 @@
},
"siteRequired": "Standort ist erforderlich.",
"olmTunnel": "Olm-Tunnel",
"olmTunnelDescription": "Nutzen Sie Olm für die Kundenverbindung",
"olmTunnelDescription": "Nutzen Sie Olm für die Client-Verbindung",
"errorCreatingClient": "Fehler beim Erstellen des Clients",
"clientDefaultsNotFound": "Standardeinstellungen des Clients nicht gefunden",
"createClient": "Client erstellen",
"createClientDescription": "Erstellen Sie einen neuen Client für die Verbindung zu Ihren Standorten.",
"seeAllClients": "Alle Clients anzeigen",
"clientInformation": "Client Informationen",
"clientNamePlaceholder": "Client Name",
"clientInformation": "Client-Informationen",
"clientNamePlaceholder": "Client-Name",
"address": "Adresse",
"subnetPlaceholder": "Subnetz",
"addressDescription": "Die Adresse, die dieser Client für die Verbindung verwenden wird.",
@@ -2110,7 +2110,6 @@
"selectedResources": "Ausgewählte Ressourcen",
"enableSelected": "Ausgewählte aktivieren",
"disableSelected": "Ausgewählte deaktivieren",
"checkSelectedStatus": "Status der Auswahl überprüfen",
"credentials": "Zugangsdaten",
"savecredentials": "Zugangsdaten speichern",
"regeneratecredentials": "Re-Key",
@@ -2136,5 +2135,6 @@
"niceIdUpdateErrorDescription": "Beim Aktualisieren der Nizza-ID ist ein Fehler aufgetreten.",
"niceIdCannotBeEmpty": "Nizza-ID darf nicht leer sein",
"enterIdentifier": "Identifikator eingeben",
"identifier": "Identifier"
"identifier": "Identifier",
"checkSelectedStatus": "Status der Auswahl überprüfen"
}


@@ -1,12 +1,12 @@
{
"setupCreate": "Create your organization, site, and resources",
"setupCreate": "Create the organization, site, and resources",
"setupNewOrg": "New Organization",
"setupCreateOrg": "Create Organization",
"setupCreateResources": "Create Resources",
"setupOrgName": "Organization Name",
"orgDisplayName": "This is the display name of your organization.",
"orgDisplayName": "This is the display name of the organization.",
"orgId": "Organization ID",
"setupIdentifierMessage": "This is the unique identifier for your organization. This is separate from the display name.",
"setupIdentifierMessage": "This is the unique identifier for the organization.",
"setupErrorIdentifier": "Organization ID is already taken. Please choose a different one.",
"componentsErrorNoMemberCreate": "You are not currently a member of any organizations. Create an organization to get started.",
"componentsErrorNoMember": "You are not currently a member of any organizations.",
@@ -50,10 +50,10 @@
"siteMessageRemove": "Once removed the site will no longer be accessible. All targets associated with the site will also be removed.",
"siteQuestionRemove": "Are you sure you want to remove the site from the organization?",
"siteManageSites": "Manage Sites",
"siteDescription": "Allow connectivity to your network through secure tunnels",
"siteDescription": "Create and manage sites to enable connectivity to private networks",
"siteCreate": "Create Site",
"siteCreateDescription2": "Follow the steps below to create and connect a new site",
"siteCreateDescription": "Create a new site to start connecting your resources",
"siteCreateDescription": "Create a new site to start connecting resources",
"close": "Close",
"siteErrorCreate": "Error creating site",
"siteErrorCreateKeyPair": "Key pair or site defaults not found",
@@ -74,7 +74,7 @@
"siteInstallNewt": "Install Newt",
"siteInstallNewtDescription": "Get Newt running on your system",
"WgConfiguration": "WireGuard Configuration",
"WgConfigurationDescription": "Use the following configuration to connect to your network",
"WgConfigurationDescription": "Use the following configuration to connect to the network",
"operatingSystem": "Operating System",
"commands": "Commands",
"recommended": "Recommended",
@@ -87,32 +87,32 @@
"siteUpdated": "Site updated",
"siteUpdatedDescription": "The site has been updated.",
"siteGeneralDescription": "Configure the general settings for this site",
"siteSettingDescription": "Configure the settings on your site",
"siteSettingDescription": "Configure the settings on the site",
"siteSetting": "{siteName} Settings",
"siteNewtTunnel": "Newt Tunnel (Recommended)",
"siteNewtTunnelDescription": "Easiest way to create an entrypoint into your network. No extra setup.",
"siteNewtTunnel": "Newt Site (Recommended)",
"siteNewtTunnelDescription": "Easiest way to create an entrypoint into any network. No extra setup.",
"siteWg": "Basic WireGuard",
"siteWgDescription": "Use any WireGuard client to establish a tunnel. Manual NAT setup required.",
"siteWgDescriptionSaas": "Use any WireGuard client to establish a tunnel. Manual NAT setup required.",
"siteLocalDescription": "Local resources only. No tunneling.",
"siteLocalDescriptionSaas": "Local resources only. No tunneling. Only available on remote nodes.",
"siteSeeAll": "See All Sites",
"siteTunnelDescription": "Determine how you want to connect to your site",
"siteNewtCredentials": "Newt Credentials",
"siteNewtCredentialsDescription": "This is how Newt will authenticate with the server",
"siteCredentialsSave": "Save Your Credentials",
"siteTunnelDescription": "Determine how you want to connect to the site",
"siteNewtCredentials": "Credentials",
"siteNewtCredentialsDescription": "This is how the site will authenticate with the server",
"siteCredentialsSave": "Save the Credentials",
"siteCredentialsSaveDescription": "You will only be able to see this once. Make sure to copy it to a secure place.",
"siteInfo": "Site Information",
"status": "Status",
"shareTitle": "Manage Share Links",
"shareDescription": "Create shareable links to grant temporary or permanent access to your resources",
"shareDescription": "Create shareable links to grant temporary or permanent access to proxy resources",
"shareSearch": "Search share links...",
"shareCreate": "Create Share Link",
"shareErrorDelete": "Failed to delete link",
"shareErrorDeleteMessage": "An error occurred deleting link",
"shareDeleted": "Link deleted",
"shareDeletedDescription": "The link has been deleted",
"shareTokenDescription": "Your access token can be passed in two ways: as a query parameter or in the request headers. These must be passed from the client on every request for authenticated access.",
"shareTokenDescription": "The access token can be passed in two ways: as a query parameter or in the request headers. These must be passed from the client on every request for authenticated access.",
"accessToken": "Access Token",
"usageExamples": "Usage Examples",
"tokenId": "Token ID",
@@ -121,7 +121,7 @@
"importantNote": "Important Note",
"shareImportantDescription": "For security reasons, using headers is recommended over query parameters when possible, as query parameters may be logged in server logs or browser history.",
"token": "Token",
"shareTokenSecurety": "Keep your access token secure. Do not share it in publicly accessible areas or client-side code.",
"shareTokenSecurety": "Keep the access token secure. Do not share it in publicly accessible areas or client-side code.",
"shareErrorFetchResource": "Failed to fetch resources",
"shareErrorFetchResourceDescription": "An error occurred while fetching the resources",
"shareErrorCreate": "Failed to create share link",
@@ -144,8 +144,10 @@
"expires": "Expires",
"never": "Never",
"shareErrorSelectResource": "Please select a resource",
"resourceTitle": "Manage Resources",
"resourceDescription": "Create secure proxies to your private applications",
"proxyResourceTitle": "Manage Proxy Resources",
"proxyResourceDescription": "Create and manage resources that are publicly accessible through a web browser",
"clientResourceTitle": "Manage Client Resources",
"clientResourceDescription": "Create and manage resources that are only accessible through a connected client",
"resourcesSearch": "Search resources...",
"resourceAdd": "Add Resource",
"resourceErrorDelte": "Error deleting resource",
@@ -155,9 +157,9 @@
"resourceMessageRemove": "Once removed, the resource will no longer be accessible. All targets associated with the resource will also be removed.",
"resourceQuestionRemove": "Are you sure you want to remove the resource from the organization?",
"resourceHTTP": "HTTPS Resource",
"resourceHTTPDescription": "Proxy requests to your app over HTTPS using a subdomain or base domain.",
"resourceHTTPDescription": "Proxy requests to the app over HTTPS using a subdomain or base domain.",
"resourceRaw": "Raw TCP/UDP Resource",
"resourceRawDescription": "Proxy requests to your app over TCP/UDP using a port number. This only works when sites are connected to nodes.",
"resourceRawDescription": "Proxy requests to the app over TCP/UDP using a port number. This only works when sites are connected to nodes.",
"resourceCreate": "Create Resource",
"resourceCreateDescription": "Follow the steps below to create a new resource",
"resourceSeeAll": "See All Resources",
@@ -171,22 +173,22 @@
"noCountryFound": "No country found.",
"siteSelectionDescription": "This site will provide connectivity to the target.",
"resourceType": "Resource Type",
"resourceTypeDescription": "Determine how you want to access your resource",
"resourceTypeDescription": "Determine how to access the resource",
"resourceHTTPSSettings": "HTTPS Settings",
"resourceHTTPSSettingsDescription": "Configure how your resource will be accessed over HTTPS",
"resourceHTTPSSettingsDescription": "Configure how the resource will be accessed over HTTPS",
"domainType": "Domain Type",
"subdomain": "Subdomain",
"baseDomain": "Base Domain",
"subdomnainDescription": "The subdomain where your resource will be accessible.",
"subdomnainDescription": "The subdomain where the resource will be accessible.",
"resourceRawSettings": "TCP/UDP Settings",
"resourceRawSettingsDescription": "Configure how your resource will be accessed over TCP/UDP. You map the resource to a port on the host Pangolin server, so you can access the resource from server-public-ip:mapped-port.",
"resourceRawSettingsDescription": "Configure how the resource will be accessed over TCP/UDP. You map the resource to a port on the host Pangolin server, so you can access the resource from server-public-ip:mapped-port.",
"protocol": "Protocol",
"protocolSelect": "Select a protocol",
"resourcePortNumber": "Port Number",
"resourcePortNumberDescription": "The external port number to proxy requests.",
"cancel": "Cancel",
"resourceConfig": "Configuration Snippets",
"resourceConfigDescription": "Copy and paste these configuration snippets to set up your TCP/UDP resource",
"resourceConfigDescription": "Copy and paste these configuration snippets to set up the TCP/UDP resource",
"resourceAddEntrypoints": "Traefik: Add Entrypoints",
"resourceExposePorts": "Gerbil: Expose Ports in Docker Compose",
"resourceLearnRaw": "Learn how to configure TCP/UDP resources",
@@ -202,14 +204,14 @@
"proxy": "Proxy",
"internal": "Internal",
"rules": "Rules",
"resourceSettingDescription": "Configure the settings on your resource",
"resourceSettingDescription": "Configure the settings on the resource",
"resourceSetting": "{resourceName} Settings",
"alwaysAllow": "Always Allow",
"alwaysDeny": "Always Deny",
"alwaysAllow": "Bypass Auth",
"alwaysDeny": "Block Access",
"passToAuth": "Pass to Auth",
"orgSettingsDescription": "Configure your organization's settings",
"orgSettingsDescription": "Configure the organization's settings",
"orgGeneralSettings": "Organization Settings",
"orgGeneralSettingsDescription": "Manage your organization details and configuration",
"orgGeneralSettingsDescription": "Manage the organization's details and configuration",
"saveGeneralSettings": "Save General Settings",
"saveSettings": "Save Settings",
"orgDangerZone": "Danger Zone",
@@ -232,7 +234,7 @@
"orgMissing": "Organization ID Missing",
"orgMissingMessage": "Unable to regenerate invitation without an organization ID.",
"accessUsersManage": "Manage Users",
"accessUsersDescription": "Invite users and add them to roles to manage access to your organization",
"accessUsersDescription": "Invite and manage users with access to this organization",
"accessUsersSearch": "Search users...",
"accessUserCreate": "Create User",
"accessUserRemove": "Remove User",
@@ -241,13 +243,13 @@
"role": "Role",
"nameRequired": "Name is required",
"accessRolesManage": "Manage Roles",
"accessRolesDescription": "Configure roles to manage access to your organization",
"accessRolesDescription": "Create and manage roles for users in the organization",
"accessRolesSearch": "Search roles...",
"accessRolesAdd": "Add Role",
"accessRoleDelete": "Delete Role",
"description": "Description",
"inviteTitle": "Open Invitations",
"inviteDescription": "Manage your invitations to other users",
"inviteDescription": "Manage invitations for other users to join the organization",
"inviteSearch": "Search invitations...",
"minutes": "Minutes",
"hours": "Hours",
@@ -261,13 +263,13 @@
"apiKeysErrorCreate": "Error creating API key",
"apiKeysErrorSetPermission": "Error setting permissions",
"apiKeysCreate": "Generate API Key",
"apiKeysCreateDescription": "Generate a new API key for your organization",
"apiKeysCreateDescription": "Generate a new API key for the organization",
"apiKeysGeneralSettings": "Permissions",
"apiKeysGeneralSettingsDescription": "Determine what this API key can do",
"apiKeysList": "Your API Key",
"apiKeysSave": "Save Your API Key",
"apiKeysList": "New API Key",
"apiKeysSave": "Save the API Key",
"apiKeysSaveDescription": "You will only be able to see this once. Make sure to copy it to a secure place.",
"apiKeysInfo": "Your API key is:",
"apiKeysInfo": "The API key is:",
"apiKeysConfirmCopy": "I have copied the API key",
"generate": "Generate",
"done": "Done",
@@ -424,7 +426,7 @@
"userCreated": "User created",
"userCreatedDescription": "The user has been successfully created.",
"userTypeInternal": "Internal User",
"userTypeInternalDescription": "Invite a user to join your organization directly.",
"userTypeInternalDescription": "Invite a user to join the organization directly.",
"userTypeExternal": "External User",
"userTypeExternalDescription": "Create a user with an external identity provider.",
"accessUserCreateDescription": "Follow the steps below to create a new user",
@@ -468,13 +470,13 @@
"accessControlsSubmit": "Save Access Controls",
"roles": "Roles",
"accessUsersRoles": "Manage Users & Roles",
"accessUsersRolesDescription": "Invite users and add them to roles to manage access to your organization",
"accessUsersRolesDescription": "Invite users and add them to roles to manage access to the organization",
"key": "Key",
"createdAt": "Created At",
"proxyErrorInvalidHeader": "Invalid custom Host Header value. Use domain name format, or save empty to unset custom Host Header.",
"proxyErrorTls": "Invalid TLS Server Name. Use domain name format, or save empty to remove the TLS Server Name.",
"proxyEnableSSL": "Enable SSL",
"proxyEnableSSLDescription": "Enable SSL/TLS encryption for secure HTTPS connections to your targets.",
"proxyEnableSSLDescription": "Enable SSL/TLS encryption for secure HTTPS connections to the targets.",
"target": "Target",
"configureTarget": "Configure Targets",
"targetErrorFetch": "Failed to fetch targets",
@@ -490,29 +492,29 @@
"targetsErrorUpdate": "Failed to update targets",
"targetsErrorUpdateDescription": "An error occurred while updating targets",
"targetTlsUpdate": "TLS settings updated",
"targetTlsUpdateDescription": "Your TLS settings have been updated successfully",
"targetTlsUpdateDescription": "TLS settings have been updated successfully",
"targetErrorTlsUpdate": "Failed to update TLS settings",
"targetErrorTlsUpdateDescription": "An error occurred while updating TLS settings",
"proxyUpdated": "Proxy settings updated",
"proxyUpdatedDescription": "Your proxy settings have been updated successfully",
"proxyUpdatedDescription": "Proxy settings have been updated successfully",
"proxyErrorUpdate": "Failed to update proxy settings",
"proxyErrorUpdateDescription": "An error occurred while updating proxy settings",
"targetAddr": "IP / Hostname",
"targetPort": "Port",
"targetProtocol": "Protocol",
"targetTlsSettings": "Secure Connection Configuration",
"targetTlsSettingsDescription": "Configure SSL/TLS settings for your resource",
"targetTlsSettingsDescription": "Configure SSL/TLS settings for the resource",
"targetTlsSettingsAdvanced": "Advanced TLS Settings",
"targetTlsSni": "TLS Server Name",
"targetTlsSniDescription": "The TLS Server Name to use for SNI. Leave empty to use the default.",
"targetTlsSubmit": "Save Settings",
"targets": "Targets Configuration",
"targetsDescription": "Set up targets to route traffic to your backend services",
"targetsDescription": "Set up targets to route traffic to backend services",
"targetStickySessions": "Enable Sticky Sessions",
"targetStickySessionsDescription": "Keep connections on the same backend target for their entire session.",
"methodSelect": "Select method",
"targetSubmit": "Add Target",
"targetNoOne": "This resource doesn't have any targets. Add a target to configure where to send requests to your backend.",
"targetNoOne": "This resource doesn't have any targets. Add a target to configure where to send requests to the backend.",
"targetNoOneDescription": "Adding more than one target above will enable load balancing.",
"targetsSubmit": "Save Targets",
"addTarget": "Add Target",
@@ -530,7 +532,7 @@
"tlsServerNameDescription": "The TLS server name to use for SNI",
"save": "Save",
"proxyAdditional": "Additional Proxy Settings",
"proxyAdditionalDescription": "Configure how your resource handles proxy settings",
"proxyAdditionalDescription": "Configure how the resource handles proxy settings",
"proxyCustomHeader": "Custom Host Header",
"proxyCustomHeaderDescription": "The host header to set when proxying requests. Leave empty to use the default.",
"proxyAdditionalSubmit": "Save Proxy Settings",
@@ -570,7 +572,7 @@
"rulesMatchType": "Match Type",
"value": "Value",
"rulesAbout": "About Rules",
"rulesAboutDescription": "Rules allow you to control access to your resource based on a set of criteria. You can create rules to allow or deny access based on IP address or URL path.",
"rulesAboutDescription": "Rules allow you to control access to the resource based on a set of criteria. You can create rules to allow or deny access based on IP address or URL path.",
"rulesActions": "Actions",
"rulesActionAlwaysAllow": "Always Allow: Bypass all authentication methods",
"rulesActionAlwaysDeny": "Always Deny: Block all requests; no authentication can be attempted",
@@ -582,7 +584,7 @@
"rulesEnable": "Enable Rules",
"rulesEnableDescription": "Enable or disable rule evaluation for this resource",
"rulesResource": "Resource Rules Configuration",
"rulesResourceDescription": "Configure rules to control access to your resource",
"rulesResourceDescription": "Configure rules to control access to the resource",
"ruleSubmit": "Add Rule",
"rulesNoOne": "No rules. Add a rule using the form.",
"rulesOrder": "Rules are evaluated by priority in ascending order.",
@@ -598,7 +600,7 @@
"none": "None",
"unknown": "Unknown",
"resources": "Resources",
"resourcesDescription": "Resources are proxies to applications running on your private network. Create a resource for any HTTP/HTTPS or raw TCP/UDP service on your private network. Each resource must be connected to a site to enable private, secure connectivity through an encrypted WireGuard tunnel.",
"resourcesDescription": "Resources are proxies to applications running on the private network. Create a resource for any HTTP/HTTPS or raw TCP/UDP service on your private network. Each resource must be connected to a site to enable private, secure connectivity through an encrypted WireGuard tunnel.",
"resourcesWireGuardConnect": "Secure connectivity with WireGuard encryption",
"resourcesMultipleAuthenticationMethods": "Configure multiple authentication methods",
"resourcesUsersRolesAccess": "User and role-based access control",
@@ -609,7 +611,7 @@
"resourceSelect": "Select resource",
"shareLinks": "Share Links",
"share": "Shareable Links",
"shareDescription2": "Create shareable links to your resources. Links provide temporary or unlimited access to your resource. You can configure the expiration duration of the link when you create one.",
"shareDescription2": "Create shareable links to resources. Links provide temporary or unlimited access to your resource. You can configure the expiration duration of the link when you create one.",
"shareEasyCreate": "Easy to create and share",
"shareConfigurableExpirationDuration": "Configurable expiration duration",
"shareSecureAndRevocable": "Secure and revocable",
@@ -619,19 +621,19 @@
"unknownCommand": "Unknown command",
"newtErrorFetchReleases": "Failed to fetch release info: {err}",
"newtErrorFetchLatest": "Error fetching latest release: {err}",
"newtEndpoint": "Newt Endpoint",
"newtId": "Newt ID",
"newtSecretKey": "Newt Secret Key",
"newtEndpoint": "Endpoint",
"newtId": "ID",
"newtSecretKey": "Secret",
"architecture": "Architecture",
"sites": "Sites",
"siteWgAnyClients": "Use any WireGuard client to connect. You will have to address your internal resources using the peer IP.",
"siteWgAnyClients": "Use any WireGuard client to connect. You will have to address internal resources using the peer IP.",
"siteWgCompatibleAllClients": "Compatible with all WireGuard clients",
"siteWgManualConfigurationRequired": "Manual configuration required",
"userErrorNotAdminOrOwner": "User is not an admin or owner",
"pangolinSettings": "Settings - Pangolin",
"accessRoleYour": "Your role:",
"accessRoleSelect2": "Select a role",
"accessUserSelect": "Select a user",
"accessRoleSelect2": "Select roles",
"accessUserSelect": "Select users",
"otpEmailEnter": "Enter an email",
"otpEmailEnterDescription": "Press enter to add an email after typing it in the input field.",
"otpEmailErrorInvalid": "Invalid email address. Wildcard (*) must be the entire local part.",
@@ -683,7 +685,7 @@
"resourcePincodeSetupTitle": "Set Pincode",
"resourcePincodeSetupTitleDescription": "Set a pincode to protect this resource",
"resourceRoleDescription": "Admins can always access this resource.",
"resourceUsersRoles": "Users & Roles",
"resourceUsersRoles": "Access Controls",
"resourceUsersRolesDescription": "Configure which users and roles can visit this resource",
"resourceUsersRolesSubmit": "Save Users & Roles",
"resourceWhitelistSave": "Saved successfully",
@@ -779,15 +781,15 @@
"idpOidcConfigure": "OAuth2/OIDC Configuration",
"idpOidcConfigureDescription": "Configure the OAuth2/OIDC provider endpoints and credentials",
"idpClientId": "Client ID",
"idpClientIdDescription": "The OAuth2 client ID from your identity provider",
"idpClientIdDescription": "The OAuth2 client ID from the identity provider",
"idpClientSecret": "Client Secret",
"idpClientSecretDescription": "The OAuth2 client secret from your identity provider",
"idpClientSecretDescription": "The OAuth2 client secret from the identity provider",
"idpAuthUrl": "Authorization URL",
"idpAuthUrlDescription": "The OAuth2 authorization endpoint URL",
"idpTokenUrl": "Token URL",
"idpTokenUrlDescription": "The OAuth2 token endpoint URL",
"idpOidcConfigureAlert": "Important Information",
"idpOidcConfigureAlertDescription": "After creating the identity provider, you will need to configure the callback URL in your identity provider's settings. The callback URL will be provided after successful creation.",
"idpOidcConfigureAlertDescription": "After creating the identity provider, you will need to configure the callback URL in the identity provider's settings. The callback URL will be provided after successful creation.",
"idpToken": "Token Configuration",
"idpTokenDescription": "Configure how to extract user information from the ID token",
"idpJmespathAbout": "About JMESPath",
@@ -804,7 +806,7 @@
"idpSubmit": "Create Identity Provider",
"orgPolicies": "Organization Policies",
"idpSettings": "{idpName} Settings",
"idpCreateSettingsDescription": "Configure the settings for your identity provider",
"idpCreateSettingsDescription": "Configure the settings for the identity provider",
"roleMapping": "Role Mapping",
"orgMapping": "Organization Mapping",
"orgPoliciesSearch": "Search organization policies...",
@@ -839,7 +841,7 @@
"idpUpdatedDescription": "Identity provider updated successfully",
"redirectUrl": "Redirect URL",
"redirectUrlAbout": "About Redirect URL",
"redirectUrlAboutDescription": "This is the URL to which users will be redirected after authentication. You need to configure this URL in your identity provider settings.",
"redirectUrlAboutDescription": "This is the URL to which users will be redirected after authentication. You need to configure this URL in the identity provider's settings.",
"pangolinAuth": "Auth - Pangolin",
"verificationCodeLengthRequirements": "Your verification code must be 8 characters.",
"errorOccurred": "An error occurred",
@@ -922,6 +924,10 @@
"passwordResetSent": "We'll send a password reset code to this email address.",
"passwordResetCode": "Reset Code",
"passwordResetCodeDescription": "Check your email for the reset code.",
"generatePasswordResetCode": "Generate Password Reset Code",
"passwordResetCodeGenerated": "Password Reset Code Generated",
"passwordResetCodeGeneratedDescription": "Share this code with the user. They can use it to reset their password.",
"passwordResetUrl": "Reset URL",
"passwordNew": "New Password",
"passwordNewConfirm": "Confirm New Password",
"changePassword": "Change Password",
@@ -939,6 +945,9 @@
"pincodeAuth": "Authenticator Code",
"pincodeSubmit2": "Submit Code",
"passwordResetSubmit": "Request Reset",
"passwordResetAlreadyHaveCode": "Enter Password Reset Code",
"passwordResetSmtpRequired": "Please contact your administrator",
"passwordResetSmtpRequiredDescription": "A password reset code is required to reset your password. Please contact your administrator for assistance.",
"passwordBack": "Back to Password",
"loginBack": "Go back to log in",
"signup": "Sign up",
@@ -1104,12 +1113,15 @@
"actionListSiteResources": "List Site Resources",
"actionUpdateSiteResource": "Update Site Resource",
"actionListInvitations": "List Invitations",
"actionExportLogs": "Export Logs",
"actionViewLogs": "View Logs",
"noneSelected": "None selected",
"orgNotFound2": "No organizations found.",
"searchProgress": "Search...",
"create": "Create",
"orgs": "Organizations",
"loginError": "An error occurred while logging in",
"loginRequiredForDevice": "Login is required to authenticate your device.",
"passwordForgot": "Forgot your password?",
"otpAuth": "Two-Factor Authentication",
"otpAuthDescription": "Enter the code from your authenticator app or one of your single-use backup codes.",
@@ -1164,17 +1176,23 @@
"sidebarHome": "Home",
"sidebarSites": "Sites",
"sidebarResources": "Resources",
"sidebarProxyResources": "Proxy Resources",
"sidebarClientResources": "Client Resources",
"sidebarAccessControl": "Access Control",
"sidebarLogsAndAnalytics": "Logs & Analytics",
"sidebarUsers": "Users",
"sidebarAdmin": "Admin",
"sidebarInvitations": "Invitations",
"sidebarRoles": "Roles",
"sidebarShareableLinks": "Shareable Links",
"sidebarShareableLinks": "Links",
"sidebarApiKeys": "API Keys",
"sidebarSettings": "Settings",
"sidebarAllUsers": "All Users",
"sidebarIdentityProviders": "Identity Providers",
"sidebarLicense": "License",
"sidebarClients": "Clients",
"sidebarUserDevices": "User Devices",
"sidebarMachineClients": "Machine Clients",
"sidebarDomains": "Domains",
"sidebarGeneral": "General",
"sidebarLogAndAnalytics": "Log & Analytics",
@@ -1191,7 +1209,7 @@
"blueprintDetailsDescription": "See the result of the applied blueprint and any errors that occurred",
"blueprintInfo": "Blueprint Information",
"message": "Message",
"blueprintContentsDescription": "Define the YAML content describing your infrastructure",
"blueprintContentsDescription": "Define the YAML content describing the infrastructure",
"blueprintErrorCreateDescription": "An error occurred when applying the blueprint",
"blueprintErrorCreate": "Error creating blueprint",
"searchBlueprintProgress": "Search blueprints...",
@@ -1247,15 +1265,15 @@
"loading": "Loading",
"restart": "Restart",
"domains": "Domains",
"domainsDescription": "Manage domains for your organization",
"domainsDescription": "Create and manage domains available in the organization",
"domainsSearch": "Search domains...",
"domainAdd": "Add Domain",
"domainAddDescription": "Register a new domain with your organization",
"domainAddDescription": "Register a new domain with to the organization",
"domainCreate": "Create Domain",
"domainCreatedDescription": "Domain created successfully",
"domainDeletedDescription": "Domain deleted successfully",
"domainQuestionRemove": "Are you sure you want to remove the domain from your account?",
"domainMessageRemove": "Once removed, the domain will no longer be associated with your account.",
"domainQuestionRemove": "Are you sure you want to remove the domain?",
"domainMessageRemove": "Once removed, the domain will no longer be associated with the organization.",
"domainConfirmDelete": "Confirm Delete Domain",
"domainDelete": "Delete Domain",
"domain": "Domain",
@@ -1274,7 +1292,7 @@
"pending": "Pending",
"sidebarBilling": "Billing",
"billing": "Billing",
"orgBillingDescription": "Manage your billing information and subscriptions",
"orgBillingDescription": "Manage billing information and subscriptions",
"github": "GitHub",
"pangolinHosted": "Pangolin Hosted",
"fossorial": "Fossorial",
@@ -1317,7 +1335,7 @@
"domainPickerSortAsc": "A-Z",
"domainPickerSortDesc": "Z-A",
"domainPickerCheckingAvailability": "Checking availability...",
"domainPickerNoMatchingDomains": "No matching domains found. Try a different domain or check your organization's domain settings.",
"domainPickerNoMatchingDomains": "No matching domains found. Try a different domain or check the organization's domain settings.",
"domainPickerOrganizationDomains": "Organization Domains",
"domainPickerProvidedDomains": "Provided Domains",
"domainPickerSubdomain": "Subdomain: {subdomain}",
@@ -1351,7 +1369,7 @@
"billingModifySubscription": "Modify Subscription",
"billingStartSubscription": "Start Subscription",
"billingRecurringCharge": "Recurring Charge",
"billingManageSubscriptionSettings": "Manage your subscription settings and preferences",
"billingManageSubscriptionSettings": "Manage subscription settings and preferences",
"billingNoActiveSubscription": "You don't have an active subscription. Start your subscription to increase usage limits.",
"billingFailedToLoadSubscription": "Failed to load subscription",
"billingFailedToLoadUsage": "Failed to load usage",
@@ -1362,9 +1380,9 @@
"billingPortalError": "Portal Error",
"billingDataUsageInfo": "You're charged for all data transferred through your secure tunnels when connected to the cloud. This includes both incoming and outgoing traffic across all your sites. When you reach your limit, your sites will disconnect until you upgrade your plan or reduce usage. Data is not charged when using nodes.",
"billingOnlineTimeInfo": "You're charged based on how long your sites stay connected to the cloud. For example, 44,640 minutes equals one site running 24/7 for a full month. When you reach your limit, your sites will disconnect until you upgrade your plan or reduce usage. Time is not charged when using nodes.",
"billingUsersInfo": "You're charged for each user in your organization. Billing is calculated daily based on the number of active user accounts in your org.",
"billingDomainInfo": "You're charged for each domain in your organization. Billing is calculated daily based on the number of active domain accounts in your org.",
"billingRemoteExitNodesInfo": "You're charged for each managed Node in your organization. Billing is calculated daily based on the number of active managed Nodes in your org.",
"billingUsersInfo": "You're charged for each user in the organization. Billing is calculated daily based on the number of active user accounts in your org.",
"billingDomainInfo": "You're charged for each domain in the organization. Billing is calculated daily based on the number of active domain accounts in your org.",
"billingRemoteExitNodesInfo": "You're charged for each managed Node in the organization. Billing is calculated daily based on the number of active managed Nodes in your org.",
"domainNotFound": "Domain Not Found",
"domainNotFoundDescription": "This resource is disabled because the domain no longer exists our system. Please set a new domain for this resource.",
"failed": "Failed",
@@ -1456,23 +1474,23 @@
"errorCreatingClient": "Error creating client",
"clientDefaultsNotFound": "Client defaults not found",
"createClient": "Create Client",
"createClientDescription": "Create a new client for connecting to your sites",
"createClientDescription": "Create a new client to access private resources",
"seeAllClients": "See All Clients",
"clientInformation": "Client Information",
"clientNamePlaceholder": "Client name",
"address": "Address",
"subnetPlaceholder": "Subnet",
"addressDescription": "The address that this client will use for connectivity",
"addressDescription": "The internal address of the client. Must fall within the organization's subnet.",
"selectSites": "Select sites",
"sitesDescription": "The client will have connectivity to the selected sites",
"clientInstallOlm": "Install Olm",
"clientInstallOlmDescription": "Get Olm running on your system",
"clientOlmCredentials": "Olm Credentials",
"clientOlmCredentialsDescription": "This is how Olm will authenticate with the server",
"olmEndpoint": "Olm Endpoint",
"olmId": "Olm ID",
"olmSecretKey": "Olm Secret Key",
"clientCredentialsSave": "Save Your Credentials",
"clientOlmCredentials": "Credentials",
"clientOlmCredentialsDescription": "This is how the client will authenticate with the server",
"olmEndpoint": "Endpoint",
"olmId": "ID",
"olmSecretKey": "Secret",
"clientCredentialsSave": "Save the Credentials",
"clientCredentialsSaveDescription": "You will only be able to see this once. Make sure to copy it to a secure place.",
"generalSettingsDescription": "Configure the general settings for this client",
"clientUpdated": "Client updated",
@@ -1483,9 +1501,7 @@
"sitesFetchError": "An error occurred while fetching sites.",
"olmErrorFetchReleases": "An error occurred while fetching Olm releases.",
"olmErrorFetchLatest": "An error occurred while fetching the latest Olm release.",
"remoteSubnets": "Remote Subnets",
"enterCidrRange": "Enter CIDR range",
"remoteSubnetsDescription": "Add CIDR ranges that can be accessed from this site remotely using clients. Use format like 10.0.0.0/24. This ONLY applies to VPN client connectivity.",
"resourceEnableProxy": "Enable Public Proxy",
"resourceEnableProxyDescription": "Enable public proxying to this resource. This allows access to the resource from outside the network through the cloud on an open port. Requires Traefik config.",
"externalProxyEnabled": "External Proxy Enabled",
@@ -1503,14 +1519,15 @@
"enableHealthChecksDescription": "Monitor the health of this target. You can monitor a different endpoint than the target if required.",
"healthScheme": "Method",
"healthSelectScheme": "Select Method",
"healthCheckPortInvalid": "Health check port must be between 1 and 65535",
"healthCheckPath": "Path",
"healthHostname": "IP / Host",
"healthPort": "Port",
"healthCheckPathDescription": "The path to check for health status.",
"healthyIntervalSeconds": "Healthy Interval",
"unhealthyIntervalSeconds": "Unhealthy Interval",
"healthyIntervalSeconds": "Healthy Interval (sec)",
"unhealthyIntervalSeconds": "Unhealthy Interval (sec)",
"IntervalSeconds": "Healthy Interval",
"timeoutSeconds": "Timeout",
"timeoutSeconds": "Timeout (sec)",
"timeIsInSeconds": "Time is in seconds",
"retryAttempts": "Retry Attempts",
"expectedResponseCodes": "Expected Response Codes",
@@ -1546,12 +1563,12 @@
"resourceEditDomain": "Edit Domain",
"siteName": "Site Name",
"proxyPort": "Port",
"resourcesTableProxyResources": "Proxy Resources",
"resourcesTableClientResources": "Client Resources",
"resourcesTableProxyResources": "Public",
"resourcesTableClientResources": "Private",
"resourcesTableNoProxyResourcesFound": "No proxy resources found.",
"resourcesTableNoInternalResourcesFound": "No internal resources found.",
"resourcesTableDestination": "Destination",
"resourcesTableTheseResourcesForUseWith": "These resources are for use with",
"resourcesTableAlias": "Alias",
"resourcesTableClients": "Clients",
"resourcesTableAndOnlyAccessibleInternally": "and are only accessible internally when connected with a client.",
"resourcesTableNoTargets": "No targets",
@@ -1561,7 +1578,7 @@
"resourcesTableUnknown": "Unknown",
"resourcesTableNotMonitored": "Not monitored",
"editInternalResourceDialogEditClientResource": "Edit Client Resource",
"editInternalResourceDialogUpdateResourceProperties": "Update the resource properties and target configuration for {resourceName}.",
"editInternalResourceDialogUpdateResourceProperties": "Update the resource configuration and access controls for {resourceName}",
"editInternalResourceDialogResourceProperties": "Resource Properties",
"editInternalResourceDialogName": "Name",
"editInternalResourceDialogProtocol": "Protocol",
@@ -1580,11 +1597,22 @@
"editInternalResourceDialogInvalidIPAddressFormat": "Invalid IP address format",
"editInternalResourceDialogDestinationPortMin": "Destination port must be at least 1",
"editInternalResourceDialogDestinationPortMax": "Destination port must be less than 65536",
"editInternalResourceDialogPortModeRequired": "Protocol, proxy port, and destination port are required for port mode",
"editInternalResourceDialogMode": "Mode",
"editInternalResourceDialogModePort": "Port",
"editInternalResourceDialogModeHost": "Host",
"editInternalResourceDialogModeCidr": "CIDR",
"editInternalResourceDialogDestination": "Destination",
"editInternalResourceDialogDestinationHostDescription": "The IP address or hostname of the resource on the site's network.",
"editInternalResourceDialogDestinationIPDescription": "The IP or hostname address of the resource on the site's network.",
"editInternalResourceDialogDestinationCidrDescription": "The CIDR range of the resource on the site's network.",
"editInternalResourceDialogAlias": "Alias",
"editInternalResourceDialogAliasDescription": "An optional internal DNS alias for this resource.",
"createInternalResourceDialogNoSitesAvailable": "No Sites Available",
"createInternalResourceDialogNoSitesAvailableDescription": "You need to have at least one Newt site with a subnet configured to create internal resources.",
"createInternalResourceDialogClose": "Close",
"createInternalResourceDialogCreateClientResource": "Create Client Resource",
"createInternalResourceDialogCreateClientResourceDescription": "Create a new resource that will be accessible to clients connected to the selected site.",
"createInternalResourceDialogCreateClientResourceDescription": "Create a new resource that will only be accessible to clients connected to the organization",
"createInternalResourceDialogResourceProperties": "Resource Properties",
"createInternalResourceDialogName": "Name",
"createInternalResourceDialogSite": "Site",
@@ -1613,11 +1641,22 @@
"createInternalResourceDialogInvalidIPAddressFormat": "Invalid IP address format",
"createInternalResourceDialogDestinationPortMin": "Destination port must be at least 1",
"createInternalResourceDialogDestinationPortMax": "Destination port must be less than 65536",
"createInternalResourceDialogPortModeRequired": "Protocol, proxy port, and destination port are required for port mode",
"createInternalResourceDialogMode": "Mode",
"createInternalResourceDialogModePort": "Port",
"createInternalResourceDialogModeHost": "Host",
"createInternalResourceDialogModeCidr": "CIDR",
"createInternalResourceDialogDestination": "Destination",
"createInternalResourceDialogDestinationHostDescription": "The IP address or hostname of the resource on the site's network.",
"createInternalResourceDialogDestinationCidrDescription": "The CIDR range of the resource on the site's network.",
"createInternalResourceDialogAlias": "Alias",
"createInternalResourceDialogAliasDescription": "An optional internal DNS alias for this resource.",
"siteConfiguration": "Configuration",
"siteAcceptClientConnections": "Accept Client Connections",
"siteAcceptClientConnectionsDescription": "Allow other devices to connect through this Newt instance as a gateway using clients.",
"siteAddress": "Site Address",
"siteAddressDescription": "Specify the IP address of the host for clients to connect to. This is the internal address of the site in the Pangolin network for clients to address. Must fall within the Org subnet.",
"siteAcceptClientConnectionsDescription": "Allow user devices and clients to access resources on this site. This can be changed later.",
"siteAddress": "Site Address (Advanced)",
"siteAddressDescription": "The internal address of the site. Must fall within the organization's subnet.",
"siteNameDescription": "The display name of the site that can be changed later.",
"autoLoginExternalIdp": "Auto Login with External IDP",
"autoLoginExternalIdpDescription": "Immediately redirect the user to the external IDP for authentication.",
"selectIdp": "Select IDP",
@@ -1631,7 +1670,7 @@
"autoLoginErrorNoRedirectUrl": "No redirect URL received from the identity provider.",
"autoLoginErrorGeneratingUrl": "Failed to generate authentication URL.",
"remoteExitNodeManageRemoteExitNodes": "Remote Nodes",
"remoteExitNodeDescription": "Self-host one or more remote nodes to extend your network connectivity and reduce reliance on the cloud",
"remoteExitNodeDescription": "Self-host one or more remote nodes to extend network connectivity and reduce reliance on the cloud",
"remoteExitNodes": "Nodes",
"searchRemoteExitNodes": "Search nodes...",
"remoteExitNodeAdd": "Add Node",
@@ -1643,11 +1682,11 @@
"sidebarRemoteExitNodes": "Remote Nodes",
"remoteExitNodeCreate": {
"title": "Create Node",
"description": "Create a new node to extend your network connectivity",
"description": "Create a new node to extend network connectivity",
"viewAllButton": "View All Nodes",
"strategy": {
"title": "Creation Strategy",
"description": "Choose this to manually configure your node or generate new credentials.",
"description": "Choose this to manually configure the node or generate new credentials.",
"adopt": {
"title": "Adopt Node",
"description": "Choose this if you already have the credentials for the node."
@@ -1668,7 +1707,7 @@
},
"generate": {
"title": "Generated Credentials",
"description": "Use these generated credentials to configure your node",
"description": "Use these generated credentials to configure the node",
"nodeIdTitle": "Node ID",
"secretTitle": "Secret",
"saveCredentialsTitle": "Add Credentials to Config",
@@ -1744,16 +1783,16 @@
"idpTypeLabel": "Identity Provider Type",
"roleMappingExpressionPlaceholder": "e.g., contains(groups, 'admin') && 'Admin' || 'Member'",
"idpGoogleConfiguration": "Google Configuration",
"idpGoogleConfigurationDescription": "Configure your Google OAuth2 credentials",
"idpGoogleClientIdDescription": "Your Google OAuth2 Client ID",
"idpGoogleClientSecretDescription": "Your Google OAuth2 Client Secret",
"idpGoogleConfigurationDescription": "Configure the Google OAuth2 credentials",
"idpGoogleClientIdDescription": "Google OAuth2 Client ID",
"idpGoogleClientSecretDescription": "Google OAuth2 Client Secret",
"idpAzureConfiguration": "Azure Entra ID Configuration",
"idpAzureConfigurationDescription": "Configure your Azure Entra ID OAuth2 credentials",
"idpAzureConfigurationDescription": "Configure Azure Entra ID OAuth2 credentials",
"idpTenantId": "Tenant ID",
"idpTenantIdPlaceholder": "your-tenant-id",
"idpAzureTenantIdDescription": "Your Azure tenant ID (found in Azure Active Directory overview)",
"idpAzureClientIdDescription": "Your Azure App Registration Client ID",
"idpAzureClientSecretDescription": "Your Azure App Registration Client Secret",
"idpTenantIdPlaceholder": "tenant-id",
"idpAzureTenantIdDescription": "Azure tenant ID (found in Azure Active Directory overview)",
"idpAzureClientIdDescription": "Azure App Registration Client ID",
"idpAzureClientSecretDescription": "Azure App Registration Client Secret",
"idpGoogleTitle": "Google",
"idpGoogleAlt": "Google",
"idpAzureTitle": "Azure Entra ID",
@@ -1761,14 +1800,14 @@
"idpGoogleConfigurationTitle": "Google Configuration",
"idpAzureConfigurationTitle": "Azure Entra ID Configuration",
"idpTenantIdLabel": "Tenant ID",
"idpAzureClientIdDescription2": "Your Azure App Registration Client ID",
"idpAzureClientSecretDescription2": "Your Azure App Registration Client Secret",
"idpAzureClientIdDescription2": "Azure App Registration Client ID",
"idpAzureClientSecretDescription2": "Azure App Registration Client Secret",
"idpGoogleDescription": "Google OAuth2/OIDC provider",
"idpAzureDescription": "Microsoft Azure OAuth2/OIDC provider",
"subnet": "Subnet",
"subnetDescription": "The subnet for this organization's network configuration.",
"authPage": "Auth Page",
"authPageDescription": "Configure the auth page for your organization",
"authPageDescription": "Configure the auth page for the organization",
"authPageDomain": "Auth Page Domain",
"authPageBranding": "Branding",
"authPageBrandingDescription": "Configure the branding for the auth page for your organization",
@@ -1798,7 +1837,7 @@
"setAuthPageDomain": "Set Auth Page Domain",
"failedToFetchCertificate": "Failed to fetch certificate",
"failedToRestartCertificate": "Failed to restart certificate",
"addDomainToEnableCustomAuthPages": "Add a domain to enable custom authentication pages for your organization",
"addDomainToEnableCustomAuthPages": "Add a domain to enable custom authentication pages for the organization",
"selectDomainForOrgAuthPage": "Select a domain for the organization's authentication page",
"domainPickerProvidedDomain": "Provided Domain",
"domainPickerFreeProvidedDomain": "Free Provided Domain",
@@ -1813,7 +1852,7 @@
"domainPickerInvalidSubdomainCannotMakeValid": "\"{sub}\" could not be made valid for {domain}.",
"domainPickerSubdomainSanitized": "Subdomain sanitized",
"domainPickerSubdomainCorrected": "\"{sub}\" was corrected to \"{sanitized}\"",
"orgAuthSignInTitle": "Sign in to your organization",
"orgAuthSignInTitle": "Sign in to the organization",
"orgAuthChooseIdpDescription": "Choose your identity provider to continue",
"orgAuthNoIdpConfigured": "This organization doesn't have any identity providers configured. You can log in with your Pangolin identity instead.",
"orgAuthSignInWithPangolin": "Sign in with Pangolin",
@@ -1831,7 +1870,7 @@
"enableTwoFactorAuthentication": "Enable two-factor authentication",
"completeSecuritySteps": "Complete Security Steps",
"securitySettings": "Security Settings",
"securitySettingsDescription": "Configure security policies for your organization",
"securitySettingsDescription": "Configure security policies for the organization",
"requireTwoFactorForAllUsers": "Require Two-Factor Authentication for All Users",
"requireTwoFactorDescription": "When enabled, all internal users in this organization must have two-factor authentication enabled to access the organization.",
"requireTwoFactorDisabledDescription": "This feature requires a valid license (Enterprise) or active subscription (SaaS)",
@@ -1894,8 +1933,12 @@
"enterpriseEdition": "Enterprise Edition",
"unlicensed": "Unlicensed",
"beta": "Beta",
"manageClients": "Manage Clients",
"manageClientsDescription": "Clients are devices that can connect to your sites",
"manageUserDevices": "User Devices",
"manageUserDevicesDescription": "View and manage devices that users use to privately connect to resources",
"manageMachineClients": "Manage Machine Clients",
"manageMachineClientsDescription": "Create and manage clients that servers and systems use to privately connect to resources",
"clientsTableUserClients": "User",
"clientsTableMachineClients": "Machine",
"licenseTableValidUntil": "Valid Until",
"saasLicenseKeysSettingsTitle": "Enterprise Licenses",
"saasLicenseKeysSettingsDescription": "Generate and manage Enterprise license keys for self-hosted Pangolin instances",
@@ -2095,7 +2138,7 @@
"preferWildcardCert": "Prefer Wildcard Certificate",
"unverified": "Unverified",
"domainSetting": "Domain Settings",
"domainSettingDescription": "Configure settings for your domain",
"domainSettingDescription": "Configure settings for the domain",
"preferWildcardCertDescription": "Attempt to generate a wildcard certificate (require a properly configured certificate resolver).",
"recordName": "Record Name",
"auto": "Auto",
@@ -2109,15 +2152,15 @@
"olmUpdateAvailableInfo": "An updated version of Olm is available. Please update to the latest version for the best experience.",
"client": "Client",
"proxyProtocol": "Proxy Protocol Settings",
"proxyProtocolDescription": "Configure Proxy Protocol to preserve client IP addresses for TCP/UDP services.",
"proxyProtocolDescription": "Configure Proxy Protocol to preserve client IP addresses for TCP services.",
"enableProxyProtocol": "Enable Proxy Protocol",
"proxyProtocolInfo": "Preserve client IP addresses for TCP/UDP backends",
"proxyProtocolInfo": "Preserve client IP addresses for TCP backends",
"proxyProtocolVersion": "Proxy Protocol Version",
"version1": " Version 1 (Recommended)",
"version2": "Version 2",
"versionDescription": "Version 1 is text-based and widely supported. Version 2 is binary and more efficient but less compatible. Make sure servers transport is added to dynamic config.",
"warning": "Warning",
"proxyProtocolWarning": "Your backend application must be configured to accept Proxy Protocol connections. If your backend doesn't support Proxy Protocol, enabling this will break all connections so only enable this if you know what you're doing. Make sure to configure your backend to trust Proxy Protocol headers from Traefik.",
"proxyProtocolWarning": "The backend application must be configured to accept Proxy Protocol connections. If your backend doesn't support Proxy Protocol, enabling this will break all connections so only enable this if you know what you're doing. Make sure to configure your backend to trust Proxy Protocol headers from Traefik.",
"restarting": "Restarting...",
"manual": "Manual",
"messageSupport": "Message Support",
@@ -2140,6 +2183,43 @@
"supportMessageSent": "Message Sent!",
"supportWillContact": "We'll be in touch shortly!",
"selectLogRetention": "Select log retention",
"terms": "Terms",
"privacy": "Privacy",
"security": "Security",
"docs": "Docs",
"deviceActivation": "Device activation",
"deviceCodeInvalidFormat": "Code must be 9 characters (e.g., A1AJ-N5JD)",
"deviceCodeInvalidOrExpired": "Invalid or expired code",
"deviceCodeVerifyFailed": "Failed to verify device code",
"signedInAs": "Signed in as",
"deviceCodeEnterPrompt": "Enter the code displayed on the device",
"continue": "Continue",
"deviceUnknownLocation": "Unknown location",
"deviceAuthorizationRequested": "This authorization was requested from {location} on {date}. Make sure you trust this device as it will get access to the account.",
"deviceLabel": "Device: {deviceName}",
"deviceWantsAccess": "wants to access your account",
"deviceExistingAccess": "Existing access:",
"deviceFullAccess": "Full access to your account",
"deviceOrganizationsAccess": "Access to all organizations your account has access to",
"deviceAuthorize": "Authorize {applicationName}",
"deviceConnected": "Device Connected!",
"deviceAuthorizedMessage": "Device is authorized to access your account.",
"pangolinCloud": "Pangolin Cloud",
"viewDevices": "View Devices",
"viewDevicesDescription": "Manage your connected devices",
"noDevices": "No devices found",
"dateCreated": "Date Created",
"unnamedDevice": "Unnamed Device",
"deviceQuestionRemove": "Are you sure you want to delete this device?",
"deviceMessageRemove": "This action cannot be undone.",
"deviceDeleteConfirm": "Delete Device",
"deleteDevice": "Delete Device",
"errorLoadingDevices": "Error loading devices",
"failedToLoadDevices": "Failed to load devices",
"deviceDeleted": "Device deleted",
"deviceDeletedDescription": "The device has been successfully deleted.",
"errorDeletingDevice": "Error deleting device",
"failedToDeleteDevice": "Failed to delete device",
"showColumns": "Show Columns",
"hideColumns": "Hide Columns",
"columnVisibility": "Column Visibility",
@@ -2154,6 +2234,10 @@
"enableSelected": "Enable Selected",
"disableSelected": "Disable Selected",
"checkSelectedStatus": "Check Status of Selected",
"clients": "Clients",
"accessClientSelect": "Select machine clients",
"resourceClientDescription": "Machine clients that can access this resource",
"regenerate": "Regenerate",
"credentials": "Credentials",
"savecredentials": "Save Credentials",
"regeneratecredentials": "Re-key",
@@ -2161,7 +2245,7 @@
"generatedcredentials": "Generated Credentials",
"copyandsavethesecredentials": "Copy and save these credentials",
"copyandsavethesecredentialsdescription": "These credentials will not be shown again after you leave this page. Save them securely now.",
"credentialsSaved" : "Credentials Saved",
"credentialsSaved": "Credentials Saved",
"credentialsSavedDescription": "Credentials have been regenerated and saved successfully.",
"credentialsSaveError": "Credentials Save Error",
"credentialsSaveErrorDescription": "An error occurred while regenerating and saving the credentials.",
@@ -2180,5 +2264,15 @@
"niceIdCannotBeEmpty": "Nice ID cannot be empty",
"enterIdentifier": "Enter identifier",
"identifier": "Identifier",
"noData": "No Data"
"deviceLoginUseDifferentAccount": "Not you? Use a different account.",
"deviceLoginDeviceRequestingAccessToAccount": "A device is requesting access to this account.",
"noData": "No Data",
"machineClients": "Machine Clients",
"install": "Install",
"run": "Run",
"clientNameDescription": "The display name of the client that can be changed later.",
"clientAddress": "Client Address (Advanced)",
"setupFailedToFetchSubnet": "Failed to fetch default subnet",
"setupSubnetAdvanced": "Subnet (Advanced)",
"setupSubnetDescription": "The subnet for this organization's internal network."
}

2099
messages/zh-TW.json Normal file

File diff suppressed because it is too large Load Diff

4880
package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@@ -86,7 +86,7 @@
"eslint-config-next": "16.0.3",
"express": "5.1.0",
"express-rate-limit": "8.2.1",
"glob": "11.0.3",
"glob": "11.1.0",
"helmet": "8.1.0",
"http-errors": "2.0.0",
"i": "^0.3.7",
@@ -98,22 +98,22 @@
"lucide-react": "^0.552.0",
"maxmind": "5.0.1",
"moment": "2.30.1",
"next": "15.5.6",
"next": "15.5.7",
"next-intl": "^4.4.0",
"next-themes": "0.4.6",
"nextjs-toploader": "^3.9.17",
"node-cache": "5.1.2",
"node-fetch": "3.3.2",
"nodemailer": "7.0.10",
"npm": "^11.6.2",
"npm": "^11.6.4",
"nprogress": "^0.2.0",
"oslo": "1.2.1",
"pg": "^8.16.2",
"posthog-node": "^5.11.2",
"qrcode.react": "4.2.0",
"react": "19.2.0",
"react": "19.2.1",
"react-day-picker": "9.11.1",
"react-dom": "19.2.0",
"react-dom": "19.2.1",
"react-easy-sort": "^1.8.0",
"react-hook-form": "7.66.0",
"react-icons": "^5.5.0",

View File

@@ -86,6 +86,7 @@ export enum ActionsEnum {
updateOrgDomain = "updateOrgDomain",
getDNSRecords = "getDNSRecords",
createNewt = "createNewt",
createOlm = "createOlm",
createIdp = "createIdp",
updateIdp = "updateIdp",
deleteIdp = "deleteIdp",

View File

@@ -36,13 +36,15 @@ export async function createSession(
const sessionId = encodeHexLowerCase(
sha256(new TextEncoder().encode(token))
);
const session: Session = {
sessionId: sessionId,
userId,
expiresAt: new Date(Date.now() + SESSION_COOKIE_EXPIRES).getTime(),
issuedAt: new Date().getTime()
};
await db.insert(sessions).values(session);
const [session] = await db
.insert(sessions)
.values({
sessionId: sessionId,
userId,
expiresAt: new Date(Date.now() + SESSION_COOKIE_EXPIRES).getTime(),
issuedAt: new Date().getTime()
})
.returning();
return session;
}

View File

@@ -1,9 +1,43 @@
import { Request } from "express";
import { validateSessionToken, SESSION_COOKIE_NAME } from "@server/auth/sessions/app";
import {
validateSessionToken,
SESSION_COOKIE_NAME
} from "@server/auth/sessions/app";
export async function verifySession(req: Request) {
export async function verifySession(req: Request, forceLogin?: boolean) {
const res = await validateSessionToken(
req.cookies[SESSION_COOKIE_NAME] ?? "",
req.cookies[SESSION_COOKIE_NAME] ?? ""
);
if (!forceLogin) {
return res;
}
if (!res.session || !res.user) {
return {
session: null,
user: null
};
}
if (res.session.deviceAuthUsed) {
return {
session: null,
user: null
};
}
if (!res.session.issuedAt) {
return {
session: null,
user: null
};
}
const mins = 5 * 60 * 1000;
const now = new Date().getTime();
if (now - res.session.issuedAt > mins) {
return {
session: null,
user: null
};
}
return res;
}
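
// Editor's sketch (not part of the original change): one hypothetical way a
// route could require a "fresh" login using the forceLogin flag above. With
// forceLogin set, device-authorized sessions and sessions issued more than
// five minutes ago are treated as logged out.
async function requireFreshSession(req: Request) {
    const { session, user } = await verifySession(req, true);
    if (!session || !user) {
        // caller should respond with 401 and prompt an interactive re-login
        return null;
    }
    return { session, user };
}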

View File

@@ -42,11 +42,17 @@ export async function getUniqueResourceName(orgId: string): Promise<string> {
}
const name = generateName();
const count = await db
.select({ niceId: resources.niceId, orgId: resources.orgId })
.from(resources)
.where(and(eq(resources.niceId, name), eq(resources.orgId, orgId)));
if (count.length === 0) {
const [resourceCount, siteResourceCount] = await Promise.all([
db
.select({ niceId: resources.niceId, orgId: resources.orgId })
.from(resources)
.where(and(eq(resources.niceId, name), eq(resources.orgId, orgId))),
db
.select({ niceId: siteResources.niceId, orgId: siteResources.orgId })
.from(siteResources)
.where(and(eq(siteResources.niceId, name), eq(siteResources.orgId, orgId)))
]);
if (resourceCount.length === 0 && siteResourceCount.length === 0) {
return name;
}
loops++;
@@ -61,11 +67,17 @@ export async function getUniqueSiteResourceName(orgId: string): Promise<string>
}
const name = generateName();
const count = await db
.select({ niceId: siteResources.niceId, orgId: siteResources.orgId })
.from(siteResources)
.where(and(eq(siteResources.niceId, name), eq(siteResources.orgId, orgId)));
if (count.length === 0) {
const [resourceCount, siteResourceCount] = await Promise.all([
db
.select({ niceId: resources.niceId, orgId: resources.orgId })
.from(resources)
.where(and(eq(resources.niceId, name), eq(resources.orgId, orgId))),
db
.select({ niceId: siteResources.niceId, orgId: siteResources.orgId })
.from(siteResources)
.where(and(eq(siteResources.niceId, name), eq(siteResources.orgId, orgId)))
]);
if (resourceCount.length === 0 && siteResourceCount.length === 0) {
return name;
}
loops++;

View File

@@ -73,7 +73,7 @@ function createDb() {
return withReplicas(
DrizzlePostgres(primaryPool, {
logger: process.env.NODE_ENV === "development"
logger: process.env.QUERY_LOGGING === "true"
}),
replicas as any
);

View File

@@ -12,6 +12,7 @@ import {
} from "drizzle-orm/pg-core";
import { InferSelectModel } from "drizzle-orm";
import { randomUUID } from "crypto";
import { alias } from "yargs";
export const domains = pgTable("domains", {
domainId: varchar("domainId").primaryKey(),
@@ -41,6 +42,7 @@ export const orgs = pgTable("orgs", {
orgId: varchar("orgId").primaryKey(),
name: varchar("name").notNull(),
subnet: varchar("subnet"),
utilitySubnet: varchar("utilitySubnet"), // this is the subnet for utility addresses
createdAt: text("createdAt"),
requireTwoFactor: boolean("requireTwoFactor"),
maxSessionLengthHours: integer("maxSessionLengthHours"),
@@ -89,8 +91,7 @@ export const sites = pgTable("sites", {
publicKey: varchar("publicKey"),
lastHolePunch: bigint("lastHolePunch", { mode: "number" }),
listenPort: integer("listenPort"),
dockerSocketEnabled: boolean("dockerSocketEnabled").notNull().default(true),
remoteSubnets: text("remoteSubnets") // comma-separated list of subnets that this site can access
dockerSocketEnabled: boolean("dockerSocketEnabled").notNull().default(true)
});
export const resources = pgTable("resources", {
@@ -206,11 +207,41 @@ export const siteResources = pgTable("siteResources", {
.references(() => orgs.orgId, { onDelete: "cascade" }),
niceId: varchar("niceId").notNull(),
name: varchar("name").notNull(),
protocol: varchar("protocol").notNull(),
proxyPort: integer("proxyPort").notNull(),
destinationPort: integer("destinationPort").notNull(),
destinationIp: varchar("destinationIp").notNull(),
enabled: boolean("enabled").notNull().default(true)
mode: varchar("mode").notNull(), // "host" | "cidr" | "port"
protocol: varchar("protocol"), // only for port mode
proxyPort: integer("proxyPort"), // only for port mode
destinationPort: integer("destinationPort"), // only for port mode
destination: varchar("destination").notNull(), // ip, cidr, hostname; validate against the mode
enabled: boolean("enabled").notNull().default(true),
alias: varchar("alias"),
aliasAddress: varchar("aliasAddress")
});
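
// Illustrative rows for the three modes (editor's note, values are examples only):
//   host: destination "10.1.2.3" or "printer.lan", optionally with alias "printer.internal" and an aliasAddress
//   cidr: destination "10.1.0.0/16", routed as a whole range
//   port: protocol "tcp", proxyPort 8080, destinationPort 80, destination "10.1.2.3"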
export const clientSiteResources = pgTable("clientSiteResources", {
clientId: integer("clientId")
.notNull()
.references(() => clients.clientId, { onDelete: "cascade" }),
siteResourceId: integer("siteResourceId")
.notNull()
.references(() => siteResources.siteResourceId, { onDelete: "cascade" })
});
export const roleSiteResources = pgTable("roleSiteResources", {
roleId: integer("roleId")
.notNull()
.references(() => roles.roleId, { onDelete: "cascade" }),
siteResourceId: integer("siteResourceId")
.notNull()
.references(() => siteResources.siteResourceId, { onDelete: "cascade" })
});
export const userSiteResources = pgTable("userSiteResources", {
userId: varchar("userId")
.notNull()
.references(() => users.userId, { onDelete: "cascade" }),
siteResourceId: integer("siteResourceId")
.notNull()
.references(() => siteResources.siteResourceId, { onDelete: "cascade" })
});
export const users = pgTable("user", {
@@ -258,7 +289,8 @@ export const sessions = pgTable("session", {
.notNull()
.references(() => users.userId, { onDelete: "cascade" }),
expiresAt: bigint("expiresAt", { mode: "number" }).notNull(),
issuedAt: bigint("issuedAt", { mode: "number" })
issuedAt: bigint("issuedAt", { mode: "number" }),
deviceAuthUsed: boolean("deviceAuthUsed").notNull().default(false)
});
export const newtSessions = pgTable("newtSession", {
@@ -600,7 +632,7 @@ export const idpOrg = pgTable("idpOrg", {
});
export const clients = pgTable("clients", {
clientId: serial("id").primaryKey(),
clientId: serial("clientId").primaryKey(),
orgId: varchar("orgId")
.references(() => orgs.orgId, {
onDelete: "cascade"
@@ -609,6 +641,11 @@ export const clients = pgTable("clients", {
exitNodeId: integer("exitNode").references(() => exitNodes.exitNodeId, {
onDelete: "set null"
}),
userId: text("userId").references(() => users.userId, {
// optionally tied to a user; in that case the client is deleted when the user is deleted
onDelete: "cascade"
}),
olmId: text("olmId"), // to lock it to a specific olm optionally
name: varchar("name").notNull(),
pubKey: varchar("pubKey"),
subnet: varchar("subnet").notNull(),
@@ -623,23 +660,40 @@ export const clients = pgTable("clients", {
maxConnections: integer("maxConnections")
});
export const clientSites = pgTable("clientSites", {
clientId: integer("clientId")
.notNull()
.references(() => clients.clientId, { onDelete: "cascade" }),
siteId: integer("siteId")
.notNull()
.references(() => sites.siteId, { onDelete: "cascade" }),
isRelayed: boolean("isRelayed").notNull().default(false),
endpoint: varchar("endpoint")
});
export const clientSitesAssociationsCache = pgTable(
"clientSitesAssociationsCache",
{
clientId: integer("clientId") // not a foreign key here so that, after the client is deleted, the rebuild function can still remove this row and send the message
.notNull(),
siteId: integer("siteId").notNull(),
isRelayed: boolean("isRelayed").notNull().default(false),
endpoint: varchar("endpoint"),
publicKey: varchar("publicKey") // this will act as the session's public key for hole punching so we can track when it changes
}
);
export const clientSiteResourcesAssociationsCache = pgTable(
"clientSiteResourcesAssociationsCache",
{
clientId: integer("clientId") // not a foreign key here so that, after the client is deleted, the rebuild function can still remove this row and send the message
.notNull(),
siteResourceId: integer("siteResourceId").notNull()
}
);
export const olms = pgTable("olms", {
olmId: varchar("id").primaryKey(),
secretHash: varchar("secretHash").notNull(),
dateCreated: varchar("dateCreated").notNull(),
version: text("version"),
agent: text("agent"),
name: varchar("name"),
clientId: integer("clientId").references(() => clients.clientId, {
// we will switch this depending on the current org it wants to connect to
onDelete: "set null"
}),
userId: text("userId").references(() => users.userId, {
// optionally tied to a user; in that case the olm is deleted when the user is deleted
onDelete: "cascade"
})
});
@@ -755,6 +809,21 @@ export const requestAuditLog = pgTable(
]
);
export const deviceWebAuthCodes = pgTable("deviceWebAuthCodes", {
codeId: serial("codeId").primaryKey(),
code: text("code").notNull().unique(),
ip: text("ip"),
city: text("city"),
deviceName: text("deviceName"),
applicationName: text("applicationName").notNull(),
expiresAt: bigint("expiresAt", { mode: "number" }).notNull(),
createdAt: bigint("createdAt", { mode: "number" }).notNull(),
verified: boolean("verified").notNull().default(false),
userId: varchar("userId").references(() => users.userId, {
onDelete: "cascade"
})
});
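
// Editor's note, a hedged reading of this table together with the UI strings
// added elsewhere in this change: the device shows a short code (e.g.
// "A1AJ-N5JD"), the signed-in user enters it on the web to mark the row
// verified and bind their userId, and the device then exchanges the code for
// a session flagged deviceAuthUsed.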
export type Org = InferSelectModel<typeof orgs>;
export type User = InferSelectModel<typeof users>;
export type Site = InferSelectModel<typeof sites>;
@@ -795,7 +864,7 @@ export type ApiKey = InferSelectModel<typeof apiKeys>;
export type ApiKeyAction = InferSelectModel<typeof apiKeyActions>;
export type ApiKeyOrg = InferSelectModel<typeof apiKeyOrg>;
export type Client = InferSelectModel<typeof clients>;
export type ClientSite = InferSelectModel<typeof clientSites>;
export type ClientSite = InferSelectModel<typeof clientSitesAssociationsCache>;
export type Olm = InferSelectModel<typeof olms>;
export type OlmSession = InferSelectModel<typeof olmSessions>;
export type UserClient = InferSelectModel<typeof userClients>;
@@ -810,4 +879,5 @@ export type Blueprint = InferSelectModel<typeof blueprints>;
export type LicenseKey = InferSelectModel<typeof licenseKey>;
export type SecurityKey = InferSelectModel<typeof securityKeys>;
export type WebauthnChallenge = InferSelectModel<typeof webauthnChallenge>;
export type DeviceWebAuthCode = InferSelectModel<typeof deviceWebAuthCodes>;
export type RequestAuditLog = InferSelectModel<typeof requestAuditLog>;

View File

@@ -1,13 +1,12 @@
import {
sqliteTable,
integer,
text,
real,
index
} from "drizzle-orm/sqlite-core";
import { InferSelectModel } from "drizzle-orm";
import { domains, orgs, targets, users, exitNodes, sessions } from "./schema";
import { metadata } from "@app/app/[orgId]/settings/layout";
import {
index,
integer,
real,
sqliteTable,
text
} from "drizzle-orm/sqlite-core";
import { domains, exitNodes, orgs, sessions, users } from "./schema";
export const certificates = sqliteTable("certificates", {
certId: integer("certId").primaryKey({ autoIncrement: true }),

View File

@@ -7,7 +7,7 @@ import {
index,
uniqueIndex
} from "drizzle-orm/sqlite-core";
import { boolean } from "yargs";
import { no } from "zod/v4/locales";
export const domains = sqliteTable("domains", {
domainId: text("domainId").primaryKey(),
@@ -39,6 +39,7 @@ export const orgs = sqliteTable("orgs", {
orgId: text("orgId").primaryKey(),
name: text("name").notNull(),
subnet: text("subnet"),
utilitySubnet: text("utilitySubnet"), // this is the subnet for utility addresses
createdAt: text("createdAt"),
requireTwoFactor: integer("requireTwoFactor", { mode: "boolean" }),
maxSessionLengthHours: integer("maxSessionLengthHours"), // hours
@@ -100,8 +101,7 @@ export const sites = sqliteTable("sites", {
listenPort: integer("listenPort"),
dockerSocketEnabled: integer("dockerSocketEnabled", { mode: "boolean" })
.notNull()
.default(true),
remoteSubnets: text("remoteSubnets") // comma-separated list of subnets that this site can access
.default(true)
});
export const resources = sqliteTable("resources", {
@@ -202,7 +202,7 @@ export const targetHealthCheck = sqliteTable("targetHealthCheck", {
hcMethod: text("hcMethod").default("GET"),
hcStatus: integer("hcStatus"), // http code
hcHealth: text("hcHealth").default("unknown"), // "unknown", "healthy", "unhealthy"
hcTlsServerName: text("hcTlsServerName"),
hcTlsServerName: text("hcTlsServerName")
});
export const exitNodes = sqliteTable("exitNodes", {
@@ -233,11 +233,41 @@ export const siteResources = sqliteTable("siteResources", {
.references(() => orgs.orgId, { onDelete: "cascade" }),
niceId: text("niceId").notNull(),
name: text("name").notNull(),
protocol: text("protocol").notNull(),
proxyPort: integer("proxyPort").notNull(),
destinationPort: integer("destinationPort").notNull(),
destinationIp: text("destinationIp").notNull(),
enabled: integer("enabled", { mode: "boolean" }).notNull().default(true)
mode: text("mode").notNull(), // "host" | "cidr" | "port"
protocol: text("protocol"), // only for port mode
proxyPort: integer("proxyPort"), // only for port mode
destinationPort: integer("destinationPort"), // only for port mode
destination: text("destination").notNull(), // ip, cidr, hostname
enabled: integer("enabled", { mode: "boolean" }).notNull().default(true),
alias: text("alias"),
aliasAddress: text("aliasAddress")
});
export const clientSiteResources = sqliteTable("clientSiteResources", {
clientId: integer("clientId")
.notNull()
.references(() => clients.clientId, { onDelete: "cascade" }),
siteResourceId: integer("siteResourceId")
.notNull()
.references(() => siteResources.siteResourceId, { onDelete: "cascade" })
});
export const roleSiteResources = sqliteTable("roleSiteResources", {
roleId: integer("roleId")
.notNull()
.references(() => roles.roleId, { onDelete: "cascade" }),
siteResourceId: integer("siteResourceId")
.notNull()
.references(() => siteResources.siteResourceId, { onDelete: "cascade" })
});
export const userSiteResources = sqliteTable("userSiteResources", {
userId: text("userId")
.notNull()
.references(() => users.userId, { onDelete: "cascade" }),
siteResourceId: integer("siteResourceId")
.notNull()
.references(() => siteResources.siteResourceId, { onDelete: "cascade" })
});
export const users = sqliteTable("user", {
@@ -313,7 +343,7 @@ export const newts = sqliteTable("newt", {
});
export const clients = sqliteTable("clients", {
clientId: integer("id").primaryKey({ autoIncrement: true }),
clientId: integer("clientId").primaryKey({ autoIncrement: true }),
orgId: text("orgId")
.references(() => orgs.orgId, {
onDelete: "cascade"
@@ -322,8 +352,14 @@ export const clients = sqliteTable("clients", {
exitNodeId: integer("exitNode").references(() => exitNodes.exitNodeId, {
onDelete: "set null"
}),
userId: text("userId").references(() => users.userId, {
// optionally tied to a user; in that case the client is deleted when the user is deleted
onDelete: "cascade"
}),
name: text("name").notNull(),
pubKey: text("pubKey"),
olmId: text("olmId"), // to lock it to a specific olm optionally
subnet: text("subnet").notNull(),
megabytesIn: integer("bytesIn"),
megabytesOut: integer("bytesOut"),
@@ -335,25 +371,42 @@ export const clients = sqliteTable("clients", {
lastHolePunch: integer("lastHolePunch")
});
export const clientSites = sqliteTable("clientSites", {
clientId: integer("clientId")
.notNull()
.references(() => clients.clientId, { onDelete: "cascade" }),
siteId: integer("siteId")
.notNull()
.references(() => sites.siteId, { onDelete: "cascade" }),
isRelayed: integer("isRelayed", { mode: "boolean" })
.notNull()
.default(false),
endpoint: text("endpoint")
});
export const clientSitesAssociationsCache = sqliteTable(
"clientSitesAssociationsCache",
{
clientId: integer("clientId") // not a foreign key here so that, after the client is deleted, the rebuild function can still remove this row and send the message
.notNull(),
siteId: integer("siteId").notNull(),
isRelayed: integer("isRelayed", { mode: "boolean" })
.notNull()
.default(false),
endpoint: text("endpoint"),
publicKey: text("publicKey") // this will act as the session's public key for hole punching so we can track when it changes
}
);
export const clientSiteResourcesAssociationsCache = sqliteTable(
"clientSiteResourcesAssociationsCache",
{
clientId: integer("clientId") // not a foreign key here so that, after the client is deleted, the rebuild function can still remove this row and send the message
.notNull(),
siteResourceId: integer("siteResourceId").notNull()
}
);
export const olms = sqliteTable("olms", {
olmId: text("id").primaryKey(),
secretHash: text("secretHash").notNull(),
dateCreated: text("dateCreated").notNull(),
version: text("version"),
agent: text("agent"),
name: text("name"),
clientId: integer("clientId").references(() => clients.clientId, {
// we will switch this depending on the current org it wants to connect to
onDelete: "set null"
}),
userId: text("userId").references(() => users.userId, {
// optionally tied to a user; in that case the olm is deleted when the user is deleted
onDelete: "cascade"
})
});
@@ -372,7 +425,10 @@ export const sessions = sqliteTable("session", {
.notNull()
.references(() => users.userId, { onDelete: "cascade" }),
expiresAt: integer("expiresAt").notNull(),
issuedAt: integer("issuedAt")
issuedAt: integer("issuedAt"),
deviceAuthUsed: integer("deviceAuthUsed", { mode: "boolean" })
.notNull()
.default(false)
});
export const newtSessions = sqliteTable("newtSession", {
@@ -809,6 +865,21 @@ export const requestAuditLog = sqliteTable(
]
);
export const deviceWebAuthCodes = sqliteTable("deviceWebAuthCodes", {
codeId: integer("codeId").primaryKey({ autoIncrement: true }),
code: text("code").notNull().unique(),
ip: text("ip"),
city: text("city"),
deviceName: text("deviceName"),
applicationName: text("applicationName").notNull(),
expiresAt: integer("expiresAt").notNull(),
createdAt: integer("createdAt").notNull(),
verified: integer("verified", { mode: "boolean" }).notNull().default(false),
userId: text("userId").references(() => users.userId, {
onDelete: "cascade"
})
});
export type Org = InferSelectModel<typeof orgs>;
export type User = InferSelectModel<typeof users>;
export type Site = InferSelectModel<typeof sites>;
@@ -847,7 +918,7 @@ export type ResourceRule = InferSelectModel<typeof resourceRules>;
export type Domain = InferSelectModel<typeof domains>;
export type DnsRecord = InferSelectModel<typeof dnsRecords>;
export type Client = InferSelectModel<typeof clients>;
export type ClientSite = InferSelectModel<typeof clientSites>;
export type ClientSite = InferSelectModel<typeof clientSitesAssociationsCache>;
export type RoleClient = InferSelectModel<typeof roleClients>;
export type UserClient = InferSelectModel<typeof userClients>;
export type SupporterKey = InferSelectModel<typeof supporterKey>;
@@ -866,3 +937,4 @@ export type LicenseKey = InferSelectModel<typeof licenseKey>;
export type SecurityKey = InferSelectModel<typeof securityKeys>;
export type WebauthnChallenge = InferSelectModel<typeof webauthnChallenge>;
export type RequestAuditLog = InferSelectModel<typeof requestAuditLog>;
export type DeviceWebAuthCode = InferSelectModel<typeof deviceWebAuthCodes>;

View File

@@ -11,6 +11,7 @@ import {
ApiKeyOrg,
RemoteExitNode,
Session,
SiteResource,
User,
UserOrg
} from "@server/db";
@@ -77,6 +78,8 @@ declare global {
userOrgId?: string;
userOrgIds?: string[];
remoteExitNode?: RemoteExitNode;
siteResource?: SiteResource;
orgPolicyAllowed?: boolean;
}
}
}

View File

@@ -122,19 +122,17 @@ export async function applyBlueprint({
)
.limit(1);
if (site) {
logger.debug(
`Updating client resource ${result.resource.siteResourceId} on site ${site.sites.siteId}`
);
logger.debug(
`Updating client resource ${result.resource.siteResourceId} on site ${site.sites.siteId}`
);
await addClientTargets(
site.newt.newtId,
result.resource.destinationIp,
result.resource.destinationPort,
result.resource.protocol,
result.resource.proxyPort
);
}
// await addClientTargets(
// site.newt.newtId,
// result.resource.destination,
// result.resource.destinationPort,
// result.resource.protocol,
// result.resource.proxyPort
// );
}
blueprintSucceeded = true;

View File

@@ -75,8 +75,9 @@ export async function updateClientResources(
.set({
name: resourceData.name || resourceNiceId,
siteId: site.siteId,
mode: "port",
proxyPort: resourceData["proxy-port"]!,
destinationIp: resourceData.hostname,
destination: resourceData.hostname,
destinationPort: resourceData["internal-port"],
protocol: resourceData.protocol
})
@@ -98,8 +99,9 @@ export async function updateClientResources(
siteId: site.siteId,
niceId: resourceNiceId,
name: resourceData.name || resourceNiceId,
mode: "port",
proxyPort: resourceData["proxy-port"]!,
destinationIp: resourceData.hostname,
destination: resourceData.hostname,
destinationPort: resourceData["internal-port"],
protocol: resourceData.protocol
})

View File

@@ -221,6 +221,7 @@ export async function updateProxyResources(
domainId: domain ? domain.domainId : null,
enabled: resourceEnabled,
sso: resourceData.auth?.["sso-enabled"] || false,
skipToIdpId: resourceData.auth?.["auto-login-idp"] || null,
ssl: resourceSsl,
setHostHeader: resourceData["host-header"] || null,
tlsServerName: resourceData["tls-server-name"] || null,
@@ -610,6 +611,7 @@ export async function updateProxyResources(
domainId: domain ? domain.domainId : null,
enabled: resourceEnabled,
sso: resourceData.auth?.["sso-enabled"] || false,
skipToIdpId: resourceData.auth?.["auto-login-idp"] || null,
setHostHeader: resourceData["host-header"] || null,
tlsServerName: resourceData["tls-server-name"] || null,
ssl: resourceSsl,
@@ -789,10 +791,6 @@ async function syncRoleResources(
.where(eq(roleResources.resourceId, resourceId));
for (const roleName of ssoRoles) {
if (roleName === "Admin") {
continue; // never add admin access
}
const [role] = await trx
.select()
.from(roles)
@@ -803,6 +801,10 @@ async function syncRoleResources(
throw new Error(`Role not found: ${roleName} in org ${orgId}`);
}
if (role.isAdmin) {
continue; // never add admin access
}
const existingRoleResource = existingRoleResources.find(
(rr) => rr.roleId === role.roleId
);

View File

@@ -59,6 +59,7 @@ export const AuthSchema = z.object({
}),
"sso-users": z.array(z.email()).optional().default([]),
"whitelist-users": z.array(z.email()).optional().default([]),
"auto-login-idp": z.int().positive().optional(),
});
export const RuleSchema = z.object({

View File

@@ -0,0 +1,286 @@
import {
clients,
db,
olms,
orgs,
roleClients,
roles,
userClients,
userOrgs,
Transaction
} from "@server/db";
import { eq, and, notInArray } from "drizzle-orm";
import { listExitNodes } from "#dynamic/lib/exitNodes";
import { getNextAvailableClientSubnet } from "@server/lib/ip";
import logger from "@server/logger";
import { rebuildClientAssociationsFromClient } from "./rebuildClientAssociations";
import { sendTerminateClient } from "@server/routers/client/terminate";
export async function calculateUserClientsForOrgs(
userId: string,
trx?: Transaction
): Promise<void> {
const execute = async (transaction: Transaction) => {
// Get all OLMs for this user
const userOlms = await transaction
.select()
.from(olms)
.where(eq(olms.userId, userId));
if (userOlms.length === 0) {
// No OLMs for this user, but we should still clean up any orphaned clients
await cleanupOrphanedClients(userId, transaction);
return;
}
// Get all user orgs
const allUserOrgs = await transaction
.select()
.from(userOrgs)
.where(eq(userOrgs.userId, userId));
const userOrgIds = allUserOrgs.map((uo) => uo.orgId);
// For each OLM, ensure there's a client in each org the user is in
for (const olm of userOlms) {
for (const userOrg of allUserOrgs) {
const orgId = userOrg.orgId;
const [org] = await transaction
.select()
.from(orgs)
.where(eq(orgs.orgId, orgId));
if (!org) {
logger.warn(
`Skipping org ${orgId} for OLM ${olm.olmId} (user ${userId}): org not found`
);
continue;
}
if (!org.subnet) {
logger.warn(
`Skipping org ${orgId} for OLM ${olm.olmId} (user ${userId}): org has no subnet configured`
);
continue;
}
// Get admin role for this org (needed for access grants)
const [adminRole] = await transaction
.select()
.from(roles)
.where(and(eq(roles.isAdmin, true), eq(roles.orgId, orgId)))
.limit(1);
if (!adminRole) {
logger.warn(
`Skipping org ${orgId} for OLM ${olm.olmId} (user ${userId}): no admin role found`
);
continue;
}
// Check if a client already exists for this OLM+user+org combination
const [existingClient] = await transaction
.select()
.from(clients)
.where(
and(
eq(clients.userId, userId),
eq(clients.orgId, orgId),
eq(clients.olmId, olm.olmId)
)
)
.limit(1);
if (existingClient) {
// Ensure admin role has access to the client
const [existingRoleClient] = await transaction
.select()
.from(roleClients)
.where(
and(
eq(roleClients.roleId, adminRole.roleId),
eq(
roleClients.clientId,
existingClient.clientId
)
)
)
.limit(1);
if (!existingRoleClient) {
await transaction.insert(roleClients).values({
roleId: adminRole.roleId,
clientId: existingClient.clientId
});
logger.debug(
`Granted admin role access to existing client ${existingClient.clientId} for OLM ${olm.olmId} in org ${orgId} (user ${userId})`
);
}
// Ensure user has access to the client
const [existingUserClient] = await transaction
.select()
.from(userClients)
.where(
and(
eq(userClients.userId, userId),
eq(
userClients.clientId,
existingClient.clientId
)
)
)
.limit(1);
if (!existingUserClient) {
await transaction.insert(userClients).values({
userId,
clientId: existingClient.clientId
});
logger.debug(
`Granted user access to existing client ${existingClient.clientId} for OLM ${olm.olmId} in org ${orgId} (user ${userId})`
);
}
logger.debug(
`Client already exists for OLM ${olm.olmId} in org ${orgId} (user ${userId}), skipping creation`
);
continue;
}
// Get exit nodes for this org
const exitNodesList = await listExitNodes(orgId);
if (exitNodesList.length === 0) {
logger.warn(
`Skipping org ${orgId} for OLM ${olm.olmId} (user ${userId}): no exit nodes found`
);
continue;
}
const randomExitNode =
exitNodesList[
Math.floor(Math.random() * exitNodesList.length)
];
// Get next available subnet
const newSubnet = await getNextAvailableClientSubnet(orgId);
if (!newSubnet) {
logger.warn(
`Skipping org ${orgId} for OLM ${olm.olmId} (user ${userId}): no available subnet found`
);
continue;
}
const subnet = newSubnet.split("/")[0];
const updatedSubnet = `${subnet}/${org.subnet.split("/")[1]}`;
// Create the client
const [newClient] = await transaction
.insert(clients)
.values({
userId,
orgId: userOrg.orgId,
exitNodeId: randomExitNode.exitNodeId,
name: olm.name || "User Client",
subnet: updatedSubnet,
olmId: olm.olmId,
type: "olm"
})
.returning();
await rebuildClientAssociationsFromClient(
newClient,
transaction
);
// Grant admin role access to the client
await transaction.insert(roleClients).values({
roleId: adminRole.roleId,
clientId: newClient.clientId
});
// Grant user access to the client
await transaction.insert(userClients).values({
userId,
clientId: newClient.clientId
});
logger.debug(
`Created client for OLM ${olm.olmId} in org ${orgId} (user ${userId}) with access granted to admin role and user`
);
}
}
// Clean up clients in orgs the user is no longer in
await cleanupOrphanedClients(userId, transaction, userOrgIds);
};
if (trx) {
// Use provided transaction
await execute(trx);
} else {
// Create new transaction
await db.transaction(async (transaction) => {
await execute(transaction);
});
}
}
async function cleanupOrphanedClients(
userId: string,
trx: Transaction,
userOrgIds: string[] = []
): Promise<void> {
// Find all OLM clients for this user that should be deleted
// If userOrgIds is empty, delete all OLM clients (user has no orgs)
// If userOrgIds has values, delete clients in orgs they're not in
const clientsToDelete = await trx
.select({ clientId: clients.clientId })
.from(clients)
.where(
userOrgIds.length > 0
? and(
eq(clients.userId, userId),
notInArray(clients.orgId, userOrgIds)
)
: and(eq(clients.userId, userId))
);
if (clientsToDelete.length > 0) {
const deletedClients = await trx
.delete(clients)
.where(
userOrgIds.length > 0
? and(
eq(clients.userId, userId),
notInArray(clients.orgId, userOrgIds)
)
: and(eq(clients.userId, userId))
)
.returning();
// Rebuild associations for each deleted client to clean up related data
for (const deletedClient of deletedClients) {
await rebuildClientAssociationsFromClient(deletedClient, trx);
if (deletedClient.olmId) {
await sendTerminateClient(
deletedClient.clientId,
deletedClient.olmId
);
}
}
if (userOrgIds.length === 0) {
logger.debug(
`Deleted all ${clientsToDelete.length} OLM client(s) for user ${userId} (user has no orgs)`
);
} else {
logger.debug(
`Deleted ${clientsToDelete.length} orphaned OLM client(s) for user ${userId} in orgs they're no longer in`
);
}
}
}
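
// Editor's sketch (not part of the original change): a hypothetical call site.
// calculateUserClientsForOrgs is expected to run whenever a user's org
// membership changes, so that one client exists per (OLM, org) pair and
// orphaned clients are removed; the transaction argument is optional.
async function onUserOrgMembershipChanged(userId: string) {
    await db.transaction(async (trx) => {
        // ... userOrgs rows would be updated here ...
        await calculateUserClientsForOrgs(userId, trx);
    });
}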

View File

@@ -85,10 +85,6 @@ export class Config {
? "true"
: "false";
process.env.FLAGS_ENABLE_CLIENTS = parsedConfig.flags?.enable_clients
? "true"
: "false";
process.env.PRODUCT_UPDATES_NOTIFICATION_ENABLED = parsedConfig.app
.notifications.product_updates
? "true"

View File

@@ -2,7 +2,7 @@ import path from "path";
import { fileURLToPath } from "url";
// This is a placeholder value replaced by the build process
export const APP_VERSION = "1.12.1";
export const APP_VERSION = "1.12.3";
export const __FILENAME = fileURLToPath(import.meta.url);
export const __DIRNAME = path.dirname(__FILENAME);

View File

@@ -18,6 +18,7 @@ import { defaultRoleAllowedActions } from "@server/routers/role";
import { FeatureId, limitsService, sandboxLimitSet } from "@server/lib/billing";
import { createCustomer } from "#dynamic/lib/billing";
import { usageService } from "@server/lib/billing/usageService";
import config from "@server/lib/config";
export async function createUserAccountOrg(
userId: string,
@@ -76,6 +77,8 @@ export async function createUserAccountOrg(
.from(domains)
.where(eq(domains.configManaged, true));
const utilitySubnet = config.getRawConfig().orgs.utility_subnet_group;
const newOrg = await trx
.insert(orgs)
.values({
@@ -83,6 +86,7 @@ export async function createUserAccountOrg(
name,
// subnet
subnet: "100.90.128.0/24", // TODO: this should not be hardcoded - or can it be the same in all orgs?
utilitySubnet: utilitySubnet,
createdAt: new Date().toISOString()
})
.returning();

View File

@@ -1,7 +1,15 @@
import { db } from "@server/db";
import {
clientSitesAssociationsCache,
db,
SiteResource,
siteResources,
Transaction
} from "@server/db";
import { clients, orgs, sites } from "@server/db";
import { and, eq, isNotNull } from "drizzle-orm";
import config from "@server/lib/config";
import z from "zod";
import logger from "@server/logger";
interface IPRange {
start: bigint;
@@ -279,6 +287,56 @@ export async function getNextAvailableClientSubnet(
return subnet;
}
export async function getNextAvailableAliasAddress(
orgId: string
): Promise<string> {
const [org] = await db.select().from(orgs).where(eq(orgs.orgId, orgId));
if (!org) {
throw new Error(`Organization with ID ${orgId} not found`);
}
if (!org.subnet) {
throw new Error(`Organization with ID ${orgId} has no subnet defined`);
}
if (!org.utilitySubnet) {
throw new Error(
`Organization with ID ${orgId} has no utility subnet defined`
);
}
const existingAddresses = await db
.select({
aliasAddress: siteResources.aliasAddress
})
.from(siteResources)
.where(
and(
isNotNull(siteResources.aliasAddress),
eq(siteResources.orgId, orgId)
)
);
const addresses = [
...existingAddresses.map(
(site) => `${site.aliasAddress?.split("/")[0]}/32`
),
// reserve a /29 for the DNS server and other utility services
`${org.utilitySubnet.split("/")[0]}/29`
].filter((address) => address !== null) as string[];
let subnet = findNextAvailableCidr(addresses, 32, org.utilitySubnet);
if (!subnet) {
throw new Error("No available subnets remaining in space");
}
// remove the cidr
subnet = subnet.split("/")[0];
return subnet;
}
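
// Worked example (editor's note): with the default utility_subnet_group of
// 100.96.128.0/24 and the /29 above reserving .0-.7, the first alias address
// handed out should be 100.96.128.8, assuming findNextAvailableCidr allocates
// from the bottom of the range.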
export async function getNextAvailableOrgSubnet(): Promise<string> {
const existingAddresses = await db
.select({
@@ -300,3 +358,113 @@ export async function getNextAvailableOrgSubnet(): Promise<string> {
return subnet;
}
export function generateRemoteSubnets(allSiteResources: SiteResource[]): string[] {
const remoteSubnets = allSiteResources
.filter((sr) => {
if (sr.mode === "cidr") return true;
if (sr.mode === "host") {
// check if its a valid IP using zod
const ipSchema = z.union([z.ipv4(), z.ipv6()]);
const parseResult = ipSchema.safeParse(sr.destination);
return parseResult.success;
}
return false;
})
.map((sr) => {
if (sr.mode === "cidr") return sr.destination;
if (sr.mode === "host") {
return `${sr.destination}/32`;
}
return ""; // This should never be reached due to filtering, but satisfies TypeScript
})
.filter((subnet) => subnet !== ""); // Remove empty strings just to be safe
// remove duplicates
return Array.from(new Set(remoteSubnets));
}
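
// Worked example (editor's note): given a cidr resource "10.1.0.0/16", a host
// resource with IP "10.2.3.4", and a host resource with hostname "app.internal",
// the result is ["10.1.0.0/16", "10.2.3.4/32"]; hostnames are skipped because
// they cannot be expressed as a routed subnet.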
export type Alias = { alias: string | null; aliasAddress: string | null };
export function generateAliasConfig(allSiteResources: SiteResource[]): Alias[] {
let aliasConfigs = allSiteResources
.filter((sr) => sr.alias && sr.aliasAddress && sr.mode == "host")
.map((sr) => ({
alias: sr.alias,
aliasAddress: sr.aliasAddress
}));
return aliasConfigs;
}
export type SubnetProxyTarget = {
sourcePrefix: string; // must be a cidr
destPrefix: string; // must be a cidr
rewriteTo?: string; // must be a cidr
portRange?: {
min: number;
max: number;
}[];
};
export function generateSubnetProxyTargets(
siteResource: SiteResource,
clients: {
clientId: number;
pubKey: string | null;
subnet: string | null;
}[]
): SubnetProxyTarget[] {
const targets: SubnetProxyTarget[] = [];
if (clients.length === 0) {
logger.debug(
`No clients have access to site resource ${siteResource.siteResourceId}, skipping target generation.`
);
return [];
}
for (const clientSite of clients) {
if (!clientSite.subnet) {
logger.debug(
`Client ${clientSite.clientId} has no subnet, skipping for site resource ${siteResource.siteResourceId}.`
);
continue;
}
const clientPrefix = `${clientSite.subnet.split("/")[0]}/32`;
if (siteResource.mode == "host") {
let destination = siteResource.destination;
// check if this is a valid ip
const ipSchema = z.union([z.ipv4(), z.ipv6()]);
if (ipSchema.safeParse(destination).success) {
destination = `${destination}/32`;
targets.push({
sourcePrefix: clientPrefix,
destPrefix: destination
});
}
if (siteResource.alias && siteResource.aliasAddress) {
// also push a match for the alias address
targets.push({
sourcePrefix: clientPrefix,
destPrefix: `${siteResource.aliasAddress}/32`,
rewriteTo: destination
});
}
} else if (siteResource.mode == "cidr") {
targets.push({
sourcePrefix: clientPrefix,
destPrefix: siteResource.destination
});
}
}
// print a nice representation of the targets
// logger.debug(
// `Generated subnet proxy targets for: ${JSON.stringify(targets, null, 2)}`
// );
return targets;
}
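
// Worked example (editor's note): a host-mode resource with destination
// "10.1.2.3", alias "printer.internal" and aliasAddress "100.96.128.8",
// reachable by a client whose subnet is "100.90.128.5/24", yields:
//   { sourcePrefix: "100.90.128.5/32", destPrefix: "10.1.2.3/32" }
//   { sourcePrefix: "100.90.128.5/32", destPrefix: "100.96.128.8/32", rewriteTo: "10.1.2.3/32" }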

111
server/lib/lock.ts Normal file
View File

@@ -0,0 +1,111 @@
export class LockManager {
/**
* Acquire a distributed lock using Redis SET with NX and PX options
* @param lockKey - Unique identifier for the lock
* @param ttlMs - Time to live in milliseconds
* @returns Promise<boolean> - true if lock acquired, false otherwise
*/
async acquireLock(
lockKey: string,
ttlMs: number = 30000
): Promise<boolean> {
return true;
}
/**
* Release a lock using Lua script to ensure atomicity
* @param lockKey - Unique identifier for the lock
*/
async releaseLock(lockKey: string): Promise<void> {}
/**
* Force release a lock regardless of owner (use with caution)
* @param lockKey - Unique identifier for the lock
*/
async forceReleaseLock(lockKey: string): Promise<void> {}
/**
* Check if a lock exists and get its info
* @param lockKey - Unique identifier for the lock
* @returns Promise<{exists: boolean, ownedByMe: boolean, ttl: number}>
*/
async getLockInfo(lockKey: string): Promise<{
exists: boolean;
ownedByMe: boolean;
ttl: number;
owner?: string;
}> {
return { exists: true, ownedByMe: true, ttl: 0 };
}
/**
* Extend the TTL of an existing lock owned by this worker
* @param lockKey - Unique identifier for the lock
* @param ttlMs - New TTL in milliseconds
* @returns Promise<boolean> - true if extended successfully
*/
async extendLock(lockKey: string, ttlMs: number): Promise<boolean> {
return true;
}
/**
* Attempt to acquire lock with retries and exponential backoff
* @param lockKey - Unique identifier for the lock
* @param ttlMs - Time to live in milliseconds
* @param maxRetries - Maximum number of retry attempts
* @param baseDelayMs - Base delay between retries in milliseconds
* @returns Promise<boolean> - true if lock acquired
*/
async acquireLockWithRetry(
lockKey: string,
ttlMs: number = 30000,
maxRetries: number = 5,
baseDelayMs: number = 100
): Promise<boolean> {
return true;
}
/**
* Execute a function while holding a lock
* @param lockKey - Unique identifier for the lock
* @param fn - Function to execute while holding the lock
* @param ttlMs - Lock TTL in milliseconds
* @returns Promise<T> - Result of the executed function
*/
async withLock<T>(
lockKey: string,
fn: () => Promise<T>,
ttlMs: number = 30000
): Promise<T> {
const acquired = await this.acquireLock(lockKey, ttlMs);
if (!acquired) {
throw new Error(`Failed to acquire lock: ${lockKey}`);
}
try {
return await fn();
} finally {
await this.releaseLock(lockKey);
}
}
/**
* Clean up expired locks - Redis handles this automatically, but this method
* can be used to get statistics about locks
* @returns Promise<{activeLocksCount: number, locksOwnedByMe: number}>
*/
async getLockStatistics(): Promise<{
activeLocksCount: number;
locksOwnedByMe: number;
}> {
return { activeLocksCount: 0, locksOwnedByMe: 0 };
}
/**
* Close the Redis connection
*/
async disconnect(): Promise<void> {}
}
export const lockManager = new LockManager();
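
// Editor's sketch (not part of the original change): withLock usage. This
// class is currently a stub (every acquire succeeds), so the wrapper mainly
// marks where a real Redis-backed implementation would serialize work.
// The lock key below is illustrative, not an existing key in the codebase.
async function rebuildAssociationsExclusively(
    orgId: string,
    rebuild: () => Promise<void>
) {
    return lockManager.withLock(`org:${orgId}:rebuild-associations`, rebuild, 15000);
}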

View File

@@ -229,6 +229,11 @@ export const configSchema = z
.default(51820)
.transform(stoi)
.pipe(portSchema),
clients_start_port: portSchema
.optional()
.default(21820)
.transform(stoi)
.pipe(portSchema),
base_endpoint: z
.string()
.optional()
@@ -249,12 +254,14 @@ export const configSchema = z
orgs: z
.object({
block_size: z.number().positive().gt(0).optional().default(24),
subnet_group: z.string().optional().default("100.90.128.0/24")
subnet_group: z.string().optional().default("100.90.128.0/24"),
utility_subnet_group: z.string().optional().default("100.96.128.0/24") // just hardcode this for now as well
})
.optional()
.default({
block_size: 24,
subnet_group: "100.90.128.0/24"
subnet_group: "100.90.128.0/24",
utility_subnet_group: "100.96.128.0/24"
}),
rate_limits: z
.object({
@@ -318,8 +325,7 @@ export const configSchema = z
enable_integration_api: z.boolean().optional(),
disable_local_sites: z.boolean().optional(),
disable_basic_wireguard_sites: z.boolean().optional(),
disable_config_managed_domains: z.boolean().optional(),
enable_clients: z.boolean().optional().default(true)
disable_config_managed_domains: z.boolean().optional()
})
.optional(),
dns: z

File diff suppressed because it is too large Load Diff

View File

@@ -4,7 +4,7 @@ import { getHostMeta } from "./hostMeta";
import logger from "@server/logger";
import { apiKeys, db, roles } from "@server/db";
import { sites, users, orgs, resources, clients, idp } from "@server/db";
import { eq, count, notInArray } from "drizzle-orm";
import { eq, count, notInArray, and } from "drizzle-orm";
import { APP_VERSION } from "./consts";
import crypto from "crypto";
import { UserType } from "@server/types/UserTypes";
@@ -113,7 +113,12 @@ class TelemetryClient {
const [customRoles] = await db
.select({ count: count() })
.from(roles)
.where(notInArray(roles.name, ["Admin", "Member"]));
.where(
and(
eq(roles.isAdmin, false),
notInArray(roles.name, ["Member"])
)
);
const adminUsers = await db
.select({ email: users.email })

View File

@@ -345,9 +345,9 @@ export async function getTraefikConfig(
routerMiddlewares.push(rewriteMiddlewareName);
}
logger.debug(
`Created path rewrite middleware ${rewriteMiddlewareName}: ${resource.pathMatchType}(${resource.path}) -> ${resource.rewritePathType}(${resource.rewritePath})`
);
// logger.debug(
// `Created path rewrite middleware ${rewriteMiddlewareName}: ${resource.pathMatchType}(${resource.path}) -> ${resource.rewritePathType}(${resource.rewritePath})`
// );
} catch (error) {
logger.error(
`Failed to create path rewrite middleware for resource ${resource.resourceId}: ${error}`

View File

@@ -11,6 +11,7 @@ export * from "./verifyRoleAccess";
export * from "./verifyUserAccess";
export * from "./verifyAdmin";
export * from "./verifySetResourceUsers";
export * from "./verifySetResourceClients";
export * from "./verifyUserInRole";
export * from "./verifyAccessTokenAccess";
export * from "./requestTimeout";
@@ -24,7 +25,7 @@ export * from "./integration";
export * from "./verifyUserHasAction";
export * from "./verifyApiKeyAccess";
export * from "./verifyDomainAccess";
export * from "./verifyClientsEnabled";
export * from "./verifyUserIsOrgOwner";
export * from "./verifySiteResourceAccess";
export * from "./logActionAudit";
export * from "./logActionAudit";
export * from "./verifyOlmAccess";

View File

@@ -7,6 +7,7 @@ export * from "./verifyApiKeyTargetAccess";
export * from "./verifyApiKeyRoleAccess";
export * from "./verifyApiKeyUserAccess";
export * from "./verifyApiKeySetResourceUsers";
export * from "./verifyApiKeySetResourceClients";
export * from "./verifyAccessTokenAccess";
export * from "./verifyApiKeyIsRoot";
export * from "./verifyApiKeyApiKeyAccess";

View File

@@ -0,0 +1,73 @@
import { Request, Response, NextFunction } from "express";
import { db } from "@server/db";
import { clients } from "@server/db";
import { and, eq, inArray } from "drizzle-orm";
import createHttpError from "http-errors";
import HttpCode from "@server/types/HttpCode";
export async function verifyApiKeySetResourceClients(
req: Request,
res: Response,
next: NextFunction
) {
const apiKey = req.apiKey;
const singleClientId = req.params.clientId || req.body.clientId || req.query.clientId;
const { clientIds } = req.body;
const allClientIds = clientIds || (singleClientId ? [parseInt(singleClientId as string)] : []);
if (!apiKey) {
return next(
createHttpError(HttpCode.UNAUTHORIZED, "Key not authenticated")
);
}
if (apiKey.isRoot) {
// Root keys can access any client in any org
return next();
}
if (!req.apiKeyOrg) {
return next(
createHttpError(
HttpCode.FORBIDDEN,
"Key does not have access to this organization"
)
);
}
if (allClientIds.length === 0) {
return next();
}
try {
const orgId = req.apiKeyOrg.orgId;
const clientsData = await db
.select()
.from(clients)
.where(
and(
inArray(clients.clientId, allClientIds),
eq(clients.orgId, orgId)
)
);
if (clientsData.length !== allClientIds.length) {
return next(
createHttpError(
HttpCode.FORBIDDEN,
"Key does not have access to one or more specified clients"
)
);
}
return next();
} catch (error) {
return next(
createHttpError(
HttpCode.INTERNAL_SERVER_ERROR,
"Error checking if key has access to the specified clients"
)
);
}
}

View File

@@ -11,7 +11,9 @@ export async function verifyApiKeySetResourceUsers(
next: NextFunction
) {
const apiKey = req.apiKey;
const userIds = req.body.userIds;
const singleUserId = req.params.userId || req.body.userId || req.query.userId;
const { userIds } = req.body;
const allUserIds = userIds || (singleUserId ? [singleUserId] : []);
if (!apiKey) {
return next(
@@ -33,11 +35,7 @@ export async function verifyApiKeySetResourceUsers(
);
}
if (!userIds) {
return next(createHttpError(HttpCode.BAD_REQUEST, "Invalid user IDs"));
}
if (userIds.length === 0) {
if (allUserIds.length === 0) {
return next();
}
@@ -48,12 +46,12 @@ export async function verifyApiKeySetResourceUsers(
.from(userOrgs)
.where(
and(
inArray(userOrgs.userId, userIds),
inArray(userOrgs.userId, allUserIds),
eq(userOrgs.orgId, orgId)
)
);
if (userOrgsData.length !== userIds.length) {
if (userOrgsData.length !== allUserIds.length) {
return next(
createHttpError(
HttpCode.FORBIDDEN,

View File

@@ -13,8 +13,6 @@ export async function verifyApiKeySiteResourceAccess(
try {
const apiKey = req.apiKey;
const siteResourceId = parseInt(req.params.siteResourceId);
const siteId = parseInt(req.params.siteId);
const orgId = req.params.orgId;
if (!apiKey) {
return next(
@@ -22,11 +20,11 @@ export async function verifyApiKeySiteResourceAccess(
);
}
if (!siteResourceId || !siteId || !orgId) {
if (!siteResourceId) {
return next(
createHttpError(
HttpCode.BAD_REQUEST,
"Missing required parameters"
"Missing siteResourceId parameter"
)
);
}
@@ -41,9 +39,7 @@ export async function verifyApiKeySiteResourceAccess(
.select()
.from(siteResources)
.where(and(
eq(siteResources.siteResourceId, siteResourceId),
eq(siteResources.siteId, siteId),
eq(siteResources.orgId, orgId)
eq(siteResources.siteResourceId, siteResourceId)
))
.limit(1);
@@ -64,11 +60,11 @@ export async function verifyApiKeySiteResourceAccess(
.where(
and(
eq(apiKeyOrg.apiKeyId, apiKey.apiKeyId),
eq(apiKeyOrg.orgId, orgId)
eq(apiKeyOrg.orgId, siteResource.orgId)
)
)
.limit(1);
if (apiKeyOrgRes.length === 0) {
return next(
createHttpError(
@@ -77,12 +73,11 @@ export async function verifyApiKeySiteResourceAccess(
)
);
}
req.apiKeyOrg = apiKeyOrgRes[0];
}
// Attach the siteResource to the request for use in the next middleware/route
// @ts-ignore - Extending Request type
req.siteResource = siteResource;
return next();

View File

@@ -5,6 +5,7 @@ import { and, eq } from "drizzle-orm";
import createHttpError from "http-errors";
import HttpCode from "@server/types/HttpCode";
import { canUserAccessResource } from "@server/auth/canUserAccessResource";
import { checkOrgAccessPolicy } from "#dynamic/lib/checkOrgAccessPolicy";
export async function verifyAccessTokenAccess(
req: Request,
@@ -96,6 +97,24 @@ export async function verifyAccessTokenAccess(
req.userOrgId = resource[0].orgId!;
}
if (req.orgPolicyAllowed === undefined && req.userOrg.orgId) {
const policyCheck = await checkOrgAccessPolicy({
orgId: req.userOrg.orgId,
userId,
session: req.session
});
req.orgPolicyAllowed = policyCheck.allowed;
if (!policyCheck.allowed || policyCheck.error) {
return next(
createHttpError(
HttpCode.FORBIDDEN,
"Failed organization access policy check: " +
(policyCheck.error || "Unknown error")
)
);
}
}
const resourceAllowed = await canUserAccessResource({
userId,
resourceId,

View File

@@ -4,6 +4,7 @@ import { roles, userOrgs } from "@server/db";
import { and, eq } from "drizzle-orm";
import createHttpError from "http-errors";
import HttpCode from "@server/types/HttpCode";
import { checkOrgAccessPolicy } from "#dynamic/lib/checkOrgAccessPolicy";
export async function verifyAdmin(
req: Request,
@@ -43,6 +44,24 @@ export async function verifyAdmin(
);
}
if (req.orgPolicyAllowed === undefined && req.userOrg.orgId) {
const policyCheck = await checkOrgAccessPolicy({
orgId: req.userOrg.orgId,
userId,
session: req.session
});
req.orgPolicyAllowed = policyCheck.allowed;
if (!policyCheck.allowed || policyCheck.error) {
return next(
createHttpError(
HttpCode.FORBIDDEN,
"Failed organization access policy check: " +
(policyCheck.error || "Unknown error")
)
);
}
}
const userRole = await db
.select()
.from(roles)

View File

@@ -4,6 +4,7 @@ import { userOrgs, apiKeys, apiKeyOrg } from "@server/db";
import { and, eq, or } from "drizzle-orm";
import createHttpError from "http-errors";
import HttpCode from "@server/types/HttpCode";
import { checkOrgAccessPolicy } from "#dynamic/lib/checkOrgAccessPolicy";
export async function verifyApiKeyAccess(
req: Request,
@@ -84,6 +85,24 @@ export async function verifyApiKeyAccess(
);
}
if (req.orgPolicyAllowed === undefined && req.userOrg.orgId) {
const policyCheck = await checkOrgAccessPolicy({
orgId: req.userOrg.orgId,
userId,
session: req.session
});
req.orgPolicyAllowed = policyCheck.allowed;
if (!policyCheck.allowed || policyCheck.error) {
return next(
createHttpError(
HttpCode.FORBIDDEN,
"Failed organization access policy check: " +
(policyCheck.error || "Unknown error")
)
);
}
}
const userOrgRoleId = req.userOrg.roleId;
req.userOrgRoleId = userOrgRoleId;

View File

@@ -4,6 +4,7 @@ import { userOrgs, clients, roleClients, userClients } from "@server/db";
import { and, eq } from "drizzle-orm";
import createHttpError from "http-errors";
import HttpCode from "@server/types/HttpCode";
import { checkOrgAccessPolicy } from "#dynamic/lib/checkOrgAccessPolicy";
export async function verifyClientAccess(
req: Request,
@@ -75,6 +76,24 @@ export async function verifyClientAccess(
);
}
if (req.orgPolicyAllowed === undefined && req.userOrg.orgId) {
const policyCheck = await checkOrgAccessPolicy({
orgId: req.userOrg.orgId,
userId,
session: req.session
});
req.orgPolicyAllowed = policyCheck.allowed;
if (!policyCheck.allowed || policyCheck.error) {
return next(
createHttpError(
HttpCode.FORBIDDEN,
"Failed organization access policy check: " +
(policyCheck.error || "Unknown error")
)
);
}
}
const userOrgRoleId = req.userOrg.roleId;
req.userOrgRoleId = userOrgRoleId;
req.userOrgId = client.orgId;

View File

@@ -1,29 +0,0 @@
import { Request, Response, NextFunction } from "express";
import createHttpError from "http-errors";
import HttpCode from "@server/types/HttpCode";
import config from "@server/lib/config";
export async function verifyClientsEnabled(
req: Request,
res: Response,
next: NextFunction
) {
try {
if (!config.getRawConfig().flags?.enable_clients) {
return next(
createHttpError(
HttpCode.NOT_IMPLEMENTED,
"Clients are not enabled on this server."
)
);
}
return next();
} catch (error) {
return next(
createHttpError(
HttpCode.INTERNAL_SERVER_ERROR,
"Failed to check if clients are enabled"
)
);
}
}

View File

@@ -4,6 +4,7 @@ import { userOrgs, apiKeyOrg } from "@server/db";
import { and, eq } from "drizzle-orm";
import createHttpError from "http-errors";
import HttpCode from "@server/types/HttpCode";
import { checkOrgAccessPolicy } from "#dynamic/lib/checkOrgAccessPolicy";
export async function verifyDomainAccess(
req: Request,
@@ -78,6 +79,24 @@ export async function verifyDomainAccess(
);
}
if (req.orgPolicyAllowed === undefined && req.userOrg.orgId) {
const policyCheck = await checkOrgAccessPolicy({
orgId: req.userOrg.orgId,
userId,
session: req.session
});
req.orgPolicyAllowed = policyCheck.allowed;
if (!policyCheck.allowed || policyCheck.error) {
return next(
createHttpError(
HttpCode.FORBIDDEN,
"Failed organization access policy check: " +
(policyCheck.error || "Unknown error")
)
);
}
}
const userOrgRoleId = req.userOrg.roleId;
req.userOrgRoleId = userOrgRoleId;

View File

@@ -0,0 +1,45 @@
import { Request, Response, NextFunction } from "express";
import createHttpError from "http-errors";
import HttpCode from "@server/types/HttpCode";
import { db, olms } from "@server/db";
import { and, eq } from "drizzle-orm";
export async function verifyOlmAccess(
req: Request,
res: Response,
next: NextFunction
) {
try {
const userId = req.user!.userId;
const olmId = req.params.olmId || req.body.olmId || req.query.olmId;
if (!userId) {
return next(
createHttpError(HttpCode.UNAUTHORIZED, "User not authenticated")
);
}
const [existingOlm] = await db
.select()
.from(olms)
.where(and(eq(olms.olmId, olmId), eq(olms.userId, userId)));
if (!existingOlm) {
return next(
createHttpError(
HttpCode.FORBIDDEN,
"User does not have access to this olm"
)
);
}
return next();
} catch (error) {
return next(
createHttpError(
HttpCode.INTERNAL_SERVER_ERROR,
"Error checking if user has access to this user"
)
);
}
}

View File

@@ -47,22 +47,22 @@ export async function verifyOrgAccess(
);
}
const policyCheck = await checkOrgAccessPolicy({
orgId,
userId,
session: req.session
});
logger.debug("Org check policy result", { policyCheck });
if (!policyCheck.allowed || policyCheck.error) {
return next(
createHttpError(
HttpCode.FORBIDDEN,
"Failed organization access policy check: " +
(policyCheck.error || "Unknown error")
)
);
if (req.orgPolicyAllowed === undefined) {
const policyCheck = await checkOrgAccessPolicy({
orgId,
userId,
session: req.session
});
req.orgPolicyAllowed = policyCheck.allowed;
if (!policyCheck.allowed || policyCheck.error) {
return next(
createHttpError(
HttpCode.FORBIDDEN,
"Failed organization access policy check: " +
(policyCheck.error || "Unknown error")
)
);
}
}
// User has access, attach the user's role to the request for potential future use

View File

@@ -1,14 +1,10 @@
import { Request, Response, NextFunction } from "express";
import { db } from "@server/db";
import {
resources,
userOrgs,
userResources,
roleResources,
} from "@server/db";
import { resources, userOrgs, userResources, roleResources } from "@server/db";
import { and, eq } from "drizzle-orm";
import createHttpError from "http-errors";
import HttpCode from "@server/types/HttpCode";
import { checkOrgAccessPolicy } from "#dynamic/lib/checkOrgAccessPolicy";
export async function verifyResourceAccess(
req: Request,
@@ -73,6 +69,24 @@ export async function verifyResourceAccess(
);
}
if (req.orgPolicyAllowed === undefined && req.userOrg.orgId) {
const policyCheck = await checkOrgAccessPolicy({
orgId: req.userOrg.orgId,
userId,
session: req.session
});
req.orgPolicyAllowed = policyCheck.allowed;
if (!policyCheck.allowed || policyCheck.error) {
return next(
createHttpError(
HttpCode.FORBIDDEN,
"Failed organization access policy check: " +
(policyCheck.error || "Unknown error")
)
);
}
}
const userOrgRoleId = req.userOrg.roleId;
req.userOrgRoleId = userOrgRoleId;
req.userOrgId = resource[0].orgId;

View File

@@ -5,6 +5,7 @@ import { and, eq, inArray } from "drizzle-orm";
import createHttpError from "http-errors";
import HttpCode from "@server/types/HttpCode";
import logger from "@server/logger";
import { checkOrgAccessPolicy } from "#dynamic/lib/checkOrgAccessPolicy";
export async function verifyRoleAccess(
req: Request,
@@ -105,6 +106,33 @@ export async function verifyRoleAccess(
req.userOrgRoleId = userOrg[0].roleId;
}
if (!req.userOrg) {
return next(
createHttpError(
HttpCode.FORBIDDEN,
"User does not have access to this organization"
)
);
}
if (req.orgPolicyAllowed === undefined && req.userOrg.orgId) {
const policyCheck = await checkOrgAccessPolicy({
orgId: req.userOrg.orgId,
userId,
session: req.session
});
req.orgPolicyAllowed = policyCheck.allowed;
if (!policyCheck.allowed || policyCheck.error) {
return next(
createHttpError(
HttpCode.FORBIDDEN,
"Failed organization access policy check: " +
(policyCheck.error || "Unknown error")
)
);
}
}
return next();
} catch (error) {
logger.error("Error verifying role access:", error);
@@ -116,4 +144,3 @@ export async function verifyRoleAccess(
);
}
}

View File

@@ -1,10 +1,5 @@
import { NextFunction, Response } from "express";
import ErrorResponse from "@server/types/ErrorResponse";
import { db } from "@server/db";
import { users } from "@server/db";
import { eq } from "drizzle-orm";
import createHttpError from "http-errors";
import HttpCode from "@server/types/HttpCode";
import { verifySession } from "@server/auth/sessions/verifySession";
import { unauthorized } from "@server/auth/unauthorizedResponse";
@@ -13,24 +8,15 @@ export const verifySessionMiddleware = async (
res: Response<ErrorResponse>,
next: NextFunction
) => {
const { session, user } = await verifySession(req);
const { forceLogin } = req.query;
const { session, user } = await verifySession(req, forceLogin === "true");
if (!session || !user) {
return next(unauthorized());
}
const existingUser = await db
.select()
.from(users)
.where(eq(users.userId, user.userId));
if (!existingUser || !existingUser[0]) {
return next(
createHttpError(HttpCode.BAD_REQUEST, "User does not exist")
);
}
req.user = existingUser[0];
req.user = user;
req.session = session;
next();
return next();
};

View File

@@ -0,0 +1,90 @@
import { Request, Response, NextFunction } from "express";
import { db } from "@server/db";
import { clients } from "@server/db";
import { and, eq, inArray } from "drizzle-orm";
import createHttpError from "http-errors";
import HttpCode from "@server/types/HttpCode";
import { checkOrgAccessPolicy } from "#dynamic/lib/checkOrgAccessPolicy";
export async function verifySetResourceClients(
req: Request,
res: Response,
next: NextFunction
) {
const userId = req.user!.userId;
const singleClientId =
req.params.clientId || req.body.clientId || req.query.clientId;
const { clientIds } = req.body;
const allClientIds =
clientIds ||
(singleClientId ? [parseInt(singleClientId as string)] : []);
if (!userId) {
return next(
createHttpError(HttpCode.UNAUTHORIZED, "User not authenticated")
);
}
if (!req.userOrg) {
return next(
createHttpError(
HttpCode.FORBIDDEN,
"User does not have access to this organization"
)
);
}
if (req.orgPolicyAllowed === undefined && req.userOrg.orgId) {
const policyCheck = await checkOrgAccessPolicy({
orgId: req.userOrg.orgId,
userId,
session: req.session
});
req.orgPolicyAllowed = policyCheck.allowed;
if (!policyCheck.allowed || policyCheck.error) {
return next(
createHttpError(
HttpCode.FORBIDDEN,
"Failed organization access policy check: " +
(policyCheck.error || "Unknown error")
)
);
}
}
if (allClientIds.length === 0) {
return next();
}
try {
const orgId = req.userOrg.orgId;
// get all clients for the clientIds
const clientsData = await db
.select()
.from(clients)
.where(
and(
inArray(clients.clientId, allClientIds),
eq(clients.orgId, orgId)
)
);
if (clientsData.length !== allClientIds.length) {
return next(
createHttpError(
HttpCode.FORBIDDEN,
"User does not have access to one or more specified clients"
)
);
}
return next();
} catch (error) {
return next(
createHttpError(
HttpCode.INTERNAL_SERVER_ERROR,
"Error checking if user has access to the specified clients"
)
);
}
}

View File

@@ -4,6 +4,7 @@ import { userOrgs } from "@server/db";
import { and, eq, inArray, or } from "drizzle-orm";
import createHttpError from "http-errors";
import HttpCode from "@server/types/HttpCode";
import { checkOrgAccessPolicy } from "#dynamic/lib/checkOrgAccessPolicy";
export async function verifySetResourceUsers(
req: Request,
@@ -28,6 +29,24 @@ export async function verifySetResourceUsers(
);
}
if (req.orgPolicyAllowed === undefined && req.userOrg.orgId) {
const policyCheck = await checkOrgAccessPolicy({
orgId: req.userOrg.orgId,
userId,
session: req.session
});
req.orgPolicyAllowed = policyCheck.allowed;
if (!policyCheck.allowed || policyCheck.error) {
return next(
createHttpError(
HttpCode.FORBIDDEN,
"Failed organization access policy check: " +
(policyCheck.error || "Unknown error")
)
);
}
}
if (!userIds) {
return next(createHttpError(HttpCode.BAD_REQUEST, "Invalid user IDs"));
}

View File

@@ -1,16 +1,11 @@
import { Request, Response, NextFunction } from "express";
import { db } from "@server/db";
import {
sites,
userOrgs,
userSites,
roleSites,
roles,
} from "@server/db";
import { sites, userOrgs, userSites, roleSites, roles } from "@server/db";
import { and, eq, or } from "drizzle-orm";
import createHttpError from "http-errors";
import HttpCode from "@server/types/HttpCode";
import logger from "@server/logger";
import { checkOrgAccessPolicy } from "#dynamic/lib/checkOrgAccessPolicy";
export async function verifySiteAccess(
req: Request,
@@ -82,6 +77,24 @@ export async function verifySiteAccess(
);
}
if (req.orgPolicyAllowed === undefined && req.userOrg.orgId) {
const policyCheck = await checkOrgAccessPolicy({
orgId: req.userOrg.orgId,
userId,
session: req.session
});
req.orgPolicyAllowed = policyCheck.allowed;
if (!policyCheck.allowed || policyCheck.error) {
return next(
createHttpError(
HttpCode.FORBIDDEN,
"Failed organization access policy check: " +
(policyCheck.error || "Unknown error")
)
);
}
}
const userOrgRoleId = req.userOrg.roleId;
req.userOrgRoleId = userOrgRoleId;
req.userOrgId = site[0].orgId;

View File

@@ -1,10 +1,11 @@
import { Request, Response, NextFunction } from "express";
import { db } from "@server/db";
import { db, roleSiteResources, userOrgs, userSiteResources } from "@server/db";
import { siteResources } from "@server/db";
import { eq, and } from "drizzle-orm";
import createHttpError from "http-errors";
import HttpCode from "@server/types/HttpCode";
import logger from "@server/logger";
import { checkOrgAccessPolicy } from "#dynamic/lib/checkOrgAccessPolicy";
export async function verifySiteResourceAccess(
req: Request,
@@ -12,44 +13,145 @@ export async function verifySiteResourceAccess(
next: NextFunction
): Promise<any> {
try {
const siteResourceId = parseInt(req.params.siteResourceId);
const siteId = parseInt(req.params.siteId);
const orgId = req.params.orgId;
const userId = req.user!.userId;
const siteResourceId =
req.params.siteResourceId ||
req.body.siteResourceId ||
req.query.siteResourceId;
if (!siteResourceId || !siteId || !orgId) {
if (!userId) {
return next(
createHttpError(HttpCode.UNAUTHORIZED, "User not authenticated")
);
}
if (!siteResourceId) {
return next(
createHttpError(
HttpCode.BAD_REQUEST,
"Missing required parameters"
"Site resource ID is required"
)
);
}
const siteResourceIdNum = parseInt(siteResourceId as string, 10);
if (isNaN(siteResourceIdNum)) {
return next(
createHttpError(
HttpCode.BAD_REQUEST,
"Invalid site resource ID"
)
);
}
// Check if the site resource exists and belongs to the specified site and org
const [siteResource] = await db
.select()
.from(siteResources)
.where(and(
eq(siteResources.siteResourceId, siteResourceId),
eq(siteResources.siteId, siteId),
eq(siteResources.orgId, orgId)
))
.where(eq(siteResources.siteResourceId, siteResourceIdNum))
.limit(1);
if (!siteResource) {
return next(
createHttpError(
HttpCode.NOT_FOUND,
"Site resource not found"
`Site resource with ID ${siteResourceIdNum} not found`
)
);
}
if (!siteResource.orgId) {
return next(
createHttpError(
HttpCode.INTERNAL_SERVER_ERROR,
`Site resource with ID ${siteResourceIdNum} does not have an organization ID`
)
);
}
if (!req.userOrg) {
const userOrgRole = await db
.select()
.from(userOrgs)
.where(
and(
eq(userOrgs.userId, userId),
eq(userOrgs.orgId, siteResource.orgId)
)
)
.limit(1);
req.userOrg = userOrgRole[0];
}
if (!req.userOrg) {
return next(
createHttpError(
HttpCode.FORBIDDEN,
"User does not have access to this organization"
)
);
}
if (req.orgPolicyAllowed === undefined && req.userOrg.orgId) {
const policyCheck = await checkOrgAccessPolicy({
orgId: req.userOrg.orgId,
userId,
session: req.session
});
req.orgPolicyAllowed = policyCheck.allowed;
if (!policyCheck.allowed || policyCheck.error) {
return next(
createHttpError(
HttpCode.FORBIDDEN,
"Failed organization access policy check: " +
(policyCheck.error || "Unknown error")
)
);
}
}
const userOrgRoleId = req.userOrg.roleId;
req.userOrgRoleId = userOrgRoleId;
req.userOrgId = siteResource.orgId;
// Attach the siteResource to the request for use in the next middleware/route
// @ts-ignore - Extending Request type
req.siteResource = siteResource;
next();
const roleResourceAccess = await db
.select()
.from(roleSiteResources)
.where(
and(
eq(roleSiteResources.siteResourceId, siteResourceIdNum),
eq(roleSiteResources.roleId, userOrgRoleId)
)
)
.limit(1);
if (roleResourceAccess.length > 0) {
return next();
}
const userResourceAccess = await db
.select()
.from(userSiteResources)
.where(
and(
eq(userSiteResources.userId, userId),
eq(userSiteResources.siteResourceId, siteResourceIdNum)
)
)
.limit(1);
if (userResourceAccess.length > 0) {
return next();
}
return next(
createHttpError(
HttpCode.FORBIDDEN,
"User does not have access to this resource"
)
);
} catch (error) {
logger.error("Error verifying site resource access:", error);
return next(

View File

@@ -5,6 +5,7 @@ import { and, eq } from "drizzle-orm";
import createHttpError from "http-errors";
import HttpCode from "@server/types/HttpCode";
import { canUserAccessResource } from "../auth/canUserAccessResource";
import { checkOrgAccessPolicy } from "#dynamic/lib/checkOrgAccessPolicy";
export async function verifyTargetAccess(
req: Request,
@@ -102,6 +103,26 @@ export async function verifyTargetAccess(
req.userOrgId = resource[0].orgId!;
}
const orgId = req.userOrg.orgId;
if (req.orgPolicyAllowed === undefined && orgId) {
const policyCheck = await checkOrgAccessPolicy({
orgId,
userId,
session: req.session
});
req.orgPolicyAllowed = policyCheck.allowed;
if (!policyCheck.allowed || policyCheck.error) {
return next(
createHttpError(
HttpCode.FORBIDDEN,
"Failed organization access policy check: " +
(policyCheck.error || "Unknown error")
)
);
}
}
const resourceAllowed = await canUserAccessResource({
userId,
resourceId,

View File

@@ -15,7 +15,9 @@ export const verifySessionUserMiddleware = async (
res: Response<ErrorResponse>,
next: NextFunction
) => {
const { session, user } = await verifySession(req);
const { forceLogin } = req.query;
const { session, user } = await verifySession(req, forceLogin === "true");
if (!session || !user) {
if (config.getRawConfig().app.log_failed_attempts) {
logger.info(`User session not found. IP: ${req.ip}.`);

View File

@@ -4,6 +4,7 @@ import { userOrgs } from "@server/db";
import { and, eq, or } from "drizzle-orm";
import createHttpError from "http-errors";
import HttpCode from "@server/types/HttpCode";
import { checkOrgAccessPolicy } from "#dynamic/lib/checkOrgAccessPolicy";
export async function verifyUserAccess(
req: Request,
@@ -47,6 +48,24 @@ export async function verifyUserAccess(
);
}
if (req.orgPolicyAllowed === undefined && req.userOrg.orgId) {
const policyCheck = await checkOrgAccessPolicy({
orgId: req.userOrg.orgId,
userId,
session: req.session
});
req.orgPolicyAllowed = policyCheck.allowed;
if (!policyCheck.allowed || policyCheck.error) {
return next(
createHttpError(
HttpCode.FORBIDDEN,
"Failed organization access policy check: " +
(policyCheck.error || "Unknown error")
)
);
}
}
return next();
} catch (error) {
return next(

View File

@@ -45,6 +45,11 @@ export class PrivateConfig {
this.rawPrivateConfig = parsedPrivateConfig;
process.env.BRANDING_HIDE_AUTH_LAYOUT_FOOTER =
this.rawPrivateConfig.branding?.hide_auth_layout_footer === true
? "true"
: "false";
if (this.rawPrivateConfig.branding?.colors) {
process.env.BRANDING_COLORS = JSON.stringify(
this.rawPrivateConfig.branding?.colors

View File

@@ -197,7 +197,7 @@ export async function listExitNodes(orgId: string, filterOnline = false, noCloud
// // set the item in the database if it is offline
// if (isActuallyOnline != node.online) {
// await db
// await trx
// .update(exitNodes)
// .set({ online: isActuallyOnline })
// .where(eq(exitNodes.exitNodeId, node.exitNodeId));

363
server/private/lib/lock.ts Normal file
View File

@@ -0,0 +1,363 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import { config } from "@server/lib/config";
import logger from "@server/logger";
import { redis } from "#private/lib/redis";
export class LockManager {
/**
* Acquire a distributed lock using Redis SET with NX and PX options
* @param lockKey - Unique identifier for the lock
* @param ttlMs - Time to live in milliseconds
* @returns Promise<boolean> - true if lock acquired, false otherwise
*/
async acquireLock(
lockKey: string,
ttlMs: number = 30000
): Promise<boolean> {
if (!redis || !redis.status || redis.status !== "ready") {
return true;
}
const lockValue = `${
config.getRawConfig().gerbil.exit_node_name
}:${Date.now()}`;
const redisKey = `lock:${lockKey}`;
try {
// Use SET with NX (only set if not exists) and PX (expire in milliseconds)
// This is atomic and handles both setting and expiration
const result = await redis.set(
redisKey,
lockValue,
"PX",
ttlMs,
"NX"
);
if (result === "OK") {
logger.debug(
`Lock acquired: ${lockKey} by ${
config.getRawConfig().gerbil.exit_node_name
}`
);
return true;
}
// Check if the existing lock is from this worker (reentrant behavior)
const existingValue = await redis.get(redisKey);
if (
existingValue &&
existingValue.startsWith(
`${config.getRawConfig().gerbil.exit_node_name}:`
)
) {
// Extend the lock TTL since it's the same worker
await redis.pexpire(redisKey, ttlMs);
logger.debug(
`Lock extended: ${lockKey} by ${
config.getRawConfig().gerbil.exit_node_name
}`
);
return true;
}
return false;
} catch (error) {
logger.error(`Failed to acquire lock ${lockKey}:`, error);
return false;
}
}
/**
* Release a lock using Lua script to ensure atomicity
* @param lockKey - Unique identifier for the lock
*/
async releaseLock(lockKey: string): Promise<void> {
if (!redis || !redis.status || redis.status !== "ready") {
return;
}
const redisKey = `lock:${lockKey}`;
// Lua script to ensure we only delete the lock if it belongs to this worker
const luaScript = `
local key = KEYS[1]
local worker_prefix = ARGV[1]
local current_value = redis.call('GET', key)
if current_value and string.find(current_value, worker_prefix, 1, true) == 1 then
return redis.call('DEL', key)
else
return 0
end
`;
try {
const result = (await redis.eval(
luaScript,
1,
redisKey,
`${config.getRawConfig().gerbil.exit_node_name}:`
)) as number;
if (result === 1) {
logger.debug(
`Lock released: ${lockKey} by ${
config.getRawConfig().gerbil.exit_node_name
}`
);
} else {
logger.warn(
`Lock not released - not owned by worker: ${lockKey} by ${
config.getRawConfig().gerbil.exit_node_name
}`
);
}
} catch (error) {
logger.error(`Failed to release lock ${lockKey}:`, error);
}
}
/**
* Force release a lock regardless of owner (use with caution)
* @param lockKey - Unique identifier for the lock
*/
async forceReleaseLock(lockKey: string): Promise<void> {
if (!redis || !redis.status || redis.status !== "ready") {
return;
}
const redisKey = `lock:${lockKey}`;
try {
const result = await redis.del(redisKey);
if (result === 1) {
logger.debug(`Lock force released: ${lockKey}`);
}
} catch (error) {
logger.error(`Failed to force release lock ${lockKey}:`, error);
}
}
/**
* Check if a lock exists and get its info
* @param lockKey - Unique identifier for the lock
* @returns Promise<{exists: boolean, ownedByMe: boolean, ttl: number}>
*/
async getLockInfo(lockKey: string): Promise<{
exists: boolean;
ownedByMe: boolean;
ttl: number;
owner?: string;
}> {
if (!redis || !redis.status || redis.status !== "ready") {
return { exists: false, ownedByMe: true, ttl: 0 };
}
const redisKey = `lock:${lockKey}`;
try {
const [value, ttl] = await Promise.all([
redis.get(redisKey),
redis.pttl(redisKey)
]);
const exists = value !== null;
const ownedByMe =
exists &&
value!.startsWith(`${config.getRawConfig().gerbil.exit_node_name}:`);
const owner = exists ? value!.split(":")[0] : undefined;
return {
exists,
ownedByMe,
ttl: ttl > 0 ? ttl : 0,
owner
};
} catch (error) {
logger.error(`Failed to get lock info ${lockKey}:`, error);
return { exists: false, ownedByMe: false, ttl: 0 };
}
}
/**
* Extend the TTL of an existing lock owned by this worker
* @param lockKey - Unique identifier for the lock
* @param ttlMs - New TTL in milliseconds
* @returns Promise<boolean> - true if extended successfully
*/
async extendLock(lockKey: string, ttlMs: number): Promise<boolean> {
if (!redis || !redis.status || redis.status !== "ready") {
return true;
}
const redisKey = `lock:${lockKey}`;
// Lua script to extend TTL only if lock is owned by this worker
const luaScript = `
local key = KEYS[1]
local worker_prefix = ARGV[1]
local ttl = tonumber(ARGV[2])
local current_value = redis.call('GET', key)
if current_value and string.find(current_value, worker_prefix, 1, true) == 1 then
return redis.call('PEXPIRE', key, ttl)
else
return 0
end
`;
try {
const result = (await redis.eval(
luaScript,
1,
redisKey,
`${config.getRawConfig().gerbil.exit_node_name}:`,
ttlMs.toString()
)) as number;
if (result === 1) {
logger.debug(
`Lock extended: ${lockKey} by ${
config.getRawConfig().gerbil.exit_node_name
} for ${ttlMs}ms`
);
return true;
}
return false;
} catch (error) {
logger.error(`Failed to extend lock ${lockKey}:`, error);
return false;
}
}
/**
* Attempt to acquire lock with retries and exponential backoff
* @param lockKey - Unique identifier for the lock
* @param ttlMs - Time to live in milliseconds
* @param maxRetries - Maximum number of retry attempts
* @param baseDelayMs - Base delay between retries in milliseconds
* @returns Promise<boolean> - true if lock acquired
*/
async acquireLockWithRetry(
lockKey: string,
ttlMs: number = 30000,
maxRetries: number = 5,
baseDelayMs: number = 100
): Promise<boolean> {
if (!redis || !redis.status || redis.status !== "ready") {
return true;
}
for (let attempt = 0; attempt <= maxRetries; attempt++) {
const acquired = await this.acquireLock(lockKey, ttlMs);
if (acquired) {
return true;
}
if (attempt < maxRetries) {
// Exponential backoff with jitter
const delay =
baseDelayMs * Math.pow(2, attempt) + Math.random() * 100;
await new Promise((resolve) => setTimeout(resolve, delay));
}
}
logger.warn(
`Failed to acquire lock ${lockKey} after ${maxRetries + 1} attempts`
);
return false;
}
/**
* Execute a function while holding a lock
* @param lockKey - Unique identifier for the lock
* @param fn - Function to execute while holding the lock
* @param ttlMs - Lock TTL in milliseconds
* @returns Promise<T> - Result of the executed function
*/
async withLock<T>(
lockKey: string,
fn: () => Promise<T>,
ttlMs: number = 30000
): Promise<T> {
if (!redis || !redis.status || redis.status !== "ready") {
return await fn();
}
const acquired = await this.acquireLock(lockKey, ttlMs);
if (!acquired) {
throw new Error(`Failed to acquire lock: ${lockKey}`);
}
try {
return await fn();
} finally {
await this.releaseLock(lockKey);
}
}
/**
* Clean up expired locks - Redis handles this automatically, but this method
* can be used to get statistics about locks
* @returns Promise<{activeLocksCount: number, locksOwnedByMe: number}>
*/
async getLockStatistics(): Promise<{
activeLocksCount: number;
locksOwnedByMe: number;
}> {
if (!redis || !redis.status || redis.status !== "ready") {
return { activeLocksCount: 0, locksOwnedByMe: 0 };
}
try {
const keys = await redis.keys("lock:*");
let locksOwnedByMe = 0;
if (keys.length > 0) {
const values = await redis.mget(...keys);
locksOwnedByMe = values.filter(
(value) =>
value &&
value.startsWith(
`${config.getRawConfig().gerbil.exit_node_name}:`
)
).length;
}
return {
activeLocksCount: keys.length,
locksOwnedByMe
};
} catch (error) {
logger.error("Failed to get lock statistics:", error);
return { activeLocksCount: 0, locksOwnedByMe: 0 };
}
}
/**
* Close the Redis connection
*/
async disconnect(): Promise<void> {
if (!redis || !redis.status || redis.status !== "ready") {
return;
}
await redis.quit();
}
}
export const lockManager = new LockManager();
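
A minimal usage sketch of the lock manager above, not part of the commit; the import alias follows the #private pattern used elsewhere in this change, and the lock key and wrapped work are illustrative:

import { lockManager } from "#private/lib/lock";

// Run a critical section while holding the distributed lock. If Redis is not
// ready, withLock (above) simply executes the function without locking.
async function reconcileExitNode(exitNodeId: number): Promise<void> {
    await lockManager.withLock(
        `exit-node-reconcile:${exitNodeId}`, // hypothetical lock key
        async () => {
            // ... work that must not run concurrently across nodes ...
        },
        30000 // hold the lock for at most 30 seconds
    );
}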

View File

@@ -124,6 +124,7 @@ export const privateConfigSchema = z.object({
})
)
.optional(),
hide_auth_layout_footer: z.boolean().optional().default(false),
login_page: z
.object({
subtitle_text: z.string().optional(),

View File

@@ -434,9 +434,9 @@ export async function getTraefikConfig(
routerMiddlewares.push(rewriteMiddlewareName);
}
logger.debug(
`Created path rewrite middleware ${rewriteMiddlewareName}: ${resource.pathMatchType}(${resource.path}) -> ${resource.rewritePathType}(${resource.rewritePath})`
);
// logger.debug(
// `Created path rewrite middleware ${rewriteMiddlewareName}: ${resource.pathMatchType}(${resource.path}) -> ${resource.rewritePathType}(${resource.rewritePath})`
// );
} catch (error) {
logger.error(
`Failed to create path rewrite middleware for resource ${resource.resourceId}: ${error}`

View File

@@ -16,4 +16,5 @@ export * from "./verifyRemoteExitNodeAccess";
export * from "./verifyIdpAccess";
export * from "./verifyLoginPageAccess";
export * from "./logActionAudit";
export * from "./verifySubscription";
export * from "./verifySubscription";
export * from "./verifyValidLicense";

View File

@@ -31,7 +31,6 @@ import {
verifyUserIsServerAdmin,
verifySiteAccess,
verifyClientAccess,
verifyClientsEnabled,
} from "@server/middlewares";
import { ActionsEnum } from "@server/auth/actions";
import {
@@ -437,7 +436,6 @@ authenticated.get(
authenticated.post(
"/re-key/:clientId/regenerate-client-secret",
verifyClientsEnabled,
verifyClientAccess,
verifyUserHasAction(ActionsEnum.reGenerateSecret),
reKey.reGenerateClientSecret

View File

@@ -1043,7 +1043,7 @@ hybridRouter.get(
);
}
let rules = await db
const rules = await db
.select()
.from(resourceRules)
.where(eq(resourceRules.resourceId, resourceId));
@@ -1369,7 +1369,7 @@ const updateHolePunchSchema = z.object({
port: z.number(),
timestamp: z.number(),
reachableAt: z.string().optional(),
publicKey: z.string().optional()
publicKey: z.string() // this is the client public key
});
hybridRouter.post(
"/gerbil/update-hole-punch",
@@ -1408,7 +1408,7 @@ hybridRouter.post(
);
}
const { olmId, newtId, ip, port, timestamp, token, reachableAt } =
const { olmId, newtId, ip, port, timestamp, token, publicKey, reachableAt } =
parsedParams.data;
const destinations = await updateAndGenerateEndpointDestinations(
@@ -1418,6 +1418,7 @@ hybridRouter.post(
port,
timestamp,
token,
publicKey,
exitNode,
true
);
@@ -1742,7 +1743,12 @@ hybridRouter.post(
tls: logEntry.tls
}));
await db.insert(requestAuditLog).values(logEntries);
// batch them into inserts of 100 to avoid exceeding parameter limits
const batchSize = 100;
for (let i = 0; i < logEntries.length; i += batchSize) {
const batch = logEntries.slice(i, i + batchSize);
await db.insert(requestAuditLog).values(batch);
}
return response(res, {
data: null,

View File

@@ -13,13 +13,17 @@
import * as orgIdp from "#private/routers/orgIdp";
import * as org from "#private/routers/org";
import * as logs from "#private/routers/auditLogs";
import { Router } from "express";
import {
verifyApiKey,
verifyApiKeyHasAction,
verifyApiKeyIsRoot,
verifyApiKeyOrgAccess,
} from "@server/middlewares";
import {
verifyValidSubscription,
verifyValidLicense
} from "#private/middlewares";
import { ActionsEnum } from "@server/auth/actions";
import { unauthenticated as ua, authenticated as a } from "@server/routers/integration";
@@ -42,4 +46,42 @@ authenticated.delete(
verifyApiKeyHasAction(ActionsEnum.deleteIdp),
logActionAudit(ActionsEnum.deleteIdp),
orgIdp.deleteOrgIdp,
);
);
authenticated.get(
"/org/:orgId/logs/action",
verifyValidLicense,
verifyValidSubscription,
verifyApiKeyOrgAccess,
verifyApiKeyHasAction(ActionsEnum.exportLogs),
logs.queryActionAuditLogs
);
authenticated.get(
"/org/:orgId/logs/action/export",
verifyValidLicense,
verifyValidSubscription,
verifyApiKeyOrgAccess,
verifyApiKeyHasAction(ActionsEnum.exportLogs),
logActionAudit(ActionsEnum.exportLogs),
logs.exportActionAuditLogs
);
authenticated.get(
"/org/:orgId/logs/access",
verifyValidLicense,
verifyValidSubscription,
verifyApiKeyOrgAccess,
verifyApiKeyHasAction(ActionsEnum.exportLogs),
logs.queryAccessAuditLogs
);
authenticated.get(
"/org/:orgId/logs/access/export",
verifyValidLicense,
verifyValidSubscription,
verifyApiKeyOrgAccess,
verifyApiKeyHasAction(ActionsEnum.exportLogs),
logActionAudit(ActionsEnum.exportLogs),
logs.exportAccessAuditLogs
);

View File

@@ -164,7 +164,7 @@ export async function createLoginPage(
.select()
.from(exitNodes)
.where(and(eq(exitNodes.type, "gerbil"), eq(exitNodes.online, true)))
.limit(10);
.limit(10);
}
// select a random exit node

View File

@@ -38,6 +38,7 @@ import { rateLimitService } from "#private/lib/rateLimit";
import { messageHandlers } from "@server/routers/ws/messageHandlers";
import { messageHandlers as privateMessageHandlers } from "#private/routers/ws/messageHandlers";
import { AuthenticatedWebSocket, ClientType, WSMessage, TokenPayload, WebSocketRequest, RedisMessage } from "@server/routers/ws";
import { validateSessionToken } from "@server/auth/sessions/app";
// Merge public and private message handlers
Object.assign(messageHandlers, privateMessageHandlers);
@@ -370,6 +371,9 @@ const sendToClientLocal = async (
client.send(messageString);
}
});
logger.debug(`sendToClient: Message type ${message.type} sent to clientId ${clientId}`);
return true;
};
@@ -478,7 +482,8 @@ const getActiveNodes = async (
// Token verification middleware
const verifyToken = async (
token: string,
clientType: ClientType
clientType: ClientType,
userToken: string
): Promise<TokenPayload | null> => {
try {
if (clientType === "newt") {
@@ -506,6 +511,17 @@ const verifyToken = async (
if (!existingOlm || !existingOlm[0]) {
return null;
}
if (olm.userId) { // this is a user device and we need to check the user token
const { session: userSession, user } = await validateSessionToken(userToken);
if (!userSession || !user) {
return null;
}
if (user.userId !== olm.userId) {
return null;
}
}
return { client: existingOlm[0], session, clientType };
} else if (clientType === "remoteExitNode") {
const { session, remoteExitNode } =
@@ -652,6 +668,7 @@ const handleWSUpgrade = (server: HttpServer): void => {
url.searchParams.get("token") ||
request.headers["sec-websocket-protocol"] ||
"";
const userToken = url.searchParams.get('userToken') || '';
let clientType = url.searchParams.get(
"clientType"
) as ClientType;
@@ -673,7 +690,7 @@ const handleWSUpgrade = (server: HttpServer): void => {
return;
}
const tokenPayload = await verifyToken(token, clientType);
const tokenPayload = await verifyToken(token, clientType, userToken);
if (!tokenPayload) {
logger.debug(
"Unauthorized connection attempt: invalid token..."
@@ -792,6 +809,28 @@ if (redisManager.isRedisEnabled()) {
);
}
// Disconnect a specific client and force them to reconnect
const disconnectClient = async (clientId: string): Promise<boolean> => {
const mapKey = getClientMapKey(clientId);
const clients = connectedClients.get(mapKey);
if (!clients || clients.length === 0) {
logger.debug(`No connections found for client ID: ${clientId}`);
return false;
}
logger.info(`Disconnecting client ID: ${clientId} (${clients.length} connection(s))`);
// Close all connections for this client
clients.forEach((client) => {
if (client.readyState === WebSocket.OPEN) {
client.close(1000, "Disconnected by server");
}
});
return true;
};
// Cleanup function for graceful shutdown
const cleanup = async (): Promise<void> => {
try {
@@ -829,6 +868,7 @@ export {
connectedClients,
hasActiveConnections,
getActiveNodes,
disconnectClient,
NODE_ID,
cleanup
};
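
A hedged sketch of how a user-owned olm might open the socket with both credentials, based on the query parameters read above; the endpoint path and the ws client library are assumptions:

import WebSocket from "ws";

// The olm token authenticates the device; userToken is validated against
// olm.userId by verifyToken above when the olm belongs to a user.
function connectUserOlm(
    baseUrl: string,
    olmToken: string,
    userSessionToken: string
): WebSocket {
    const url = new URL("/api/v1/ws", baseUrl); // hypothetical path
    url.searchParams.set("clientType", "olm");
    url.searchParams.set("token", olmToken);
    url.searchParams.set("userToken", userSessionToken);
    return new WebSocket(url);
}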

View File

@@ -131,7 +131,7 @@ export function queryRequest(data: Q) {
eq(requestAuditLog.resourceId, resources.resourceId)
) // TODO: Is this efficient?
.where(getWhere(data))
.orderBy(desc(requestAuditLog.timestamp), desc(requestAuditLog.id));
.orderBy(desc(requestAuditLog.timestamp));
}
export function countRequestQuery(data: Q) {

View File

@@ -13,4 +13,7 @@ export * from "./initialSetupComplete";
export * from "./validateSetupToken";
export * from "./changePassword";
export * from "./checkResourceSession";
export * from "./securityKey";
export * from "./securityKey";
export * from "./startDeviceWebAuth";
export * from "./verifyDeviceWebAuth";
export * from "./pollDeviceWebAuth";

View File

@@ -1,7 +1,9 @@
import {
createSession,
generateSessionToken,
serializeSessionCookie
invalidateSession,
serializeSessionCookie,
SESSION_COOKIE_NAME
} from "@server/auth/sessions/app";
import { db, resources } from "@server/db";
import { users, securityKeys } from "@server/db";
@@ -21,11 +23,11 @@ import { UserType } from "@server/types/UserTypes";
import { logAccessAudit } from "#dynamic/lib/logAccessAudit";
export const loginBodySchema = z.strictObject({
email: z.email().toLowerCase(),
password: z.string(),
code: z.string().optional(),
resourceGuid: z.string().optional()
});
email: z.email().toLowerCase(),
password: z.string(),
code: z.string().optional(),
resourceGuid: z.string().optional()
});
export type LoginBody = z.infer<typeof loginBodySchema>;
@@ -41,6 +43,21 @@ export async function login(
res: Response,
next: NextFunction
): Promise<any> {
const { forceLogin } = req.query;
const { session: existingSession } = await verifySession(
req,
forceLogin === "true"
);
if (existingSession) {
return response<null>(res, {
data: null,
success: true,
error: false,
message: "Already logged in",
status: HttpCode.OK
});
}
const parsedBody = loginBodySchema.safeParse(req.body);
if (!parsedBody.success) {
@@ -55,17 +72,6 @@ export async function login(
const { email, password, code, resourceGuid } = parsedBody.data;
try {
const { session: existingSession } = await verifySession(req);
if (existingSession) {
return response<null>(res, {
data: null,
success: true,
error: false,
message: "Already logged in",
status: HttpCode.OK
});
}
let resourceId: number | null = null;
let orgId: string | null = null;
if (resourceGuid) {
@@ -225,6 +231,12 @@ export async function login(
}
}
// check for previous cookie value and expire it
const previousCookie = req.cookies[SESSION_COOKIE_NAME];
if (previousCookie) {
await invalidateSession(previousCookie);
}
const token = generateSessionToken();
const sess = await createSession(token, existingUser.userId);
const isSecure = req.protocol === "https";

View File

@@ -0,0 +1,168 @@
import { Request, Response, NextFunction } from "express";
import createHttpError from "http-errors";
import { z } from "zod";
import { fromError } from "zod-validation-error";
import HttpCode from "@server/types/HttpCode";
import logger from "@server/logger";
import { response } from "@server/lib/response";
import { db, deviceWebAuthCodes } from "@server/db";
import { eq, and, gt } from "drizzle-orm";
import {
createSession,
generateSessionToken
} from "@server/auth/sessions/app";
import { encodeHexLowerCase } from "@oslojs/encoding";
import { sha256 } from "@oslojs/crypto/sha2";
const paramsSchema = z.object({
code: z.string().min(1, "Code is required")
});
export type PollDeviceWebAuthParams = z.infer<typeof paramsSchema>;
// Helper function to hash device code before querying database
function hashDeviceCode(code: string): string {
return encodeHexLowerCase(
sha256(new TextEncoder().encode(code))
);
}
export type PollDeviceWebAuthResponse = {
verified: boolean;
token?: string;
};
// Helper function to extract IP from request (same as in startDeviceWebAuth)
function extractIpFromRequest(req: Request): string | undefined {
const ip = req.ip || req.socket.remoteAddress;
if (!ip) {
return undefined;
}
// Handle IPv6 format [::1] or IPv4 format
if (ip.startsWith("[") && ip.includes("]")) {
const ipv6Match = ip.match(/\[(.*?)\]/);
if (ipv6Match) {
return ipv6Match[1];
}
}
// Handle IPv4 with port (split at last colon)
const lastColonIndex = ip.lastIndexOf(":");
if (lastColonIndex !== -1) {
return ip.substring(0, lastColonIndex);
}
return ip;
}
export async function pollDeviceWebAuth(
req: Request,
res: Response,
next: NextFunction
): Promise<any> {
const parsedParams = paramsSchema.safeParse(req.params);
if (!parsedParams.success) {
return next(
createHttpError(
HttpCode.BAD_REQUEST,
fromError(parsedParams.error).toString()
)
);
}
try {
const { code } = parsedParams.data;
const now = Date.now();
const requestIp = extractIpFromRequest(req);
// Hash the code before querying
const hashedCode = hashDeviceCode(code);
// Find the code in the database
const [deviceCode] = await db
.select()
.from(deviceWebAuthCodes)
.where(eq(deviceWebAuthCodes.code, hashedCode))
.limit(1);
if (!deviceCode) {
return response<PollDeviceWebAuthResponse>(res, {
data: {
verified: false
},
success: true,
error: false,
message: "Code not found",
status: HttpCode.OK
});
}
// Check if code is expired
if (deviceCode.expiresAt <= now) {
return response<PollDeviceWebAuthResponse>(res, {
data: {
verified: false
},
success: true,
error: false,
message: "Code expired",
status: HttpCode.OK
});
}
// Check if code is verified
if (!deviceCode.verified) {
return response<PollDeviceWebAuthResponse>(res, {
data: {
verified: false
},
success: true,
error: false,
message: "Code not yet verified",
status: HttpCode.OK
});
}
// Check if userId is set (should be set when verified)
if (!deviceCode.userId) {
logger.error("Device code is verified but userId is missing", { codeId: deviceCode.codeId });
return next(
createHttpError(
HttpCode.INTERNAL_SERVER_ERROR,
"Invalid code state"
)
);
}
// Generate session token
const token = generateSessionToken();
await createSession(token, deviceCode.userId);
// Delete the code after successful exchange for a token
await db
.delete(deviceWebAuthCodes)
.where(eq(deviceWebAuthCodes.codeId, deviceCode.codeId));
return response<PollDeviceWebAuthResponse>(res, {
data: {
verified: true,
token
},
success: true,
error: false,
message: "Code verified and session created",
status: HttpCode.OK
});
} catch (e) {
logger.error(e);
return next(
createHttpError(
HttpCode.INTERNAL_SERVER_ERROR,
"Failed to poll device code"
)
);
}
}

View File

@@ -52,7 +52,7 @@ setInterval(async () => {
await db
.delete(webauthnChallenge)
.where(lt(webauthnChallenge.expiresAt, now));
logger.debug("Cleaned up expired security key challenges");
// logger.debug("Cleaned up expired security key challenges");
} catch (error) {
logger.error("Failed to clean up expired security key challenges", error);
}

View File

@@ -0,0 +1,156 @@
import { Request, Response, NextFunction } from "express";
import createHttpError from "http-errors";
import { z } from "zod";
import { fromError } from "zod-validation-error";
import HttpCode from "@server/types/HttpCode";
import logger from "@server/logger";
import { response } from "@server/lib/response";
import { db, deviceWebAuthCodes } from "@server/db";
import { alphabet, generateRandomString } from "oslo/crypto";
import { createDate } from "oslo";
import { TimeSpan } from "oslo";
import { maxmindLookup } from "@server/db/maxmind";
import { encodeHexLowerCase } from "@oslojs/encoding";
import { sha256 } from "@oslojs/crypto/sha2";
const bodySchema = z.object({
deviceName: z.string().optional(),
applicationName: z.string().min(1, "Application name is required")
}).strict();
export type StartDeviceWebAuthBody = z.infer<typeof bodySchema>;
export type StartDeviceWebAuthResponse = {
code: string;
expiresInSeconds: number;
};
// Helper function to generate device code in format A1AJ-N5JD
function generateDeviceCode(): string {
const part1 = generateRandomString(4, alphabet("A-Z", "0-9"));
const part2 = generateRandomString(4, alphabet("A-Z", "0-9"));
return `${part1}-${part2}`;
}
// Helper function to hash device code before storing in database
function hashDeviceCode(code: string): string {
return encodeHexLowerCase(
sha256(new TextEncoder().encode(code))
);
}
// Helper function to extract IP from request
function extractIpFromRequest(req: Request): string | undefined {
const ip = req.ip || req.socket.remoteAddress;
if (!ip) {
return undefined;
}
// Handle IPv6 format [::1] or IPv4 format
if (ip.startsWith("[") && ip.includes("]")) {
const ipv6Match = ip.match(/\[(.*?)\]/);
if (ipv6Match) {
return ipv6Match[1];
}
}
// Handle IPv4 with port (split at last colon)
const lastColonIndex = ip.lastIndexOf(":");
if (lastColonIndex !== -1) {
return ip.substring(0, lastColonIndex);
}
return ip;
}
// Helper function to get city from IP (if available)
async function getCityFromIp(ip: string): Promise<string | undefined> {
try {
if (!maxmindLookup) {
return undefined;
}
const result = maxmindLookup.get(ip);
if (!result) {
return undefined;
}
        // The MaxMind CountryResponse type used here does not include city data.
        // If it were available it would be at result.city?.names?.en, but only the
        // country-level lookup is wired up, so city resolution is skipped for now.
return undefined;
} catch (error) {
logger.debug("Failed to get city from IP", error);
return undefined;
}
}
export async function startDeviceWebAuth(
req: Request,
res: Response,
next: NextFunction
): Promise<any> {
const parsedBody = bodySchema.safeParse(req.body);
if (!parsedBody.success) {
return next(
createHttpError(
HttpCode.BAD_REQUEST,
fromError(parsedBody.error).toString()
)
);
}
try {
const { deviceName, applicationName } = parsedBody.data;
// Generate device code
const code = generateDeviceCode();
// Hash the code before storing in database
const hashedCode = hashDeviceCode(code);
// Extract IP from request
const ip = extractIpFromRequest(req);
// Get city (optional, may return undefined)
const city = ip ? await getCityFromIp(ip) : undefined;
// Set expiration to 5 minutes from now
const expiresAt = createDate(new TimeSpan(5, "m")).getTime();
// Insert into database (store hashed code)
await db.insert(deviceWebAuthCodes).values({
code: hashedCode,
ip: ip || null,
city: city || null,
deviceName: deviceName || null,
applicationName,
expiresAt,
createdAt: Date.now()
});
// calculate relative expiration in seconds
const expiresInSeconds = Math.floor((expiresAt - Date.now()) / 1000);
return response<StartDeviceWebAuthResponse>(res, {
data: {
code,
expiresInSeconds
},
success: true,
error: false,
message: "Device web auth code generated",
status: HttpCode.OK
});
} catch (e) {
logger.error(e);
return next(
createHttpError(
HttpCode.INTERNAL_SERVER_ERROR,
"Failed to start device web auth"
)
);
}
}

View File

@@ -0,0 +1,180 @@
import { Request, Response, NextFunction } from "express";
import createHttpError from "http-errors";
import { z } from "zod";
import { fromError } from "zod-validation-error";
import HttpCode from "@server/types/HttpCode";
import logger from "@server/logger";
import { response } from "@server/lib/response";
import { db, deviceWebAuthCodes, sessions } from "@server/db";
import { eq, and, gt } from "drizzle-orm";
import { encodeHexLowerCase } from "@oslojs/encoding";
import { sha256 } from "@oslojs/crypto/sha2";
import { unauthorized } from "@server/auth/unauthorizedResponse";
const bodySchema = z
.object({
code: z.string().min(1, "Code is required"),
verify: z.boolean().optional().default(false) // If false, just check and return metadata
})
.strict();
// Helper function to hash device code before querying database
function hashDeviceCode(code: string): string {
return encodeHexLowerCase(sha256(new TextEncoder().encode(code)));
}
export type VerifyDeviceWebAuthBody = z.infer<typeof bodySchema>;
export type VerifyDeviceWebAuthResponse = {
success: boolean;
message: string;
metadata?: {
ip: string | null;
city: string | null;
deviceName: string | null;
applicationName: string;
createdAt: number;
};
};
export async function verifyDeviceWebAuth(
req: Request,
res: Response,
next: NextFunction
): Promise<any> {
const { user, session } = req;
if (!user || !session) {
return next(createHttpError(HttpCode.UNAUTHORIZED, "Unauthorized"));
}
if (session.deviceAuthUsed) {
return next(
createHttpError(
HttpCode.UNAUTHORIZED,
"Device web auth code already used for this session"
)
);
}
if (!session.issuedAt) {
return next(
createHttpError(
HttpCode.UNAUTHORIZED,
"Session issuedAt timestamp missing"
)
);
}
// make sure sessions is not older than 5 minutes
const now = Date.now();
if (now - session.issuedAt > 5 * 60 * 1000) {
return next(
createHttpError(
HttpCode.UNAUTHORIZED,
"Session is too old to verify device web auth code"
)
);
}
const parsedBody = bodySchema.safeParse(req.body);
if (!parsedBody.success) {
return next(
createHttpError(
HttpCode.BAD_REQUEST,
fromError(parsedBody.error).toString()
)
);
}
try {
const { code, verify } = parsedBody.data;
const now = Date.now();
logger.debug("Verifying device web auth code:", { code });
// Hash the code before querying
const hashedCode = hashDeviceCode(code);
// Find the code in the database that is not expired and not already verified
const [deviceCode] = await db
.select()
.from(deviceWebAuthCodes)
.where(
and(
eq(deviceWebAuthCodes.code, hashedCode),
gt(deviceWebAuthCodes.expiresAt, now),
eq(deviceWebAuthCodes.verified, false)
)
)
.limit(1);
logger.debug("Device code lookup result:", deviceCode);
if (!deviceCode) {
return next(
createHttpError(
HttpCode.BAD_REQUEST,
"Invalid, expired, or already verified code"
)
);
}
// If verify is false, just return metadata without verifying
if (!verify) {
return response<VerifyDeviceWebAuthResponse>(res, {
data: {
success: true,
message: "Code is valid",
metadata: {
ip: deviceCode.ip,
city: deviceCode.city,
deviceName: deviceCode.deviceName,
applicationName: deviceCode.applicationName,
createdAt: deviceCode.createdAt
}
},
success: true,
error: false,
message: "Code validation successful",
status: HttpCode.OK
});
}
// Update the code to mark it as verified and store the user who verified it
await db
.update(deviceWebAuthCodes)
.set({
verified: true,
userId: req.user!.userId
})
.where(eq(deviceWebAuthCodes.codeId, deviceCode.codeId));
// Also update the session to mark that device auth was used
await db
.update(sessions)
.set({
deviceAuthUsed: true
})
.where(eq(sessions.sessionId, session.sessionId));
return response<VerifyDeviceWebAuthResponse>(res, {
data: {
success: true,
message: "Device code verified successfully"
},
success: true,
error: false,
message: "Device code verified successfully",
status: HttpCode.OK
});
} catch (e) {
logger.error(e);
return next(
createHttpError(
HttpCode.INTERNAL_SERVER_ERROR,
"Failed to verify device code"
)
);
}
}
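
Taken together, startDeviceWebAuth, verifyDeviceWebAuth, and pollDeviceWebAuth form a device-code flow: the device requests a short-lived code, the user approves it from an authenticated browser session, and the device polls until it receives a session token. A hedged client-side sketch; the route paths and response envelope are assumptions based on the handlers above:

// Hypothetical paths — the real route registrations live elsewhere in the tree.
async function deviceLogin(baseUrl: string): Promise<string> {
    const start = await fetch(`${baseUrl}/api/v1/auth/device/web`, {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        body: JSON.stringify({ applicationName: "olm" })
    }).then((r) => r.json());

    console.log(`Approve code ${start.data.code} in the web UI`);

    const deadline = Date.now() + start.data.expiresInSeconds * 1000;
    while (Date.now() < deadline) {
        const poll = await fetch(
            `${baseUrl}/api/v1/auth/device/web/${encodeURIComponent(start.data.code)}`
        ).then((r) => r.json());
        if (poll.data.verified && poll.data.token) {
            return poll.data.token; // session token for subsequent requests
        }
        await new Promise((resolve) => setTimeout(resolve, 3000)); // poll every 3s
    }
    throw new Error("Device code expired before it was verified");
}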

View File

@@ -69,9 +69,9 @@ export async function cleanUpOldLogs(orgId: string, retentionDays: number) {
)
);
logger.debug(
`Cleaned up request audit logs older than ${retentionDays} days`
);
// logger.debug(
// `Cleaned up request audit logs older than ${retentionDays} days`
// );
} catch (error) {
logger.error("Error cleaning up old request audit logs:", error);
}

View File

@@ -8,8 +8,6 @@ import {
roleClients,
userClients,
olms,
clientSites,
exitNodes,
orgs,
sites
} from "@server/db";
@@ -21,23 +19,24 @@ import { eq, and } from "drizzle-orm";
import { fromError } from "zod-validation-error";
import moment from "moment";
import { hashPassword } from "@server/auth/password";
import { isValidCIDR, isValidIP } from "@server/lib/validators";
import { isValidIP } from "@server/lib/validators";
import { isIpInCidr } from "@server/lib/ip";
import { OpenAPITags, registry } from "@server/openApi";
import { listExitNodes } from "#dynamic/lib/exitNodes";
import { generateId } from "@server/auth/sessions/app";
import { OpenAPITags, registry } from "@server/openApi";
import { rebuildClientAssociationsFromClient } from "@server/lib/rebuildClientAssociations";
const createClientParamsSchema = z.strictObject({
orgId: z.string()
});
orgId: z.string()
});
const createClientSchema = z.strictObject({
name: z.string().min(1).max(255),
siteIds: z.array(z.int().positive()),
olmId: z.string(),
secret: z.string(),
subnet: z.string(),
type: z.enum(["olm"])
});
name: z.string().min(1).max(255),
olmId: z.string(),
secret: z.string(),
subnet: z.string(),
type: z.enum(["olm"])
});
export type CreateClientBody = z.infer<typeof createClientSchema>;
@@ -46,7 +45,7 @@ export type CreateClientResponse = Client;
registry.registerPath({
method: "put",
path: "/org/{orgId}/client",
description: "Create a new client.",
description: "Create a new client for an organization.",
tags: [OpenAPITags.Client, OpenAPITags.Org],
request: {
params: createClientParamsSchema,
@@ -77,7 +76,7 @@ export async function createClient(
);
}
const { name, type, siteIds, olmId, secret, subnet } = parsedBody.data;
const { name, type, olmId, secret, subnet } = parsedBody.data;
const parsedParams = createClientParamsSchema.safeParse(req.params);
if (!parsedParams.success) {
@@ -172,75 +171,90 @@ export async function createClient(
);
}
// check if the olmId already exists
const [existingOlm] = await db
.select()
.from(olms)
.where(eq(olms.olmId, olmId))
.limit(1);
if (existingOlm) {
return next(
createHttpError(
HttpCode.CONFLICT,
`OLM with ID ${olmId} already exists`
)
);
}
let newClient: Client | null = null;
await db.transaction(async (trx) => {
// TODO: more intelligent way to pick the exit node
const exitNodesList = await listExitNodes(orgId);
const randomExitNode =
exitNodesList[Math.floor(Math.random() * exitNodesList.length)];
const adminRole = await trx
const [adminRole] = await trx
.select()
.from(roles)
.where(and(eq(roles.isAdmin, true), eq(roles.orgId, orgId)))
.limit(1);
if (adminRole.length === 0) {
trx.rollback();
if (!adminRole) {
return next(
createHttpError(HttpCode.NOT_FOUND, `Admin role not found`)
);
}
const [newClient] = await trx
[newClient] = await trx
.insert(clients)
.values({
exitNodeId: randomExitNode.exitNodeId,
orgId,
name,
subnet: updatedSubnet,
type
type,
olmId // this is to lock it to a specific olm even if the olm moves across clients
})
.returning();
await trx.insert(roleClients).values({
roleId: adminRole[0].roleId,
roleId: adminRole.roleId,
clientId: newClient.clientId
});
if (req.user && req.userOrgRoleId != adminRole[0].roleId) {
// make sure the user can access the site
if (req.user && req.userOrgRoleId != adminRole.roleId) {
// make sure the user can access the client
trx.insert(userClients).values({
userId: req.user?.userId!,
userId: req.user.userId,
clientId: newClient.clientId
});
}
// Create site to client associations
if (siteIds && siteIds.length > 0) {
await trx.insert(clientSites).values(
siteIds.map((siteId) => ({
clientId: newClient.clientId,
siteId
}))
);
let secretToUse = secret;
if (!secretToUse) {
secretToUse = generateId(48);
}
const secretHash = await hashPassword(secret);
const secretHash = await hashPassword(secretToUse);
await trx.insert(olms).values({
olmId,
secretHash,
name,
clientId: newClient.clientId,
dateCreated: moment().toISOString()
});
return response<CreateClientResponse>(res, {
data: newClient,
success: true,
error: false,
message: "Site created successfully",
status: HttpCode.CREATED
});
await rebuildClientAssociationsFromClient(newClient, trx);
});
return response<CreateClientResponse>(res, {
data: newClient,
success: true,
error: false,
message: "Site created successfully",
status: HttpCode.CREATED
});
} catch (error) {
logger.error(error);

View File

@@ -0,0 +1,253 @@
import { Request, Response, NextFunction } from "express";
import { z } from "zod";
import { db } from "@server/db";
import {
roles,
Client,
clients,
roleClients,
userClients,
olms,
orgs,
sites
} from "@server/db";
import response from "@server/lib/response";
import HttpCode from "@server/types/HttpCode";
import createHttpError from "http-errors";
import logger from "@server/logger";
import { eq, and } from "drizzle-orm";
import { fromError } from "zod-validation-error";
import { isValidIP } from "@server/lib/validators";
import { isIpInCidr } from "@server/lib/ip";
import { listExitNodes } from "#dynamic/lib/exitNodes";
import { OpenAPITags, registry } from "@server/openApi";
import { rebuildClientAssociationsFromClient } from "@server/lib/rebuildClientAssociations";
const paramsSchema = z
.object({
orgId: z.string(),
userId: z.string()
})
.strict();
const bodySchema = z
.object({
name: z.string().min(1).max(255),
olmId: z.string(),
subnet: z.string(),
type: z.enum(["olm"])
})
.strict();
export type CreateClientAndOlmBody = z.infer<typeof bodySchema>;
export type CreateClientAndOlmResponse = Client;
registry.registerPath({
method: "put",
path: "/org/{orgId}/user/{userId}/client",
description:
"Create a new client for a user and associate it with an existing olm.",
tags: [OpenAPITags.Client, OpenAPITags.Org, OpenAPITags.User],
request: {
params: paramsSchema,
body: {
content: {
"application/json": {
schema: bodySchema
}
}
}
},
responses: {}
});
export async function createUserClient(
req: Request,
res: Response,
next: NextFunction
): Promise<any> {
try {
const parsedBody = bodySchema.safeParse(req.body);
if (!parsedBody.success) {
return next(
createHttpError(
HttpCode.BAD_REQUEST,
fromError(parsedBody.error).toString()
)
);
}
const { name, type, olmId, subnet } = parsedBody.data;
const parsedParams = paramsSchema.safeParse(req.params);
if (!parsedParams.success) {
return next(
createHttpError(
HttpCode.BAD_REQUEST,
fromError(parsedParams.error).toString()
)
);
}
const { orgId, userId } = parsedParams.data;
if (!isValidIP(subnet)) {
return next(
createHttpError(
HttpCode.BAD_REQUEST,
"Invalid subnet format. Please provide a valid CIDR notation."
)
);
}
const [org] = await db.select().from(orgs).where(eq(orgs.orgId, orgId));
if (!org) {
return next(
createHttpError(
HttpCode.NOT_FOUND,
`Organization with ID ${orgId} not found`
)
);
}
if (!org.subnet) {
return next(
createHttpError(
HttpCode.BAD_REQUEST,
`Organization with ID ${orgId} has no subnet defined`
)
);
}
if (!isIpInCidr(subnet, org.subnet)) {
return next(
createHttpError(
HttpCode.BAD_REQUEST,
"IP is not in the CIDR range of the subnet."
)
);
}
const updatedSubnet = `${subnet}/${org.subnet.split("/")[1]}`; // we want the block size of the whole org
// make sure the subnet is unique
const subnetExistsClients = await db
.select()
.from(clients)
.where(
and(eq(clients.subnet, updatedSubnet), eq(clients.orgId, orgId))
)
.limit(1);
if (subnetExistsClients.length > 0) {
return next(
createHttpError(
HttpCode.CONFLICT,
`Subnet ${updatedSubnet} already exists in clients`
)
);
}
const subnetExistsSites = await db
.select()
.from(sites)
.where(
and(eq(sites.address, updatedSubnet), eq(sites.orgId, orgId))
)
.limit(1);
if (subnetExistsSites.length > 0) {
return next(
createHttpError(
HttpCode.CONFLICT,
`Subnet ${updatedSubnet} already exists in sites`
)
);
}
        // make sure the olm exists and belongs to the requesting user
const [existingOlm] = await db
.select()
.from(olms)
.where(eq(olms.olmId, olmId))
.limit(1);
if (!existingOlm) {
return next(
createHttpError(
HttpCode.NOT_FOUND,
`OLM with ID ${olmId} does not exist`
)
);
}
if (existingOlm.userId !== userId) {
return next(
createHttpError(
HttpCode.BAD_REQUEST,
`OLM with ID ${olmId} does not belong to user with ID ${userId}`
)
);
}
let newClient: Client | null = null;
await db.transaction(async (trx) => {
// TODO: more intelligent way to pick the exit node
const exitNodesList = await listExitNodes(orgId);
const randomExitNode =
exitNodesList[Math.floor(Math.random() * exitNodesList.length)];
const [adminRole] = await trx
.select()
.from(roles)
.where(and(eq(roles.isAdmin, true), eq(roles.orgId, orgId)))
.limit(1);
if (!adminRole) {
return next(
createHttpError(HttpCode.NOT_FOUND, `Admin role not found`)
);
}
[newClient] = await trx
.insert(clients)
.values({
exitNodeId: randomExitNode.exitNodeId,
orgId,
name,
subnet: updatedSubnet,
type,
olmId, // this is to lock it to a specific olm even if the olm moves across clients
userId
})
.returning();
await trx.insert(roleClients).values({
roleId: adminRole.roleId,
clientId: newClient.clientId
});
            await trx.insert(userClients).values({
userId,
clientId: newClient.clientId
});
await rebuildClientAssociationsFromClient(newClient, trx);
});
return response<CreateClientAndOlmResponse>(res, {
data: newClient,
success: true,
error: false,
message: "Site created successfully",
status: HttpCode.CREATED
});
} catch (error) {
logger.error(error);
return next(
createHttpError(HttpCode.INTERNAL_SERVER_ERROR, "An error occurred")
);
}
}

View File

@@ -1,7 +1,7 @@
import { Request, Response, NextFunction } from "express";
import { z } from "zod";
import { db } from "@server/db";
import { clients, clientSites } from "@server/db";
import { db, olms } from "@server/db";
import { clients, clientSitesAssociationsCache } from "@server/db";
import { eq } from "drizzle-orm";
import response from "@server/lib/response";
import HttpCode from "@server/types/HttpCode";
@@ -9,10 +9,12 @@ import createHttpError from "http-errors";
import logger from "@server/logger";
import { fromError } from "zod-validation-error";
import { OpenAPITags, registry } from "@server/openApi";
import { rebuildClientAssociationsFromClient } from "@server/lib/rebuildClientAssociations";
import { sendTerminateClient } from "./terminate";
const deleteClientSchema = z.strictObject({
clientId: z.string().transform(Number).pipe(z.int().positive())
});
clientId: z.string().transform(Number).pipe(z.int().positive())
});
registry.registerPath({
method: "delete",
@@ -58,16 +60,38 @@ export async function deleteClient(
);
}
await db.transaction(async (trx) => {
// Delete the client-site associations first
await trx
.delete(clientSites)
.where(eq(clientSites.clientId, clientId));
if (client.userId) {
return next(
createHttpError(
HttpCode.BAD_REQUEST,
`Cannot delete a user client with this endpoint`
)
);
}
await db.transaction(async (trx) => {
// Then delete the client itself
await trx
const [deletedClient] = await trx
.delete(clients)
.where(eq(clients.clientId, clientId));
.where(eq(clients.clientId, clientId))
.returning();
const [olm] = await trx
.select()
.from(olms)
.where(eq(olms.clientId, clientId))
.limit(1);
// this is a machine client so we also delete the olm
if (!client.userId && client.olmId) {
await trx.delete(olms).where(eq(olms.olmId, client.olmId));
}
await rebuildClientAssociationsFromClient(deletedClient, trx);
if (olm) {
                await sendTerminateClient(deletedClient.clientId, olm.olmId); // the olmId must be passed explicitly because it can't be looked up after deletion
}
});
return response(res, {

View File

@@ -1,7 +1,7 @@
import { Request, Response, NextFunction } from "express";
import { z } from "zod";
import { db } from "@server/db";
import { clients, clientSites } from "@server/db";
import { clients, clientSitesAssociationsCache } from "@server/db";
import { eq, and } from "drizzle-orm";
import response from "@server/lib/response";
import HttpCode from "@server/types/HttpCode";
@@ -29,9 +29,9 @@ async function query(clientId: number) {
// Get the siteIds associated with this client
const sites = await db
.select({ siteId: clientSites.siteId })
.from(clientSites)
.where(eq(clientSites.clientId, clientId));
.select({ siteId: clientSitesAssociationsCache.siteId })
.from(clientSitesAssociationsCache)
.where(eq(clientSitesAssociationsCache.clientId, clientId));
// Add the siteIds to the client object
return {

View File

@@ -3,4 +3,5 @@ export * from "./createClient";
export * from "./deleteClient";
export * from "./listClients";
export * from "./updateClient";
export * from "./getClient";
export * from "./getClient";
export * from "./createUserClient";

View File

@@ -1,16 +1,16 @@
import { db, olms } from "@server/db";
import { db, olms, users } from "@server/db";
import {
clients,
orgs,
roleClients,
sites,
userClients,
clientSites
clientSitesAssociationsCache
} from "@server/db";
import logger from "@server/logger";
import HttpCode from "@server/types/HttpCode";
import response from "@server/lib/response";
import { and, count, eq, inArray, or, sql } from "drizzle-orm";
import { and, count, eq, inArray, isNotNull, isNull, or, sql } from "drizzle-orm";
import { NextFunction, Request, Response } from "express";
import createHttpError from "http-errors";
import { z } from "zod";
@@ -19,7 +19,7 @@ import { OpenAPITags, registry } from "@server/openApi";
import NodeCache from "node-cache";
import semver from "semver";
const olmVersionCache = new NodeCache({ stdTTL: 3600 });
const olmVersionCache = new NodeCache({ stdTTL: 3600 });
async function getLatestOlmVersion(): Promise<string | null> {
try {
@@ -29,7 +29,7 @@ async function getLatestOlmVersion(): Promise<string | null> {
}
const controller = new AbortController();
const timeoutId = setTimeout(() => controller.abort(), 1500);
const timeoutId = setTimeout(() => controller.abort(), 1500);
const response = await fetch(
"https://api.github.com/repos/fosrl/olm/tags",
@@ -94,10 +94,25 @@ const listClientsSchema = z.object({
.optional()
.default("0")
.transform(Number)
.pipe(z.int().nonnegative())
.pipe(z.int().nonnegative()),
filter: z
.enum(["user", "machine"])
.optional()
});
function queryClients(orgId: string, accessibleClientIds: number[]) {
function queryClients(orgId: string, accessibleClientIds: number[], filter?: "user" | "machine") {
const conditions = [
inArray(clients.clientId, accessibleClientIds),
eq(clients.orgId, orgId)
];
// Add filter condition based on filter type
if (filter === "user") {
conditions.push(isNotNull(clients.userId));
} else if (filter === "machine") {
conditions.push(isNull(clients.userId));
}
return db
.select({
clientId: clients.clientId,
@@ -110,17 +125,16 @@ function queryClients(orgId: string, accessibleClientIds: number[]) {
orgName: orgs.name,
type: clients.type,
online: clients.online,
olmVersion: olms.version
olmVersion: olms.version,
userId: clients.userId,
username: users.username,
userEmail: users.email
})
.from(clients)
.leftJoin(orgs, eq(clients.orgId, orgs.orgId))
.leftJoin(olms, eq(clients.clientId, olms.clientId))
.where(
and(
inArray(clients.clientId, accessibleClientIds),
eq(clients.orgId, orgId)
)
);
.leftJoin(users, eq(clients.userId, users.userId))
.where(and(...conditions));
}
async function getSiteAssociations(clientIds: number[]) {
@@ -128,14 +142,14 @@ async function getSiteAssociations(clientIds: number[]) {
return db
.select({
clientId: clientSites.clientId,
siteId: clientSites.siteId,
clientId: clientSitesAssociationsCache.clientId,
siteId: clientSitesAssociationsCache.siteId,
siteName: sites.name,
siteNiceId: sites.niceId
})
.from(clientSites)
.leftJoin(sites, eq(clientSites.siteId, sites.siteId))
.where(inArray(clientSites.clientId, clientIds));
.from(clientSitesAssociationsCache)
.leftJoin(sites, eq(clientSitesAssociationsCache.siteId, sites.siteId))
.where(inArray(clientSitesAssociationsCache.clientId, clientIds));
}
type OlmWithUpdateAvailable = Awaited<ReturnType<typeof queryClients>>[0] & {
@@ -182,7 +196,7 @@ export async function listClients(
)
);
}
const { limit, offset } = parsedQuery.data;
const { limit, offset, filter } = parsedQuery.data;
const parsedParams = listClientsParamsSchema.safeParse(req.params);
if (!parsedParams.success) {
@@ -231,18 +245,24 @@ export async function listClients(
const accessibleClientIds = accessibleClients.map(
(client) => client.clientId
);
const baseQuery = queryClients(orgId, accessibleClientIds);
const baseQuery = queryClients(orgId, accessibleClientIds, filter);
// Get client count with filter
const countConditions = [
inArray(clients.clientId, accessibleClientIds),
eq(clients.orgId, orgId)
];
if (filter === "user") {
countConditions.push(isNotNull(clients.userId));
} else if (filter === "machine") {
countConditions.push(isNull(clients.userId));
}
// Get client count
const countQuery = db
.select({ count: count() })
.from(clients)
.where(
and(
inArray(clients.clientId, accessibleClientIds),
eq(clients.orgId, orgId)
)
);
.where(and(...countConditions));
const clientsList = await baseQuery.limit(limit).offset(offset);
const totalCountResult = await countQuery;

View File

@@ -1,35 +1,136 @@
import { sendToClient } from "#dynamic/routers/ws";
import { db, olms } from "@server/db";
import { Alias, SubnetProxyTarget } from "@server/lib/ip";
import logger from "@server/logger";
import { eq } from "drizzle-orm";
export async function addTargets(
newtId: string,
destinationIp: string,
destinationPort: number,
protocol: string,
port: number
) {
const target = `${port}:${destinationIp}:${destinationPort}`;
export async function addTargets(newtId: string, targets: SubnetProxyTarget[]) {
await sendToClient(newtId, {
type: `newt/wg/${protocol}/add`,
data: {
targets: [target] // We can only use one target for WireGuard right now
}
type: `newt/wg/targets/add`,
data: targets
});
}
export async function removeTargets(
newtId: string,
destinationIp: string,
destinationPort: number,
protocol: string,
port: number
targets: SubnetProxyTarget[]
) {
const target = `${port}:${destinationIp}:${destinationPort}`;
await sendToClient(newtId, {
type: `newt/wg/${protocol}/remove`,
data: {
targets: [target] // We can only use one target for WireGuard right now
}
type: `newt/wg/targets/remove`,
data: targets
});
}
export async function updateTargets(
newtId: string,
targets: {
oldTargets: SubnetProxyTarget[];
newTargets: SubnetProxyTarget[];
}
) {
await sendToClient(newtId, {
type: `newt/wg/targets/update`,
data: targets
}).catch((error) => {
logger.warn(`Error sending message:`, error);
});
}
export async function addPeerData(
clientId: number,
siteId: number,
remoteSubnets: string[],
aliases: Alias[],
olmId?: string
) {
if (!olmId) {
const [olm] = await db
.select()
.from(olms)
.where(eq(olms.clientId, clientId))
.limit(1);
if (!olm) {
return; // ignore this because an olm might not be associated with the client anymore
}
olmId = olm.olmId;
}
await sendToClient(olmId, {
type: `olm/wg/peer/data/add`,
data: {
siteId: siteId,
remoteSubnets: remoteSubnets,
aliases: aliases
}
}).catch((error) => {
logger.warn(`Error sending message:`, error);
});
}
export async function removePeerData(
clientId: number,
siteId: number,
remoteSubnets: string[],
aliases: Alias[],
olmId?: string
) {
if (!olmId) {
const [olm] = await db
.select()
.from(olms)
.where(eq(olms.clientId, clientId))
.limit(1);
if (!olm) {
return;
}
olmId = olm.olmId;
}
await sendToClient(olmId, {
type: `olm/wg/peer/data/remove`,
data: {
siteId: siteId,
remoteSubnets: remoteSubnets,
aliases: aliases
}
}).catch((error) => {
logger.warn(`Error sending message:`, error);
});
}
export async function updatePeerData(
clientId: number,
siteId: number,
remoteSubnets: {
oldRemoteSubnets: string[];
newRemoteSubnets: string[];
} | undefined,
aliases: {
oldAliases: Alias[];
newAliases: Alias[];
} | undefined,
olmId?: string
) {
if (!olmId) {
const [olm] = await db
.select()
.from(olms)
.where(eq(olms.clientId, clientId))
.limit(1);
if (!olm) {
return;
}
olmId = olm.olmId;
}
await sendToClient(olmId, {
type: `olm/wg/peer/data/update`,
data: {
siteId: siteId,
...remoteSubnets,
...aliases
}
}).catch((error) => {
logger.warn(`Error sending message:`, error);
});
}
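A minimal usage sketch of the new target helpers; the "./targets" module path, the wrapper name, and the caller-supplied values are placeholders, and the SubnetProxyTarget shape is whatever @server/lib/ip defines, not anything shown here:

import { generateSubnetProxyTargets, SubnetProxyTarget } from "@server/lib/ip";
import { addTargets, updateTargets } from "./targets"; // assumed path of this module

// Push the proxy targets for one resource to the site's newt. On first push
// the full set is added; on later changes both the old and new sets are sent
// so the newt can compute the difference itself.
async function syncResourceTargets(
    newtId: string,
    resource: Parameters<typeof generateSubnetProxyTargets>[0],
    resourceClients: Parameters<typeof generateSubnetProxyTargets>[1],
    previousTargets?: SubnetProxyTarget[]
) {
    const newTargets = generateSubnetProxyTargets(resource, resourceClients);
    if (!previousTargets) {
        await addTargets(newtId, newTargets);
    } else {
        await updateTargets(newtId, { oldTargets: previousTargets, newTargets });
    }
}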

View File

@@ -0,0 +1,22 @@
import { sendToClient } from "#dynamic/routers/ws";
import { db, olms } from "@server/db";
import { eq } from "drizzle-orm";
export async function sendTerminateClient(clientId: number, olmId?: string | null) {
if (!olmId) {
const [olm] = await db
.select()
.from(olms)
.where(eq(olms.clientId, clientId))
.limit(1);
if (!olm) {
throw new Error(`Olm for client with ID ${clientId} not found`);
}
olmId = olm.olmId;
}
await sendToClient(olmId, {
type: `olm/terminate`,
data: {}
});
}
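A small sketch of the intended calling order around deletion: the olm row is looked up while the client still exists, since the clientId fallback inside sendTerminateClient finds nothing afterwards. The wrapper name is hypothetical:

import { db, clients, olms } from "@server/db";
import { eq } from "drizzle-orm";
import { sendTerminateClient } from "./terminate";

async function deleteClientAndTerminate(clientId: number) {
    // Capture the olm before the client row disappears.
    const [olm] = await db
        .select()
        .from(olms)
        .where(eq(olms.clientId, clientId))
        .limit(1);
    await db.delete(clients).where(eq(clients.clientId, clientId));
    // Pass the saved olmId explicitly; it can no longer be resolved by clientId.
    await sendTerminateClient(clientId, olm?.olmId);
}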

View File

@@ -1,7 +1,7 @@
import { Request, Response, NextFunction } from "express";
import { z } from "zod";
import { Client, db, exitNodes, olms, sites } from "@server/db";
import { clients, clientSites } from "@server/db";
import { clients, clientSitesAssociationsCache } from "@server/db";
import response from "@server/lib/response";
import HttpCode from "@server/types/HttpCode";
import createHttpError from "http-errors";
@@ -9,27 +9,14 @@ import logger from "@server/logger";
import { eq, and } from "drizzle-orm";
import { fromError } from "zod-validation-error";
import { OpenAPITags, registry } from "@server/openApi";
import {
addPeer as newtAddPeer,
deletePeer as newtDeletePeer
} from "../newt/peers";
import {
addPeer as olmAddPeer,
deletePeer as olmDeletePeer
} from "../olm/peers";
import { sendToExitNode } from "#dynamic/lib/exitNodes";
import { hashPassword } from "@server/auth/password";
const updateClientParamsSchema = z.strictObject({
clientId: z.string().transform(Number).pipe(z.int().positive())
});
clientId: z.string().transform(Number).pipe(z.int().positive())
});
const updateClientSchema = z.strictObject({
name: z.string().min(1).max(255).optional(),
siteIds: z
.array(z.int().positive())
.optional(),
});
name: z.string().min(1).max(255).optional()
});
export type UpdateClientBody = z.infer<typeof updateClientSchema>;
@@ -51,11 +38,6 @@ registry.registerPath({
responses: {}
});
interface PeerDestination {
destinationIP: string;
destinationPort: number;
}
export async function updateClient(
req: Request,
res: Response,
@@ -72,7 +54,7 @@ export async function updateClient(
);
}
const { name, siteIds } = parsedBody.data;
const { name } = parsedBody.data;
const parsedParams = updateClientParamsSchema.safeParse(req.params);
if (!parsedParams.success) {
@@ -86,7 +68,6 @@ export async function updateClient(
const { clientId } = parsedParams.data;
// Fetch the client to make sure it exists and the user has access to it
const [client] = await db
.select()
@@ -103,266 +84,11 @@ export async function updateClient(
);
}
let sitesAdded = [];
let sitesRemoved = [];
// Fetch existing site associations
const existingSites = await db
.select({ siteId: clientSites.siteId })
.from(clientSites)
.where(eq(clientSites.clientId, clientId));
const existingSiteIds = existingSites.map((site) => site.siteId);
const siteIdsToProcess = siteIds || [];
// Determine which sites were added and removed
sitesAdded = siteIdsToProcess.filter(
(siteId) => !existingSiteIds.includes(siteId)
);
sitesRemoved = existingSiteIds.filter(
(siteId) => !siteIdsToProcess.includes(siteId)
);
let updatedClient: Client | undefined = undefined;
let sitesData: any; // TODO: define type somehow from the query below
await db.transaction(async (trx) => {
// Update client name if provided
if (name) {
await trx
.update(clients)
.set({ name })
.where(eq(clients.clientId, clientId));
}
// Update site associations if provided
// Remove sites that are no longer associated
for (const siteId of sitesRemoved) {
await trx
.delete(clientSites)
.where(
and(
eq(clientSites.clientId, clientId),
eq(clientSites.siteId, siteId)
)
);
}
// Add new site associations
for (const siteId of sitesAdded) {
await trx.insert(clientSites).values({
clientId,
siteId
});
}
// Fetch the updated client
[updatedClient] = await trx
.select()
.from(clients)
.where(eq(clients.clientId, clientId))
.limit(1);
// get all sites for this client and join with exit nodes with site.exitNodeId
sitesData = await trx
.select()
.from(sites)
.innerJoin(clientSites, eq(sites.siteId, clientSites.siteId))
.leftJoin(exitNodes, eq(sites.exitNodeId, exitNodes.exitNodeId))
.where(eq(clientSites.clientId, client.clientId));
});
logger.info(
`Adding ${sitesAdded.length} new sites to client ${client.clientId}`
);
for (const siteId of sitesAdded) {
if (!client.subnet || !client.pubKey) {
logger.debug("Client subnet, pubKey or endpoint is not set");
continue;
}
// TODO: WE NEED TO HANDLE THIS BETTER. WE ARE DEFAULTING TO RELAYING FOR NEW SITES
// BUT REALLY WE NEED TO TRACK THE USERS PREFERENCE THAT THEY CHOSE IN THE CLIENTS
// AND TRIGGER A HOLEPUNCH OR SOMETHING TO GET THE ENDPOINT AND HP TO THE NEW SITES
const isRelayed = true;
const site = await newtAddPeer(siteId, {
publicKey: client.pubKey,
allowedIps: [`${client.subnet.split("/")[0]}/32`], // we want to only allow from that client
// endpoint: isRelayed ? "" : clientSite.endpoint
endpoint: isRelayed ? "" : "" // we are not HPing yet so no endpoint
});
if (!site) {
logger.debug("Failed to add peer to newt - missing site");
continue;
}
if (!site.endpoint || !site.publicKey) {
logger.debug("Site endpoint or publicKey is not set");
continue;
}
let endpoint;
if (isRelayed) {
if (!site.exitNodeId) {
logger.warn(
`Site ${site.siteId} has no exit node, skipping`
);
return null;
}
// get the exit node for the site
const [exitNode] = await db
.select()
.from(exitNodes)
.where(eq(exitNodes.exitNodeId, site.exitNodeId))
.limit(1);
if (!exitNode) {
logger.warn(`Exit node not found for site ${site.siteId}`);
return null;
}
endpoint = `${exitNode.endpoint}:21820`;
} else {
if (!site.endpoint) {
logger.warn(
`Site ${site.siteId} has no endpoint, skipping`
);
return null;
}
endpoint = site.endpoint;
}
await olmAddPeer(client.clientId, {
siteId: site.siteId,
endpoint: endpoint,
publicKey: site.publicKey,
serverIP: site.address,
serverPort: site.listenPort,
remoteSubnets: site.remoteSubnets
});
}
logger.info(
`Removing ${sitesRemoved.length} sites from client ${client.clientId}`
);
for (const siteId of sitesRemoved) {
if (!client.pubKey) {
logger.debug("Client pubKey is not set");
continue;
}
const site = await newtDeletePeer(siteId, client.pubKey);
if (!site) {
logger.debug("Failed to delete peer from newt - missing site");
continue;
}
if (!site.endpoint || !site.publicKey) {
logger.debug("Site endpoint or publicKey is not set");
continue;
}
await olmDeletePeer(client.clientId, site.siteId, site.publicKey);
}
if (!updatedClient || !sitesData) {
return next(
createHttpError(
HttpCode.INTERNAL_SERVER_ERROR,
`Failed to update client`
)
);
}
let exitNodeDestinations: {
reachableAt: string;
exitNodeId: number;
type: string;
name: string;
sourceIp: string;
sourcePort: number;
destinations: PeerDestination[];
}[] = [];
for (const site of sitesData) {
if (!site.sites.subnet) {
logger.warn(
`Site ${site.sites.siteId} has no subnet, skipping`
);
continue;
}
if (!site.clientSites.endpoint) {
logger.warn(
`Site ${site.sites.siteId} has no endpoint, skipping`
);
continue;
}
// find the destinations in the array
let destinations = exitNodeDestinations.find(
(d) => d.reachableAt === site.exitNodes?.reachableAt
);
if (!destinations) {
destinations = {
reachableAt: site.exitNodes?.reachableAt || "",
exitNodeId: site.exitNodes?.exitNodeId || 0,
type: site.exitNodes?.type || "",
name: site.exitNodes?.name || "",
sourceIp: site.clientSites.endpoint.split(":")[0] || "",
sourcePort:
parseInt(site.clientSites.endpoint.split(":")[1]) || 0,
destinations: [
{
destinationIP: site.sites.subnet.split("/")[0],
destinationPort: site.sites.listenPort || 0
}
]
};
} else {
// add to the existing destinations
destinations.destinations.push({
destinationIP: site.sites.subnet.split("/")[0],
destinationPort: site.sites.listenPort || 0
});
}
// update it in the array
exitNodeDestinations = exitNodeDestinations.filter(
(d) => d.reachableAt !== site.exitNodes?.reachableAt
);
exitNodeDestinations.push(destinations);
}
for (const destination of exitNodeDestinations) {
logger.info(
`Updating destinations for exit node at ${destination.reachableAt}`
);
const payload = {
sourceIp: destination.sourceIp,
sourcePort: destination.sourcePort,
destinations: destination.destinations
};
logger.info(
`Payload for update-destinations: ${JSON.stringify(payload, null, 2)}`
);
// Create an ExitNode-like object for sendToExitNode
const exitNodeForComm = {
exitNodeId: destination.exitNodeId,
type: destination.type,
reachableAt: destination.reachableAt,
name: destination.name
} as any; // Using 'as any' since we know sendToExitNode will handle this correctly
await sendToExitNode(exitNodeForComm, {
remoteType: "remoteExitNode/update-destinations",
localPath: "/update-destinations",
method: "POST",
data: payload
});
}
const updatedClient = await db
.update(clients)
.set({ name })
.where(eq(clients.clientId, clientId))
.returning();
return response(res, {
data: updatedClient,

View File

@@ -16,6 +16,8 @@ import * as idp from "./idp";
import * as blueprints from "./blueprints";
import * as apiKeys from "./apiKeys";
import * as logs from "./auditLogs";
import * as newt from "./newt";
import * as olm from "./olm";
import HttpCode from "@server/types/HttpCode";
import {
verifyAccessTokenAccess,
@@ -27,6 +29,7 @@ import {
verifyTargetAccess,
verifyRoleAccess,
verifySetResourceUsers,
verifySetResourceClients,
verifyUserAccess,
getUserOrgs,
verifyUserIsServerAdmin,
@@ -34,14 +37,12 @@ import {
verifyClientAccess,
verifyApiKeyAccess,
verifyDomainAccess,
verifyClientsEnabled,
verifyUserHasAction,
verifyUserIsOrgOwner,
verifySiteResourceAccess
verifySiteResourceAccess,
verifyOlmAccess
} from "@server/middlewares";
import { ActionsEnum } from "@server/auth/actions";
import { createNewt, getNewtToken } from "./newt";
import { getOlmToken } from "./olm";
import rateLimit, { ipKeyGenerator } from "express-rate-limit";
import createHttpError from "http-errors";
import { build } from "@server/build";
@@ -129,7 +130,6 @@ authenticated.get(
authenticated.get(
"/org/:orgId/pick-client-defaults",
verifyClientsEnabled,
verifyOrgAccess,
verifyUserHasAction(ActionsEnum.createClient),
client.pickClientDefaults
@@ -137,7 +137,6 @@ authenticated.get(
authenticated.get(
"/org/:orgId/clients",
verifyClientsEnabled,
verifyOrgAccess,
verifyUserHasAction(ActionsEnum.listClients),
client.listClients
@@ -145,7 +144,6 @@ authenticated.get(
authenticated.get(
"/client/:clientId",
verifyClientsEnabled,
verifyClientAccess,
verifyUserHasAction(ActionsEnum.getClient),
client.getClient
@@ -153,16 +151,15 @@ authenticated.get(
authenticated.put(
"/org/:orgId/client",
verifyClientsEnabled,
verifyOrgAccess,
verifyUserHasAction(ActionsEnum.createClient),
logActionAudit(ActionsEnum.createClient),
client.createClient
);
// TODO: Separate into a deleteUserClient (for user clients) and deleteClient (for machine clients)
authenticated.delete(
"/client/:clientId",
verifyClientsEnabled,
verifyClientAccess,
verifyUserHasAction(ActionsEnum.deleteClient),
logActionAudit(ActionsEnum.deleteClient),
@@ -171,7 +168,6 @@ authenticated.delete(
authenticated.post(
"/client/:clientId",
verifyClientsEnabled,
verifyClientAccess, // this will check if the user has access to the client
verifyUserHasAction(ActionsEnum.updateClient), // this will check if the user has permission to update the client
logActionAudit(ActionsEnum.updateClient),
@@ -286,6 +282,72 @@ authenticated.delete(
siteResource.deleteSiteResource
);
authenticated.get(
"/site-resource/:siteResourceId/roles",
verifySiteResourceAccess,
verifyUserHasAction(ActionsEnum.listResourceRoles),
siteResource.listSiteResourceRoles
);
authenticated.get(
"/site-resource/:siteResourceId/users",
verifySiteResourceAccess,
verifyUserHasAction(ActionsEnum.listResourceUsers),
siteResource.listSiteResourceUsers
);
authenticated.get(
"/site-resource/:siteResourceId/clients",
verifySiteResourceAccess,
verifyUserHasAction(ActionsEnum.listResourceUsers),
siteResource.listSiteResourceClients
);
authenticated.post(
"/site-resource/:siteResourceId/roles",
verifySiteResourceAccess,
verifyRoleAccess,
verifyUserHasAction(ActionsEnum.setResourceRoles),
logActionAudit(ActionsEnum.setResourceRoles),
siteResource.setSiteResourceRoles,
);
authenticated.post(
"/site-resource/:siteResourceId/users",
verifySiteResourceAccess,
verifySetResourceUsers,
verifyUserHasAction(ActionsEnum.setResourceUsers),
logActionAudit(ActionsEnum.setResourceUsers),
siteResource.setSiteResourceUsers,
);
authenticated.post(
"/site-resource/:siteResourceId/clients",
verifySiteResourceAccess,
verifySetResourceClients,
verifyUserHasAction(ActionsEnum.setResourceUsers),
logActionAudit(ActionsEnum.setResourceUsers),
siteResource.setSiteResourceClients,
);
authenticated.post(
"/site-resource/:siteResourceId/clients/add",
verifySiteResourceAccess,
verifySetResourceClients,
verifyUserHasAction(ActionsEnum.setResourceUsers),
logActionAudit(ActionsEnum.setResourceUsers),
siteResource.addClientToSiteResource,
);
authenticated.post(
"/site-resource/:siteResourceId/clients/remove",
verifySiteResourceAccess,
verifySetResourceClients,
verifyUserHasAction(ActionsEnum.setResourceUsers),
logActionAudit(ActionsEnum.setResourceUsers),
siteResource.removeClientFromSiteResource,
);
authenticated.put(
"/org/:orgId/resource",
verifyOrgAccess,
@@ -649,9 +711,15 @@ unauthenticated.get(
// );
unauthenticated.get("/user", verifySessionMiddleware, user.getUser);
unauthenticated.get("/my-device", verifySessionMiddleware, user.myDevice);
authenticated.get("/users", verifyUserIsServerAdmin, user.adminListUsers);
authenticated.get("/user/:userId", verifyUserIsServerAdmin, user.adminGetUser);
authenticated.post(
"/user/:userId/generate-password-reset-code",
verifyUserIsServerAdmin,
user.adminGeneratePasswordResetCode
);
authenticated.delete(
"/user/:userId",
verifyUserIsServerAdmin,
@@ -734,6 +802,32 @@ authenticated.delete(
// createNewt
// );
authenticated.put(
"/user/:userId/olm",
verifyIsLoggedInUser,
olm.createUserOlm
);
authenticated.get(
"/user/:userId/olms",
verifyIsLoggedInUser,
olm.listUserOlms
);
authenticated.delete(
"/user/:userId/olm/:olmId",
verifyIsLoggedInUser,
verifyOlmAccess,
olm.deleteUserOlm
);
authenticated.get(
"/user/:userId/olm/:olmId",
verifyIsLoggedInUser,
verifyOlmAccess,
olm.getUserOlm
);
authenticated.put(
"/idp/oidc",
verifyUserIsServerAdmin,
@@ -993,7 +1087,7 @@ authRouter.post(
},
store: createStore()
}),
getNewtToken
newt.getNewtToken
);
authRouter.post(
"/olm/get-token",
@@ -1008,7 +1102,7 @@ authRouter.post(
},
store: createStore()
}),
getOlmToken
olm.getOlmToken
);
authRouter.post(
@@ -1253,3 +1347,51 @@ authRouter.delete(
}),
auth.deleteSecurityKey
);
authRouter.post(
"/device-web-auth/start",
rateLimit({
windowMs: 15 * 60 * 1000, // 15 minutes
max: 30, // Allow 30 device auth code requests per 15 minutes per IP
keyGenerator: (req) =>
`deviceWebAuthStart:${ipKeyGenerator(req.ip || "")}`,
handler: (req, res, next) => {
const message = `You can only request a device auth code ${30} times every ${15} minutes. Please try again later.`;
return next(createHttpError(HttpCode.TOO_MANY_REQUESTS, message));
},
store: createStore()
}),
auth.startDeviceWebAuth
);
authRouter.get(
"/device-web-auth/poll/:code",
rateLimit({
windowMs: 60 * 1000, // 1 minute
max: 60, // Allow 60 polling requests per minute per IP (poll every second)
keyGenerator: (req) =>
`deviceWebAuthPoll:${ipKeyGenerator(req.ip || "")}:${req.params.code}`,
handler: (req, res, next) => {
const message = `You can only poll a device auth code ${60} times per minute. Please try again later.`;
return next(createHttpError(HttpCode.TOO_MANY_REQUESTS, message));
},
store: createStore()
}),
auth.pollDeviceWebAuth
);
authenticated.post(
"/device-web-auth/verify",
rateLimit({
windowMs: 15 * 60 * 1000, // 15 minutes
max: 50, // Allow 50 verification attempts per 15 minutes per user
keyGenerator: (req) =>
`deviceWebAuthVerify:${req.user?.userId || ipKeyGenerator(req.ip || "")}`,
handler: (req, res, next) => {
const message = `You can only verify a device auth code ${50} times every ${15} minutes. Please try again later.`;
return next(createHttpError(HttpCode.TOO_MANY_REQUESTS, message));
},
store: createStore()
}),
auth.verifyDeviceWebAuth
);
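A rough sketch of the device flow these three routes imply, seen from a headless client; the /api/v1/auth prefix and the response fields are assumptions, since only the route paths and rate limits appear in this diff:

async function deviceWebAuthFlow(baseUrl: string) {
    // 1. Request a device auth code (limited to 30 requests per 15 minutes per IP).
    const startRes = await fetch(`${baseUrl}/api/v1/auth/device-web-auth/start`, {
        method: "POST"
    });
    const start = (await startRes.json()) as { data?: { code?: string } }; // assumed envelope
    const code = start.data?.code;
    if (!code) throw new Error("no device auth code returned");

    // 2. Poll roughly once per second (60 polls per minute per IP) while the
    //    user logs in via a browser and hits the authenticated verify route.
    for (let i = 0; i < 300; i++) {
        const pollRes = await fetch(
            `${baseUrl}/api/v1/auth/device-web-auth/poll/${code}`
        );
        const poll = (await pollRes.json()) as { data?: { completed?: boolean } }; // assumed shape
        if (pollRes.ok && poll.data?.completed) return poll.data;
        await new Promise((r) => setTimeout(r, 1000));
    }
    throw new Error("device auth timed out");
}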

View File

@@ -7,7 +7,7 @@ import {
olms,
Site,
sites,
clientSites,
clientSitesAssociationsCache,
ExitNode
} from "@server/db";
import { db } from "@server/db";
@@ -109,8 +109,8 @@ export async function generateRelayMappings(exitNode: ExitNode) {
// Find all clients associated with this site through clientSites
const clientSitesRes = await db
.select()
.from(clientSites)
.where(eq(clientSites.siteId, site.siteId));
.from(clientSitesAssociationsCache)
.where(eq(clientSitesAssociationsCache.siteId, site.siteId));
for (const clientSite of clientSitesRes) {
if (!clientSite.endpoint) {

View File

@@ -6,7 +6,7 @@ import {
olms,
Site,
sites,
clientSites,
clientSitesAssociationsCache,
exitNodes,
ExitNode
} from "@server/db";
@@ -19,6 +19,8 @@ import { fromError } from "zod-validation-error";
import { validateNewtSessionToken } from "@server/auth/sessions/newt";
import { validateOlmSessionToken } from "@server/auth/sessions/olm";
import { checkExitNodeOrg } from "#dynamic/lib/exitNodes";
import { updatePeer as updateOlmPeer } from "../olm/peers";
import { updatePeer as updateNewtPeer } from "../newt/peers";
// Define Zod schema for request validation
const updateHolePunchSchema = z.object({
@@ -28,8 +30,9 @@ const updateHolePunchSchema = z.object({
ip: z.string(),
port: z.number(),
timestamp: z.number(),
publicKey: z.string(),
reachableAt: z.string().optional(),
publicKey: z.string().optional()
exitNodePublicKey: z.string().optional()
});
// New response type with multi-peer destination support
@@ -63,23 +66,26 @@ export async function updateHolePunch(
timestamp,
token,
reachableAt,
publicKey
publicKey, // this is the client's current public key for this session
exitNodePublicKey
} = parsedParams.data;
let exitNode: ExitNode | undefined;
if (publicKey) {
if (exitNodePublicKey) {
// Get the exit node by public key
[exitNode] = await db
.select()
.from(exitNodes)
.where(eq(exitNodes.publicKey, publicKey));
.where(eq(exitNodes.publicKey, exitNodePublicKey));
} else {
// FOR BACKWARDS COMPATIBILITY IF GERBIL IS STILL <= 1.1.0
[exitNode] = await db.select().from(exitNodes).limit(1);
}
if (!exitNode) {
logger.warn(`Exit node not found for publicKey: ${publicKey}`);
logger.warn(
`Exit node not found for publicKey: ${exitNodePublicKey}`
);
return next(
createHttpError(HttpCode.NOT_FOUND, "Exit node not found")
);
@@ -92,12 +98,13 @@ export async function updateHolePunch(
port,
timestamp,
token,
publicKey,
exitNode
);
logger.debug(
`Returning ${destinations.length} peer destinations for olmId: ${olmId} or newtId: ${newtId}: ${JSON.stringify(destinations, null, 2)}`
);
// logger.debug(
// `Returning ${destinations.length} peer destinations for olmId: ${olmId} or newtId: ${newtId}: ${JSON.stringify(destinations, null, 2)}`
// );
// Return the new multi-peer structure
return res.status(HttpCode.OK).send({
@@ -121,6 +128,7 @@ export async function updateAndGenerateEndpointDestinations(
port: number,
timestamp: number,
token: string,
publicKey: string,
exitNode: ExitNode,
checkOrg = false
) {
@@ -128,9 +136,9 @@ export async function updateAndGenerateEndpointDestinations(
const destinations: PeerDestination[] = [];
if (olmId) {
logger.debug(
`Got hole punch with ip: ${ip}, port: ${port} for olmId: ${olmId}`
);
// logger.debug(
// `Got hole punch with ip: ${ip}, port: ${port} for olmId: ${olmId}`
// );
const { session, olm: olmSession } =
await validateOlmSessionToken(token);
@@ -150,7 +158,7 @@ export async function updateAndGenerateEndpointDestinations(
throw new Error("Olm not found");
}
const [client] = await db
const [updatedClient] = await db
.update(clients)
.set({
lastHolePunch: timestamp
@@ -158,10 +166,16 @@ export async function updateAndGenerateEndpointDestinations(
.where(eq(clients.clientId, olm.clientId))
.returning();
if (await checkExitNodeOrg(exitNode.exitNodeId, client.orgId) && checkOrg) {
if (
(await checkExitNodeOrg(
exitNode.exitNodeId,
updatedClient.orgId
)) &&
checkOrg
) {
// not allowed
logger.warn(
`Exit node ${exitNode.exitNodeId} is not allowed for org ${client.orgId}`
`Exit node ${exitNode.exitNodeId} is not allowed for org ${updatedClient.orgId}`
);
throw new Error("Exit node not allowed");
}
@@ -171,40 +185,70 @@ export async function updateAndGenerateEndpointDestinations(
.select({
siteId: sites.siteId,
subnet: sites.subnet,
listenPort: sites.listenPort
listenPort: sites.listenPort,
publicKey: sites.publicKey,
endpoint: clientSitesAssociationsCache.endpoint
})
.from(sites)
.innerJoin(clientSites, eq(sites.siteId, clientSites.siteId))
.innerJoin(
clientSitesAssociationsCache,
eq(sites.siteId, clientSitesAssociationsCache.siteId)
)
.where(
and(
eq(sites.exitNodeId, exitNode.exitNodeId),
eq(clientSites.clientId, olm.clientId)
eq(clientSitesAssociationsCache.clientId, olm.clientId)
)
);
// Update clientSites for each site on this exit node
for (const site of sitesOnExitNode) {
logger.debug(
`Updating site ${site.siteId} on exit node ${exitNode.exitNodeId}`
);
// logger.debug(
// `Updating site ${site.siteId} on exit node ${exitNode.exitNodeId}`
// );
await db
.update(clientSites)
// if the public key or endpoint has changed, update it otherwise continue
if (
site.endpoint === `${ip}:${port}` &&
site.publicKey === publicKey
) {
continue;
}
const [updatedClientSitesAssociationsCache] = await db
.update(clientSitesAssociationsCache)
.set({
endpoint: `${ip}:${port}`
endpoint: `${ip}:${port}`,
publicKey: publicKey
})
.where(
and(
eq(clientSites.clientId, olm.clientId),
eq(clientSites.siteId, site.siteId)
eq(clientSitesAssociationsCache.clientId, olm.clientId),
eq(clientSitesAssociationsCache.siteId, site.siteId)
)
)
.returning();
if (
updatedClientSitesAssociationsCache.endpoint !==
site.endpoint && // this is the endpoint from the join table not the site
updatedClient.pubKey === publicKey // only trigger if the client's public key matches the current public key, which means it has registered, so we don't prematurely send the update
) {
logger.info(
`ClientSitesAssociationsCache for client ${olm.clientId} and site ${site.siteId} endpoint changed from ${site.endpoint} to ${updatedClientSitesAssociationsCache.endpoint}`
);
// Handle any additional logic for endpoint change
handleClientEndpointChange(
olm.clientId,
updatedClientSitesAssociationsCache.endpoint!
);
}
}
logger.debug(
`Updated ${sitesOnExitNode.length} sites on exit node ${exitNode.exitNodeId}`
);
if (!client) {
// logger.debug(
// `Updated ${sitesOnExitNode.length} sites on exit node ${exitNode.exitNodeId}`
// );
if (!updatedClient) {
logger.warn(`Client not found for olm: ${olmId}`);
throw new Error("Client not found");
}
@@ -219,9 +263,9 @@ export async function updateAndGenerateEndpointDestinations(
}
}
} else if (newtId) {
logger.debug(
`Got hole punch with ip: ${ip}, port: ${port} for newtId: ${newtId}`
);
// logger.debug(
// `Got hole punch with ip: ${ip}, port: ${port} for newtId: ${newtId}`
// );
const { session, newt: newtSession } =
await validateNewtSessionToken(token);
@@ -253,7 +297,10 @@ export async function updateAndGenerateEndpointDestinations(
.where(eq(sites.siteId, newt.siteId))
.limit(1);
if (await checkExitNodeOrg(exitNode.exitNodeId, site.orgId) && checkOrg) {
if (
(await checkExitNodeOrg(exitNode.exitNodeId, site.orgId)) &&
checkOrg
) {
// not allowed
logger.warn(
`Exit node ${exitNode.exitNodeId} is not allowed for org ${site.orgId}`
@@ -273,6 +320,18 @@ export async function updateAndGenerateEndpointDestinations(
.where(eq(sites.siteId, newt.siteId))
.returning();
if (
updatedSite.endpoint != site.endpoint &&
updatedSite.publicKey == publicKey
) {
// only trigger if the site's public key matches the current public key, which means it has registered, so we don't prematurely send the update
logger.info(
`Site ${newt.siteId} endpoint changed from ${site.endpoint} to ${updatedSite.endpoint}`
);
// Handle any additional logic for endpoint change
handleSiteEndpointChange(newt.siteId, updatedSite.endpoint!);
}
if (!updatedSite || !updatedSite.subnet) {
logger.warn(`Site not found: ${newt.siteId}`);
throw new Error("Site not found");
@@ -326,3 +385,143 @@ export async function updateAndGenerateEndpointDestinations(
}
return destinations;
}
async function handleSiteEndpointChange(siteId: number, newEndpoint: string) {
// Alert all clients connected to this site that the endpoint has changed (only if NOT relayed)
try {
// Get site details
const [site] = await db
.select()
.from(sites)
.where(eq(sites.siteId, siteId))
.limit(1);
if (!site || !site.publicKey) {
logger.warn(`Site ${siteId} not found or has no public key`);
return;
}
// Get all non-relayed clients connected to this site
const connectedClients = await db
.select({
clientId: clients.clientId,
olmId: olms.olmId,
isRelayed: clientSitesAssociationsCache.isRelayed
})
.from(clientSitesAssociationsCache)
.innerJoin(
clients,
eq(clientSitesAssociationsCache.clientId, clients.clientId)
)
.innerJoin(olms, eq(olms.clientId, clients.clientId))
.where(
and(
eq(clientSitesAssociationsCache.siteId, siteId),
eq(clientSitesAssociationsCache.isRelayed, false)
)
);
// Update each non-relayed client with the new site endpoint
for (const client of connectedClients) {
try {
await updateOlmPeer(
client.clientId,
{
siteId: siteId,
publicKey: site.publicKey,
endpoint: newEndpoint
},
client.olmId
);
logger.debug(
`Updated client ${client.clientId} with new site ${siteId} endpoint: ${newEndpoint}`
);
} catch (error) {
logger.error(
`Failed to update client ${client.clientId} with new site endpoint: ${error}`
);
}
}
} catch (error) {
logger.error(
`Error handling site endpoint change for site ${siteId}: ${error}`
);
}
}
async function handleClientEndpointChange(
clientId: number,
newEndpoint: string
) {
// Alert all sites connected to this client that the endpoint has changed (only if NOT relayed)
try {
// Get client details
const [client] = await db
.select()
.from(clients)
.where(eq(clients.clientId, clientId))
.limit(1);
if (!client || !client.pubKey) {
logger.warn(`Client ${clientId} not found or has no public key`);
return;
}
// Get all non-relayed sites connected to this client
const connectedSites = await db
.select({
siteId: sites.siteId,
newtId: newts.newtId,
isRelayed: clientSitesAssociationsCache.isRelayed,
subnet: clients.subnet
})
.from(clientSitesAssociationsCache)
.innerJoin(
sites,
eq(clientSitesAssociationsCache.siteId, sites.siteId)
)
.innerJoin(newts, eq(newts.siteId, sites.siteId))
.innerJoin(
clients,
eq(clientSitesAssociationsCache.clientId, clients.clientId)
)
.where(
and(
eq(clientSitesAssociationsCache.clientId, clientId),
eq(clientSitesAssociationsCache.isRelayed, false)
)
);
// Update each non-relayed site with the new client endpoint
for (const siteData of connectedSites) {
try {
if (!siteData.subnet) {
logger.warn(
`Client ${clientId} has no subnet, skipping update for site ${siteData.siteId}`
);
continue;
}
await updateNewtPeer(
siteData.siteId,
client.pubKey,
{
endpoint: newEndpoint
},
siteData.newtId
);
logger.debug(
`Updated site ${siteData.siteId} with new client ${clientId} endpoint: ${newEndpoint}`
);
} catch (error) {
logger.error(
`Failed to update site ${siteData.siteId} with new client endpoint: ${error}`
);
}
}
} catch (error) {
logger.error(
`Error handling client endpoint change for client ${clientId}: ${error}`
);
}
}

View File

@@ -33,6 +33,7 @@ import { UserType } from "@server/types/UserTypes";
import { FeatureId } from "@server/lib/billing";
import { usageService } from "@server/lib/billing/usageService";
import { build } from "@server/build";
import { calculateUserClientsForOrgs } from "@server/lib/calculateUserClientsForOrgs";
const ensureTrailingSlash = (url: string): string => {
return url;
@@ -364,10 +365,18 @@ export async function validateOidcCallback(
);
if (!existingUserOrgs.length) {
// delete the user
// await db
// .delete(users)
// .where(eq(users.userId, existingUser.userId));
// delete all auto-provisioned user orgs
await db
.delete(userOrgs)
.where(
and(
eq(userOrgs.userId, existingUser.userId),
eq(userOrgs.autoProvisioned, true)
)
);
await calculateUserClientsForOrgs(existingUser.userId);
return next(
createHttpError(
HttpCode.UNAUTHORIZED,
@@ -513,6 +522,8 @@ export async function validateOidcCallback(
userCount: userCount.length
});
}
await calculateUserClientsForOrgs(userId!, trx);
});
for (const orgCount of orgUserCounts) {
@@ -553,6 +564,24 @@ export async function validateOidcCallback(
);
}
// check for existing user orgs
const existingUserOrgs = await db
.select()
.from(userOrgs)
.where(and(eq(userOrgs.userId, existingUser.userId)));
if (!existingUserOrgs.length) {
logger.debug(
"No existing user orgs found for non-auto-provisioned IdP"
);
return next(
createHttpError(
HttpCode.UNAUTHORIZED,
`User with username ${userIdentifier} is unprovisioned. This user must be added to an organization before logging in.`
)
);
}
const token = generateSessionToken();
const sess = await createSession(token, existingUser.userId);
const isSecure = req.protocol === "https";

View File

@@ -10,6 +10,7 @@ import * as client from "./client";
import * as accessToken from "./accessToken";
import * as apiKeys from "./apiKeys";
import * as idp from "./idp";
import * as logs from "./auditLogs";
import * as siteResource from "./siteResource";
import {
verifyApiKey,
@@ -24,8 +25,8 @@ import {
verifyApiKeyAccessTokenAccess,
verifyApiKeyIsRoot,
verifyApiKeyClientAccess,
verifyClientsEnabled,
verifyApiKeySiteResourceAccess
verifyApiKeySiteResourceAccess,
verifyApiKeySetResourceClients
} from "@server/middlewares";
import HttpCode from "@server/types/HttpCode";
import { Router } from "express";
@@ -197,6 +198,108 @@ authenticated.delete(
siteResource.deleteSiteResource
);
authenticated.get(
"/site-resource/:siteResourceId/roles",
verifyApiKeySiteResourceAccess,
verifyApiKeyHasAction(ActionsEnum.listResourceRoles),
siteResource.listSiteResourceRoles
);
authenticated.get(
"/site-resource/:siteResourceId/users",
verifyApiKeySiteResourceAccess,
verifyApiKeyHasAction(ActionsEnum.listResourceUsers),
siteResource.listSiteResourceUsers
);
authenticated.get(
"/site-resource/:siteResourceId/clients",
verifyApiKeySiteResourceAccess,
verifyApiKeyHasAction(ActionsEnum.listResourceUsers),
siteResource.listSiteResourceClients
);
authenticated.post(
"/site-resource/:siteResourceId/roles",
verifyApiKeySiteResourceAccess,
verifyApiKeyRoleAccess,
verifyApiKeyHasAction(ActionsEnum.setResourceRoles),
logActionAudit(ActionsEnum.setResourceRoles),
siteResource.setSiteResourceRoles
);
authenticated.post(
"/site-resource/:siteResourceId/users",
verifyApiKeySiteResourceAccess,
verifyApiKeySetResourceUsers,
verifyApiKeyHasAction(ActionsEnum.setResourceUsers),
logActionAudit(ActionsEnum.setResourceUsers),
siteResource.setSiteResourceUsers
);
authenticated.post(
"/site-resource/:siteResourceId/roles/add",
verifyApiKeySiteResourceAccess,
verifyApiKeyRoleAccess,
verifyApiKeyHasAction(ActionsEnum.setResourceRoles),
logActionAudit(ActionsEnum.setResourceRoles),
siteResource.addRoleToSiteResource
);
authenticated.post(
"/site-resource/:siteResourceId/roles/remove",
verifyApiKeySiteResourceAccess,
verifyApiKeyRoleAccess,
verifyApiKeyHasAction(ActionsEnum.setResourceRoles),
logActionAudit(ActionsEnum.setResourceRoles),
siteResource.removeRoleFromSiteResource
);
authenticated.post(
"/site-resource/:siteResourceId/users/add",
verifyApiKeySiteResourceAccess,
verifyApiKeySetResourceUsers,
verifyApiKeyHasAction(ActionsEnum.setResourceUsers),
logActionAudit(ActionsEnum.setResourceUsers),
siteResource.addUserToSiteResource
);
authenticated.post(
"/site-resource/:siteResourceId/users/remove",
verifyApiKeySiteResourceAccess,
verifyApiKeySetResourceUsers,
verifyApiKeyHasAction(ActionsEnum.setResourceUsers),
logActionAudit(ActionsEnum.setResourceUsers),
siteResource.removeUserFromSiteResource
);
authenticated.post(
"/site-resource/:siteResourceId/clients",
verifyApiKeySiteResourceAccess,
verifyApiKeySetResourceClients,
verifyApiKeyHasAction(ActionsEnum.setResourceUsers),
logActionAudit(ActionsEnum.setResourceUsers),
siteResource.setSiteResourceClients
);
authenticated.post(
"/site-resource/:siteResourceId/clients/add",
verifyApiKeySiteResourceAccess,
verifyApiKeySetResourceClients,
verifyApiKeyHasAction(ActionsEnum.setResourceUsers),
logActionAudit(ActionsEnum.setResourceUsers),
siteResource.addClientToSiteResource
);
authenticated.post(
"/site-resource/:siteResourceId/clients/remove",
verifyApiKeySiteResourceAccess,
verifyApiKeySetResourceClients,
verifyApiKeyHasAction(ActionsEnum.setResourceUsers),
logActionAudit(ActionsEnum.setResourceUsers),
siteResource.removeClientFromSiteResource
);
authenticated.put(
"/org/:orgId/resource",
verifyApiKeyOrgAccess,
@@ -412,6 +515,42 @@ authenticated.post(
resource.setResourceUsers
);
authenticated.post(
"/resource/:resourceId/roles/add",
verifyApiKeyResourceAccess,
verifyApiKeyRoleAccess,
verifyApiKeyHasAction(ActionsEnum.setResourceRoles),
logActionAudit(ActionsEnum.setResourceRoles),
resource.addRoleToResource
);
authenticated.post(
"/resource/:resourceId/roles/remove",
verifyApiKeyResourceAccess,
verifyApiKeyRoleAccess,
verifyApiKeyHasAction(ActionsEnum.setResourceRoles),
logActionAudit(ActionsEnum.setResourceRoles),
resource.removeRoleFromResource
);
authenticated.post(
"/resource/:resourceId/users/add",
verifyApiKeyResourceAccess,
verifyApiKeySetResourceUsers,
verifyApiKeyHasAction(ActionsEnum.setResourceUsers),
logActionAudit(ActionsEnum.setResourceUsers),
resource.addUserToResource
);
authenticated.post(
"/resource/:resourceId/users/remove",
verifyApiKeyResourceAccess,
verifyApiKeySetResourceUsers,
verifyApiKeyHasAction(ActionsEnum.setResourceUsers),
logActionAudit(ActionsEnum.setResourceUsers),
resource.removeUserFromResource
);
authenticated.post(
`/resource/:resourceId/password`,
verifyApiKeyResourceAccess,
@@ -657,7 +796,6 @@ authenticated.get(
authenticated.get(
"/org/:orgId/pick-client-defaults",
verifyClientsEnabled,
verifyApiKeyOrgAccess,
verifyApiKeyHasAction(ActionsEnum.createClient),
client.pickClientDefaults
@@ -665,7 +803,6 @@ authenticated.get(
authenticated.get(
"/org/:orgId/clients",
verifyClientsEnabled,
verifyApiKeyOrgAccess,
verifyApiKeyHasAction(ActionsEnum.listClients),
client.listClients
@@ -673,7 +810,6 @@ authenticated.get(
authenticated.get(
"/client/:clientId",
verifyClientsEnabled,
verifyApiKeyClientAccess,
verifyApiKeyHasAction(ActionsEnum.getClient),
client.getClient
@@ -681,16 +817,24 @@ authenticated.get(
authenticated.put(
"/org/:orgId/client",
verifyClientsEnabled,
verifyApiKeyOrgAccess,
verifyApiKeyHasAction(ActionsEnum.createClient),
logActionAudit(ActionsEnum.createClient),
client.createClient
);
// authenticated.put(
// "/org/:orgId/user/:userId/client",
// verifyClientsEnabled,
// verifyApiKeyOrgAccess,
// verifyApiKeyUserAccess,
// verifyApiKeyHasAction(ActionsEnum.createClient),
// logActionAudit(ActionsEnum.createClient),
// client.createUserClient
// );
authenticated.delete(
"/client/:clientId",
verifyClientsEnabled,
verifyApiKeyClientAccess,
verifyApiKeyHasAction(ActionsEnum.deleteClient),
logActionAudit(ActionsEnum.deleteClient),
@@ -699,7 +843,6 @@ authenticated.delete(
authenticated.post(
"/client/:clientId",
verifyClientsEnabled,
verifyApiKeyClientAccess,
verifyApiKeyHasAction(ActionsEnum.updateClient),
logActionAudit(ActionsEnum.updateClient),
@@ -713,3 +856,32 @@ authenticated.put(
logActionAudit(ActionsEnum.applyBlueprint),
blueprints.applyJSONBlueprint
);
authenticated.get(
"/org/:orgId/logs/request",
verifyApiKeyOrgAccess,
verifyApiKeyHasAction(ActionsEnum.viewLogs),
logs.queryRequestAuditLogs
);
authenticated.get(
"/org/:orgId/logs/request/export",
verifyApiKeyOrgAccess,
verifyApiKeyHasAction(ActionsEnum.exportLogs),
logActionAudit(ActionsEnum.exportLogs),
logs.exportRequestAuditLogs
);
authenticated.get(
"/org/:orgId/logs/analytics",
verifyApiKeyOrgAccess,
verifyApiKeyHasAction(ActionsEnum.viewLogs),
logs.queryRequestAnalytics
);
authenticated.get(
"/org/:orgId/resource-names",
verifyApiKeyOrgAccess,
verifyApiKeyHasAction(ActionsEnum.listResources),
resource.listAllResourceNames
);

View File

@@ -6,15 +6,15 @@ import {
db,
ExitNode,
exitNodes,
resources,
siteResources,
Target,
targets
clientSiteResourcesAssociationsCache
} from "@server/db";
import { clients, clientSites, Newt, sites } from "@server/db";
import { eq, and, inArray } from "drizzle-orm";
import { clients, clientSitesAssociationsCache, Newt, sites } from "@server/db";
import { eq } from "drizzle-orm";
import { updatePeer } from "../olm/peers";
import { sendToExitNode } from "#dynamic/lib/exitNodes";
import { generateSubnetProxyTargets, SubnetProxyTarget } from "@server/lib/ip";
import config from "@server/lib/config";
const inputSchema = z.object({
publicKey: z.string(),
@@ -66,7 +66,9 @@ export const handleGetConfigMessage: MessageHandler = async (context) => {
// we need to wait for hole punch success
if (!existingSite.endpoint) {
logger.debug(`In newt get config: existing site ${existingSite.siteId} has no endpoint, skipping`);
logger.debug(
`In newt get config: existing site ${existingSite.siteId} has no endpoint, skipping`
);
return;
}
@@ -74,12 +76,12 @@ export const handleGetConfigMessage: MessageHandler = async (context) => {
// TODO: somehow we should make sure a recent hole punch has happened if this occurs (hole punch could be from the last restart if done quickly)
}
// if (existingSite.lastHolePunch && now - existingSite.lastHolePunch > 6) {
// logger.warn(
// `Site ${existingSite.siteId} last hole punch is too old, skipping`
// );
// return;
// }
if (existingSite.lastHolePunch && now - existingSite.lastHolePunch > 5) {
logger.warn(
`handleGetConfigMessage: Site ${existingSite.siteId} last hole punch is too old, skipping`
);
return;
}
// update the endpoint and the public key
const [site] = await db
@@ -132,75 +134,95 @@ export const handleGetConfigMessage: MessageHandler = async (context) => {
const clientsRes = await db
.select()
.from(clients)
.innerJoin(clientSites, eq(clients.clientId, clientSites.clientId))
.where(eq(clientSites.siteId, siteId));
.innerJoin(
clientSitesAssociationsCache,
eq(clients.clientId, clientSitesAssociationsCache.clientId)
)
.where(eq(clientSitesAssociationsCache.siteId, siteId));
// Prepare peers data for the response
const peers = await Promise.all(
clientsRes
.filter((client) => {
if (!client.clients.pubKey) {
logger.warn(
`Client ${client.clients.clientId} has no public key, skipping`
);
return false;
}
if (!client.clients.subnet) {
logger.warn(
`Client ${client.clients.clientId} has no subnet, skipping`
);
return false;
}
return true;
})
.map(async (client) => {
// Add or update this peer on the olm if it is connected
try {
if (!site.publicKey) {
logger.warn(
`Site ${site.siteId} has no public key, skipping`
);
return null;
}
let endpoint = site.endpoint;
if (client.clientSites.isRelayed) {
if (!site.exitNodeId) {
logger.warn(
`Site ${site.siteId} has no exit node, skipping`
);
return null;
}
if (!exitNode) {
logger.warn(
`Exit node not found for site ${site.siteId}`
);
return null;
}
endpoint = `${exitNode.endpoint}:21820`;
}
if (!endpoint) {
logger.warn(
`In Newt get config: Peer site ${site.siteId} has no endpoint, skipping`
);
return null;
}
await updatePeer(client.clients.clientId, {
siteId: site.siteId,
endpoint: endpoint,
publicKey: site.publicKey,
serverIP: site.address,
serverPort: site.listenPort,
remoteSubnets: site.remoteSubnets
});
} catch (error) {
logger.error(
`Failed to add/update peer ${client.clients.pubKey} to olm ${newt.newtId}: ${error}`
if (!site.publicKey) {
logger.warn(
`Site ${site.siteId} has no public key, skipping`
);
return null;
}
if (!exitNode) {
logger.warn(`Exit node not found for site ${site.siteId}`);
return null;
}
if (!site.endpoint) {
logger.warn(
`Site ${site.siteId} has no endpoint, skipping`
);
return null;
}
// const allSiteResources = await db // only get the site resources that this client has access to
// .select()
// .from(siteResources)
// .innerJoin(
// clientSiteResourcesAssociationsCache,
// eq(
// siteResources.siteResourceId,
// clientSiteResourcesAssociationsCache.siteResourceId
// )
// )
// .where(
// and(
// eq(siteResources.siteId, site.siteId),
// eq(
// clientSiteResourcesAssociationsCache.clientId,
// client.clients.clientId
// )
// )
// );
await updatePeer(client.clients.clientId, {
siteId: site.siteId,
endpoint: site.endpoint,
relayEndpoint: `${exitNode.endpoint}:${config.getRawConfig().gerbil.clients_start_port}`,
publicKey: site.publicKey,
serverIP: site.address,
serverPort: site.listenPort
// remoteSubnets: generateRemoteSubnets(
// allSiteResources.map(
// ({ siteResources }) => siteResources
// )
// ),
// aliases: generateAliasConfig(
// allSiteResources.map(
// ({ siteResources }) => siteResources
// )
// )
});
return {
publicKey: client.clients.pubKey!,
allowedIps: [`${client.clients.subnet.split("/")[0]}/32`], // we want to only allow from that client
endpoint: client.clientSites.isRelayed
endpoint: client.clientSitesAssociationsCache.isRelayed
? ""
: client.clientSites.endpoint! // if its relayed it should be localhost
: client.clientSitesAssociationsCache.endpoint! // if it's relayed it should be localhost
};
})
);
@@ -208,42 +230,50 @@ export const handleGetConfigMessage: MessageHandler = async (context) => {
// Filter out any null values from peers that didn't have an olm
const validPeers = peers.filter((peer) => peer !== null);
// Get all enabled targets with their resource protocol information
// Get all enabled site resources for this site
const allSiteResources = await db
.select()
.from(siteResources)
.where(eq(siteResources.siteId, siteId));
const { tcpTargets, udpTargets } = allSiteResources.reduce(
(acc, resource) => {
// Filter out invalid targets
if (!resource.proxyPort || !resource.destinationIp || !resource.destinationPort) {
return acc;
}
const targetsToSend: SubnetProxyTarget[] = [];
// Format target into string
const formattedTarget = `${resource.proxyPort}:${resource.destinationIp}:${resource.destinationPort}`;
for (const resource of allSiteResources) {
// Get clients associated with this specific resource
const resourceClients = await db
.select({
clientId: clients.clientId,
pubKey: clients.pubKey,
subnet: clients.subnet
})
.from(clients)
.innerJoin(
clientSiteResourcesAssociationsCache,
eq(
clients.clientId,
clientSiteResourcesAssociationsCache.clientId
)
)
.where(
eq(
clientSiteResourcesAssociationsCache.siteResourceId,
resource.siteResourceId
)
);
// Add to the appropriate protocol array
if (resource.protocol === "tcp") {
acc.tcpTargets.push(formattedTarget);
} else {
acc.udpTargets.push(formattedTarget);
}
const resourceTargets = generateSubnetProxyTargets(
resource,
resourceClients
);
return acc;
},
{ tcpTargets: [] as string[], udpTargets: [] as string[] }
);
targetsToSend.push(...resourceTargets);
}
// Build the configuration response
const configResponse = {
ipAddress: site.address,
peers: validPeers,
targets: {
udp: udpTargets,
tcp: tcpTargets
}
targets: targetsToSend
};
logger.debug("Sending config: ", configResponse);

View File

@@ -1,8 +1,8 @@
import { db, exitNodeOrgs, newts } from "@server/db";
import { db, ExitNode, exitNodeOrgs, newts, Transaction } from "@server/db";
import { MessageHandler } from "@server/routers/ws";
import { exitNodes, Newt, resources, sites, Target, targets } from "@server/db";
import { targetHealthCheck } from "@server/db";
import { eq, and, sql, inArray } from "drizzle-orm";
import { eq, and, sql, inArray, ne } from "drizzle-orm";
import { addPeer, deletePeer } from "../gerbil/peers";
import logger from "@server/logger";
import config from "@server/lib/config";
@@ -17,6 +17,7 @@ import {
verifyExitNodeOrgAccess
} from "#dynamic/lib/exitNodes";
import { fetchContainers } from "./dockerSocket";
import { lockManager } from "#dynamic/lib/lock";
export type ExitNodePingResult = {
exitNodeId: number;
@@ -151,27 +152,8 @@ export const handleNewtRegisterMessage: MessageHandler = async (context) => {
return;
}
const sitesQuery = await db
.select({
subnet: sites.subnet
})
.from(sites)
.where(eq(sites.exitNodeId, exitNodeId));
const newSubnet = await getUniqueSubnetForSite(exitNode);
const blockSize = config.getRawConfig().gerbil.site_block_size;
const subnets = sitesQuery
.map((site) => site.subnet)
.filter(
(subnet) =>
subnet && /^(\d{1,3}\.){3}\d{1,3}\/\d{1,2}$/.test(subnet)
)
.filter((subnet) => subnet !== null);
subnets.push(exitNode.address.replace(/\/\d+$/, `/${blockSize}`));
const newSubnet = findNextAvailableCidr(
subnets,
blockSize,
exitNode.address
);
if (!newSubnet) {
logger.error(
`No available subnets found for the new exit node id ${exitNodeId} and site id ${siteId}`
@@ -378,3 +360,39 @@ export const handleNewtRegisterMessage: MessageHandler = async (context) => {
excludeSender: false // Include sender in broadcast
};
};
async function getUniqueSubnetForSite(
exitNode: ExitNode,
trx: Transaction | typeof db = db
): Promise<string | null> {
const lockKey = `subnet-allocation:${exitNode.exitNodeId}`;
return await lockManager.withLock(
lockKey,
async () => {
const sitesQuery = await trx
.select({
subnet: sites.subnet
})
.from(sites)
.where(eq(sites.exitNodeId, exitNode.exitNodeId));
const blockSize = config.getRawConfig().gerbil.site_block_size;
const subnets = sitesQuery
.map((site) => site.subnet)
.filter(
(subnet) =>
subnet && /^(\d{1,3}\.){3}\d{1,3}\/\d{1,2}$/.test(subnet)
)
.filter((subnet) => subnet !== null);
subnets.push(exitNode.address.replace(/\/\d+$/, `/${blockSize}`));
const newSubnet = findNextAvailableCidr(
subnets,
blockSize,
exitNode.address
);
return newSubnet;
},
5000 // 5 second lock TTL - subnet allocation should be quick
);
}
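A short usage sketch of the locked allocation helper, assuming it is called from the same module during newt registration:

async function allocateSiteSubnet(exitNode: ExitNode): Promise<string> {
    // The lock key is per exit node, so concurrent registrations against the
    // same exit node read the used-subnet list one at a time; the 5s TTL keeps
    // a crashed holder from wedging allocation indefinitely.
    const newSubnet = await getUniqueSubnetForSite(exitNode);
    if (!newSubnet) {
        throw new Error(
            `No available subnets on exit node ${exitNode.exitNodeId}`
        );
    }
    return newSubnet;
}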

View File

@@ -1,4 +1,4 @@
import { db } from "@server/db";
import { db, Site } from "@server/db";
import { newts, sites } from "@server/db";
import { eq } from "drizzle-orm";
import { sendToClient } from "#dynamic/routers/ws";
@@ -10,65 +10,78 @@ export async function addPeer(
publicKey: string;
allowedIps: string[];
endpoint: string;
}
},
newtId?: string
) {
const [site] = await db
.select()
.from(sites)
.where(eq(sites.siteId, siteId))
.limit(1);
if (!site) {
throw new Error(`Exit node with ID ${siteId} not found`);
let site: Site | null = null;
if (!newtId) {
[site] = await db
.select()
.from(sites)
.where(eq(sites.siteId, siteId))
.limit(1);
if (!site) {
throw new Error(`Site with ID ${siteId} not found`);
}
// get the newt on the site
const [newt] = await db
.select()
.from(newts)
.where(eq(newts.siteId, siteId))
.limit(1);
if (!newt) {
throw new Error(`Newt not found for site ${siteId}`);
}
newtId = newt.newtId;
}
// get the newt on the site
const [newt] = await db
.select()
.from(newts)
.where(eq(newts.siteId, siteId))
.limit(1);
if (!newt) {
throw new Error(`Site found for site ${siteId}`);
}
sendToClient(newt.newtId, {
await sendToClient(newtId, {
type: "newt/wg/peer/add",
data: peer
}).catch((error) => {
logger.warn(`Error sending message:`, error);
});
logger.info(`Added peer ${peer.publicKey} to newt ${newt.newtId}`);
logger.info(`Added peer ${peer.publicKey} to newt ${newtId}`);
return site;
}
export async function deletePeer(siteId: number, publicKey: string) {
const [site] = await db
.select()
.from(sites)
.where(eq(sites.siteId, siteId))
.limit(1);
if (!site) {
throw new Error(`Site with ID ${siteId} not found`);
export async function deletePeer(siteId: number, publicKey: string, newtId?: string) {
let site: Site | null = null;
if (!newtId) {
[site] = await db
.select()
.from(sites)
.where(eq(sites.siteId, siteId))
.limit(1);
if (!site) {
throw new Error(`Site with ID ${siteId} not found`);
}
// get the newt on the site
const [newt] = await db
.select()
.from(newts)
.where(eq(newts.siteId, siteId))
.limit(1);
if (!newt) {
throw new Error(`Newt not found for site ${siteId}`);
}
newtId = newt.newtId;
}
// get the newt on the site
const [newt] = await db
.select()
.from(newts)
.where(eq(newts.siteId, siteId))
.limit(1);
if (!newt) {
throw new Error(`Newt not found for site ${siteId}`);
}
sendToClient(newt.newtId, {
await sendToClient(newtId, {
type: "newt/wg/peer/remove",
data: {
publicKey
}
}).catch((error) => {
logger.warn(`Error sending message:`, error);
});
logger.info(`Deleted peer ${publicKey} from newt ${newt.newtId}`);
logger.info(`Deleted peer ${publicKey} from newt ${newtId}`);
return site;
}
@@ -79,36 +92,43 @@ export async function updatePeer(
peer: {
allowedIps?: string[];
endpoint?: string;
}
},
newtId?: string
) {
const [site] = await db
.select()
.from(sites)
.where(eq(sites.siteId, siteId))
.limit(1);
if (!site) {
throw new Error(`Site with ID ${siteId} not found`);
let site: Site | null = null;
if (!newtId) {
[site] = await db
.select()
.from(sites)
.where(eq(sites.siteId, siteId))
.limit(1);
if (!site) {
throw new Error(`Site with ID ${siteId} not found`);
}
// get the newt on the site
const [newt] = await db
.select()
.from(newts)
.where(eq(newts.siteId, siteId))
.limit(1);
if (!newt) {
throw new Error(`Newt not found for site ${siteId}`);
}
newtId = newt.newtId;
}
// get the newt on the site
const [newt] = await db
.select()
.from(newts)
.where(eq(newts.siteId, siteId))
.limit(1);
if (!newt) {
throw new Error(`Newt not found for site ${siteId}`);
}
sendToClient(newt.newtId, {
await sendToClient(newtId, {
type: "newt/wg/peer/update",
data: {
publicKey,
...peer
}
}).catch((error) => {
logger.warn(`Error sending message:`, error);
});
logger.info(`Updated peer ${publicKey} on newt ${newt.newtId}`);
logger.info(`Updated peer ${publicKey} on newt ${newtId}`);
return site;
}
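A brief sketch of the two call shapes the reworked helpers now support; the wrapper name is hypothetical:

import { updatePeer } from "./peers"; // this module

async function pushClientEndpoint(
    siteId: number,
    clientPubKey: string,
    endpoint: string,
    newtId?: string
) {
    // Without newtId the helper resolves the site and its newt from siteId;
    // with it, both lookups are skipped and the returned site is null.
    return updatePeer(siteId, clientPubKey, { endpoint }, newtId);
}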

View File

@@ -0,0 +1,116 @@
import { NextFunction, Request, Response } from "express";
import { db, olms } from "@server/db";
import HttpCode from "@server/types/HttpCode";
import { z } from "zod";
import createHttpError from "http-errors";
import response from "@server/lib/response";
import moment from "moment";
import { generateId } from "@server/auth/sessions/app";
import { fromError } from "zod-validation-error";
import { hashPassword } from "@server/auth/password";
import { OpenAPITags, registry } from "@server/openApi";
import { calculateUserClientsForOrgs } from "@server/lib/calculateUserClientsForOrgs";
const bodySchema = z
.object({
name: z.string().min(1).max(255)
})
.strict();
const paramsSchema = z.object({
userId: z.string()
});
export type CreateOlmBody = z.infer<typeof bodySchema>;
export type CreateOlmResponse = {
olmId: string;
secret: string;
};
// registry.registerPath({
// method: "put",
// path: "/user/{userId}/olm",
// description: "Create a new olm for a user.",
// tags: [OpenAPITags.User, OpenAPITags.Client],
// request: {
// body: {
// content: {
// "application/json": {
// schema: bodySchema
// }
// }
// },
// params: paramsSchema
// },
// responses: {}
// });
export async function createUserOlm(
req: Request,
res: Response,
next: NextFunction
): Promise<any> {
try {
const parsedBody = bodySchema.safeParse(req.body);
if (!parsedBody.success) {
return next(
createHttpError(
HttpCode.BAD_REQUEST,
fromError(parsedBody.error).toString()
)
);
}
const { name } = parsedBody.data;
const parsedParams = paramsSchema.safeParse(req.params);
if (!parsedParams.success) {
return next(
createHttpError(
HttpCode.BAD_REQUEST,
fromError(parsedParams.error).toString()
)
);
}
const { userId } = parsedParams.data;
const olmId = generateId(15);
const secret = generateId(48);
const secretHash = await hashPassword(secret);
await db.transaction(async (trx) => {
await trx.insert(olms).values({
olmId: olmId,
userId,
name,
secretHash,
dateCreated: moment().toISOString()
});
await calculateUserClientsForOrgs(userId, trx);
});
return response<CreateOlmResponse>(res, {
data: {
olmId,
secret
},
success: true,
error: false,
message: "Olm created successfully",
status: HttpCode.OK
});
} catch (e) {
console.error(e);
return next(
createHttpError(
HttpCode.INTERNAL_SERVER_ERROR,
"Failed to create olm"
)
);
}
}
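
A rough caller-side sketch of this endpoint, assuming it is mounted at PUT /user/{userId}/olm as in the commented-out OpenAPI registration; the /api/v1 base path and the fetch-based client are assumptions, not taken from this diff:

const userId = "someUserId"; // placeholder
const res = await fetch(`/api/v1/user/${encodeURIComponent(userId)}/olm`, {
    method: "PUT",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ name: "laptop" }) // matches CreateOlmBody
});
// Shape mirrors CreateOlmResponse wrapped in the standard response envelope.
const { data } = (await res.json()) as {
    data: { olmId: string; secret: string };
};
// data.secret is only returned here; the server persists just its hash.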


@@ -0,0 +1,101 @@
import { NextFunction, Request, Response } from "express";
import { Client, db } from "@server/db";
import { olms, clients, clientSitesAssociationsCache } from "@server/db";
import { eq } from "drizzle-orm";
import HttpCode from "@server/types/HttpCode";
import createHttpError from "http-errors";
import response from "@server/lib/response";
import { z } from "zod";
import { fromError } from "zod-validation-error";
import logger from "@server/logger";
import { OpenAPITags, registry } from "@server/openApi";
import { rebuildClientAssociationsFromClient } from "@server/lib/rebuildClientAssociations";
import { sendTerminateClient } from "../client/terminate";
const paramsSchema = z
.object({
userId: z.string(),
olmId: z.string()
})
.strict();
// registry.registerPath({
// method: "delete",
// path: "/user/{userId}/olm/{olmId}",
// description: "Delete an olm for a user.",
// tags: [OpenAPITags.User, OpenAPITags.Client],
// request: {
// params: paramsSchema
// },
// responses: {}
// });
export async function deleteUserOlm(
req: Request,
res: Response,
next: NextFunction
): Promise<any> {
try {
const parsedParams = paramsSchema.safeParse(req.params);
if (!parsedParams.success) {
return next(
createHttpError(
HttpCode.BAD_REQUEST,
fromError(parsedParams.error).toString()
)
);
}
const { olmId } = parsedParams.data;
// Delete associated clients and the OLM in a transaction
await db.transaction(async (trx) => {
// Find all clients associated with this OLM
const associatedClients = await trx
.select({ clientId: clients.clientId })
.from(clients)
.where(eq(clients.olmId, olmId));
let deletedClient: Client | null = null;
// Delete all associated clients
if (associatedClients.length > 0) {
[deletedClient] = await trx
.delete(clients)
.where(eq(clients.olmId, olmId))
.returning();
}
// Finally, delete the OLM itself
const [olm] = await trx
.delete(olms)
.where(eq(olms.olmId, olmId))
.returning();
if (deletedClient) {
await rebuildClientAssociationsFromClient(deletedClient, trx);
if (olm) {
await sendTerminateClient(
deletedClient.clientId,
olm.olmId
); // the olmId must be provided because it can't be looked up after deletion
}
}
});
return response(res, {
data: null,
success: true,
error: false,
message: "Device deleted successfully",
status: HttpCode.OK
});
} catch (error) {
logger.error(error);
return next(
createHttpError(
HttpCode.INTERNAL_SERVER_ERROR,
"Failed to delete device"
)
);
}
}
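
And the matching delete call, again assuming the route shape from its commented-out registration (DELETE /user/{userId}/olm/{olmId}). The handler removes dependent clients before the olm in a single transaction and passes the olmId to sendTerminateClient explicitly because the row no longer exists at that point:

// userId and olmId are placeholders, as in the previous sketch.
const delRes = await fetch(
    `/api/v1/user/${encodeURIComponent(userId)}/olm/${encodeURIComponent(olmId)}`,
    { method: "DELETE" }
);
if (!delRes.ok) {
    throw new Error(`Failed to delete device (HTTP ${delRes.status})`);
}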
