Compare commits

...

13 Commits

Author SHA1 Message Date
Owen
c50392c947 Remove logging 2025-10-15 13:57:42 -07:00
Owen
ceee978fcd Merge branch 'dev' 2025-10-15 12:13:15 -07:00
Owen
c5a73dc87e Try to handle the certs better 2025-10-15 12:12:59 -07:00
Owen
7198ef2774 Merge branch 'dev' of github.com:fosrl/pangolin into dev 2025-10-15 11:12:38 -07:00
miloschwartz
7e9a066797 update form 2025-10-15 11:10:37 -07:00
Owen
e2d0338b0b Merge branch 'dev' 2025-10-15 10:39:50 -07:00
Owen
59ecab5738 Dont ping remote nodes; handle certs better 2025-10-15 10:39:45 -07:00
miloschwartz
721bf3403d fix form 2025-10-15 10:21:00 -07:00
Owen
3b8ba47377 Update package lock 2025-10-14 18:00:46 -07:00
Milo Schwartz
e752929f69 Update README.md 2025-10-14 20:50:41 -04:00
Milo Schwartz
e41c3e6f54 Update README.md 2025-10-14 20:48:44 -04:00
Milo Schwartz
9dedd1a8de Update README.md 2025-10-14 20:41:14 -04:00
Owen
c4a5fae28f Update workflow and add runner 2025-10-14 17:34:47 -07:00
10 changed files with 4664 additions and 1690 deletions

View File

@@ -8,7 +8,7 @@ on:
jobs: jobs:
release: release:
name: Build and Release name: Build and Release
runs-on: ubuntu-latest runs-on: amd64-runner
steps: steps:
- name: Checkout code - name: Checkout code

View File

@@ -2,7 +2,7 @@
major_tag := $(shell echo $(tag) | cut -d. -f1) major_tag := $(shell echo $(tag) | cut -d. -f1)
minor_tag := $(shell echo $(tag) | cut -d. -f1,2) minor_tag := $(shell echo $(tag) | cut -d. -f1,2)
build-release: build-release-arm:
@if [ -z "$(tag)" ]; then \ @if [ -z "$(tag)" ]; then \
echo "Error: tag is required. Usage: make build-release tag=<tag>"; \ echo "Error: tag is required. Usage: make build-release-arm tag=<tag>"; \
exit 1; \ exit 1; \
@@ -25,6 +25,24 @@ build-release:
--tag fosrl/pangolin:postgresql-$(minor_tag) \ --tag fosrl/pangolin:postgresql-$(minor_tag) \
--tag fosrl/pangolin:postgresql-$(tag) \ --tag fosrl/pangolin:postgresql-$(tag) \
--push . --push .
# Build and push the Enterprise Edition (sqlite) multi-arch image.
# NOTE: every option line must end in "\" so the shell sees one command;
# the original was missing the continuation after BUILD=enterprise.
docker buildx build \
--build-arg BUILD=enterprise \
--build-arg DATABASE=sqlite \
--platform linux/arm64,linux/amd64 \
--tag fosrl/pangolin:ee-latest \
--tag fosrl/pangolin:ee-$(major_tag) \
--tag fosrl/pangolin:ee-$(minor_tag) \
--tag fosrl/pangolin:ee-$(tag) \
--push .
# Build and push the Enterprise Edition (PostgreSQL) multi-arch image.
docker buildx build \
--build-arg BUILD=enterprise \
--build-arg DATABASE=pg \
--platform linux/arm64,linux/amd64 \
--tag fosrl/pangolin:ee-postgresql-latest \
--tag fosrl/pangolin:ee-postgresql-$(major_tag) \
--tag fosrl/pangolin:ee-postgresql-$(minor_tag) \
--tag fosrl/pangolin:ee-postgresql-$(tag) \
--push .
build-arm: build-arm:
docker buildx build --platform linux/arm64 -t fosrl/pangolin:latest . docker buildx build --platform linux/arm64 -t fosrl/pangolin:latest .

View File

@@ -35,19 +35,24 @@
</div> </div>
<p align="center">
<strong>
Start testing Pangolin at <a href="https://pangolin.fossorial.io/auth/signup">pangolin.fossorial.io</a>
</strong>
</p>
Pangolin is a self-hosted tunneled reverse proxy server with identity and context aware access control, designed to easily expose and protect applications running anywhere. Pangolin acts as a central hub and connects isolated networks — even those behind restrictive firewalls — through encrypted tunnels, enabling easy access to remote services without opening ports or requiring a VPN. Pangolin is a self-hosted tunneled reverse proxy server with identity and context aware access control, designed to easily expose and protect applications running anywhere. Pangolin acts as a central hub and connects isolated networks — even those behind restrictive firewalls — through encrypted tunnels, enabling easy access to remote services without opening ports or requiring a VPN.
## Installation ## Installation
Check out the [quick install guide](https://docs.digpangolin.com) for how to install and set up Pangolin. Check out the [quick install guide](https://docs.digpangolin.com/self-host/quick-install) for how to install and set up Pangolin.
## Deployment Options ## Deployment Options
| <img width=500 /> | Description | | <img width=500 /> | Description |
|-----------------|--------------| |-----------------|--------------|
| **Self-Host: Community Edition** | Free, open source, and AGPL-3 compliant. | | **Self-Host** | Free, open source, and AGPL-3 compliant. |
| **Self-Host: Enterprise Edition** | Licensed under Fossorial Commercial License. Free for personal and hobbyist use, and for businesses earning under \$100K USD annually. | | **Pangolin Cloud** | Fully managed service with instant setup and pay-as-you-go pricing — no infrastructure required. Or, self-host your own [remote node](https://docs.digpangolin.com/manage/remote-node/nodes) and connect to our control plane. |
| **Pangolin Cloud** | Fully managed service with instant setup and pay-as-you-go pricing — no infrastructure required. Or, self-host your own [remote node](https://github.com/fosrl/remote-node) and connect to our control plane. |
## Key Features ## Key Features

180
install/get-installer.sh Normal file
View File

@@ -0,0 +1,180 @@
#!/bin/bash
# Get installer - Cross-platform installation script
# Downloads the latest "installer" release binary for the current Linux
# platform (amd64/arm64) into the current working directory.
# Usage: curl -fsSL https://raw.githubusercontent.com/fosrl/installer/refs/heads/main/get-installer.sh | bash
# Abort on the first failing command.
set -e
# Colors for output (ANSI escapes used by the print_* helpers below)
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
# GitHub repository info: releases of this repo host the installer binaries
REPO="fosrl/pangolin"
GITHUB_API_URL="https://api.github.com/repos/${REPO}/releases/latest"
# Logging helpers: print a message prefixed with a colored severity tag.
print_status() {
local msg="$1"
echo -e "${GREEN}[INFO]${NC} ${msg}"
}
print_warning() {
local msg="$1"
echo -e "${YELLOW}[WARN]${NC} ${msg}"
}
print_error() {
local msg="$1"
echo -e "${RED}[ERROR]${NC} ${msg}"
}
# Function to get latest version from GitHub API.
# Prints the latest release tag (leading "v" stripped) on stdout.
# Exits non-zero when no downloader exists or the response cannot be parsed.
get_latest_version() {
local latest_info
# Fetch the release metadata with whichever downloader is available.
if command -v curl >/dev/null 2>&1; then
latest_info=$(curl -fsSL "$GITHUB_API_URL" 2>/dev/null)
elif command -v wget >/dev/null 2>&1; then
latest_info=$(wget -qO- "$GITHUB_API_URL" 2>/dev/null)
else
print_error "Neither curl nor wget is available. Please install one of them." >&2
exit 1
fi
if [ -z "$latest_info" ]; then
print_error "Failed to fetch latest version information" >&2
exit 1
fi
# Extract version from JSON response (works without jq).
# Declare and assign separately so the pipeline's exit status is not
# masked by `local` (ShellCheck SC2155).
local version
version=$(echo "$latest_info" | grep '"tag_name"' | head -1 | sed 's/.*"tag_name": *"\([^"]*\)".*/\1/')
if [ -z "$version" ]; then
print_error "Could not parse version from GitHub API response" >&2
exit 1
fi
# Remove 'v' prefix if present
version=$(echo "$version" | sed 's/^v//')
echo "$version"
}
# Detect OS and architecture; echoes "<os>_<arch>" (e.g. "linux_amd64").
# Anything other than Linux on amd64/arm64 is rejected with an error.
detect_platform() {
local os arch
# Detect OS - only support Linux
case "$(uname -s)" in
Linux*)
os="linux"
;;
*)
print_error "Unsupported operating system: $(uname -s). Only Linux is supported."
exit 1
;;
esac
# Detect architecture - only support amd64 and arm64
case "$(uname -m)" in
x86_64|amd64)
arch="amd64"
;;
arm64|aarch64)
arch="arm64"
;;
*)
print_error "Unsupported architecture: $(uname -m). Only amd64 and arm64 are supported on Linux."
exit 1
;;
esac
echo "${os}_${arch}"
}
# Resolve the directory the installer binary will be placed in.
# Currently this is always the caller's current working directory.
get_install_dir() {
# Install to the current directory
local install_dir
install_dir="$(pwd)"
# Defensive check: pwd should always name an existing directory.
if [ ! -d "$install_dir" ]; then
print_error "Installation directory does not exist: $install_dir"
exit 1
fi
echo "$install_dir"
}
# Download and install installer.
# $1 - platform string ("<os>_<arch>") selecting the release asset
# $2 - destination directory for the installer binary
# Reads the global BASE_URL (set in main) to locate the release download.
install_installer() {
local platform="$1"
local install_dir="$2"
local binary_name="installer_${platform}"
local download_url="${BASE_URL}/${binary_name}"
local final_path="${install_dir}/installer"
# Use a unique temp file instead of the fixed /tmp/installer so
# concurrent runs (or different users) cannot clobber each other.
local temp_file
temp_file="$(mktemp /tmp/installer.XXXXXX)"
print_status "Downloading installer from ${download_url}"
# Download the binary
if command -v curl >/dev/null 2>&1; then
curl -fsSL "$download_url" -o "$temp_file"
elif command -v wget >/dev/null 2>&1; then
wget -q "$download_url" -O "$temp_file"
else
rm -f "$temp_file"
print_error "Neither curl nor wget is available. Please install one of them."
exit 1
fi
# Create install directory if it doesn't exist
mkdir -p "$install_dir"
# Move binary to install directory
mv "$temp_file" "$final_path"
# Make executable
chmod +x "$final_path"
print_status "Installer downloaded to ${final_path}"
}
# Verify installation: confirm the installer binary exists and is executable.
# $1 - directory the installer was placed in
# Returns 0 on success, 1 on failure (after printing a status message).
verify_installation() {
local installer_path="$1/installer"
# Guard clause: bail out unless the file exists and is executable.
if [ ! -f "$installer_path" ] || [ ! -x "$installer_path" ]; then
print_error "Installation failed. Binary not found or not executable."
return 1
fi
print_status "Installation successful!"
return 0
}
# Main installation process: orchestrates version lookup, platform
# detection, download, and verification. Sets the globals VERSION,
# BASE_URL, PLATFORM and INSTALL_DIR; BASE_URL is read by install_installer.
main() {
print_status "Installing latest version of installer..."
# Get latest version
print_status "Fetching latest version from GitHub..."
VERSION=$(get_latest_version)
print_status "Latest version: v${VERSION}"
# Set base URL with the fetched version
# NOTE(review): get_latest_version strips a leading "v", but GitHub
# release download paths use the literal tag name — confirm the
# project's release tags are not "v"-prefixed, or this URL will 404.
BASE_URL="https://github.com/${REPO}/releases/download/${VERSION}"
# Detect platform
PLATFORM=$(detect_platform)
print_status "Detected platform: ${PLATFORM}"
# Get install directory
INSTALL_DIR=$(get_install_dir)
print_status "Install directory: ${INSTALL_DIR}"
# Install installer
install_installer "$PLATFORM" "$INSTALL_DIR"
# Verify installation
if verify_installation "$INSTALL_DIR"; then
print_status "Installer is ready to use!"
else
exit 1
fi
}
# Run main function
main "$@"

View File

@@ -1839,7 +1839,7 @@
"companyPhoneNumber": "Company phone number", "companyPhoneNumber": "Company phone number",
"country": "Country", "country": "Country",
"phoneNumberOptional": "Phone number (optional)", "phoneNumberOptional": "Phone number (optional)",
"complianceConfirmation": "I confirm that I am in compliance with the Fossorial Commercial License and that reporting inaccurate information or misidentifying use of the product is a violation of the license." "complianceConfirmation": "I confirm that the information I provided is accurate and that I am in compliance with the Fossorial Commercial License. Reporting inaccurate information or misidentifying use of the product is a violation of the license and may result in your key getting revoked."
}, },
"buttons": { "buttons": {
"close": "Close", "close": "Close",

3784
package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@@ -13,72 +13,174 @@
import config from "./config"; import config from "./config";
import { certificates, db } from "@server/db"; import { certificates, db } from "@server/db";
import { and, eq, isNotNull } from "drizzle-orm"; import { and, eq, isNotNull, or, inArray, sql } from "drizzle-orm";
import { decryptData } from "@server/lib/encryption"; import { decryptData } from "@server/lib/encryption";
import * as fs from "fs"; import * as fs from "fs";
import NodeCache from "node-cache";
import logger from "@server/logger";
const encryptionKeyPath =
config.getRawPrivateConfig().server.encryption_key_path;
if (!fs.existsSync(encryptionKeyPath)) {
throw new Error(
"Encryption key file not found. Please generate one first."
);
}
const encryptionKeyHex = fs.readFileSync(encryptionKeyPath, "utf8").trim();
const encryptionKey = Buffer.from(encryptionKeyHex, "hex");
// Define the return type for clarity and type safety
export type CertificateResult = {
id: number;
domain: string;
queriedDomain: string; // The domain that was originally requested (may differ for wildcards)
wildcard: boolean | null;
certFile: string | null;
keyFile: string | null;
expiresAt: number | null;
updatedAt?: number | null;
};
// --- In-Memory Cache Implementation ---
const certificateCache = new NodeCache({ stdTTL: 180 }); // Cache for 3 minutes (180 seconds)
export async function getValidCertificatesForDomains( export async function getValidCertificatesForDomains(
domains: Set<string> domains: Set<string>,
): Promise< useCache: boolean = true
Array<{ ): Promise<Array<CertificateResult>> {
id: number; const finalResults: CertificateResult[] = [];
domain: string; const domainsToQuery = new Set<string>();
wildcard: boolean | null;
certFile: string | null; // 1. Check cache first if enabled
keyFile: string | null; if (useCache) {
expiresAt: number | null; for (const domain of domains) {
updatedAt?: number | null; const cachedCert = certificateCache.get<CertificateResult>(domain);
}> if (cachedCert) {
> { finalResults.push(cachedCert); // Valid cache hit
if (domains.size === 0) { } else {
return []; domainsToQuery.add(domain); // Cache miss or expired
}
}
} else {
// If caching is disabled, add all domains to the query set
domains.forEach((d) => domainsToQuery.add(d));
} }
const domainArray = Array.from(domains); // 2. If all domains were resolved from the cache, return early
if (domainsToQuery.size === 0) {
const decryptedResults = decryptFinalResults(finalResults);
return decryptedResults;
}
// TODO: add more foreign keys to make this query more efficient - we dont need to keep getting every certificate // 3. Prepare domains for the database query
const validCerts = await db const domainsToQueryArray = Array.from(domainsToQuery);
.select({ const parentDomainsToQuery = new Set<string>();
id: certificates.certId,
domain: certificates.domain, domainsToQueryArray.forEach((domain) => {
certFile: certificates.certFile, const parts = domain.split(".");
keyFile: certificates.keyFile, // A wildcard can only match a domain with at least two parts (e.g., example.com)
expiresAt: certificates.expiresAt, if (parts.length > 1) {
updatedAt: certificates.updatedAt, parentDomainsToQuery.add(parts.slice(1).join("."));
wildcard: certificates.wildcard }
}) });
const parentDomainsArray = Array.from(parentDomainsToQuery);
// 4. Build and execute a single, efficient Drizzle query
// This query fetches all potential exact and wildcard matches in one database round-trip.
const potentialCerts = await db
.select()
.from(certificates) .from(certificates)
.where( .where(
and( and(
eq(certificates.status, "valid"), eq(certificates.status, "valid"),
isNotNull(certificates.certFile), isNotNull(certificates.certFile),
isNotNull(certificates.keyFile) isNotNull(certificates.keyFile),
or(
// Condition for exact matches on the requested domains
inArray(certificates.domain, domainsToQueryArray),
// Condition for wildcard matches on the parent domains
parentDomainsArray.length > 0
? and(
inArray(certificates.domain, parentDomainsArray),
eq(certificates.wildcard, true)
)
: // If there are no possible parent domains, this condition is false
sql`false`
)
) )
); );
// Filter certificates for the specified domains and if it is a wildcard then you can match on everything up to the first dot // 5. Process the database results, prioritizing exact matches over wildcards
const validCertsFiltered = validCerts.filter((cert) => { const exactMatches = new Map<string, (typeof potentialCerts)[0]>();
return ( const wildcardMatches = new Map<string, (typeof potentialCerts)[0]>();
domainArray.includes(cert.domain) ||
(cert.wildcard &&
domainArray.some((domain) =>
domain.endsWith(`.${cert.domain}`)
))
);
});
const encryptionKeyPath = config.getRawPrivateConfig().server.encryption_key_path; for (const cert of potentialCerts) {
if (cert.wildcard) {
if (!fs.existsSync(encryptionKeyPath)) { wildcardMatches.set(cert.domain, cert);
throw new Error( } else {
"Encryption key file not found. Please generate one first." exactMatches.set(cert.domain, cert);
); }
} }
const encryptionKeyHex = fs.readFileSync(encryptionKeyPath, "utf8").trim(); for (const domain of domainsToQuery) {
const encryptionKey = Buffer.from(encryptionKeyHex, "hex"); let foundCert: (typeof potentialCerts)[0] | undefined = undefined;
const validCertsDecrypted = validCertsFiltered.map((cert) => { // Priority 1: Check for an exact match (non-wildcard)
if (exactMatches.has(domain)) {
foundCert = exactMatches.get(domain);
}
// Priority 2: Check for a wildcard certificate that matches the exact domain
else {
if (wildcardMatches.has(domain)) {
foundCert = wildcardMatches.get(domain);
}
// Priority 3: Check for a wildcard match on the parent domain
else {
const parts = domain.split(".");
if (parts.length > 1) {
const parentDomain = parts.slice(1).join(".");
if (wildcardMatches.has(parentDomain)) {
foundCert = wildcardMatches.get(parentDomain);
}
}
}
}
// If a certificate was found, format it, add to results, and cache it
if (foundCert) {
logger.debug(`Creating result cert for ${domain} using cert from ${foundCert.domain}`);
const resultCert: CertificateResult = {
id: foundCert.certId,
domain: foundCert.domain, // The actual domain of the cert record
queriedDomain: domain, // The domain that was originally requested
wildcard: foundCert.wildcard,
certFile: foundCert.certFile,
keyFile: foundCert.keyFile,
expiresAt: foundCert.expiresAt,
updatedAt: foundCert.updatedAt
};
finalResults.push(resultCert);
// Add to cache for future requests, using the *requested domain* as the key
if (useCache) {
certificateCache.set(domain, resultCert);
}
}
}
const decryptedResults = decryptFinalResults(finalResults);
return decryptedResults;
}
function decryptFinalResults(
finalResults: CertificateResult[]
): CertificateResult[] {
const validCertsDecrypted = finalResults.map((cert) => {
// Decrypt and save certificate file // Decrypt and save certificate file
const decryptedCert = decryptData( const decryptedCert = decryptData(
cert.certFile!, // is not null from query cert.certFile!, // is not null from query
@@ -97,4 +199,4 @@ export async function getValidCertificatesForDomains(
}); });
return validCertsDecrypted; return validCertsDecrypted;
} }

View File

@@ -183,47 +183,47 @@ export async function listExitNodes(orgId: string, filterOnline = false, noCloud
return []; return [];
} }
// Enhanced online checking: consider node offline if either DB says offline OR HTTP ping fails // // Enhanced online checking: consider node offline if either DB says offline OR HTTP ping fails
const nodesWithRealOnlineStatus = await Promise.all( // const nodesWithRealOnlineStatus = await Promise.all(
allExitNodes.map(async (node) => { // allExitNodes.map(async (node) => {
// If database says it's online, verify with HTTP ping // // If database says it's online, verify with HTTP ping
let online: boolean; // let online: boolean;
if (filterOnline && node.type == "remoteExitNode") { // if (filterOnline && node.type == "remoteExitNode") {
try { // try {
const isActuallyOnline = await checkExitNodeOnlineStatus( // const isActuallyOnline = await checkExitNodeOnlineStatus(
node.endpoint // node.endpoint
); // );
// set the item in the database if it is offline // // set the item in the database if it is offline
if (isActuallyOnline != node.online) { // if (isActuallyOnline != node.online) {
await db // await db
.update(exitNodes) // .update(exitNodes)
.set({ online: isActuallyOnline }) // .set({ online: isActuallyOnline })
.where(eq(exitNodes.exitNodeId, node.exitNodeId)); // .where(eq(exitNodes.exitNodeId, node.exitNodeId));
} // }
online = isActuallyOnline; // online = isActuallyOnline;
} catch (error) { // } catch (error) {
logger.warn( // logger.warn(
`Failed to check online status for exit node ${node.name} (${node.endpoint}): ${error instanceof Error ? error.message : "Unknown error"}` // `Failed to check online status for exit node ${node.name} (${node.endpoint}): ${error instanceof Error ? error.message : "Unknown error"}`
); // );
online = false; // online = false;
} // }
} else { // } else {
online = node.online; // online = node.online;
} // }
return { // return {
...node, // ...node,
online // online
}; // };
}) // })
); // );
const remoteExitNodes = nodesWithRealOnlineStatus.filter( const remoteExitNodes = allExitNodes.filter(
(node) => (node) =>
node.type === "remoteExitNode" && (!filterOnline || node.online) node.type === "remoteExitNode" && (!filterOnline || node.online)
); );
const gerbilExitNodes = nodesWithRealOnlineStatus.filter( const gerbilExitNodes = allExitNodes.filter(
(node) => node.type === "gerbil" && (!filterOnline || node.online) && !noCloud (node) => node.type === "gerbil" && (!filterOnline || node.online) && !noCloud
); );

View File

@@ -26,6 +26,10 @@ import { orgs, resources, sites, Target, targets } from "@server/db";
import { sanitize, validatePathRewriteConfig } from "@server/lib/traefik/utils"; import { sanitize, validatePathRewriteConfig } from "@server/lib/traefik/utils";
import privateConfig from "#private/lib/config"; import privateConfig from "#private/lib/config";
import createPathRewriteMiddleware from "@server/lib/traefik/middleware"; import createPathRewriteMiddleware from "@server/lib/traefik/middleware";
import {
CertificateResult,
getValidCertificatesForDomains
} from "#private/lib/certificates";
const redirectHttpsMiddlewareName = "redirect-to-https"; const redirectHttpsMiddlewareName = "redirect-to-https";
const redirectToRootMiddlewareName = "redirect-to-root"; const redirectToRootMiddlewareName = "redirect-to-root";
@@ -89,14 +93,11 @@ export async function getTraefikConfig(
subnet: sites.subnet, subnet: sites.subnet,
exitNodeId: sites.exitNodeId, exitNodeId: sites.exitNodeId,
// Namespace // Namespace
domainNamespaceId: domainNamespaces.domainNamespaceId, domainNamespaceId: domainNamespaces.domainNamespaceId
// Certificate
certificateStatus: certificates.status
}) })
.from(sites) .from(sites)
.innerJoin(targets, eq(targets.siteId, sites.siteId)) .innerJoin(targets, eq(targets.siteId, sites.siteId))
.innerJoin(resources, eq(resources.resourceId, targets.resourceId)) .innerJoin(resources, eq(resources.resourceId, targets.resourceId))
.leftJoin(certificates, eq(certificates.domainId, resources.domainId))
.leftJoin( .leftJoin(
targetHealthCheck, targetHealthCheck,
eq(targetHealthCheck.targetId, targets.targetId) eq(targetHealthCheck.targetId, targets.targetId)
@@ -183,7 +184,6 @@ export async function getTraefikConfig(
tlsServerName: row.tlsServerName, tlsServerName: row.tlsServerName,
setHostHeader: row.setHostHeader, setHostHeader: row.setHostHeader,
enableProxy: row.enableProxy, enableProxy: row.enableProxy,
certificateStatus: row.certificateStatus,
targets: [], targets: [],
headers: row.headers, headers: row.headers,
path: row.path, // the targets will all have the same path path: row.path, // the targets will all have the same path
@@ -213,6 +213,20 @@ export async function getTraefikConfig(
}); });
}); });
let validCerts: CertificateResult[] = [];
if (privateConfig.getRawPrivateConfig().flags.use_pangolin_dns) {
// create a list of all domains to get certs for
const domains = new Set<string>();
for (const resource of resourcesMap.values()) {
if (resource.enabled && resource.ssl && resource.fullDomain) {
domains.add(resource.fullDomain);
}
}
// get the valid certs for these domains
validCerts = await getValidCertificatesForDomains(domains, true); // we are caching here because this is called often
logger.debug(`Valid certs for domains: ${JSON.stringify(validCerts)}`);
}
const config_output: any = { const config_output: any = {
http: { http: {
middlewares: { middlewares: {
@@ -255,14 +269,6 @@ export async function getTraefikConfig(
continue; continue;
} }
// TODO: for now dont filter it out because if you have multiple domain ids and one is failed it causes all of them to fail
// if (resource.certificateStatus !== "valid" && privateConfig.getRawPrivateConfig().flags.use_pangolin_dns) {
// logger.debug(
// `Resource ${resource.resourceId} has certificate stats ${resource.certificateStats}`
// );
// continue;
// }
// add routers and services empty objects if they don't exist // add routers and services empty objects if they don't exist
if (!config_output.http.routers) { if (!config_output.http.routers) {
config_output.http.routers = {}; config_output.http.routers = {};
@@ -272,22 +278,22 @@ export async function getTraefikConfig(
config_output.http.services = {}; config_output.http.services = {};
} }
const domainParts = fullDomain.split(".");
let wildCard;
if (domainParts.length <= 2) {
wildCard = `*.${domainParts.join(".")}`;
} else {
wildCard = `*.${domainParts.slice(1).join(".")}`;
}
if (!resource.subdomain) {
wildCard = resource.fullDomain;
}
const configDomain = config.getDomain(resource.domainId);
let tls = {}; let tls = {};
if (!privateConfig.getRawPrivateConfig().flags.use_pangolin_dns) { if (!privateConfig.getRawPrivateConfig().flags.use_pangolin_dns) {
const domainParts = fullDomain.split(".");
let wildCard;
if (domainParts.length <= 2) {
wildCard = `*.${domainParts.join(".")}`;
} else {
wildCard = `*.${domainParts.slice(1).join(".")}`;
}
if (!resource.subdomain) {
wildCard = resource.fullDomain;
}
const configDomain = config.getDomain(resource.domainId);
let certResolver: string, preferWildcardCert: boolean; let certResolver: string, preferWildcardCert: boolean;
if (!configDomain) { if (!configDomain) {
certResolver = config.getRawConfig().traefik.cert_resolver; certResolver = config.getRawConfig().traefik.cert_resolver;
@@ -310,6 +316,17 @@ export async function getTraefikConfig(
} }
: {}) : {})
}; };
} else {
// find a cert that matches the full domain, if not continue
const matchingCert = validCerts.find(
(cert) => cert.queriedDomain === resource.fullDomain
);
if (!matchingCert) {
logger.warn(
`No matching certificate found for domain: ${resource.fullDomain}`
);
continue;
}
} }
const additionalMiddlewares = const additionalMiddlewares =
@@ -676,20 +693,31 @@ export async function getTraefikConfig(
loginPageId: loginPage.loginPageId, loginPageId: loginPage.loginPageId,
fullDomain: loginPage.fullDomain, fullDomain: loginPage.fullDomain,
exitNodeId: exitNodes.exitNodeId, exitNodeId: exitNodes.exitNodeId,
domainId: loginPage.domainId, domainId: loginPage.domainId
certificateStatus: certificates.status
}) })
.from(loginPage) .from(loginPage)
.innerJoin( .innerJoin(
exitNodes, exitNodes,
eq(exitNodes.exitNodeId, loginPage.exitNodeId) eq(exitNodes.exitNodeId, loginPage.exitNodeId)
) )
.leftJoin(
certificates,
eq(certificates.domainId, loginPage.domainId)
)
.where(eq(exitNodes.exitNodeId, exitNodeId)); .where(eq(exitNodes.exitNodeId, exitNodeId));
let validCertsLoginPages: CertificateResult[] = [];
if (privateConfig.getRawPrivateConfig().flags.use_pangolin_dns) {
// create a list of all domains to get certs for
const domains = new Set<string>();
for (const lp of exitNodeLoginPages) {
if (lp.fullDomain) {
domains.add(lp.fullDomain);
}
}
// get the valid certs for these domains
validCertsLoginPages = await getValidCertificatesForDomains(
domains,
true
); // we are caching here because this is called often
}
if (exitNodeLoginPages.length > 0) { if (exitNodeLoginPages.length > 0) {
if (!config_output.http.services) { if (!config_output.http.services) {
config_output.http.services = {}; config_output.http.services = {};
@@ -719,8 +747,22 @@ export async function getTraefikConfig(
continue; continue;
} }
if (lp.certificateStatus !== "valid") { let tls = {};
continue; if (
!privateConfig.getRawPrivateConfig().flags.use_pangolin_dns
) {
// TODO: we need to add the wildcard logic here too
} else {
// find a cert that matches the full domain, if not continue
const matchingCert = validCertsLoginPages.find(
(cert) => cert.queriedDomain === lp.fullDomain
);
if (!matchingCert) {
logger.warn(
`No matching certificate found for login page domain: ${lp.fullDomain}`
);
continue;
}
} }
// auth-allowed: // auth-allowed:
@@ -743,7 +785,7 @@ export async function getTraefikConfig(
service: "landing-service", service: "landing-service",
rule: `Host(\`${fullDomain}\`) && (PathRegexp(\`^/auth/resource/[^/]+$\`) || PathRegexp(\`^/auth/idp/[0-9]+/oidc/callback\`) || PathPrefix(\`/_next\`) || Path(\`/auth/org\`) || PathRegexp(\`^/__nextjs*\`))`, rule: `Host(\`${fullDomain}\`) && (PathRegexp(\`^/auth/resource/[^/]+$\`) || PathRegexp(\`^/auth/idp/[0-9]+/oidc/callback\`) || PathPrefix(\`/_next\`) || Path(\`/auth/org\`) || PathRegexp(\`^/__nextjs*\`))`,
priority: 203, priority: 203,
tls: {} tls: tls
}; };
// auth-catchall: // auth-catchall:
@@ -762,7 +804,7 @@ export async function getTraefikConfig(
service: "landing-service", service: "landing-service",
rule: `Host(\`${fullDomain}\`)`, rule: `Host(\`${fullDomain}\`)`,
priority: 202, priority: 202,
tls: {} tls: tls
}; };
// we need to add a redirect from http to https too // we need to add a redirect from http to https too

File diff suppressed because it is too large Load Diff