Merge pull request #1669 from fosrl/dev

Dev
This commit is contained in:
Owen Schwartz
2025-10-14 16:57:01 -07:00
committed by GitHub
365 changed files with 10330 additions and 8965 deletions

View File

@@ -29,4 +29,6 @@ CONTRIBUTING.md
dist
.git
migrations/
config/
build.ts
tsconfig.json

View File

@@ -1,75 +0,0 @@
name: Create Dev-Image
on:
pull_request:
branches:
- main
- dev
types:
- opened
- synchronize
- reopened
jobs:
docker:
runs-on: ubuntu-latest
env:
TAG_URL: https://hub.docker.com/r/${{ vars.DOCKER_HUB_REPO }}/tags
TAG: ${{ vars.DOCKER_HUB_REPO }}:dev-pr${{ github.event.pull_request.number }}
TAG_PG: ${{ vars.DOCKER_HUB_REPO }}:postgresql-dev-pr${{ github.event.pull_request.number }}
steps:
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_HUB_USERNAME }}
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Build and push Docker image SQLITE
uses: docker/build-push-action@v6
with:
platforms: linux/amd64
push: true
tags: ${{ env.TAG }}
cache-from: type=registry,ref=${{ vars.DOCKER_HUB_REPO }}:buildcache
cache-to: type=registry,ref=${{ vars.DOCKER_HUB_REPO }}:buildcache,mode=max
build-args: DATABASE=sqlite
- name: Build and push Docker image PG
uses: docker/build-push-action@v6
with:
platforms: linux/amd64
push: true
tags: ${{ env.TAG_PG }}
cache-from: type=registry,ref=${{ vars.DOCKER_HUB_REPO }}:buildcache-pg
cache-to: type=registry,ref=${{ vars.DOCKER_HUB_REPO }}:buildcache-pg,mode=max
build-args: DATABASE=pg
- uses: actions/github-script@v8
with:
script: |
const repoUrl = process.env.TAG_URL;
const tag = process.env.TAG;
const tagPg = process.env.TAG_PG;
github.rest.issues.createComment({
issue_number: context.issue.number,
owner: context.repo.owner,
repo: context.repo.repo,
body: `👋 Thanks for your PR!
Dev images for this PR are now available on [docker hub](${repoUrl}):
**SQLITE Image:**
\`\`\`
${tag}
\`\`\`
**Postgresql Image:**
\`\`\`
${tagPg}
\`\`\``
})
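For reference, the images this (now removed) workflow published could be pulled directly; assuming DOCKER_HUB_REPO was fosrl/pangolin, as the Makefile below suggests, and with an illustrative PR number:

    docker pull fosrl/pangolin:dev-pr1234
    docker pull fosrl/pangolin:postgresql-dev-pr1234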

.gitignore
View File

@@ -47,4 +47,6 @@ server/db/index.ts
server/build.ts
postgres/
dynamic/
*.mmdb
scratch/
tsconfig.json

View File

@@ -15,9 +15,29 @@ RUN echo "export * from \"./$DATABASE\";" > server/db/index.ts
RUN echo "export const build = \"$BUILD\" as any;" > server/build.ts
RUN if [ "$DATABASE" = "pg" ]; then npx drizzle-kit generate --dialect postgresql --schema ./server/db/pg/schema.ts --out init; else npx drizzle-kit generate --dialect $DATABASE --schema ./server/db/$DATABASE/schema.ts --out init; fi
# Copy the appropriate TypeScript configuration based on build type
RUN if [ "$BUILD" = "oss" ]; then cp tsconfig.oss.json tsconfig.json; \
elif [ "$BUILD" = "saas" ]; then cp tsconfig.saas.json tsconfig.json; \
elif [ "$BUILD" = "enterprise" ]; then cp tsconfig.enterprise.json tsconfig.json; \
fi
# If the build is oss, remove the server/private directory
RUN if [ "$BUILD" = "oss" ]; then rm -rf server/private; fi
RUN if [ "$DATABASE" = "pg" ]; then npx drizzle-kit generate --dialect postgresql --schema ./server/db/pg/schema --out init; else npx drizzle-kit generate --dialect $DATABASE --schema ./server/db/$DATABASE/schema --out init; fi
RUN mkdir -p dist
RUN npm run next:build
RUN node esbuild.mjs -e server/index.ts -o dist/server.mjs -b $BUILD
RUN if [ "$DATABASE" = "pg" ]; then \
node esbuild.mjs -e server/setup/migrationsPg.ts -o dist/migrations.mjs; \
else \
node esbuild.mjs -e server/setup/migrationsSqlite.ts -o dist/migrations.mjs; \
fi
# Test that the build output exists and fail the build if not
RUN test -f dist/server.mjs
RUN npm run build:$DATABASE
RUN npm run build:cli
FROM node:22-alpine AS runner

View File

@@ -8,6 +8,7 @@ build-release:
exit 1; \
fi
docker buildx build \
--build-arg BUILD=oss \
--build-arg DATABASE=sqlite \
--platform linux/arm64,linux/amd64 \
--tag fosrl/pangolin:latest \
@@ -16,6 +17,7 @@ build-release:
--tag fosrl/pangolin:$(tag) \
--push .
docker buildx build \
--build-arg BUILD=oss \
--build-arg DATABASE=pg \
--platform linux/arm64,linux/amd64 \
--tag fosrl/pangolin:postgresql-latest \

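To sanity-check the new BUILD argument without a multi-platform push, a cut-down version of the release recipe can be run locally; a minimal sketch, where the tag name and the --load flag are illustrative rather than taken from the Makefile:

    docker buildx build \
      --build-arg BUILD=oss \
      --build-arg DATABASE=sqlite \
      --platform linux/amd64 \
      --tag pangolin:local \
      --load .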
README.md
View File

@@ -1,157 +1,82 @@
<div align="center">
<h2>
<picture>
<source media="(prefers-color-scheme: dark)" srcset="public/logo/word_mark_white.png">
<img alt="Pangolin Logo" src="public/logo/word_mark_black.png" width="250">
<a href="https://digpangolin.com">
<picture>
<source media="(prefers-color-scheme: dark)" srcset="public/logo/word_mark_white.png">
<img alt="Pangolin Logo" src="public/logo/word_mark_black.png" width="350">
</picture>
</a>
</h2>
</div>
<h4 align="center">Secure gateway to your private networks</h4>
<div align="center">
_Pangolin tunnels your services to the internet so you can access anything from anywhere._
</div>
<div align="center">
<h5>
<a href="https://digpangolin.com">
Website
</a>
<span> | </span>
<a href="https://docs.digpangolin.com/self-host/quick-install-managed">
Quick Install Guide
<a href="https://docs.digpangolin.com/">
Documentation
</a>
<span> | </span>
<a href="mailto:contact@fossorial.io">
Contact Us
</a>
<span> | </span>
<a href="https://digpangolin.com/slack">
Slack
</a>
<span> | </span>
<a href="https://discord.gg/HCJR8Xhme4">
Discord
</a>
</h5>
</div>
<div align="center">
[![Discord](https://img.shields.io/discord/1325658630518865980?logo=discord&style=flat-square)](https://discord.gg/HCJR8Xhme4)
[![Slack](https://img.shields.io/badge/chat-slack-yellow?style=flat-square&logo=slack)](https://digpangolin.com/slack)
[![Docker](https://img.shields.io/docker/pulls/fosrl/pangolin?style=flat-square)](https://hub.docker.com/r/fosrl/pangolin)
![Stars](https://img.shields.io/github/stars/fosrl/pangolin?style=flat-square)
[![YouTube](https://img.shields.io/badge/YouTube-red?logo=youtube&logoColor=white&style=flat-square)](https://www.youtube.com/@fossorial-app)
</div>
<p align="center">
<strong>
Start testing Pangolin at <a href="https://pangolin.fossorial.io/auth/signup">pangolin.fossorial.io</a>
</strong>
</p>
Pangolin is a self-hosted tunneled reverse proxy server with identity and context aware access control, designed to easily expose and protect applications running anywhere. Pangolin acts as a central hub and connects isolated networks — even those behind restrictive firewalls — through encrypted tunnels, enabling easy access to remote services without opening ports or requiring a VPN.
Pangolin is a self-hosted tunneled reverse proxy server with identity and access control, designed to securely expose private resources on distributed networks. Acting as a central hub, it connects isolated networks — even those behind restrictive firewalls — through encrypted tunnels, enabling easy access to remote services without opening ports.
## Installation
<img src="public/screenshots/hero.png" alt="Preview"/>
![gif](public/clip.gif)
## Key Features
### Reverse Proxy Through WireGuard Tunnel
- Expose private resources on your network **without opening ports** (firewall punching).
- Secure and easy to configure private connectivity via a custom **user space WireGuard client**, [Newt](https://github.com/fosrl/newt).
- Built-in support for any WireGuard client.
- Automated **SSL certificates** (https) via [LetsEncrypt](https://letsencrypt.org/).
- Support for HTTP/HTTPS and **raw TCP/UDP services**.
- Load balancing.
- Extend functionality with existing [Traefik](https://github.com/traefik/traefik) plugins, such as [CrowdSec](https://plugins.traefik.io/plugins/6335346ca4caa9ddeffda116/crowdsec-bouncer-traefik-plugin) and [Geoblock](https://github.com/PascalMinder/geoblock).
- **Automatically install and configure Crowdsec via Pangolin's installer script.**
- Attach as many sites to the central server as you wish.
### Identity & Access Management
- Centralized authentication system using platform SSO. **Users will only have to manage one login.**
- **Define access control rules for IPs, IP ranges, and URL paths per resource.**
- TOTP with backup codes for two-factor authentication.
- Create organizations, each with multiple sites, users, and roles.
- **Role-based access control** to manage resource access permissions.
- Additional authentication options include:
- Email whitelisting with **one-time passcodes.**
- **Temporary, self-destructing share links.**
- Resource specific pin codes.
- Resource specific passwords.
- Passkeys
- External identity provider (IdP) support with OAuth2/OIDC, such as Authentik, Keycloak, Okta, and others.
- Auto-provision users and roles from your IdP.
<img src="public/auth-diagram1.png" alt="Auth and diagram"/>
## Use Cases
### Manage Access to Internal Apps
- Grant users access to your apps from anywhere using just a web browser. No client software required.
### Developers and DevOps
- Expose and test internal tools and dashboards like **Grafana**. Bring localhost or private IPs online for easy access.
### Secure API Gateway
- One application load balancer across multiple clouds and on-premises.
### IoT and Edge Devices
- Easily expose **IoT devices**, **edge servers**, or **Raspberry Pi** to the internet for field equipment monitoring.
<img src="public/screenshots/sites.png" alt="Sites"/>
Check out the [quick install guide](https://docs.digpangolin.com) for how to install and set up Pangolin.
## Deployment Options
### Fully Self Hosted
| <img width=500 /> | Description |
|-----------------|--------------|
| **Self-Host: Community Edition** | Free, open source, and AGPL-3 compliant. |
| **Self-Host: Enterprise Edition** | Licensed under Fossorial Commercial License. Free for personal and hobbyist use, and for businesses earning under \$100K USD annually. |
| **Pangolin Cloud** | Fully managed service with instant setup and pay-as-you-go pricing — no infrastructure required. Or, self-host your own [remote node](https://github.com/fosrl/remote-note) and connect to our control plane. |
Host the full application on your own server or on the cloud with a VPS. Take a look at the [documentation](https://docs.digpangolin.com/self-host/quick-install) to get started.
## Key Features
> Many of our users have had a great experience with [RackNerd](https://my.racknerd.com/aff.php?aff=13788). Depending on promotions, you can get a [**VPS with 1 vCPU, 1GB RAM, and ~20GB SSD for just around $12/year**](https://my.racknerd.com/aff.php?aff=13788&pid=912). That's a great deal!
Pangolin packages everything you need for seamless application access and exposure into one cohesive platform.
### Pangolin Cloud
| <img width=500 /> | <img width=500 /> |
|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------|
| **Manage applications in one place**<br /><br /> Pangolin provides a unified dashboard where you can monitor, configure, and secure all of your services regardless of where they are hosted. | <img src="public/screenshots/hero.png" /><tr></tr> |
| **Reverse proxy across networks anywhere**<br /><br />Route traffic via tunnels to any private network. Pangolin works like a reverse proxy that spans multiple networks and handles routing, load balancing, health checking, and more to the right services on the other end. | <img src="public/screenshots/sites.png" /><tr></tr> |
| **Enforce identity and context aware rules**<br /><br />Protect your applications with identity and context aware rules such as SSO, OIDC, PIN, password, temporary share links, geolocation, IP, and more. | <img src="public/auth-diagram1.png" /><tr></tr> |
| **Quickly connect Pangolin sites**<br /><br />Pangolin's lightweight [Newt](https://github.com/fosrl/newt) client runs in userspace and can run anywhere. Use it as a site connector to route traffic to backends across all of your environments. | <img src="public/clip.gif" /><tr></tr> |
Easy to use with simple [pay as you go pricing](https://digpangolin.com/pricing). [Check it out here](https://pangolin.fossorial.io/auth/signup).
## Get Started
- Everything you get with self hosted Pangolin, but fully managed for you.
### Check out the docs
### Managed & High Availability
We encourage everyone to read the full documentation first, which is
available at [docs.digpangolin.com](https://docs.digpangolin.com). This README provides only a very brief subset of
the docs to illustrate some basic ideas.
Managed control plane, your infrastructure
### Sign up and try now
- We manage database and control plane.
- You self-host lightweight exit-node.
- Traffic flows through your infra.
- We coordinate failover between your nodes or to Cloud when things go bad.
Try it out using [Pangolin Cloud](https://pangolin.fossorial.io)
### Full Enterprise On-Premises
[Contact us](mailto:numbat@fossorial.io) for fully distributed, enterprise deployments on your infrastructure, controlled by your team.
## Project Development / Roadmap
We want to hear your feature requests! Add them to the [discussion board](https://github.com/orgs/fosrl/discussions/categories/feature-requests).
For Pangolin's managed service, you will first need to create an account at
[pangolin.fossorial.io](https://pangolin.fossorial.io). We have a generous free tier to get started.
## Licensing
Pangolin is dual licensed under the AGPL-3 and the Fossorial Commercial license. For inquiries about commercial licensing, please contact us at [numbat@fossorial.io](mailto:numbat@fossorial.io).
Pangolin is dual licensed under the AGPL-3 and the [Fossorial Commercial License](https://digpangolin.com/fcl.html). For inquiries about commercial licensing, please contact us at [contact@fossorial.io](mailto:contact@fossorial.io).
## Contributions
Looking for something to contribute? Take a look at issues marked with [help wanted](https://github.com/fosrl/pangolin/issues?q=is%3Aissue%20state%3Aopen%20label%3A%22help%20wanted%22). Also look through the feature requests in Discussions; all are open for the taking, and some are marked as good first issues.
Please see [CONTRIBUTING](./CONTRIBUTING.md) in the repository for guidelines and best practices.
Please post bug reports and other functional issues in the [Issues](https://github.com/fosrl/pangolin/issues) section of the repository.
If you are looking to help with translations, please contribute [on Crowdin](https://crowdin.com/project/fossorial-pangolin) or open a PR with changes to the translations files found in `messages/`.

View File

@@ -1,16 +1,9 @@
import { defineConfig } from "drizzle-kit";
import path from "path";
import { build } from "@server/build";
let schema;
if (build === "oss") {
schema = [path.join("server", "db", "pg", "schema.ts")];
} else {
schema = [
path.join("server", "db", "pg", "schema.ts"),
path.join("server", "db", "pg", "privateSchema.ts")
];
}
const schema = [
path.join("server", "db", "pg", "schema"),
];
export default defineConfig({
dialect: "postgresql",

View File

@@ -1,17 +1,10 @@
import { build } from "@server/build";
import { APP_PATH } from "@server/lib/consts";
import { defineConfig } from "drizzle-kit";
import path from "path";
let schema;
if (build === "oss") {
schema = [path.join("server", "db", "sqlite", "schema.ts")];
} else {
schema = [
path.join("server", "db", "sqlite", "schema.ts"),
path.join("server", "db", "sqlite", "privateSchema.ts")
];
}
const schema = [
path.join("server", "db", "sqlite", "schema"),
];
export default defineConfig({
dialect: "sqlite",

View File

@@ -2,8 +2,9 @@ import esbuild from "esbuild";
import yargs from "yargs";
import { hideBin } from "yargs/helpers";
import { nodeExternalsPlugin } from "esbuild-node-externals";
import path from "path";
import fs from "fs";
// import { glob } from "glob";
// import path from "path";
const banner = `
// patch __dirname
@@ -18,7 +19,7 @@ const require = topLevelCreateRequire(import.meta.url);
`;
const argv = yargs(hideBin(process.argv))
.usage("Usage: $0 -entry [string] -out [string]")
.usage("Usage: $0 -entry [string] -out [string] -build [string]")
.option("entry", {
alias: "e",
describe: "Entry point file",
@@ -31,6 +32,13 @@ const argv = yargs(hideBin(process.argv))
type: "string",
demandOption: true,
})
.option("build", {
alias: "b",
describe: "Build type (oss, saas, enterprise)",
type: "string",
choices: ["oss", "saas", "enterprise"],
default: "oss",
})
.help()
.alias("help", "h").argv;
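With the new --build option in place, the CLI is invoked the same way the Dockerfile does; for example:

    node esbuild.mjs -e server/index.ts -o dist/server.mjs -b oss
    node esbuild.mjs -e server/setup/migrationsSqlite.ts -o dist/migrations.mjs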
@@ -46,6 +54,179 @@ function getPackagePaths() {
return ["package.json"];
}
// Plugin to guard against bad imports from #private
function privateImportGuardPlugin() {
return {
name: "private-import-guard",
setup(build) {
const violations = [];
build.onResolve({ filter: /^#private\// }, (args) => {
const importingFile = args.importer;
// Check if the importing file is NOT in server/private
const normalizedImporter = path.normalize(importingFile);
const isInServerPrivate = normalizedImporter.includes(path.normalize("server/private"));
if (!isInServerPrivate) {
const violation = {
file: importingFile,
importPath: args.path,
resolveDir: args.resolveDir
};
violations.push(violation);
console.log(`PRIVATE IMPORT VIOLATION:`);
console.log(` File: ${importingFile}`);
console.log(` Import: ${args.path}`);
console.log(` Resolve dir: ${args.resolveDir || 'N/A'}`);
console.log('');
}
// Return null to let the default resolver handle it
return null;
});
build.onEnd((result) => {
if (violations.length > 0) {
console.log(`\nSUMMARY: Found ${violations.length} private import violation(s):`);
violations.forEach((v, i) => {
console.log(` ${i + 1}. ${path.relative(process.cwd(), v.file)} imports ${v.importPath}`);
});
console.log('');
result.errors.push({
text: `Private import violations detected: ${violations.length} violation(s) found`,
location: null,
notes: violations.map(v => ({
text: `${path.relative(process.cwd(), v.file)} imports ${v.importPath}`,
location: null
}))
});
}
});
}
};
}
// Plugin to guard against #dynamic imports from within server/private
function dynamicImportGuardPlugin() {
return {
name: "dynamic-import-guard",
setup(build) {
const violations = [];
build.onResolve({ filter: /^#dynamic\// }, (args) => {
const importingFile = args.importer;
// Check if the importing file IS in server/private (those must not use #dynamic)
const normalizedImporter = path.normalize(importingFile);
const isInServerPrivate = normalizedImporter.includes(path.normalize("server/private"));
if (isInServerPrivate) {
const violation = {
file: importingFile,
importPath: args.path,
resolveDir: args.resolveDir
};
violations.push(violation);
console.log(`DYNAMIC IMPORT VIOLATION:`);
console.log(` File: ${importingFile}`);
console.log(` Import: ${args.path}`);
console.log(` Resolve dir: ${args.resolveDir || 'N/A'}`);
console.log('');
}
// Return null to let the default resolver handle it
return null;
});
build.onEnd((result) => {
if (violations.length > 0) {
console.log(`\nSUMMARY: Found ${violations.length} dynamic import violation(s):`);
violations.forEach((v, i) => {
console.log(` ${i + 1}. ${path.relative(process.cwd(), v.file)} imports ${v.importPath}`);
});
console.log('');
result.errors.push({
text: `Dynamic import violations detected: ${violations.length} violation(s) found`,
location: null,
notes: violations.map(v => ({
text: `${path.relative(process.cwd(), v.file)} imports ${v.importPath}`,
location: null
}))
});
}
});
}
};
}
// Plugin to dynamically switch imports based on build type
function dynamicImportSwitcherPlugin(buildValue) {
return {
name: "dynamic-import-switcher",
setup(build) {
const switches = [];
build.onStart(() => {
console.log(`Dynamic import switcher using build type: ${buildValue}`);
});
build.onResolve({ filter: /^#dynamic\// }, (args) => {
// Extract the path after #dynamic/
const dynamicPath = args.path.replace(/^#dynamic\//, '');
// Determine the replacement based on build type
let replacement;
if (buildValue === "oss") {
replacement = `#open/${dynamicPath}`;
} else if (buildValue === "saas" || buildValue === "enterprise") {
replacement = `#closed/${dynamicPath}`; // Use #closed so the import guard plugins don't complain after the rewrite; it resolves the same as #private
} else {
console.warn(`Unknown build type '${buildValue}', defaulting to #open/`);
replacement = `#open/${dynamicPath}`;
}
const switchInfo = {
file: args.importer,
originalPath: args.path,
replacementPath: replacement,
buildType: buildValue
};
switches.push(switchInfo);
console.log(`DYNAMIC IMPORT SWITCH:`);
console.log(` File: ${args.importer}`);
console.log(` Original: ${args.path}`);
console.log(` Switched to: ${replacement} (build: ${buildValue})`);
console.log('');
// Rewrite the import path and let the normal resolution continue
return build.resolve(replacement, {
importer: args.importer,
namespace: args.namespace,
resolveDir: args.resolveDir,
kind: args.kind
});
});
build.onEnd((result) => {
if (switches.length > 0) {
console.log(`\nDYNAMIC IMPORT SUMMARY: Switched ${switches.length} import(s) for build type '${buildValue}':`);
switches.forEach((s, i) => {
console.log(` ${i + 1}. ${path.relative(process.cwd(), s.file)}`);
console.log(`   ${s.originalPath} -> ${s.replacementPath}`);
});
console.log('');
}
});
}
};
}
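The #private, #open, #closed, and #dynamic prefixes behave like Node subpath imports. The repo's actual mapping is not shown in this diff, but an illustrative package.json "imports" entry consistent with the plugins above might look like this (note that #dynamic itself never reaches the resolver, since the switcher rewrites it to #open or #closed first):

    {
      "imports": {
        "#private/*": "./server/private/*",
        "#open/*": "./server/open/*",
        "#closed/*": "./server/private/*"
      }
    }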
esbuild
.build({
entryPoints: [argv.entry],
@@ -59,6 +240,9 @@ esbuild
platform: "node",
external: ["body-parser"],
plugins: [
privateImportGuardPlugin(),
dynamicImportGuardPlugin(),
dynamicImportSwitcherPlugin(argv.build),
nodeExternalsPlugin({
packagePath: getPackagePaths(),
}),
@@ -66,7 +250,27 @@ esbuild
sourcemap: "inline",
target: "node22",
})
.then(() => {
.then((result) => {
// Check if there were any errors in the build result
if (result.errors && result.errors.length > 0) {
console.error(`Build failed with ${result.errors.length} error(s):`);
result.errors.forEach((error, i) => {
console.error(`${i + 1}. ${error.text}`);
if (error.notes) {
error.notes.forEach(note => {
console.error(` - ${note.text}`);
});
}
});
// remove the output file if it was created
if (fs.existsSync(argv.out)) {
fs.unlinkSync(argv.out);
}
process.exit(1);
}
console.log("Build completed successfully");
})
.catch((error) => {

View File

@@ -1,15 +1,10 @@
# To see all available options, please visit the docs:
# https://docs.digpangolin.com/self-host/advanced/config-file
# https://docs.digpangolin.com/
gerbil:
start_port: 51820
base_endpoint: "{{.DashboardDomain}}"
{{if .HybridMode}}
managed:
id: "{{.HybridId}}"
secret: "{{.HybridSecret}}"
{{else}}
app:
dashboard_url: "https://{{.DashboardDomain}}"
log_level: "info"
@@ -28,6 +23,7 @@ server:
methods: ["GET", "POST", "PUT", "DELETE", "PATCH"]
allowed_headers: ["X-CSRF-Token", "Content-Type"]
credentials: false
{{if .EnableGeoblocking}}maxmind_db_path: "./config/GeoLite2-Country.mmdb"{{end}}
{{if .EnableEmail}}
email:
smtp_host: "{{.EmailSMTPHost}}"
@@ -40,5 +36,4 @@ flags:
require_email_verification: {{.EnableEmail}}
disable_signup_without_invite: true
disable_user_create_org: false
allow_raw_resources: true
{{end}}
allow_raw_resources: true

View File

@@ -6,8 +6,8 @@ services:
restart: unless-stopped
volumes:
- ./config:/app/config
- pangolin-data:/var/certificates
- pangolin-data:/var/dynamic
- pangolin-data-certificates:/var/certificates
- pangolin-data-dynamic:/var/dynamic
healthcheck:
test: ["CMD", "curl", "-f", "http://localhost:3001/api/v1/"]
interval: "10s"
@@ -33,7 +33,7 @@ services:
ports:
- 51820:51820/udp
- 21820:21820/udp
- 443:{{if .HybridMode}}8443{{else}}443{{end}}
- 443:443
- 80:80
{{end}}
traefik:
@@ -57,8 +57,8 @@ services:
- ./config/letsencrypt:/letsencrypt # Volume to store the Let's Encrypt certificates
- ./config/traefik/logs:/var/log/traefik # Volume to store Traefik logs
# Shared volume for certificates and dynamic config in file mode
- pangolin-data:/var/certificates:ro
- pangolin-data:/var/dynamic:ro
- pangolin-data-certificates:/var/certificates:ro
- pangolin-data-dynamic:/var/dynamic:ro
networks:
default:
@@ -67,4 +67,5 @@ networks:
{{if .EnableIPv6}} enable_ipv6: true{{end}}
volumes:
pangolin-data:
pangolin-data-dynamic:
pangolin-data-certificates:
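Note that renaming the shared pangolin-data volume to the two new named volumes means Docker Compose will create them empty; nothing is migrated automatically. If existing certificate or dynamic-config data must be preserved, a one-off copy along these lines would work (illustrative; it assumes the relevant files sit at the root of the old volume):

    docker run --rm \
      -v pangolin-data:/old \
      -v pangolin-data-certificates:/new \
      alpine cp -a /old/. /new/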

View File

@@ -3,17 +3,12 @@ api:
dashboard: true
providers:
{{if not .HybridMode}}
http:
endpoint: "http://pangolin:3001/api/v1/traefik-config"
pollInterval: "5s"
file:
filename: "/etc/traefik/dynamic_config.yml"
{{else}}
file:
directory: "/var/dynamic"
watch: true
{{end}}
experimental:
plugins:
badger:
@@ -27,7 +22,7 @@ log:
maxBackups: 3
maxAge: 3
compress: true
{{if not .HybridMode}}
certificatesResolvers:
letsencrypt:
acme:
@@ -36,22 +31,18 @@ certificatesResolvers:
email: "{{.LetsEncryptEmail}}"
storage: "/letsencrypt/acme.json"
caServer: "https://acme-v02.api.letsencrypt.org/directory"
{{end}}
entryPoints:
web:
address: ":80"
websecure:
address: ":443"
{{if .HybridMode}} proxyProtocol:
trustedIPs:
- 0.0.0.0/0
- ::1/128{{end}}
transport:
respondingTimeouts:
readTimeout: "30m"
{{if not .HybridMode}} http:
http:
tls:
certResolver: "letsencrypt"{{end}}
certResolver: "letsencrypt"
serversTransport:
insecureSkipVerify: true

View File

@@ -2,7 +2,6 @@ package main
import (
"bufio"
"bytes"
"embed"
"fmt"
"io"
@@ -48,10 +47,8 @@ type Config struct {
InstallGerbil bool
TraefikBouncerKey string
DoCrowdsecInstall bool
EnableGeoblocking bool
Secret string
HybridMode bool
HybridId string
HybridSecret string
}
type SupportedContainer string
@@ -98,24 +95,6 @@ func main() {
fmt.Println("\n=== Generating Configuration Files ===")
// If the secret and id are not generated then generate them
if config.HybridMode && (config.HybridId == "" || config.HybridSecret == "") {
// fmt.Println("Requesting hybrid credentials from cloud...")
credentials, err := requestHybridCredentials()
if err != nil {
fmt.Printf("Error requesting hybrid credentials: %v\n", err)
fmt.Println("Please obtain credentials manually from the dashboard and run the installer again.")
os.Exit(1)
}
config.HybridId = credentials.RemoteExitNodeId
config.HybridSecret = credentials.Secret
fmt.Printf("Your managed credentials have been obtained successfully.\n")
fmt.Printf(" ID: %s\n", config.HybridId)
fmt.Printf(" Secret: %s\n", config.HybridSecret)
fmt.Println("Take these to the Pangolin dashboard https://pangolin.fossorial.io to adopt your node.")
readBool(reader, "Have you adopted your node?", true)
}
if err := createConfigFiles(config); err != nil {
fmt.Printf("Error creating config files: %v\n", err)
os.Exit(1)
@@ -125,6 +104,15 @@ func main() {
fmt.Println("\nConfiguration files created successfully!")
// Download MaxMind database if requested
if config.EnableGeoblocking {
fmt.Println("\n=== Downloading MaxMind Database ===")
if err := downloadMaxMindDatabase(); err != nil {
fmt.Printf("Error downloading MaxMind database: %v\n", err)
fmt.Println("You can download it manually later if needed.")
}
}
fmt.Println("\n=== Starting installation ===")
if readBool(reader, "Would you like to install and start the containers?", true) {
@@ -172,9 +160,34 @@ func main() {
} else {
alreadyInstalled = true
fmt.Println("Looks like you already installed Pangolin!")
// Check if MaxMind database exists and offer to update it
fmt.Println("\n=== MaxMind Database Update ===")
if _, err := os.Stat("config/GeoLite2-Country.mmdb"); err == nil {
fmt.Println("MaxMind GeoLite2 Country database found.")
if readBool(reader, "Would you like to update the MaxMind database to the latest version?", false) {
if err := downloadMaxMindDatabase(); err != nil {
fmt.Printf("Error updating MaxMind database: %v\n", err)
fmt.Println("You can try updating it manually later if needed.")
}
}
} else {
fmt.Println("MaxMind GeoLite2 Country database not found.")
if readBool(reader, "Would you like to download the MaxMind GeoLite2 database for geoblocking functionality?", false) {
if err := downloadMaxMindDatabase(); err != nil {
fmt.Printf("Error downloading MaxMind database: %v\n", err)
fmt.Println("You can try downloading it manually later if needed.")
}
// Now you need to update your config file accordingly to enable geoblocking
fmt.Println("Please remember to update your config/config.yml file to enable geoblocking! \n")
// add maxmind_db_path: "./config/GeoLite2-Country.mmdb" under server
fmt.Println("Add the following line under the 'server' section:")
fmt.Println(" maxmind_db_path: \"./config/GeoLite2-Country.mmdb\"")
}
}
}
if !checkIsCrowdsecInstalledInCompose() && !checkIsPangolinInstalledWithHybrid() {
if !checkIsCrowdsecInstalledInCompose() {
fmt.Println("\n=== CrowdSec Install ===")
// check if crowdsec is installed
if readBool(reader, "Would you like to install CrowdSec?", false) {
@@ -230,7 +243,7 @@ func main() {
}
}
if !config.HybridMode && !alreadyInstalled {
if !alreadyInstalled {
// Setup Token Section
fmt.Println("\n=== Setup Token ===")
@@ -251,9 +264,7 @@ func main() {
fmt.Println("\nInstallation complete!")
if !config.HybridMode && !checkIsPangolinInstalledWithHybrid() {
fmt.Printf("\nTo complete the initial setup, please visit:\nhttps://%s/auth/initial-setup\n", config.DashboardDomain)
}
fmt.Printf("\nTo complete the initial setup, please visit:\nhttps://%s/auth/initial-setup\n", config.DashboardDomain)
}
func podmanOrDocker(reader *bufio.Reader) SupportedContainer {
@@ -328,66 +339,38 @@ func collectUserInput(reader *bufio.Reader) Config {
// Basic configuration
fmt.Println("\n=== Basic Configuration ===")
for {
response := readString(reader, "Do you want to install Pangolin as a cloud-managed (beta) node? (yes/no)", "")
if strings.EqualFold(response, "yes") || strings.EqualFold(response, "y") {
config.HybridMode = true
break
} else if strings.EqualFold(response, "no") || strings.EqualFold(response, "n") {
config.HybridMode = false
break
}
fmt.Println("Please answer 'yes' or 'no'")
config.BaseDomain = readString(reader, "Enter your base domain (no subdomain e.g. example.com)", "")
// Set default dashboard domain after base domain is collected
defaultDashboardDomain := ""
if config.BaseDomain != "" {
defaultDashboardDomain = "pangolin." + config.BaseDomain
}
config.DashboardDomain = readString(reader, "Enter the domain for the Pangolin dashboard", defaultDashboardDomain)
config.LetsEncryptEmail = readString(reader, "Enter email for Let's Encrypt certificates", "")
config.InstallGerbil = readBool(reader, "Do you want to use Gerbil to allow tunneled connections", true)
// Email configuration
fmt.Println("\n=== Email Configuration ===")
config.EnableEmail = readBool(reader, "Enable email functionality (SMTP)", false)
if config.EnableEmail {
config.EmailSMTPHost = readString(reader, "Enter SMTP host", "")
config.EmailSMTPPort = readInt(reader, "Enter SMTP port (default 587)", 587)
config.EmailSMTPUser = readString(reader, "Enter SMTP username", "")
config.EmailSMTPPass = readString(reader, "Enter SMTP password", "") // Should this be readPassword?
config.EmailNoReply = readString(reader, "Enter no-reply email address", "")
}
if config.HybridMode {
alreadyHaveCreds := readBool(reader, "Do you already have credentials from the dashboard? If not, we will create them later", false)
if alreadyHaveCreds {
config.HybridId = readString(reader, "Enter your ID", "")
config.HybridSecret = readString(reader, "Enter your secret", "")
}
// Try to get public IP as default
publicIP := getPublicIP()
if publicIP != "" {
fmt.Printf("Detected public IP: %s\n", publicIP)
}
config.DashboardDomain = readString(reader, "The public addressable IP address for this node or a domain pointing to it", publicIP)
config.InstallGerbil = true
} else {
config.BaseDomain = readString(reader, "Enter your base domain (no subdomain e.g. example.com)", "")
// Set default dashboard domain after base domain is collected
defaultDashboardDomain := ""
if config.BaseDomain != "" {
defaultDashboardDomain = "pangolin." + config.BaseDomain
}
config.DashboardDomain = readString(reader, "Enter the domain for the Pangolin dashboard", defaultDashboardDomain)
config.LetsEncryptEmail = readString(reader, "Enter email for Let's Encrypt certificates", "")
config.InstallGerbil = readBool(reader, "Do you want to use Gerbil to allow tunneled connections", true)
// Email configuration
fmt.Println("\n=== Email Configuration ===")
config.EnableEmail = readBool(reader, "Enable email functionality (SMTP)", false)
if config.EnableEmail {
config.EmailSMTPHost = readString(reader, "Enter SMTP host", "")
config.EmailSMTPPort = readInt(reader, "Enter SMTP port (default 587)", 587)
config.EmailSMTPUser = readString(reader, "Enter SMTP username", "")
config.EmailSMTPPass = readString(reader, "Enter SMTP password", "") // Should this be readPassword?
config.EmailNoReply = readString(reader, "Enter no-reply email address", "")
}
// Validate required fields
if config.BaseDomain == "" {
fmt.Println("Error: Domain name is required")
os.Exit(1)
}
if config.LetsEncryptEmail == "" {
fmt.Println("Error: Let's Encrypt email is required")
os.Exit(1)
}
// Validate required fields
if config.BaseDomain == "" {
fmt.Println("Error: Domain name is required")
os.Exit(1)
}
if config.LetsEncryptEmail == "" {
fmt.Println("Error: Let's Encrypt email is required")
os.Exit(1)
}
// Advanced configuration
@@ -395,6 +378,7 @@ func collectUserInput(reader *bufio.Reader) Config {
fmt.Println("\n=== Advanced Configuration ===")
config.EnableIPv6 = readBool(reader, "Is your server IPv6 capable?", true)
config.EnableGeoblocking = readBool(reader, "Do you want to download the MaxMind GeoLite2 database for geoblocking functionality?", false)
if config.DashboardDomain == "" {
fmt.Println("Error: Dashboard Domain name is required")
@@ -429,11 +413,6 @@ func createConfigFiles(config Config) error {
return nil
}
// the hybrid does not need the dynamic config
if config.HybridMode && strings.Contains(path, "dynamic_config.yml") {
return nil
}
// skip .DS_Store
if strings.Contains(path, ".DS_Store") {
return nil
@@ -663,18 +642,30 @@ func checkPortsAvailable(port int) error {
return nil
}
func checkIsPangolinInstalledWithHybrid() bool {
// Check if config/config.yml exists and contains hybrid section
if _, err := os.Stat("config/config.yml"); err != nil {
return false
func downloadMaxMindDatabase() error {
fmt.Println("Downloading MaxMind GeoLite2 Country database...")
// Download the GeoLite2 Country database
if err := run("curl", "-L", "-o", "GeoLite2-Country.tar.gz",
"https://github.com/GitSquared/node-geolite2-redist/raw/refs/heads/master/redist/GeoLite2-Country.tar.gz"); err != nil {
return fmt.Errorf("failed to download GeoLite2 database: %v", err)
}
// Read config file to check for hybrid section
content, err := os.ReadFile("config/config.yml")
if err != nil {
return false
// Extract the database
if err := run("tar", "-xzf", "GeoLite2-Country.tar.gz"); err != nil {
return fmt.Errorf("failed to extract GeoLite2 database: %v", err)
}
// Check for hybrid section
return bytes.Contains(content, []byte("managed:"))
// Find the .mmdb file and move it to the config directory
if err := run("bash", "-c", "mv GeoLite2-Country_*/GeoLite2-Country.mmdb config/"); err != nil {
return fmt.Errorf("failed to move GeoLite2 database to config directory: %v", err)
}
// Clean up the downloaded files
if err := run("rm", "-rf", "GeoLite2-Country.tar.gz", "GeoLite2-Country_*"); err != nil {
fmt.Printf("Warning: failed to clean up temporary files: %v\n", err)
}
fmt.Println("MaxMind GeoLite2 Country database downloaded successfully!")
return nil
}
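If the automated download fails, the messages above suggest doing it manually; the equivalent of what downloadMaxMindDatabase runs is:

    curl -L -o GeoLite2-Country.tar.gz \
      https://github.com/GitSquared/node-geolite2-redist/raw/refs/heads/master/redist/GeoLite2-Country.tar.gz
    tar -xzf GeoLite2-Country.tar.gz
    mv GeoLite2-Country_*/GeoLite2-Country.mmdb config/
    rm -rf GeoLite2-Country.tar.gz GeoLite2-Country_*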

View File

@@ -1,110 +0,0 @@
package main
import (
"bytes"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"net/http"
"time"
)
const (
FRONTEND_SECRET_KEY = "af4e4785-7e09-11f0-b93a-74563c4e2a7e"
// CLOUD_API_URL = "https://pangolin.fossorial.io/api/v1/remote-exit-node/quick-start"
CLOUD_API_URL = "https://pangolin.fossorial.io/api/v1/remote-exit-node/quick-start"
)
// HybridCredentials represents the response from the cloud API
type HybridCredentials struct {
RemoteExitNodeId string `json:"remoteExitNodeId"`
Secret string `json:"secret"`
}
// APIResponse represents the full response structure from the cloud API
type APIResponse struct {
Data HybridCredentials `json:"data"`
}
// RequestPayload represents the request body structure
type RequestPayload struct {
Token string `json:"token"`
}
func generateValidationToken() string {
timestamp := time.Now().UnixMilli()
data := fmt.Sprintf("%s|%d", FRONTEND_SECRET_KEY, timestamp)
obfuscated := make([]byte, len(data))
for i, char := range []byte(data) {
obfuscated[i] = char + 5
}
return base64.StdEncoding.EncodeToString(obfuscated)
}
// requestHybridCredentials makes an HTTP POST request to the cloud API
// to get hybrid credentials (ID and secret)
func requestHybridCredentials() (*HybridCredentials, error) {
// Generate validation token
token := generateValidationToken()
// Create request payload
payload := RequestPayload{
Token: token,
}
// Marshal payload to JSON
jsonData, err := json.Marshal(payload)
if err != nil {
return nil, fmt.Errorf("failed to marshal request payload: %v", err)
}
// Create HTTP request
req, err := http.NewRequest("POST", CLOUD_API_URL, bytes.NewBuffer(jsonData))
if err != nil {
return nil, fmt.Errorf("failed to create HTTP request: %v", err)
}
// Set headers
req.Header.Set("Content-Type", "application/json")
req.Header.Set("X-CSRF-Token", "x-csrf-protection")
// Create HTTP client with timeout
client := &http.Client{
Timeout: 30 * time.Second,
}
// Make the request
resp, err := client.Do(req)
if err != nil {
return nil, fmt.Errorf("failed to make HTTP request: %v", err)
}
defer resp.Body.Close()
// Check response status
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("API request failed with status code: %d", resp.StatusCode)
}
// Read response body for debugging
body, err := io.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("failed to read response body: %v", err)
}
// Print the raw JSON response for debugging
// fmt.Printf("Raw JSON response: %s\n", string(body))
// Parse response
var apiResponse APIResponse
if err := json.Unmarshal(body, &apiResponse); err != nil {
return nil, fmt.Errorf("failed to decode API response: %v", err)
}
// Validate response data
if apiResponse.Data.RemoteExitNodeId == "" || apiResponse.Data.Secret == "" {
return nil, fmt.Errorf("invalid response: missing remoteExitNodeId or secret")
}
return &apiResponse.Data, nil
}

View File

@@ -715,7 +715,6 @@
"pangolinServerAdmin": "Администратор на сървър - Панголин",
"licenseTierProfessional": "Професионален лиценз",
"licenseTierEnterprise": "Предприятие лиценз",
"licenseTierCommercial": "Търговски лиценз",
"licensed": "Лицензиран",
"yes": "Да",
"no": "Не",
@@ -1084,7 +1083,6 @@
"navbar": "Навигационно меню",
"navbarDescription": "Главно навигационно меню за приложението",
"navbarDocsLink": "Документация",
"commercialEdition": "Търговско издание",
"otpErrorEnable": "Не може да се активира 2FA",
"otpErrorEnableDescription": "Възникна грешка при активиране на 2FA",
"otpSetupCheckCode": "Моля, въведете 6-цифрен код",

View File

@@ -715,7 +715,6 @@
"pangolinServerAdmin": "Správce serveru - Pangolin",
"licenseTierProfessional": "Profesionální licence",
"licenseTierEnterprise": "Podniková licence",
"licenseTierCommercial": "Obchodní licence",
"licensed": "Licencováno",
"yes": "Ano",
"no": "Ne",
@@ -1084,7 +1083,6 @@
"navbar": "Navigation Menu",
"navbarDescription": "Hlavní navigační menu aplikace",
"navbarDocsLink": "Dokumentace",
"commercialEdition": "Obchodní vydání",
"otpErrorEnable": "2FA nelze povolit",
"otpErrorEnableDescription": "Došlo k chybě při povolování 2FA",
"otpSetupCheckCode": "Zadejte 6místný kód",

View File

@@ -715,7 +715,6 @@
"pangolinServerAdmin": "Server-Admin - Pangolin",
"licenseTierProfessional": "Professional Lizenz",
"licenseTierEnterprise": "Enterprise Lizenz",
"licenseTierCommercial": "Gewerbliche Lizenz",
"licensed": "Lizenziert",
"yes": "Ja",
"no": "Nein",
@@ -1084,7 +1083,6 @@
"navbar": "Navigationsmenü",
"navbarDescription": "Hauptnavigationsmenü für die Anwendung",
"navbarDocsLink": "Dokumentation",
"commercialEdition": "Kommerzielle Edition",
"otpErrorEnable": "2FA konnte nicht aktiviert werden",
"otpErrorEnableDescription": "Beim Aktivieren der 2FA ist ein Fehler aufgetreten",
"otpSetupCheckCode": "Bitte geben Sie einen 6-stelligen Code ein",

View File

@@ -96,7 +96,7 @@
"siteWgDescription": "Use any WireGuard client to establish a tunnel. Manual NAT setup required.",
"siteWgDescriptionSaas": "Use any WireGuard client to establish a tunnel. Manual NAT setup required.",
"siteLocalDescription": "Local resources only. No tunneling.",
"siteLocalDescriptionSaas": "Local resources only. No tunneling.",
"siteLocalDescriptionSaas": "Local resources only. No tunneling. Only available on remote nodes.",
"siteSeeAll": "See All Sites",
"siteTunnelDescription": "Determine how you want to connect to your site",
"siteNewtCredentials": "Newt Credentials",
@@ -468,7 +468,10 @@
"createdAt": "Created At",
"proxyErrorInvalidHeader": "Invalid custom Host Header value. Use domain name format, or save empty to unset custom Host Header.",
"proxyErrorTls": "Invalid TLS Server Name. Use domain name format, or save empty to remove the TLS Server Name.",
"proxyEnableSSL": "Enable SSL (https)",
"proxyEnableSSL": "Enable SSL",
"proxyEnableSSLDescription": "Enable SSL/TLS encryption for secure HTTPS connections to your targets.",
"target": "Target",
"configureTarget": "Configure Targets",
"targetErrorFetch": "Failed to fetch targets",
"targetErrorFetchDescription": "An error occurred while fetching targets",
"siteErrorFetch": "Failed to fetch resource",
@@ -495,7 +498,7 @@
"targetTlsSettings": "Secure Connection Configuration",
"targetTlsSettingsDescription": "Configure SSL/TLS settings for your resource",
"targetTlsSettingsAdvanced": "Advanced TLS Settings",
"targetTlsSni": "TLS Server Name (SNI)",
"targetTlsSni": "TLS Server Name",
"targetTlsSniDescription": "The TLS Server Name to use for SNI. Leave empty to use the default.",
"targetTlsSubmit": "Save Settings",
"targets": "Targets Configuration",
@@ -504,9 +507,21 @@
"targetStickySessionsDescription": "Keep connections on the same backend target for their entire session.",
"methodSelect": "Select method",
"targetSubmit": "Add Target",
"targetNoOne": "No targets. Add a target using the form.",
"targetNoOne": "This resource doesn't have any targets. Add a target to configure where to send requests to your backend.",
"targetNoOneDescription": "Adding more than one target above will enable load balancing.",
"targetsSubmit": "Save Targets",
"addTarget": "Add Target",
"targetErrorInvalidIp": "Invalid IP address",
"targetErrorInvalidIpDescription": "Please enter a valid IP address or hostname",
"targetErrorInvalidPort": "Invalid port",
"targetErrorInvalidPortDescription": "Please enter a valid port number",
"targetErrorNoSite": "No site selected",
"targetErrorNoSiteDescription": "Please select a site for the target",
"targetCreated": "Target created",
"targetCreatedDescription": "Target has been created successfully",
"targetErrorCreate": "Failed to create target",
"targetErrorCreateDescription": "An error occurred while creating the target",
"save": "Save",
"proxyAdditional": "Additional Proxy Settings",
"proxyAdditionalDescription": "Configure how your resource handles proxy settings",
"proxyCustomHeader": "Custom Host Header",
@@ -715,7 +730,7 @@
"pangolinServerAdmin": "Server Admin - Pangolin",
"licenseTierProfessional": "Professional License",
"licenseTierEnterprise": "Enterprise License",
"licenseTierCommercial": "Commercial License",
"licenseTierPersonal": "Personal License",
"licensed": "Licensed",
"yes": "Yes",
"no": "No",
@@ -750,7 +765,7 @@
"idpDisplayName": "A display name for this identity provider",
"idpAutoProvisionUsers": "Auto Provision Users",
"idpAutoProvisionUsersDescription": "When enabled, users will be automatically created in the system upon first login with the ability to map users to roles and organizations.",
"licenseBadge": "Professional",
"licenseBadge": "EE",
"idpType": "Provider Type",
"idpTypeDescription": "Select the type of identity provider you want to configure",
"idpOidcConfigure": "OAuth2/OIDC Configuration",
@@ -1084,7 +1099,6 @@
"navbar": "Navigation Menu",
"navbarDescription": "Main navigation menu for the application",
"navbarDocsLink": "Documentation",
"commercialEdition": "Commercial Edition",
"otpErrorEnable": "Unable to enable 2FA",
"otpErrorEnableDescription": "An error occurred while enabling 2FA",
"otpSetupCheckCode": "Please enter a 6-digit code",
@@ -1140,7 +1154,7 @@
"sidebarAllUsers": "All Users",
"sidebarIdentityProviders": "Identity Providers",
"sidebarLicense": "License",
"sidebarClients": "Clients (Beta)",
"sidebarClients": "Clients",
"sidebarDomains": "Domains",
"enableDockerSocket": "Enable Docker Blueprint",
"enableDockerSocketDescription": "Enable Docker Socket label scraping for blueprint labels. Socket path must be provided to Newt.",
@@ -1333,7 +1347,6 @@
"twoFactorRequired": "Two-factor authentication is required to register a security key.",
"twoFactor": "Two-Factor Authentication",
"adminEnabled2FaOnYourAccount": "Your administrator has enabled two-factor authentication for {email}. Please complete the setup process to continue.",
"continueToApplication": "Continue to Application",
"securityKeyAdd": "Add Security Key",
"securityKeyRegisterTitle": "Register New Security Key",
"securityKeyRegisterDescription": "Connect your security key and enter a name to identify it",
@@ -1411,6 +1424,7 @@
"externalProxyEnabled": "External Proxy Enabled",
"addNewTarget": "Add New Target",
"targetsList": "Targets List",
"advancedMode": "Advanced Mode",
"targetErrorDuplicateTargetFound": "Duplicate target found",
"healthCheckHealthy": "Healthy",
"healthCheckUnhealthy": "Unhealthy",
@@ -1543,8 +1557,8 @@
"autoLoginError": "Auto Login Error",
"autoLoginErrorNoRedirectUrl": "No redirect URL received from the identity provider.",
"autoLoginErrorGeneratingUrl": "Failed to generate authentication URL.",
"remoteExitNodeManageRemoteExitNodes": "Manage Self-Hosted",
"remoteExitNodeDescription": "Manage nodes to extend your network connectivity",
"remoteExitNodeManageRemoteExitNodes": "Remote Nodes",
"remoteExitNodeDescription": "Self-host one or more remote nodes to extend your network connectivity and reduce reliance on the cloud",
"remoteExitNodes": "Nodes",
"searchRemoteExitNodes": "Search nodes...",
"remoteExitNodeAdd": "Add Node",
@@ -1554,7 +1568,7 @@
"remoteExitNodeMessageConfirm": "To confirm, please type the name of the node below.",
"remoteExitNodeConfirmDelete": "Confirm Delete Node",
"remoteExitNodeDelete": "Delete Node",
"sidebarRemoteExitNodes": "Nodes",
"sidebarRemoteExitNodes": "Remote Nodes",
"remoteExitNodeCreate": {
"title": "Create Node",
"description": "Create a new node to extend your network connectivity",
@@ -1724,20 +1738,160 @@
"healthCheckNotAvailable": "Local",
"rewritePath": "Rewrite Path",
"rewritePathDescription": "Optionally rewrite the path before forwarding to the target.",
"continueToApplication": "Continue to application",
"checkingInvite": "Checking Invite",
"setResourceHeaderAuth": "setResourceHeaderAuth",
"resourceHeaderAuthRemove": "Remove Header Auth",
"resourceHeaderAuthRemoveDescription": "Header authentication removed successfully.",
"resourceErrorHeaderAuthRemove": "Failed to remove Header Authentication",
"resourceErrorHeaderAuthRemoveDescription": "Could not remove header authentication for the resource.",
"resourceHeaderAuthProtection": "Header Authentication Protection: {{status}}",
"headerAuthRemove": "Remove",
"headerAuthAdd": "Add",
"resourceHeaderAuthProtectionEnabled": "Header Authentication Enabled",
"resourceHeaderAuthProtectionDisabled": "Header Authentication Disabled",
"headerAuthRemove": "Remove Header Auth",
"headerAuthAdd": "Add Header Auth",
"resourceErrorHeaderAuthSetup": "Failed to set Header Authentication",
"resourceErrorHeaderAuthSetupDescription": "Could not set header authentication for the resource.",
"resourceHeaderAuthSetup": "Header Authentication set successfully",
"resourceHeaderAuthSetupDescription": "Header authentication has been successfully set.",
"resourceHeaderAuthSetupTitle": "Set Header Authentication",
"resourceHeaderAuthSetupTitleDescription": "Set the basic auth credentials (username and password) to protect this resource with HTTP Header Authentication. Leave both fields blank to remove existing header authentication.",
"resourceHeaderAuthSetupTitleDescription": "Set the basic auth credentials (username and password) to protect this resource with HTTP Header Authentication. Access it using the format https://username:password@resource.example.com",
"resourceHeaderAuthSubmit": "Set Header Authentication",
"actionSetResourceHeaderAuth": "Set Header Authentication"
}
"actionSetResourceHeaderAuth": "Set Header Authentication",
"enterpriseEdition": "Enterprise Edition",
"unlicensed": "Unlicensed",
"beta": "Beta",
"manageClients": "Manage Clients",
"manageClientsDescription": "Clients are devices that can connect to your sites",
"licenseTableValidUntil": "Valid Until",
"saasLicenseKeysSettingsTitle": "Enterprise Licenses",
"saasLicenseKeysSettingsDescription": "Generate and manage Enterprise license keys for self-hosted Pangolin instances",
"sidebarEnterpriseLicenses": "Licenses",
"generateLicenseKey": "Generate License Key",
"generateLicenseKeyForm": {
"validation": {
"emailRequired": "Please enter a valid email address",
"useCaseTypeRequired": "Please select a use case type",
"firstNameRequired": "First name is required",
"lastNameRequired": "Last name is required",
"primaryUseRequired": "Please describe your primary use",
"jobTitleRequiredBusiness": "Job title is required for business use",
"industryRequiredBusiness": "Industry is required for business use",
"stateProvinceRegionRequired": "State/Province/Region is required",
"postalZipCodeRequired": "Postal/ZIP Code is required",
"companyNameRequiredBusiness": "Company name is required for business use",
"countryOfResidenceRequiredBusiness": "Country of residence is required for business use",
"countryRequiredPersonal": "Country is required for personal use",
"agreeToTermsRequired": "You must agree to the terms",
"complianceConfirmationRequired": "You must confirm compliance with the Fossorial Commercial License"
},
"useCaseOptions": {
"personal": {
"title": "Personal Use",
"description": "For individual, non-commercial use such as learning, personal projects, or experimentation."
},
"business": {
"title": "Business Use",
"description": "For use within organizations, companies, or commercial or revenue-generating activities."
}
},
"steps": {
"emailLicenseType": {
"title": "Email & License Type",
"description": "Enter your email and choose your license type"
},
"personalInformation": {
"title": "Personal Information",
"description": "Tell us about yourself"
},
"contactInformation": {
"title": "Contact Information",
"description": "Your contact details"
},
"termsGenerate": {
"title": "Terms & Generate",
"description": "Review and accept terms to generate your license"
}
},
"alerts": {
"commercialUseDisclosure": {
"title": "Usage Disclosure",
"description": "Select the license tier that accurately reflects your intended use. The Personal License permits free use of the Software for individual, non-commercial or small-scale commercial activities with annual gross revenue under $100,000 USD. Any use beyond these limits — including use within a business, organization, or other revenue-generating environment — requires a valid Enterprise License and payment of the applicable licensing fee. All users, whether Personal or Enterprise, must comply with the Fossorial Commercial License Terms."
},
"trialPeriodInformation": {
"title": "Trial Period Information",
"description": "This License Key enables Enterprise features for a 7-day evaluation period. Continued access to Paid Features beyond the evaluation period requires activation under a valid Personal or Enterprise License. For Enterprise licensing, contact sales@fossorial.io."
}
},
"form": {
"useCaseQuestion": "Are you using Pangolin for personal or business use?",
"firstName": "First Name",
"lastName": "Last Name",
"jobTitle": "Job Title",
"primaryUseQuestion": "What do you primarily plan to use Pangolin for?",
"industryQuestion": "What is your industry?",
"prospectiveUsersQuestion": "How many prospective users do you expect to have?",
"prospectiveSitesQuestion": "How many prospective sites (tunnels) do you expect to have?",
"companyName": "Company name",
"countryOfResidence": "Country of residence",
"stateProvinceRegion": "State / Province / Region",
"postalZipCode": "Postal / ZIP Code",
"companyWebsite": "Company website",
"companyPhoneNumber": "Company phone number",
"country": "Country",
"phoneNumberOptional": "Phone number (optional)",
"complianceConfirmation": "I confirm that I am in compliance with the Fossorial Commercial License and that reporting inaccurate information or misidentifying use of the product is a violation of the license."
},
"buttons": {
"close": "Close",
"previous": "Previous",
"next": "Next",
"generateLicenseKey": "Generate License Key"
},
"toasts": {
"success": {
"title": "License key generated successfully",
"description": "Your license key has been generated and is ready to use."
},
"error": {
"title": "Failed to generate license key",
"description": "An error occurred while generating the license key."
}
}
},
"priority": "Priority",
"priorityDescription": "Higher priority routes are evaluated first. Priority = 100 means automatic ordering (system decides). Use another number to enforce manual priority.",
"instanceName": "Instance Name",
"pathMatchModalTitle": "Configure Path Matching",
"pathMatchModalDescription": "Set up how incoming requests should be matched based on their path.",
"pathMatchType": "Match Type",
"pathMatchPrefix": "Prefix",
"pathMatchExact": "Exact",
"pathMatchRegex": "Regex",
"pathMatchValue": "Path Value",
"clear": "Clear",
"saveChanges": "Save Changes",
"pathMatchRegexPlaceholder": "^/api/.*",
"pathMatchDefaultPlaceholder": "/path",
"pathMatchPrefixHelp": "Example: /api matches /api, /api/users, etc.",
"pathMatchExactHelp": "Example: /api matches only /api",
"pathMatchRegexHelp": "Example: ^/api/.* matches /api/anything",
"pathRewriteModalTitle": "Configure Path Rewriting",
"pathRewriteModalDescription": "Transform the matched path before forwarding to the target.",
"pathRewriteType": "Rewrite Type",
"pathRewritePrefixOption": "Prefix - Replace prefix",
"pathRewriteExactOption": "Exact - Replace entire path",
"pathRewriteRegexOption": "Regex - Pattern replacement",
"pathRewriteStripPrefixOption": "Strip Prefix - Remove prefix",
"pathRewriteValue": "Rewrite Value",
"pathRewriteRegexPlaceholder": "/new/$1",
"pathRewriteDefaultPlaceholder": "/new-path",
"pathRewritePrefixHelp": "Replace the matched prefix with this value",
"pathRewriteExactHelp": "Replace the entire path with this value when the path matches exactly",
"pathRewriteRegexHelp": "Use capture groups like $1, $2 for replacement",
"pathRewriteStripPrefixHelp": "Leave empty to strip prefix or provide new prefix",
"pathRewritePrefix": "Prefix",
"pathRewriteExact": "Exact",
"pathRewriteRegex": "Regex",
"pathRewriteStrip": "Strip",
"pathRewriteStripLabel": "strip"
}

View File

@@ -715,7 +715,6 @@
"pangolinServerAdmin": "Admin Servidor - Pangolin",
"licenseTierProfessional": "Licencia profesional",
"licenseTierEnterprise": "Licencia Enterprise",
"licenseTierCommercial": "Licencia comercial",
"licensed": "Licenciado",
"yes": "Sí",
"no": "Nu",
@@ -1084,7 +1083,6 @@
"navbar": "Menú de navegación",
"navbarDescription": "Menú de navegación principal para la aplicación",
"navbarDocsLink": "Documentación",
"commercialEdition": "Edición Comercial",
"otpErrorEnable": "No se puede habilitar 2FA",
"otpErrorEnableDescription": "Se ha producido un error al habilitar 2FA",
"otpSetupCheckCode": "Por favor, introduzca un código de 6 dígitos",

View File

@@ -715,7 +715,6 @@
"pangolinServerAdmin": "Admin Serveur - Pangolin",
"licenseTierProfessional": "Licence Professionnelle",
"licenseTierEnterprise": "Licence Entreprise",
"licenseTierCommercial": "Licence commerciale",
"licensed": "Sous licence",
"yes": "Oui",
"no": "Non",
@@ -1084,7 +1083,6 @@
"navbar": "Menu de navigation",
"navbarDescription": "Menu de navigation principal de l'application",
"navbarDocsLink": "Documentation",
"commercialEdition": "Édition Commerciale",
"otpErrorEnable": "Impossible d'activer l'A2F",
"otpErrorEnableDescription": "Une erreur s'est produite lors de l'activation de l'A2F",
"otpSetupCheckCode": "Veuillez entrer un code à 6 chiffres",

View File

@@ -715,7 +715,6 @@
"pangolinServerAdmin": "Server Admin - Pangolina",
"licenseTierProfessional": "Licenza Professional",
"licenseTierEnterprise": "Licenza Enterprise",
"licenseTierCommercial": "Licenza Commerciale",
"licensed": "Con Licenza",
"yes": "Sì",
"no": "No",
@@ -1084,7 +1083,6 @@
"navbar": "Menu di Navigazione",
"navbarDescription": "Menu di navigazione principale dell'applicazione",
"navbarDocsLink": "Documentazione",
"commercialEdition": "Edizione Commerciale",
"otpErrorEnable": "Impossibile abilitare 2FA",
"otpErrorEnableDescription": "Si è verificato un errore durante l'abilitazione di 2FA",
"otpSetupCheckCode": "Inserisci un codice a 6 cifre",

View File

@@ -715,7 +715,6 @@
"pangolinServerAdmin": "서버 관리자 - 판골린",
"licenseTierProfessional": "전문 라이센스",
"licenseTierEnterprise": "기업 라이선스",
"licenseTierCommercial": "상업용 라이선스",
"licensed": "라이센스",
"yes": "예",
"no": "아니요",
@@ -1084,7 +1083,6 @@
"navbar": "탐색 메뉴",
"navbarDescription": "애플리케이션의 주요 탐색 메뉴",
"navbarDocsLink": "문서",
"commercialEdition": "상업용 에디션",
"otpErrorEnable": "2FA를 활성화할 수 없습니다.",
"otpErrorEnableDescription": "2FA를 활성화하는 동안 오류가 발생했습니다",
"otpSetupCheckCode": "6자리 코드를 입력하세요",

View File

@@ -715,7 +715,6 @@
"pangolinServerAdmin": "Server Admin - Pangolin",
"licenseTierProfessional": "Profesjonell lisens",
"licenseTierEnterprise": "Bedriftslisens",
"licenseTierCommercial": "Kommersiell lisens",
"licensed": "Lisensiert",
"yes": "Ja",
"no": "Nei",
@@ -1084,7 +1083,6 @@
"navbar": "Navigasjonsmeny",
"navbarDescription": "Hovednavigasjonsmeny for applikasjonen",
"navbarDocsLink": "Dokumentasjon",
"commercialEdition": "Kommersiell utgave",
"otpErrorEnable": "Kunne ikke aktivere 2FA",
"otpErrorEnableDescription": "En feil oppstod under aktivering av 2FA",
"otpSetupCheckCode": "Vennligst skriv inn en 6-sifret kode",

View File

@@ -715,7 +715,6 @@
"pangolinServerAdmin": "Serverbeheer - Pangolin",
"licenseTierProfessional": "Professionele licentie",
"licenseTierEnterprise": "Enterprise Licentie",
"licenseTierCommercial": "Commerciële licentie",
"licensed": "Gelicentieerd",
"yes": "ja",
"no": "Neen",
@@ -1084,7 +1083,6 @@
"navbar": "Navigatiemenu",
"navbarDescription": "Hoofd navigatie menu voor de applicatie",
"navbarDocsLink": "Documentatie",
"commercialEdition": "Commerciële editie",
"otpErrorEnable": "Kan 2FA niet inschakelen",
"otpErrorEnableDescription": "Er is een fout opgetreden tijdens het inschakelen van 2FA",
"otpSetupCheckCode": "Voer een 6-cijferige code in",

View File

@@ -715,7 +715,6 @@
"pangolinServerAdmin": "Administrator serwera - Pangolin",
"licenseTierProfessional": "Licencja Professional",
"licenseTierEnterprise": "Licencja Enterprise",
"licenseTierCommercial": "Licencja handlowa",
"licensed": "Licencjonowany",
"yes": "Tak",
"no": "Nie",
@@ -1084,7 +1083,6 @@
"navbar": "Menu nawigacyjne",
"navbarDescription": "Główne menu nawigacyjne aplikacji",
"navbarDocsLink": "Dokumentacja",
"commercialEdition": "Edycja komercyjna",
"otpErrorEnable": "Nie można włączyć 2FA",
"otpErrorEnableDescription": "Wystąpił błąd podczas włączania 2FA",
"otpSetupCheckCode": "Wprowadź 6-cyfrowy kod",

View File

@@ -715,7 +715,6 @@
"pangolinServerAdmin": "Administrador do Servidor - Pangolin",
"licenseTierProfessional": "Licença Profissional",
"licenseTierEnterprise": "Licença Empresarial",
"licenseTierCommercial": "Licença comercial",
"licensed": "Licenciado",
"yes": "Sim",
"no": "Não",
@@ -1084,7 +1083,6 @@
"navbar": "Menu de Navegação",
"navbarDescription": "Menu de navegação principal da aplicação",
"navbarDocsLink": "Documentação",
"commercialEdition": "Edição Comercial",
"otpErrorEnable": "Não foi possível ativar 2FA",
"otpErrorEnableDescription": "Ocorreu um erro ao ativar 2FA",
"otpSetupCheckCode": "Por favor, insira um código de 6 dígitos",

View File

@@ -715,7 +715,6 @@
"pangolinServerAdmin": "Администратор сервера - Pangolin",
"licenseTierProfessional": "Профессиональная лицензия",
"licenseTierEnterprise": "Корпоративная лицензия",
"licenseTierCommercial": "Коммерческая лицензия",
"licensed": "Лицензировано",
"yes": "Да",
"no": "Нет",
@@ -1084,7 +1083,6 @@
"navbar": "Навигационное меню",
"navbarDescription": "Главное навигационное меню приложения",
"navbarDocsLink": "Документация",
"commercialEdition": "Коммерческая версия",
"otpErrorEnable": "Невозможно включить 2FA",
"otpErrorEnableDescription": "Произошла ошибка при включении 2FA",
"otpSetupCheckCode": "Пожалуйста, введите 6-значный код",

View File

@@ -715,7 +715,6 @@
"pangolinServerAdmin": "Sunucu Yöneticisi - Pangolin",
"licenseTierProfessional": "Profesyonel Lisans",
"licenseTierEnterprise": "Kurumsal Lisans",
"licenseTierCommercial": "Ticari Lisans",
"licensed": "Lisanslı",
"yes": "Evet",
"no": "Hayır",
@@ -1084,7 +1083,6 @@
"navbar": "Navigasyon Menüsü",
"navbarDescription": "Uygulamanın ana navigasyon menüsü",
"navbarDocsLink": "Dokümantasyon",
"commercialEdition": "Ticari Sürüm",
"otpErrorEnable": "2FA etkinleştirilemedi",
"otpErrorEnableDescription": "2FA etkinleştirilirken bir hata oluştu",
"otpSetupCheckCode": "6 haneli bir kod girin",

View File

@@ -715,7 +715,6 @@
"pangolinServerAdmin": "服务器管理员 - Pangolin",
"licenseTierProfessional": "专业许可证",
"licenseTierEnterprise": "企业许可证",
"licenseTierCommercial": "商业许可证",
"licensed": "已授权",
"yes": "是",
"no": "否",
@@ -1084,7 +1083,6 @@
"navbar": "导航菜单",
"navbarDescription": "应用程序的主导航菜单",
"navbarDocsLink": "文件",
"commercialEdition": "商业版",
"otpErrorEnable": "无法启用 2FA",
"otpErrorEnableDescription": "启用 2FA 时出错",
"otpSetupCheckCode": "请输入您的6位数字代码",

package-lock.json (generated, 1963 lines changed): diff suppressed because it is too large.

View File

@@ -19,17 +19,17 @@
"db:sqlite:studio": "drizzle-kit studio --config=./drizzle.sqlite.config.ts",
"db:pg:studio": "drizzle-kit studio --config=./drizzle.pg.config.ts",
"db:clear-migrations": "rm -rf server/migrations",
"set:oss": "echo 'export const build = \"oss\" as any;' > server/build.ts",
"set:saas": "echo 'export const build = \"saas\" as any;' > server/build.ts",
"set:enterprise": "echo 'export const build = \"enterprise\" as any;' > server/build.ts",
"set:oss": "echo 'export const build = \"oss\" as any;' > server/build.ts && cp tsconfig.oss.json tsconfig.json",
"set:saas": "echo 'export const build = \"saas\" as any;' > server/build.ts && cp tsconfig.saas.json tsconfig.json",
"set:enterprise": "echo 'export const build = \"enterprise\" as any;' > server/build.ts && cp tsconfig.enterprise.json tsconfig.json",
"set:sqlite": "echo 'export * from \"./sqlite\";' > server/db/index.ts",
"set:pg": "echo 'export * from \"./pg\";' > server/db/index.ts",
"next:build": "next build",
"build:sqlite": "mkdir -p dist && next build && node esbuild.mjs -e server/index.ts -o dist/server.mjs && node esbuild.mjs -e server/setup/migrationsSqlite.ts -o dist/migrations.mjs",
"build:pg": "mkdir -p dist && next build && node esbuild.mjs -e server/index.ts -o dist/server.mjs && node esbuild.mjs -e server/setup/migrationsPg.ts -o dist/migrations.mjs",
"start": "ENVIRONMENT=prod node dist/migrations.mjs && ENVIRONMENT=prod NODE_ENV=development node --enable-source-maps dist/server.mjs",
"email": "email dev --dir server/emails/templates --port 3005",
"build:cli": "node esbuild.mjs -e cli/index.ts -o dist/cli.mjs",
"db:sqlite:seed-exit-node": "sqlite3 config/db/db.sqlite \"INSERT INTO exitNodes (exitNodeId, name, address, endpoint, publicKey, listenPort, reachableAt, maxConnections, online, lastPing, type, region) VALUES (null, 'test', '10.0.0.1/24', 'localhost', 'MJ44MpnWGxMZURgxW/fWXDFsejhabnEFYDo60LQwK3A=', 1234, 'http://localhost:3003', 123, 1, null, 'gerbil', null);\""
"build:cli": "node esbuild.mjs -e cli/index.ts -o dist/cli.mjs"
},
"dependencies": {
"@asteasolutions/zod-to-openapi": "^7.3.4",

Binary file not shown (image; before: 34 KiB, after: 13 KiB).

Binary file not shown (image; before: 33 KiB, after: 13 KiB).

View File

@@ -7,21 +7,21 @@ import {
errorHandlerMiddleware,
notFoundMiddleware
} from "@server/middlewares";
import { corsWithLoginPageSupport } from "@server/middlewares/private/corsWithLoginPage";
import { authenticated, unauthenticated } from "@server/routers/external";
import { router as wsRouter, handleWSUpgrade } from "@server/routers/ws";
import { authenticated, unauthenticated } from "#dynamic/routers/external";
import { router as wsRouter, handleWSUpgrade } from "#dynamic/routers/ws";
import { logIncomingMiddleware } from "./middlewares/logIncoming";
import { csrfProtectionMiddleware } from "./middlewares/csrfProtection";
import helmet from "helmet";
import { stripeWebhookHandler } from "@server/routers/private/billing/webhooks";
import { build } from "./build";
import rateLimit, { ipKeyGenerator } from "express-rate-limit";
import createHttpError from "http-errors";
import HttpCode from "./types/HttpCode";
import requestTimeoutMiddleware from "./middlewares/requestTimeout";
import { createStore } from "@server/lib/private/rateLimitStore";
import hybridRouter from "@server/routers/private/hybrid";
import { createStore } from "#dynamic/lib/rateLimitStore";
import { stripDuplicateSesions } from "./middlewares/stripDuplicateSessions";
import { corsWithLoginPageSupport } from "@server/lib/corsWithLoginPage";
import { hybridRouter } from "#dynamic/routers/hybrid";
import { billingWebhookHandler } from "#dynamic/routers/billing/webhooks";
const dev = config.isDev;
const externalPort = config.getRawConfig().server.external_port;
@@ -39,32 +39,30 @@ export function createApiServer() {
apiServer.post(
`${prefix}/billing/webhooks`,
express.raw({ type: "application/json" }),
stripeWebhookHandler
billingWebhookHandler
);
}
const corsConfig = config.getRawConfig().server.cors;
const options = {
...(corsConfig?.origins
? { origin: corsConfig.origins }
: {
origin: (origin: any, callback: any) => {
callback(null, true);
}
}),
...(corsConfig?.methods && { methods: corsConfig.methods }),
...(corsConfig?.allowed_headers && {
allowedHeaders: corsConfig.allowed_headers
}),
credentials: !(corsConfig?.credentials === false)
};
if (build == "oss") {
const options = {
...(corsConfig?.origins
? { origin: corsConfig.origins }
: {
origin: (origin: any, callback: any) => {
callback(null, true);
}
}),
...(corsConfig?.methods && { methods: corsConfig.methods }),
...(corsConfig?.allowed_headers && {
allowedHeaders: corsConfig.allowed_headers
}),
credentials: !(corsConfig?.credentials === false)
};
if (build == "oss" || !corsConfig) {
logger.debug("Using CORS options", options);
apiServer.use(cors(options));
} else {
} else if (corsConfig) {
// Use the custom CORS middleware with loginPage support
apiServer.use(corsWithLoginPageSupport(corsConfig));
}
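
The #dynamic/* specifiers introduced here (#dynamic/routers/external, #dynamic/lib/rateLimitStore, and so on) replace direct @server/.../private imports; presumably each tsconfig variant copied in by the set:* scripts points them at a different implementation tree, with the esbuild step resolving them the same way at bundle time. A hypothetical fragment, since the real tsconfig.*.json files are not part of this diff:

{
    "compilerOptions": {
        "paths": {
            // hypothetical mapping; an OSS variant would point at stub implementations instead
            "#dynamic/*": ["./server/private/*"]
        }
    }
}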

View File

@@ -4,7 +4,6 @@ import { userActions, roleActions, userOrgs } from "@server/db";
import { and, eq } from "drizzle-orm";
import createHttpError from "http-errors";
import HttpCode from "@server/types/HttpCode";
import { sendUsageNotification } from "@server/routers/org";
export enum ActionsEnum {
createOrgUser = "createOrgUser",

View File

@@ -4,9 +4,6 @@ import { resourceSessions, ResourceSession } from "@server/db";
import { db } from "@server/db";
import { eq, and } from "drizzle-orm";
import config from "@server/lib/config";
import axios from "axios";
import logger from "@server/logger";
import { tokenManager } from "@server/lib/tokenManager";
export const SESSION_COOKIE_NAME =
config.getRawConfig().server.session_cookie_name;
@@ -65,29 +62,6 @@ export async function validateResourceSessionToken(
token: string,
resourceId: number
): Promise<ResourceSessionValidationResult> {
if (config.isManagedMode()) {
try {
const response = await axios.post(`${config.getRawConfig().managed?.endpoint}/api/v1/hybrid/resource/${resourceId}/session/validate`, {
token: token
}, await tokenManager.getAuthHeader());
return response.data.data;
} catch (error) {
if (axios.isAxiosError(error)) {
logger.error("Error validating resource session token in hybrid mode:", {
message: error.message,
code: error.code,
status: error.response?.status,
statusText: error.response?.statusText,
url: error.config?.url,
method: error.config?.method
});
} else {
logger.error("Error validating resource session token in hybrid mode:", error);
}
return { resourceSession: null };
}
}
const sessionId = encodeHexLowerCase(
sha256(new TextEncoder().encode(token))
);

server/cleanup.ts (new file, 13 lines)
View File

@@ -0,0 +1,13 @@
import { cleanup as wsCleanup } from "@server/routers/ws";
async function cleanup() {
await wsCleanup();
process.exit(0);
}
export async function initCleanup() {
// Handle process termination
process.on("SIGTERM", () => cleanup());
process.on("SIGINT", () => cleanup());
}
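
As the server/index.ts diff below shows, initCleanup() is awaited once the servers are up, so SIGTERM and SIGINT trigger the WebSocket cleanup from @server/routers/ws before process.exit(0).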

View File

@@ -38,9 +38,9 @@ function createDb() {
const poolConfig = config.postgres.pool;
const primaryPool = new Pool({
connectionString,
max: poolConfig.max_connections,
idleTimeoutMillis: poolConfig.idle_timeout_ms,
connectionTimeoutMillis: poolConfig.connection_timeout_ms,
max: poolConfig?.max_connections || 20,
idleTimeoutMillis: poolConfig?.idle_timeout_ms || 30000,
connectionTimeoutMillis: poolConfig?.connection_timeout_ms || 5000,
});
const replicas = [];
@@ -51,9 +51,9 @@ function createDb() {
for (const conn of replicaConnections) {
const replicaPool = new Pool({
connectionString: conn.connection_string,
max: poolConfig.max_replica_connections,
idleTimeoutMillis: poolConfig.idle_timeout_ms,
connectionTimeoutMillis: poolConfig.connection_timeout_ms,
max: poolConfig?.max_replica_connections || 20,
idleTimeoutMillis: poolConfig?.idle_timeout_ms || 30000,
connectionTimeoutMillis: poolConfig?.connection_timeout_ms || 5000,
});
replicas.push(DrizzlePostgres(replicaPool));
}
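
A sketch of the pool settings this change makes optional, using the fallbacks from the code above (field names come from the diff; the surrounding config schema is assumed):

type PostgresPoolConfig = {
    max_connections?: number;         // primary pool size, falls back to 20
    max_replica_connections?: number; // per-replica pool size, falls back to 20
    idle_timeout_ms?: number;         // falls back to 30000
    connection_timeout_ms?: number;   // falls back to 5000
};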

View File

@@ -1,3 +1,3 @@
export * from "./driver";
export * from "./schema";
export * from "./privateSchema";
export * from "./schema/schema";
export * from "./schema/privateSchema";

View File

@@ -1,16 +1,3 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import {
pgTable,
serial,

View File

@@ -720,4 +720,5 @@ export type OrgDomains = InferSelectModel<typeof orgDomains>;
export type SiteResource = InferSelectModel<typeof siteResources>;
export type SetupToken = InferSelectModel<typeof setupTokens>;
export type HostMeta = InferSelectModel<typeof hostMeta>;
export type TargetHealthCheck = InferSelectModel<typeof targetHealthCheck>;
export type TargetHealthCheck = InferSelectModel<typeof targetHealthCheck>;
export type IdpOidcConfig = InferSelectModel<typeof idpOidcConfig>;

View File

@@ -17,10 +17,6 @@ import {
users
} from "@server/db";
import { and, eq } from "drizzle-orm";
import axios from "axios";
import config from "@server/lib/config";
import logger from "@server/logger";
import { tokenManager } from "@server/lib/tokenManager";
export type ResourceWithAuth = {
resource: Resource | null;
@@ -40,30 +36,6 @@ export type UserSessionWithUser = {
export async function getResourceByDomain(
domain: string
): Promise<ResourceWithAuth | null> {
if (config.isManagedMode()) {
try {
const response = await axios.get(
`${config.getRawConfig().managed?.endpoint}/api/v1/hybrid/resource/domain/${domain}`,
await tokenManager.getAuthHeader()
);
return response.data.data;
} catch (error) {
if (axios.isAxiosError(error)) {
logger.error("Error fetching config in verify session:", {
message: error.message,
code: error.code,
status: error.response?.status,
statusText: error.response?.statusText,
url: error.config?.url,
method: error.config?.method
});
} else {
logger.error("Error fetching config in verify session:", error);
}
return null;
}
}
const [result] = await db
.select()
.from(resources)
@@ -100,30 +72,6 @@ export async function getResourceByDomain(
export async function getUserSessionWithUser(
userSessionId: string
): Promise<UserSessionWithUser | null> {
if (config.isManagedMode()) {
try {
const response = await axios.get(
`${config.getRawConfig().managed?.endpoint}/api/v1/hybrid/session/${userSessionId}`,
await tokenManager.getAuthHeader()
);
return response.data.data;
} catch (error) {
if (axios.isAxiosError(error)) {
logger.error("Error fetching config in verify session:", {
message: error.message,
code: error.code,
status: error.response?.status,
statusText: error.response?.statusText,
url: error.config?.url,
method: error.config?.method
});
} else {
logger.error("Error fetching config in verify session:", error);
}
return null;
}
}
const [res] = await db
.select()
.from(sessions)
@@ -144,30 +92,6 @@ export async function getUserSessionWithUser(
* Get user organization role
*/
export async function getUserOrgRole(userId: string, orgId: string) {
if (config.isManagedMode()) {
try {
const response = await axios.get(
`${config.getRawConfig().managed?.endpoint}/api/v1/hybrid/user/${userId}/org/${orgId}/role`,
await tokenManager.getAuthHeader()
);
return response.data.data;
} catch (error) {
if (axios.isAxiosError(error)) {
logger.error("Error fetching config in verify session:", {
message: error.message,
code: error.code,
status: error.response?.status,
statusText: error.response?.statusText,
url: error.config?.url,
method: error.config?.method
});
} else {
logger.error("Error fetching config in verify session:", error);
}
return null;
}
}
const userOrgRole = await db
.select()
.from(userOrgs)
@@ -184,30 +108,6 @@ export async function getRoleResourceAccess(
resourceId: number,
roleId: number
) {
if (config.isManagedMode()) {
try {
const response = await axios.get(
`${config.getRawConfig().managed?.endpoint}/api/v1/hybrid/role/${roleId}/resource/${resourceId}/access`,
await tokenManager.getAuthHeader()
);
return response.data.data;
} catch (error) {
if (axios.isAxiosError(error)) {
logger.error("Error fetching config in verify session:", {
message: error.message,
code: error.code,
status: error.response?.status,
statusText: error.response?.statusText,
url: error.config?.url,
method: error.config?.method
});
} else {
logger.error("Error fetching config in verify session:", error);
}
return null;
}
}
const roleResourceAccess = await db
.select()
.from(roleResources)
@@ -229,30 +129,6 @@ export async function getUserResourceAccess(
userId: string,
resourceId: number
) {
if (config.isManagedMode()) {
try {
const response = await axios.get(
`${config.getRawConfig().managed?.endpoint}/api/v1/hybrid/user/${userId}/resource/${resourceId}/access`,
await tokenManager.getAuthHeader()
);
return response.data.data;
} catch (error) {
if (axios.isAxiosError(error)) {
logger.error("Error fetching config in verify session:", {
message: error.message,
code: error.code,
status: error.response?.status,
statusText: error.response?.statusText,
url: error.config?.url,
method: error.config?.method
});
} else {
logger.error("Error fetching config in verify session:", error);
}
return null;
}
}
const userResourceAccess = await db
.select()
.from(userResources)
@@ -273,30 +149,6 @@ export async function getUserResourceAccess(
export async function getResourceRules(
resourceId: number
): Promise<ResourceRule[]> {
if (config.isManagedMode()) {
try {
const response = await axios.get(
`${config.getRawConfig().managed?.endpoint}/api/v1/hybrid/resource/${resourceId}/rules`,
await tokenManager.getAuthHeader()
);
return response.data.data;
} catch (error) {
if (axios.isAxiosError(error)) {
logger.error("Error fetching config in verify session:", {
message: error.message,
code: error.code,
status: error.response?.status,
statusText: error.response?.statusText,
url: error.config?.url,
method: error.config?.method
});
} else {
logger.error("Error fetching config in verify session:", error);
}
return [];
}
}
const rules = await db
.select()
.from(resourceRules)
@@ -311,30 +163,6 @@ export async function getResourceRules(
export async function getOrgLoginPage(
orgId: string
): Promise<LoginPage | null> {
if (config.isManagedMode()) {
try {
const response = await axios.get(
`${config.getRawConfig().managed?.endpoint}/api/v1/hybrid/org/${orgId}/login-page`,
await tokenManager.getAuthHeader()
);
return response.data.data;
} catch (error) {
if (axios.isAxiosError(error)) {
logger.error("Error fetching config in verify session:", {
message: error.message,
code: error.code,
status: error.response?.status,
statusText: error.response?.statusText,
url: error.config?.url,
method: error.config?.method
});
} else {
logger.error("Error fetching config in verify session:", error);
}
return null;
}
}
const [result] = await db
.select()
.from(loginPageOrg)

View File

@@ -1,6 +1,6 @@
import { drizzle as DrizzleSqlite } from "drizzle-orm/better-sqlite3";
import Database from "better-sqlite3";
import * as schema from "./schema";
import * as schema from "./schema/schema";
import path from "path";
import fs from "fs";
import { APP_PATH } from "@server/lib/consts";

View File

@@ -1,3 +1,3 @@
export * from "./driver";
export * from "./schema";
export * from "./privateSchema";
export * from "./schema/schema";
export * from "./schema/privateSchema";

View File

@@ -1,16 +1,3 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import {
sqliteTable,
integer,

View File

@@ -759,4 +759,5 @@ export type SiteResource = InferSelectModel<typeof siteResources>;
export type OrgDomains = InferSelectModel<typeof orgDomains>;
export type SetupToken = InferSelectModel<typeof setupTokens>;
export type HostMeta = InferSelectModel<typeof hostMeta>;
export type TargetHealthCheck = InferSelectModel<typeof targetHealthCheck>;
export type TargetHealthCheck = InferSelectModel<typeof targetHealthCheck>;
export type IdpOidcConfig = InferSelectModel<typeof idpOidcConfig>;

View File

@@ -6,11 +6,6 @@ import logger from "@server/logger";
import SMTPTransport from "nodemailer/lib/smtp-transport";
function createEmailClient() {
if (config.isManagedMode()) {
// LETS NOT WORRY ABOUT EMAILS IN HYBRID
return;
}
const emailConfig = config.getRawConfig().email;
if (!emailConfig) {
logger.warn(

View File

@@ -2,7 +2,6 @@ import { render } from "@react-email/render";
import { ReactElement } from "react";
import emailClient from "@server/emails";
import logger from "@server/logger";
import config from "@server/lib/config";
export async function sendEmail(
template: ReactElement,
@@ -25,7 +24,7 @@ export async function sendEmail(
const emailHtml = await render(template);
const appName = config.getRawPrivateConfig().branding?.app_name || "Pangolin";
const appName = process.env.BRANDING_APP_NAME || "Pangolin"; // The private config is loaded into env vars to keep it separate from this module
await emailClient.sendMail({
from: {

View File

@@ -1,16 +1,3 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import React from "react";
import { Body, Head, Html, Preview, Tailwind } from "@react-email/components";
import { themeColors } from "./lib/theme";

View File

@@ -1,16 +1,3 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import React from "react";
import { Body, Head, Html, Preview, Tailwind } from "@react-email/components";
import { themeColors } from "./lib/theme";

View File

@@ -1,151 +0,0 @@
import logger from "@server/logger";
import config from "@server/lib/config";
import { createWebSocketClient } from "./routers/ws/client";
import { addPeer, deletePeer } from "./routers/gerbil/peers";
import { db, exitNodes } from "./db";
import { TraefikConfigManager } from "./lib/traefik/TraefikConfigManager";
import { tokenManager } from "./lib/tokenManager";
import { APP_VERSION } from "./lib/consts";
import axios from "axios";
export async function createHybridClientServer() {
logger.info("Starting hybrid client server...");
// Start the token manager
await tokenManager.start();
const token = await tokenManager.getToken();
const monitor = new TraefikConfigManager();
await monitor.start();
// Create client
const client = createWebSocketClient(
token,
config.getRawConfig().managed!.endpoint!,
{
reconnectInterval: 5000,
pingInterval: 30000,
pingTimeout: 10000
}
);
// Register message handlers
client.registerHandler("remoteExitNode/peers/add", async (message) => {
const { publicKey, allowedIps } = message.data;
// TODO: we are getting the exit node twice here
// NOTE: there should only be one gerbil registered so...
const [exitNode] = await db.select().from(exitNodes).limit(1);
await addPeer(exitNode.exitNodeId, {
publicKey: publicKey,
allowedIps: allowedIps || []
});
});
client.registerHandler("remoteExitNode/peers/remove", async (message) => {
const { publicKey } = message.data;
// TODO: we are getting the exit node twice here
// NOTE: there should only be one gerbil registered so...
const [exitNode] = await db.select().from(exitNodes).limit(1);
await deletePeer(exitNode.exitNodeId, publicKey);
});
// /update-proxy-mapping
client.registerHandler("remoteExitNode/update-proxy-mapping", async (message) => {
try {
const [exitNode] = await db.select().from(exitNodes).limit(1);
if (!exitNode) {
logger.error("No exit node found for proxy mapping update");
return;
}
const response = await axios.post(`${exitNode.endpoint}/update-proxy-mapping`, message.data);
logger.info(`Successfully updated proxy mapping: ${response.status}`);
} catch (error) {
// pull data out of the axios error to log
if (axios.isAxiosError(error)) {
logger.error("Error updating proxy mapping:", {
message: error.message,
code: error.code,
status: error.response?.status,
statusText: error.response?.statusText,
url: error.config?.url,
method: error.config?.method
});
} else {
logger.error("Error updating proxy mapping:", error);
}
}
});
// /update-destinations
client.registerHandler("remoteExitNode/update-destinations", async (message) => {
try {
const [exitNode] = await db.select().from(exitNodes).limit(1);
if (!exitNode) {
logger.error("No exit node found for destinations update");
return;
}
const response = await axios.post(`${exitNode.endpoint}/update-destinations`, message.data);
logger.info(`Successfully updated destinations: ${response.status}`);
} catch (error) {
// pull data out of the axios error to log
if (axios.isAxiosError(error)) {
logger.error("Error updating destinations:", {
message: error.message,
code: error.code,
status: error.response?.status,
statusText: error.response?.statusText,
url: error.config?.url,
method: error.config?.method
});
} else {
logger.error("Error updating destinations:", error);
}
}
});
client.registerHandler("remoteExitNode/traefik/reload", async (message) => {
await monitor.HandleTraefikConfig();
});
// Listen to connection events
client.on("connect", () => {
logger.info("Connected to WebSocket server");
client.sendMessage("remoteExitNode/register", {
remoteExitNodeVersion: APP_VERSION
});
});
client.on("disconnect", () => {
logger.info("Disconnected from WebSocket server");
});
client.on("message", (message) => {
logger.info(
`Received message: ${message.type} ${JSON.stringify(message.data)}`
);
});
// Connect to the server
try {
await client.connect();
logger.info("Connection initiated");
} catch (error) {
logger.error("Failed to connect:", error);
}
// Store the ping interval stop function for cleanup if needed
const stopPingInterval = client.sendMessageInterval(
"remoteExitNode/ping",
{ timestamp: Date.now() / 1000 },
60000
); // send every minute
// Return client and cleanup function for potential use
return { client, stopPingInterval };
}
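
server/hybridServer.ts is deleted outright; its caller, createHybridClientServer() in server/index.ts, is removed in the diff below, completing the removal of managed-mode startup from this tree.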

View File

@@ -5,13 +5,20 @@ import { runSetupFunctions } from "./setup";
import { createApiServer } from "./apiServer";
import { createNextServer } from "./nextServer";
import { createInternalServer } from "./internalServer";
import { ApiKey, ApiKeyOrg, RemoteExitNode, Session, User, UserOrg } from "@server/db";
import {
ApiKey,
ApiKeyOrg,
RemoteExitNode,
Session,
User,
UserOrg
} from "@server/db";
import { createIntegrationApiServer } from "./integrationApiServer";
import { createHybridClientServer } from "./hybridServer";
import config from "@server/lib/config";
import { setHostMeta } from "@server/lib/hostMeta";
import { initTelemetryClient } from "./lib/telemetry.js";
import { TraefikConfigManager } from "./lib/traefik/TraefikConfigManager.js";
import { initCleanup } from "#dynamic/cleanup";
async function startServers() {
await setHostMeta();
@@ -25,16 +32,11 @@ async function startServers() {
const apiServer = createApiServer();
const internalServer = createInternalServer();
let hybridClientServer;
let nextServer;
if (config.isManagedMode()) {
hybridClientServer = await createHybridClientServer();
} else {
nextServer = await createNextServer();
if (config.getRawConfig().traefik.file_mode) {
const monitor = new TraefikConfigManager();
await monitor.start();
}
nextServer = await createNextServer();
if (config.getRawConfig().traefik.file_mode) {
const monitor = new TraefikConfigManager();
await monitor.start();
}
let integrationServer;
@@ -42,12 +44,13 @@ async function startServers() {
integrationServer = createIntegrationApiServer();
}
await initCleanup();
return {
apiServer,
nextServer,
internalServer,
integrationServer,
hybridClientServer
integrationServer
};
}
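
Condensed, the post-change startup path reads roughly as follows (a sketch assembled from the visible hunks; elided lines are assumed unchanged):

await setHostMeta();
const apiServer = createApiServer();
const internalServer = createInternalServer();
const nextServer = await createNextServer();
if (config.getRawConfig().traefik.file_mode) {
    const monitor = new TraefikConfigManager();
    await monitor.start();
}
let integrationServer;
// integration API enabled check elided
await initCleanup();
return { apiServer, nextServer, internalServer, integrationServer };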

View File

@@ -7,7 +7,7 @@ import {
errorHandlerMiddleware,
notFoundMiddleware,
} from "@server/middlewares";
import { authenticated, unauthenticated } from "@server/routers/integration";
import { authenticated, unauthenticated } from "#dynamic/routers/integration";
import { logIncomingMiddleware } from "./middlewares/logIncoming";
import helmet from "helmet";
import swaggerUi from "swagger-ui-express";

View File

@@ -8,7 +8,7 @@ import {
errorHandlerMiddleware,
notFoundMiddleware
} from "@server/middlewares";
import internal from "@server/routers/internal";
import { internalRouter } from "#dynamic/routers/internal";
import { stripDuplicateSesions } from "./middlewares/stripDuplicateSessions";
const internalPort = config.getRawConfig().server.internal_port;
@@ -23,7 +23,7 @@ export function createInternalServer() {
internalServer.use(express.json());
const prefix = `/api/v1`;
internalServer.use(prefix, internal);
internalServer.use(prefix, internalRouter);
internalServer.use(notFoundMiddleware);
internalServer.use(errorHandlerMiddleware);

View File

@@ -0,0 +1,6 @@
export async function createCustomer(
orgId: string,
email: string | null | undefined
): Promise<string | undefined> {
return;
}

View File

@@ -1,16 +1,3 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import Stripe from "stripe";
export enum FeatureId {

View File

@@ -0,0 +1,8 @@
export async function getOrgTierData(
orgId: string
): Promise<{ tier: string | null; active: boolean }> {
let tier = null;
let active = false;
return { tier, active };
}

View File

@@ -0,0 +1,5 @@
export * from "./limitSet";
export * from "./features";
export * from "./limitsService";
export * from "./getOrgTierData";
export * from "./createCustomer";

View File

@@ -1,16 +1,3 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import { FeatureId } from "./features";
export type LimitSet = {

View File

@@ -1,16 +1,3 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import { db, limits } from "@server/db";
import { and, eq } from "drizzle-orm";
import { LimitSet } from "./limitSet";

View File

@@ -1,16 +1,3 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
export enum TierId {
STANDARD = "standard",
}

View File

@@ -1,21 +1,7 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import { eq, sql, and } from "drizzle-orm";
import NodeCache from "node-cache";
import { v4 as uuidv4 } from "uuid";
import { PutObjectCommand } from "@aws-sdk/client-s3";
import { s3Client } from "../s3";
import * as fs from "fs/promises";
import * as path from "path";
import {
@@ -30,10 +16,10 @@ import {
Transaction
} from "@server/db";
import { FeatureId, getFeatureMeterId } from "./features";
import config from "@server/lib/config";
import logger from "@server/logger";
import { sendToClient } from "@server/routers/ws";
import { sendToClient } from "#dynamic/routers/ws";
import { build } from "@server/build";
import { s3Client } from "@server/lib/s3";
interface StripeEvent {
identifier?: string;
@@ -45,6 +31,17 @@ interface StripeEvent {
};
}
export function noop() {
if (
build !== "saas" ||
!process.env.S3_BUCKET ||
!process.env.LOCAL_FILE_PATH
) {
return true;
}
return false;
}
export class UsageService {
private cache: NodeCache;
private bucketName: string | undefined;
@@ -55,11 +52,13 @@ export class UsageService {
constructor() {
this.cache = new NodeCache({ stdTTL: 300 }); // 5 minute TTL
if (build !== "saas") {
if (noop()) {
return;
}
this.bucketName = config.getRawPrivateConfig().stripe?.s3Bucket;
this.eventsDir = config.getRawPrivateConfig().stripe?.localFilePath;
// this.bucketName = privateConfig.getRawPrivateConfig().stripe?.s3Bucket;
// this.eventsDir = privateConfig.getRawPrivateConfig().stripe?.localFilePath;
this.bucketName = process.env.S3_BUCKET || undefined;
this.eventsDir = process.env.LOCAL_FILE_PATH || undefined;
// Ensure events directory exists
this.initializeEventsDirectory().then(() => {
@@ -83,7 +82,9 @@ export class UsageService {
private async initializeEventsDirectory(): Promise<void> {
if (!this.eventsDir) {
logger.warn("Stripe local file path is not configured, skipping events directory initialization.");
logger.warn(
"Stripe local file path is not configured, skipping events directory initialization."
);
return;
}
try {
@@ -95,7 +96,9 @@ export class UsageService {
private async uploadPendingEventFilesOnStartup(): Promise<void> {
if (!this.eventsDir || !this.bucketName) {
logger.warn("Stripe local file path or bucket name is not configured, skipping leftover event file upload.");
logger.warn(
"Stripe local file path or bucket name is not configured, skipping leftover event file upload."
);
return;
}
try {
@@ -118,15 +121,17 @@ export class UsageService {
ContentType: "application/json"
});
await s3Client.send(uploadCommand);
// Check if file still exists before unlinking
try {
await fs.access(filePath);
await fs.unlink(filePath);
} catch (unlinkError) {
logger.debug(`Startup file ${file} was already deleted`);
logger.debug(
`Startup file ${file} was already deleted`
);
}
logger.info(
`Uploaded leftover event file ${file} to S3 with ${events.length} events`
);
@@ -136,7 +141,9 @@ export class UsageService {
await fs.access(filePath);
await fs.unlink(filePath);
} catch (unlinkError) {
logger.debug(`Empty startup file ${file} was already deleted`);
logger.debug(
`Empty startup file ${file} was already deleted`
);
}
}
} catch (err) {
@@ -147,8 +154,8 @@ export class UsageService {
}
}
}
} catch (err) {
logger.error("Failed to scan for leftover event files:", err);
} catch (error) {
logger.error("Failed to scan for leftover event files");
}
}
@@ -158,17 +165,17 @@ export class UsageService {
value: number,
transaction: any = null
): Promise<Usage | null> {
if (build !== "saas") {
if (noop()) {
return null;
}
// Truncate value to 11 decimal places
value = this.truncateValue(value);
// Implement retry logic for deadlock handling
const maxRetries = 3;
let attempt = 0;
while (attempt <= maxRetries) {
try {
// Get subscription data for this org (with caching)
@@ -191,7 +198,12 @@ export class UsageService {
);
} else {
await db.transaction(async (trx) => {
usage = await this.internalAddUsage(orgId, featureId, value, trx);
usage = await this.internalAddUsage(
orgId,
featureId,
value,
trx
);
});
}
@@ -201,25 +213,26 @@ export class UsageService {
return usage || null;
} catch (error: any) {
// Check if this is a deadlock error
const isDeadlock = error?.code === '40P01' ||
error?.cause?.code === '40P01' ||
(error?.message && error.message.includes('deadlock'));
const isDeadlock =
error?.code === "40P01" ||
error?.cause?.code === "40P01" ||
(error?.message && error.message.includes("deadlock"));
if (isDeadlock && attempt < maxRetries) {
attempt++;
// Exponential backoff with jitter: 50-100ms, 100-200ms, 200-400ms
const baseDelay = Math.pow(2, attempt - 1) * 50;
const jitter = Math.random() * baseDelay;
const delay = baseDelay + jitter;
logger.warn(
`Deadlock detected for ${orgId}/${featureId}, retrying attempt ${attempt}/${maxRetries} after ${delay.toFixed(0)}ms`
);
await new Promise(resolve => setTimeout(resolve, delay));
await new Promise((resolve) => setTimeout(resolve, delay));
continue;
}
logger.error(
`Failed to add usage for ${orgId}/${featureId} after ${attempt} attempts:`,
error
@@ -239,10 +252,10 @@ export class UsageService {
): Promise<Usage> {
// Truncate value to 11 decimal places
value = this.truncateValue(value);
const usageId = `${orgId}-${featureId}`;
const meterId = getFeatureMeterId(featureId);
// Use upsert: insert if not exists, otherwise increment
const [returnUsage] = await trx
.insert(usage)
@@ -259,7 +272,8 @@ export class UsageService {
set: {
latestValue: sql`${usage.latestValue} + ${value}`
}
}).returning();
})
.returning();
return returnUsage;
}
@@ -280,7 +294,7 @@ export class UsageService {
value?: number,
customerId?: string
): Promise<void> {
if (build !== "saas") {
if (noop()) {
return;
}
try {
@@ -351,7 +365,7 @@ export class UsageService {
.set({
latestValue: newRunningTotal,
instantaneousValue: value,
updatedAt: Math.floor(Date.now() / 1000)
updatedAt: Math.floor(Date.now() / 1000)
})
.where(eq(usage.usageId, usageId));
}
@@ -366,7 +380,7 @@ export class UsageService {
meterId,
instantaneousValue: truncatedValue,
latestValue: truncatedValue,
updatedAt: Math.floor(Date.now() / 1000)
updatedAt: Math.floor(Date.now() / 1000)
});
}
});
@@ -427,7 +441,7 @@ export class UsageService {
): Promise<void> {
// Truncate value to 11 decimal places before sending to Stripe
const truncatedValue = this.truncateValue(value);
const event: StripeEvent = {
identifier: uuidv4(),
timestamp: Math.floor(new Date().getTime() / 1000),
@@ -444,7 +458,9 @@ export class UsageService {
private async writeEventToFile(event: StripeEvent): Promise<void> {
if (!this.eventsDir || !this.bucketName) {
logger.warn("Stripe local file path or bucket name is not configured, skipping event file write.");
logger.warn(
"Stripe local file path or bucket name is not configured, skipping event file write."
);
return;
}
if (!this.currentEventFile) {
@@ -493,7 +509,9 @@ export class UsageService {
private async uploadFileToS3(): Promise<void> {
if (!this.bucketName || !this.eventsDir) {
logger.warn("Stripe local file path or bucket name is not configured, skipping S3 upload.");
logger.warn(
"Stripe local file path or bucket name is not configured, skipping S3 upload."
);
return;
}
if (!this.currentEventFile) {
@@ -505,7 +523,9 @@ export class UsageService {
// Check if this file is already being uploaded
if (this.uploadingFiles.has(fileName)) {
logger.debug(`File ${fileName} is already being uploaded, skipping`);
logger.debug(
`File ${fileName} is already being uploaded, skipping`
);
return;
}
@@ -517,7 +537,9 @@ export class UsageService {
try {
await fs.access(filePath);
} catch (error) {
logger.debug(`File ${fileName} does not exist, may have been already processed`);
logger.debug(
`File ${fileName} does not exist, may have been already processed`
);
this.uploadingFiles.delete(fileName);
// Reset current file if it was this file
if (this.currentEventFile === fileName) {
@@ -537,7 +559,9 @@ export class UsageService {
await fs.unlink(filePath);
} catch (unlinkError) {
// File may have been already deleted
logger.debug(`File ${fileName} was already deleted during cleanup`);
logger.debug(
`File ${fileName} was already deleted during cleanup`
);
}
this.currentEventFile = null;
this.uploadingFiles.delete(fileName);
@@ -560,7 +584,9 @@ export class UsageService {
await fs.unlink(filePath);
} catch (unlinkError) {
// File may have been already deleted by another process
logger.debug(`File ${fileName} was already deleted during upload`);
logger.debug(
`File ${fileName} was already deleted during upload`
);
}
logger.info(
@@ -571,10 +597,7 @@ export class UsageService {
this.currentEventFile = null;
this.currentFileStartTime = 0;
} catch (error) {
logger.error(
`Failed to upload ${fileName} to S3:`,
error
);
logger.error(`Failed to upload ${fileName} to S3:`, error);
} finally {
// Always remove from uploading set
this.uploadingFiles.delete(fileName);
@@ -591,7 +614,7 @@ export class UsageService {
orgId: string,
featureId: FeatureId
): Promise<Usage | null> {
if (build !== "saas") {
if (noop()) {
return null;
}
@@ -610,7 +633,7 @@ export class UsageService {
`Creating new usage record for ${orgId}/${featureId}`
);
const meterId = getFeatureMeterId(featureId);
try {
const [newUsage] = await db
.insert(usage)
@@ -665,7 +688,7 @@ export class UsageService {
orgId: string,
featureId: FeatureId
): Promise<Usage | null> {
if (build !== "saas") {
if (noop()) {
return null;
}
await this.updateDaily(orgId, featureId); // Ensure daily usage is updated
@@ -685,7 +708,9 @@ export class UsageService {
*/
private async uploadOldEventFiles(): Promise<void> {
if (!this.eventsDir || !this.bucketName) {
logger.warn("Stripe local file path or bucket name is not configured, skipping old event file upload.");
logger.warn(
"Stripe local file path or bucket name is not configured, skipping old event file upload."
);
return;
}
try {
@@ -693,15 +718,17 @@ export class UsageService {
const now = Date.now();
for (const file of files) {
if (!file.endsWith(".json")) continue;
// Skip files that are already being uploaded
if (this.uploadingFiles.has(file)) {
logger.debug(`Skipping file ${file} as it's already being uploaded`);
logger.debug(
`Skipping file ${file} as it's already being uploaded`
);
continue;
}
const filePath = path.join(this.eventsDir, file);
try {
// Check if file still exists before processing
try {
@@ -716,7 +743,7 @@ export class UsageService {
if (age >= 90000) {
// 1.5 minutes - Mark as being uploaded
this.uploadingFiles.add(file);
try {
const fileContent = await fs.readFile(
filePath,
@@ -732,15 +759,17 @@ export class UsageService {
ContentType: "application/json"
});
await s3Client.send(uploadCommand);
// Check if file still exists before unlinking
try {
await fs.access(filePath);
await fs.unlink(filePath);
} catch (unlinkError) {
logger.debug(`File ${file} was already deleted during interval upload`);
logger.debug(
`File ${file} was already deleted during interval upload`
);
}
logger.info(
`Interval: Uploaded event file ${file} to S3 with ${events.length} events`
);
@@ -755,7 +784,9 @@ export class UsageService {
await fs.access(filePath);
await fs.unlink(filePath);
} catch (unlinkError) {
logger.debug(`Empty file ${file} was already deleted`);
logger.debug(
`Empty file ${file} was already deleted`
);
}
}
} finally {
@@ -777,12 +808,17 @@ export class UsageService {
}
}
public async checkLimitSet(orgId: string, kickSites = false, featureId?: FeatureId, usage?: Usage): Promise<boolean> {
if (build !== "saas") {
public async checkLimitSet(
orgId: string,
kickSites = false,
featureId?: FeatureId,
usage?: Usage
): Promise<boolean> {
if (noop()) {
return false;
}
// This method should check the current usage against the limits set for the organization
// and kick out all of the sites on the org
// and kick out all of the sites on the org
let hasExceededLimits = false;
try {
@@ -817,16 +853,30 @@ export class UsageService {
if (usage) {
currentUsage = usage;
} else {
currentUsage = await this.getUsage(orgId, limit.featureId as FeatureId);
currentUsage = await this.getUsage(
orgId,
limit.featureId as FeatureId
);
}
const usageValue = currentUsage?.instantaneousValue || currentUsage?.latestValue || 0;
logger.debug(`Current usage for org ${orgId} on feature ${limit.featureId}: ${usageValue}`);
logger.debug(`Limit for org ${orgId} on feature ${limit.featureId}: ${limit.value}`);
if (currentUsage && limit.value !== null && usageValue > limit.value) {
const usageValue =
currentUsage?.instantaneousValue ||
currentUsage?.latestValue ||
0;
logger.debug(
`Current usage for org ${orgId} on feature ${limit.featureId}: ${usageValue}`
);
logger.debug(
`Limit for org ${orgId} on feature ${limit.featureId}: ${limit.value}`
);
if (
currentUsage &&
limit.value !== null &&
usageValue > limit.value
) {
logger.debug(
`Org ${orgId} has exceeded limit for ${limit.featureId}: ` +
`${usageValue} > ${limit.value}`
`${usageValue} > ${limit.value}`
);
hasExceededLimits = true;
break; // Exit early if any limit is exceeded
@@ -835,7 +885,9 @@ export class UsageService {
// If any limits are exceeded, disconnect all sites for this organization
if (hasExceededLimits && kickSites) {
logger.warn(`Disconnecting all sites for org ${orgId} due to exceeded limits`);
logger.warn(
`Disconnecting all sites for org ${orgId} due to exceeded limits`
);
// Get all sites for this organization
const orgSites = await db
@@ -844,7 +896,7 @@ export class UsageService {
.where(eq(sites.orgId, orgId));
// Mark all sites as offline and send termination messages
const siteUpdates = orgSites.map(site => site.siteId);
const siteUpdates = orgSites.map((site) => site.siteId);
if (siteUpdates.length > 0) {
// Send termination messages to newt sites
@@ -865,17 +917,21 @@ export class UsageService {
};
// Don't await to prevent blocking
sendToClient(newt.newtId, payload).catch((error: any) => {
logger.error(
`Failed to send termination message to newt ${newt.newtId}:`,
error
);
});
sendToClient(newt.newtId, payload).catch(
(error: any) => {
logger.error(
`Failed to send termination message to newt ${newt.newtId}:`,
error
);
}
);
}
}
}
logger.info(`Disconnected ${orgSites.length} sites for org ${orgId} due to exceeded limits`);
logger.info(
`Disconnected ${orgSites.length} sites for org ${orgId} due to exceeded limits`
);
}
}
} catch (error) {
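
A worked expansion of the retry delays in addUsage above; since Math.random() is uniform in [0, 1), each delay lands in [baseDelay, 2 * baseDelay):

for (let attempt = 1; attempt <= 3; attempt++) {
    const baseDelay = Math.pow(2, attempt - 1) * 50; // 50, 100, 200 ms
    const jitter = Math.random() * baseDelay;        // uniform in [0, baseDelay)
    const delay = baseDelay + jitter;                // [50,100), [100,200), [200,400) ms
}

Worst case, the three retries add just under 700 ms before addUsage gives up.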

View File

@@ -1,4 +1,4 @@
import { sendToClient } from "@server/routers/ws";
import { sendToClient } from "#dynamic/routers/ws";
import { processContainerLabels } from "./parseDockerContainers";
import { applyBlueprint } from "./applyBlueprint";
import { db, sites } from "@server/db";

View File

@@ -25,7 +25,7 @@ import {
TargetData
} from "./types";
import logger from "@server/logger";
import { createCertificate } from "@server/routers/private/certificates/createCertificate";
import { createCertificate } from "#dynamic/routers/certificates/createCertificate";
import { pickPort } from "@server/routers/target/helpers";
import { resourcePassword } from "@server/db";
import { hashPassword } from "@server/auth/password";

View File

@@ -0,0 +1,13 @@
export async function getValidCertificatesForDomains(domains: Set<string>): Promise<
Array<{
id: number;
domain: string;
wildcard: boolean | null;
certFile: string | null;
keyFile: string | null;
expiresAt: number | null;
updatedAt?: number | null;
}>
> {
return []; // stub
}

View File

@@ -3,19 +3,14 @@ import { __DIRNAME, APP_VERSION } from "@server/lib/consts";
import { db } from "@server/db";
import { SupporterKey, supporterKey } from "@server/db";
import { eq } from "drizzle-orm";
import { license } from "@server/license/license";
import { license } from "#dynamic/license/license";
import { configSchema, readConfigFile } from "./readConfigFile";
import { fromError } from "zod-validation-error";
import {
privateConfigSchema,
readPrivateConfigFile
} from "@server/lib/private/readConfigFile";
import logger from "@server/logger";
import { build } from "@server/build";
import logger from "@server/logger";
export class Config {
private rawConfig!: z.infer<typeof configSchema>;
private rawPrivateConfig!: z.infer<typeof privateConfigSchema>;
supporterData: SupporterKey | null = null;
@@ -37,19 +32,6 @@ export class Config {
throw new Error(`Invalid configuration file: ${errors}`);
}
const privateEnvironment = readPrivateConfigFile();
const {
data: parsedPrivateConfig,
success: privateSuccess,
error: privateError
} = privateConfigSchema.safeParse(privateEnvironment);
if (!privateSuccess) {
const errors = fromError(privateError);
throw new Error(`Invalid private configuration file: ${errors}`);
}
if (
// @ts-ignore
parsedConfig.users ||
@@ -109,132 +91,29 @@ export class Config {
? "true"
: "false";
if (parsedPrivateConfig.branding?.colors) {
process.env.BRANDING_COLORS = JSON.stringify(
parsedPrivateConfig.branding?.colors
);
}
if (parsedPrivateConfig.branding?.logo?.light_path) {
process.env.BRANDING_LOGO_LIGHT_PATH =
parsedPrivateConfig.branding?.logo?.light_path;
}
if (parsedPrivateConfig.branding?.logo?.dark_path) {
process.env.BRANDING_LOGO_DARK_PATH =
parsedPrivateConfig.branding?.logo?.dark_path || undefined;
}
process.env.HIDE_SUPPORTER_KEY = parsedPrivateConfig.flags
?.hide_supporter_key
? "true"
: "false";
if (build != "oss") {
if (parsedPrivateConfig.branding?.logo?.light_path) {
process.env.BRANDING_LOGO_LIGHT_PATH =
parsedPrivateConfig.branding?.logo?.light_path;
}
if (parsedPrivateConfig.branding?.logo?.dark_path) {
process.env.BRANDING_LOGO_DARK_PATH =
parsedPrivateConfig.branding?.logo?.dark_path || undefined;
}
process.env.BRANDING_LOGO_AUTH_WIDTH = parsedPrivateConfig.branding
?.logo?.auth_page?.width
? parsedPrivateConfig.branding?.logo?.auth_page?.width.toString()
: undefined;
process.env.BRANDING_LOGO_AUTH_HEIGHT = parsedPrivateConfig.branding
?.logo?.auth_page?.height
? parsedPrivateConfig.branding?.logo?.auth_page?.height.toString()
: undefined;
process.env.BRANDING_LOGO_NAVBAR_WIDTH = parsedPrivateConfig
.branding?.logo?.navbar?.width
? parsedPrivateConfig.branding?.logo?.navbar?.width.toString()
: undefined;
process.env.BRANDING_LOGO_NAVBAR_HEIGHT = parsedPrivateConfig
.branding?.logo?.navbar?.height
? parsedPrivateConfig.branding?.logo?.navbar?.height.toString()
: undefined;
process.env.BRANDING_FAVICON_PATH =
parsedPrivateConfig.branding?.favicon_path;
process.env.BRANDING_APP_NAME =
parsedPrivateConfig.branding?.app_name || "Pangolin";
if (parsedPrivateConfig.branding?.footer) {
process.env.BRANDING_FOOTER = JSON.stringify(
parsedPrivateConfig.branding?.footer
);
}
process.env.LOGIN_PAGE_TITLE_TEXT =
parsedPrivateConfig.branding?.login_page?.title_text || "";
process.env.LOGIN_PAGE_SUBTITLE_TEXT =
parsedPrivateConfig.branding?.login_page?.subtitle_text || "";
process.env.SIGNUP_PAGE_TITLE_TEXT =
parsedPrivateConfig.branding?.signup_page?.title_text || "";
process.env.SIGNUP_PAGE_SUBTITLE_TEXT =
parsedPrivateConfig.branding?.signup_page?.subtitle_text || "";
process.env.RESOURCE_AUTH_PAGE_HIDE_POWERED_BY =
parsedPrivateConfig.branding?.resource_auth_page
?.hide_powered_by === true
? "true"
: "false";
process.env.RESOURCE_AUTH_PAGE_SHOW_LOGO =
parsedPrivateConfig.branding?.resource_auth_page?.show_logo ===
true
? "true"
: "false";
process.env.RESOURCE_AUTH_PAGE_TITLE_TEXT =
parsedPrivateConfig.branding?.resource_auth_page?.title_text ||
"";
process.env.RESOURCE_AUTH_PAGE_SUBTITLE_TEXT =
parsedPrivateConfig.branding?.resource_auth_page
?.subtitle_text || "";
if (parsedPrivateConfig.branding?.background_image_path) {
process.env.BACKGROUND_IMAGE_PATH =
parsedPrivateConfig.branding?.background_image_path;
}
if (parsedPrivateConfig.server.reo_client_id) {
process.env.REO_CLIENT_ID =
parsedPrivateConfig.server.reo_client_id;
}
}
if (parsedConfig.server.maxmind_db_path) {
process.env.MAXMIND_DB_PATH = parsedConfig.server.maxmind_db_path;
}
this.rawConfig = parsedConfig;
this.rawPrivateConfig = parsedPrivateConfig;
}
public async initServer() {
if (!this.rawConfig) {
throw new Error("Config not loaded. Call load() first.");
}
if (this.rawConfig.managed) {
// LETS NOT WORRY ABOUT THE SERVER SECRET WHEN MANAGED
return;
}
license.setServerSecret(this.rawConfig.server.secret!);
await this.checkKeyStatus();
}
private async checkKeyStatus() {
const licenseStatus = await license.check();
if (
!this.rawPrivateConfig.flags?.hide_supporter_key &&
build != "oss" &&
!licenseStatus.isHostLicensed
) {
if (build === "enterprise") {
await license.check();
}
if (build == "oss") {
this.checkSupporterKey();
}
}
@@ -243,10 +122,6 @@ export class Config {
return this.rawConfig;
}
public getRawPrivateConfig() {
return this.rawPrivateConfig;
}
public getNoReplyEmail(): string | undefined {
return (
this.rawConfig.email?.no_reply || this.rawConfig.email?.smtp_user
@@ -280,10 +155,6 @@ export class Config {
return false;
}
public isManagedMode() {
return typeof this.rawConfig?.managed === "object";
}
public async checkSupporterKey() {
const [key] = await db.select().from(supporterKey).limit(1);
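
Net effect on Config: private-config parsing and the branding env plumbing leave this class entirely, getRawPrivateConfig() and isManagedMode() are removed, and the key check simplifies to license.check() for enterprise builds and checkSupporterKey() for OSS builds.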

View File

@@ -2,7 +2,7 @@ import path from "path";
import { fileURLToPath } from "url";
// This is a placeholder value replaced by the build process
export const APP_VERSION = "1.10.4";
export const APP_VERSION = "1.11.0";
export const __FILENAME = fileURLToPath(import.meta.url);
export const __DIRNAME = path.dirname(__FILENAME);

View File

@@ -1,16 +1,3 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import { Request, Response, NextFunction } from "express";
import cors, { CorsOptions } from "cors";
import config from "@server/lib/config";

View File

@@ -1,16 +1,3 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import { isValidCIDR } from "@server/lib/validators";
import { getNextAvailableOrgSubnet } from "@server/lib/ip";
import {
@@ -28,9 +15,9 @@ import {
} from "@server/db";
import { eq } from "drizzle-orm";
import { defaultRoleAllowedActions } from "@server/routers/role";
import { FeatureId, limitsService, sandboxLimitSet } from "@server/lib/private/billing";
import { createCustomer } from "@server/routers/private/billing/createCustomer";
import { usageService } from "@server/lib/private/billing/usageService";
import { FeatureId, limitsService, sandboxLimitSet } from "@server/lib/billing";
import { createCustomer } from "#dynamic/lib/billing";
import { usageService } from "@server/lib/billing/usageService";
export async function createUserAccountOrg(
userId: string,

View File

@@ -16,7 +16,11 @@ export async function verifyExitNodeOrgAccess(
return { hasAccess: true, exitNode };
}
export async function listExitNodes(orgId: string, filterOnline = false, noCloud = false) {
export async function listExitNodes(
orgId: string,
filterOnline = false,
noCloud = false
) {
    // TODO: choose which nodes to send to and ping more selectively than every non-remote node
const allExitNodes = await db
.select({
@@ -59,7 +63,16 @@ export async function checkExitNodeOrg(exitNodeId: number, orgId: string) {
return false;
}
export async function resolveExitNodes(hostname: string, publicKey: string) {
export async function resolveExitNodes(
hostname: string,
publicKey: string
): Promise<
{
endpoint: string;
publicKey: string;
orgId: string;
}[]
> {
    // OSS version: simple implementation that returns an empty array
return [];
}
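A usage sketch for the resolver (hostname and key are illustrative; the OSS stub above always yields an empty array):

```
const nodes = await resolveExitNodes("node.example.org", "wgPublicKeyBase64=");
for (const node of nodes) {
    // Each entry carries endpoint, publicKey, and orgId per the signature above.
    console.log(`${node.orgId} -> ${node.endpoint}`);
}
```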

View File

@@ -1,33 +1,4 @@
import { build } from "@server/build";
// Import both modules
import * as exitNodesModule from "./exitNodes";
import * as privateExitNodesModule from "./privateExitNodes";
// Conditionally export exit nodes implementation based on build type
const exitNodesImplementation = build === "oss" ? exitNodesModule : privateExitNodesModule;
// Re-export all items from the selected implementation
export const {
verifyExitNodeOrgAccess,
listExitNodes,
selectBestExitNode,
checkExitNodeOrg,
resolveExitNodes
} = exitNodesImplementation;
// Import communications modules
import * as exitNodeCommsModule from "./exitNodeComms";
import * as privateExitNodeCommsModule from "./privateExitNodeComms";
// Conditionally export communications implementation based on build type
const exitNodeCommsImplementation = build === "oss" ? exitNodeCommsModule : privateExitNodeCommsModule;
// Re-export communications functions from the selected implementation
export const {
sendToExitNode
} = exitNodeCommsImplementation;
// Re-export shared modules
export * from "./exitNodes";
export * from "./exitNodeComms";
export * from "./subnet";
export * from "./getCurrentExitNodeId";
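With the conditional-export shim removed, call sites resolve the build-specific implementation through the `#dynamic` subpath alias seen elsewhere in this diff. A minimal sketch of how such an alias is typically wired (the exact mapping is an assumption, not shown here):

```
// package.json (hypothetical mapping; the real target is chosen per build):
//   "imports": { "#dynamic/*": "./server/dynamic/*" }
// Call sites then import the selected implementation directly:
import { sendToExitNode } from "#dynamic/lib/exitNodes";
```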

View File

@@ -1,8 +1,5 @@
import logger from "@server/logger";
import { maxmindLookup } from "@server/db/maxmind";
import axios from "axios";
import config from "./config";
import { tokenManager } from "./tokenManager";
export async function getCountryCodeForIp(
ip: string
@@ -33,32 +30,4 @@ export async function getCountryCodeForIp(
}
return;
}
export async function remoteGetCountryCodeForIp(
ip: string
): Promise<string | undefined> {
try {
const response = await axios.get(
`${config.getRawConfig().managed?.endpoint}/api/v1/hybrid/geoip/${ip}`,
await tokenManager.getAuthHeader()
);
return response.data.data.countryCode;
} catch (error) {
if (axios.isAxiosError(error)) {
logger.error("Error fetching config in verify session:", {
message: error.message,
code: error.code,
status: error.response?.status,
statusText: error.response?.statusText,
url: error.config?.url,
method: error.config?.method
});
} else {
logger.error("Error fetching config in verify session:", error);
}
}
return;
}
}
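A usage sketch for the remaining local lookup (request object assumed):

```
// Returns an ISO country code from the local MaxMind database, or undefined.
const countryCode = await getCountryCodeForIp(req.ip ?? "");
if (!countryCode) {
    logger.debug("No MaxMind match for IP; skipping geo-based rules");
}
```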

View File

@@ -1,25 +0,0 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import RedisStore from "@server/db/private/redisStore";
import { MemoryStore, Store } from "express-rate-limit";
export function createStore(): Store {
const rateLimitStore: Store = new RedisStore({
prefix: 'api-rate-limit', // Optional: customize Redis key prefix
skipFailedRequests: true, // Don't count failed requests
skipSuccessfulRequests: false, // Count successful requests
});
return rateLimitStore;
}
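For context, a sketch of how this store plugs into express-rate-limit (window and limit values are illustrative, and `app` is an assumed Express instance):

```
import rateLimit from "express-rate-limit";

const limiter = rateLimit({
    windowMs: 60_000, // 1-minute window
    limit: 100, // max requests per key per window
    store: createStore() // Redis-backed, shared across instances
});
app.use("/api", limiter);
```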

View File

@@ -1,192 +0,0 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import fs from "fs";
import yaml from "js-yaml";
import { privateConfigFilePath1 } from "@server/lib/consts";
import { z } from "zod";
import { colorsSchema } from "@server/lib/colorsSchema";
import { build } from "@server/build";
const portSchema = z.number().positive().gt(0).lte(65535);
export const privateConfigSchema = z
.object({
app: z.object({
region: z.string().optional().default("default"),
base_domain: z.string().optional()
}).optional().default({
region: "default"
}),
server: z.object({
encryption_key_path: z
.string()
.optional()
.default("./config/encryption.pem")
.pipe(z.string().min(8)),
resend_api_key: z.string().optional(),
reo_client_id: z.string().optional(),
}).optional().default({
encryption_key_path: "./config/encryption.pem"
}),
redis: z
.object({
host: z.string(),
port: portSchema,
password: z.string().optional(),
db: z.number().int().nonnegative().optional().default(0),
replicas: z
.array(
z.object({
host: z.string(),
port: portSchema,
password: z.string().optional(),
db: z.number().int().nonnegative().optional().default(0)
})
)
.optional()
// tls: z
// .object({
// reject_unauthorized: z
// .boolean()
// .optional()
// .default(true)
// })
// .optional()
})
.optional(),
gerbil: z
.object({
local_exit_node_reachable_at: z.string().optional().default("http://gerbil:3003")
})
.optional()
.default({}),
flags: z
.object({
enable_redis: z.boolean().optional(),
hide_supporter_key: z.boolean().optional()
})
.optional(),
branding: z
.object({
app_name: z.string().optional(),
background_image_path: z.string().optional(),
colors: z
.object({
light: colorsSchema.optional(),
dark: colorsSchema.optional()
})
.optional(),
logo: z
.object({
light_path: z.string().optional(),
dark_path: z.string().optional(),
auth_page: z
.object({
width: z.number().optional(),
height: z.number().optional()
})
.optional(),
navbar: z
.object({
width: z.number().optional(),
height: z.number().optional()
})
.optional()
})
.optional(),
favicon_path: z.string().optional(),
footer: z
.array(
z.object({
text: z.string(),
href: z.string().optional()
})
)
.optional(),
login_page: z
.object({
subtitle_text: z.string().optional(),
title_text: z.string().optional()
})
.optional(),
signup_page: z
.object({
subtitle_text: z.string().optional(),
title_text: z.string().optional()
})
.optional(),
resource_auth_page: z
.object({
show_logo: z.boolean().optional(),
hide_powered_by: z.boolean().optional(),
title_text: z.string().optional(),
subtitle_text: z.string().optional()
})
.optional(),
emails: z
.object({
signature: z.string().optional(),
colors: z
.object({
primary: z.string().optional()
})
.optional()
})
.optional()
})
.optional(),
stripe: z
.object({
secret_key: z.string(),
webhook_secret: z.string(),
s3Bucket: z.string(),
s3Region: z.string().default("us-east-1"),
localFilePath: z.string()
})
.optional(),
});
export function readPrivateConfigFile() {
if (build == "oss") {
return {};
}
const loadConfig = (configPath: string) => {
try {
const yamlContent = fs.readFileSync(configPath, "utf8");
const config = yaml.load(yamlContent);
return config;
} catch (error) {
if (error instanceof Error) {
throw new Error(
`Error loading configuration file: ${error.message}`
);
}
throw error;
}
};
let environment: any;
if (fs.existsSync(privateConfigFilePath1)) {
environment = loadConfig(privateConfigFilePath1);
}
if (!environment) {
throw new Error(
"No private configuration file found."
);
}
return environment;
}
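A round-trip sketch of the reader and schema above (the YAML is a hypothetical minimal file; every field shown is optional):

```
import yaml from "js-yaml";

const raw = yaml.load(`
server:
  encryption_key_path: ./config/encryption.pem
flags:
  hide_supporter_key: true
`);
const parsed = privateConfigSchema.parse(raw);
// Defaults are filled in by the schema:
parsed.app.region; // "default"
parsed.gerbil.local_exit_node_reachable_at; // "http://gerbil:3003"
```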

View File

@@ -1,19 +0,0 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import { S3Client } from "@aws-sdk/client-s3";
import config from "@server/lib/config";
export const s3Client = new S3Client({
region: config.getRawPrivateConfig().stripe?.s3Region || "us-east-1",
});

View File

@@ -12,42 +12,36 @@ const getEnvOrYaml = (envVar: string) => (valFromYaml: any) => {
export const configSchema = z
.object({
app: z.object({
dashboard_url: z
.string()
.url()
.pipe(z.string().url())
.transform((url) => url.toLowerCase())
.optional(),
log_level: z
.enum(["debug", "info", "warn", "error"])
.optional()
.default("info"),
save_logs: z.boolean().optional().default(false),
log_failed_attempts: z.boolean().optional().default(false),
telemetry: z
.object({
anonymous_usage: z.boolean().optional().default(true)
})
.optional()
.default({}),
}).optional().default({
log_level: "info",
save_logs: false,
log_failed_attempts: false,
telemetry: {
anonymous_usage: true
}
}),
managed: z
app: z
.object({
name: z.string().optional(),
id: z.string().optional(),
secret: z.string().optional(),
endpoint: z.string().optional().default("https://pangolin.fossorial.io"),
redirect_endpoint: z.string().optional()
dashboard_url: z
.string()
.url()
.pipe(z.string().url())
.transform((url) => url.toLowerCase())
.optional(),
log_level: z
.enum(["debug", "info", "warn", "error"])
.optional()
.default("info"),
save_logs: z.boolean().optional().default(false),
log_failed_attempts: z.boolean().optional().default(false),
telemetry: z
.object({
anonymous_usage: z.boolean().optional().default(true)
})
.optional()
.default({})
})
.optional(),
.optional()
.default({
log_level: "info",
save_logs: false,
log_failed_attempts: false,
telemetry: {
anonymous_usage: true
}
}),
domains: z
.record(
z.string(),
@@ -61,94 +55,95 @@ export const configSchema = z
})
)
.optional(),
server: z.object({
integration_port: portSchema
.optional()
.default(3003)
.transform(stoi)
.pipe(portSchema.optional()),
external_port: portSchema
.optional()
.default(3000)
.transform(stoi)
.pipe(portSchema),
internal_port: portSchema
.optional()
.default(3001)
.transform(stoi)
.pipe(portSchema),
next_port: portSchema
.optional()
.default(3002)
.transform(stoi)
.pipe(portSchema),
internal_hostname: z
.string()
.optional()
.default("pangolin")
.transform((url) => url.toLowerCase()),
session_cookie_name: z
.string()
.optional()
.default("p_session_token"),
resource_access_token_param: z
.string()
.optional()
.default("p_token"),
resource_access_token_headers: z
.object({
id: z.string().optional().default("P-Access-Token-Id"),
token: z.string().optional().default("P-Access-Token")
})
.optional()
.default({}),
resource_session_request_param: z
.string()
.optional()
.default("resource_session_request_param"),
dashboard_session_length_hours: z
.number()
.positive()
.gt(0)
.optional()
.default(720),
resource_session_length_hours: z
.number()
.positive()
.gt(0)
.optional()
.default(720),
cors: z
.object({
origins: z.array(z.string()).optional(),
methods: z.array(z.string()).optional(),
allowed_headers: z.array(z.string()).optional(),
credentials: z.boolean().optional()
})
.optional(),
trust_proxy: z.number().int().gte(0).optional().default(1),
secret: z
.string()
.pipe(z.string().min(8))
.optional(),
maxmind_db_path: z.string().optional()
}).optional().default({
integration_port: 3003,
external_port: 3000,
internal_port: 3001,
next_port: 3002,
internal_hostname: "pangolin",
session_cookie_name: "p_session_token",
resource_access_token_param: "p_token",
resource_access_token_headers: {
id: "P-Access-Token-Id",
token: "P-Access-Token"
},
resource_session_request_param: "resource_session_request_param",
dashboard_session_length_hours: 720,
resource_session_length_hours: 720,
trust_proxy: 1
}),
server: z
.object({
integration_port: portSchema
.optional()
.default(3003)
.transform(stoi)
.pipe(portSchema.optional()),
external_port: portSchema
.optional()
.default(3000)
.transform(stoi)
.pipe(portSchema),
internal_port: portSchema
.optional()
.default(3001)
.transform(stoi)
.pipe(portSchema),
next_port: portSchema
.optional()
.default(3002)
.transform(stoi)
.pipe(portSchema),
internal_hostname: z
.string()
.optional()
.default("pangolin")
.transform((url) => url.toLowerCase()),
session_cookie_name: z
.string()
.optional()
.default("p_session_token"),
resource_access_token_param: z
.string()
.optional()
.default("p_token"),
resource_access_token_headers: z
.object({
id: z.string().optional().default("P-Access-Token-Id"),
token: z.string().optional().default("P-Access-Token")
})
.optional()
.default({}),
resource_session_request_param: z
.string()
.optional()
.default("resource_session_request_param"),
dashboard_session_length_hours: z
.number()
.positive()
.gt(0)
.optional()
.default(720),
resource_session_length_hours: z
.number()
.positive()
.gt(0)
.optional()
.default(720),
cors: z
.object({
origins: z.array(z.string()).optional(),
methods: z.array(z.string()).optional(),
allowed_headers: z.array(z.string()).optional(),
credentials: z.boolean().optional()
})
.optional(),
trust_proxy: z.number().int().gte(0).optional().default(1),
secret: z.string().pipe(z.string().min(8)).optional(),
maxmind_db_path: z.string().optional()
})
.optional()
.default({
integration_port: 3003,
external_port: 3000,
internal_port: 3001,
next_port: 3002,
internal_hostname: "pangolin",
session_cookie_name: "p_session_token",
resource_access_token_param: "p_token",
resource_access_token_headers: {
id: "P-Access-Token-Id",
token: "P-Access-Token"
},
resource_session_request_param:
"resource_session_request_param",
dashboard_session_length_hours: 720,
resource_session_length_hours: 720,
trust_proxy: 1
}),
postgres: z
.object({
connection_string: z.string().optional(),
@@ -161,18 +156,29 @@ export const configSchema = z
.optional(),
pool: z
.object({
max_connections: z.number().positive().optional().default(20),
max_replica_connections: z.number().positive().optional().default(10),
idle_timeout_ms: z.number().positive().optional().default(30000),
connection_timeout_ms: z.number().positive().optional().default(5000)
max_connections: z
.number()
.positive()
.optional()
.default(20),
max_replica_connections: z
.number()
.positive()
.optional()
.default(10),
idle_timeout_ms: z
.number()
.positive()
.optional()
.default(30000),
connection_timeout_ms: z
.number()
.positive()
.optional()
.default(5000)
})
.optional()
.default({
max_connections: 20,
max_replica_connections: 10,
idle_timeout_ms: 30000,
connection_timeout_ms: 5000
})
.default({})
})
.optional(),
traefik: z
@@ -193,7 +199,10 @@ export const configSchema = z
.optional()
.default("/var/dynamic/router_config.yml"),
static_domains: z.array(z.string()).optional().default([]),
site_types: z.array(z.string()).optional().default(["newt", "wireguard", "local"]),
site_types: z
.array(z.string())
.optional()
.default(["newt", "wireguard", "local"]),
allow_raw_resources: z.boolean().optional().default(true),
file_mode: z.boolean().optional().default(false)
})
@@ -320,10 +329,7 @@ export const configSchema = z
if (data.flags?.disable_config_managed_domains) {
return true;
}
// If hybrid is defined, domains are not required
if (data.managed) {
return true;
}
if (keys.length === 0) {
return false;
}
@@ -335,15 +341,14 @@ export const configSchema = z
)
.refine(
(data) => {
// If hybrid is defined, server secret is not required
if (data.managed) {
return true;
}
                // If hybrid is not defined, the server secret must be defined. If it's not already defined, pull it from the env
if (data.server?.secret === undefined) {
data.server.secret = process.env.SERVER_SECRET;
}
return data.server?.secret !== undefined && data.server.secret.length > 0;
return (
data.server?.secret !== undefined &&
data.server.secret.length > 0
);
},
{
message: "Server secret must be defined"
@@ -351,12 +356,11 @@ export const configSchema = z
)
.refine(
(data) => {
// If hybrid is defined, dashboard_url is not required
if (data.managed) {
return true;
}
// If hybrid is not defined, dashboard_url must be defined
return data.app.dashboard_url !== undefined && data.app.dashboard_url.length > 0;
return (
data.app.dashboard_url !== undefined &&
data.app.dashboard_url.length > 0
);
},
{
message: "Dashboard URL must be defined"
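The net effect of the two refines above, sketched against a parsed config object `cfg`:

```
import assert from "node:assert";

// 1) server.secret: fall back to the SERVER_SECRET environment variable,
//    then require a non-empty value.
if (cfg.server?.secret === undefined) {
    cfg.server.secret = process.env.SERVER_SECRET;
}
assert(cfg.server?.secret && cfg.server.secret.length > 0,
    "Server secret must be defined");

// 2) app.dashboard_url: unconditionally required now that the managed
//    escape hatch is removed.
assert(cfg.app.dashboard_url && cfg.app.dashboard_url.length > 0,
    "Dashboard URL must be defined");
```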

View File

@@ -1,80 +0,0 @@
import axios from "axios";
import { tokenManager } from "../tokenManager";
import logger from "@server/logger";
import config from "../config";
/**
* Get valid certificates for the specified domains
*/
export async function getValidCertificatesForDomainsHybrid(domains: Set<string>): Promise<
Array<{
id: number;
domain: string;
wildcard: boolean | null;
certFile: string | null;
keyFile: string | null;
expiresAt: number | null;
updatedAt?: number | null;
}>
> {
if (domains.size === 0) {
return [];
}
const domainArray = Array.from(domains);
try {
const response = await axios.get(
`${config.getRawConfig().managed?.endpoint}/api/v1/hybrid/certificates/domains`,
{
params: {
domains: domainArray
},
headers: (await tokenManager.getAuthHeader()).headers
}
);
if (response.status !== 200) {
logger.error(
`Failed to fetch certificates for domains: ${response.status} ${response.statusText}`,
{ responseData: response.data, domains: domainArray }
);
return [];
}
// logger.debug(
// `Successfully retrieved ${response.data.data?.length || 0} certificates for ${domainArray.length} domains`
// );
return response.data.data;
} catch (error) {
// pull data out of the axios error to log
if (axios.isAxiosError(error)) {
logger.error("Error getting certificates:", {
message: error.message,
code: error.code,
status: error.response?.status,
statusText: error.response?.statusText,
url: error.config?.url,
method: error.config?.method
});
} else {
logger.error("Error getting certificates:", error);
}
return [];
}
}
export async function getValidCertificatesForDomains(domains: Set<string>): Promise<
Array<{
id: number;
domain: string;
wildcard: boolean | null;
certFile: string | null;
keyFile: string | null;
expiresAt: number | null;
updatedAt?: number | null;
}>
> {
return []; // stub
}

View File

@@ -1,14 +0,0 @@
import { build } from "@server/build";
// Import both modules
import * as certificateModule from "./certificates";
import * as privateCertificateModule from "./privateCertificates";
// Conditionally export Remote Certificates implementation based on build type
const remoteCertificatesImplementation = build === "oss" ? certificateModule : privateCertificateModule;
// Re-export all items from the selected implementation
export const {
getValidCertificatesForDomains,
getValidCertificatesForDomainsHybrid
} = remoteCertificatesImplementation;

View File

@@ -1,73 +0,0 @@
import { Request, Response, NextFunction } from "express";
import { Router } from "express";
import axios from "axios";
import HttpCode from "@server/types/HttpCode";
import createHttpError from "http-errors";
import logger from "@server/logger";
import config from "@server/lib/config";
import { tokenManager } from "./tokenManager";
/**
* Proxy function that forwards requests to the remote cloud server
*/
export const proxyToRemote = async (
req: Request,
res: Response,
next: NextFunction,
endpoint: string
): Promise<any> => {
try {
const remoteUrl = `${config.getRawConfig().managed?.endpoint?.replace(/\/$/, '')}/api/v1/${endpoint}`;
logger.debug(`Proxying request to remote server: ${remoteUrl}`);
// Forward the request to the remote server
const response = await axios({
method: req.method as any,
url: remoteUrl,
data: req.body,
headers: {
'Content-Type': 'application/json',
...(await tokenManager.getAuthHeader()).headers
},
params: req.query,
timeout: 30000, // 30 second timeout
validateStatus: () => true // Don't throw on non-2xx status codes
});
logger.debug(`Proxy response: ${JSON.stringify(response.data)}`);
// Forward the response status and data
return res.status(response.status).json(response.data);
} catch (error) {
logger.error("Error proxying request to remote server:", error);
if (axios.isAxiosError(error)) {
if (error.code === 'ECONNREFUSED' || error.code === 'ENOTFOUND') {
return next(
createHttpError(
HttpCode.SERVICE_UNAVAILABLE,
"Remote server is unavailable"
)
);
}
if (error.code === 'ECONNABORTED') {
return next(
createHttpError(
HttpCode.REQUEST_TIMEOUT,
"Request to remote server timed out"
)
);
}
}
return next(
createHttpError(
HttpCode.INTERNAL_SERVER_ERROR,
"Error communicating with remote server"
)
);
}
};
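A wiring sketch (the route and remote endpoint names are hypothetical):

```
const router = Router();
router.post("/billing/checkout", (req, res, next) =>
    proxyToRemote(req, res, next, "billing/checkout")
);
```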

15
server/lib/resend.ts Normal file
View File

@@ -0,0 +1,15 @@
// Stub: no Resend client is configured; audience IDs are intentionally blank.
export enum AudienceIds {
    General = "",
    Subscribed = "",
    Churned = ""
}
let resend;
export default resend;
export async function moveEmailToAudience(
    email: string,
    audienceId: AudienceIds
) {
    // No-op: this stub accepts the call and does nothing
    return;
}
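Call sites stay the same across builds; here the call is simply a no-op (audience choice is illustrative):

```
await moveEmailToAudience("user@example.com", AudienceIds.Subscribed);
```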

5
server/lib/s3.ts Normal file
View File

@@ -0,0 +1,5 @@
import { S3Client } from "@aws-sdk/client-s3";
export const s3Client = new S3Client({
region: process.env.S3_REGION || "us-east-1",
});

View File

@@ -9,6 +9,7 @@ import { APP_VERSION } from "./consts";
import crypto from "crypto";
import { UserType } from "@server/types/UserTypes";
import { build } from "@server/build";
import license from "@server/license/license";
class TelemetryClient {
private client: PostHog | null = null;
@@ -176,17 +177,36 @@ class TelemetryClient {
const stats = await this.getSystemStats();
this.client.capture({
distinctId: hostMeta.hostMetaId,
event: "supporter_status",
properties: {
valid: stats.supporterStatus.valid,
tier: stats.supporterStatus.tier,
github_username: stats.supporterStatus.githubUsername
? this.anon(stats.supporterStatus.githubUsername)
: "None"
}
});
if (build === "enterprise") {
const licenseStatus = await license.check();
const payload = {
distinctId: hostMeta.hostMetaId,
event: "enterprise_status",
properties: {
is_host_licensed: licenseStatus.isHostLicensed,
is_license_valid: licenseStatus.isLicenseValid,
license_tier: licenseStatus.tier || "unknown"
}
};
logger.debug("Sending enterprise startup telemtry payload:", {
payload
});
// this.client.capture(payload);
}
if (build === "oss") {
this.client.capture({
distinctId: hostMeta.hostMetaId,
event: "supporter_status",
properties: {
valid: stats.supporterStatus.valid,
tier: stats.supporterStatus.tier,
github_username: stats.supporterStatus.githubUsername
? this.anon(stats.supporterStatus.githubUsername)
: "None"
}
});
}
this.client.capture({
distinctId: hostMeta.hostMetaId,

View File

@@ -1,274 +0,0 @@
import axios from "axios";
import config from "@server/lib/config";
import logger from "@server/logger";
export interface TokenResponse {
success: boolean;
message?: string;
data: {
token: string;
};
}
/**
* Token Manager - Handles automatic token refresh for hybrid server authentication
*
* Usage throughout the application:
* ```typescript
* import { tokenManager } from "@server/lib/tokenManager";
*
* // Get the current valid token
* const token = await tokenManager.getToken();
*
* // Force refresh if needed
* await tokenManager.refreshToken();
* ```
*
* The token manager automatically refreshes tokens every 24 hours by default
* and is started once in the privateHybridServer.ts file.
*/
export class TokenManager {
private token: string | null = null;
private refreshInterval: NodeJS.Timeout | null = null;
private isRefreshing: boolean = false;
private refreshIntervalMs: number;
private retryInterval: NodeJS.Timeout | null = null;
private retryIntervalMs: number;
private tokenAvailablePromise: Promise<void> | null = null;
private tokenAvailableResolve: (() => void) | null = null;
constructor(refreshIntervalMs: number = 24 * 60 * 60 * 1000, retryIntervalMs: number = 5000) {
// Default to 24 hours for refresh, 5 seconds for retry
this.refreshIntervalMs = refreshIntervalMs;
this.retryIntervalMs = retryIntervalMs;
this.setupTokenAvailablePromise();
}
/**
* Set up promise that resolves when token becomes available
*/
private setupTokenAvailablePromise(): void {
this.tokenAvailablePromise = new Promise((resolve) => {
this.tokenAvailableResolve = resolve;
});
}
/**
* Resolve the token available promise
*/
private resolveTokenAvailable(): void {
if (this.tokenAvailableResolve) {
this.tokenAvailableResolve();
this.tokenAvailableResolve = null;
}
}
/**
* Start the token manager - gets initial token and sets up refresh interval
* If initial token fetch fails, keeps retrying every few seconds until successful
*/
async start(): Promise<void> {
logger.info("Starting token manager...");
try {
await this.refreshToken();
this.setupRefreshInterval();
this.resolveTokenAvailable();
logger.info("Token manager started successfully");
} catch (error) {
logger.warn(`Failed to get initial token, will retry in ${this.retryIntervalMs / 1000} seconds:`, error);
this.setupRetryInterval();
}
}
/**
* Set up retry interval for initial token acquisition
*/
private setupRetryInterval(): void {
if (this.retryInterval) {
clearInterval(this.retryInterval);
}
this.retryInterval = setInterval(async () => {
try {
logger.debug("Retrying initial token acquisition");
await this.refreshToken();
this.setupRefreshInterval();
this.clearRetryInterval();
this.resolveTokenAvailable();
logger.info("Token manager started successfully after retry");
} catch (error) {
logger.debug("Token acquisition retry failed, will try again");
}
}, this.retryIntervalMs);
}
/**
* Clear retry interval
*/
private clearRetryInterval(): void {
if (this.retryInterval) {
clearInterval(this.retryInterval);
this.retryInterval = null;
}
}
/**
* Stop the token manager and clear all intervals
*/
stop(): void {
if (this.refreshInterval) {
clearInterval(this.refreshInterval);
this.refreshInterval = null;
}
this.clearRetryInterval();
logger.info("Token manager stopped");
}
/**
* Get the current valid token
*/
    // TODO: we should not be getting a token every time we request it
async getToken(): Promise<string> {
// If we don't have a token yet, wait for it to become available
if (!this.token && this.tokenAvailablePromise) {
await this.tokenAvailablePromise;
}
if (!this.token) {
if (this.isRefreshing) {
// Wait for current refresh to complete
await this.waitForRefresh();
} else {
throw new Error("No valid token available");
}
}
if (!this.token) {
throw new Error("No valid token available");
}
return this.token;
}
async getAuthHeader() {
return {
headers: {
Authorization: `Bearer ${await this.getToken()}`,
"X-CSRF-Token": "x-csrf-protection",
}
};
}
/**
* Force refresh the token
*/
async refreshToken(): Promise<void> {
if (this.isRefreshing) {
await this.waitForRefresh();
return;
}
this.isRefreshing = true;
try {
const hybridConfig = config.getRawConfig().managed;
if (
!hybridConfig?.id ||
!hybridConfig?.secret ||
!hybridConfig?.endpoint
) {
throw new Error("Hybrid configuration is not defined");
}
const tokenEndpoint = `${hybridConfig.endpoint}/api/v1/auth/remoteExitNode/get-token`;
const tokenData = {
remoteExitNodeId: hybridConfig.id,
secret: hybridConfig.secret
};
logger.debug("Requesting new token from server");
const response = await axios.post<TokenResponse>(
tokenEndpoint,
tokenData,
{
headers: {
"Content-Type": "application/json",
"X-CSRF-Token": "x-csrf-protection"
},
timeout: 10000 // 10 second timeout
}
);
if (!response.data.success) {
throw new Error(
`Failed to get token: ${response.data.message}`
);
}
if (!response.data.data.token) {
throw new Error("Received empty token from server");
}
this.token = response.data.data.token;
logger.debug("Token refreshed successfully");
} catch (error) {
if (axios.isAxiosError(error)) {
logger.error("Error updating proxy mapping:", {
message: error.message,
code: error.code,
status: error.response?.status,
statusText: error.response?.statusText,
url: error.config?.url,
method: error.config?.method
});
} else {
logger.error("Error updating proxy mapping:", error);
}
throw new Error("Failed to refresh token");
} finally {
this.isRefreshing = false;
}
}
/**
* Set up automatic token refresh interval
*/
private setupRefreshInterval(): void {
if (this.refreshInterval) {
clearInterval(this.refreshInterval);
}
this.refreshInterval = setInterval(async () => {
try {
logger.debug("Auto-refreshing token");
await this.refreshToken();
} catch (error) {
logger.error("Failed to auto-refresh token:", error);
}
}, this.refreshIntervalMs);
}
/**
* Wait for current refresh operation to complete
*/
private async waitForRefresh(): Promise<void> {
return new Promise((resolve) => {
const checkInterval = setInterval(() => {
if (!this.isRefreshing) {
clearInterval(checkInterval);
resolve();
}
}, 100);
});
}
}
// Export a singleton instance for use throughout the application
export const tokenManager = new TokenManager();
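Beyond the docstring example, the prevailing pattern in this codebase is to pass the auth header straight into axios, as in the traefik-config fetch removed later in this diff:

```
const response = await axios.get(
    `${config.getRawConfig().managed?.endpoint}/api/v1/hybrid/traefik-config`,
    await tokenManager.getAuthHeader()
);
```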

View File

@@ -6,14 +6,10 @@ import * as yaml from "js-yaml";
import axios from "axios";
import { db, exitNodes } from "@server/db";
import { eq } from "drizzle-orm";
import { tokenManager } from "../tokenManager";
import { getCurrentExitNodeId } from "@server/lib/exitNodes";
import { getTraefikConfig } from "./";
import {
getValidCertificatesForDomains,
getValidCertificatesForDomainsHybrid
} from "../remoteCertificates";
import { sendToExitNode } from "../exitNodes";
import { getTraefikConfig } from "#dynamic/lib/traefik";
import { getValidCertificatesForDomains } from "#dynamic/lib/certificates";
import { sendToExitNode } from "#dynamic/lib/exitNodes";
import { build } from "@server/build";
export class TraefikConfigManager {
@@ -313,93 +309,92 @@ export class TraefikConfigManager {
this.lastActiveDomains = new Set(domains);
}
// Scan current local certificate state
this.lastLocalCertificateState =
await this.scanLocalCertificateState();
if (
process.env.USE_PANGOLIN_DNS === "true" &&
build != "oss"
) {
// Scan current local certificate state
this.lastLocalCertificateState =
await this.scanLocalCertificateState();
// Only fetch certificates if needed (domain changes, missing certs, or daily renewal check)
let validCertificates: Array<{
id: number;
domain: string;
wildcard: boolean | null;
certFile: string | null;
keyFile: string | null;
expiresAt: number | null;
updatedAt?: number | null;
}> = [];
// Only fetch certificates if needed (domain changes, missing certs, or daily renewal check)
let validCertificates: Array<{
id: number;
domain: string;
wildcard: boolean | null;
certFile: string | null;
keyFile: string | null;
expiresAt: number | null;
updatedAt?: number | null;
}> = [];
if (this.shouldFetchCertificates(domains)) {
// Filter out domains that are already covered by wildcard certificates
const domainsToFetch = new Set<string>();
for (const domain of domains) {
if (
!isDomainCoveredByWildcard(
domain,
this.lastLocalCertificateState
)
) {
domainsToFetch.add(domain);
} else {
logger.debug(
`Domain ${domain} is covered by existing wildcard certificate, skipping fetch`
);
}
}
if (domainsToFetch.size > 0) {
// Get valid certificates for domains not covered by wildcards
if (config.isManagedMode()) {
validCertificates =
await getValidCertificatesForDomainsHybrid(
domainsToFetch
if (this.shouldFetchCertificates(domains)) {
// Filter out domains that are already covered by wildcard certificates
const domainsToFetch = new Set<string>();
for (const domain of domains) {
if (
!isDomainCoveredByWildcard(
domain,
this.lastLocalCertificateState
)
) {
domainsToFetch.add(domain);
} else {
logger.debug(
`Domain ${domain} is covered by existing wildcard certificate, skipping fetch`
);
} else {
}
}
if (domainsToFetch.size > 0) {
// Get valid certificates for domains not covered by wildcards
validCertificates =
await getValidCertificatesForDomains(
domainsToFetch
);
this.lastCertificateFetch = new Date();
this.lastKnownDomains = new Set(domains);
logger.info(
`Fetched ${validCertificates.length} certificates from remote (${domains.size - domainsToFetch.size} domains covered by wildcards)`
);
// Download and decrypt new certificates
await this.processValidCertificates(validCertificates);
} else {
logger.info(
"All domains are covered by existing wildcard certificates, no fetch needed"
);
this.lastCertificateFetch = new Date();
this.lastKnownDomains = new Set(domains);
}
this.lastCertificateFetch = new Date();
this.lastKnownDomains = new Set(domains);
logger.info(
`Fetched ${validCertificates.length} certificates from remote (${domains.size - domainsToFetch.size} domains covered by wildcards)`
);
// Download and decrypt new certificates
await this.processValidCertificates(validCertificates);
// Always ensure all existing certificates (including wildcards) are in the config
await this.updateDynamicConfigFromLocalCerts(domains);
} else {
logger.info(
"All domains are covered by existing wildcard certificates, no fetch needed"
);
this.lastCertificateFetch = new Date();
this.lastKnownDomains = new Set(domains);
const timeSinceLastFetch = this.lastCertificateFetch
? Math.round(
(Date.now() -
this.lastCertificateFetch.getTime()) /
(1000 * 60)
)
: 0;
// logger.debug(
// `Skipping certificate fetch - no changes detected and within 24-hour window (last fetch: ${timeSinceLastFetch} minutes ago)`
// );
// Still need to ensure config is up to date with existing certificates
await this.updateDynamicConfigFromLocalCerts(domains);
}
// Always ensure all existing certificates (including wildcards) are in the config
await this.updateDynamicConfigFromLocalCerts(domains);
} else {
const timeSinceLastFetch = this.lastCertificateFetch
? Math.round(
(Date.now() - this.lastCertificateFetch.getTime()) /
(1000 * 60)
)
: 0;
// Clean up certificates for domains no longer in use
await this.cleanupUnusedCertificates(domains);
// logger.debug(
// `Skipping certificate fetch - no changes detected and within 24-hour window (last fetch: ${timeSinceLastFetch} minutes ago)`
// );
// Still need to ensure config is up to date with existing certificates
await this.updateDynamicConfigFromLocalCerts(domains);
                // wait 500 ms for traefik to pick up the new certificates
await new Promise((resolve) => setTimeout(resolve, 500));
}
// Clean up certificates for domains no longer in use
await this.cleanupUnusedCertificates(domains);
            // wait 500 ms for traefik to pick up the new certificates
await new Promise((resolve) => setTimeout(resolve, 500));
// Write traefik config as YAML to a second dynamic config file if changed
await this.writeTraefikDynamicConfig(traefikConfig);
@@ -448,32 +443,15 @@ export class TraefikConfigManager {
} | null> {
let traefikConfig;
try {
if (config.isManagedMode()) {
const resp = await axios.get(
`${config.getRawConfig().managed?.endpoint}/api/v1/hybrid/traefik-config`,
await tokenManager.getAuthHeader()
);
if (resp.status !== 200) {
logger.error(
`Failed to fetch traefik config: ${resp.status} ${resp.statusText}`,
{ responseData: resp.data }
);
return null;
}
traefikConfig = resp.data.data;
} else {
const currentExitNode = await getCurrentExitNodeId();
// logger.debug(`Fetching traefik config for exit node: ${currentExitNode}`);
traefikConfig = await getTraefikConfig(
// this is called by the local exit node to get its own config
currentExitNode,
config.getRawConfig().traefik.site_types,
build == "oss", // filter out the namespace domains in open source
build != "oss" // generate the login pages on the cloud and hybrid
);
}
const currentExitNode = await getCurrentExitNodeId();
// logger.debug(`Fetching traefik config for exit node: ${currentExitNode}`);
traefikConfig = await getTraefikConfig(
// this is called by the local exit node to get its own config
currentExitNode,
config.getRawConfig().traefik.site_types,
build == "oss", // filter out the namespace domains in open source
build != "oss" // generate the login pages on the cloud and hybrid
);
const domains = new Set<string>();
@@ -718,7 +696,12 @@ export class TraefikConfigManager {
for (const cert of validCertificates) {
try {
if (!cert.certFile || !cert.keyFile) {
if (
!cert.certFile ||
!cert.keyFile ||
cert.certFile.length === 0 ||
cert.keyFile.length === 0
) {
logger.warn(
`Certificate for domain ${cert.domain} is missing cert or key file`
);
@@ -842,7 +825,9 @@ export class TraefikConfigManager {
const lastUpdateStr = fs
.readFileSync(lastUpdatePath, "utf8")
.trim();
lastUpdateTime = Math.floor(new Date(lastUpdateStr).getTime() / 1000);
lastUpdateTime = Math.floor(
new Date(lastUpdateStr).getTime() / 1000
);
} catch {
lastUpdateTime = null;
}

View File

@@ -1,9 +1,8 @@
import { db, exitNodes, targetHealthCheck } from "@server/db";
import { db, targetHealthCheck } from "@server/db";
import { and, eq, inArray, or, isNull, ne, isNotNull, desc } from "drizzle-orm";
import logger from "@server/logger";
import config from "@server/lib/config";
import { orgs, resources, sites, Target, targets } from "@server/db";
import { build } from "@server/build";
import { resources, sites, Target, targets } from "@server/db";
import createPathRewriteMiddleware from "./middleware";
import { sanitize, validatePathRewriteConfig } from "./utils";
@@ -105,7 +104,12 @@ export async function getTraefikConfig(
const priority = row.priority ?? 100;
// Create a unique key combining resourceId, path config, and rewrite config
const pathKey = [targetPath, pathMatchType, rewritePath, rewritePathType]
const pathKey = [
targetPath,
pathMatchType,
rewritePath,
rewritePathType
]
.filter(Boolean)
.join("-");
const mapKey = [resourceId, pathKey].filter(Boolean).join("-");
@@ -120,13 +124,15 @@ export async function getTraefikConfig(
);
if (!validation.isValid) {
logger.error(`Invalid path rewrite configuration for resource ${resourceId}: ${validation.error}`);
logger.error(
`Invalid path rewrite configuration for resource ${resourceId}: ${validation.error}`
);
return;
}
resourcesMap.set(key, {
resourceId: row.resourceId,
name: resourceName,
name: resourceName,
fullDomain: row.fullDomain,
ssl: row.ssl,
http: row.http,
@@ -158,9 +164,6 @@ export async function getTraefikConfig(
port: row.port,
internalPort: row.internalPort,
enabled: row.targetEnabled,
rewritePath: row.rewritePath,
rewritePathType: row.rewritePathType,
priority: row.priority,
site: {
siteId: row.siteId,
type: row.siteType,
@@ -239,21 +242,18 @@ export async function getTraefikConfig(
preferWildcardCert = configDomain.prefer_wildcard_cert;
}
let tls = {};
if (build == "oss") {
tls = {
certResolver: certResolver,
...(preferWildcardCert
? {
domains: [
{
main: wildCard
}
]
}
: {})
};
}
const tls = {
certResolver: certResolver,
...(preferWildcardCert
? {
domains: [
{
main: wildCard
}
]
}
: {})
};
const additionalMiddlewares =
config.getRawConfig().traefik.additional_middlewares || [];
@@ -264,11 +264,12 @@ export async function getTraefikConfig(
];
// Handle path rewriting middleware
if (resource.rewritePath &&
resource.path &&
if (
resource.rewritePath !== null &&
resource.path !== null &&
resource.pathMatchType &&
resource.rewritePathType) {
resource.rewritePathType
) {
// Create a unique middleware name
const rewriteMiddlewareName = `rewrite-r${resource.resourceId}-${key}`;
@@ -287,7 +288,10 @@ export async function getTraefikConfig(
}
                        // Add the middleware to the config
Object.assign(config_output.http.middlewares, rewriteResult.middlewares);
Object.assign(
config_output.http.middlewares,
rewriteResult.middlewares
);
                        // Add the middlewares to the router middleware chain
if (rewriteResult.chain) {
@@ -298,9 +302,13 @@ export async function getTraefikConfig(
routerMiddlewares.push(rewriteMiddlewareName);
}
logger.debug(`Created path rewrite middleware ${rewriteMiddlewareName}: ${resource.pathMatchType}(${resource.path}) -> ${resource.rewritePathType}(${resource.rewritePath})`);
logger.debug(
`Created path rewrite middleware ${rewriteMiddlewareName}: ${resource.pathMatchType}(${resource.path}) -> ${resource.rewritePathType}(${resource.rewritePath})`
);
} catch (error) {
logger.error(`Failed to create path rewrite middleware for resource ${resource.resourceId}: ${error}`);
logger.error(
`Failed to create path rewrite middleware for resource ${resource.resourceId}: ${error}`
);
}
}
@@ -316,7 +324,9 @@ export async function getTraefikConfig(
value: string;
}[];
} catch (e) {
logger.warn(`Failed to parse headers for resource ${resource.resourceId}: ${e}`);
logger.warn(
`Failed to parse headers for resource ${resource.resourceId}: ${e}`
);
}
headersArr.forEach((header) => {
@@ -482,14 +492,14 @@ export async function getTraefikConfig(
})(),
...(resource.stickySession
? {
sticky: {
cookie: {
name: "p_sticky", // TODO: make this configurable via config.yml like other cookies
secure: resource.ssl,
httpOnly: true
}
}
}
sticky: {
cookie: {
name: "p_sticky", // TODO: make this configurable via config.yml like other cookies
secure: resource.ssl,
httpOnly: true
}
}
}
: {})
}
};
@@ -590,13 +600,13 @@ export async function getTraefikConfig(
})(),
...(resource.stickySession
? {
sticky: {
ipStrategy: {
depth: 0,
sourcePort: true
}
}
}
sticky: {
ipStrategy: {
depth: 0,
sourcePort: true
}
}
}
: {})
}
};

View File

@@ -1,11 +1 @@
import { build } from "@server/build";
// Import both modules
import * as traefikModule from "./getTraefikConfig";
import * as privateTraefikModule from "./privateGetTraefikConfig";
// Conditionally export Traefik configuration implementation based on build type
const traefikImplementation = build === "oss" ? traefikModule : privateTraefikModule;
// Re-export all items from the selected implementation
export const { getTraefikConfig } = traefikImplementation;
export * from "./getTraefikConfig";

View File

@@ -1,26 +1,17 @@
import { db } from "@server/db";
import { hostMeta, licenseKey, sites } from "@server/db";
import logger from "@server/logger";
import NodeCache from "node-cache";
import { validateJWT } from "./licenseJwt";
import { count, eq } from "drizzle-orm";
import moment from "moment";
import { db, hostMeta, HostMeta } from "@server/db";
import { setHostMeta } from "@server/lib/hostMeta";
import { encrypt, decrypt } from "@server/lib/crypto";
const keyTypes = ["HOST", "SITES"] as const;
type KeyType = (typeof keyTypes)[number];
const keyTypes = ["host"] as const;
export type LicenseKeyType = (typeof keyTypes)[number];
const keyTiers = ["PROFESSIONAL", "ENTERPRISE"] as const;
type KeyTier = (typeof keyTiers)[number];
const keyTiers = ["personal", "enterprise"] as const;
export type LicenseKeyTier = (typeof keyTiers)[number];
export type LicenseStatus = {
isHostLicensed: boolean; // Are there any license keys?
isLicenseValid: boolean; // Is the license key valid?
hostId: string; // Host ID
maxSites?: number;
usedSites?: number;
tier?: KeyTier;
tier?: LicenseKeyTier;
};
export type LicenseKeyCache = {
@@ -28,451 +19,27 @@ export type LicenseKeyCache = {
licenseKeyEncrypted: string;
valid: boolean;
iat?: Date;
type?: KeyType;
tier?: KeyTier;
numSites?: number;
};
type ActivateLicenseKeyAPIResponse = {
data: {
instanceId: string;
};
success: boolean;
error: string;
message: string;
status: number;
};
type ValidateLicenseAPIResponse = {
data: {
licenseKeys: {
[key: string]: string;
};
};
success: boolean;
error: string;
message: string;
status: number;
};
type TokenPayload = {
valid: boolean;
type: KeyType;
tier: KeyTier;
quantity: number;
terminateAt: string; // ISO
iat: number; // Issued at
type?: LicenseKeyType;
tier?: LicenseKeyTier;
terminateAt?: Date;
};
export class License {
private phoneHomeInterval = 6 * 60 * 60; // 6 hours = 6 * 60 * 60 = 21600 seconds
private validationServerUrl =
"https://api.fossorial.io/api/v1/license/professional/validate";
private activationServerUrl =
"https://api.fossorial.io/api/v1/license/professional/activate";
private statusCache = new NodeCache({ stdTTL: this.phoneHomeInterval });
private licenseKeyCache = new NodeCache();
private ephemeralKey!: string;
private statusKey = "status";
private serverSecret!: string;
private publicKey = `-----BEGIN PUBLIC KEY-----
MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAx9RKc8cw+G8r7h/xeozF
FNkRDggQfYO6Ae+EWHGujZ9WYAZ10spLh9F/zoLhhr3XhsjpoRXwMfgNuO5HstWf
CYM20I0l7EUUMWEyWd4tZLd+5XQ4jY5xWOCWyFJAGQSp7flcRmxdfde+l+xg9eKl
apbY84aVp09/GqM96hCS+CsQZrhohu/aOqYVB/eAhF01qsbmiZ7Y3WtdhTldveYt
h4mZWGmjf8d/aEgePf/tk1gp0BUxf+Ae5yqoAqU+6aiFbjJ7q1kgxc18PWFGfE9y
zSk+OZk887N5ThQ52154+oOUCMMR2Y3t5OH1hVZod51vuY2u5LsQXsf+87PwB91y
LQIDAQAB
-----END PUBLIC KEY-----`;
constructor(private hostMeta: HostMeta) {}
constructor(private hostId: string) {
this.ephemeralKey = Buffer.from(
JSON.stringify({ ts: new Date().toISOString() })
).toString("base64");
setInterval(
async () => {
await this.check();
},
1000 * 60 * 60
); // 1 hour = 60 * 60 = 3600 seconds
}
public listKeys(): LicenseKeyCache[] {
const keys = this.licenseKeyCache.keys();
return keys.map((key) => {
return this.licenseKeyCache.get<LicenseKeyCache>(key)!;
});
public async check(): Promise<LicenseStatus> {
return {
hostId: this.hostMeta.hostMetaId,
isHostLicensed: false,
isLicenseValid: false
};
}
public setServerSecret(secret: string) {
this.serverSecret = secret;
}
public async forceRecheck() {
this.statusCache.flushAll();
this.licenseKeyCache.flushAll();
return await this.check();
}
public async isUnlocked(): Promise<boolean> {
const status = await this.check();
if (status.isHostLicensed) {
if (status.isLicenseValid) {
return true;
}
}
return false;
}
public async check(): Promise<LicenseStatus> {
// Set used sites
const [siteCount] = await db
.select({
value: count()
})
.from(sites);
const status: LicenseStatus = {
hostId: this.hostId,
isHostLicensed: true,
isLicenseValid: false,
maxSites: undefined,
usedSites: siteCount.value
};
try {
if (this.statusCache.has(this.statusKey)) {
const res = this.statusCache.get("status") as LicenseStatus;
res.usedSites = status.usedSites;
return res;
}
// Invalidate all
this.licenseKeyCache.flushAll();
const allKeysRes = await db.select().from(licenseKey);
if (allKeysRes.length === 0) {
status.isHostLicensed = false;
return status;
}
let foundHostKey = false;
// Validate stored license keys
for (const key of allKeysRes) {
try {
// Decrypt the license key and token
const decryptedKey = decrypt(
key.licenseKeyId,
this.serverSecret
);
const decryptedToken = decrypt(
key.token,
this.serverSecret
);
const payload = validateJWT<TokenPayload>(
decryptedToken,
this.publicKey
);
this.licenseKeyCache.set<LicenseKeyCache>(decryptedKey, {
licenseKey: decryptedKey,
licenseKeyEncrypted: key.licenseKeyId,
valid: payload.valid,
type: payload.type,
tier: payload.tier,
numSites: payload.quantity,
iat: new Date(payload.iat * 1000)
});
if (payload.type === "HOST") {
foundHostKey = true;
}
} catch (e) {
logger.error(
`Error validating license key: ${key.licenseKeyId}`
);
logger.error(e);
this.licenseKeyCache.set<LicenseKeyCache>(
key.licenseKeyId,
{
licenseKey: key.licenseKeyId,
licenseKeyEncrypted: key.licenseKeyId,
valid: false
}
);
}
}
if (!foundHostKey && allKeysRes.length) {
logger.debug("No host license key found");
status.isHostLicensed = false;
}
const keys = allKeysRes.map((key) => ({
licenseKey: decrypt(key.licenseKeyId, this.serverSecret),
instanceId: decrypt(key.instanceId, this.serverSecret)
}));
let apiResponse: ValidateLicenseAPIResponse | undefined;
try {
// Phone home to validate license keys
apiResponse = await this.phoneHome(keys);
if (!apiResponse?.success) {
throw new Error(apiResponse?.error);
}
} catch (e) {
logger.error("Error communicating with license server:");
logger.error(e);
}
logger.debug("Validate response", apiResponse);
// Check and update all license keys with server response
for (const key of keys) {
try {
const cached = this.licenseKeyCache.get<LicenseKeyCache>(
key.licenseKey
)!;
const licenseKeyRes =
apiResponse?.data?.licenseKeys[key.licenseKey];
if (!apiResponse || !licenseKeyRes) {
logger.debug(
`No response from server for license key: ${key.licenseKey}`
);
if (cached.iat) {
const exp = moment(cached.iat)
.add(7, "days")
.toDate();
if (exp > new Date()) {
logger.debug(
`Using cached license key: ${key.licenseKey}, valid ${cached.valid}`
);
continue;
}
}
logger.debug(
`Can't trust license key: ${key.licenseKey}`
);
cached.valid = false;
this.licenseKeyCache.set<LicenseKeyCache>(
key.licenseKey,
cached
);
continue;
}
const payload = validateJWT<TokenPayload>(
licenseKeyRes,
this.publicKey
);
cached.valid = payload.valid;
cached.type = payload.type;
cached.tier = payload.tier;
cached.numSites = payload.quantity;
cached.iat = new Date(payload.iat * 1000);
// Encrypt the updated token before storing
const encryptedKey = encrypt(
key.licenseKey,
this.serverSecret
);
const encryptedToken = encrypt(
licenseKeyRes,
this.serverSecret
);
await db
.update(licenseKey)
.set({
token: encryptedToken
})
.where(eq(licenseKey.licenseKeyId, encryptedKey));
this.licenseKeyCache.set<LicenseKeyCache>(
key.licenseKey,
cached
);
} catch (e) {
logger.error(`Error validating license key: ${key}`);
logger.error(e);
}
}
// Compute host status
for (const key of keys) {
const cached = this.licenseKeyCache.get<LicenseKeyCache>(
key.licenseKey
)!;
logger.debug("Checking key", cached);
if (cached.type === "HOST") {
status.isLicenseValid = cached.valid;
status.tier = cached.tier;
}
if (!cached.valid) {
continue;
}
if (!status.maxSites) {
status.maxSites = 0;
}
status.maxSites += cached.numSites || 0;
}
} catch (error) {
logger.error("Error checking license status:");
logger.error(error);
}
this.statusCache.set(this.statusKey, status);
return status;
}
public async activateLicenseKey(key: string) {
// Encrypt the license key before storing
const encryptedKey = encrypt(key, this.serverSecret);
const [existingKey] = await db
.select()
.from(licenseKey)
.where(eq(licenseKey.licenseKeyId, encryptedKey))
.limit(1);
if (existingKey) {
throw new Error("License key already exists");
}
let instanceId: string | undefined;
try {
// Call activate
const apiResponse = await fetch(this.activationServerUrl, {
method: "POST",
headers: {
"Content-Type": "application/json"
},
body: JSON.stringify({
licenseKey: key,
instanceName: this.hostId
})
});
const data = await apiResponse.json();
if (!data.success) {
throw new Error(`${data.message || data.error}`);
}
const response = data as ActivateLicenseKeyAPIResponse;
if (!response.data) {
throw new Error("No response from server");
}
if (!response.data.instanceId) {
throw new Error("No instance ID in response");
}
instanceId = response.data.instanceId;
} catch (error) {
throw Error(`Error activating license key: ${error}`);
}
// Phone home to validate license key
const keys = [
{
licenseKey: key,
instanceId: instanceId!
}
];
let validateResponse: ValidateLicenseAPIResponse;
try {
validateResponse = await this.phoneHome(keys);
if (!validateResponse) {
throw new Error("No response from server");
}
if (!validateResponse.success) {
throw new Error(validateResponse.error);
}
// Validate the license key
const licenseKeyRes = validateResponse.data.licenseKeys[key];
if (!licenseKeyRes) {
throw new Error("Invalid license key");
}
const payload = validateJWT<TokenPayload>(
licenseKeyRes,
this.publicKey
);
if (!payload.valid) {
throw new Error("Invalid license key");
}
const encryptedToken = encrypt(licenseKeyRes, this.serverSecret);
// Encrypt the instanceId before storing
const encryptedInstanceId = encrypt(instanceId!, this.serverSecret);
// Store the license key in the database
await db.insert(licenseKey).values({
licenseKeyId: encryptedKey,
token: encryptedToken,
instanceId: encryptedInstanceId
});
} catch (error) {
throw Error(`Error validating license key: ${error}`);
}
// Invalidate the cache and re-compute the status
return await this.forceRecheck();
}
private async phoneHome(
keys: {
licenseKey: string;
instanceId: string;
}[]
): Promise<ValidateLicenseAPIResponse> {
// Decrypt the instanceIds before sending to the server
const decryptedKeys = keys.map((key) => ({
licenseKey: key.licenseKey,
instanceId: key.instanceId
? decrypt(key.instanceId, this.serverSecret)
: key.instanceId
}));
const response = await fetch(this.validationServerUrl, {
method: "POST",
headers: {
"Content-Type": "application/json"
},
body: JSON.stringify({
licenseKeys: decryptedKeys,
ephemeralKey: this.ephemeralKey,
instanceName: this.hostId
})
});
const data = await response.json();
return data as ValidateLicenseAPIResponse;
}
}
await setHostMeta();
@@ -483,6 +50,6 @@ if (!info) {
throw new Error("Host information not found");
}
export const license = new License(info.hostMetaId);
export const license = new License(info);
export default license;
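A gating sketch using the slimmed-down status object (variable name is illustrative):

```
const status = await license.check();
// The OSS implementation always reports an unlicensed host.
const enterpriseEnabled = status.isHostLicensed && status.isLicenseValid;
```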

View File

@@ -21,10 +21,9 @@ export * from "./verifyIsLoggedInUser";
export * from "./verifyIsLoggedInUser";
export * from "./verifyClientAccess";
export * from "./integration";
export * from "./verifyValidLicense";
export * from "./verifyUserHasAction";
export * from "./verifyApiKeyAccess";
export * from "./verifyDomainAccess";
export * from "./verifyClientsEnabled";
export * from "./verifyUserIsOrgOwner";
export * from "./verifySiteResourceAccess";
export * from "./verifySiteResourceAccess";

28
server/private/cleanup.ts Normal file
View File

@@ -0,0 +1,28 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import { rateLimitService } from "#private/lib/rateLimit";
import { cleanup as wsCleanup } from "#private/routers/ws";
async function cleanup() {
await rateLimitService.cleanup();
await wsCleanup();
process.exit(0);
}
export async function initCleanup() {
// Handle process termination
process.on("SIGTERM", () => cleanup());
process.on("SIGINT", () => cleanup());
}
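Bootstrap sketch (the call site is assumed; once registered, SIGTERM/SIGINT drain the rate limiter and websocket state before exit):

```
import { initCleanup } from "#private/cleanup";

await initCleanup();
```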

View File

@@ -13,7 +13,7 @@
import { customers, db } from "@server/db";
import { eq } from "drizzle-orm";
import stripe from "@server/lib/private/stripe";
import stripe from "#private/lib/stripe";
import { build } from "@server/build";
export async function createCustomer(

View File

@@ -0,0 +1,46 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import { getTierPriceSet } from "@server/lib/billing/tiers";
import { getOrgSubscriptionData } from "#private/routers/billing/getOrgSubscription";
import { build } from "@server/build";
export async function getOrgTierData(
orgId: string
): Promise<{ tier: string | null; active: boolean }> {
let tier = null;
let active = false;
if (build !== "saas") {
return { tier, active };
}
const { subscription, items } = await getOrgSubscriptionData(orgId);
if (items && items.length > 0) {
const tierPriceSet = getTierPriceSet();
// Iterate through tiers in order (earlier keys are higher tiers)
for (const [tierId, priceId] of Object.entries(tierPriceSet)) {
// Check if any subscription item matches this tier's price ID
const matchingItem = items.find((item) => item.priceId === priceId);
if (matchingItem) {
tier = tierId;
break;
}
}
}
if (subscription && subscription.status === "active") {
active = true;
}
return { tier, active };
}
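Usage sketch (org ID is illustrative; tier IDs are the keys returned by getTierPriceSet()):

```
const { tier, active } = await getOrgTierData("org_abc123");
if (active && tier === "enterprise") {
    // apply the enterprise limit set for this org
}
```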

View File

@@ -11,6 +11,5 @@
* This file is not licensed under the AGPLv3.
*/
export * from "./limitSet";
export * from "./features";
export * from "./limitsService";
export * from "./getOrgTierData";
export * from "./createCustomer";

View File

@@ -11,10 +11,10 @@
* This file is not licensed under the AGPLv3.
*/
import config from "../config";
import config from "./config";
import { certificates, db } from "@server/db";
import { and, eq, isNotNull } from "drizzle-orm";
import { decryptData } from "../encryption";
import { decryptData } from "@server/lib/encryption";
import * as fs from "fs";
export async function getValidCertificatesForDomains(
@@ -97,20 +97,4 @@ export async function getValidCertificatesForDomains(
});
return validCertsDecrypted;
}
export async function getValidCertificatesForDomainsHybrid(
domains: Set<string>
): Promise<
Array<{
id: number;
domain: string;
wildcard: boolean | null;
certFile: string | null;
keyFile: string | null;
expiresAt: number | null;
updatedAt?: number | null;
}>
> {
return []; // stub
}
}

View File

@@ -0,0 +1,165 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import { z } from "zod";
import { __DIRNAME } from "@server/lib/consts";
import { SupporterKey } from "@server/db";
import { fromError } from "zod-validation-error";
import {
privateConfigSchema,
readPrivateConfigFile
} from "#private/lib/readConfigFile";
import { build } from "@server/build";
export class PrivateConfig {
private rawPrivateConfig!: z.infer<typeof privateConfigSchema>;
supporterData: SupporterKey | null = null;
supporterHiddenUntil: number | null = null;
isDev: boolean = process.env.ENVIRONMENT !== "prod";
constructor() {
const privateEnvironment = readPrivateConfigFile();
const {
data: parsedPrivateConfig,
success: privateSuccess,
error: privateError
} = privateConfigSchema.safeParse(privateEnvironment);
if (!privateSuccess) {
const errors = fromError(privateError);
throw new Error(`Invalid private configuration file: ${errors}`);
}
if (parsedPrivateConfig.branding?.colors) {
process.env.BRANDING_COLORS = JSON.stringify(
parsedPrivateConfig.branding?.colors
);
}
if (parsedPrivateConfig.branding?.logo?.light_path) {
process.env.BRANDING_LOGO_LIGHT_PATH =
parsedPrivateConfig.branding?.logo?.light_path;
}
if (parsedPrivateConfig.branding?.logo?.dark_path) {
process.env.BRANDING_LOGO_DARK_PATH =
parsedPrivateConfig.branding?.logo?.dark_path || undefined;
}
if (build != "oss") {
if (parsedPrivateConfig.branding?.logo?.light_path) {
process.env.BRANDING_LOGO_LIGHT_PATH =
parsedPrivateConfig.branding?.logo?.light_path;
}
if (parsedPrivateConfig.branding?.logo?.dark_path) {
process.env.BRANDING_LOGO_DARK_PATH =
parsedPrivateConfig.branding?.logo?.dark_path || undefined;
}
process.env.BRANDING_LOGO_AUTH_WIDTH = parsedPrivateConfig.branding
?.logo?.auth_page?.width
? parsedPrivateConfig.branding?.logo?.auth_page?.width.toString()
: undefined;
process.env.BRANDING_LOGO_AUTH_HEIGHT = parsedPrivateConfig.branding
?.logo?.auth_page?.height
? parsedPrivateConfig.branding?.logo?.auth_page?.height.toString()
: undefined;
process.env.BRANDING_LOGO_NAVBAR_WIDTH = parsedPrivateConfig
.branding?.logo?.navbar?.width
? parsedPrivateConfig.branding?.logo?.navbar?.width.toString()
: undefined;
process.env.BRANDING_LOGO_NAVBAR_HEIGHT = parsedPrivateConfig
.branding?.logo?.navbar?.height
? parsedPrivateConfig.branding?.logo?.navbar?.height.toString()
: undefined;
process.env.BRANDING_FAVICON_PATH =
parsedPrivateConfig.branding?.favicon_path;
process.env.BRANDING_APP_NAME =
parsedPrivateConfig.branding?.app_name || "Pangolin";
if (parsedPrivateConfig.branding?.footer) {
process.env.BRANDING_FOOTER = JSON.stringify(
parsedPrivateConfig.branding?.footer
);
}
process.env.LOGIN_PAGE_TITLE_TEXT =
parsedPrivateConfig.branding?.login_page?.title_text || "";
process.env.LOGIN_PAGE_SUBTITLE_TEXT =
parsedPrivateConfig.branding?.login_page?.subtitle_text || "";
process.env.SIGNUP_PAGE_TITLE_TEXT =
parsedPrivateConfig.branding?.signup_page?.title_text || "";
process.env.SIGNUP_PAGE_SUBTITLE_TEXT =
parsedPrivateConfig.branding?.signup_page?.subtitle_text || "";
process.env.RESOURCE_AUTH_PAGE_HIDE_POWERED_BY =
parsedPrivateConfig.branding?.resource_auth_page
?.hide_powered_by === true
? "true"
: "false";
process.env.RESOURCE_AUTH_PAGE_SHOW_LOGO =
parsedPrivateConfig.branding?.resource_auth_page?.show_logo ===
true
? "true"
: "false";
process.env.RESOURCE_AUTH_PAGE_TITLE_TEXT =
parsedPrivateConfig.branding?.resource_auth_page?.title_text ||
"";
process.env.RESOURCE_AUTH_PAGE_SUBTITLE_TEXT =
parsedPrivateConfig.branding?.resource_auth_page
?.subtitle_text || "";
if (parsedPrivateConfig.branding?.background_image_path) {
process.env.BACKGROUND_IMAGE_PATH =
parsedPrivateConfig.branding?.background_image_path;
}
if (parsedPrivateConfig.server.reo_client_id) {
process.env.REO_CLIENT_ID =
parsedPrivateConfig.server.reo_client_id;
}
if (parsedPrivateConfig.stripe?.s3Bucket) {
process.env.S3_BUCKET = parsedPrivateConfig.stripe.s3Bucket;
}
if (parsedPrivateConfig.stripe?.localFilePath) {
process.env.LOCAL_FILE_PATH =
parsedPrivateConfig.stripe.localFilePath;
}
if (parsedPrivateConfig.stripe?.s3Region) {
process.env.S3_REGION = parsedPrivateConfig.stripe.s3Region;
}
if (parsedPrivateConfig.flags.use_pangolin_dns) {
process.env.USE_PANGOLIN_DNS =
parsedPrivateConfig.flags.use_pangolin_dns.toString();
}
}
this.rawPrivateConfig = parsedPrivateConfig;
}
public getRawPrivateConfig() {
return this.rawPrivateConfig;
}
}
export const privateConfig = new PrivateConfig();
export default privateConfig;
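As a rough illustration of the contract this class establishes: downstream code (e.g. the Next.js layer) reads the exported environment variables rather than the config object itself. The consumer below is an assumption, not part of this diff:

```ts
// Hypothetical consumer of an env var set by PrivateConfig.
export function getBrandingAppName(): string {
    // PrivateConfig above defaults BRANDING_APP_NAME to "Pangolin".
    return process.env.BRANDING_APP_NAME ?? "Pangolin";
}
```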

View File

@@ -15,8 +15,9 @@ import axios from "axios";
import logger from "@server/logger";
import { db, ExitNode, remoteExitNodes } from "@server/db";
import { eq } from "drizzle-orm";
import { sendToClient } from "../../routers/ws";
import { config } from "../config";
import { sendToClient } from "#private/routers/ws";
import privateConfig from "#private/lib/config";
import config from "@server/lib/config";
interface ExitNodeRequest {
remoteType?: string;
@@ -56,16 +57,16 @@ export async function sendToExitNode(
} else {
let hostname = exitNode.reachableAt;
logger.debug(`Exit node details:`, {
type: exitNode.type,
name: exitNode.name,
reachableAt: exitNode.reachableAt,
});
// logger.debug(`Exit node details:`, {
// type: exitNode.type,
// name: exitNode.name,
// reachableAt: exitNode.reachableAt,
// });
logger.debug(`Configured local exit node name: ${config.getRawConfig().gerbil.exit_node_name}`);
// logger.debug(`Configured local exit node name: ${config.getRawConfig().gerbil.exit_node_name}`);
if (exitNode.name == config.getRawConfig().gerbil.exit_node_name) {
hostname = config.getRawPrivateConfig().gerbil.local_exit_node_reachable_at;
hostname = privateConfig.getRawPrivateConfig().gerbil.local_exit_node_reachable_at;
}
if (!hostname) {
@@ -74,10 +75,10 @@ export async function sendToExitNode(
);
}
logger.debug(`Sending request to exit node at ${hostname}`, {
type: request.remoteType,
data: request.data
});
// logger.debug(`Sending request to exit node at ${hostname}`, {
// type: request.remoteType,
// data: request.data
// });
// Handle local exit node with HTTP API
const method = request.method || "POST";
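For orientation, a hedged sketch of a caller; the message type and payload are invented for illustration, and only the `remoteType`/`data`/`method` fields of `ExitNodeRequest` appear in this diff:

```ts
// Illustrative only; "gerbil/update-config" and the payload are assumed.
await sendToExitNode(exitNode, {
    remoteType: "gerbil/update-config",
    method: "POST",
    data: { reason: "config changed" }
});
```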

View File

@@ -0,0 +1,15 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
export * from "./exitNodeComms";
export * from "./exitNodes";

View File

@@ -12,7 +12,7 @@
*/
import logger from "@server/logger";
import redisManager from "@server/db/private/redis";
import redisManager from "#private/lib/redis";
import { build } from "@server/build";
// Rate limiting configuration
@@ -451,8 +451,4 @@ export class RateLimitService {
}
// Export singleton instance
export const rateLimitService = new RateLimitService();
// Handle process termination
process.on("SIGTERM", () => rateLimitService.cleanup());
process.on("SIGINT", () => rateLimitService.cleanup());
export const rateLimitService = new RateLimitService();
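The removal above pairs with the new `initCleanup()` earlier in this diff: per-module signal handlers run in unspecified order and can exit the process before other teardown finishes. A hedged sketch of the consolidated pattern, with the websocket import assumed from the first file in this diff:

```ts
// One handler, explicit order, single exit. wsCleanup is assumed to be
// the cleanup export of "#private/routers/ws" shown earlier.
process.once("SIGTERM", async () => {
    await rateLimitService.cleanup();
    await wsCleanup();
    process.exit(0);
});
```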

Some files were not shown because too many files have changed in this diff Show More