Compare commits

..

7 Commits

Owen · 7fa44981cf · 2025-10-06 20:55:04 -07:00
    Merge branch 'adrianeastles-feature/resource-rule-templates' into policies

Owen · 1bacad7854 · 2025-10-06 20:54:43 -07:00
    Merge branch 'feature/resource-rule-templates' of github.com:adrianeastles/pangolin into adrianeastles-feature/resource-rule-templates

Adrian Astles · 75cec731e8 · 2025-08-08 19:30:26 +08:00
    Resource Rules page:
    - Split into 3 clear sections: Enabled Rules (with explanation), Rule Templates, and Resource Rules Configuration
    - Hide Rules Configuration when rules are disabled
    Rule Template pages:
    - Rules: adopt Settings section layout; right-aligned “Add Rule” button that opens a Create Rule dialog; remove inline add form; consistent table styling

Adrian Astles · 16a88281bb · 2025-08-07 23:49:56 +08:00
    Added better notifications for users when templates are updated.
    Added confirmation dialogs for destructive actions.
    Other improvements/changes:
    - When deleting rule templates, we now clean up all resource rules that were created from the template.

Adrian Astles · 1574cbc5df · 2025-08-07 23:23:20 +08:00
    Pagination for the template rules table and the resource rules table.

Adrian Astles · 2cb2a115b0 · 2025-08-07 23:14:24 +08:00
    Align template rules table columns with the resource rules page.

Adrian Astles · 9dce7b2cde · 2025-08-07 22:57:18 +08:00
    Scoped Branch - Rule Templates:
    - Add rule templates for reusable access control rules
    - Support template assignment to resources with automatic rule propagation
    - Add template management UI
    - Implement template rule protection on resource rules page
942 changed files with 33384 additions and 76497 deletions

View File

@@ -30,5 +30,3 @@ dist
 .git
 migrations/
 config/
-build.ts
-tsconfig.json

View File

@@ -1,3 +1,6 @@
 {
-    "extends": ["next/core-web-vitals", "next/typescript"]
+    "extends": [
+        "next/core-web-vitals",
+        "next/typescript"
+    ]
 }

View File

@@ -1,62 +1,34 @@
 name: CI/CD Pipeline
-# CI/CD workflow for building, publishing, mirroring, signing container images and building release binaries.
-# Actions are pinned to specific SHAs to reduce supply-chain risk. This workflow triggers on tag push events.
-permissions:
-    contents: read
-    packages: write # for GHCR push
-    id-token: write # for Cosign Keyless (OIDC) Signing
-# Required secrets:
-# - DOCKER_HUB_USERNAME / DOCKER_HUB_ACCESS_TOKEN: push to Docker Hub
-# - GITHUB_TOKEN: used for GHCR login and OIDC keyless signing
-# - COSIGN_PRIVATE_KEY / COSIGN_PASSWORD / COSIGN_PUBLIC_KEY: for key-based signing
 on:
     push:
         tags:
             - "[0-9]+.[0-9]+.[0-9]+"
-            - "[0-9]+.[0-9]+.[0-9]+.rc.[0-9]+"
-concurrency:
-    group: ${{ github.ref }}
-    cancel-in-progress: true
 jobs:
     release:
         name: Build and Release
-        runs-on: [self-hosted, linux, x64]
+        runs-on: ubuntu-latest
-        # Job-level timeout to avoid runaway or stuck runs
-        timeout-minutes: 120
-        env:
-            # Target images
-            DOCKERHUB_IMAGE: docker.io/fosrl/${{ github.event.repository.name }}
-            GHCR_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}
         steps:
            - name: Checkout code
-              uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
+              uses: actions/checkout@v5
-            - name: Set up QEMU
-              uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0
            - name: Set up Docker Buildx
-              uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1
+              uses: docker/setup-buildx-action@v3
            - name: Log in to Docker Hub
-              uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
+              uses: docker/login-action@v3
              with:
-                  registry: docker.io
                  username: ${{ secrets.DOCKER_HUB_USERNAME }}
                  password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
            - name: Extract tag name
              id: get-tag
              run: echo "TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
-              shell: bash
            - name: Install Go
-              uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
+              uses: actions/setup-go@v6
              with:
                  go-version: 1.24
@@ -65,21 +37,18 @@ jobs:
                  TAG=${{ env.TAG }}
                  sed -i "s/export const APP_VERSION = \".*\";/export const APP_VERSION = \"$TAG\";/" server/lib/consts.ts
                  cat server/lib/consts.ts
-              shell: bash
            - name: Pull latest Gerbil version
              id: get-gerbil-tag
              run: |
                  LATEST_TAG=$(curl -s https://api.github.com/repos/fosrl/gerbil/tags | jq -r '.[0].name')
                  echo "LATEST_GERBIL_TAG=$LATEST_TAG" >> $GITHUB_ENV
-              shell: bash
            - name: Pull latest Badger version
              id: get-badger-tag
              run: |
                  LATEST_TAG=$(curl -s https://api.github.com/repos/fosrl/badger/tags | jq -r '.[0].name')
                  echo "LATEST_BADGER_TAG=$LATEST_TAG" >> $GITHUB_ENV
-              shell: bash
            - name: Update install/main.go
              run: |
@@ -91,7 +60,6 @@ jobs:
                  sed -i "s/config.BadgerVersion = \".*\"/config.BadgerVersion = \"$BADGER_VERSION\"/" install/main.go
                  echo "Updated install/main.go with Pangolin version $PANGOLIN_VERSION, Gerbil version $GERBIL_VERSION, and Badger version $BADGER_VERSION"
                  cat install/main.go
-              shell: bash
            - name: Build installer
              working-directory: install
@@ -99,89 +67,12 @@
                  make go-build-release
            - name: Upload artifacts from /install/bin
-              uses: actions/upload-artifact@330a01c490aca151604b8cf639adc76d48f6c5d4 # v5.0.0
+              uses: actions/upload-artifact@v4
              with:
                  name: install-bin
                  path: install/bin/
-            - name: Build and push Docker images (Docker Hub)
+            - name: Build and push Docker images
              run: |
                  TAG=${{ env.TAG }}
-                  make -j4 build-release tag=$TAG
+                  make build-release tag=$TAG
-                  echo "Built & pushed to: ${{ env.DOCKERHUB_IMAGE }}:${TAG}"
-              shell: bash
-            - name: Install skopeo + jq
-              # skopeo: copy/inspect images between registries
-              # jq: JSON parsing tool used to extract digest values
-              run: |
-                  sudo apt-get update -y
-                  sudo apt-get install -y skopeo jq
-                  skopeo --version
-              shell: bash
-            - name: Login to GHCR
-              run: |
-                  skopeo login ghcr.io -u "${{ github.actor }}" -p "${{ secrets.GITHUB_TOKEN }}"
-              shell: bash
-            - name: Copy tag from Docker Hub to GHCR
-              # Mirror the already-built image (all architectures) to GHCR so we can sign it
-              run: |
-                  set -euo pipefail
-                  TAG=${{ env.TAG }}
-                  echo "Copying ${{ env.DOCKERHUB_IMAGE }}:${TAG} -> ${{ env.GHCR_IMAGE }}:${TAG}"
-                  skopeo copy --all --retry-times 3 \
-                      docker://$DOCKERHUB_IMAGE:$TAG \
-                      docker://$GHCR_IMAGE:$TAG
-              shell: bash
-            - name: Login to GitHub Container Registry (for cosign)
-              uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
-              with:
-                  registry: ghcr.io
-                  username: ${{ github.actor }}
-                  password: ${{ secrets.GITHUB_TOKEN }}
-            - name: Install cosign
-              # cosign is used to sign and verify container images (key and keyless)
-              uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
-            - name: Dual-sign and verify (GHCR & Docker Hub)
-              # Sign each image by digest using keyless (OIDC) and key-based signing,
-              # then verify both the public key signature and the keyless OIDC signature.
-              env:
-                  TAG: ${{ env.TAG }}
-                  COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
-                  COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
-                  COSIGN_PUBLIC_KEY: ${{ secrets.COSIGN_PUBLIC_KEY }}
-                  COSIGN_YES: "true"
-              run: |
-                  set -euo pipefail
-                  issuer="https://token.actions.githubusercontent.com"
-                  id_regex="^https://github.com/${{ github.repository }}/.+" # accept this repo (all workflows/refs)
-                  for IMAGE in "${GHCR_IMAGE}" "${DOCKERHUB_IMAGE}"; do
-                      echo "Processing ${IMAGE}:${TAG}"
-                      DIGEST="$(skopeo inspect --retry-times 3 docker://${IMAGE}:${TAG} | jq -r '.Digest')"
-                      REF="${IMAGE}@${DIGEST}"
-                      echo "Resolved digest: ${REF}"
-                      echo "==> cosign sign (keyless) --recursive ${REF}"
-                      cosign sign --recursive "${REF}"
-                      echo "==> cosign sign (key) --recursive ${REF}"
-                      cosign sign --key env://COSIGN_PRIVATE_KEY --recursive "${REF}"
-                      echo "==> cosign verify (public key) ${REF}"
-                      cosign verify --key env://COSIGN_PUBLIC_KEY "${REF}" -o text
-                      echo "==> cosign verify (keyless policy) ${REF}"
-                      cosign verify \
-                          --certificate-oidc-issuer "${issuer}" \
-                          --certificate-identity-regexp "${id_regex}" \
-                          "${REF}" -o text
-                  done
-              shell: bash
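
To make the dual-signing flow above concrete, here is a minimal sketch of how a consumer could check both signatures on a published image. The cosign and skopeo flags are the ones the workflow itself uses; the image name, tag, and cosign.pub path are placeholders, not values confirmed by this diff.

    #!/usr/bin/env bash
    # Sketch: verify a dual-signed image the way the workflow's verify steps do.
    # Assumes cosign, skopeo, and jq are installed; IMAGE, TAG, and cosign.pub are placeholders.
    set -euo pipefail

    IMAGE="ghcr.io/fosrl/pangolin"   # placeholder image reference
    TAG="1.2.3"                      # placeholder tag

    # Resolve the tag to a digest so verification targets exactly what was signed.
    DIGEST="$(skopeo inspect "docker://${IMAGE}:${TAG}" | jq -r '.Digest')"
    REF="${IMAGE}@${DIGEST}"

    # Key-based check against the project's published public key (path assumed).
    cosign verify --key cosign.pub "${REF}" -o text

    # Keyless check: the certificate must come from GitHub's OIDC issuer and
    # identify a workflow in the repository, mirroring the workflow's policy.
    cosign verify \
        --certificate-oidc-issuer "https://token.actions.githubusercontent.com" \
        --certificate-identity-regexp "^https://github.com/fosrl/pangolin/.+" \
        "${REF}" -o text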

View File

@@ -1,8 +1,5 @@
 name: ESLint
-permissions:
-    contents: read
 on:
     pull_request:
         paths:
@@ -21,10 +18,10 @@ jobs:
         runs-on: ubuntu-latest
         steps:
            - name: Checkout code
-              uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
+              uses: actions/checkout@v5
            - name: Set up Node.js
-              uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0
+              uses: actions/setup-node@v5
              with:
                  node-version: '22'

View File

@@ -1,132 +0,0 @@
-name: Mirror & Sign (Docker Hub to GHCR)
-on:
-    workflow_dispatch: {}
-permissions:
-    contents: read
-    packages: write
-    id-token: write # for keyless OIDC
-env:
-    SOURCE_IMAGE: docker.io/fosrl/pangolin
-    DEST_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}
-jobs:
-    mirror-and-dual-sign:
-        runs-on: amd64-runner
-        steps:
-            - name: Install skopeo + jq
-              run: |
-                  sudo apt-get update -y
-                  sudo apt-get install -y skopeo jq
-                  skopeo --version
-            - name: Install cosign
-              uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
-            - name: Input check
-              run: |
-                  test -n "${SOURCE_IMAGE}" || (echo "SOURCE_IMAGE is empty" && exit 1)
-                  echo "Source : ${SOURCE_IMAGE}"
-                  echo "Target : ${DEST_IMAGE}"
-            # Auth for skopeo (containers-auth)
-            - name: Skopeo login to GHCR
-              run: |
-                  skopeo login ghcr.io -u "${{ github.actor }}" -p "${{ secrets.GITHUB_TOKEN }}"
-            # Auth for cosign (docker-config)
-            - name: Docker login to GHCR (for cosign)
-              run: |
-                  echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u "${{ github.actor }}" --password-stdin
-            - name: List source tags
-              run: |
-                  set -euo pipefail
-                  skopeo list-tags --retry-times 3 docker://"${SOURCE_IMAGE}" \
-                      | jq -r '.Tags[]' | sort -u > src-tags.txt
-                  echo "Found source tags: $(wc -l < src-tags.txt)"
-                  head -n 20 src-tags.txt || true
-            - name: List destination tags (skip existing)
-              run: |
-                  set -euo pipefail
-                  if skopeo list-tags --retry-times 3 docker://"${DEST_IMAGE}" >/tmp/dst.json 2>/dev/null; then
-                      jq -r '.Tags[]' /tmp/dst.json | sort -u > dst-tags.txt
-                  else
-                      : > dst-tags.txt
-                  fi
-                  echo "Existing destination tags: $(wc -l < dst-tags.txt)"
-            - name: Mirror, dual-sign, and verify
-              env:
-                  # keyless
-                  COSIGN_YES: "true"
-                  # key-based
-                  COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
-                  COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
-                  # verify
-                  COSIGN_PUBLIC_KEY: ${{ secrets.COSIGN_PUBLIC_KEY }}
-              run: |
-                  set -euo pipefail
-                  copied=0; skipped=0; v_ok=0; errs=0
-                  issuer="https://token.actions.githubusercontent.com"
-                  id_regex="^https://github.com/${{ github.repository }}/.+"
-                  while read -r tag; do
-                      [ -z "$tag" ] && continue
-                      if grep -Fxq "$tag" dst-tags.txt; then
-                          echo "::notice ::Skip (exists) ${DEST_IMAGE}:${tag}"
-                          skipped=$((skipped+1))
-                          continue
-                      fi
-                      echo "==> Copy ${SOURCE_IMAGE}:${tag} → ${DEST_IMAGE}:${tag}"
-                      if ! skopeo copy --all --retry-times 3 \
-                          docker://"${SOURCE_IMAGE}:${tag}" docker://"${DEST_IMAGE}:${tag}"; then
-                          echo "::warning title=Copy failed::${SOURCE_IMAGE}:${tag}"
-                          errs=$((errs+1)); continue
-                      fi
-                      copied=$((copied+1))
-                      digest="$(skopeo inspect --retry-times 3 docker://"${DEST_IMAGE}:${tag}" | jq -r '.Digest')"
-                      ref="${DEST_IMAGE}@${digest}"
-                      echo "==> cosign sign (keyless) --recursive ${ref}"
-                      if ! cosign sign --recursive "${ref}"; then
-                          echo "::warning title=Keyless sign failed::${ref}"
-                          errs=$((errs+1))
-                      fi
-                      echo "==> cosign sign (key) --recursive ${ref}"
-                      if ! cosign sign --key env://COSIGN_PRIVATE_KEY --recursive "${ref}"; then
-                          echo "::warning title=Key sign failed::${ref}"
-                          errs=$((errs+1))
-                      fi
-                      echo "==> cosign verify (public key) ${ref}"
-                      if ! cosign verify --key env://COSIGN_PUBLIC_KEY "${ref}" -o text; then
-                          echo "::warning title=Verify(pubkey) failed::${ref}"
-                          errs=$((errs+1))
-                      fi
-                      echo "==> cosign verify (keyless policy) ${ref}"
-                      if ! cosign verify \
-                          --certificate-oidc-issuer "${issuer}" \
-                          --certificate-identity-regexp "${id_regex}" \
-                          "${ref}" -o text; then
-                          echo "::warning title=Verify(keyless) failed::${ref}"
-                          errs=$((errs+1))
-                      else
-                          v_ok=$((v_ok+1))
-                      fi
-                  done < src-tags.txt
-                  echo "---- Summary ----"
-                  echo "Copied      : $copied"
-                  echo "Skipped     : $skipped"
-                  echo "Verified OK : $v_ok"
-                  echo "Errors      : $errs"

View File

@@ -1,39 +0,0 @@
-name: Restart Runners
-on:
-    schedule:
-        - cron: '0 0 */7 * *'
-permissions:
-    id-token: write
-    contents: read
-jobs:
-    ec2-maintenance-prod:
-        runs-on: ubuntu-latest
-        permissions: write-all
-        steps:
-            - name: Configure AWS credentials
-              uses: aws-actions/configure-aws-credentials@v2
-              with:
-                  role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/${{ secrets.AWS_ROLE_NAME }}
-                  role-duration-seconds: 3600
-                  aws-region: ${{ secrets.AWS_REGION }}
-            - name: Verify AWS identity
-              run: aws sts get-caller-identity
-            - name: Start EC2 instance
-              run: |
-                  aws ec2 start-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_ARM_RUNNER }}
-                  aws ec2 start-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_AMD_RUNNER }}
-                  echo "EC2 instances started"
-            - name: Wait
-              run: sleep 600
-            - name: Stop EC2 instance
-              run: |
-                  aws ec2 stop-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_ARM_RUNNER }}
-                  aws ec2 stop-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_AMD_RUNNER }}
-                  echo "EC2 instances stopped"

View File

@@ -14,7 +14,7 @@ jobs:
     stale:
         runs-on: ubuntu-latest
         steps:
-            - uses: actions/stale@997185467fa4f803885201cee163a9f38240193d # v10.1.1
+            - uses: actions/stale@v10
              with:
                  days-before-stale: 14
                  days-before-close: 14

View File

@@ -1,8 +1,5 @@
 name: Run Tests
-permissions:
-    contents: read
 on:
     pull_request:
         branches:
@@ -14,9 +11,9 @@ jobs:
         runs-on: ubuntu-latest
         steps:
-            - uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
+            - uses: actions/checkout@v5
-            - uses: actions/setup-node@395ad3262231945c25e8478fd5baf05154b1d79f # v6.1.0
+            - uses: actions/setup-node@v5
              with:
                  node-version: '22'

.gitignore
View File

@@ -48,6 +48,3 @@ server/build.ts
 postgres/
 dynamic/
 *.mmdb
-scratch/
-tsconfig.json
-hydrateSaas.ts

.nvmrc
View File

@@ -1 +1 @@
-24
+22

View File

@@ -1,12 +0,0 @@
-.github/
-bruno/
-cli/
-config/
-messages/
-next.config.mjs/
-public/
-tailwind.config.js/
-test/
-**/*.yml
-**/*.yaml
-**/*.md

View File

@@ -1,3 +0,0 @@
-{
-    "recommendations": ["esbenp.prettier-vscode"]
-}

.vscode/settings.json
View File

@@ -1,22 +0,0 @@
-{
-    "editor.codeActionsOnSave": {
-        "source.addMissingImports.ts": "always"
-    },
-    "editor.defaultFormatter": "esbenp.prettier-vscode",
-    "[jsonc]": {
-        "editor.defaultFormatter": "esbenp.prettier-vscode"
-    },
-    "[javascript]": {
-        "editor.defaultFormatter": "esbenp.prettier-vscode"
-    },
-    "[typescript]": {
-        "editor.defaultFormatter": "esbenp.prettier-vscode"
-    },
-    "[typescriptreact]": {
-        "editor.defaultFormatter": "esbenp.prettier-vscode"
-    },
-    "[json]": {
-        "editor.defaultFormatter": "esbenp.prettier-vscode"
-    },
-    "editor.formatOnSave": true
-}

View File

@@ -4,7 +4,7 @@ Contributions are welcome!
 Please see the contribution and local development guide on the docs page before getting started:
-https://docs.pangolin.net/development/contributing
+https://docs.digpangolin.com/development/contributing
 ### Licensing Considerations

View File

@@ -1,12 +1,10 @@
-FROM node:24-alpine AS builder
+FROM node:22-alpine AS builder
 WORKDIR /app
 ARG BUILD=oss
 ARG DATABASE=sqlite
-RUN apk add --no-cache curl tzdata python3 make g++
 # COPY package.json package-lock.json ./
 COPY package*.json ./
 RUN npm ci
@@ -14,42 +12,20 @@ RUN npm ci
 COPY . .
 RUN echo "export * from \"./$DATABASE\";" > server/db/index.ts
-RUN echo "export const driver: \"pg\" | \"sqlite\" = \"$DATABASE\";" >> server/db/index.ts
-RUN echo "export const build = \"$BUILD\" as \"saas\" | \"enterprise\" | \"oss\";" > server/build.ts
-# Copy the appropriate TypeScript configuration based on build type
-RUN if [ "$BUILD" = "oss" ]; then cp tsconfig.oss.json tsconfig.json; \
-    elif [ "$BUILD" = "saas" ]; then cp tsconfig.saas.json tsconfig.json; \
-    elif [ "$BUILD" = "enterprise" ]; then cp tsconfig.enterprise.json tsconfig.json; \
-    fi
-# if the build is oss then remove the server/private directory
-RUN if [ "$BUILD" = "oss" ]; then rm -rf server/private; fi
-RUN if [ "$DATABASE" = "pg" ]; then npx drizzle-kit generate --dialect postgresql --schema ./server/db/pg/schema --out init; else npx drizzle-kit generate --dialect $DATABASE --schema ./server/db/$DATABASE/schema --out init; fi
-RUN mkdir -p dist
-RUN npm run next:build
-RUN node esbuild.mjs -e server/index.ts -o dist/server.mjs -b $BUILD
-RUN if [ "$DATABASE" = "pg" ]; then \
-    node esbuild.mjs -e server/setup/migrationsPg.ts -o dist/migrations.mjs; \
-    else \
-    node esbuild.mjs -e server/setup/migrationsSqlite.ts -o dist/migrations.mjs; \
-    fi
-# test to make sure the build output is there and error if not
-RUN test -f dist/server.mjs
+RUN echo "export const build = \"$BUILD\" as any;" > server/build.ts
+RUN if [ "$DATABASE" = "pg" ]; then npx drizzle-kit generate --dialect postgresql --schema ./server/db/pg/schema.ts --out init; else npx drizzle-kit generate --dialect $DATABASE --schema ./server/db/$DATABASE/schema.ts --out init; fi
+RUN npm run build:$DATABASE
 RUN npm run build:cli
-FROM node:24-alpine AS runner
+FROM node:22-alpine AS runner
 WORKDIR /app
 # Curl used for the health checks
-# Python and build tools needed for better-sqlite3 native compilation
-RUN apk add --no-cache curl tzdata python3 make g++
+RUN apk add --no-cache curl tzdata
 # COPY package.json package-lock.json ./
 COPY package*.json ./
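
The Dockerfile above selects the database flavor (and, on the left-hand side, the build edition) through build args; a short sketch of local builds follows, with image tags as placeholders.

    # Sketch: local single-arch builds using the build args the Dockerfile expects.
    # Image tags are placeholders.
    docker build --build-arg DATABASE=sqlite -t pangolin:sqlite-local .
    docker build --build-arg DATABASE=pg -t pangolin:pg-local .

    # The left-hand Dockerfile also branches on a BUILD edition arg:
    docker build --build-arg BUILD=oss --build-arg DATABASE=sqlite -t pangolin:oss-local .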

View File

@@ -1,19 +1,13 @@
-.PHONY: build dev-build-sqlite dev-build-pg build-release build-arm build-x86 test clean
+.PHONY: build build-pg build-release build-arm build-x86 test clean
 major_tag := $(shell echo $(tag) | cut -d. -f1)
 minor_tag := $(shell echo $(tag) | cut -d. -f1,2)
-.PHONY: build-release build-sqlite build-postgresql build-ee-sqlite build-ee-postgresql
-build-release: build-sqlite build-postgresql build-ee-sqlite build-ee-postgresql
-build-sqlite:
+build-release:
	@if [ -z "$(tag)" ]; then \
		echo "Error: tag is required. Usage: make build-release tag=<tag>"; \
		exit 1; \
	fi
	docker buildx build \
-		--build-arg BUILD=oss \
		--build-arg DATABASE=sqlite \
		--platform linux/arm64,linux/amd64 \
		--tag fosrl/pangolin:latest \
@@ -21,14 +15,7 @@ build-sqlite:
		--tag fosrl/pangolin:$(minor_tag) \
		--tag fosrl/pangolin:$(tag) \
		--push .
-build-postgresql:
-	@if [ -z "$(tag)" ]; then \
-		echo "Error: tag is required. Usage: make build-release tag=<tag>"; \
-		exit 1; \
-	fi
	docker buildx build \
-		--build-arg BUILD=oss \
		--build-arg DATABASE=pg \
		--platform linux/arm64,linux/amd64 \
		--tag fosrl/pangolin:postgresql-latest \
@@ -37,76 +24,16 @@ build-postgresql:
		--tag fosrl/pangolin:postgresql-$(tag) \
		--push .
-build-ee-sqlite:
-	@if [ -z "$(tag)" ]; then \
-		echo "Error: tag is required. Usage: make build-release tag=<tag>"; \
-		exit 1; \
-	fi
-	docker buildx build \
-		--build-arg BUILD=enterprise \
-		--build-arg DATABASE=sqlite \
-		--platform linux/arm64,linux/amd64 \
-		--tag fosrl/pangolin:ee-latest \
-		--tag fosrl/pangolin:ee-$(major_tag) \
-		--tag fosrl/pangolin:ee-$(minor_tag) \
-		--tag fosrl/pangolin:ee-$(tag) \
-		--push .
-build-ee-postgresql:
-	@if [ -z "$(tag)" ]; then \
-		echo "Error: tag is required. Usage: make build-release tag=<tag>"; \
-		exit 1; \
-	fi
-	docker buildx build \
-		--build-arg BUILD=enterprise \
-		--build-arg DATABASE=pg \
-		--platform linux/arm64,linux/amd64 \
-		--tag fosrl/pangolin:ee-postgresql-latest \
-		--tag fosrl/pangolin:ee-postgresql-$(major_tag) \
-		--tag fosrl/pangolin:ee-postgresql-$(minor_tag) \
-		--tag fosrl/pangolin:ee-postgresql-$(tag) \
-		--push .
-build-rc:
-	@if [ -z "$(tag)" ]; then \
-		echo "Error: tag is required. Usage: make build-release tag=<tag>"; \
-		exit 1; \
-	fi
-	docker buildx build \
-		--build-arg BUILD=oss \
-		--build-arg DATABASE=sqlite \
-		--platform linux/arm64,linux/amd64 \
-		--tag fosrl/pangolin:$(tag) \
-		--push .
-	docker buildx build \
-		--build-arg BUILD=oss \
-		--build-arg DATABASE=pg \
-		--platform linux/arm64,linux/amd64 \
-		--tag fosrl/pangolin:postgresql-$(tag) \
-		--push .
-	docker buildx build \
-		--build-arg BUILD=enterprise \
-		--build-arg DATABASE=sqlite \
-		--platform linux/arm64,linux/amd64 \
-		--tag fosrl/pangolin:ee-$(tag) \
-		--push .
-	docker buildx build \
-		--build-arg BUILD=enterprise \
-		--build-arg DATABASE=pg \
-		--platform linux/arm64,linux/amd64 \
-		--tag fosrl/pangolin:ee-postgresql-$(tag) \
-		--push .
 build-arm:
	docker buildx build --platform linux/arm64 -t fosrl/pangolin:latest .
 build-x86:
	docker buildx build --platform linux/amd64 -t fosrl/pangolin:latest .
-dev-build-sqlite:
+build-sqlite:
	docker build --build-arg DATABASE=sqlite -t fosrl/pangolin:latest .
-dev-build-pg:
+build-pg:
	docker build --build-arg DATABASE=pg -t fosrl/pangolin:postgresql-latest .
 test:
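
For reference, a sketch of invoking these targets; the tag value is a placeholder, and the major/minor tags are derived by the cut calls at the top of the Makefile.

    # Sketch: release build with a placeholder tag. The Makefile derives
    # major_tag=1 and minor_tag=1.2 from tag=1.2.3 via `cut`.
    make build-release tag=1.2.3

    # Single-arch dev builds, no push (target names per the right-hand side):
    make build-sqlite
    make build-pg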

README.md
View File

@@ -1,101 +1,157 @@
 <div align="center">
     <h2>
-        <a href="https://pangolin.net/">
-            <picture>
-                <source media="(prefers-color-scheme: dark)" srcset="public/logo/word_mark_white.png">
-                <img alt="Pangolin Logo" src="public/logo/word_mark_black.png" width="350">
+        <picture>
+            <source media="(prefers-color-scheme: dark)" srcset="public/logo/word_mark_white.png">
+            <img alt="Pangolin Logo" src="public/logo/word_mark_black.png" width="250">
         </picture>
-        </a>
     </h2>
 </div>
-<h4 align="center">Secure gateway to your private networks</h4>
-<div align="center">
-_Pangolin tunnels your services to the internet so you can access anything from anywhere._
-</div>
 <div align="center">
     <h5>
-        <a href="https://pangolin.net/">
+        <a href="https://digpangolin.com">
             Website
         </a>
         <span> | </span>
-        <a href="https://docs.pangolin.net/">
-            Documentation
+        <a href="https://docs.digpangolin.com/self-host/quick-install-managed">
+            Quick Install Guide
         </a>
         <span> | </span>
-        <a href="mailto:contact@pangolin.net">
+        <a href="mailto:contact@fossorial.io">
             Contact Us
         </a>
+        <span> | </span>
+        <a href="https://digpangolin.com/slack">
+            Slack
+        </a>
+        <span> | </span>
+        <a href="https://discord.gg/HCJR8Xhme4">
+            Discord
+        </a>
     </h5>
-</div>
-<div align="center">
-[![Slack](https://img.shields.io/badge/chat-slack-yellow?style=flat-square&logo=slack)](https://pangolin.net/slack)
+[![Slack](https://img.shields.io/badge/chat-slack-yellow?style=flat-square&logo=slack)](https://digpangolin.com/slack)
+[![Discord](https://img.shields.io/discord/1325658630518865980?logo=discord&style=flat-square)](https://discord.gg/HCJR8Xhme4)
 [![Docker](https://img.shields.io/docker/pulls/fosrl/pangolin?style=flat-square)](https://hub.docker.com/r/fosrl/pangolin)
 ![Stars](https://img.shields.io/github/stars/fosrl/pangolin?style=flat-square)
-[![Discord](https://img.shields.io/discord/1325658630518865980?logo=discord&style=flat-square)](https://discord.gg/HCJR8Xhme4)
 [![YouTube](https://img.shields.io/badge/YouTube-red?logo=youtube&logoColor=white&style=flat-square)](https://www.youtube.com/@fossorial-app)
 </div>
 <p align="center">
     <strong>
-        Start testing Pangolin at <a href="https://app.pangolin.net/auth/signup">app.pangolin.net</a>
+        Start testing Pangolin at <a href="https://pangolin.fossorial.io/auth/signup">pangolin.fossorial.io</a>
     </strong>
 </p>
-Pangolin is an open-source, identity-based remote access platform built on WireGuard that enables secure, seamless connectivity to private and public resources. Pangolin combines reverse proxy and VPN capabilities into one platform, providing browser-based access to web applications and client-based access to any private resources, all with zero-trust security and granular access control.
-## Installation
-- Check out the [quick install guide](https://docs.pangolin.net/self-host/quick-install) for how to install and set up Pangolin.
-- Install from the [DigitalOcean marketplace](https://marketplace.digitalocean.com/apps/pangolin-ce-1?refcode=edf0480eeb81) for a one-click pre-configured installer.
-<img src="public/screenshots/hero.png" />
-## Deployment Options
-| <img width=500 /> | Description |
-|-----------------|--------------|
-| **Self-Host: Community Edition** | Free, open source, and licensed under AGPL-3. |
-| **Self-Host: Enterprise Edition** | Licensed under Fossorial Commercial License. Free for personal and hobbyist use, and for businesses earning under \$100K USD annually. |
-| **Pangolin Cloud** | Fully managed service with instant setup and pay-as-you-go pricing — no infrastructure required. Or, self-host your own [remote node](https://docs.pangolin.net/manage/remote-node/nodes) and connect to our control plane. |
+Pangolin is a self-hosted tunneled reverse proxy server with identity and access control, designed to securely expose private resources on distributed networks. Acting as a central hub, it connects isolated networks — even those behind restrictive firewalls — through encrypted tunnels, enabling easy access to remote services without opening ports.
+<img src="public/screenshots/hero.png" alt="Preview"/>
+![gif](public/clip.gif)
 ## Key Features
-| <img width=500 /> | <img width=500 /> |
-|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------|
-| **Connect remote networks with sites**<br /><br />Pangolin's lightweight site connectors create secure tunnels from remote networks without requiring public IP addresses or open ports. Sites make any network anywhere available for authorized access. | <img src="public/screenshots/sites.png" width=500 /><tr></tr> |
-| **Browser-based reverse proxy access**<br /><br />Expose web applications through identity and context-aware tunneled reverse proxies. Pangolin handles routing, load balancing, health checking, and automatic SSL certificates without exposing your network directly to the internet. Users access applications through any web browser with authentication and granular access control. | <img src="public/clip.gif" width=500 /><tr></tr> |
-| **Client-based private resource access**<br /><br />Access private resources like SSH servers, databases, RDP, and entire network ranges through Pangolin clients. Intelligent NAT traversal enables connections even through restrictive firewalls, while DNS aliases provide friendly names and fast connections to resources across all your sites. | <img src="public/screenshots/private-resources.png" width=500 /><tr></tr> |
-| **Zero-trust granular access**<br /><br />Grant users access to specific resources, not entire networks. Unlike traditional VPNs that expose full network access, Pangolin's zero-trust model ensures users can only reach the applications and services you explicitly define, reducing security risk and attack surface. | <img src="public/screenshots/user-devices.png" width=500 /><tr></tr> |
-## Download Clients
-Download the Pangolin client for your platform:
-- [Mac](https://pangolin.net/downloads/mac)
-- [Windows](https://pangolin.net/downloads/windows)
-- [Linux](https://pangolin.net/downloads/linux)
+### Reverse Proxy Through WireGuard Tunnel
+- Expose private resources on your network **without opening ports** (firewall punching).
+- Secure and easy to configure private connectivity via a custom **user space WireGuard client**, [Newt](https://github.com/fosrl/newt).
+- Built-in support for any WireGuard client.
+- Automated **SSL certificates** (https) via [LetsEncrypt](https://letsencrypt.org/).
+- Support for HTTP/HTTPS and **raw TCP/UDP services**.
+- Load balancing.
+- Extend functionality with existing [Traefik](https://github.com/traefik/traefik) plugins, such as [CrowdSec](https://plugins.traefik.io/plugins/6335346ca4caa9ddeffda116/crowdsec-bouncer-traefik-plugin) and [Geoblock](https://github.com/PascalMinder/geoblock).
+- **Automatically install and configure Crowdsec via Pangolin's installer script.**
+- Attach as many sites to the central server as you wish.
+### Identity & Access Management
+- Centralized authentication system using platform SSO. **Users will only have to manage one login.**
+- **Define access control rules for IPs, IP ranges, and URL paths per resource.**
+- TOTP with backup codes for two-factor authentication.
+- Create organizations, each with multiple sites, users, and roles.
+- **Role-based access control** to manage resource access permissions.
+- Additional authentication options include:
+    - Email whitelisting with **one-time passcodes.**
+    - **Temporary, self-destructing share links.**
+    - Resource specific pin codes.
+    - Resource specific passwords.
+    - Passkeys
+- External identity provider (IdP) support with OAuth2/OIDC, such as Authentik, Keycloak, Okta, and others.
+    - Auto-provision users and roles from your IdP.
-## Get Started
-### Check out the docs
-We encourage everyone to read the full documentation first, which is
-available at [docs.pangolin.net](https://docs.pangolin.net). This README provides only a very brief subset of
-the docs to illustrate some basic ideas.
-### Sign up and try now
-For Pangolin's managed service, you will first need to create an account at
-[app.pangolin.net](https://app.pangolin.net). We have a generous free tier to get started.
+<img src="public/auth-diagram1.png" alt="Auth and diagram"/>
+## Use Cases
+### Manage Access to Internal Apps
+- Grant users access to your apps from anywhere using just a web browser. No client software required.
+### Developers and DevOps
+- Expose and test internal tools and dashboards like **Grafana**. Bring localhost or private IPs online for easy access.
+### Secure API Gateway
+- One application load balancer across multiple clouds and on-premises.
+### IoT and Edge Devices
+- Easily expose **IoT devices**, **edge servers**, or **Raspberry Pi** to the internet for field equipment monitoring.
+<img src="public/screenshots/sites.png" alt="Sites"/>
+## Deployment Options
+### Fully Self Hosted
+Host the full application on your own server or on the cloud with a VPS. Take a look at the [documentation](https://docs.digpangolin.com/self-host/quick-install) to get started.
+> Many of our users have had a great experience with [RackNerd](https://my.racknerd.com/aff.php?aff=13788). Depending on promotions, you can get a [**VPS with 1 vCPU, 1GB RAM, and ~20GB SSD for just around $12/year**](https://my.racknerd.com/aff.php?aff=13788&pid=912). That's a great deal!
+### Pangolin Cloud
+Easy to use with simple [pay as you go pricing](https://digpangolin.com/pricing). [Check it out here](https://pangolin.fossorial.io/auth/signup).
+- Everything you get with self hosted Pangolin, but fully managed for you.
+### Managed & High Availability
+Managed control plane, your infrastructure
+- We manage database and control plane.
+- You self-host lightweight exit-node.
+- Traffic flows through your infra.
+- We coordinate failover between your nodes or to Cloud when things go bad.
+Try it out using [Pangolin Cloud](https://pangolin.fossorial.io)
+### Full Enterprise On-Premises
+[Contact us](mailto:numbat@fossorial.io) for a full distributed and enterprise deployments on your infrastructure controlled by your team.
+## Project Development / Roadmap
+We want to hear your feature requests! Add them to the [discussion board](https://github.com/orgs/fosrl/discussions/categories/feature-requests).
 ## Licensing
-Pangolin is dual licensed under the AGPL-3 and the [Fossorial Commercial License](https://pangolin.net/fcl.html). For inquiries about commercial licensing, please contact us at [contact@pangolin.net](mailto:contact@pangolin.net).
+Pangolin is dual licensed under the AGPL-3 and the Fossorial Commercial license. For inquiries about commercial licensing, please contact us at [numbat@fossorial.io](mailto:numbat@fossorial.io).
 ## Contributions
+Looking for something to contribute? Take a look at issues marked with [help wanted](https://github.com/fosrl/pangolin/issues?q=is%3Aissue%20state%3Aopen%20label%3A%22help%20wanted%22). Also take a look through the freature requests in Discussions - any are available and some are marked as a good first issue.
 Please see [CONTRIBUTING](./CONTRIBUTING.md) in the repository for guidelines and best practices.
----
-WireGuard® is a registered trademark of Jason A. Donenfeld.
+Please post bug reports and other functional issues in the [Issues](https://github.com/fosrl/pangolin/issues) section of the repository.
+If you are looking to help with translations, please contribute [on Crowdin](https://crowdin.com/project/fossorial-pangolin) or open a PR with changes to the translations files found in `messages/`.

View File

@@ -3,7 +3,7 @@
 If you discover a security vulnerability, please follow the steps below to responsibly disclose it to us:
 1. **Do not create a public GitHub issue or discussion post.** This could put the security of other users at risk.
-2. Send a detailed report to [security@pangolin.net](mailto:security@pangolin.net) or send a **private** message to a maintainer on [Discord](https://discord.gg/HCJR8Xhme4). Include:
+2. Send a detailed report to [security@fossorial.io](mailto:security@fossorial.io) or send a **private** message to a maintainer on [Discord](https://discord.gg/HCJR8Xhme4). Include:
     - Description and location of the vulnerability.
     - Potential impact of the vulnerability.

View File

@@ -8,7 +8,7 @@ import base64
 YAML_FILE_PATH = 'blueprint.yaml'
 # The API endpoint and headers from the curl request
-API_URL = 'http://api.pangolin.net/v1/org/test/blueprint'
+API_URL = 'http://api.pangolin.fossorial.io/v1/org/test/blueprint'
 HEADERS = {
     'accept': '*/*',
     'Authorization': 'Bearer <your_token_here>',

View File

@@ -28,10 +28,9 @@ proxy-resources:
     # sso-roles:
     #   - Member
     # sso-users:
-    #   - owen@pangolin.net
+    #   - owen@fossorial.io
     # whitelist-users:
-    #   - owen@pangolin.net
+    #   - owen@fossorial.io
-    # auto-login-idp: 1
     headers:
         - name: X-Example-Header
          value: example-value

View File

@@ -5,14 +5,14 @@ meta {
 }
 post {
-    url: http://localhost:3000/api/v1/auth/login
+    url: http://localhost:4000/api/v1/auth/login
     body: json
     auth: none
 }
 body:json {
     {
-        "email": "admin@fosrl.io",
+        "email": "owen@fossorial.io",
         "password": "Password123!"
     }
 }

View File

@@ -12,6 +12,6 @@ post {
 body:json {
     {
-        "email": "milo@pangolin.net"
+        "email": "milo@fossorial.io"
     }
 }

View File

@@ -12,7 +12,7 @@ put {
 body:json {
     {
-        "email": "numbat@pangolin.net",
+        "email": "numbat@fossorial.io",
         "password": "Password123!"
     }
 }

View File

@@ -1,15 +0,0 @@
-meta {
-    name: createOlm
-    type: http
-    seq: 1
-}
-put {
-    url: http://localhost:3000/api/v1/olm
-    body: none
-    auth: inherit
-}
-settings {
-    encodeUrl: true
-}

View File

@@ -1,8 +0,0 @@
-meta {
-    name: Olm
-    seq: 15
-}
-auth {
-    mode: inherit
-}

View File

@@ -1,6 +1,6 @@
 {
     "version": "1",
-    "name": "Pangolin",
+    "name": "Pangolin Saas",
     "type": "collection",
     "ignore": [
         "node_modules",

View File

@@ -90,8 +90,7 @@ export const setAdminCredentials: CommandModule<{}, SetAdminCredentialsArgs> = {
         passwordHash,
         dateCreated: moment().toISOString(),
         serverAdmin: true,
-        emailVerified: true,
-        lastPasswordChange: new Date().getTime()
+        emailVerified: true
     });
     console.log("Server admin created");

View File

@@ -1,5 +1,5 @@
 # To see all available options, please visit the docs:
-# https://docs.pangolin.net/self-host/advanced/config-file
+# https://docs.digpangolin.com/self-host/advanced/config-file
 app:
     dashboard_url: http://localhost:3002
@@ -25,3 +25,4 @@ flags:
     disable_user_create_org: true
     allow_raw_resources: true
     enable_integration_api: true
+    enable_clients: true

View File

@@ -1,15 +0,0 @@
-services:
-    drizzle-gateway:
-        image: ghcr.io/drizzle-team/gateway:latest
-        ports:
-            - "4984:4983"
-        depends_on:
-            - db
-        environment:
-            - STORE_PATH=/app
-            - DATABASE_URL=postgresql://postgres:password@db:5432/postgres
-        volumes:
-            - drizzle-gateway-data:/app
-volumes:
-    drizzle-gateway-data:

View File

@@ -20,7 +20,7 @@ services:
             pangolin:
                 condition: service_healthy
         command:
-            - --reachableAt=http://gerbil:3004
+            - --reachableAt=http://gerbil:3003
            - --generateAndSaveKeyTo=/var/config/key
            - --remoteConfig=http://pangolin:3001/api/v1/
        volumes:
@@ -35,7 +35,7 @@ services:
            - 80:80 # Port for traefik because of the network_mode
    traefik:
-        image: traefik:v3.6
+        image: traefik:v3.5
        container_name: traefik
        restart: unless-stopped
        network_mode: service:gerbil # Ports appear on the gerbil service

View File

@@ -1,7 +1,16 @@
 import { defineConfig } from "drizzle-kit";
 import path from "path";
+import { build } from "@server/build";
-const schema = [path.join("server", "db", "pg", "schema")];
+let schema;
+if (build === "oss") {
+    schema = [path.join("server", "db", "pg", "schema.ts")];
+} else {
+    schema = [
+        path.join("server", "db", "pg", "schema.ts"),
+        path.join("server", "db", "pg", "privateSchema.ts")
+    ];
+}
 export default defineConfig({
     dialect: "postgresql",

View File

@@ -1,8 +1,17 @@
+import { build } from "@server/build";
 import { APP_PATH } from "@server/lib/consts";
 import { defineConfig } from "drizzle-kit";
 import path from "path";
-const schema = [path.join("server", "db", "sqlite", "schema")];
+let schema;
+if (build === "oss") {
+    schema = [path.join("server", "db", "sqlite", "schema.ts")];
+} else {
+    schema = [
+        path.join("server", "db", "sqlite", "schema.ts"),
+        path.join("server", "db", "sqlite", "privateSchema.ts")
+    ];
+}
 export default defineConfig({
     dialect: "sqlite",

View File

@@ -2,9 +2,8 @@ import esbuild from "esbuild";
 import yargs from "yargs";
 import { hideBin } from "yargs/helpers";
 import { nodeExternalsPlugin } from "esbuild-node-externals";
-import path from "path";
-import fs from "fs";
 // import { glob } from "glob";
+// import path from "path";
 const banner = `
 // patch __dirname
@@ -19,25 +18,18 @@ const require = topLevelCreateRequire(import.meta.url);
 `;
 const argv = yargs(hideBin(process.argv))
-    .usage("Usage: $0 -entry [string] -out [string] -build [string]")
+    .usage("Usage: $0 -entry [string] -out [string]")
     .option("entry", {
         alias: "e",
         describe: "Entry point file",
         type: "string",
-        demandOption: true
+        demandOption: true,
     })
     .option("out", {
         alias: "o",
        describe: "Output file path",
        type: "string",
-        demandOption: true
-    })
-    .option("build", {
-        alias: "b",
-        describe: "Build type (oss, saas, enterprise)",
-        type: "string",
-        choices: ["oss", "saas", "enterprise"],
-        default: "oss"
+        demandOption: true,
     })
     .help()
     .alias("help", "h").argv;
@@ -54,206 +46,6 @@ function getPackagePaths() {
     return ["package.json"];
 }
-// Plugin to guard against bad imports from #private
-function privateImportGuardPlugin() {
-    return {
-        name: "private-import-guard",
-        setup(build) {
-            const violations = [];
-            build.onResolve({ filter: /^#private\// }, (args) => {
-                const importingFile = args.importer;
-                // Check if the importing file is NOT in server/private
-                const normalizedImporter = path.normalize(importingFile);
-                const isInServerPrivate = normalizedImporter.includes(
-                    path.normalize("server/private")
-                );
-                if (!isInServerPrivate) {
-                    const violation = {
-                        file: importingFile,
-                        importPath: args.path,
-                        resolveDir: args.resolveDir
-                    };
-                    violations.push(violation);
-                    console.log(`PRIVATE IMPORT VIOLATION:`);
-                    console.log(`  File: ${importingFile}`);
-                    console.log(`  Import: ${args.path}`);
-                    console.log(`  Resolve dir: ${args.resolveDir || "N/A"}`);
-                    console.log("");
-                }
-                // Return null to let the default resolver handle it
-                return null;
-            });
-            build.onEnd((result) => {
-                if (violations.length > 0) {
-                    console.log(
-                        `\nSUMMARY: Found ${violations.length} private import violation(s):`
-                    );
-                    violations.forEach((v, i) => {
-                        console.log(
-                            `  ${i + 1}. ${path.relative(process.cwd(), v.file)} imports ${v.importPath}`
-                        );
-                    });
-                    console.log("");
-                    result.errors.push({
-                        text: `Private import violations detected: ${violations.length} violation(s) found`,
-                        location: null,
-                        notes: violations.map((v) => ({
-                            text: `${path.relative(process.cwd(), v.file)} imports ${v.importPath}`,
-                            location: null
-                        }))
-                    });
-                }
-            });
-        }
-    };
-}
-// Plugin to guard against bad imports from #private
-function dynamicImportGuardPlugin() {
-    return {
-        name: "dynamic-import-guard",
-        setup(build) {
-            const violations = [];
-            build.onResolve({ filter: /^#dynamic\// }, (args) => {
-                const importingFile = args.importer;
-                // Check if the importing file is NOT in server/private
-                const normalizedImporter = path.normalize(importingFile);
-                const isInServerPrivate = normalizedImporter.includes(
-                    path.normalize("server/private")
-                );
-                if (isInServerPrivate) {
-                    const violation = {
-                        file: importingFile,
-                        importPath: args.path,
-                        resolveDir: args.resolveDir
-                    };
-                    violations.push(violation);
-                    console.log(`DYNAMIC IMPORT VIOLATION:`);
-                    console.log(`  File: ${importingFile}`);
-                    console.log(`  Import: ${args.path}`);
-                    console.log(`  Resolve dir: ${args.resolveDir || "N/A"}`);
-                    console.log("");
-                }
-                // Return null to let the default resolver handle it
-                return null;
-            });
-            build.onEnd((result) => {
-                if (violations.length > 0) {
-                    console.log(
-                        `\nSUMMARY: Found ${violations.length} dynamic import violation(s):`
-                    );
-                    violations.forEach((v, i) => {
-                        console.log(
-                            `  ${i + 1}. ${path.relative(process.cwd(), v.file)} imports ${v.importPath}`
-                        );
-                    });
-                    console.log("");
-                    result.errors.push({
-                        text: `Dynamic import violations detected: ${violations.length} violation(s) found`,
-                        location: null,
-                        notes: violations.map((v) => ({
-                            text: `${path.relative(process.cwd(), v.file)} imports ${v.importPath}`,
-                            location: null
-                        }))
-                    });
-                }
-            });
-        }
-    };
-}
-// Plugin to dynamically switch imports based on build type
-function dynamicImportSwitcherPlugin(buildValue) {
-    return {
-        name: "dynamic-import-switcher",
-        setup(build) {
-            const switches = [];
-            build.onStart(() => {
-                console.log(
-                    `Dynamic import switcher using build type: ${buildValue}`
-                );
-            });
-            build.onResolve({ filter: /^#dynamic\// }, (args) => {
-                // Extract the path after #dynamic/
-                const dynamicPath = args.path.replace(/^#dynamic\//, "");
-                // Determine the replacement based on build type
-                let replacement;
-                if (buildValue === "oss") {
-                    replacement = `#open/${dynamicPath}`;
-                } else if (
-                    buildValue === "saas" ||
-                    buildValue === "enterprise"
-                ) {
-                    replacement = `#closed/${dynamicPath}`; // We use #closed here so that the route guards dont complain after its been changed but this is the same as #private
-                } else {
-                    console.warn(
-                        `Unknown build type '${buildValue}', defaulting to #open/`
-                    );
-                    replacement = `#open/${dynamicPath}`;
-                }
-                const switchInfo = {
-                    file: args.importer,
-                    originalPath: args.path,
-                    replacementPath: replacement,
-                    buildType: buildValue
-                };
-                switches.push(switchInfo);
-                console.log(`DYNAMIC IMPORT SWITCH:`);
-                console.log(`  File: ${args.importer}`);
-                console.log(`  Original: ${args.path}`);
-                console.log(
-                    `  Switched to: ${replacement} (build: ${buildValue})`
-                );
-                console.log("");
-                // Rewrite the import path and let the normal resolution continue
-                return build.resolve(replacement, {
-                    importer: args.importer,
-                    namespace: args.namespace,
-                    resolveDir: args.resolveDir,
-                    kind: args.kind
-                });
-            });
-            build.onEnd((result) => {
-                if (switches.length > 0) {
-                    console.log(
-                        `\nDYNAMIC IMPORT SUMMARY: Switched ${switches.length} import(s) for build type '${buildValue}':`
-                    );
-                    switches.forEach((s, i) => {
-                        console.log(
-                            `  ${i + 1}. ${path.relative(process.cwd(), s.file)}`
-                        );
-                        console.log(
-                            `     ${s.originalPath}${s.replacementPath}`
-                        );
-                    });
-                    console.log("");
-                }
-            });
-        }
-    };
-}
 esbuild
     .build({
         entryPoints: [argv.entry],
@@ -262,44 +54,19 @@ esbuild
         format: "esm",
         minify: false,
         banner: {
-            js: banner
+            js: banner,
         },
         platform: "node",
         external: ["body-parser"],
         plugins: [
-            privateImportGuardPlugin(),
-            dynamicImportGuardPlugin(),
-            dynamicImportSwitcherPlugin(argv.build),
            nodeExternalsPlugin({
-                packagePath: getPackagePaths()
+                packagePath: getPackagePaths(),
-            })
+            }),
        ],
        sourcemap: "inline",
-        target: "node22"
+        target: "node22",
    })
-    .then((result) => {
+    .then(() => {
-        // Check if there were any errors in the build result
-        if (result.errors && result.errors.length > 0) {
-            console.error(
-                `Build failed with ${result.errors.length} error(s):`
-            );
-            result.errors.forEach((error, i) => {
-                console.error(`${i + 1}. ${error.text}`);
-                if (error.notes) {
-                    error.notes.forEach((note) => {
-                        console.error(`  - ${note.text}`);
-                    });
-                }
-            });
-            // remove the output file if it was created
-            if (fs.existsSync(argv.out)) {
-                fs.unlinkSync(argv.out);
-            }
-            process.exit(1);
-        }
        console.log("Build completed successfully");
    })
    .catch((error) => {
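
Since the right-hand side drops the -b/--build flag, a sketch of both invocation styles follows; the entry and output paths are the ones the Dockerfile above passes to this script.

    # Sketch: bundler invocations for each side of the diff.
    # Right-hand (simpler) version: entry and output only.
    node esbuild.mjs -e server/index.ts -o dist/server.mjs

    # Left-hand version adds a build edition flag, which drives the
    # #dynamic/ import switching implemented by the deleted plugins.
    node esbuild.mjs -e server/index.ts -o dist/server.mjs -b oss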

View File

@@ -1,19 +1,19 @@
-import tseslint from "typescript-eslint";
+import tseslint from 'typescript-eslint';
 export default tseslint.config({
     files: ["**/*.{ts,tsx,js,jsx}"],
     languageOptions: {
         parser: tseslint.parser,
         parserOptions: {
             ecmaVersion: "latest",
             sourceType: "module",
             ecmaFeatures: {
                 jsx: true
             }
         }
-    },
-    rules: {
-        semi: "error",
-        "prefer-const": "warn"
-    }
+    },
+    rules: {
+        "semi": "error",
+        "prefer-const": "warn"
+    }
 });

View File

@@ -18,11 +18,7 @@ put-back:
	mv main.go.bak main.go
 dev-update-versions:
-	if [ -z "$(tag)" ]; then \
-		PANGOLIN_VERSION=$$(curl -s https://api.github.com/repos/fosrl/pangolin/tags | jq -r '.[0].name'); \
-	else \
-		PANGOLIN_VERSION=$(tag); \
-	fi && \
+	PANGOLIN_VERSION=$$(curl -s https://api.github.com/repos/fosrl/pangolin/tags | jq -r '.[0].name') && \
	GERBIL_VERSION=$$(curl -s https://api.github.com/repos/fosrl/gerbil/tags | jq -r '.[0].name') && \
	BADGER_VERSION=$$(curl -s https://api.github.com/repos/fosrl/badger/tags | jq -r '.[0].name') && \
	echo "Latest versions - Pangolin: $$PANGOLIN_VERSION, Gerbil: $$GERBIL_VERSION, Badger: $$BADGER_VERSION" && \

View File

@@ -1,10 +1,15 @@
 # To see all available options, please visit the docs:
-# https://docs.pangolin.net/
+# https://docs.digpangolin.com/self-host/advanced/config-file
 gerbil:
     start_port: 51820
     base_endpoint: "{{.DashboardDomain}}"
+{{if .HybridMode}}
+managed:
+    id: "{{.HybridId}}"
+    secret: "{{.HybridSecret}}"
+{{else}}
 app:
     dashboard_url: "https://{{.DashboardDomain}}"
     log_level: "info"
@@ -14,6 +19,7 @@ app:
 domains:
     domain1:
         base_domain: "{{.BaseDomain}}"
+        cert_resolver: "letsencrypt"
 server:
     secret: "{{.Secret}}"
@@ -22,7 +28,6 @@ server:
     methods: ["GET", "POST", "PUT", "DELETE", "PATCH"]
     allowed_headers: ["X-CSRF-Token", "Content-Type"]
     credentials: false
-    {{if .EnableGeoblocking}}maxmind_db_path: "./config/GeoLite2-Country.mmdb"{{end}}
 {{if .EnableEmail}}
 email:
     smtp_host: "{{.EmailSMTPHost}}"
@@ -36,3 +41,4 @@ flags:
     disable_signup_without_invite: true
     disable_user_create_org: false
     allow_raw_resources: true
+{{end}}

View File

@@ -6,6 +6,8 @@ services:
         restart: unless-stopped
         volumes:
             - ./config:/app/config
+            - pangolin-data:/var/certificates
+            - pangolin-data:/var/dynamic
         healthcheck:
             test: ["CMD", "curl", "-f", "http://localhost:3001/api/v1/"]
             interval: "10s"
@@ -20,7 +22,7 @@ services:
             pangolin:
                 condition: service_healthy
         command:
-            - --reachableAt=http://gerbil:3004
+            - --reachableAt=http://gerbil:3003
            - --generateAndSaveKeyTo=/var/config/key
            - --remoteConfig=http://pangolin:3001/api/v1/
        volumes:
@@ -31,11 +33,11 @@ services:
        ports:
            - 51820:51820/udp
            - 21820:21820/udp
-            - 443:443
+            - 443:{{if .HybridMode}}8443{{else}}443{{end}}
            - 80:80
 {{end}}
    traefik:
-        image: docker.io/traefik:v3.6
+        image: docker.io/traefik:v3.5
        container_name: traefik
        restart: unless-stopped
 {{if .InstallGerbil}}
@@ -54,9 +56,15 @@ services:
            - ./config/traefik:/etc/traefik:ro # Volume to store the Traefik configuration
            - ./config/letsencrypt:/letsencrypt # Volume to store the Let's Encrypt certificates
            - ./config/traefik/logs:/var/log/traefik # Volume to store Traefik logs
+            # Shared volume for certificates and dynamic config in file mode
+            - pangolin-data:/var/certificates:ro
+            - pangolin-data:/var/dynamic:ro
 networks:
     default:
         driver: bridge
         name: pangolin
         {{if .EnableIPv6}} enable_ipv6: true{{end}}
+volumes:
+    pangolin-data:

View File

@@ -51,12 +51,3 @@ http:
             loadBalancer:
                 servers:
                     - url: "http://pangolin:3000" # API/WebSocket server
-tcp:
-    serversTransports:
-        pp-transport-v1:
-            proxyProtocol:
-                version: 1
-        pp-transport-v2:
-            proxyProtocol:
-                version: 2

View File

@@ -3,12 +3,17 @@ api:
     dashboard: true
 providers:
+{{if not .HybridMode}}
     http:
         endpoint: "http://pangolin:3001/api/v1/traefik-config"
         pollInterval: "5s"
     file:
         filename: "/etc/traefik/dynamic_config.yml"
+{{else}}
+    file:
+        directory: "/var/dynamic"
+        watch: true
+{{end}}
 experimental:
     plugins:
         badger:
@@ -22,7 +27,7 @@ log:
     maxBackups: 3
     maxAge: 3
     compress: true
+{{if not .HybridMode}}
 certificatesResolvers:
     letsencrypt:
         acme:
@@ -31,18 +36,22 @@ certificatesResolvers:
             email: "{{.LetsEncryptEmail}}"
             storage: "/letsencrypt/acme.json"
             caServer: "https://acme-v02.api.letsencrypt.org/directory"
+{{end}}
 entryPoints:
     web:
         address: ":80"
     websecure:
         address: ":443"
+        {{if .HybridMode}} proxyProtocol:
+            trustedIPs:
+                - 0.0.0.0/0
+                - ::1/128{{end}}
        transport:
            respondingTimeouts:
                readTimeout: "30m"
-        http:
+        {{if not .HybridMode}} http:
            tls:
-                certResolver: "letsencrypt"
+                certResolver: "letsencrypt"{{end}}
 serversTransport:
     insecureSkipVerify: true

View File

@@ -73,7 +73,7 @@ func installDocker() error {
     case strings.Contains(osRelease, "ID=ubuntu"):
         installCmd = exec.Command("bash", "-c", fmt.Sprintf(`
             apt-get update &&
-            apt-get install -y apt-transport-https ca-certificates curl &&
+            apt-get install -y apt-transport-https ca-certificates curl software-properties-common &&
            curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg &&
            echo "deb [arch=%s signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" > /etc/apt/sources.list.d/docker.list &&
            apt-get update &&
@@ -82,7 +82,7 @@ func installDocker() error {
     case strings.Contains(osRelease, "ID=debian"):
         installCmd = exec.Command("bash", "-c", fmt.Sprintf(`
             apt-get update &&
-            apt-get install -y apt-transport-https ca-certificates curl &&
+            apt-get install -y apt-transport-https ca-certificates curl software-properties-common &&
            curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg &&
            echo "deb [arch=%s signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian $(lsb_release -cs) stable" > /etc/apt/sources.list.d/docker.list &&
            apt-get update &&
View File
@@ -1,180 +0,0 @@
#!/bin/bash
# Get installer - Cross-platform installation script
# Usage: curl -fsSL https://raw.githubusercontent.com/fosrl/installer/refs/heads/main/get-installer.sh | bash
set -e
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m' # No Color
# GitHub repository info
REPO="fosrl/pangolin"
GITHUB_API_URL="https://api.github.com/repos/${REPO}/releases/latest"
# Function to print colored output
print_status() {
echo -e "${GREEN}[INFO]${NC} $1"
}
print_warning() {
echo -e "${YELLOW}[WARN]${NC} $1"
}
print_error() {
echo -e "${RED}[ERROR]${NC} $1"
}
# Function to get latest version from GitHub API
get_latest_version() {
local latest_info
if command -v curl >/dev/null 2>&1; then
latest_info=$(curl -fsSL "$GITHUB_API_URL" 2>/dev/null)
elif command -v wget >/dev/null 2>&1; then
latest_info=$(wget -qO- "$GITHUB_API_URL" 2>/dev/null)
else
print_error "Neither curl nor wget is available. Please install one of them." >&2
exit 1
fi
if [ -z "$latest_info" ]; then
print_error "Failed to fetch latest version information" >&2
exit 1
fi
# Extract version from JSON response (works without jq)
local version=$(echo "$latest_info" | grep '"tag_name"' | head -1 | sed 's/.*"tag_name": *"\([^"]*\)".*/\1/')
if [ -z "$version" ]; then
print_error "Could not parse version from GitHub API response" >&2
exit 1
fi
# Remove 'v' prefix if present
version=$(echo "$version" | sed 's/^v//')
echo "$version"
}
# Detect OS and architecture
detect_platform() {
local os arch
# Detect OS - only support Linux
case "$(uname -s)" in
Linux*) os="linux" ;;
*)
print_error "Unsupported operating system: $(uname -s). Only Linux is supported."
exit 1
;;
esac
# Detect architecture - only support amd64 and arm64
case "$(uname -m)" in
x86_64|amd64) arch="amd64" ;;
arm64|aarch64) arch="arm64" ;;
*)
print_error "Unsupported architecture: $(uname -m). Only amd64 and arm64 are supported on Linux."
exit 1
;;
esac
echo "${os}_${arch}"
}
# Get installation directory
get_install_dir() {
# Install to the current directory
local install_dir="$(pwd)"
if [ ! -d "$install_dir" ]; then
print_error "Installation directory does not exist: $install_dir"
exit 1
fi
echo "$install_dir"
}
# Download and install installer
install_installer() {
local platform="$1"
local install_dir="$2"
local binary_name="installer_${platform}"
local download_url="${BASE_URL}/${binary_name}"
local temp_file="/tmp/installer"
local final_path="${install_dir}/installer"
print_status "Downloading installer from ${download_url}"
# Download the binary
if command -v curl >/dev/null 2>&1; then
curl -fsSL "$download_url" -o "$temp_file"
elif command -v wget >/dev/null 2>&1; then
wget -q "$download_url" -O "$temp_file"
else
print_error "Neither curl nor wget is available. Please install one of them."
exit 1
fi
# Create install directory if it doesn't exist
mkdir -p "$install_dir"
# Move binary to install directory
mv "$temp_file" "$final_path"
# Make executable
chmod +x "$final_path"
print_status "Installer downloaded to ${final_path}"
}
# Verify installation
verify_installation() {
local install_dir="$1"
local installer_path="${install_dir}/installer"
if [ -f "$installer_path" ] && [ -x "$installer_path" ]; then
print_status "Installation successful!"
return 0
else
print_error "Installation failed. Binary not found or not executable."
return 1
fi
}
# Main installation process
main() {
print_status "Installing latest version of installer..."
# Get latest version
print_status "Fetching latest version from GitHub..."
VERSION=$(get_latest_version)
print_status "Latest version: v${VERSION}"
# Set base URL with the fetched version
BASE_URL="https://github.com/${REPO}/releases/download/${VERSION}"
# Detect platform
PLATFORM=$(detect_platform)
print_status "Detected platform: ${PLATFORM}"
# Get install directory
INSTALL_DIR=$(get_install_dir)
print_status "Install directory: ${INSTALL_DIR}"
# Install installer
install_installer "$PLATFORM" "$INSTALL_DIR"
# Verify installation
if verify_installation "$INSTALL_DIR"; then
print_status "Installer is ready to use!"
else
exit 1
fi
}
# Run main function
main "$@"
View File
@@ -3,8 +3,8 @@ module installer
go 1.24.0 go 1.24.0
require ( require (
golang.org/x/term v0.38.0 golang.org/x/term v0.35.0
gopkg.in/yaml.v3 v3.0.1 gopkg.in/yaml.v3 v3.0.1
) )
require golang.org/x/sys v0.39.0 // indirect require golang.org/x/sys v0.36.0 // indirect
View File
@@ -1,7 +1,7 @@
golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk= golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q= golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ=
golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg= golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
View File
@@ -2,6 +2,7 @@ package main
import ( import (
"bufio" "bufio"
"bytes"
"embed" "embed"
"fmt" "fmt"
"io" "io"
@@ -47,15 +48,17 @@ type Config struct {
InstallGerbil bool InstallGerbil bool
TraefikBouncerKey string TraefikBouncerKey string
DoCrowdsecInstall bool DoCrowdsecInstall bool
EnableGeoblocking bool
Secret string Secret string
HybridMode bool
HybridId string
HybridSecret string
} }
type SupportedContainer string type SupportedContainer string
const ( const (
Docker SupportedContainer = "docker" Docker SupportedContainer = "docker"
Podman SupportedContainer = "podman" Podman SupportedContainer = "podman"
Undefined SupportedContainer = "undefined" Undefined SupportedContainer = "undefined"
) )
@@ -95,6 +98,24 @@ func main() {
fmt.Println("\n=== Generating Configuration Files ===") fmt.Println("\n=== Generating Configuration Files ===")
// If the secret and id are not generated then generate them
if config.HybridMode && (config.HybridId == "" || config.HybridSecret == "") {
// fmt.Println("Requesting hybrid credentials from cloud...")
credentials, err := requestHybridCredentials()
if err != nil {
fmt.Printf("Error requesting hybrid credentials: %v\n", err)
fmt.Println("Please obtain credentials manually from the dashboard and run the installer again.")
os.Exit(1)
}
config.HybridId = credentials.RemoteExitNodeId
config.HybridSecret = credentials.Secret
fmt.Printf("Your managed credentials have been obtained successfully.\n")
fmt.Printf(" ID: %s\n", config.HybridId)
fmt.Printf(" Secret: %s\n", config.HybridSecret)
fmt.Println("Take these to the Pangolin dashboard https://pangolin.fossorial.io to adopt your node.")
readBool(reader, "Have you adopted your node?", true)
}
if err := createConfigFiles(config); err != nil { if err := createConfigFiles(config); err != nil {
fmt.Printf("Error creating config files: %v\n", err) fmt.Printf("Error creating config files: %v\n", err)
os.Exit(1) os.Exit(1)
@@ -104,15 +125,6 @@ func main() {
fmt.Println("\nConfiguration files created successfully!") fmt.Println("\nConfiguration files created successfully!")
// Download MaxMind database if requested
if config.EnableGeoblocking {
fmt.Println("\n=== Downloading MaxMind Database ===")
if err := downloadMaxMindDatabase(); err != nil {
fmt.Printf("Error downloading MaxMind database: %v\n", err)
fmt.Println("You can download it manually later if needed.")
}
}
fmt.Println("\n=== Starting installation ===") fmt.Println("\n=== Starting installation ===")
if readBool(reader, "Would you like to install and start the containers?", true) { if readBool(reader, "Would you like to install and start the containers?", true) {
@@ -160,34 +172,9 @@ func main() {
} else { } else {
alreadyInstalled = true alreadyInstalled = true
fmt.Println("Looks like you already installed Pangolin!") fmt.Println("Looks like you already installed Pangolin!")
// Check if MaxMind database exists and offer to update it
fmt.Println("\n=== MaxMind Database Update ===")
if _, err := os.Stat("config/GeoLite2-Country.mmdb"); err == nil {
fmt.Println("MaxMind GeoLite2 Country database found.")
if readBool(reader, "Would you like to update the MaxMind database to the latest version?", false) {
if err := downloadMaxMindDatabase(); err != nil {
fmt.Printf("Error updating MaxMind database: %v\n", err)
fmt.Println("You can try updating it manually later if needed.")
}
}
} else {
fmt.Println("MaxMind GeoLite2 Country database not found.")
if readBool(reader, "Would you like to download the MaxMind GeoLite2 database for geoblocking functionality?", false) {
if err := downloadMaxMindDatabase(); err != nil {
fmt.Printf("Error downloading MaxMind database: %v\n", err)
fmt.Println("You can try downloading it manually later if needed.")
}
// Now you need to update your config file accordingly to enable geoblocking
fmt.Println("Please remember to update your config/config.yml file to enable geoblocking! \n")
// add maxmind_db_path: "./config/GeoLite2-Country.mmdb" under server
fmt.Println("Add the following line under the 'server' section:")
fmt.Println(" maxmind_db_path: \"./config/GeoLite2-Country.mmdb\"")
}
}
} }
if !checkIsCrowdsecInstalledInCompose() { if !checkIsCrowdsecInstalledInCompose() && !checkIsPangolinInstalledWithHybrid() {
fmt.Println("\n=== CrowdSec Install ===") fmt.Println("\n=== CrowdSec Install ===")
// check if crowdsec is installed // check if crowdsec is installed
if readBool(reader, "Would you like to install CrowdSec?", false) { if readBool(reader, "Would you like to install CrowdSec?", false) {
@@ -209,8 +196,8 @@ func main() {
parsedURL, err := url.Parse(appConfig.DashboardURL) parsedURL, err := url.Parse(appConfig.DashboardURL)
if err != nil { if err != nil {
fmt.Printf("Error parsing URL: %v\n", err) fmt.Printf("Error parsing URL: %v\n", err)
return return
} }
config.DashboardDomain = parsedURL.Hostname() config.DashboardDomain = parsedURL.Hostname()
@@ -238,11 +225,12 @@ func main() {
} }
fmt.Println("CrowdSec installed successfully!") fmt.Println("CrowdSec installed successfully!")
return
} }
} }
} }
if !alreadyInstalled || config.DoCrowdsecInstall { if !config.HybridMode && !alreadyInstalled {
// Setup Token Section // Setup Token Section
fmt.Println("\n=== Setup Token ===") fmt.Println("\n=== Setup Token ===")
@@ -263,7 +251,9 @@ func main() {
fmt.Println("\nInstallation complete!") fmt.Println("\nInstallation complete!")
fmt.Printf("\nTo complete the initial setup, please visit:\nhttps://%s/auth/initial-setup\n", config.DashboardDomain) if !config.HybridMode && !checkIsPangolinInstalledWithHybrid() {
fmt.Printf("\nTo complete the initial setup, please visit:\nhttps://%s/auth/initial-setup\n", config.DashboardDomain)
}
} }
func podmanOrDocker(reader *bufio.Reader) SupportedContainer { func podmanOrDocker(reader *bufio.Reader) SupportedContainer {
@@ -338,42 +328,66 @@ func collectUserInput(reader *bufio.Reader) Config {
// Basic configuration // Basic configuration
fmt.Println("\n=== Basic Configuration ===") fmt.Println("\n=== Basic Configuration ===")
for {
config.BaseDomain = readString(reader, "Enter your base domain (no subdomain e.g. example.com)", "") response := readString(reader, "Do you want to install Pangolin as a cloud-managed (beta) node? (yes/no)", "")
if strings.EqualFold(response, "yes") || strings.EqualFold(response, "y") {
// Set default dashboard domain after base domain is collected config.HybridMode = true
defaultDashboardDomain := "" break
if config.BaseDomain != "" { } else if strings.EqualFold(response, "no") || strings.EqualFold(response, "n") {
defaultDashboardDomain = "pangolin." + config.BaseDomain config.HybridMode = false
} break
config.DashboardDomain = readString(reader, "Enter the domain for the Pangolin dashboard", defaultDashboardDomain) }
config.LetsEncryptEmail = readString(reader, "Enter email for Let's Encrypt certificates", "") fmt.Println("Please answer 'yes' or 'no'")
config.InstallGerbil = readBool(reader, "Do you want to use Gerbil to allow tunneled connections", true)
// Email configuration
fmt.Println("\n=== Email Configuration ===")
config.EnableEmail = readBool(reader, "Enable email functionality (SMTP)", false)
if config.EnableEmail {
config.EmailSMTPHost = readString(reader, "Enter SMTP host", "")
config.EmailSMTPPort = readInt(reader, "Enter SMTP port (default 587)", 587)
config.EmailSMTPUser = readString(reader, "Enter SMTP username", "")
config.EmailSMTPPass = readString(reader, "Enter SMTP password", "") // Should this be readPassword?
config.EmailNoReply = readString(reader, "Enter no-reply email address (often the same as SMTP username)", "")
} }
// Validate required fields if config.HybridMode {
if config.BaseDomain == "" { alreadyHaveCreds := readBool(reader, "Do you already have credentials from the dashboard? If not, we will create them later", false)
fmt.Println("Error: Domain name is required")
os.Exit(1) if alreadyHaveCreds {
} config.HybridId = readString(reader, "Enter your ID", "")
if config.LetsEncryptEmail == "" { config.HybridSecret = readString(reader, "Enter your secret", "")
fmt.Println("Error: Let's Encrypt email is required") }
os.Exit(1)
} // Try to get public IP as default
if config.EnableEmail && config.EmailNoReply == "" { publicIP := getPublicIP()
fmt.Println("Error: No-reply email address is required when email is enabled") if publicIP != "" {
os.Exit(1) fmt.Printf("Detected public IP: %s\n", publicIP)
}
config.DashboardDomain = readString(reader, "The public addressable IP address for this node or a domain pointing to it", publicIP)
config.InstallGerbil = true
} else {
config.BaseDomain = readString(reader, "Enter your base domain (no subdomain e.g. example.com)", "")
// Set default dashboard domain after base domain is collected
defaultDashboardDomain := ""
File diff suppressed because it is too large

View File
config.InstallGerbil = readBool(reader, "Do you want to use Gerbil to allow tunneled connections", true)
// Email configuration
fmt.Println("\n=== Email Configuration ===")
config.EnableEmail = readBool(reader, "Enable email functionality (SMTP)", false)
if config.EnableEmail {
config.EmailSMTPHost = readString(reader, "Enter SMTP host", "")
config.EmailSMTPPort = readInt(reader, "Enter SMTP port (default 587)", 587)
config.EmailSMTPUser = readString(reader, "Enter SMTP username", "")
config.EmailSMTPPass = readString(reader, "Enter SMTP password", "") // Should this be readPassword?
config.EmailNoReply = readString(reader, "Enter no-reply email address", "")
}
// Validate required fields
if config.BaseDomain == "" {
fmt.Println("Error: Domain name is required")
os.Exit(1)
}
if config.LetsEncryptEmail == "" {
fmt.Println("Error: Let's Encrypt email is required")
os.Exit(1)
}
} }
// Advanced configuration // Advanced configuration
@@ -381,7 +395,6 @@ func collectUserInput(reader *bufio.Reader) Config {
fmt.Println("\n=== Advanced Configuration ===") fmt.Println("\n=== Advanced Configuration ===")
config.EnableIPv6 = readBool(reader, "Is your server IPv6 capable?", true) config.EnableIPv6 = readBool(reader, "Is your server IPv6 capable?", true)
config.EnableGeoblocking = readBool(reader, "Do you want to download the MaxMind GeoLite2 database for geoblocking functionality?", true)
if config.DashboardDomain == "" { if config.DashboardDomain == "" {
fmt.Println("Error: Dashboard Domain name is required") fmt.Println("Error: Dashboard Domain name is required")
@@ -416,6 +429,11 @@ func createConfigFiles(config Config) error {
return nil return nil
} }
// the hybrid does not need the dynamic config
if config.HybridMode && strings.Contains(path, "dynamic_config.yml") {
return nil
}
// skip .DS_Store // skip .DS_Store
if strings.Contains(path, ".DS_Store") { if strings.Contains(path, ".DS_Store") {
return nil return nil
@@ -645,30 +663,18 @@ func checkPortsAvailable(port int) error {
return nil return nil
} }
func downloadMaxMindDatabase() error { func checkIsPangolinInstalledWithHybrid() bool {
fmt.Println("Downloading MaxMind GeoLite2 Country database...") // Check if config/config.yml exists and contains hybrid section
if _, err := os.Stat("config/config.yml"); err != nil {
// Download the GeoLite2 Country database return false
if err := run("curl", "-L", "-o", "GeoLite2-Country.tar.gz",
"https://github.com/GitSquared/node-geolite2-redist/raw/refs/heads/master/redist/GeoLite2-Country.tar.gz"); err != nil {
return fmt.Errorf("failed to download GeoLite2 database: %v", err)
} }
// Extract the database // Read config file to check for hybrid section
if err := run("tar", "-xzf", "GeoLite2-Country.tar.gz"); err != nil { content, err := os.ReadFile("config/config.yml")
return fmt.Errorf("failed to extract GeoLite2 database: %v", err) if err != nil {
return false
} }
// Find the .mmdb file and move it to the config directory // Check for hybrid section
if err := run("bash", "-c", "mv GeoLite2-Country_*/GeoLite2-Country.mmdb config/"); err != nil { return bytes.Contains(content, []byte("managed:"))
return fmt.Errorf("failed to move GeoLite2 database to config directory: %v", err)
}
// Clean up the downloaded files
if err := run("rm", "-rf", "GeoLite2-Country.tar.gz", "GeoLite2-Country_*"); err != nil {
fmt.Printf("Warning: failed to clean up temporary files: %v\n", err)
}
fmt.Println("MaxMind GeoLite2 Country database downloaded successfully!")
return nil
} }
install/quickStart.go (new file, 110 lines)
View File
@@ -0,0 +1,110 @@
package main
import (
"bytes"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"net/http"
"time"
)
const (
FRONTEND_SECRET_KEY = "af4e4785-7e09-11f0-b93a-74563c4e2a7e"
// CLOUD_API_URL = "https://pangolin.fossorial.io/api/v1/remote-exit-node/quick-start"
CLOUD_API_URL = "https://pangolin.fossorial.io/api/v1/remote-exit-node/quick-start"
)
// HybridCredentials represents the response from the cloud API
type HybridCredentials struct {
RemoteExitNodeId string `json:"remoteExitNodeId"`
Secret string `json:"secret"`
}
// APIResponse represents the full response structure from the cloud API
type APIResponse struct {
Data HybridCredentials `json:"data"`
}
// RequestPayload represents the request body structure
type RequestPayload struct {
Token string `json:"token"`
}
func generateValidationToken() string {
timestamp := time.Now().UnixMilli()
data := fmt.Sprintf("%s|%d", FRONTEND_SECRET_KEY, timestamp)
obfuscated := make([]byte, len(data))
for i, char := range []byte(data) {
obfuscated[i] = char + 5
}
return base64.StdEncoding.EncodeToString(obfuscated)
}
// requestHybridCredentials makes an HTTP POST request to the cloud API
// to get hybrid credentials (ID and secret)
func requestHybridCredentials() (*HybridCredentials, error) {
// Generate validation token
token := generateValidationToken()
// Create request payload
payload := RequestPayload{
Token: token,
}
// Marshal payload to JSON
jsonData, err := json.Marshal(payload)
if err != nil {
return nil, fmt.Errorf("failed to marshal request payload: %v", err)
}
// Create HTTP request
req, err := http.NewRequest("POST", CLOUD_API_URL, bytes.NewBuffer(jsonData))
if err != nil {
return nil, fmt.Errorf("failed to create HTTP request: %v", err)
}
// Set headers
req.Header.Set("Content-Type", "application/json")
req.Header.Set("X-CSRF-Token", "x-csrf-protection")
// Create HTTP client with timeout
client := &http.Client{
Timeout: 30 * time.Second,
}
// Make the request
resp, err := client.Do(req)
if err != nil {
return nil, fmt.Errorf("failed to make HTTP request: %v", err)
}
defer resp.Body.Close()
// Check response status
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("API request failed with status code: %d", resp.StatusCode)
}
// Read response body for debugging
body, err := io.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("failed to read response body: %v", err)
}
// Print the raw JSON response for debugging
// fmt.Printf("Raw JSON response: %s\n", string(body))
// Parse response
var apiResponse APIResponse
if err := json.Unmarshal(body, &apiResponse); err != nil {
return nil, fmt.Errorf("failed to decode API response: %v", err)
}
// Validate response data
if apiResponse.Data.RemoteExitNodeId == "" || apiResponse.Data.Secret == "" {
return nil, fmt.Errorf("invalid response: missing remoteExitNodeId or secret")
}
return &apiResponse.Data, nil
}
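The validation token produced by generateValidationToken is an obfuscation rather than a signature: each byte of "secret|timestampMillis" is shifted up by 5 and base64-encoded. Assuming the receiving side simply reverses the transform and checks the timestamp for freshness (the server side is not shown in this diff, so the maxAge policy below is an assumption), a round-trip sketch in Go:

package main

import (
	"encoding/base64"
	"fmt"
	"strconv"
	"strings"
	"time"
)

// encode mirrors generateValidationToken above: shift each byte of
// "secret|timestampMillis" up by 5, then base64-encode.
func encode(secret string) string {
	b := []byte(fmt.Sprintf("%s|%d", secret, time.Now().UnixMilli()))
	for i := range b {
		b[i] += 5
	}
	return base64.StdEncoding.EncodeToString(b)
}

// decode reverses the transform and enforces a freshness window. The
// real server-side check is not part of this diff.
func decode(token string, maxAge time.Duration) (string, error) {
	raw, err := base64.StdEncoding.DecodeString(token)
	if err != nil {
		return "", err
	}
	for i := range raw {
		raw[i] -= 5
	}
	parts := strings.SplitN(string(raw), "|", 2)
	if len(parts) != 2 {
		return "", fmt.Errorf("malformed token")
	}
	ms, err := strconv.ParseInt(parts[1], 10, 64)
	if err != nil {
		return "", err
	}
	if time.Since(time.UnixMilli(ms)) > maxAge {
		return "", fmt.Errorf("token expired")
	}
	return parts[0], nil
}

func main() {
	secret, err := decode(encode("example-secret"), time.Minute)
	fmt.Println(secret, err)
}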
16 file diffs suppressed because they are too large

View File
@@ -1,15 +1,12 @@
import type { NextConfig } from "next";
import createNextIntlPlugin from "next-intl/plugin"; import createNextIntlPlugin from "next-intl/plugin";
const withNextIntl = createNextIntlPlugin(); const withNextIntl = createNextIntlPlugin();
const nextConfig: NextConfig = { /** @type {import("next").NextConfig} */
const nextConfig = {
eslint: { eslint: {
ignoreDuringBuilds: true ignoreDuringBuilds: true
}, },
experimental: {
reactCompiler: true
},
output: "standalone" output: "standalone"
}; };
package-lock.json (generated): file diff suppressed because it is too large

View File
@@ -19,163 +19,150 @@
"db:sqlite:studio": "drizzle-kit studio --config=./drizzle.sqlite.config.ts", "db:sqlite:studio": "drizzle-kit studio --config=./drizzle.sqlite.config.ts",
"db:pg:studio": "drizzle-kit studio --config=./drizzle.pg.config.ts", "db:pg:studio": "drizzle-kit studio --config=./drizzle.pg.config.ts",
"db:clear-migrations": "rm -rf server/migrations", "db:clear-migrations": "rm -rf server/migrations",
"set:oss": "echo 'export const build = \"oss\" as any;' > server/build.ts && cp tsconfig.oss.json tsconfig.json", "set:oss": "echo 'export const build = \"oss\" as any;' > server/build.ts",
"set:saas": "echo 'export const build = \"saas\" as any;' > server/build.ts && cp tsconfig.saas.json tsconfig.json", "set:saas": "echo 'export const build = \"saas\" as any;' > server/build.ts",
"set:enterprise": "echo 'export const build = \"enterprise\" as any;' > server/build.ts && cp tsconfig.enterprise.json tsconfig.json", "set:enterprise": "echo 'export const build = \"enterprise\" as any;' > server/build.ts",
"set:sqlite": "echo 'export * from \"./sqlite\";\nexport const driver: \"pg\" | \"sqlite\" = \"sqlite\";' > server/db/index.ts", "set:sqlite": "echo 'export * from \"./sqlite\";' > server/db/index.ts",
"set:pg": "echo 'export * from \"./pg\";\nexport const driver: \"pg\" | \"sqlite\" = \"pg\";' > server/db/index.ts", "set:pg": "echo 'export * from \"./pg\";' > server/db/index.ts",
"next:build": "next build",
"build:sqlite": "mkdir -p dist && next build && node esbuild.mjs -e server/index.ts -o dist/server.mjs && node esbuild.mjs -e server/setup/migrationsSqlite.ts -o dist/migrations.mjs", "build:sqlite": "mkdir -p dist && next build && node esbuild.mjs -e server/index.ts -o dist/server.mjs && node esbuild.mjs -e server/setup/migrationsSqlite.ts -o dist/migrations.mjs",
"build:pg": "mkdir -p dist && next build && node esbuild.mjs -e server/index.ts -o dist/server.mjs && node esbuild.mjs -e server/setup/migrationsPg.ts -o dist/migrations.mjs", "build:pg": "mkdir -p dist && next build && node esbuild.mjs -e server/index.ts -o dist/server.mjs && node esbuild.mjs -e server/setup/migrationsPg.ts -o dist/migrations.mjs",
"start": "ENVIRONMENT=prod node dist/migrations.mjs && ENVIRONMENT=prod NODE_ENV=development node --enable-source-maps dist/server.mjs", "start": "ENVIRONMENT=prod node dist/migrations.mjs && ENVIRONMENT=prod NODE_ENV=development node --enable-source-maps dist/server.mjs",
"email": "email dev --dir server/emails/templates --port 3005", "email": "email dev --dir server/emails/templates --port 3005",
"build:cli": "node esbuild.mjs -e cli/index.ts -o dist/cli.mjs", "build:cli": "node esbuild.mjs -e cli/index.ts -o dist/cli.mjs",
"format": "prettier --write ." "db:sqlite:seed-exit-node": "sqlite3 config/db/db.sqlite \"INSERT INTO exitNodes (exitNodeId, name, address, endpoint, publicKey, listenPort, reachableAt, maxConnections, online, lastPing, type, region) VALUES (null, 'test', '10.0.0.1/24', 'localhost', 'MJ44MpnWGxMZURgxW/fWXDFsejhabnEFYDo60LQwK3A=', 1234, 'http://localhost:3003', 123, 1, null, 'gerbil', null);\""
}, },
"dependencies": { "dependencies": {
"@asteasolutions/zod-to-openapi": "8.2.0", "@asteasolutions/zod-to-openapi": "^7.3.4",
"@aws-sdk/client-s3": "3.948.0", "@aws-sdk/client-s3": "3.837.0",
"@faker-js/faker": "10.1.0",
"@headlessui/react": "2.2.9",
"@hookform/resolvers": "5.2.2", "@hookform/resolvers": "5.2.2",
"@monaco-editor/react": "4.7.0", "@node-rs/argon2": "^2.0.2",
"@node-rs/argon2": "2.0.2",
"@oslojs/crypto": "1.0.1", "@oslojs/crypto": "1.0.1",
"@oslojs/encoding": "1.1.0", "@oslojs/encoding": "1.1.0",
"@radix-ui/react-avatar": "1.1.11", "@radix-ui/react-avatar": "1.1.10",
"@radix-ui/react-checkbox": "1.3.3", "@radix-ui/react-checkbox": "1.3.3",
"@radix-ui/react-collapsible": "1.1.12", "@radix-ui/react-collapsible": "1.1.12",
"@radix-ui/react-dialog": "1.1.15", "@radix-ui/react-dialog": "1.1.15",
"@radix-ui/react-dropdown-menu": "2.1.16", "@radix-ui/react-dropdown-menu": "2.1.16",
"@radix-ui/react-icons": "1.3.2", "@radix-ui/react-icons": "1.3.2",
"@radix-ui/react-label": "2.1.8", "@radix-ui/react-label": "2.1.7",
"@radix-ui/react-popover": "1.1.15", "@radix-ui/react-popover": "1.1.15",
"@radix-ui/react-progress": "1.1.8", "@radix-ui/react-progress": "^1.1.7",
"@radix-ui/react-radio-group": "1.3.8", "@radix-ui/react-radio-group": "1.3.8",
"@radix-ui/react-scroll-area": "1.2.10", "@radix-ui/react-scroll-area": "^1.2.10",
"@radix-ui/react-select": "2.2.6", "@radix-ui/react-select": "2.2.6",
"@radix-ui/react-separator": "1.1.8", "@radix-ui/react-separator": "1.1.7",
"@radix-ui/react-slot": "1.2.4", "@radix-ui/react-slot": "1.2.3",
"@radix-ui/react-switch": "1.2.6", "@radix-ui/react-switch": "1.2.6",
"@radix-ui/react-tabs": "1.1.13", "@radix-ui/react-tabs": "1.1.13",
"@radix-ui/react-toast": "1.2.15", "@radix-ui/react-toast": "1.2.15",
"@radix-ui/react-tooltip": "1.2.8", "@radix-ui/react-tooltip": "^1.2.8",
"@react-email/components": "1.0.1", "@react-email/components": "0.5.5",
"@react-email/render": "2.0.0", "@react-email/render": "^1.2.0",
"@react-email/tailwind": "2.0.1", "@react-email/tailwind": "1.2.2",
"@simplewebauthn/browser": "13.2.2", "@simplewebauthn/browser": "^13.2.0",
"@simplewebauthn/server": "13.2.2", "@simplewebauthn/server": "^13.2.1",
"@tailwindcss/forms": "0.5.10", "@tailwindcss/forms": "^0.5.10",
"@tanstack/react-query": "5.90.12",
"@tanstack/react-table": "8.21.3", "@tanstack/react-table": "8.21.3",
"arctic": "3.7.0", "arctic": "^3.7.0",
"axios": "1.13.2", "axios": "^1.12.2",
"better-sqlite3": "11.9.1", "better-sqlite3": "11.7.0",
"canvas-confetti": "1.9.4", "canvas-confetti": "1.9.3",
"class-variance-authority": "0.7.1", "class-variance-authority": "^0.7.1",
"clsx": "2.1.1", "clsx": "2.1.1",
"cmdk": "1.1.1", "cmdk": "1.1.1",
"cookie": "1.1.1", "cookie": "^1.0.2",
"cookie-parser": "1.4.7", "cookie-parser": "1.4.7",
"cookies": "0.9.1", "cookies": "^0.9.1",
"cors": "2.8.5", "cors": "2.8.5",
"crypto-js": "4.2.0", "crypto-js": "^4.2.0",
"d3": "7.9.0", "drizzle-orm": "0.44.6",
"date-fns": "4.1.0", "eslint": "9.35.0",
"drizzle-orm": "0.45.0", "eslint-config-next": "15.5.4",
"eslint": "9.39.1", "express": "5.1.0",
"eslint-config-next": "16.0.8", "express-rate-limit": "8.1.0",
"express": "5.2.1", "glob": "11.0.3",
"express-rate-limit": "8.2.1",
"glob": "13.0.0",
"helmet": "8.1.0", "helmet": "8.1.0",
"http-errors": "2.0.1", "http-errors": "2.0.0",
"i": "0.3.7", "i": "^0.3.7",
"input-otp": "1.4.2", "input-otp": "1.4.2",
"ioredis": "5.8.2", "ioredis": "5.6.1",
"jmespath": "0.16.0", "jmespath": "^0.16.0",
"js-yaml": "4.1.1", "js-yaml": "4.1.0",
"jsonwebtoken": "9.0.3", "jsonwebtoken": "^9.0.2",
"lucide-react": "0.559.0", "lucide-react": "^0.544.0",
"maxmind": "5.0.1", "maxmind": "5.0.0",
"moment": "2.30.1", "moment": "2.30.1",
"next": "15.5.9", "next": "15.5.4",
"next-intl": "4.5.8", "next-intl": "^4.3.9",
"next-themes": "0.4.6", "next-themes": "0.4.6",
"nextjs-toploader": "3.9.17",
"node-cache": "5.1.2", "node-cache": "5.1.2",
"node-fetch": "3.3.2", "node-fetch": "3.3.2",
"nodemailer": "7.0.11", "nodemailer": "7.0.6",
"npm": "11.7.0", "npm": "^11.6.1",
"nprogress": "0.2.0",
"oslo": "1.2.1", "oslo": "1.2.1",
"pg": "8.16.3", "pg": "^8.16.2",
"posthog-node": "5.17.2", "posthog-node": "^5.8.4",
"qrcode.react": "4.2.0", "qrcode.react": "4.2.0",
"react": "19.2.3", "react": "19.1.1",
"react-day-picker": "9.12.0", "react-dom": "19.1.1",
"react-dom": "19.2.3", "react-easy-sort": "^1.7.0",
"react-easy-sort": "1.8.0", "react-hook-form": "7.62.0",
"react-hook-form": "7.68.0", "react-icons": "^5.5.0",
"react-icons": "5.5.0",
"rebuild": "0.1.2", "rebuild": "0.1.2",
"recharts": "2.15.4", "reodotdev": "^1.0.0",
"reodotdev": "1.0.0", "resend": "^6.1.1",
"resend": "6.6.0", "semver": "^7.7.2",
"semver": "7.7.3", "stripe": "18.2.1",
"stripe": "20.0.0", "swagger-ui-express": "^5.0.1",
"swagger-ui-express": "5.0.1", "tailwind-merge": "3.3.1",
"tailwind-merge": "3.4.0", "tw-animate-css": "^1.3.8",
"topojson-client": "3.1.0", "uuid": "^13.0.0",
"tw-animate-css": "1.4.0",
"uuid": "13.0.0",
"vaul": "1.1.2", "vaul": "1.1.2",
"visionscarto-world-atlas": "1.0.0", "winston": "3.17.0",
"winston": "3.19.0",
"winston-daily-rotate-file": "5.0.0", "winston-daily-rotate-file": "5.0.0",
"ws": "8.18.3", "ws": "8.18.3",
"yaml": "2.8.2",
"yargs": "18.0.0", "yargs": "18.0.0",
"zod": "4.1.13", "zod": "3.25.76",
"zod-validation-error": "5.0.0" "zod-validation-error": "3.5.2"
}, },
"devDependencies": { "devDependencies": {
"@dotenvx/dotenvx": "1.51.1", "@dotenvx/dotenvx": "1.51.0",
"@esbuild-plugins/tsconfig-paths": "0.1.2", "@esbuild-plugins/tsconfig-paths": "0.1.2",
"@tailwindcss/postcss": "4.1.17", "@react-email/preview-server": "4.2.12",
"@tanstack/react-query-devtools": "5.91.1", "@tailwindcss/postcss": "^4.1.14",
"@types/better-sqlite3": "7.6.13", "@types/better-sqlite3": "7.6.12",
"@types/cookie-parser": "1.4.10", "@types/cookie-parser": "1.4.9",
"@types/cors": "2.8.19", "@types/cors": "2.8.19",
"@types/crypto-js": "4.2.2", "@types/crypto-js": "^4.2.2",
"@types/d3": "7.4.3", "@types/express": "5.0.3",
"@types/express": "5.0.6", "@types/express-session": "^1.18.2",
"@types/express-session": "1.18.2", "@types/jmespath": "^0.15.2",
"@types/jmespath": "0.15.2",
"@types/jsonwebtoken": "9.0.10",
"@types/node": "24.10.2",
"@types/nodemailer": "7.0.4",
"@types/nprogress": "0.2.3",
"@types/pg": "8.16.0",
"@types/react": "19.2.7",
"@types/react-dom": "19.2.3",
"@types/semver": "7.7.1",
"@types/swagger-ui-express": "4.1.8",
"@types/topojson-client": "3.1.5",
"@types/ws": "8.18.1",
"@types/yargs": "17.0.35",
"@types/js-yaml": "4.0.9", "@types/js-yaml": "4.0.9",
"babel-plugin-react-compiler": "1.0.0", "@types/jsonwebtoken": "^9.0.10",
"drizzle-kit": "0.31.8", "@types/node": "24.6.2",
"esbuild": "0.27.1", "@types/nodemailer": "7.0.2",
"esbuild-node-externals": "1.20.1", "@types/pg": "8.15.5",
"postcss": "8.5.6", "@types/react": "19.1.16",
"prettier": "3.7.4", "@types/react-dom": "19.1.9",
"react-email": "5.0.7", "@types/semver": "^7.7.1",
"tailwindcss": "4.1.17", "@types/swagger-ui-express": "^4.1.8",
"@types/ws": "8.18.1",
"@types/yargs": "17.0.33",
"drizzle-kit": "0.31.5",
"esbuild": "0.25.10",
"esbuild-node-externals": "1.18.0",
"postcss": "^8",
"react-email": "4.2.12",
"tailwindcss": "^4.1.4",
"tsc-alias": "1.8.16", "tsc-alias": "1.8.16",
"tsx": "4.21.0", "tsx": "4.20.6",
"typescript": "5.9.3", "typescript": "^5",
"typescript-eslint": "8.49.0" "typescript-eslint": "^8.45.0"
},
"overrides": {
"emblor": {
"react": "19.0.0",
"react-dom": "19.0.0"
}
} }
} }
View File
@@ -1,8 +1,8 @@
/** @type {import('postcss-load-config').Config} */ /** @type {import('postcss-load-config').Config} */
const config = { const config = {
plugins: { plugins: {
"@tailwindcss/postcss": {} "@tailwindcss/postcss": {},
} },
}; };
export default config; export default config;
12 binary image files changed (not shown).

View File
@@ -7,21 +7,21 @@ import {
errorHandlerMiddleware, errorHandlerMiddleware,
notFoundMiddleware notFoundMiddleware
} from "@server/middlewares"; } from "@server/middlewares";
import { authenticated, unauthenticated } from "#dynamic/routers/external"; import { corsWithLoginPageSupport } from "@server/middlewares/private/corsWithLoginPage";
import { router as wsRouter, handleWSUpgrade } from "#dynamic/routers/ws"; import { authenticated, unauthenticated } from "@server/routers/external";
import { router as wsRouter, handleWSUpgrade } from "@server/routers/ws";
import { logIncomingMiddleware } from "./middlewares/logIncoming"; import { logIncomingMiddleware } from "./middlewares/logIncoming";
import { csrfProtectionMiddleware } from "./middlewares/csrfProtection"; import { csrfProtectionMiddleware } from "./middlewares/csrfProtection";
import helmet from "helmet"; import helmet from "helmet";
import { stripeWebhookHandler } from "@server/routers/private/billing/webhooks";
import { build } from "./build"; import { build } from "./build";
import rateLimit, { ipKeyGenerator } from "express-rate-limit"; import rateLimit, { ipKeyGenerator } from "express-rate-limit";
import createHttpError from "http-errors"; import createHttpError from "http-errors";
import HttpCode from "./types/HttpCode"; import HttpCode from "./types/HttpCode";
import requestTimeoutMiddleware from "./middlewares/requestTimeout"; import requestTimeoutMiddleware from "./middlewares/requestTimeout";
import { createStore } from "#dynamic/lib/rateLimitStore"; import { createStore } from "@server/lib/private/rateLimitStore";
import hybridRouter from "@server/routers/private/hybrid";
import { stripDuplicateSesions } from "./middlewares/stripDuplicateSessions"; import { stripDuplicateSesions } from "./middlewares/stripDuplicateSessions";
import { corsWithLoginPageSupport } from "@server/lib/corsWithLoginPage";
import { hybridRouter } from "#dynamic/routers/hybrid";
import { billingWebhookHandler } from "#dynamic/routers/billing/webhooks";
const dev = config.isDev; const dev = config.isDev;
const externalPort = config.getRawConfig().server.external_port; const externalPort = config.getRawConfig().server.external_port;
@@ -39,30 +39,32 @@ export function createApiServer() {
apiServer.post( apiServer.post(
`${prefix}/billing/webhooks`, `${prefix}/billing/webhooks`,
express.raw({ type: "application/json" }), express.raw({ type: "application/json" }),
billingWebhookHandler stripeWebhookHandler
); );
} }
const corsConfig = config.getRawConfig().server.cors; const corsConfig = config.getRawConfig().server.cors;
const options = {
...(corsConfig?.origins
? { origin: corsConfig.origins }
: {
origin: (origin: any, callback: any) => {
callback(null, true);
}
}),
...(corsConfig?.methods && { methods: corsConfig.methods }),
...(corsConfig?.allowed_headers && {
allowedHeaders: corsConfig.allowed_headers
}),
credentials: !(corsConfig?.credentials === false)
};
if (build == "oss" || !corsConfig) { if (build == "oss") {
const options = {
...(corsConfig?.origins
? { origin: corsConfig.origins }
: {
origin: (origin: any, callback: any) => {
callback(null, true);
}
}),
...(corsConfig?.methods && { methods: corsConfig.methods }),
...(corsConfig?.allowed_headers && {
allowedHeaders: corsConfig.allowed_headers
}),
credentials: !(corsConfig?.credentials === false)
};
logger.debug("Using CORS options", options); logger.debug("Using CORS options", options);
apiServer.use(cors(options)); apiServer.use(cors(options));
} else if (corsConfig) { } else {
// Use the custom CORS middleware with loginPage support // Use the custom CORS middleware with loginPage support
apiServer.use(corsWithLoginPageSupport(corsConfig)); apiServer.use(corsWithLoginPageSupport(corsConfig));
} }
@@ -79,12 +81,6 @@ export function createApiServer() {
// Add request timeout middleware // Add request timeout middleware
apiServer.use(requestTimeoutMiddleware(60000)); // 60 second timeout apiServer.use(requestTimeoutMiddleware(60000)); // 60 second timeout
apiServer.use(logIncomingMiddleware);
if (build !== "oss") {
apiServer.use(`${prefix}/hybrid`, hybridRouter); // put before rate limiting because we will rate limit there separately because some of the routes are heavily used
}
if (!dev) { if (!dev) {
apiServer.use( apiServer.use(
rateLimit({ rateLimit({
@@ -107,7 +103,11 @@ export function createApiServer() {
} }
// API routes // API routes
apiServer.use(logIncomingMiddleware);
apiServer.use(prefix, unauthenticated); apiServer.use(prefix, unauthenticated);
if (build !== "oss") {
apiServer.use(`${prefix}/hybrid`, hybridRouter);
}
apiServer.use(prefix, authenticated); apiServer.use(prefix, authenticated);
// WebSocket routes // WebSocket routes
View File
@@ -4,6 +4,7 @@ import { userActions, roleActions, userOrgs } from "@server/db";
import { and, eq } from "drizzle-orm"; import { and, eq } from "drizzle-orm";
import createHttpError from "http-errors"; import createHttpError from "http-errors";
import HttpCode from "@server/types/HttpCode"; import HttpCode from "@server/types/HttpCode";
import { sendUsageNotification } from "@server/routers/org";
export enum ActionsEnum { export enum ActionsEnum {
createOrgUser = "createOrgUser", createOrgUser = "createOrgUser",
@@ -19,7 +20,6 @@ export enum ActionsEnum {
getSite = "getSite", getSite = "getSite",
listSites = "listSites", listSites = "listSites",
updateSite = "updateSite", updateSite = "updateSite",
reGenerateSecret = "reGenerateSecret",
createResource = "createResource", createResource = "createResource",
deleteResource = "deleteResource", deleteResource = "deleteResource",
getResource = "getResource", getResource = "getResource",
@@ -61,7 +61,6 @@ export enum ActionsEnum {
getUser = "getUser", getUser = "getUser",
setResourcePassword = "setResourcePassword", setResourcePassword = "setResourcePassword",
setResourcePincode = "setResourcePincode", setResourcePincode = "setResourcePincode",
setResourceHeaderAuth = "setResourceHeaderAuth",
setResourceWhitelist = "setResourceWhitelist", setResourceWhitelist = "setResourceWhitelist",
getResourceWhitelist = "getResourceWhitelist", getResourceWhitelist = "getResourceWhitelist",
generateAccessToken = "generateAccessToken", generateAccessToken = "generateAccessToken",
@@ -82,11 +81,7 @@ export enum ActionsEnum {
listClients = "listClients", listClients = "listClients",
getClient = "getClient", getClient = "getClient",
listOrgDomains = "listOrgDomains", listOrgDomains = "listOrgDomains",
getDomain = "getDomain",
updateOrgDomain = "updateOrgDomain",
getDNSRecords = "getDNSRecords",
createNewt = "createNewt", createNewt = "createNewt",
createOlm = "createOlm",
createIdp = "createIdp", createIdp = "createIdp",
updateIdp = "updateIdp", updateIdp = "updateIdp",
deleteIdp = "deleteIdp", deleteIdp = "deleteIdp",
@@ -121,11 +116,7 @@ export enum ActionsEnum {
updateLoginPage = "updateLoginPage", updateLoginPage = "updateLoginPage",
getLoginPage = "getLoginPage", getLoginPage = "getLoginPage",
deleteLoginPage = "deleteLoginPage", deleteLoginPage = "deleteLoginPage",
listBlueprints = "listBlueprints", applyBlueprint = "applyBlueprint"
getBlueprint = "getBlueprint",
applyBlueprint = "applyBlueprint",
viewLogs = "viewLogs",
exportLogs = "exportLogs"
} }
export async function checkUserActionPermission( export async function checkUserActionPermission(
@@ -202,6 +193,8 @@ export async function checkUserActionPermission(
.limit(1); .limit(1);
return roleActionPermission.length > 0; return roleActionPermission.length > 0;
return false;
} catch (error) { } catch (error) {
console.error("Error checking user action permission:", error); console.error("Error checking user action permission:", error);
throw createHttpError( throw createHttpError(
View File
@@ -2,13 +2,13 @@ import { hash, verify } from "@node-rs/argon2";
export async function verifyPassword( export async function verifyPassword(
password: string, password: string,
hash: string hash: string,
): Promise<boolean> { ): Promise<boolean> {
const validPassword = await verify(hash, password, { const validPassword = await verify(hash, password, {
memoryCost: 19456, memoryCost: 19456,
timeCost: 2, timeCost: 2,
outputLen: 32, outputLen: 32,
parallelism: 1 parallelism: 1,
}); });
return validPassword; return validPassword;
} }
@@ -18,7 +18,7 @@ export async function hashPassword(password: string): Promise<string> {
memoryCost: 19456, memoryCost: 19456,
timeCost: 2, timeCost: 2,
outputLen: 32, outputLen: 32,
parallelism: 1 parallelism: 1,
}); });
return passwordHash; return passwordHash;
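Both helpers pin the same Argon2id parameters: 19456 KiB of memory, 2 iterations, parallelism 1, and a 32-byte output. For comparison, the equivalent derivation in Go using golang.org/x/crypto/argon2 would look roughly like this (a sketch under that assumption, not code from this repository):

package main

import (
	"crypto/rand"
	"encoding/hex"
	"fmt"

	"golang.org/x/crypto/argon2"
)

func main() {
	// Same parameters as the TypeScript helpers above:
	// time=2, memory=19456 KiB, threads=1, keyLen=32.
	salt := make([]byte, 16)
	if _, err := rand.Read(salt); err != nil {
		panic(err)
	}
	key := argon2.IDKey([]byte("example-password"), salt, 2, 19456, 1, 32)
	fmt.Println(hex.EncodeToString(key))
}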
View File
@@ -4,13 +4,10 @@ export const passwordSchema = z
.string() .string()
.min(8, { message: "Password must be at least 8 characters long" }) .min(8, { message: "Password must be at least 8 characters long" })
.max(128, { message: "Password must be at most 128 characters long" }) .max(128, { message: "Password must be at most 128 characters long" })
.regex( .regex(/^(?=.*?[A-Z])(?=.*?[a-z])(?=.*?[0-9])(?=.*?[~!`@#$%^&*()_\-+={}[\]|\\:;"'<>,.\/?]).*$/, {
/^(?=.*?[A-Z])(?=.*?[a-z])(?=.*?[0-9])(?=.*?[~!`@#$%^&*()_\-+={}[\]|\\:;"'<>,.\/?]).*$/, message: `Your password must meet the following conditions:
{
message: `Your password must meet the following conditions:
at least one uppercase English letter, at least one uppercase English letter,
at least one lowercase English letter, at least one lowercase English letter,
at least one digit, at least one digit,
at least one special character.` at least one special character.`
} });
);
View File
@@ -36,15 +36,12 @@ export async function createSession(
const sessionId = encodeHexLowerCase( const sessionId = encodeHexLowerCase(
sha256(new TextEncoder().encode(token)) sha256(new TextEncoder().encode(token))
); );
const [session] = await db const session: Session = {
.insert(sessions) sessionId: sessionId,
.values({ userId,
sessionId: sessionId, expiresAt: new Date(Date.now() + SESSION_COOKIE_EXPIRES).getTime()
userId, };
expiresAt: new Date(Date.now() + SESSION_COOKIE_EXPIRES).getTime(), await db.insert(sessions).values(session);
issuedAt: new Date().getTime()
})
.returning();
return session; return session;
} }
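Across these session stores the session ID is always derived the same way: the opaque token handed to the client is hashed with SHA-256 and hex-encoded before it touches the database, so a leaked sessions table does not yield usable tokens. The same derivation in Go, for illustration:

package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// sessionIDFromToken mirrors encodeHexLowerCase(sha256(token)) from the
// TypeScript session helpers: the raw token stays with the client, and
// only the hash is stored and looked up.
func sessionIDFromToken(token string) string {
	sum := sha256.Sum256([]byte(token))
	return hex.EncodeToString(sum[:])
}

func main() {
	fmt.Println(sessionIDFromToken("example-session-token"))
}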
View File
@@ -1,4 +1,6 @@
import { encodeHexLowerCase } from "@oslojs/encoding"; import {
encodeHexLowerCase,
} from "@oslojs/encoding";
import { sha256 } from "@oslojs/crypto/sha2"; import { sha256 } from "@oslojs/crypto/sha2";
import { Newt, newts, newtSessions, NewtSession } from "@server/db"; import { Newt, newts, newtSessions, NewtSession } from "@server/db";
import { db } from "@server/db"; import { db } from "@server/db";
@@ -8,25 +10,25 @@ export const EXPIRES = 1000 * 60 * 60 * 24 * 30;
export async function createNewtSession( export async function createNewtSession(
token: string, token: string,
newtId: string newtId: string,
): Promise<NewtSession> { ): Promise<NewtSession> {
const sessionId = encodeHexLowerCase( const sessionId = encodeHexLowerCase(
sha256(new TextEncoder().encode(token)) sha256(new TextEncoder().encode(token)),
); );
const session: NewtSession = { const session: NewtSession = {
sessionId: sessionId, sessionId: sessionId,
newtId, newtId,
expiresAt: new Date(Date.now() + EXPIRES).getTime() expiresAt: new Date(Date.now() + EXPIRES).getTime(),
}; };
await db.insert(newtSessions).values(session); await db.insert(newtSessions).values(session);
return session; return session;
} }
export async function validateNewtSessionToken( export async function validateNewtSessionToken(
token: string token: string,
): Promise<SessionValidationResult> { ): Promise<SessionValidationResult> {
const sessionId = encodeHexLowerCase( const sessionId = encodeHexLowerCase(
sha256(new TextEncoder().encode(token)) sha256(new TextEncoder().encode(token)),
); );
const result = await db const result = await db
.select({ newt: newts, session: newtSessions }) .select({ newt: newts, session: newtSessions })
@@ -43,12 +45,14 @@ export async function validateNewtSessionToken(
.where(eq(newtSessions.sessionId, session.sessionId)); .where(eq(newtSessions.sessionId, session.sessionId));
return { session: null, newt: null }; return { session: null, newt: null };
} }
if (Date.now() >= session.expiresAt - EXPIRES / 2) { if (Date.now() >= session.expiresAt - (EXPIRES / 2)) {
session.expiresAt = new Date(Date.now() + EXPIRES).getTime(); session.expiresAt = new Date(
Date.now() + EXPIRES,
).getTime();
await db await db
.update(newtSessions) .update(newtSessions)
.set({ .set({
expiresAt: session.expiresAt expiresAt: session.expiresAt,
}) })
.where(eq(newtSessions.sessionId, session.sessionId)); .where(eq(newtSessions.sessionId, session.sessionId));
} }
View File
@@ -1,4 +1,6 @@
import { encodeHexLowerCase } from "@oslojs/encoding"; import {
encodeHexLowerCase,
} from "@oslojs/encoding";
import { sha256 } from "@oslojs/crypto/sha2"; import { sha256 } from "@oslojs/crypto/sha2";
import { Olm, olms, olmSessions, OlmSession } from "@server/db"; import { Olm, olms, olmSessions, OlmSession } from "@server/db";
import { db } from "@server/db"; import { db } from "@server/db";
@@ -8,25 +10,25 @@ export const EXPIRES = 1000 * 60 * 60 * 24 * 30;
export async function createOlmSession( export async function createOlmSession(
token: string, token: string,
olmId: string olmId: string,
): Promise<OlmSession> { ): Promise<OlmSession> {
const sessionId = encodeHexLowerCase( const sessionId = encodeHexLowerCase(
sha256(new TextEncoder().encode(token)) sha256(new TextEncoder().encode(token)),
); );
const session: OlmSession = { const session: OlmSession = {
sessionId: sessionId, sessionId: sessionId,
olmId, olmId,
expiresAt: new Date(Date.now() + EXPIRES).getTime() expiresAt: new Date(Date.now() + EXPIRES).getTime(),
}; };
await db.insert(olmSessions).values(session); await db.insert(olmSessions).values(session);
return session; return session;
} }
export async function validateOlmSessionToken( export async function validateOlmSessionToken(
token: string token: string,
): Promise<SessionValidationResult> { ): Promise<SessionValidationResult> {
const sessionId = encodeHexLowerCase( const sessionId = encodeHexLowerCase(
sha256(new TextEncoder().encode(token)) sha256(new TextEncoder().encode(token)),
); );
const result = await db const result = await db
.select({ olm: olms, session: olmSessions }) .select({ olm: olms, session: olmSessions })
@@ -43,12 +45,14 @@ export async function validateOlmSessionToken(
.where(eq(olmSessions.sessionId, session.sessionId)); .where(eq(olmSessions.sessionId, session.sessionId));
return { session: null, olm: null }; return { session: null, olm: null };
} }
if (Date.now() >= session.expiresAt - EXPIRES / 2) { if (Date.now() >= session.expiresAt - (EXPIRES / 2)) {
session.expiresAt = new Date(Date.now() + EXPIRES).getTime(); session.expiresAt = new Date(
Date.now() + EXPIRES,
).getTime();
await db await db
.update(olmSessions) .update(olmSessions)
.set({ .set({
expiresAt: session.expiresAt expiresAt: session.expiresAt,
}) })
.where(eq(olmSessions.sessionId, session.sessionId)); .where(eq(olmSessions.sessionId, session.sessionId));
} }
View File
@@ -11,14 +11,11 @@
* This file is not licensed under the AGPLv3. * This file is not licensed under the AGPLv3.
*/ */
import { encodeHexLowerCase } from "@oslojs/encoding";
import { sha256 } from "@oslojs/crypto/sha2";
import { import {
RemoteExitNode, encodeHexLowerCase,
remoteExitNodes, } from "@oslojs/encoding";
remoteExitNodeSessions, import { sha256 } from "@oslojs/crypto/sha2";
RemoteExitNodeSession import { RemoteExitNode, remoteExitNodes, remoteExitNodeSessions, RemoteExitNodeSession } from "@server/db";
} from "@server/db";
import { db } from "@server/db"; import { db } from "@server/db";
import { eq } from "drizzle-orm"; import { eq } from "drizzle-orm";
@@ -26,39 +23,30 @@ export const EXPIRES = 1000 * 60 * 60 * 24 * 30;
export async function createRemoteExitNodeSession( export async function createRemoteExitNodeSession(
token: string, token: string,
remoteExitNodeId: string remoteExitNodeId: string,
): Promise<RemoteExitNodeSession> { ): Promise<RemoteExitNodeSession> {
const sessionId = encodeHexLowerCase( const sessionId = encodeHexLowerCase(
sha256(new TextEncoder().encode(token)) sha256(new TextEncoder().encode(token)),
); );
const session: RemoteExitNodeSession = { const session: RemoteExitNodeSession = {
sessionId: sessionId, sessionId: sessionId,
remoteExitNodeId, remoteExitNodeId,
expiresAt: new Date(Date.now() + EXPIRES).getTime() expiresAt: new Date(Date.now() + EXPIRES).getTime(),
}; };
await db.insert(remoteExitNodeSessions).values(session); await db.insert(remoteExitNodeSessions).values(session);
return session; return session;
} }
export async function validateRemoteExitNodeSessionToken( export async function validateRemoteExitNodeSessionToken(
token: string token: string,
): Promise<SessionValidationResult> { ): Promise<SessionValidationResult> {
const sessionId = encodeHexLowerCase( const sessionId = encodeHexLowerCase(
sha256(new TextEncoder().encode(token)) sha256(new TextEncoder().encode(token)),
); );
const result = await db const result = await db
.select({ .select({ remoteExitNode: remoteExitNodes, session: remoteExitNodeSessions })
remoteExitNode: remoteExitNodes,
session: remoteExitNodeSessions
})
.from(remoteExitNodeSessions) .from(remoteExitNodeSessions)
.innerJoin( .innerJoin(remoteExitNodes, eq(remoteExitNodeSessions.remoteExitNodeId, remoteExitNodes.remoteExitNodeId))
remoteExitNodes,
eq(
remoteExitNodeSessions.remoteExitNodeId,
remoteExitNodes.remoteExitNodeId
)
)
.where(eq(remoteExitNodeSessions.sessionId, sessionId)); .where(eq(remoteExitNodeSessions.sessionId, sessionId));
if (result.length < 1) { if (result.length < 1) {
return { session: null, remoteExitNode: null }; return { session: null, remoteExitNode: null };
@@ -70,32 +58,26 @@ export async function validateRemoteExitNodeSessionToken(
.where(eq(remoteExitNodeSessions.sessionId, session.sessionId)); .where(eq(remoteExitNodeSessions.sessionId, session.sessionId));
return { session: null, remoteExitNode: null }; return { session: null, remoteExitNode: null };
} }
if (Date.now() >= session.expiresAt - EXPIRES / 2) { if (Date.now() >= session.expiresAt - (EXPIRES / 2)) {
session.expiresAt = new Date(Date.now() + EXPIRES).getTime(); session.expiresAt = new Date(
Date.now() + EXPIRES,
).getTime();
await db await db
.update(remoteExitNodeSessions) .update(remoteExitNodeSessions)
.set({ .set({
expiresAt: session.expiresAt expiresAt: session.expiresAt,
}) })
.where(eq(remoteExitNodeSessions.sessionId, session.sessionId)); .where(eq(remoteExitNodeSessions.sessionId, session.sessionId));
} }
return { session, remoteExitNode }; return { session, remoteExitNode };
} }
export async function invalidateRemoteExitNodeSession( export async function invalidateRemoteExitNodeSession(sessionId: string): Promise<void> {
sessionId: string await db.delete(remoteExitNodeSessions).where(eq(remoteExitNodeSessions.sessionId, sessionId));
): Promise<void> {
await db
.delete(remoteExitNodeSessions)
.where(eq(remoteExitNodeSessions.sessionId, sessionId));
} }
export async function invalidateAllRemoteExitNodeSessions( export async function invalidateAllRemoteExitNodeSessions(remoteExitNodeId: string): Promise<void> {
remoteExitNodeId: string await db.delete(remoteExitNodeSessions).where(eq(remoteExitNodeSessions.remoteExitNodeId, remoteExitNodeId));
): Promise<void> {
await db
.delete(remoteExitNodeSessions)
.where(eq(remoteExitNodeSessions.remoteExitNodeId, remoteExitNodeId));
} }
export type SessionValidationResult = export type SessionValidationResult =
View File
@@ -4,6 +4,9 @@ import { resourceSessions, ResourceSession } from "@server/db";
import { db } from "@server/db"; import { db } from "@server/db";
import { eq, and } from "drizzle-orm"; import { eq, and } from "drizzle-orm";
import config from "@server/lib/config"; import config from "@server/lib/config";
import axios from "axios";
import logger from "@server/logger";
import { tokenManager } from "@server/lib/tokenManager";
export const SESSION_COOKIE_NAME = export const SESSION_COOKIE_NAME =
config.getRawConfig().server.session_cookie_name; config.getRawConfig().server.session_cookie_name;
@@ -50,8 +53,7 @@ export async function createResourceSession(opts: {
doNotExtend: opts.doNotExtend || false, doNotExtend: opts.doNotExtend || false,
accessTokenId: opts.accessTokenId || null, accessTokenId: opts.accessTokenId || null,
isRequestToken: opts.isRequestToken || false, isRequestToken: opts.isRequestToken || false,
userSessionId: opts.userSessionId || null, userSessionId: opts.userSessionId || null
issuedAt: new Date().getTime()
}; };
await db.insert(resourceSessions).values(session); await db.insert(resourceSessions).values(session);
@@ -63,6 +65,29 @@ export async function validateResourceSessionToken(
token: string, token: string,
resourceId: number resourceId: number
): Promise<ResourceSessionValidationResult> { ): Promise<ResourceSessionValidationResult> {
if (config.isManagedMode()) {
try {
const response = await axios.post(`${config.getRawConfig().managed?.endpoint}/api/v1/hybrid/resource/${resourceId}/session/validate`, {
token: token
}, await tokenManager.getAuthHeader());
return response.data.data;
} catch (error) {
if (axios.isAxiosError(error)) {
logger.error("Error validating resource session token in hybrid mode:", {
message: error.message,
code: error.code,
status: error.response?.status,
statusText: error.response?.statusText,
url: error.config?.url,
method: error.config?.method
});
} else {
logger.error("Error validating resource session token in hybrid mode:", error);
}
return { resourceSession: null };
}
}
const sessionId = encodeHexLowerCase( const sessionId = encodeHexLowerCase(
sha256(new TextEncoder().encode(token)) sha256(new TextEncoder().encode(token))
); );
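In managed (hybrid) mode the block above delegates validation to the control plane over HTTP instead of consulting the local database, and fails closed on any error. A rough sketch of that delegation pattern; the endpoint path is the one from the hunk, while MANAGED_ENDPOINT and getAuthHeader are hypothetical stand-ins for config.getRawConfig().managed?.endpoint and tokenManager.getAuthHeader():

import axios from "axios";

const MANAGED_ENDPOINT = "https://managed.example.com"; // hypothetical value
async function getAuthHeader() {
    return { headers: { Authorization: "Bearer <token>" } }; // stand-in for tokenManager
}

async function validateRemotely(resourceId: number, token: string) {
    try {
        const response = await axios.post(
            `${MANAGED_ENDPOINT}/api/v1/hybrid/resource/${resourceId}/session/validate`,
            { token },
            await getAuthHeader()
        );
        return response.data.data; // envelope per the hunk: { resourceSession, ... }
    } catch {
        // Fail closed: any transport or auth error is treated as "no valid session".
        return { resourceSession: null };
    }
}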

View File

@@ -1,43 +1,9 @@
import { Request } from "express"; import { Request } from "express";
import { import { validateSessionToken, SESSION_COOKIE_NAME } from "@server/auth/sessions/app";
validateSessionToken,
SESSION_COOKIE_NAME
} from "@server/auth/sessions/app";
export async function verifySession(req: Request, forceLogin?: boolean) { export async function verifySession(req: Request) {
const res = await validateSessionToken( const res = await validateSessionToken(
req.cookies[SESSION_COOKIE_NAME] ?? "" req.cookies[SESSION_COOKIE_NAME] ?? "",
); );
if (!forceLogin) {
return res;
}
if (!res.session || !res.user) {
return {
session: null,
user: null
};
}
if (res.session.deviceAuthUsed) {
return {
session: null,
user: null
};
}
if (!res.session.issuedAt) {
return {
session: null,
user: null
};
}
const mins = 5 * 60 * 1000;
const now = new Date().getTime();
if (now - res.session.issuedAt > mins) {
return {
session: null,
user: null
};
}
return res; return res;
} }
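The branch removed on one side of this diff is a re-authentication gate: with forceLogin set, a session only counts if it was minted by a direct login (not device auth) within the last five minutes. Condensed into a predicate, with SessionLike as a hypothetical shape covering just the fields the check reads:

interface SessionLike {
    issuedAt?: number | null; // epoch millis when the session was created
    deviceAuthUsed: boolean;  // true when minted via device authorization
}

const FRESH_LOGIN_WINDOW_MS = 5 * 60 * 1000;

function passesForceLogin(session: SessionLike, now = Date.now()): boolean {
    if (session.deviceAuthUsed) return false; // device-auth sessions never pass
    if (!session.issuedAt) return false;      // unknown issue time never passes
    return now - session.issuedAt <= FRESH_LOGIN_WINDOW_MS;
}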

View File

@@ -1,13 +0,0 @@
import { cleanup as wsCleanup } from "#dynamic/routers/ws";
async function cleanup() {
await wsCleanup();
process.exit(0);
}
export async function initCleanup() {
// Handle process termination
process.on("SIGTERM", () => cleanup());
process.on("SIGINT", () => cleanup());
}
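The deleted file above registered its signal handlers once at startup; a usage sketch under that assumption (the import path is hypothetical):

import { initCleanup } from "./cleanup"; // hypothetical path to the module above

async function main() {
    await initCleanup(); // install SIGTERM/SIGINT handlers before serving traffic
    // ... start HTTP and websocket servers here ...
}

void main();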

File diff suppressed because it is too large

View File

@@ -1,7 +1,6 @@
import { join } from "path"; import { join } from "path";
import { readFileSync } from "fs"; import { readFileSync } from "fs";
import { clients, db, resources, siteResources } from "@server/db"; import { db, resources, siteResources } from "@server/db";
import { randomInt } from "crypto";
import { exitNodes, sites } from "@server/db"; import { exitNodes, sites } from "@server/db";
import { eq, and } from "drizzle-orm"; import { eq, and } from "drizzle-orm";
import { __DIRNAME } from "@server/lib/consts"; import { __DIRNAME } from "@server/lib/consts";
@@ -16,25 +15,6 @@ if (!dev) {
} }
export const names = JSON.parse(readFileSync(file, "utf-8")); export const names = JSON.parse(readFileSync(file, "utf-8"));
export async function getUniqueClientName(orgId: string): Promise<string> {
let loops = 0;
while (true) {
if (loops > 100) {
throw new Error("Could not generate a unique name");
}
const name = generateName();
const count = await db
.select({ niceId: clients.niceId, orgId: clients.orgId })
.from(clients)
.where(and(eq(clients.niceId, name), eq(clients.orgId, orgId)));
if (count.length === 0) {
return name;
}
loops++;
}
}
export async function getUniqueSiteName(orgId: string): Promise<string> { export async function getUniqueSiteName(orgId: string): Promise<string> {
let loops = 0; let loops = 0;
while (true) { while (true) {
@@ -62,36 +42,18 @@ export async function getUniqueResourceName(orgId: string): Promise<string> {
} }
const name = generateName(); const name = generateName();
const [resourceCount, siteResourceCount] = await Promise.all([ const count = await db
db .select({ niceId: resources.niceId, orgId: resources.orgId })
.select({ niceId: resources.niceId, orgId: resources.orgId }) .from(resources)
.from(resources) .where(and(eq(resources.niceId, name), eq(resources.orgId, orgId)));
.where( if (count.length === 0) {
and(eq(resources.niceId, name), eq(resources.orgId, orgId))
),
db
.select({
niceId: siteResources.niceId,
orgId: siteResources.orgId
})
.from(siteResources)
.where(
and(
eq(siteResources.niceId, name),
eq(siteResources.orgId, orgId)
)
)
]);
if (resourceCount.length === 0 && siteResourceCount.length === 0) {
return name; return name;
} }
loops++; loops++;
} }
} }
export async function getUniqueSiteResourceName( export async function getUniqueSiteResourceName(orgId: string): Promise<string> {
orgId: string
): Promise<string> {
let loops = 0; let loops = 0;
while (true) { while (true) {
if (loops > 100) { if (loops > 100) {
@@ -99,27 +61,11 @@ export async function getUniqueSiteResourceName(
} }
const name = generateName(); const name = generateName();
const [resourceCount, siteResourceCount] = await Promise.all([ const count = await db
db .select({ niceId: siteResources.niceId, orgId: siteResources.orgId })
.select({ niceId: resources.niceId, orgId: resources.orgId }) .from(siteResources)
.from(resources) .where(and(eq(siteResources.niceId, name), eq(siteResources.orgId, orgId)));
.where( if (count.length === 0) {
and(eq(resources.niceId, name), eq(resources.orgId, orgId))
),
db
.select({
niceId: siteResources.niceId,
orgId: siteResources.orgId
})
.from(siteResources)
.where(
and(
eq(siteResources.niceId, name),
eq(siteResources.orgId, orgId)
)
)
]);
if (resourceCount.length === 0 && siteResourceCount.length === 0) {
return name; return name;
} }
loops++; loops++;
@@ -128,7 +74,9 @@ export async function getUniqueSiteResourceName(
export async function getUniqueExitNodeEndpointName(): Promise<string> { export async function getUniqueExitNodeEndpointName(): Promise<string> {
let loops = 0; let loops = 0;
const count = await db.select().from(exitNodes); const count = await db
.select()
.from(exitNodes);
while (true) { while (true) {
if (loops > 100) { if (loops > 100) {
throw new Error("Could not generate a unique name"); throw new Error("Could not generate a unique name");
@@ -147,11 +95,14 @@ export async function getUniqueExitNodeEndpointName(): Promise<string> {
} }
} }
export function generateName(): string { export function generateName(): string {
const name = ( const name = (
names.descriptors[randomInt(names.descriptors.length)] + names.descriptors[
Math.floor(Math.random() * names.descriptors.length)
] +
"-" + "-" +
names.animals[randomInt(names.animals.length)] names.animals[Math.floor(Math.random() * names.animals.length)]
) )
.toLowerCase() .toLowerCase()
.replace(/\s/g, "-"); .replace(/\s/g, "-");
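One side of this hunk draws indices with crypto.randomInt instead of Math.floor(Math.random() * n); randomInt is uniform and CSPRNG-backed, so there is no modulo bias and no reliance on the non-cryptographic PRNG. A self-contained sketch with stand-in word lists (the real lists are parsed from the names JSON file above):

import { randomInt } from "crypto";

const descriptors = ["Brave", "Calm", "Swift"]; // stand-ins for names.descriptors
const animals = ["Otter", "Falcon", "Lynx"];    // stand-ins for names.animals

export function generateName(): string {
    return (
        descriptors[randomInt(descriptors.length)] +
        "-" +
        animals[randomInt(animals.length)]
    )
        .toLowerCase()
        .replace(/\s/g, "-");
}

console.log(generateName()); // e.g. "swift-otter"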

View File

@@ -13,12 +13,9 @@ function createDb() {
connection_string: process.env.POSTGRES_CONNECTION_STRING connection_string: process.env.POSTGRES_CONNECTION_STRING
}; };
if (process.env.POSTGRES_REPLICA_CONNECTION_STRINGS) { if (process.env.POSTGRES_REPLICA_CONNECTION_STRINGS) {
const replicas = const replicas = process.env.POSTGRES_REPLICA_CONNECTION_STRINGS.split(",").map((conn) => ({
process.env.POSTGRES_REPLICA_CONNECTION_STRINGS.split( connection_string: conn.trim()
"," }));
).map((conn) => ({
connection_string: conn.trim()
}));
config.postgres.replicas = replicas; config.postgres.replicas = replicas;
} }
} else { } else {
@@ -38,49 +35,32 @@ function createDb() {
} }
// Create connection pools instead of individual connections // Create connection pools instead of individual connections
const poolConfig = config.postgres.pool;
const primaryPool = new Pool({ const primaryPool = new Pool({
connectionString, connectionString,
max: poolConfig?.max_connections || 20, max: 20,
idleTimeoutMillis: poolConfig?.idle_timeout_ms || 30000, idleTimeoutMillis: 30000,
connectionTimeoutMillis: poolConfig?.connection_timeout_ms || 5000 connectionTimeoutMillis: 5000,
}); });
const replicas = []; const replicas = [];
if (!replicaConnections.length) { if (!replicaConnections.length) {
replicas.push( replicas.push(DrizzlePostgres(primaryPool));
DrizzlePostgres(primaryPool, {
logger: process.env.QUERY_LOGGING == "true"
})
);
} else { } else {
for (const conn of replicaConnections) { for (const conn of replicaConnections) {
const replicaPool = new Pool({ const replicaPool = new Pool({
connectionString: conn.connection_string, connectionString: conn.connection_string,
max: poolConfig?.max_replica_connections || 20, max: 10,
idleTimeoutMillis: poolConfig?.idle_timeout_ms || 30000, idleTimeoutMillis: 30000,
connectionTimeoutMillis: connectionTimeoutMillis: 5000,
poolConfig?.connection_timeout_ms || 5000
}); });
replicas.push( replicas.push(DrizzlePostgres(replicaPool));
DrizzlePostgres(replicaPool, {
logger: process.env.QUERY_LOGGING == "true"
})
);
} }
} }
return withReplicas( return withReplicas(DrizzlePostgres(primaryPool), replicas as any);
DrizzlePostgres(primaryPool, {
logger: process.env.QUERY_LOGGING == "true"
}),
replicas as any
);
} }
export const db = createDb(); export const db = createDb();
export default db; export default db;
export type Transaction = Parameters< export type Transaction = Parameters<Parameters<typeof db["transaction"]>[0]>[0];
Parameters<(typeof db)["transaction"]>[0]
>[0];
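On one side of this hunk the pool sizes and timeouts come from an optional config.postgres.pool block rather than hard-coded literals. Inferring the block's shape from the fields read above (the interface itself is not shown in this diff):

// Assumed shape of config.postgres.pool, inferred from the reads above.
interface PostgresPoolConfig {
    max_connections?: number;         // primary pool size, default 20
    max_replica_connections?: number; // per-replica pool size, default 20
    idle_timeout_ms?: number;         // default 30000
    connection_timeout_ms?: number;   // default 5000
}

const poolConfig: PostgresPoolConfig = { max_connections: 50, idle_timeout_ms: 10_000 };

// The same fallback pattern the driver uses:
const max = poolConfig.max_connections || 20;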

View File

@@ -1,3 +1,3 @@
export * from "./driver"; export * from "./driver";
export * from "./schema/schema"; export * from "./schema";
export * from "./schema/privateSchema"; export * from "./privateSchema";

View File

@@ -11,7 +11,6 @@ const runMigrations = async () => {
migrationsFolder: migrationsFolder migrationsFolder: migrationsFolder
}); });
console.log("Migrations completed successfully."); console.log("Migrations completed successfully.");
process.exit(0);
} catch (error) { } catch (error) {
console.error("Error running migrations:", error); console.error("Error running migrations:", error);
process.exit(1); process.exit(1);

View File

@@ -1,3 +1,16 @@
/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
import { import {
pgTable, pgTable,
serial, serial,
@@ -6,8 +19,7 @@ import {
integer, integer,
bigint, bigint,
real, real,
text, text
index
} from "drizzle-orm/pg-core"; } from "drizzle-orm/pg-core";
import { InferSelectModel } from "drizzle-orm"; import { InferSelectModel } from "drizzle-orm";
import { domains, orgs, targets, users, exitNodes, sessions } from "./schema"; import { domains, orgs, targets, users, exitNodes, sessions } from "./schema";
@@ -167,7 +179,6 @@ export const remoteExitNodes = pgTable("remoteExitNode", {
secretHash: varchar("secretHash").notNull(), secretHash: varchar("secretHash").notNull(),
dateCreated: varchar("dateCreated").notNull(), dateCreated: varchar("dateCreated").notNull(),
version: varchar("version"), version: varchar("version"),
secondaryVersion: varchar("secondaryVersion"), // This is to detect the new nodes after the transition to pangolin-node
exitNodeId: integer("exitNodeId").references(() => exitNodes.exitNodeId, { exitNodeId: integer("exitNodeId").references(() => exitNodes.exitNodeId, {
onDelete: "cascade" onDelete: "cascade"
}) })
@@ -215,57 +226,6 @@ export const sessionTransferToken = pgTable("sessionTransferToken", {
expiresAt: bigint("expiresAt", { mode: "number" }).notNull() expiresAt: bigint("expiresAt", { mode: "number" }).notNull()
}); });
export const actionAuditLog = pgTable(
"actionAuditLog",
{
id: serial("id").primaryKey(),
timestamp: bigint("timestamp", { mode: "number" }).notNull(), // this is EPOCH time in seconds
orgId: varchar("orgId")
.notNull()
.references(() => orgs.orgId, { onDelete: "cascade" }),
actorType: varchar("actorType", { length: 50 }).notNull(),
actor: varchar("actor", { length: 255 }).notNull(),
actorId: varchar("actorId", { length: 255 }).notNull(),
action: varchar("action", { length: 100 }).notNull(),
metadata: text("metadata")
},
(table) => [
index("idx_actionAuditLog_timestamp").on(table.timestamp),
index("idx_actionAuditLog_org_timestamp").on(
table.orgId,
table.timestamp
)
]
);
export const accessAuditLog = pgTable(
"accessAuditLog",
{
id: serial("id").primaryKey(),
timestamp: bigint("timestamp", { mode: "number" }).notNull(), // this is EPOCH time in seconds
orgId: varchar("orgId")
.notNull()
.references(() => orgs.orgId, { onDelete: "cascade" }),
actorType: varchar("actorType", { length: 50 }),
actor: varchar("actor", { length: 255 }),
actorId: varchar("actorId", { length: 255 }),
resourceId: integer("resourceId"),
ip: varchar("ip", { length: 45 }),
type: varchar("type", { length: 100 }).notNull(),
action: boolean("action").notNull(),
location: text("location"),
userAgent: text("userAgent"),
metadata: text("metadata")
},
(table) => [
index("idx_identityAuditLog_timestamp").on(table.timestamp),
index("idx_identityAuditLog_org_timestamp").on(
table.orgId,
table.timestamp
)
]
);
export type Limit = InferSelectModel<typeof limits>; export type Limit = InferSelectModel<typeof limits>;
export type Account = InferSelectModel<typeof account>; export type Account = InferSelectModel<typeof account>;
export type Certificate = InferSelectModel<typeof certificates>; export type Certificate = InferSelectModel<typeof certificates>;
@@ -283,5 +243,3 @@ export type RemoteExitNodeSession = InferSelectModel<
>; >;
export type ExitNodeOrg = InferSelectModel<typeof exitNodeOrgs>; export type ExitNodeOrg = InferSelectModel<typeof exitNodeOrgs>;
export type LoginPage = InferSelectModel<typeof loginPage>; export type LoginPage = InferSelectModel<typeof loginPage>;
export type ActionAuditLog = InferSelectModel<typeof actionAuditLog>;
export type AccessAuditLog = InferSelectModel<typeof accessAuditLog>;
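The actionAuditLog and accessAuditLog tables in this hunk are append-only logs keyed by org and timestamp; the composite (orgId, timestamp) indexes support per-org time-range queries. A sketch of appending one action entry with drizzle, assuming db and the table are re-exported from @server/db as elsewhere in this diff:

import { db, actionAuditLog } from "@server/db";

async function logAction() {
    await db.insert(actionAuditLog).values({
        timestamp: Math.floor(Date.now() / 1000), // epoch seconds, per the column comment
        orgId: "org-123",
        actorType: "user",
        actor: "jane@example.com",
        actorId: "user-42",
        action: "resource.create",
        metadata: JSON.stringify({ resourceId: 7 })
    });
}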

View File

@@ -6,12 +6,10 @@ import {
integer, integer,
bigint, bigint,
real, real,
text, text
index
} from "drizzle-orm/pg-core"; } from "drizzle-orm/pg-core";
import { InferSelectModel } from "drizzle-orm"; import { InferSelectModel } from "drizzle-orm";
import { randomUUID } from "crypto"; import { randomUUID } from "crypto";
import { alias } from "yargs";
export const domains = pgTable("domains", { export const domains = pgTable("domains", {
domainId: varchar("domainId").primaryKey(), domainId: varchar("domainId").primaryKey(),
@@ -20,41 +18,15 @@ export const domains = pgTable("domains", {
type: varchar("type"), // "ns", "cname", "wildcard" type: varchar("type"), // "ns", "cname", "wildcard"
verified: boolean("verified").notNull().default(false), verified: boolean("verified").notNull().default(false),
failed: boolean("failed").notNull().default(false), failed: boolean("failed").notNull().default(false),
tries: integer("tries").notNull().default(0), tries: integer("tries").notNull().default(0)
certResolver: varchar("certResolver"),
customCertResolver: varchar("customCertResolver"),
preferWildcardCert: boolean("preferWildcardCert")
});
export const dnsRecords = pgTable("dnsRecords", {
id: serial("id").primaryKey(),
domainId: varchar("domainId")
.notNull()
.references(() => domains.domainId, { onDelete: "cascade" }),
recordType: varchar("recordType").notNull(), // "NS" | "CNAME" | "A" | "TXT"
baseDomain: varchar("baseDomain"),
value: varchar("value").notNull(),
verified: boolean("verified").notNull().default(false)
}); });
export const orgs = pgTable("orgs", { export const orgs = pgTable("orgs", {
orgId: varchar("orgId").primaryKey(), orgId: varchar("orgId").primaryKey(),
name: varchar("name").notNull(), name: varchar("name").notNull(),
subnet: varchar("subnet"), subnet: varchar("subnet"),
utilitySubnet: varchar("utilitySubnet"), // this is the subnet for utility addresses
createdAt: text("createdAt"), createdAt: text("createdAt"),
requireTwoFactor: boolean("requireTwoFactor"), settings: text("settings") // JSON blob of org-specific settings
maxSessionLengthHours: integer("maxSessionLengthHours"),
passwordExpiryDays: integer("passwordExpiryDays"),
settingsLogRetentionDaysRequest: integer("settingsLogRetentionDaysRequest") // where 0 = dont keep logs and -1 = keep forever, and 9001 = end of the following year
.notNull()
.default(7),
settingsLogRetentionDaysAccess: integer("settingsLogRetentionDaysAccess") // where 0 = dont keep logs and -1 = keep forever and 9001 = end of the following year
.notNull()
.default(0),
settingsLogRetentionDaysAction: integer("settingsLogRetentionDaysAction") // where 0 = dont keep logs and -1 = keep forever and 9001 = end of the following year
.notNull()
.default(0)
}); });
export const orgDomains = pgTable("orgDomains", { export const orgDomains = pgTable("orgDomains", {
@@ -90,7 +62,8 @@ export const sites = pgTable("sites", {
publicKey: varchar("publicKey"), publicKey: varchar("publicKey"),
lastHolePunch: bigint("lastHolePunch", { mode: "number" }), lastHolePunch: bigint("lastHolePunch", { mode: "number" }),
listenPort: integer("listenPort"), listenPort: integer("listenPort"),
dockerSocketEnabled: boolean("dockerSocketEnabled").notNull().default(true) dockerSocketEnabled: boolean("dockerSocketEnabled").notNull().default(true),
remoteSubnets: text("remoteSubnets") // comma-separated list of subnets that this site can access
}); });
export const resources = pgTable("resources", { export const resources = pgTable("resources", {
@@ -127,11 +100,9 @@ export const resources = pgTable("resources", {
setHostHeader: varchar("setHostHeader"), setHostHeader: varchar("setHostHeader"),
enableProxy: boolean("enableProxy").default(true), enableProxy: boolean("enableProxy").default(true),
skipToIdpId: integer("skipToIdpId").references(() => idp.idpId, { skipToIdpId: integer("skipToIdpId").references(() => idp.idpId, {
onDelete: "set null" onDelete: "cascade"
}), }),
headers: text("headers"), // comma-separated list of headers to add to the request headers: text("headers") // comma-separated list of headers to add to the request
proxyProtocol: boolean("proxyProtocol").notNull().default(false),
proxyProtocolVersion: integer("proxyProtocolVersion").default(1)
}); });
export const targets = pgTable("targets", { export const targets = pgTable("targets", {
@@ -176,8 +147,7 @@ export const targetHealthCheck = pgTable("targetHealthCheck", {
hcFollowRedirects: boolean("hcFollowRedirects").default(true), hcFollowRedirects: boolean("hcFollowRedirects").default(true),
hcMethod: varchar("hcMethod").default("GET"), hcMethod: varchar("hcMethod").default("GET"),
hcStatus: integer("hcStatus"), // http code hcStatus: integer("hcStatus"), // http code
hcHealth: text("hcHealth").default("unknown"), // "unknown", "healthy", "unhealthy" hcHealth: text("hcHealth").default("unknown") // "unknown", "healthy", "unhealthy"
hcTlsServerName: text("hcTlsServerName")
}); });
export const exitNodes = pgTable("exitNodes", { export const exitNodes = pgTable("exitNodes", {
@@ -206,41 +176,11 @@ export const siteResources = pgTable("siteResources", {
.references(() => orgs.orgId, { onDelete: "cascade" }), .references(() => orgs.orgId, { onDelete: "cascade" }),
niceId: varchar("niceId").notNull(), niceId: varchar("niceId").notNull(),
name: varchar("name").notNull(), name: varchar("name").notNull(),
mode: varchar("mode").notNull(), // "host" | "cidr" | "port" protocol: varchar("protocol").notNull(),
protocol: varchar("protocol"), // only for port mode proxyPort: integer("proxyPort").notNull(),
proxyPort: integer("proxyPort"), // only for port mode destinationPort: integer("destinationPort").notNull(),
destinationPort: integer("destinationPort"), // only for port mode destinationIp: varchar("destinationIp").notNull(),
destination: varchar("destination").notNull(), // ip, cidr, hostname; validate against the mode enabled: boolean("enabled").notNull().default(true)
enabled: boolean("enabled").notNull().default(true),
alias: varchar("alias"),
aliasAddress: varchar("aliasAddress")
});
export const clientSiteResources = pgTable("clientSiteResources", {
clientId: integer("clientId")
.notNull()
.references(() => clients.clientId, { onDelete: "cascade" }),
siteResourceId: integer("siteResourceId")
.notNull()
.references(() => siteResources.siteResourceId, { onDelete: "cascade" })
});
export const roleSiteResources = pgTable("roleSiteResources", {
roleId: integer("roleId")
.notNull()
.references(() => roles.roleId, { onDelete: "cascade" }),
siteResourceId: integer("siteResourceId")
.notNull()
.references(() => siteResources.siteResourceId, { onDelete: "cascade" })
});
export const userSiteResources = pgTable("userSiteResources", {
userId: varchar("userId")
.notNull()
.references(() => users.userId, { onDelete: "cascade" }),
siteResourceId: integer("siteResourceId")
.notNull()
.references(() => siteResources.siteResourceId, { onDelete: "cascade" })
}); });
export const users = pgTable("user", { export const users = pgTable("user", {
@@ -260,8 +200,7 @@ export const users = pgTable("user", {
dateCreated: varchar("dateCreated").notNull(), dateCreated: varchar("dateCreated").notNull(),
termsAcceptedTimestamp: varchar("termsAcceptedTimestamp"), termsAcceptedTimestamp: varchar("termsAcceptedTimestamp"),
termsVersion: varchar("termsVersion"), termsVersion: varchar("termsVersion"),
serverAdmin: boolean("serverAdmin").notNull().default(false), serverAdmin: boolean("serverAdmin").notNull().default(false)
lastPasswordChange: bigint("lastPasswordChange", { mode: "number" })
}); });
export const newts = pgTable("newt", { export const newts = pgTable("newt", {
@@ -287,9 +226,7 @@ export const sessions = pgTable("session", {
userId: varchar("userId") userId: varchar("userId")
.notNull() .notNull()
.references(() => users.userId, { onDelete: "cascade" }), .references(() => users.userId, { onDelete: "cascade" }),
expiresAt: bigint("expiresAt", { mode: "number" }).notNull(), expiresAt: bigint("expiresAt", { mode: "number" }).notNull()
issuedAt: bigint("issuedAt", { mode: "number" }),
deviceAuthUsed: boolean("deviceAuthUsed").notNull().default(false)
}); });
export const newtSessions = pgTable("newtSession", { export const newtSessions = pgTable("newtSession", {
@@ -444,14 +381,6 @@ export const resourcePassword = pgTable("resourcePassword", {
passwordHash: varchar("passwordHash").notNull() passwordHash: varchar("passwordHash").notNull()
}); });
export const resourceHeaderAuth = pgTable("resourceHeaderAuth", {
headerAuthId: serial("headerAuthId").primaryKey(),
resourceId: integer("resourceId")
.notNull()
.references(() => resources.resourceId, { onDelete: "cascade" }),
headerAuthHash: varchar("headerAuthHash").notNull()
});
export const resourceAccessToken = pgTable("resourceAccessToken", { export const resourceAccessToken = pgTable("resourceAccessToken", {
accessTokenId: varchar("accessTokenId").primaryKey(), accessTokenId: varchar("accessTokenId").primaryKey(),
orgId: varchar("orgId") orgId: varchar("orgId")
@@ -506,8 +435,7 @@ export const resourceSessions = pgTable("resourceSessions", {
{ {
onDelete: "cascade" onDelete: "cascade"
} }
), )
issuedAt: bigint("issuedAt", { mode: "number" })
}); });
export const resourceWhitelist = pgTable("resourceWhitelist", { export const resourceWhitelist = pgTable("resourceWhitelist", {
@@ -538,6 +466,8 @@ export const resourceRules = pgTable("resourceRules", {
resourceId: integer("resourceId") resourceId: integer("resourceId")
.notNull() .notNull()
.references(() => resources.resourceId, { onDelete: "cascade" }), .references(() => resources.resourceId, { onDelete: "cascade" }),
templateRuleId: integer("templateRuleId")
.references(() => templateRules.ruleId, { onDelete: "cascade" }),
enabled: boolean("enabled").notNull().default(true), enabled: boolean("enabled").notNull().default(true),
priority: integer("priority").notNull(), priority: integer("priority").notNull(),
action: varchar("action").notNull(), // ACCEPT, DROP, PASS action: varchar("action").notNull(), // ACCEPT, DROP, PASS
@@ -545,6 +475,40 @@ export const resourceRules = pgTable("resourceRules", {
value: varchar("value").notNull() value: varchar("value").notNull()
}); });
// Rule templates (reusable rule sets)
export const ruleTemplates = pgTable("ruleTemplates", {
templateId: varchar("templateId").primaryKey(),
orgId: varchar("orgId")
.notNull()
.references(() => orgs.orgId, { onDelete: "cascade" }),
name: varchar("name").notNull(),
description: varchar("description"),
createdAt: bigint("createdAt", { mode: "number" }).notNull()
});
// Rules within templates
export const templateRules = pgTable("templateRules", {
ruleId: serial("ruleId").primaryKey(),
templateId: varchar("templateId")
.notNull()
.references(() => ruleTemplates.templateId, { onDelete: "cascade" }),
enabled: boolean("enabled").notNull().default(true),
priority: integer("priority").notNull(),
action: varchar("action").notNull(), // ACCEPT, DROP
match: varchar("match").notNull(), // CIDR, IP, PATH
value: varchar("value").notNull()
});
// Template assignments to resources
export const resourceTemplates = pgTable("resourceTemplates", {
resourceId: integer("resourceId")
.notNull()
.references(() => resources.resourceId, { onDelete: "cascade" }),
templateId: varchar("templateId")
.notNull()
.references(() => ruleTemplates.templateId, { onDelete: "cascade" })
});
export const supporterKey = pgTable("supporterKey", { export const supporterKey = pgTable("supporterKey", {
keyId: serial("keyId").primaryKey(), keyId: serial("keyId").primaryKey(),
key: varchar("key").notNull(), key: varchar("key").notNull(),
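Taken together, the three template tables above support copy-on-assign propagation: assigning a template to a resource materializes each templateRules row as a resourceRules row carrying a templateRuleId back-reference, and the cascade on that foreign key removes the copies whenever the template rule (or the whole template) is deleted. A sketch under those assumptions; assignTemplate is a hypothetical helper, with db and the tables assumed re-exported from @server/db:

import { eq } from "drizzle-orm";
import { db, templateRules, resourceRules, resourceTemplates } from "@server/db";

async function assignTemplate(resourceId: number, templateId: string) {
    await db.insert(resourceTemplates).values({ resourceId, templateId });

    const rules = await db
        .select()
        .from(templateRules)
        .where(eq(templateRules.templateId, templateId));

    for (const rule of rules) {
        await db.insert(resourceRules).values({
            resourceId,
            templateRuleId: rule.ruleId, // cascade deletes this copy with its template rule
            enabled: rule.enabled,
            priority: rule.priority,
            action: rule.action,
            match: rule.match,
            value: rule.value
        });
    }
}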
@@ -631,7 +595,7 @@ export const idpOrg = pgTable("idpOrg", {
}); });
export const clients = pgTable("clients", { export const clients = pgTable("clients", {
clientId: serial("clientId").primaryKey(), clientId: serial("id").primaryKey(),
orgId: varchar("orgId") orgId: varchar("orgId")
.references(() => orgs.orgId, { .references(() => orgs.orgId, {
onDelete: "cascade" onDelete: "cascade"
@@ -640,12 +604,6 @@ export const clients = pgTable("clients", {
exitNodeId: integer("exitNode").references(() => exitNodes.exitNodeId, { exitNodeId: integer("exitNode").references(() => exitNodes.exitNodeId, {
onDelete: "set null" onDelete: "set null"
}), }),
userId: text("userId").references(() => users.userId, {
// optionally tied to a user and in this case delete when the user deletes
onDelete: "cascade"
}),
niceId: varchar("niceId").notNull(),
olmId: text("olmId"), // to lock it to a specific olm optionally
name: varchar("name").notNull(), name: varchar("name").notNull(),
pubKey: varchar("pubKey"), pubKey: varchar("pubKey"),
subnet: varchar("subnet").notNull(), subnet: varchar("subnet").notNull(),
@@ -660,40 +618,23 @@ export const clients = pgTable("clients", {
maxConnections: integer("maxConnections") maxConnections: integer("maxConnections")
}); });
export const clientSitesAssociationsCache = pgTable( export const clientSites = pgTable("clientSites", {
"clientSitesAssociationsCache", clientId: integer("clientId")
{ .notNull()
clientId: integer("clientId") // not a foreign key here so after its deleted the rebuild function can delete it and send the message .references(() => clients.clientId, { onDelete: "cascade" }),
.notNull(), siteId: integer("siteId")
siteId: integer("siteId").notNull(), .notNull()
isRelayed: boolean("isRelayed").notNull().default(false), .references(() => sites.siteId, { onDelete: "cascade" }),
endpoint: varchar("endpoint"), isRelayed: boolean("isRelayed").notNull().default(false),
publicKey: varchar("publicKey") // this will act as the session's public key for hole punching so we can track when it changes endpoint: varchar("endpoint")
} });
);
export const clientSiteResourcesAssociationsCache = pgTable(
"clientSiteResourcesAssociationsCache",
{
clientId: integer("clientId") // not a foreign key here so after its deleted the rebuild function can delete it and send the message
.notNull(),
siteResourceId: integer("siteResourceId").notNull()
}
);
export const olms = pgTable("olms", { export const olms = pgTable("olms", {
olmId: varchar("id").primaryKey(), olmId: varchar("id").primaryKey(),
secretHash: varchar("secretHash").notNull(), secretHash: varchar("secretHash").notNull(),
dateCreated: varchar("dateCreated").notNull(), dateCreated: varchar("dateCreated").notNull(),
version: text("version"), version: text("version"),
agent: text("agent"),
name: varchar("name"),
clientId: integer("clientId").references(() => clients.clientId, { clientId: integer("clientId").references(() => clients.clientId, {
// we will switch this depending on the current org it wants to connect to
onDelete: "set null"
}),
userId: text("userId").references(() => users.userId, {
// optionally tied to a user and in this case delete when the user deletes
onDelete: "cascade" onDelete: "cascade"
}) })
}); });
@@ -758,72 +699,6 @@ export const setupTokens = pgTable("setupTokens", {
dateUsed: varchar("dateUsed") dateUsed: varchar("dateUsed")
}); });
// Blueprint runs
export const blueprints = pgTable("blueprints", {
blueprintId: serial("blueprintId").primaryKey(),
orgId: text("orgId")
.references(() => orgs.orgId, {
onDelete: "cascade"
})
.notNull(),
name: varchar("name").notNull(),
source: varchar("source").notNull(),
createdAt: integer("createdAt").notNull(),
succeeded: boolean("succeeded").notNull(),
contents: text("contents").notNull(),
message: text("message")
});
export const requestAuditLog = pgTable(
"requestAuditLog",
{
id: serial("id").primaryKey(),
timestamp: integer("timestamp").notNull(), // this is EPOCH time in seconds
orgId: text("orgId").references(() => orgs.orgId, {
onDelete: "cascade"
}),
action: boolean("action").notNull(),
reason: integer("reason").notNull(),
actorType: text("actorType"),
actor: text("actor"),
actorId: text("actorId"),
resourceId: integer("resourceId"),
ip: text("ip"),
location: text("location"),
userAgent: text("userAgent"),
metadata: text("metadata"),
headers: text("headers"), // JSON blob
query: text("query"), // JSON blob
originalRequestURL: text("originalRequestURL"),
scheme: text("scheme"),
host: text("host"),
path: text("path"),
method: text("method"),
tls: boolean("tls")
},
(table) => [
index("idx_requestAuditLog_timestamp").on(table.timestamp),
index("idx_requestAuditLog_org_timestamp").on(
table.orgId,
table.timestamp
)
]
);
export const deviceWebAuthCodes = pgTable("deviceWebAuthCodes", {
codeId: serial("codeId").primaryKey(),
code: text("code").notNull().unique(),
ip: text("ip"),
city: text("city"),
deviceName: text("deviceName"),
applicationName: text("applicationName").notNull(),
expiresAt: bigint("expiresAt", { mode: "number" }).notNull(),
createdAt: bigint("createdAt", { mode: "number" }).notNull(),
verified: boolean("verified").notNull().default(false),
userId: varchar("userId").references(() => users.userId, {
onDelete: "cascade"
})
});
export type Org = InferSelectModel<typeof orgs>; export type Org = InferSelectModel<typeof orgs>;
export type User = InferSelectModel<typeof users>; export type User = InferSelectModel<typeof users>;
export type Site = InferSelectModel<typeof sites>; export type Site = InferSelectModel<typeof sites>;
@@ -851,7 +726,6 @@ export type UserOrg = InferSelectModel<typeof userOrgs>;
export type ResourceSession = InferSelectModel<typeof resourceSessions>; export type ResourceSession = InferSelectModel<typeof resourceSessions>;
export type ResourcePincode = InferSelectModel<typeof resourcePincode>; export type ResourcePincode = InferSelectModel<typeof resourcePincode>;
export type ResourcePassword = InferSelectModel<typeof resourcePassword>; export type ResourcePassword = InferSelectModel<typeof resourcePassword>;
export type ResourceHeaderAuth = InferSelectModel<typeof resourceHeaderAuth>;
export type ResourceOtp = InferSelectModel<typeof resourceOtp>; export type ResourceOtp = InferSelectModel<typeof resourceOtp>;
export type ResourceAccessToken = InferSelectModel<typeof resourceAccessToken>; export type ResourceAccessToken = InferSelectModel<typeof resourceAccessToken>;
export type ResourceWhitelist = InferSelectModel<typeof resourceWhitelist>; export type ResourceWhitelist = InferSelectModel<typeof resourceWhitelist>;
@@ -864,7 +738,7 @@ export type ApiKey = InferSelectModel<typeof apiKeys>;
export type ApiKeyAction = InferSelectModel<typeof apiKeyActions>; export type ApiKeyAction = InferSelectModel<typeof apiKeyActions>;
export type ApiKeyOrg = InferSelectModel<typeof apiKeyOrg>; export type ApiKeyOrg = InferSelectModel<typeof apiKeyOrg>;
export type Client = InferSelectModel<typeof clients>; export type Client = InferSelectModel<typeof clients>;
export type ClientSite = InferSelectModel<typeof clientSitesAssociationsCache>; export type ClientSite = InferSelectModel<typeof clientSites>;
export type Olm = InferSelectModel<typeof olms>; export type Olm = InferSelectModel<typeof olms>;
export type OlmSession = InferSelectModel<typeof olmSessions>; export type OlmSession = InferSelectModel<typeof olmSessions>;
export type UserClient = InferSelectModel<typeof userClients>; export type UserClient = InferSelectModel<typeof userClients>;
@@ -874,10 +748,6 @@ export type SiteResource = InferSelectModel<typeof siteResources>;
export type SetupToken = InferSelectModel<typeof setupTokens>; export type SetupToken = InferSelectModel<typeof setupTokens>;
export type HostMeta = InferSelectModel<typeof hostMeta>; export type HostMeta = InferSelectModel<typeof hostMeta>;
export type TargetHealthCheck = InferSelectModel<typeof targetHealthCheck>; export type TargetHealthCheck = InferSelectModel<typeof targetHealthCheck>;
export type IdpOidcConfig = InferSelectModel<typeof idpOidcConfig>; export type RuleTemplate = InferSelectModel<typeof ruleTemplates>;
export type Blueprint = InferSelectModel<typeof blueprints>; export type TemplateRule = InferSelectModel<typeof templateRules>;
export type LicenseKey = InferSelectModel<typeof licenseKey>; export type ResourceTemplate = InferSelectModel<typeof resourceTemplates>;
export type SecurityKey = InferSelectModel<typeof securityKeys>;
export type WebauthnChallenge = InferSelectModel<typeof webauthnChallenge>;
export type DeviceWebAuthCode = InferSelectModel<typeof deviceWebAuthCodes>;
export type RequestAuditLog = InferSelectModel<typeof requestAuditLog>;

View File

@@ -14,14 +14,14 @@
// Simple test file for the rate limit service with Redis // Simple test file for the rate limit service with Redis
// Run with: npx ts-node rateLimitService.test.ts // Run with: npx ts-node rateLimitService.test.ts
import { RateLimitService } from "./rateLimit"; import { RateLimitService } from './rateLimit';
function generateClientId() { function generateClientId() {
return "client-" + Math.random().toString(36).substring(2, 15); return 'client-' + Math.random().toString(36).substring(2, 15);
} }
async function runTests() { async function runTests() {
console.log("Starting Rate Limit Service Tests...\n"); console.log('Starting Rate Limit Service Tests...\n');
const rateLimitService = new RateLimitService(); const rateLimitService = new RateLimitService();
let testsPassed = 0; let testsPassed = 0;
@@ -47,54 +47,36 @@ async function runTests() {
} }
// Test 1: Basic rate limiting // Test 1: Basic rate limiting
await test("Should allow requests under the limit", async () => { await test('Should allow requests under the limit', async () => {
const clientId = generateClientId(); const clientId = generateClientId();
const maxRequests = 5; const maxRequests = 5;
for (let i = 0; i < maxRequests - 1; i++) { for (let i = 0; i < maxRequests - 1; i++) {
const result = await rateLimitService.checkRateLimit( const result = await rateLimitService.checkRateLimit(clientId, undefined, maxRequests);
clientId,
undefined,
maxRequests
);
assert(!result.isLimited, `Request ${i + 1} should be allowed`); assert(!result.isLimited, `Request ${i + 1} should be allowed`);
assert( assert(result.totalHits === i + 1, `Expected ${i + 1} hits, got ${result.totalHits}`);
result.totalHits === i + 1,
`Expected ${i + 1} hits, got ${result.totalHits}`
);
} }
}); });
// Test 2: Rate limit blocking // Test 2: Rate limit blocking
await test("Should block requests over the limit", async () => { await test('Should block requests over the limit', async () => {
const clientId = generateClientId(); const clientId = generateClientId();
const maxRequests = 30; const maxRequests = 30;
// Use up all allowed requests // Use up all allowed requests
for (let i = 0; i < maxRequests - 1; i++) { for (let i = 0; i < maxRequests - 1; i++) {
const result = await rateLimitService.checkRateLimit( const result = await rateLimitService.checkRateLimit(clientId, undefined, maxRequests);
clientId,
undefined,
maxRequests
);
assert(!result.isLimited, `Request ${i + 1} should be allowed`); assert(!result.isLimited, `Request ${i + 1} should be allowed`);
} }
// Next request should be blocked // Next request should be blocked
const blockedResult = await rateLimitService.checkRateLimit( const blockedResult = await rateLimitService.checkRateLimit(clientId, undefined, maxRequests);
clientId, assert(blockedResult.isLimited, 'Request should be blocked');
undefined, assert(blockedResult.reason === 'global', 'Should be blocked for global reason');
maxRequests
);
assert(blockedResult.isLimited, "Request should be blocked");
assert(
blockedResult.reason === "global",
"Should be blocked for global reason"
);
}); });
// Test 3: Message type limits // Test 3: Message type limits
await test("Should handle message type limits", async () => { await test('Should handle message type limits', async () => {
const clientId = generateClientId(); const clientId = generateClientId();
const globalMax = 10; const globalMax = 10;
const messageTypeMax = 2; const messageTypeMax = 2;
@@ -103,63 +85,53 @@ async function runTests() {
for (let i = 0; i < messageTypeMax - 1; i++) { for (let i = 0; i < messageTypeMax - 1; i++) {
const result = await rateLimitService.checkRateLimit( const result = await rateLimitService.checkRateLimit(
clientId, clientId,
"ping", 'ping',
globalMax, globalMax,
messageTypeMax messageTypeMax
); );
assert( assert(!result.isLimited, `Ping message ${i + 1} should be allowed`);
!result.isLimited,
`Ping message ${i + 1} should be allowed`
);
} }
// Next 'ping' should be blocked // Next 'ping' should be blocked
const blockedResult = await rateLimitService.checkRateLimit( const blockedResult = await rateLimitService.checkRateLimit(
clientId, clientId,
"ping", 'ping',
globalMax, globalMax,
messageTypeMax messageTypeMax
); );
assert(blockedResult.isLimited, "Ping message should be blocked"); assert(blockedResult.isLimited, 'Ping message should be blocked');
assert( assert(blockedResult.reason === 'message_type:ping', 'Should be blocked for message type');
blockedResult.reason === "message_type:ping",
"Should be blocked for message type"
);
// Other message types should still work // Other message types should still work
const otherResult = await rateLimitService.checkRateLimit( const otherResult = await rateLimitService.checkRateLimit(
clientId, clientId,
"pong", 'pong',
globalMax, globalMax,
messageTypeMax messageTypeMax
); );
assert(!otherResult.isLimited, "Pong message should be allowed"); assert(!otherResult.isLimited, 'Pong message should be allowed');
}); });
// Test 4: Reset functionality // Test 4: Reset functionality
await test("Should reset client correctly", async () => { await test('Should reset client correctly', async () => {
const clientId = generateClientId(); const clientId = generateClientId();
const maxRequests = 3; const maxRequests = 3;
// Use up some requests // Use up some requests
await rateLimitService.checkRateLimit(clientId, undefined, maxRequests); await rateLimitService.checkRateLimit(clientId, undefined, maxRequests);
await rateLimitService.checkRateLimit(clientId, "test", maxRequests); await rateLimitService.checkRateLimit(clientId, 'test', maxRequests);
// Reset the client // Reset the client
await rateLimitService.resetKey(clientId); await rateLimitService.resetKey(clientId);
// Should be able to make fresh requests // Should be able to make fresh requests
const result = await rateLimitService.checkRateLimit( const result = await rateLimitService.checkRateLimit(clientId, undefined, maxRequests);
clientId, assert(!result.isLimited, 'Request after reset should be allowed');
undefined, assert(result.totalHits === 1, 'Should have 1 hit after reset');
maxRequests
);
assert(!result.isLimited, "Request after reset should be allowed");
assert(result.totalHits === 1, "Should have 1 hit after reset");
}); });
// Test 5: Different clients are independent // Test 5: Different clients are independent
await test("Should handle different clients independently", async () => { await test('Should handle different clients independently', async () => {
const client1 = generateClientId(); const client1 = generateClientId();
const client2 = generateClientId(); const client2 = generateClientId();
const maxRequests = 2; const maxRequests = 2;
@@ -167,62 +139,43 @@ async function runTests() {
// Client 1 uses up their limit // Client 1 uses up their limit
await rateLimitService.checkRateLimit(client1, undefined, maxRequests); await rateLimitService.checkRateLimit(client1, undefined, maxRequests);
await rateLimitService.checkRateLimit(client1, undefined, maxRequests); await rateLimitService.checkRateLimit(client1, undefined, maxRequests);
const client1Blocked = await rateLimitService.checkRateLimit( const client1Blocked = await rateLimitService.checkRateLimit(client1, undefined, maxRequests);
client1, assert(client1Blocked.isLimited, 'Client 1 should be blocked');
undefined,
maxRequests
);
assert(client1Blocked.isLimited, "Client 1 should be blocked");
// Client 2 should still be able to make requests // Client 2 should still be able to make requests
const client2Result = await rateLimitService.checkRateLimit( const client2Result = await rateLimitService.checkRateLimit(client2, undefined, maxRequests);
client2, assert(!client2Result.isLimited, 'Client 2 should not be blocked');
undefined, assert(client2Result.totalHits === 1, 'Client 2 should have 1 hit');
maxRequests
);
assert(!client2Result.isLimited, "Client 2 should not be blocked");
assert(client2Result.totalHits === 1, "Client 2 should have 1 hit");
}); });
// Test 6: Decrement functionality // Test 6: Decrement functionality
await test("Should decrement correctly", async () => { await test('Should decrement correctly', async () => {
const clientId = generateClientId(); const clientId = generateClientId();
const maxRequests = 5; const maxRequests = 5;
// Make some requests // Make some requests
await rateLimitService.checkRateLimit(clientId, undefined, maxRequests); await rateLimitService.checkRateLimit(clientId, undefined, maxRequests);
await rateLimitService.checkRateLimit(clientId, undefined, maxRequests); await rateLimitService.checkRateLimit(clientId, undefined, maxRequests);
let result = await rateLimitService.checkRateLimit( let result = await rateLimitService.checkRateLimit(clientId, undefined, maxRequests);
clientId, assert(result.totalHits === 3, 'Should have 3 hits before decrement');
undefined,
maxRequests
);
assert(result.totalHits === 3, "Should have 3 hits before decrement");
// Decrement // Decrement
await rateLimitService.decrementRateLimit(clientId); await rateLimitService.decrementRateLimit(clientId);
// Next request should reflect the decrement // Next request should reflect the decrement
result = await rateLimitService.checkRateLimit( result = await rateLimitService.checkRateLimit(clientId, undefined, maxRequests);
clientId, assert(result.totalHits === 3, 'Should have 3 hits after decrement + increment');
undefined,
maxRequests
);
assert(
result.totalHits === 3,
"Should have 3 hits after decrement + increment"
);
}); });
// Wait a moment for any pending Redis operations // Wait a moment for any pending Redis operations
console.log("\nWaiting for Redis sync..."); console.log('\nWaiting for Redis sync...');
await new Promise((resolve) => setTimeout(resolve, 1000)); await new Promise(resolve => setTimeout(resolve, 1000));
// Force sync to test Redis integration // Force sync to test Redis integration
await test("Should sync to Redis", async () => { await test('Should sync to Redis', async () => {
await rateLimitService.forceSyncAllPendingData(); await rateLimitService.forceSyncAllPendingData();
// If this doesn't throw, Redis sync is working // If this doesn't throw, Redis sync is working
assert(true, "Redis sync completed"); assert(true, 'Redis sync completed');
}); });
// Cleanup // Cleanup
@@ -234,16 +187,16 @@ async function runTests() {
console.log(`❌ Failed: ${testsTotal - testsPassed}/${testsTotal}`); console.log(`❌ Failed: ${testsTotal - testsPassed}/${testsTotal}`);
if (testsPassed === testsTotal) { if (testsPassed === testsTotal) {
console.log("\n🎉 All tests passed!"); console.log('\n🎉 All tests passed!');
process.exit(0); process.exit(0);
} else { } else {
console.log("\n💥 Some tests failed!"); console.log('\n💥 Some tests failed!');
process.exit(1); process.exit(1);
} }
} }
// Run the tests // Run the tests
runTests().catch((error) => { runTests().catch(error => {
console.error("Test runner error:", error); console.error('Test runner error:', error);
process.exit(1); process.exit(1);
}); });

View File

@@ -12,7 +12,7 @@
*/ */
import logger from "@server/logger"; import logger from "@server/logger";
import redisManager from "#private/lib/redis"; import redisManager from "@server/db/private/redis";
import { build } from "@server/build"; import { build } from "@server/build";
// Rate limiting configuration // Rate limiting configuration
@@ -40,8 +40,7 @@ interface RateLimitResult {
export class RateLimitService { export class RateLimitService {
private localRateLimitTracker: Map<string, RateLimitTracker> = new Map(); private localRateLimitTracker: Map<string, RateLimitTracker> = new Map();
private localMessageTypeRateLimitTracker: Map<string, RateLimitTracker> = private localMessageTypeRateLimitTracker: Map<string, RateLimitTracker> = new Map();
new Map();
private cleanupInterval: NodeJS.Timeout | null = null; private cleanupInterval: NodeJS.Timeout | null = null;
private forceSyncInterval: NodeJS.Timeout | null = null; private forceSyncInterval: NodeJS.Timeout | null = null;
@@ -69,74 +68,21 @@ export class RateLimitService {
return `ratelimit:${clientId}`; return `ratelimit:${clientId}`;
} }
private getMessageTypeRateLimitKey( private getMessageTypeRateLimitKey(clientId: string, messageType: string): string {
clientId: string,
messageType: string
): string {
return `ratelimit:${clientId}:${messageType}`; return `ratelimit:${clientId}:${messageType}`;
} }
// Helper function to clean up old timestamp fields from a Redis hash
private async cleanupOldTimestamps(
key: string,
windowStart: number
): Promise<void> {
if (!redisManager.isRedisEnabled()) return;
try {
const client = redisManager.getClient();
if (!client) return;
// Get all fields in the hash
const allData = await redisManager.hgetall(key);
if (!allData || Object.keys(allData).length === 0) return;
// Find fields that are older than the window
const fieldsToDelete: string[] = [];
for (const timestamp of Object.keys(allData)) {
const time = parseInt(timestamp);
if (time < windowStart) {
fieldsToDelete.push(timestamp);
}
}
// Delete old fields in batches to avoid call stack size exceeded errors
// The spread operator can cause issues with very large arrays
if (fieldsToDelete.length > 0) {
const batchSize = 1000; // Process 1000 fields at a time
for (let i = 0; i < fieldsToDelete.length; i += batchSize) {
const batch = fieldsToDelete.slice(i, i + batchSize);
await client.hdel(key, ...batch);
}
logger.debug(
`Cleaned up ${fieldsToDelete.length} old timestamp fields from ${key}`
);
}
} catch (error) {
logger.error(
`Failed to cleanup old timestamps for key ${key}:`,
error
);
// Don't throw - cleanup failures shouldn't block rate limiting
}
}
// Helper function to sync local rate limit data to Redis // Helper function to sync local rate limit data to Redis
private async syncRateLimitToRedis( private async syncRateLimitToRedis(
clientId: string, clientId: string,
tracker: RateLimitTracker tracker: RateLimitTracker
): Promise<void> { ): Promise<void> {
if (!redisManager.isRedisEnabled() || tracker.pendingCount === 0) if (!redisManager.isRedisEnabled() || tracker.pendingCount === 0) return;
return;
try { try {
const currentTime = Math.floor(Date.now() / 1000); const currentTime = Math.floor(Date.now() / 1000);
const windowStart = currentTime - RATE_LIMIT_WINDOW;
const globalKey = this.getRateLimitKey(clientId); const globalKey = this.getRateLimitKey(clientId);
// Clean up old timestamp fields before writing
await this.cleanupOldTimestamps(globalKey, windowStart);
// Get current value and add pending count // Get current value and add pending count
const currentValue = await redisManager.hget( const currentValue = await redisManager.hget(
globalKey, globalKey,
@@ -145,13 +91,9 @@ export class RateLimitService {
const newValue = ( const newValue = (
parseInt(currentValue || "0") + tracker.pendingCount parseInt(currentValue || "0") + tracker.pendingCount
).toString(); ).toString();
await redisManager.hset( await redisManager.hset(globalKey, currentTime.toString(), newValue);
globalKey,
currentTime.toString(),
newValue
);
// Set TTL using the client directly - this prevents the key from persisting forever // Set TTL using the client directly
if (redisManager.getClient()) { if (redisManager.getClient()) {
await redisManager await redisManager
.getClient() .getClient()
@@ -162,9 +104,7 @@ export class RateLimitService {
tracker.lastSyncedCount = tracker.count; tracker.lastSyncedCount = tracker.count;
tracker.pendingCount = 0; tracker.pendingCount = 0;
logger.debug( logger.debug(`Synced global rate limit to Redis for client ${clientId}`);
`Synced global rate limit to Redis for client ${clientId}`
);
} catch (error) { } catch (error) {
logger.error("Failed to sync global rate limit to Redis:", error); logger.error("Failed to sync global rate limit to Redis:", error);
} }
@@ -175,19 +115,11 @@ export class RateLimitService {
messageType: string, messageType: string,
tracker: RateLimitTracker tracker: RateLimitTracker
): Promise<void> { ): Promise<void> {
if (!redisManager.isRedisEnabled() || tracker.pendingCount === 0) if (!redisManager.isRedisEnabled() || tracker.pendingCount === 0) return;
return;
try { try {
const currentTime = Math.floor(Date.now() / 1000); const currentTime = Math.floor(Date.now() / 1000);
const windowStart = currentTime - RATE_LIMIT_WINDOW; const messageTypeKey = this.getMessageTypeRateLimitKey(clientId, messageType);
const messageTypeKey = this.getMessageTypeRateLimitKey(
clientId,
messageType
);
// Clean up old timestamp fields before writing
await this.cleanupOldTimestamps(messageTypeKey, windowStart);
// Get current value and add pending count // Get current value and add pending count
const currentValue = await redisManager.hget( const currentValue = await redisManager.hget(
@@ -203,7 +135,7 @@ export class RateLimitService {
newValue newValue
); );
// Set TTL using the client directly - this prevents the key from persisting forever // Set TTL using the client directly
if (redisManager.getClient()) { if (redisManager.getClient()) {
await redisManager await redisManager
.getClient() .getClient()
@@ -218,17 +150,12 @@ export class RateLimitService {
`Synced message type rate limit to Redis for client ${clientId}, type ${messageType}` `Synced message type rate limit to Redis for client ${clientId}, type ${messageType}`
); );
} catch (error) { } catch (error) {
logger.error( logger.error("Failed to sync message type rate limit to Redis:", error);
"Failed to sync message type rate limit to Redis:",
error
);
} }
} }
// Initialize local tracker from Redis data // Initialize local tracker from Redis data
private async initializeLocalTracker( private async initializeLocalTracker(clientId: string): Promise<RateLimitTracker> {
clientId: string
): Promise<RateLimitTracker> {
const currentTime = Math.floor(Date.now() / 1000); const currentTime = Math.floor(Date.now() / 1000);
const windowStart = currentTime - RATE_LIMIT_WINDOW; const windowStart = currentTime - RATE_LIMIT_WINDOW;
@@ -243,16 +170,10 @@ export class RateLimitService {
try { try {
const globalKey = this.getRateLimitKey(clientId); const globalKey = this.getRateLimitKey(clientId);
// Clean up old timestamp fields before reading
await this.cleanupOldTimestamps(globalKey, windowStart);
const globalRateLimitData = await redisManager.hgetall(globalKey); const globalRateLimitData = await redisManager.hgetall(globalKey);
let count = 0; let count = 0;
for (const [timestamp, countStr] of Object.entries( for (const [timestamp, countStr] of Object.entries(globalRateLimitData)) {
globalRateLimitData
)) {
                 const time = parseInt(timestamp);
                 if (time >= windowStart) {
                     count += parseInt(countStr);
@@ -266,10 +187,7 @@ export class RateLimitService {
                 lastSyncedCount: count
             };
         } catch (error) {
-            logger.error(
-                "Failed to initialize global tracker from Redis:",
-                error
-            );
+            logger.error("Failed to initialize global tracker from Redis:", error);
             return {
                 count: 0,
                 windowStart: currentTime,
@@ -296,21 +214,11 @@ export class RateLimitService {
         }

         try {
-            const messageTypeKey = this.getMessageTypeRateLimitKey(
-                clientId,
-                messageType
-            );
-
-            // Clean up old timestamp fields before reading
-            await this.cleanupOldTimestamps(messageTypeKey, windowStart);
-
-            const messageTypeRateLimitData =
-                await redisManager.hgetall(messageTypeKey);
+            const messageTypeKey = this.getMessageTypeRateLimitKey(clientId, messageType);
+            const messageTypeRateLimitData = await redisManager.hgetall(messageTypeKey);

             let count = 0;
-            for (const [timestamp, countStr] of Object.entries(
-                messageTypeRateLimitData
-            )) {
+            for (const [timestamp, countStr] of Object.entries(messageTypeRateLimitData)) {
                 const time = parseInt(timestamp);
                 if (time >= windowStart) {
                     count += parseInt(countStr);
@@ -324,10 +232,7 @@ export class RateLimitService {
                 lastSyncedCount: count
             };
         } catch (error) {
-            logger.error(
-                "Failed to initialize message type tracker from Redis:",
-                error
-            );
+            logger.error("Failed to initialize message type tracker from Redis:", error);
             return {
                 count: 0,
                 windowStart: currentTime,
@@ -369,10 +274,7 @@ export class RateLimitService {
                 isLimited: true,
                 reason: "global",
                 totalHits: globalTracker.count,
-                resetTime: new Date(
-                    (globalTracker.windowStart + Math.floor(windowMs / 1000)) *
-                        1000
-                )
+                resetTime: new Date((globalTracker.windowStart + Math.floor(windowMs / 1000)) * 1000)
             };
         }
@@ -384,32 +286,19 @@ export class RateLimitService {
         // Check message type specific rate limit if messageType is provided
         if (messageType) {
             const messageTypeKey = `${clientId}:${messageType}`;
-            let messageTypeTracker =
-                this.localMessageTypeRateLimitTracker.get(messageTypeKey);
+            let messageTypeTracker = this.localMessageTypeRateLimitTracker.get(messageTypeKey);

-            if (
-                !messageTypeTracker ||
-                messageTypeTracker.windowStart < windowStart
-            ) {
+            if (!messageTypeTracker || messageTypeTracker.windowStart < windowStart) {
                 // New window or first request for this message type - initialize from Redis if available
-                messageTypeTracker = await this.initializeMessageTypeTracker(
-                    clientId,
-                    messageType
-                );
+                messageTypeTracker = await this.initializeMessageTypeTracker(clientId, messageType);
                 messageTypeTracker.windowStart = currentTime;
-                this.localMessageTypeRateLimitTracker.set(
-                    messageTypeKey,
-                    messageTypeTracker
-                );
+                this.localMessageTypeRateLimitTracker.set(messageTypeKey, messageTypeTracker);
             }

             // Increment message type counters
             messageTypeTracker.count++;
             messageTypeTracker.pendingCount++;
-            this.localMessageTypeRateLimitTracker.set(
-                messageTypeKey,
-                messageTypeTracker
-            );
+            this.localMessageTypeRateLimitTracker.set(messageTypeKey, messageTypeTracker);

             // Check if message type limit would be exceeded
             if (messageTypeTracker.count >= messageTypeLimit) {
@@ -417,38 +306,25 @@ export class RateLimitService {
                     isLimited: true,
                     reason: `message_type:${messageType}`,
                     totalHits: messageTypeTracker.count,
-                    resetTime: new Date(
-                        (messageTypeTracker.windowStart +
-                            Math.floor(windowMs / 1000)) *
-                            1000
-                    )
+                    resetTime: new Date((messageTypeTracker.windowStart + Math.floor(windowMs / 1000)) * 1000)
                 };
             }

             // Sync to Redis if threshold reached
             if (messageTypeTracker.pendingCount >= REDIS_SYNC_THRESHOLD) {
-                this.syncMessageTypeRateLimitToRedis(
-                    clientId,
-                    messageType,
-                    messageTypeTracker
-                );
+                this.syncMessageTypeRateLimitToRedis(clientId, messageType, messageTypeTracker);
             }
         }

         return {
             isLimited: false,
             totalHits: globalTracker.count,
-            resetTime: new Date(
-                (globalTracker.windowStart + Math.floor(windowMs / 1000)) * 1000
-            )
+            resetTime: new Date((globalTracker.windowStart + Math.floor(windowMs / 1000)) * 1000)
         };
     }

     // Decrement function for skipSuccessfulRequests/skipFailedRequests functionality
-    async decrementRateLimit(
-        clientId: string,
-        messageType?: string
-    ): Promise<void> {
+    async decrementRateLimit(clientId: string, messageType?: string): Promise<void> {
         // Decrement global counter
         const globalTracker = this.localRateLimitTracker.get(clientId);
         if (globalTracker && globalTracker.count > 0) {
@@ -460,8 +336,7 @@ export class RateLimitService {
         // Decrement message type counter if provided
         if (messageType) {
             const messageTypeKey = `${clientId}:${messageType}`;
-            const messageTypeTracker =
-                this.localMessageTypeRateLimitTracker.get(messageTypeKey);
+            const messageTypeTracker = this.localMessageTypeRateLimitTracker.get(messageTypeKey);
             if (messageTypeTracker && messageTypeTracker.count > 0) {
                 messageTypeTracker.count--;
                 messageTypeTracker.pendingCount--;
@@ -489,13 +364,9 @@ export class RateLimitService {
             // Get all message type keys for this client and delete them
             const client = redisManager.getClient();
             if (client) {
-                const messageTypeKeys = await client.keys(
-                    `ratelimit:${clientId}:*`
-                );
+                const messageTypeKeys = await client.keys(`ratelimit:${clientId}:*`);
                 if (messageTypeKeys.length > 0) {
-                    await Promise.all(
-                        messageTypeKeys.map((key) => redisManager.del(key))
-                    );
+                    await Promise.all(messageTypeKeys.map(key => redisManager.del(key)));
                 }
             }
         }
@@ -507,10 +378,7 @@ export class RateLimitService {
         const windowStart = currentTime - RATE_LIMIT_WINDOW;

         // Clean up global rate limit tracking and sync pending data
-        for (const [
-            clientId,
-            tracker
-        ] of this.localRateLimitTracker.entries()) {
+        for (const [clientId, tracker] of this.localRateLimitTracker.entries()) {
             if (tracker.windowStart < windowStart) {
                 // Sync any pending data before cleanup
                 if (tracker.pendingCount > 0) {
@@ -521,19 +389,12 @@ export class RateLimitService {
         }

         // Clean up message type rate limit tracking and sync pending data
-        for (const [
-            key,
-            tracker
-        ] of this.localMessageTypeRateLimitTracker.entries()) {
+        for (const [key, tracker] of this.localMessageTypeRateLimitTracker.entries()) {
             if (tracker.windowStart < windowStart) {
                 // Sync any pending data before cleanup
                 if (tracker.pendingCount > 0) {
                     const [clientId, messageType] = key.split(":", 2);
-                    await this.syncMessageTypeRateLimitToRedis(
-                        clientId,
-                        messageType,
-                        tracker
-                    );
+                    await this.syncMessageTypeRateLimitToRedis(clientId, messageType, tracker);
                 }
                 this.localMessageTypeRateLimitTracker.delete(key);
             }
@@ -547,27 +408,17 @@ export class RateLimitService {
         logger.debug("Force syncing all pending rate limit data to Redis...");

         // Sync all pending global rate limits
-        for (const [
-            clientId,
-            tracker
-        ] of this.localRateLimitTracker.entries()) {
+        for (const [clientId, tracker] of this.localRateLimitTracker.entries()) {
             if (tracker.pendingCount > 0) {
                 await this.syncRateLimitToRedis(clientId, tracker);
             }
         }

         // Sync all pending message type rate limits
-        for (const [
-            key,
-            tracker
-        ] of this.localMessageTypeRateLimitTracker.entries()) {
+        for (const [key, tracker] of this.localMessageTypeRateLimitTracker.entries()) {
             if (tracker.pendingCount > 0) {
                 const [clientId, messageType] = key.split(":", 2);
-                await this.syncMessageTypeRateLimitToRedis(
-                    clientId,
-                    messageType,
-                    tracker
-                );
+                await this.syncMessageTypeRateLimitToRedis(clientId, messageType, tracker);
             }
         }
@@ -601,3 +452,7 @@ export class RateLimitService {

 // Export singleton instance
 export const rateLimitService = new RateLimitService();
+
+// Handle process termination
+process.on("SIGTERM", () => rateLimitService.cleanup());
+process.on("SIGINT", () => rateLimitService.cleanup());

View File

@@ -13,13 +13,13 @@
 import Redis, { RedisOptions } from "ioredis";
 import logger from "@server/logger";
-import privateConfig from "#private/lib/config";
+import config from "@server/lib/config";
 import { build } from "@server/build";

 class RedisManager {
     public client: Redis | null = null;
     private writeClient: Redis | null = null; // Master for writes
     private readClient: Redis | null = null; // Replica for reads
     private subscriber: Redis | null = null;
     private publisher: Redis | null = null;
     private isEnabled: boolean = false;
@@ -46,8 +46,7 @@ class RedisManager {
             this.isEnabled = false;
             return;
         }
-        this.isEnabled =
-            privateConfig.getRawPrivateConfig().flags.enable_redis || false;
+        this.isEnabled = config.getRawPrivateConfig().flags?.enable_redis || false;
         if (this.isEnabled) {
             this.initializeClients();
         }
@@ -64,19 +63,15 @@ class RedisManager {
     }

     private async triggerReconnectionCallbacks(): Promise<void> {
-        logger.info(
-            `Triggering ${this.reconnectionCallbacks.size} reconnection callbacks`
-        );
+        logger.info(`Triggering ${this.reconnectionCallbacks.size} reconnection callbacks`);

-        const promises = Array.from(this.reconnectionCallbacks).map(
-            async (callback) => {
-                try {
-                    await callback();
-                } catch (error) {
-                    logger.error("Error in reconnection callback:", error);
-                }
-            }
-        );
+        const promises = Array.from(this.reconnectionCallbacks).map(async (callback) => {
+            try {
+                await callback();
+            } catch (error) {
+                logger.error("Error in reconnection callback:", error);
+            }
+        });

         await Promise.allSettled(promises);
     }
@@ -84,17 +79,13 @@ class RedisManager {
     private async resubscribeToChannels(): Promise<void> {
         if (!this.subscriber || this.subscribers.size === 0) return;

-        logger.info(
-            `Re-subscribing to ${this.subscribers.size} channels after Redis reconnection`
-        );
+        logger.info(`Re-subscribing to ${this.subscribers.size} channels after Redis reconnection`);

         try {
             const channels = Array.from(this.subscribers.keys());
             if (channels.length > 0) {
                 await this.subscriber.subscribe(...channels);
-                logger.info(
-                    `Successfully re-subscribed to channels: ${channels.join(", ")}`
-                );
+                logger.info(`Successfully re-subscribed to channels: ${channels.join(', ')}`);
             }
         } catch (error) {
             logger.error("Failed to re-subscribe to channels:", error);
@@ -102,12 +93,12 @@ class RedisManager {
     }

     private getRedisConfig(): RedisOptions {
-        const redisConfig = privateConfig.getRawPrivateConfig().redis!;
+        const redisConfig = config.getRawPrivateConfig().redis!;
         const opts: RedisOptions = {
             host: redisConfig.host!,
             port: redisConfig.port!,
             password: redisConfig.password,
-            db: redisConfig.db
+            db: redisConfig.db,
             // tls: {
             //     rejectUnauthorized:
             //         redisConfig.tls?.reject_unauthorized || false
@@ -117,7 +108,7 @@ class RedisManager {
     }

     private getReplicaRedisConfig(): RedisOptions | null {
-        const redisConfig = privateConfig.getRawPrivateConfig().redis!;
+        const redisConfig = config.getRawPrivateConfig().redis!;
         if (!redisConfig.replicas || redisConfig.replicas.length === 0) {
             return null;
         }
@@ -129,7 +120,7 @@ class RedisManager {
             host: replica.host!,
             port: replica.port!,
             password: replica.password,
-            db: replica.db || redisConfig.db
+            db: replica.db || redisConfig.db,
             // tls: {
             //     rejectUnauthorized:
             //         replica.tls?.reject_unauthorized || false
@@ -153,7 +144,7 @@ class RedisManager {
             maxRetriesPerRequest: 3,
             keepAlive: 30000,
             connectTimeout: this.connectionTimeout,
-            commandTimeout: this.commandTimeout
+            commandTimeout: this.commandTimeout,
         });

         // Initialize replica connection for reads (if available)
@@ -164,7 +155,7 @@ class RedisManager {
                 maxRetriesPerRequest: 3,
                 keepAlive: 30000,
                 connectTimeout: this.connectionTimeout,
-                commandTimeout: this.commandTimeout
+                commandTimeout: this.commandTimeout,
             });
         } else {
             // Fallback to master for reads if no replicas
@@ -181,7 +172,7 @@ class RedisManager {
             maxRetriesPerRequest: 3,
             keepAlive: 30000,
             connectTimeout: this.connectionTimeout,
-            commandTimeout: this.commandTimeout
+            commandTimeout: this.commandTimeout,
         });

         // Subscriber uses replica if available (reads)
@@ -191,7 +182,7 @@ class RedisManager {
             maxRetriesPerRequest: 3,
             keepAlive: 30000,
             connectTimeout: this.connectionTimeout,
-            commandTimeout: this.commandTimeout
+            commandTimeout: this.commandTimeout,
         });

         // Add reconnection handlers for write client
@@ -214,11 +205,8 @@ class RedisManager {
             // Trigger reconnection callbacks when Redis comes back online
             if (this.isHealthy) {
-                this.triggerReconnectionCallbacks().catch((error) => {
-                    logger.error(
-                        "Error triggering reconnection callbacks:",
-                        error
-                    );
+                this.triggerReconnectionCallbacks().catch(error => {
+                    logger.error("Error triggering reconnection callbacks:", error);
                 });
             }
         });
@@ -248,11 +236,8 @@ class RedisManager {
             // Trigger reconnection callbacks when Redis comes back online
             if (this.isHealthy) {
-                this.triggerReconnectionCallbacks().catch((error) => {
-                    logger.error(
-                        "Error triggering reconnection callbacks:",
-                        error
-                    );
+                this.triggerReconnectionCallbacks().catch(error => {
+                    logger.error("Error triggering reconnection callbacks:", error);
                 });
             }
         });
@@ -328,8 +313,7 @@ class RedisManager {
     private updateOverallHealth(): void {
         // Overall health is true if write is healthy and (read is healthy OR we don't have replicas)
-        this.isHealthy =
-            this.isWriteHealthy && (this.isReadHealthy || !this.hasReplicas);
+        this.isHealthy = this.isWriteHealthy && (this.isReadHealthy || !this.hasReplicas);
     }

     private async executeWithRetry<T>(
@@ -348,15 +332,10 @@ class RedisManager {
                 // If this is the last attempt, try fallback if available
                 if (attempt === this.maxRetries && fallbackOperation) {
                     try {
-                        logger.warn(
-                            `${operationName} primary operation failed, trying fallback`
-                        );
+                        logger.warn(`${operationName} primary operation failed, trying fallback`);
                         return await fallbackOperation();
                     } catch (fallbackError) {
-                        logger.error(
-                            `${operationName} fallback also failed:`,
-                            fallbackError
-                        );
+                        logger.error(`${operationName} fallback also failed:`, fallbackError);
                         throw lastError;
                     }
                 }
@@ -368,25 +347,18 @@ class RedisManager {
                 // Calculate delay with exponential backoff
                 const delay = Math.min(
-                    this.baseRetryDelay *
-                        Math.pow(this.backoffMultiplier, attempt),
+                    this.baseRetryDelay * Math.pow(this.backoffMultiplier, attempt),
                     this.maxRetryDelay
                 );

-                logger.warn(
-                    `${operationName} failed (attempt ${attempt + 1}/${this.maxRetries + 1}), retrying in ${delay}ms:`,
-                    error
-                );
+                logger.warn(`${operationName} failed (attempt ${attempt + 1}/${this.maxRetries + 1}), retrying in ${delay}ms:`, error);

                 // Wait before retrying
-                await new Promise((resolve) => setTimeout(resolve, delay));
+                await new Promise(resolve => setTimeout(resolve, delay));
             }
         }

-        logger.error(
-            `${operationName} failed after ${this.maxRetries + 1} attempts:`,
-            lastError
-        );
+        logger.error(`${operationName} failed after ${this.maxRetries + 1} attempts:`, lastError);
         throw lastError;
     }
@@ -429,44 +401,23 @@ class RedisManager {
             await Promise.race([
                 this.writeClient.ping(),
                 new Promise((_, reject) =>
-                    setTimeout(
-                        () =>
-                            reject(
-                                new Error("Write client health check timeout")
-                            ),
-                        2000
-                    )
+                    setTimeout(() => reject(new Error('Write client health check timeout')), 2000)
                 )
             ]);
             this.isWriteHealthy = true;

             // Check read client health if it's different from write client
-            if (
-                this.hasReplicas &&
-                this.readClient &&
-                this.readClient !== this.writeClient
-            ) {
+            if (this.hasReplicas && this.readClient && this.readClient !== this.writeClient) {
                 try {
                     await Promise.race([
                         this.readClient.ping(),
                         new Promise((_, reject) =>
-                            setTimeout(
-                                () =>
-                                    reject(
-                                        new Error(
-                                            "Read client health check timeout"
-                                        )
-                                    ),
-                                2000
-                            )
+                            setTimeout(() => reject(new Error('Read client health check timeout')), 2000)
                         )
                     ]);
                     this.isReadHealthy = true;
                 } catch (error) {
-                    logger.error(
-                        "Redis read client health check failed:",
-                        error
-                    );
+                    logger.error("Redis read client health check failed:", error);
                     this.isReadHealthy = false;
                 }
             } else {
@@ -524,13 +475,16 @@ class RedisManager {
         if (!this.isRedisEnabled() || !this.writeClient) return false;

         try {
-            await this.executeWithRetry(async () => {
-                if (ttl) {
-                    await this.writeClient!.setex(key, ttl, value);
-                } else {
-                    await this.writeClient!.set(key, value);
-                }
-            }, "Redis SET");
+            await this.executeWithRetry(
+                async () => {
+                    if (ttl) {
+                        await this.writeClient!.setex(key, ttl, value);
+                    } else {
+                        await this.writeClient!.set(key, value);
+                    }
+                },
+                "Redis SET"
+            );
             return true;
         } catch (error) {
             logger.error("Redis SET error:", error);
@@ -542,10 +496,9 @@ class RedisManager {
         if (!this.isRedisEnabled() || !this.readClient) return null;

         try {
-            const fallbackOperation =
-                this.hasReplicas && this.writeClient && this.isWriteHealthy
-                    ? () => this.writeClient!.get(key)
-                    : undefined;
+            const fallbackOperation = (this.hasReplicas && this.writeClient && this.isWriteHealthy)
+                ? () => this.writeClient!.get(key)
+                : undefined;

             return await this.executeWithRetry(
                 () => this.readClient!.get(key),
@@ -607,10 +560,9 @@ class RedisManager {
         if (!this.isRedisEnabled() || !this.readClient) return [];

         try {
-            const fallbackOperation =
-                this.hasReplicas && this.writeClient && this.isWriteHealthy
-                    ? () => this.writeClient!.smembers(key)
-                    : undefined;
+            const fallbackOperation = (this.hasReplicas && this.writeClient && this.isWriteHealthy)
+                ? () => this.writeClient!.smembers(key)
+                : undefined;

             return await this.executeWithRetry(
                 () => this.readClient!.smembers(key),
@@ -646,10 +598,9 @@ class RedisManager {
         if (!this.isRedisEnabled() || !this.readClient) return null;

         try {
-            const fallbackOperation =
-                this.hasReplicas && this.writeClient && this.isWriteHealthy
-                    ? () => this.writeClient!.hget(key, field)
-                    : undefined;
+            const fallbackOperation = (this.hasReplicas && this.writeClient && this.isWriteHealthy)
+                ? () => this.writeClient!.hget(key, field)
+                : undefined;

             return await this.executeWithRetry(
                 () => this.readClient!.hget(key, field),
@@ -681,10 +632,9 @@ class RedisManager {
         if (!this.isRedisEnabled() || !this.readClient) return {};

         try {
-            const fallbackOperation =
-                this.hasReplicas && this.writeClient && this.isWriteHealthy
-                    ? () => this.writeClient!.hgetall(key)
-                    : undefined;
+            const fallbackOperation = (this.hasReplicas && this.writeClient && this.isWriteHealthy)
+                ? () => this.writeClient!.hgetall(key)
+                : undefined;

             return await this.executeWithRetry(
                 () => this.readClient!.hgetall(key),
@@ -708,18 +658,18 @@ class RedisManager {
         }

         try {
-            await this.executeWithRetry(async () => {
-                // Add timeout to prevent hanging
-                return Promise.race([
-                    this.publisher!.publish(channel, message),
-                    new Promise((_, reject) =>
-                        setTimeout(
-                            () => reject(new Error("Redis publish timeout")),
-                            3000
-                        )
-                    )
-                ]);
-            }, "Redis PUBLISH");
+            await this.executeWithRetry(
+                async () => {
+                    // Add timeout to prevent hanging
+                    return Promise.race([
+                        this.publisher!.publish(channel, message),
+                        new Promise((_, reject) =>
+                            setTimeout(() => reject(new Error('Redis publish timeout')), 3000)
+                        )
+                    ]);
+                },
+                "Redis PUBLISH"
+            );
             return true;
         } catch (error) {
             logger.error("Redis PUBLISH error:", error);
@@ -739,20 +689,17 @@ class RedisManager {
         if (!this.subscribers.has(channel)) {
             this.subscribers.set(channel, new Set());
             // Only subscribe to the channel if it's the first subscriber
-            await this.executeWithRetry(async () => {
-                return Promise.race([
-                    this.subscriber!.subscribe(channel),
-                    new Promise((_, reject) =>
-                        setTimeout(
-                            () =>
-                                reject(
-                                    new Error("Redis subscribe timeout")
-                                ),
-                            5000
-                        )
-                    )
-                ]);
-            }, "Redis SUBSCRIBE");
+            await this.executeWithRetry(
+                async () => {
+                    return Promise.race([
+                        this.subscriber!.subscribe(channel),
+                        new Promise((_, reject) =>
+                            setTimeout(() => reject(new Error('Redis subscribe timeout')), 5000)
+                        )
+                    ]);
+                },
+                "Redis SUBSCRIBE"
+            );
         }
         this.subscribers.get(channel)!.add(callback);
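
The subscribe and publish hunks above share one idea: the subscribers Map is the source of truth for channel membership, so resubscribeToChannels() can rebuild broker state after a reconnect, while Promise.race timeouts inside executeWithRetry() keep callers from hanging. A minimal usage sketch, assuming the class is exported as a redisManager singleton (the export statement is outside this diff) and that subscribe callbacks receive the raw message string; both assumptions are noted in the comments:

import logger from "@server/logger";
import redisManager from "./redisManager"; // import path assumed

async function wireCacheInvalidation() {
    // The callback lands in the `subscribers` registry, so a dropped
    // connection is healed by resubscribeToChannels() automatically.
    // Callback signature (message: string) is an assumption.
    await redisManager.subscribe("cache:invalidate", (message: string) => {
        logger.info(`Invalidating cache entry ${message}`);
    });

    // publish() runs through executeWithRetry() with exponential backoff
    // and a 3s timeout; a boolean success flag is assumed (true on success
    // is visible above).
    const ok = await redisManager.publish("cache:invalidate", "user:42");
    if (!ok) {
        logger.warn("Publish failed after retries; invalidating locally only");
    }
}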

View File

@@ -11,9 +11,9 @@
  * This file is not licensed under the AGPLv3.
  */

-import { Store, Options, IncrementResponse } from "express-rate-limit";
-import { rateLimitService } from "./rateLimit";
-import logger from "@server/logger";
+import { Store, Options, IncrementResponse } from 'express-rate-limit';
+import { rateLimitService } from './rateLimit';
+import logger from '@server/logger';

 /**
  * A Redis-backed rate limiting store for express-rate-limit that optimizes
@@ -57,14 +57,12 @@ export default class RedisStore implements Store {
      *
      * @param options - Configuration options for the store.
      */
-    constructor(
-        options: {
-            prefix?: string;
-            skipFailedRequests?: boolean;
-            skipSuccessfulRequests?: boolean;
-        } = {}
-    ) {
-        this.prefix = options.prefix || "express-rate-limit";
+    constructor(options: {
+        prefix?: string;
+        skipFailedRequests?: boolean;
+        skipSuccessfulRequests?: boolean;
+    } = {}) {
+        this.prefix = options.prefix || 'express-rate-limit';
         this.skipFailedRequests = options.skipFailedRequests || false;
         this.skipSuccessfulRequests = options.skipSuccessfulRequests || false;
     }
@@ -103,8 +101,7 @@ export default class RedisStore implements Store {
             return {
                 totalHits: result.totalHits || 1,
-                resetTime:
-                    result.resetTime || new Date(Date.now() + this.windowMs)
+                resetTime: result.resetTime || new Date(Date.now() + this.windowMs)
             };
         } catch (error) {
             logger.error(`RedisStore increment error for key ${key}:`, error);
@@ -161,9 +158,7 @@ export default class RedisStore implements Store {
      */
     async resetAll(): Promise<void> {
         try {
-            logger.warn(
-                "RedisStore resetAll called - this operation can be expensive"
-            );
+            logger.warn('RedisStore resetAll called - this operation can be expensive');

             // Force sync all pending data first
             await rateLimitService.forceSyncAllPendingData();
@@ -172,9 +167,9 @@ export default class RedisStore implements Store {
             // scanning all Redis keys with our prefix, which could be expensive.
             // In production, it's better to let entries expire naturally.

-            logger.info("RedisStore resetAll completed (pending data synced)");
+            logger.info('RedisStore resetAll completed (pending data synced)');
         } catch (error) {
-            logger.error("RedisStore resetAll error:", error);
+            logger.error('RedisStore resetAll error:', error);
             // Don't throw - this is an optional method
         }
     }
@@ -186,9 +181,7 @@ export default class RedisStore implements Store {
      * @param key - The identifier for a client.
      * @returns Current hit count and reset time, or null if no data exists.
      */
-    async getHits(
-        key: string
-    ): Promise<{ totalHits: number; resetTime: Date } | null> {
+    async getHits(key: string): Promise<{ totalHits: number; resetTime: Date } | null> {
         try {
             const clientId = `${this.prefix}:${key}`;
@@ -207,8 +200,7 @@ export default class RedisStore implements Store {
             return {
                 totalHits: Math.max(0, (result.totalHits || 0) - 1), // Adjust for the decrement
-                resetTime:
-                    result.resetTime || new Date(Date.now() + this.windowMs)
+                resetTime: result.resetTime || new Date(Date.now() + this.windowMs)
             };
         } catch (error) {
             logger.error(`RedisStore getHits error for key ${key}:`, error);
@@ -223,9 +215,9 @@ export default class RedisStore implements Store {
     async shutdown(): Promise<void> {
         try {
             // The rateLimitService handles its own cleanup
-            logger.info("RedisStore shutdown completed");
+            logger.info('RedisStore shutdown completed');
         } catch (error) {
-            logger.error("RedisStore shutdown error:", error);
+            logger.error('RedisStore shutdown error:', error);
         }
     }
 }
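
For orientation, this store plugs into express-rate-limit through its store option; the middleware hands windowMs to the store via init(), which is defined outside these hunks. A minimal wiring sketch, with the import path assumed; the store constructor also accepts the skip flags shown above, presumably so its accounting can mirror the middleware's decrement behavior:

import express from "express";
import rateLimit from "express-rate-limit";
import RedisStore from "./redisStore"; // path assumed

const app = express();

const limiter = rateLimit({
    windowMs: 60 * 1000, // handed to the store via init()
    max: 100, // hits per client per window
    standardHeaders: true,
    skipFailedRequests: true, // paired with the store-level flag below
    store: new RedisStore({ prefix: "api", skipFailedRequests: true })
});

app.use("/api", limiter);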

View File

@@ -1,4 +1,4 @@
-import { db, loginPage, LoginPage, loginPageOrg, Org, orgs } from "@server/db";
+import { db, loginPage, LoginPage, loginPageOrg } from "@server/db";
 import {
     Resource,
     ResourcePassword,
@@ -6,8 +6,6 @@ import {
     ResourceRule,
     resourcePassword,
     resourcePincode,
-    resourceHeaderAuth,
-    ResourceHeaderAuth,
     resourceRules,
     resources,
     roleResources,
@@ -17,13 +15,15 @@ import {
     users
 } from "@server/db";
 import { and, eq } from "drizzle-orm";
+import axios from "axios";
+import config from "@server/lib/config";
+import logger from "@server/logger";
+import { tokenManager } from "@server/lib/tokenManager";

 export type ResourceWithAuth = {
     resource: Resource | null;
     pincode: ResourcePincode | null;
     password: ResourcePassword | null;
-    headerAuth: ResourceHeaderAuth | null;
-    org: Org;
 };

 export type UserSessionWithUser = {
@@ -37,6 +37,30 @@
 export async function getResourceByDomain(
     domain: string
 ): Promise<ResourceWithAuth | null> {
+    if (config.isManagedMode()) {
+        try {
+            const response = await axios.get(
+                `${config.getRawConfig().managed?.endpoint}/api/v1/hybrid/resource/domain/${domain}`,
+                await tokenManager.getAuthHeader()
+            );
+            return response.data.data;
+        } catch (error) {
+            if (axios.isAxiosError(error)) {
+                logger.error("Error fetching config in verify session:", {
+                    message: error.message,
+                    code: error.code,
+                    status: error.response?.status,
+                    statusText: error.response?.statusText,
+                    url: error.config?.url,
+                    method: error.config?.method
+                });
+            } else {
+                logger.error("Error fetching config in verify session:", error);
+            }
+            return null;
+        }
+    }
     const [result] = await db
         .select()
         .from(resources)
@@ -48,11 +72,6 @@ export async function getResourceByDomain(
             resourcePassword,
             eq(resourcePassword.resourceId, resources.resourceId)
         )
-        .leftJoin(
-            resourceHeaderAuth,
-            eq(resourceHeaderAuth.resourceId, resources.resourceId)
-        )
-        .innerJoin(orgs, eq(orgs.orgId, resources.orgId))
         .where(eq(resources.fullDomain, domain))
         .limit(1);
@@ -63,9 +82,7 @@ export async function getResourceByDomain(
     return {
         resource: result.resources,
         pincode: result.resourcePincode,
-        password: result.resourcePassword,
-        headerAuth: result.resourceHeaderAuth,
-        org: result.orgs
+        password: result.resourcePassword
     };
 }
@@ -75,6 +92,30 @@ export async function getResourceByDomain(
 export async function getUserSessionWithUser(
     userSessionId: string
 ): Promise<UserSessionWithUser | null> {
+    if (config.isManagedMode()) {
+        try {
+            const response = await axios.get(
+                `${config.getRawConfig().managed?.endpoint}/api/v1/hybrid/session/${userSessionId}`,
+                await tokenManager.getAuthHeader()
+            );
+            return response.data.data;
+        } catch (error) {
+            if (axios.isAxiosError(error)) {
+                logger.error("Error fetching config in verify session:", {
+                    message: error.message,
+                    code: error.code,
+                    status: error.response?.status,
+                    statusText: error.response?.statusText,
+                    url: error.config?.url,
+                    method: error.config?.method
+                });
+            } else {
+                logger.error("Error fetching config in verify session:", error);
+            }
+            return null;
+        }
+    }
     const [res] = await db
         .select()
         .from(sessions)
@@ -95,6 +136,30 @@ export async function getUserSessionWithUser(
  * Get user organization role
  */
 export async function getUserOrgRole(userId: string, orgId: string) {
+    if (config.isManagedMode()) {
+        try {
+            const response = await axios.get(
+                `${config.getRawConfig().managed?.endpoint}/api/v1/hybrid/user/${userId}/org/${orgId}/role`,
+                await tokenManager.getAuthHeader()
+            );
+            return response.data.data;
+        } catch (error) {
+            if (axios.isAxiosError(error)) {
+                logger.error("Error fetching config in verify session:", {
+                    message: error.message,
+                    code: error.code,
+                    status: error.response?.status,
+                    statusText: error.response?.statusText,
+                    url: error.config?.url,
+                    method: error.config?.method
+                });
+            } else {
+                logger.error("Error fetching config in verify session:", error);
+            }
+            return null;
+        }
+    }
     const userOrgRole = await db
         .select()
         .from(userOrgs)
@@ -111,6 +176,30 @@ export async function getRoleResourceAccess(
     resourceId: number,
     roleId: number
 ) {
+    if (config.isManagedMode()) {
+        try {
+            const response = await axios.get(
+                `${config.getRawConfig().managed?.endpoint}/api/v1/hybrid/role/${roleId}/resource/${resourceId}/access`,
+                await tokenManager.getAuthHeader()
+            );
+            return response.data.data;
+        } catch (error) {
+            if (axios.isAxiosError(error)) {
+                logger.error("Error fetching config in verify session:", {
+                    message: error.message,
+                    code: error.code,
+                    status: error.response?.status,
+                    statusText: error.response?.statusText,
+                    url: error.config?.url,
+                    method: error.config?.method
+                });
+            } else {
+                logger.error("Error fetching config in verify session:", error);
+            }
+            return null;
+        }
+    }
     const roleResourceAccess = await db
         .select()
         .from(roleResources)
@@ -132,6 +221,30 @@ export async function getUserResourceAccess(
     userId: string,
     resourceId: number
 ) {
+    if (config.isManagedMode()) {
+        try {
+            const response = await axios.get(
+                `${config.getRawConfig().managed?.endpoint}/api/v1/hybrid/user/${userId}/resource/${resourceId}/access`,
+                await tokenManager.getAuthHeader()
+            );
+            return response.data.data;
+        } catch (error) {
+            if (axios.isAxiosError(error)) {
+                logger.error("Error fetching config in verify session:", {
+                    message: error.message,
+                    code: error.code,
+                    status: error.response?.status,
+                    statusText: error.response?.statusText,
+                    url: error.config?.url,
+                    method: error.config?.method
+                });
+            } else {
+                logger.error("Error fetching config in verify session:", error);
+            }
+            return null;
+        }
+    }
     const userResourceAccess = await db
         .select()
         .from(userResources)
@@ -152,6 +265,30 @@ export async function getUserResourceAccess(
 export async function getResourceRules(
     resourceId: number
 ): Promise<ResourceRule[]> {
+    if (config.isManagedMode()) {
+        try {
+            const response = await axios.get(
+                `${config.getRawConfig().managed?.endpoint}/api/v1/hybrid/resource/${resourceId}/rules`,
+                await tokenManager.getAuthHeader()
+            );
+            return response.data.data;
+        } catch (error) {
+            if (axios.isAxiosError(error)) {
+                logger.error("Error fetching config in verify session:", {
+                    message: error.message,
+                    code: error.code,
+                    status: error.response?.status,
+                    statusText: error.response?.statusText,
+                    url: error.config?.url,
+                    method: error.config?.method
+                });
+            } else {
+                logger.error("Error fetching config in verify session:", error);
+            }
+            return [];
+        }
+    }
     const rules = await db
         .select()
         .from(resourceRules)
@@ -166,6 +303,30 @@ export async function getResourceRules(
 export async function getOrgLoginPage(
     orgId: string
 ): Promise<LoginPage | null> {
+    if (config.isManagedMode()) {
+        try {
+            const response = await axios.get(
+                `${config.getRawConfig().managed?.endpoint}/api/v1/hybrid/org/${orgId}/login-page`,
+                await tokenManager.getAuthHeader()
+            );
+            return response.data.data;
+        } catch (error) {
+            if (axios.isAxiosError(error)) {
+                logger.error("Error fetching config in verify session:", {
+                    message: error.message,
+                    code: error.code,
+                    status: error.response?.status,
+                    statusText: error.response?.statusText,
+                    url: error.config?.url,
+                    method: error.config?.method
+                });
+            } else {
+                logger.error("Error fetching config in verify session:", error);
+            }
+            return null;
+        }
+    }
     const [result] = await db
         .select()
         .from(loginPageOrg)
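
One pattern worth noting in this file: the managed-mode guard added to all six helpers is identical apart from the URL and the fallback value, and every copy logs "Error fetching config in verify session:" even when it is fetching rules, roles, or login pages. A possible consolidation is sketched below; it is hypothetical, not part of this diff, and reuses the axios, config, logger, and tokenManager imports the diff adds at the top of the file:

// Hypothetical helper; fetchFromManaged is not a name from the diff.
// Centralizes the managed-endpoint GET plus error logging, returning the
// caller-supplied fallback (null or []) on any failure.
async function fetchFromManaged<T>(path: string, fallback: T): Promise<T> {
    try {
        const response = await axios.get(
            `${config.getRawConfig().managed?.endpoint}${path}`,
            await tokenManager.getAuthHeader()
        );
        return response.data.data;
    } catch (error) {
        if (axios.isAxiosError(error)) {
            logger.error(`Error fetching ${path} from managed endpoint:`, {
                message: error.message,
                code: error.code,
                status: error.response?.status,
                statusText: error.response?.statusText,
                url: error.config?.url,
                method: error.config?.method
            });
        } else {
            logger.error(`Error fetching ${path} from managed endpoint:`, error);
        }
        return fallback;
    }
}

// Example: the guard in getResourceRules would then reduce to
// if (config.isManagedMode()) {
//     return fetchFromManaged<ResourceRule[]>(`/api/v1/hybrid/resource/${resourceId}/rules`, []);
// }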

Some files were not shown because too many files have changed in this diff.