Compare commits

..

1 Commits

Author SHA1 Message Date
Owen Schwartz
40f2262f3e Merge pull request #2309 from fosrl/dev
1.15.0
2026-01-23 10:40:16 -08:00
948 changed files with 28372 additions and 83304 deletions

View File

@@ -1,5 +0,0 @@
---
alwaysApply: true
---
Always localize strings and use the `t` function to convert keys to strings. Add the keys to the en-us.json file. Never edit the other language files, as en-us.json is the single source of truth.

View File

@@ -1,7 +0,0 @@
---
description:
alwaysApply: true
---
Proxy resources = public resources
Private resources = client resources = site resources

View File

@@ -28,9 +28,7 @@ LICENSE
CONTRIBUTING.md
dist
.git
server/migrations/
migrations/
config/
build.ts
tsconfig.json
Dockerfile*
drizzle.config.ts
tsconfig.json

1
.github/CODEOWNERS vendored
View File

@@ -1 +0,0 @@
* @oschwartz10612 @miloschwartz

View File

@@ -44,9 +44,19 @@ updates:
schedule:
interval: "daily"
groups:
patch-updates:
dev-patch-updates:
dependency-type: "development"
update-types:
- "patch"
minor-updates:
dev-minor-updates:
dependency-type: "development"
update-types:
- "minor"
prod-patch-updates:
dependency-type: "production"
update-types:
- "patch"
prod-minor-updates:
dependency-type: "production"
update-types:
- "minor"

View File

@@ -1,4 +1,4 @@
name: Public CICD Pipeline
name: CI/CD Pipeline
# CI/CD workflow for building, publishing, mirroring, signing container images and building release binaries.
# Actions are pinned to specific SHAs to reduce supply-chain risk. This workflow triggers on tag push events.
@@ -29,7 +29,7 @@ jobs:
permissions: write-all
steps:
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v6
uses: aws-actions/configure-aws-credentials@v5
with:
role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/${{ secrets.AWS_ROLE_NAME }}
role-duration-seconds: 3600
@@ -62,7 +62,7 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Monitor storage space
run: |
@@ -77,7 +77,7 @@ jobs:
fi
- name: Log in to Docker Hub
uses: docker/login-action@4907a6ddec9925e35a0a9e82d7399ccc52663121 # v4.1.0
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
with:
registry: docker.io
username: ${{ secrets.DOCKER_HUB_USERNAME }}
@@ -134,7 +134,7 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Monitor storage space
run: |
@@ -149,7 +149,7 @@ jobs:
fi
- name: Log in to Docker Hub
uses: docker/login-action@4907a6ddec9925e35a0a9e82d7399ccc52663121 # v4.1.0
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
with:
registry: docker.io
username: ${{ secrets.DOCKER_HUB_USERNAME }}
@@ -201,10 +201,10 @@ jobs:
timeout-minutes: 30
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Log in to Docker Hub
uses: docker/login-action@4907a6ddec9925e35a0a9e82d7399ccc52663121 # v4.1.0
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
with:
registry: docker.io
username: ${{ secrets.DOCKER_HUB_USERNAME }}
@@ -256,7 +256,7 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Extract tag name
id: get-tag
@@ -264,9 +264,9 @@ jobs:
shell: bash
- name: Install Go
uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6.4.0
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
with:
go-version: 1.25
go-version: 1.24
- name: Update version in package.json
run: |
@@ -289,17 +289,25 @@ jobs:
echo "LATEST_BADGER_TAG=$LATEST_TAG" >> $GITHUB_ENV
shell: bash
- name: Update install/main.go
run: |
PANGOLIN_VERSION=${{ env.TAG }}
GERBIL_VERSION=${{ env.LATEST_GERBIL_TAG }}
BADGER_VERSION=${{ env.LATEST_BADGER_TAG }}
sed -i "s/config.PangolinVersion = \".*\"/config.PangolinVersion = \"$PANGOLIN_VERSION\"/" install/main.go
sed -i "s/config.GerbilVersion = \".*\"/config.GerbilVersion = \"$GERBIL_VERSION\"/" install/main.go
sed -i "s/config.BadgerVersion = \".*\"/config.BadgerVersion = \"$BADGER_VERSION\"/" install/main.go
echo "Updated install/main.go with Pangolin version $PANGOLIN_VERSION, Gerbil version $GERBIL_VERSION, and Badger version $BADGER_VERSION"
cat install/main.go
shell: bash
- name: Build installer
working-directory: install
run: |
make go-build-release \
PANGOLIN_VERSION=${{ env.TAG }} \
GERBIL_VERSION=${{ env.LATEST_GERBIL_TAG }} \
BADGER_VERSION=${{ env.LATEST_BADGER_TAG }}
shell: bash
make go-build-release
- name: Upload artifacts from /install/bin
uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
with:
name: install-bin
path: install/bin/
@@ -407,25 +415,31 @@ jobs:
shell: bash
- name: Login to GitHub Container Registry (for cosign)
uses: docker/login-action@4907a6ddec9925e35a0a9e82d7399ccc52663121 # v4.1.0
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Install cosign
# cosign is used to sign container images using keyless (OIDC) signing
uses: sigstore/cosign-installer@cad07c2e89fa2edd6e2d7bab4c1aa38e53f76003 # v4.1.1
# cosign is used to sign and verify container images (key and keyless)
uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
- name: Sign (GHCR, keyless)
# Sign each GHCR image by digest using keyless (OIDC) signing via Sigstore/Rekor.
# Signatures are stored in the registry alongside the image.
- name: Dual-sign and verify (GHCR & Docker Hub)
# Sign each image by digest using keyless (OIDC) and key-based signing,
# then verify both the public key signature and the keyless OIDC signature.
env:
TAG: ${{ env.TAG }}
COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
COSIGN_PUBLIC_KEY: ${{ secrets.COSIGN_PUBLIC_KEY }}
COSIGN_YES: "true"
run: |
set -euo pipefail
issuer="https://token.actions.githubusercontent.com"
id_regex="^https://github.com/${{ github.repository }}/.+" # accept this repo (all workflows/refs)
# Determine if this is an RC release
IS_RC="false"
if [[ "$TAG" == *"-rc."* ]]; then
@@ -453,48 +467,35 @@ jobs:
)
fi
FAILED_TAGS=()
SUCCESSFUL_TAGS=()
# Sign each image variant for both registries
for BASE_IMAGE in "${GHCR_IMAGE}" "${DOCKERHUB_IMAGE}"; do
for IMAGE_TAG in "${IMAGE_TAGS[@]}"; do
echo "Processing ${BASE_IMAGE}:${IMAGE_TAG}"
for IMAGE_TAG in "${IMAGE_TAGS[@]}"; do
echo "Processing ${GHCR_IMAGE}:${IMAGE_TAG}"
TAG_FAILED=false
(
set -e
DIGEST="$(skopeo inspect --retry-times 3 docker://${GHCR_IMAGE}:${IMAGE_TAG} | jq -r '.Digest')"
REF="${GHCR_IMAGE}@${DIGEST}"
DIGEST="$(skopeo inspect --retry-times 3 docker://${BASE_IMAGE}:${IMAGE_TAG} | jq -r '.Digest')"
REF="${BASE_IMAGE}@${DIGEST}"
echo "Resolved digest: ${REF}"
echo "==> cosign sign (keyless) --recursive ${REF}"
cosign sign --recursive "${REF}"
) || TAG_FAILED=true
if [ "$TAG_FAILED" = "true" ]; then
echo "⚠️ WARNING: Failed to sign ${GHCR_IMAGE}:${IMAGE_TAG}"
FAILED_TAGS+=("${GHCR_IMAGE}:${IMAGE_TAG}")
else
echo "✓ Successfully signed ${GHCR_IMAGE}:${IMAGE_TAG}"
SUCCESSFUL_TAGS+=("${GHCR_IMAGE}:${IMAGE_TAG}")
fi
echo "==> cosign sign (key) --recursive ${REF}"
cosign sign --key env://COSIGN_PRIVATE_KEY --recursive "${REF}"
echo "==> cosign verify (public key) ${REF}"
cosign verify --key env://COSIGN_PUBLIC_KEY "${REF}" -o text
echo "==> cosign verify (keyless policy) ${REF}"
cosign verify \
--certificate-oidc-issuer "${issuer}" \
--certificate-identity-regexp "${id_regex}" \
"${REF}" -o text
echo "✓ Successfully signed and verified ${BASE_IMAGE}:${IMAGE_TAG}"
done
done
echo ""
echo "=========================================="
echo "Sign Summary"
echo "=========================================="
echo "Successful: ${#SUCCESSFUL_TAGS[@]}"
echo "Failed: ${#FAILED_TAGS[@]}"
if [ ${#FAILED_TAGS[@]} -gt 0 ]; then
echo "Failed tags:"
for tag in "${FAILED_TAGS[@]}"; do
echo " - $tag"
done
echo "⚠️ WARNING: Some tags failed to sign, but continuing anyway"
else
echo "✓ All images signed successfully!"
fi
echo "All images signed and verified successfully!"
shell: bash
post-run:
@@ -512,7 +513,7 @@ jobs:
permissions: write-all
steps:
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v6
uses: aws-actions/configure-aws-credentials@v5
with:
role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/${{ secrets.AWS_ROLE_NAME }}
role-duration-seconds: 3600

426
.github/workflows/cicd.yml.backup vendored Normal file
View File

@@ -0,0 +1,426 @@
name: CI/CD Pipeline
# CI/CD workflow for building, publishing, mirroring, signing container images and building release binaries.
# Actions are pinned to specific SHAs to reduce supply-chain risk. This workflow triggers on tag push events.
permissions:
contents: read
packages: write # for GHCR push
id-token: write # for Cosign Keyless (OIDC) Signing
# Required secrets:
# - DOCKER_HUB_USERNAME / DOCKER_HUB_ACCESS_TOKEN: push to Docker Hub
# - GITHUB_TOKEN: used for GHCR login and OIDC keyless signing
# - COSIGN_PRIVATE_KEY / COSIGN_PASSWORD / COSIGN_PUBLIC_KEY: for key-based signing
on:
push:
tags:
- "[0-9]+.[0-9]+.[0-9]+"
- "[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+"
concurrency:
group: ${{ github.ref }}
cancel-in-progress: true
jobs:
pre-run:
runs-on: ubuntu-latest
permissions: write-all
steps:
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v2
with:
role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/${{ secrets.AWS_ROLE_NAME }}
role-duration-seconds: 3600
aws-region: ${{ secrets.AWS_REGION }}
- name: Verify AWS identity
run: aws sts get-caller-identity
- name: Start EC2 instances
run: |
aws ec2 start-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_ARM_RUNNER }}
aws ec2 start-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_AMD_RUNNER }}
echo "EC2 instances started"
release-arm:
name: Build and Release (ARM64)
runs-on: [self-hosted, linux, arm64, us-east-1]
needs: [pre-run]
if: >-
${{
needs.pre-run.result == 'success'
}}
# Job-level timeout to avoid runaway or stuck runs
timeout-minutes: 120
env:
# Target images
DOCKERHUB_IMAGE: docker.io/fosrl/${{ github.event.repository.name }}
GHCR_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}
steps:
- name: Checkout code
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Monitor storage space
run: |
THRESHOLD=75
USED_SPACE=$(df / | grep / | awk '{ print $5 }' | sed 's/%//g')
echo "Used space: $USED_SPACE%"
if [ "$USED_SPACE" -ge "$THRESHOLD" ]; then
echo "Used space is below the threshold of 75% free. Running Docker system prune."
echo y | docker system prune -a
else
echo "Storage space is above the threshold. No action needed."
fi
- name: Log in to Docker Hub
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
with:
registry: docker.io
username: ${{ secrets.DOCKER_HUB_USERNAME }}
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
- name: Extract tag name
id: get-tag
run: echo "TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
shell: bash
- name: Update version in package.json
run: |
TAG=${{ env.TAG }}
sed -i "s/export const APP_VERSION = \".*\";/export const APP_VERSION = \"$TAG\";/" server/lib/consts.ts
cat server/lib/consts.ts
shell: bash
- name: Check if release candidate
id: check-rc
run: |
TAG=${{ env.TAG }}
if [[ "$TAG" == *"-rc."* ]]; then
echo "IS_RC=true" >> $GITHUB_ENV
else
echo "IS_RC=false" >> $GITHUB_ENV
fi
shell: bash
- name: Build and push Docker images (Docker Hub - ARM64)
run: |
TAG=${{ env.TAG }}
if [ "$IS_RC" = "true" ]; then
make build-rc-arm tag=$TAG
else
make build-release-arm tag=$TAG
fi
echo "Built & pushed ARM64 images to: ${{ env.DOCKERHUB_IMAGE }}:${TAG}"
shell: bash
release-amd:
name: Build and Release (AMD64)
runs-on: [self-hosted, linux, x64, us-east-1]
needs: [pre-run]
if: >-
${{
needs.pre-run.result == 'success'
}}
# Job-level timeout to avoid runaway or stuck runs
timeout-minutes: 120
env:
# Target images
DOCKERHUB_IMAGE: docker.io/fosrl/${{ github.event.repository.name }}
GHCR_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}
steps:
- name: Checkout code
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Monitor storage space
run: |
THRESHOLD=75
USED_SPACE=$(df / | grep / | awk '{ print $5 }' | sed 's/%//g')
echo "Used space: $USED_SPACE%"
if [ "$USED_SPACE" -ge "$THRESHOLD" ]; then
echo "Used space is below the threshold of 75% free. Running Docker system prune."
echo y | docker system prune -a
else
echo "Storage space is above the threshold. No action needed."
fi
- name: Log in to Docker Hub
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
with:
registry: docker.io
username: ${{ secrets.DOCKER_HUB_USERNAME }}
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
- name: Extract tag name
id: get-tag
run: echo "TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
shell: bash
- name: Update version in package.json
run: |
TAG=${{ env.TAG }}
sed -i "s/export const APP_VERSION = \".*\";/export const APP_VERSION = \"$TAG\";/" server/lib/consts.ts
cat server/lib/consts.ts
shell: bash
- name: Check if release candidate
id: check-rc
run: |
TAG=${{ env.TAG }}
if [[ "$TAG" == *"-rc."* ]]; then
echo "IS_RC=true" >> $GITHUB_ENV
else
echo "IS_RC=false" >> $GITHUB_ENV
fi
shell: bash
- name: Build and push Docker images (Docker Hub - AMD64)
run: |
TAG=${{ env.TAG }}
if [ "$IS_RC" = "true" ]; then
make build-rc-amd tag=$TAG
else
make build-release-amd tag=$TAG
fi
echo "Built & pushed AMD64 images to: ${{ env.DOCKERHUB_IMAGE }}:${TAG}"
shell: bash
create-manifest:
name: Create Multi-Arch Manifests
runs-on: [self-hosted, linux, x64, us-east-1]
needs: [release-arm, release-amd]
if: >-
${{
needs.release-arm.result == 'success' &&
needs.release-amd.result == 'success'
}}
timeout-minutes: 30
steps:
- name: Checkout code
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Log in to Docker Hub
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
with:
registry: docker.io
username: ${{ secrets.DOCKER_HUB_USERNAME }}
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
- name: Extract tag name
id: get-tag
run: echo "TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
shell: bash
- name: Check if release candidate
id: check-rc
run: |
TAG=${{ env.TAG }}
if [[ "$TAG" == *"-rc."* ]]; then
echo "IS_RC=true" >> $GITHUB_ENV
else
echo "IS_RC=false" >> $GITHUB_ENV
fi
shell: bash
- name: Create multi-arch manifests
run: |
TAG=${{ env.TAG }}
if [ "$IS_RC" = "true" ]; then
make create-manifests-rc tag=$TAG
else
make create-manifests tag=$TAG
fi
echo "Created multi-arch manifests for tag: ${TAG}"
shell: bash
sign-and-package:
name: Sign and Package
runs-on: [self-hosted, linux, x64, us-east-1]
needs: [release-arm, release-amd, create-manifest]
if: >-
${{
needs.release-arm.result == 'success' &&
needs.release-amd.result == 'success' &&
needs.create-manifest.result == 'success'
}}
# Job-level timeout to avoid runaway or stuck runs
timeout-minutes: 120
env:
# Target images
DOCKERHUB_IMAGE: docker.io/fosrl/${{ github.event.repository.name }}
GHCR_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}
steps:
- name: Checkout code
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Extract tag name
id: get-tag
run: echo "TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
shell: bash
- name: Install Go
uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
with:
go-version: 1.24
- name: Update version in package.json
run: |
TAG=${{ env.TAG }}
sed -i "s/export const APP_VERSION = \".*\";/export const APP_VERSION = \"$TAG\";/" server/lib/consts.ts
cat server/lib/consts.ts
shell: bash
- name: Pull latest Gerbil version
id: get-gerbil-tag
run: |
LATEST_TAG=$(curl -s https://api.github.com/repos/fosrl/gerbil/tags | jq -r '.[0].name')
echo "LATEST_GERBIL_TAG=$LATEST_TAG" >> $GITHUB_ENV
shell: bash
- name: Pull latest Badger version
id: get-badger-tag
run: |
LATEST_TAG=$(curl -s https://api.github.com/repos/fosrl/badger/tags | jq -r '.[0].name')
echo "LATEST_BADGER_TAG=$LATEST_TAG" >> $GITHUB_ENV
shell: bash
- name: Update install/main.go
run: |
PANGOLIN_VERSION=${{ env.TAG }}
GERBIL_VERSION=${{ env.LATEST_GERBIL_TAG }}
BADGER_VERSION=${{ env.LATEST_BADGER_TAG }}
sed -i "s/config.PangolinVersion = \".*\"/config.PangolinVersion = \"$PANGOLIN_VERSION\"/" install/main.go
sed -i "s/config.GerbilVersion = \".*\"/config.GerbilVersion = \"$GERBIL_VERSION\"/" install/main.go
sed -i "s/config.BadgerVersion = \".*\"/config.BadgerVersion = \"$BADGER_VERSION\"/" install/main.go
echo "Updated install/main.go with Pangolin version $PANGOLIN_VERSION, Gerbil version $GERBIL_VERSION, and Badger version $BADGER_VERSION"
cat install/main.go
shell: bash
- name: Build installer
working-directory: install
run: |
make go-build-release
- name: Upload artifacts from /install/bin
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
with:
name: install-bin
path: install/bin/
- name: Install skopeo + jq
# skopeo: copy/inspect images between registries
# jq: JSON parsing tool used to extract digest values
run: |
sudo apt-get update -y
sudo apt-get install -y skopeo jq
skopeo --version
shell: bash
- name: Login to GHCR
env:
REGISTRY_AUTH_FILE: ${{ runner.temp }}/containers/auth.json
run: |
mkdir -p "$(dirname "$REGISTRY_AUTH_FILE")"
skopeo login ghcr.io -u "${{ github.actor }}" -p "${{ secrets.GITHUB_TOKEN }}"
shell: bash
- name: Copy tag from Docker Hub to GHCR
# Mirror the already-built image (all architectures) to GHCR so we can sign it
# Wait a bit for both architectures to be available in Docker Hub manifest
env:
REGISTRY_AUTH_FILE: ${{ runner.temp }}/containers/auth.json
run: |
set -euo pipefail
TAG=${{ env.TAG }}
echo "Waiting for multi-arch manifest to be ready..."
sleep 30
echo "Copying ${{ env.DOCKERHUB_IMAGE }}:${TAG} -> ${{ env.GHCR_IMAGE }}:${TAG}"
skopeo copy --all --retry-times 3 \
docker://$DOCKERHUB_IMAGE:$TAG \
docker://$GHCR_IMAGE:$TAG
shell: bash
- name: Login to GitHub Container Registry (for cosign)
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Install cosign
# cosign is used to sign and verify container images (key and keyless)
uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
- name: Dual-sign and verify (GHCR & Docker Hub)
# Sign each image by digest using keyless (OIDC) and key-based signing,
# then verify both the public key signature and the keyless OIDC signature.
env:
TAG: ${{ env.TAG }}
COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
COSIGN_PUBLIC_KEY: ${{ secrets.COSIGN_PUBLIC_KEY }}
COSIGN_YES: "true"
run: |
set -euo pipefail
issuer="https://token.actions.githubusercontent.com"
id_regex="^https://github.com/${{ github.repository }}/.+" # accept this repo (all workflows/refs)
for IMAGE in "${GHCR_IMAGE}" "${DOCKERHUB_IMAGE}"; do
echo "Processing ${IMAGE}:${TAG}"
DIGEST="$(skopeo inspect --retry-times 3 docker://${IMAGE}:${TAG} | jq -r '.Digest')"
REF="${IMAGE}@${DIGEST}"
echo "Resolved digest: ${REF}"
echo "==> cosign sign (keyless) --recursive ${REF}"
cosign sign --recursive "${REF}"
echo "==> cosign sign (key) --recursive ${REF}"
cosign sign --key env://COSIGN_PRIVATE_KEY --recursive "${REF}"
echo "==> cosign verify (public key) ${REF}"
cosign verify --key env://COSIGN_PUBLIC_KEY "${REF}" -o text
echo "==> cosign verify (keyless policy) ${REF}"
cosign verify \
--certificate-oidc-issuer "${issuer}" \
--certificate-identity-regexp "${id_regex}" \
"${REF}" -o text
done
shell: bash
post-run:
needs: [pre-run, release-arm, release-amd, create-manifest, sign-and-package]
if: >-
${{
always() &&
needs.pre-run.result == 'success' &&
(needs.release-arm.result == 'success' || needs.release-arm.result == 'skipped' || needs.release-arm.result == 'failure') &&
(needs.release-amd.result == 'success' || needs.release-amd.result == 'skipped' || needs.release-amd.result == 'failure') &&
(needs.create-manifest.result == 'success' || needs.create-manifest.result == 'skipped' || needs.create-manifest.result == 'failure') &&
(needs.sign-and-package.result == 'success' || needs.sign-and-package.result == 'skipped' || needs.sign-and-package.result == 'failure')
}}
runs-on: ubuntu-latest
permissions: write-all
steps:
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v2
with:
role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/${{ secrets.AWS_ROLE_NAME }}
role-duration-seconds: 3600
aws-region: ${{ secrets.AWS_REGION }}
- name: Verify AWS identity
run: aws sts get-caller-identity
- name: Stop EC2 instances
run: |
aws ec2 stop-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_ARM_RUNNER }}
aws ec2 stop-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_AMD_RUNNER }}
echo "EC2 instances stopped"

View File

@@ -21,12 +21,12 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Set up Node.js
uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6.3.0
uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6.2.0
with:
node-version: '24'
node-version: '22'
- name: Install dependencies
run: npm ci

View File

@@ -23,7 +23,7 @@ jobs:
skopeo --version
- name: Install cosign
uses: sigstore/cosign-installer@cad07c2e89fa2edd6e2d7bab4c1aa38e53f76003 # v4.1.1
uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
- name: Input check
run: |

View File

@@ -14,7 +14,7 @@ jobs:
permissions: write-all
steps:
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v6
uses: aws-actions/configure-aws-credentials@v5
with:
role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/${{ secrets.AWS_ROLE_NAME }}
role-duration-seconds: 3600

View File

@@ -1,4 +1,4 @@
name: SAAS Pipeline
name: CI/CD Pipeline
# CI/CD workflow for building, publishing, mirroring, signing container images and building release binaries.
# Actions are pinned to specific SHAs to reduce supply-chain risk. This workflow triggers on tag push events.
@@ -23,7 +23,7 @@ jobs:
permissions: write-all
steps:
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v6
uses: aws-actions/configure-aws-credentials@v5
with:
role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/${{ secrets.AWS_ROLE_NAME }}
role-duration-seconds: 3600
@@ -54,42 +54,7 @@ jobs:
steps:
- name: Checkout code
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
- name: Download MaxMind GeoLite2 databases
env:
MAXMIND_LICENSE_KEY: ${{ secrets.MAXMIND_LICENSE_KEY }}
run: |
echo "Downloading MaxMind GeoLite2 databases..."
# Download GeoLite2-Country
curl -L "https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-Country&license_key=${MAXMIND_LICENSE_KEY}&suffix=tar.gz" \
-o GeoLite2-Country.tar.gz
# Download GeoLite2-ASN
curl -L "https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-ASN&license_key=${MAXMIND_LICENSE_KEY}&suffix=tar.gz" \
-o GeoLite2-ASN.tar.gz
# Extract the .mmdb files
tar -xzf GeoLite2-Country.tar.gz --strip-components=1 --wildcards '*.mmdb'
tar -xzf GeoLite2-ASN.tar.gz --strip-components=1 --wildcards '*.mmdb'
# Verify files exist
if [ ! -f "GeoLite2-Country.mmdb" ]; then
echo "ERROR: Failed to download GeoLite2-Country.mmdb"
exit 1
fi
if [ ! -f "GeoLite2-ASN.mmdb" ]; then
echo "ERROR: Failed to download GeoLite2-ASN.mmdb"
exit 1
fi
# Clean up tar files
rm -f GeoLite2-Country.tar.gz GeoLite2-ASN.tar.gz
echo "MaxMind databases downloaded successfully"
ls -lh GeoLite2-*.mmdb
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Monitor storage space
run: |
@@ -104,7 +69,7 @@ jobs:
fi
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v6
uses: aws-actions/configure-aws-credentials@v5
with:
role-to-assume: arn:aws:iam::${{ secrets.aws_account_id }}:role/${{ secrets.AWS_ROLE_NAME }}
role-duration-seconds: 3600
@@ -145,7 +110,7 @@ jobs:
permissions: write-all
steps:
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v6
uses: aws-actions/configure-aws-credentials@v5
with:
role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/${{ secrets.AWS_ROLE_NAME }}
role-duration-seconds: 3600

View File

@@ -14,7 +14,7 @@ jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@b5d41d4e1d5dceea10e7104786b73624c18a190f # v10.2.0
- uses: actions/stale@997185467fa4f803885201cee163a9f38240193d # v10.1.1
with:
days-before-stale: 14
days-before-close: 14

View File

@@ -14,12 +14,12 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Install Node
uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6.3.0
uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6.2.0
with:
node-version: '24'
node-version: '22'
- name: Copy config file
run: cp config/config.example.yml config/config.yml
@@ -34,10 +34,10 @@ jobs:
run: npm run set:oss
- name: Generate database migrations
run: npm run db:generate
run: npm run db:sqlite:generate
- name: Apply database migrations
run: npm run db:push
run: npm run db:sqlite:push
- name: Test with tsc
run: npx tsc --noEmit
@@ -62,7 +62,10 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Copy config file
run: cp config/config.example.yml config/config.yml
- name: Build Docker image sqlite
run: make dev-build-sqlite
@@ -71,7 +74,10 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Copy config file
run: cp config/config.example.yml config/config.yml
- name: Build Docker image pg
run: make dev-build-pg

4
.gitignore vendored
View File

@@ -51,6 +51,4 @@ dynamic/
scratch/
tsconfig.json
hydrateSaas.ts
CLAUDE.md
drizzle.config.ts
server/setup/migrations.ts
CLAUDE.md

View File

@@ -10,7 +10,7 @@
"editor.defaultFormatter": "esbenp.prettier-vscode"
},
"[typescript]": {
"editor.defaultFormatter": "esbenp.prettier-vscode"
"editor.defaultFormatter": "vscode.typescript-language-features"
},
"[typescriptreact]": {
"editor.defaultFormatter": "esbenp.prettier-vscode"

View File

@@ -1,51 +1,77 @@
# FROM node:24-slim AS base
FROM public.ecr.aws/docker/library/node:24-slim AS base
FROM node:24-alpine AS builder
# OCI Image Labels - Build Args for dynamic values
ARG VERSION="dev"
ARG REVISION=""
ARG CREATED=""
ARG LICENSE="AGPL-3.0"
WORKDIR /app
RUN apt-get update && apt-get install -y python3 make g++ && rm -rf /var/lib/apt/lists/*
COPY package*.json ./
FROM base AS builder-dev
RUN npm ci
COPY . .
ARG BUILD=oss
ARG DATABASE=sqlite
RUN if [ "$BUILD" = "oss" ]; then rm -rf server/private; fi && \
npm run set:$DATABASE && \
npm run set:$BUILD && \
npm run db:generate && \
npm run build && \
npm run build:cli && \
test -f dist/server.mjs
# Derive title and description based on BUILD type
ARG IMAGE_TITLE="Pangolin"
ARG IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere"
# Create placeholder files for MaxMind databases to avoid COPY errors
# Real files should be present for saas builds, placeholders for oss builds
RUN touch /app/GeoLite2-Country.mmdb /app/GeoLite2-ASN.mmdb
RUN apk add --no-cache curl tzdata python3 make g++
FROM base AS builder
# COPY package.json package-lock.json ./
COPY package*.json ./
RUN npm ci
RUN npm ci --omit=dev
COPY . .
# FROM node:24-slim AS runner
FROM public.ecr.aws/docker/library/node:24-slim AS runner
RUN echo "export * from \"./$DATABASE\";" > server/db/index.ts
RUN echo "export const driver: \"pg\" | \"sqlite\" = \"$DATABASE\";" >> server/db/index.ts
RUN echo "export const build = \"$BUILD\" as \"saas\" | \"enterprise\" | \"oss\";" > server/build.ts
# Copy the appropriate TypeScript configuration based on build type
RUN if [ "$BUILD" = "oss" ]; then cp tsconfig.oss.json tsconfig.json; \
elif [ "$BUILD" = "saas" ]; then cp tsconfig.saas.json tsconfig.json; \
elif [ "$BUILD" = "enterprise" ]; then cp tsconfig.enterprise.json tsconfig.json; \
fi
# if the build is oss then remove the server/private directory
RUN if [ "$BUILD" = "oss" ]; then rm -rf server/private; fi
RUN if [ "$DATABASE" = "pg" ]; then npx drizzle-kit generate --dialect postgresql --schema ./server/db/pg/schema --out init; else npx drizzle-kit generate --dialect $DATABASE --schema ./server/db/$DATABASE/schema --out init; fi
RUN mkdir -p dist
RUN npm run next:build
RUN node esbuild.mjs -e server/index.ts -o dist/server.mjs -b $BUILD
RUN if [ "$DATABASE" = "pg" ]; then \
node esbuild.mjs -e server/setup/migrationsPg.ts -o dist/migrations.mjs; \
else \
node esbuild.mjs -e server/setup/migrationsSqlite.ts -o dist/migrations.mjs; \
fi
# test to make sure the build output is there and error if not
RUN test -f dist/server.mjs
RUN npm run build:cli
# Prune dev dependencies and clean up to prepare for copy to runner
RUN npm prune --omit=dev && npm cache clean --force
FROM node:24-alpine AS runner
WORKDIR /app
RUN apt-get update && apt-get install -y curl tzdata && rm -rf /var/lib/apt/lists/*
# Only curl and tzdata needed at runtime - no build tools!
RUN apk add --no-cache curl tzdata
# Copy pre-built node_modules from builder (already pruned to production only)
# This includes the compiled native modules like better-sqlite3
COPY --from=builder /app/node_modules ./node_modules
COPY --from=builder /app/package.json ./package.json
COPY --from=builder-dev /app/.next/standalone ./
COPY --from=builder-dev /app/.next/static ./.next/static
COPY --from=builder-dev /app/dist ./dist
COPY --from=builder-dev /app/server/migrations ./dist/init
COPY --from=builder /app/.next/standalone ./
COPY --from=builder /app/.next/static ./.next/static
COPY --from=builder /app/dist ./dist
COPY --from=builder /app/init ./dist/init
COPY --from=builder /app/package.json ./package.json
COPY ./cli/wrapper.sh /usr/local/bin/pangctl
RUN chmod +x /usr/local/bin/pangctl ./dist/cli.mjs
@@ -55,28 +81,6 @@ COPY server/db/ios_models.json ./dist/ios_models.json
COPY server/db/mac_models.json ./dist/mac_models.json
COPY public ./public
# Copy MaxMind databases for SaaS builds
ARG BUILD=oss
RUN mkdir -p ./maxmind
# Copy MaxMind databases (placeholders exist for oss builds, real files for saas)
COPY --from=builder-dev /app/GeoLite2-Country.mmdb ./maxmind/GeoLite2-Country.mmdb
COPY --from=builder-dev /app/GeoLite2-ASN.mmdb ./maxmind/GeoLite2-ASN.mmdb
# Remove MaxMind databases for non-saas builds (keep only for saas)
RUN if [ "$BUILD" != "saas" ]; then rm -rf ./maxmind; fi
# OCI Image Labels - Build Args for dynamic values
ARG VERSION="dev"
ARG REVISION=""
ARG CREATED=""
ARG LICENSE="AGPL-3.0"
# Derive title and description based on BUILD type
ARG IMAGE_TITLE="Pangolin"
ARG IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere"
# OCI Image Labels
# https://github.com/opencontainers/image-spec/blob/main/annotations.md
LABEL org.opencontainers.image.source="https://github.com/fosrl/pangolin" \

View File

@@ -1,9 +1,7 @@
FROM node:24-alpine
FROM node:22-alpine
WORKDIR /app
RUN apk add --no-cache python3 make g++
COPY package*.json ./
# Install dependencies

View File

@@ -35,53 +35,43 @@
</div>
<p align="center">
<a href="https://docs.pangolin.net/careers/join-us">
<img src="https://img.shields.io/badge/🚀_We're_Hiring!-Join_Our_Team-brightgreen?style=for-the-badge" alt="We're Hiring!" />
</a>
</p>
<p align="center">
<strong>
Get started with Pangolin at <a href="https://app.pangolin.net/auth/signup">app.pangolin.net</a>
Start testing Pangolin at <a href="https://app.pangolin.net/auth/signup">app.pangolin.net</a>
</strong>
</p>
Pangolin is an open-source, identity-based remote access platform built on WireGuard® that enables secure, seamless connectivity to private and public resources. Pangolin combines reverse proxy and VPN capabilities into one platform, providing browser-based access to web applications and client-based access to any private resources with NAT traversal, all with granular access controls.
Pangolin is an open-source, identity-based remote access platform built on WireGuard that enables secure, seamless connectivity to private and public resources. Pangolin combines reverse proxy and VPN capabilities into one platform, providing browser-based access to web applications and client-based access to any private resources, all with zero-trust security and granular access control.
## Installation
- Get started for free with [Pangolin Cloud](https://app.pangolin.net/).
- Or, check out the [quick install guide](https://docs.pangolin.net/self-host/quick-install) for how to self-host Pangolin.
- Install from the [DigitalOcean marketplace](https://marketplace.digitalocean.com/apps/pangolin-ce-1?refcode=edf0480eeb81) for a one-click pre-configured installer.
- Check out the [quick install guide](https://docs.pangolin.net/self-host/quick-install) for how to install and set up Pangolin.
- Install from the [DigitalOcean marketplace](https://marketplace.digitalocean.com/apps/pangolin-ce-1?refcode=edf0480eeb81) for a one-click pre-configured installer.
<img src="public/screenshots/hero.png" alt="Pangolin" width="100%" />
<img src="public/screenshots/hero.png" />
## Deployment Options
- **Pangolin Cloud** - Fully managed service - no infrastructure required.
- **Self-Host: Community Edition** - Free, open source, and licensed under AGPL-3.
- **Self-Host: Enterprise Edition** - Licensed under Fossorial Commercial License. Free for personal and hobbyist use, and for businesses making less than \$100K USD gross annual revenue.
| <img width=500 /> | Description |
|-----------------|--------------|
| **Self-Host: Community Edition** | Free, open source, and licensed under AGPL-3. |
| **Self-Host: Enterprise Edition** | Licensed under Fossorial Commercial License. Free for personal and hobbyist use, and for businesses earning under \$100K USD annually. |
| **Pangolin Cloud** | Fully managed service with instant setup and pay-as-you-go pricing — no infrastructure required. Or, self-host your own [remote node](https://docs.pangolin.net/manage/remote-node/nodes) and connect to our control plane. |
## Key Features
### Connect remote networks with sites and NAT traversal
Pangolin's site connectors provide gateways into networks so you can access any networked resources. Sites use outbound tunnels and intelligent NAT traversal to make networks behind restrictive firewalls available for authorized access without public IPs or open ports. Easily deploy a site as a binary or container on any platform.
<img src="public/screenshots/sites.png" alt="Sites" width="100%" />
### Browser-based reverse proxy access
Expose web applications through identity and context-aware tunneled reverse proxies. Users access applications through any web browser with authentication and granular access control without installing a client. Pangolin handles routing, load balancing, health checking, and automatic SSL certificates without exposing your network directly to the internet.
<img src="public/clip.gif" alt="Reverse proxy access" width="100%" />
### Client-based private resource access
Access private resources like SSH servers, databases, RDP, and entire network ranges through Pangolin clients. Intelligent NAT traversal enables connections even through restrictive firewalls, while DNS aliases provide friendly names and fast connections to resources across all your sites. Add redundancy by routing traffic through multiple connectors in your network.
<img src="public/screenshots/private-resources.png" alt="Private resources" width="100%" />
### Give users and roles access to resources
Use Pangolin's built in users or bring your own identity provider and set up role based access control (RBAC). Grant users access to specific resources, not entire networks. Unlike traditional VPNs that expose full network access, Pangolin's zero-trust model ensures users can only reach the applications, services, and routes you explicitly define.
<img src="public/screenshots/users.png" alt="Users from identity provider with roles" width="100%" />
| <img width=500 /> | <img width=500 /> |
|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------|
| **Connect remote networks with sites**<br /><br />Pangolin's lightweight site connectors create secure tunnels from remote networks without requiring public IP addresses or open ports. Sites make any network anywhere available for authorized access. | <img src="public/screenshots/sites.png" width=500 /><tr></tr> |
| **Browser-based reverse proxy access**<br /><br />Expose web applications through identity and context-aware tunneled reverse proxies. Pangolin handles routing, load balancing, health checking, and automatic SSL certificates without exposing your network directly to the internet. Users access applications through any web browser with authentication and granular access control. | <img src="public/clip.gif" width=500 /><tr></tr> |
| **Client-based private resource access**<br /><br />Access private resources like SSH servers, databases, RDP, and entire network ranges through Pangolin clients. Intelligent NAT traversal enables connections even through restrictive firewalls, while DNS aliases provide friendly names and fast connections to resources across all your sites. | <img src="public/screenshots/private-resources.png" width=500 /><tr></tr> |
| **Zero-trust granular access**<br /><br />Grant users access to specific resources, not entire networks. Unlike traditional VPNs that expose full network access, Pangolin's zero-trust model ensures users can only reach the applications and services you explicitly define, reducing security risk and attack surface. | <img src="public/screenshots/user-devices.png" width=500 /><tr></tr> |
## Download Clients
@@ -95,16 +85,17 @@ Download the Pangolin client for your platform:
## Get Started
### Sign up now
Create a free account at [app.pangolin.net](https://app.pangolin.net) to get started with Pangolin Cloud.
### Check out the docs
We encourage everyone to read the full documentation first, which is
available at [docs.pangolin.net](https://docs.pangolin.net). This README provides only a very brief subset of
the docs to illustrate some basic ideas.
### Sign up and try now
For Pangolin's managed service, you will first need to create an account at
[app.pangolin.net](https://app.pangolin.net). We have a generous free tier to get started.
## Licensing
Pangolin is dual licensed under the AGPL-3 and the [Fossorial Commercial License](https://pangolin.net/fcl.html). For inquiries about commercial licensing, please contact us at [contact@pangolin.net](mailto:contact@pangolin.net).
@@ -112,3 +103,7 @@ Pangolin is dual licensed under the AGPL-3 and the [Fossorial Commercial License
## Contributions
Please see [CONTRIBUTING](./CONTRIBUTING.md) in the repository for guidelines and best practices.
---
WireGuard® is a registered trademark of Jason A. Donenfeld.

View File

@@ -3,7 +3,7 @@
If you discover a security vulnerability, please follow the steps below to responsibly disclose it to us:
1. **Do not create a public GitHub issue or discussion post.** This could put the security of other users at risk.
2. Send a detailed report to [security@pangolin.net](mailto:security@pangolin.net) with the following information:
2. Send a detailed report to [security@pangolin.net](mailto:security@pangolin.net) or send a **private** message to a maintainer on [Discord](https://discord.gg/HCJR8Xhme4). Include:
- Description and location of the vulnerability.
- Potential impact of the vulnerability.

View File

@@ -0,0 +1,17 @@
meta {
name: Create API Key
type: http
seq: 1
}
put {
url: http://localhost:3000/api/v1/api-key
body: json
auth: inherit
}
body:json {
{
"isRoot": true
}
}

View File

@@ -0,0 +1,11 @@
meta {
name: Delete API Key
type: http
seq: 2
}
delete {
url: http://localhost:3000/api/v1/api-key/dm47aacqxxn3ubj
body: none
auth: inherit
}

View File

@@ -0,0 +1,11 @@
meta {
name: List API Key Actions
type: http
seq: 6
}
get {
url: http://localhost:3000/api/v1/api-key/ex0izu2c37fjz9x/actions
body: none
auth: inherit
}

View File

@@ -0,0 +1,11 @@
meta {
name: List Org API Keys
type: http
seq: 4
}
get {
url: http://localhost:3000/api/v1/org/home-lab/api-keys
body: none
auth: inherit
}

View File

@@ -0,0 +1,11 @@
meta {
name: List Root API Keys
type: http
seq: 3
}
get {
url: http://localhost:3000/api/v1/root/api-keys
body: none
auth: inherit
}

View File

@@ -0,0 +1,17 @@
meta {
name: Set API Key Actions
type: http
seq: 5
}
post {
url: http://localhost:3000/api/v1/api-key/ex0izu2c37fjz9x/actions
body: json
auth: inherit
}
body:json {
{
"actionIds": ["listSites"]
}
}

View File

@@ -0,0 +1,17 @@
meta {
name: Set API Key Orgs
type: http
seq: 7
}
post {
url: http://localhost:3000/api/v1/api-key/ex0izu2c37fjz9x/orgs
body: json
auth: inherit
}
body:json {
{
"orgIds": ["home-lab"]
}
}

View File

@@ -0,0 +1,3 @@
meta {
name: API Keys
}

View File

@@ -0,0 +1,18 @@
meta {
name: 2fa-disable
type: http
seq: 6
}
post {
url: http://localhost:3000/api/v1/auth/2fa/disable
body: json
auth: none
}
body:json {
{
"password": "aaaaa-1A",
"code": "377289"
}
}

17
bruno/Auth/2fa-enable.bru Normal file
View File

@@ -0,0 +1,17 @@
meta {
name: 2fa-enable
type: http
seq: 4
}
post {
url: http://localhost:3000/api/v1/auth/2fa/enable
body: json
auth: none
}
body:json {
{
"code": "374138"
}
}

View File

@@ -0,0 +1,17 @@
meta {
name: 2fa-request
type: http
seq: 5
}
post {
url: http://localhost:3000/api/v1/auth/2fa/request
body: json
auth: none
}
body:json {
{
"password": "aaaaa-1A"
}
}

View File

@@ -0,0 +1,18 @@
meta {
name: change-password
type: http
seq: 9
}
post {
url: http://localhost:3000/api/v1/auth/change-password
body: json
auth: none
}
body:json {
{
"oldPassword": "",
"newPassword": ""
}
}

18
bruno/Auth/login.bru Normal file
View File

@@ -0,0 +1,18 @@
meta {
name: login
type: http
seq: 1
}
post {
url: http://localhost:3000/api/v1/auth/login
body: json
auth: none
}
body:json {
{
"email": "admin@fosrl.io",
"password": "Password123!"
}
}

11
bruno/Auth/logout.bru Normal file
View File

@@ -0,0 +1,11 @@
meta {
name: logout
type: http
seq: 3
}
post {
url: http://localhost:4000/api/v1/auth/logout
body: none
auth: none
}

View File

@@ -0,0 +1,17 @@
meta {
name: reset-password-request
type: http
seq: 10
}
post {
url: http://localhost:3000/api/v1/auth/reset-password/request
body: json
auth: none
}
body:json {
{
"email": "milo@pangolin.net"
}
}

View File

@@ -0,0 +1,19 @@
meta {
name: reset-password
type: http
seq: 11
}
post {
url: http://localhost:3000/api/v1/auth/reset-password
body: json
auth: none
}
body:json {
{
"token": "3uhsbom72dwdhboctwrtntyd6jrlg4jtf5oaxy4k",
"newPassword": "aaaaa-1A",
"code": "6irqCGR3"
}
}

18
bruno/Auth/signup.bru Normal file
View File

@@ -0,0 +1,18 @@
meta {
name: signup
type: http
seq: 2
}
put {
url: http://localhost:3000/api/v1/auth/signup
body: json
auth: none
}
body:json {
{
"email": "numbat@pangolin.net",
"password": "Password123!"
}
}

View File

@@ -0,0 +1,11 @@
meta {
name: verify-email-request
type: http
seq: 8
}
post {
url: http://localhost:3000/api/v1/auth/verify-email/request
body: none
auth: none
}

View File

@@ -0,0 +1,17 @@
meta {
name: verify-email
type: http
seq: 7
}
post {
url: http://localhost:3000/api/v1/auth/verify-email
body: json
auth: none
}
body:json {
{
"code": "50317187"
}
}

View File

@@ -0,0 +1,15 @@
meta {
name: verify-user
type: http
seq: 4
}
get {
url: http://localhost:3001/api/v1/badger/verify-user?sessionId=mb52273jkb6t3oys2bx6ur5x7rcrkl26c7warg3e
body: none
auth: none
}
params:query {
sessionId: mb52273jkb6t3oys2bx6ur5x7rcrkl26c7warg3e
}

View File

@@ -0,0 +1,22 @@
meta {
name: createClient
type: http
seq: 1
}
put {
url: http://localhost:3000/api/v1/site/1/client
body: json
auth: none
}
body:json {
{
"siteId": 1,
"name": "test",
"type": "olm",
"subnet": "100.90.129.4/30",
"olmId": "029yzunhx6nh3y5",
"secret": "l0ymp075y3d4rccb25l6sqpgar52k09etunui970qq5gj7x6"
}
}

View File

@@ -0,0 +1,11 @@
meta {
name: pickClientDefaults
type: http
seq: 2
}
get {
url: http://localhost:3000/api/v1/site/1/pick-client-defaults
body: none
auth: none
}

View File

@@ -0,0 +1,22 @@
meta {
name: Create OIDC Provider
type: http
seq: 1
}
put {
url: http://localhost:3000/api/v1/org/home-lab/idp/oidc
body: json
auth: inherit
}
body:json {
{
"clientId": "JJoSvHCZcxnXT2sn6CObj6a21MuKNRXs3kN5wbys",
"clientSecret": "2SlGL2wOGgMEWLI9yUuMAeFxre7qSNJVnXMzyepdNzH1qlxYnC4lKhhQ6a157YQEkYH3vm40KK4RCqbYiF8QIweuPGagPX3oGxEj2exwutoXFfOhtq4hHybQKoFq01Z3",
"authUrl": "http://localhost:9000/application/o/authorize/",
"tokenUrl": "http://localhost:9000/application/o/token/",
"scopes": ["email", "openid", "profile"],
"userIdentifier": "email"
}
}

View File

@@ -0,0 +1,11 @@
meta {
name: Generate OIDC URL
type: http
seq: 2
}
get {
url: http://localhost:3000/api/v1
body: none
auth: inherit
}

3
bruno/IDP/folder.bru Normal file
View File

@@ -0,0 +1,3 @@
meta {
name: IDP
}

View File

@@ -0,0 +1,11 @@
meta {
name: Traefik Config
type: http
seq: 1
}
get {
url: http://localhost:3001/api/v1/traefik-config
body: none
auth: inherit
}

View File

@@ -0,0 +1,3 @@
meta {
name: Internal
}

View File

@@ -0,0 +1,11 @@
meta {
name: Create Newt
type: http
seq: 2
}
get {
url: http://localhost:3000/api/v1/newt
body: none
auth: none
}

18
bruno/Newt/Get Token.bru Normal file
View File

@@ -0,0 +1,18 @@
meta {
name: Get Token
type: http
seq: 1
}
get {
url: http://localhost:3000/api/v1/auth/newt/get-token
body: json
auth: none
}
body:json {
{
"newtId": "o0d4rdxq3stnz7b",
"secret": "sy7l09fnaesd03iwrfp9m3qf0ryn19g0zf3dqieaazb4k7vk"
}
}

15
bruno/Olm/createOlm.bru Normal file
View File

@@ -0,0 +1,15 @@
meta {
name: createOlm
type: http
seq: 1
}
put {
url: http://localhost:3000/api/v1/olm
body: none
auth: inherit
}
settings {
encodeUrl: true
}

8
bruno/Olm/folder.bru Normal file
View File

@@ -0,0 +1,8 @@
meta {
name: Olm
seq: 15
}
auth {
mode: inherit
}

11
bruno/Orgs/Check Id.bru Normal file
View File

@@ -0,0 +1,11 @@
meta {
name: Check Id
type: http
seq: 2
}
get {
url: http://localhost:3000/api/v1/org/checkId
body: none
auth: none
}

11
bruno/Orgs/listOrgs.bru Normal file
View File

@@ -0,0 +1,11 @@
meta {
name: listOrgs
type: http
seq: 1
}
get {
url:
body: none
auth: none
}

View File

@@ -0,0 +1,11 @@
meta {
name: createRemoteExitNode
type: http
seq: 1
}
put {
url: http://localhost:4000/api/v1/org/org_i21aifypnlyxur2/remote-exit-node
body: none
auth: none
}

View File

@@ -0,0 +1,11 @@
meta {
name: listResourcesByOrg
type: http
seq: 1
}
get {
url:
body: none
auth: none
}

View File

@@ -0,0 +1,16 @@
meta {
name: listResourcesBySite
type: http
seq: 2
}
get {
url: http://localhost:3000/api/v1/site/1/resources?limit=10&offset=0
body: none
auth: none
}
params:query {
limit: 10
offset: 0
}

11
bruno/Sites/Get Site.bru Normal file
View File

@@ -0,0 +1,11 @@
meta {
name: Get Site
type: http
seq: 2
}
get {
url: http://localhost:3000/api/v1/org/test/sites/mexican-mole-lizard-windy
body: none
auth: none
}

11
bruno/Sites/listSites.bru Normal file
View File

@@ -0,0 +1,11 @@
meta {
name: listSites
type: http
seq: 1
}
get {
url:
body: none
auth: none
}

View File

@@ -0,0 +1,16 @@
meta {
name: listTargets
type: http
seq: 1
}
get {
url: http://localhost:3000/api/v1/resource/web.main.localhost/targets?limit=10&offset=0
body: none
auth: none
}
params:query {
limit: 10
offset: 0
}

11
bruno/Test.bru Normal file
View File

@@ -0,0 +1,11 @@
meta {
name: Test
type: http
seq: 2
}
get {
url: http://localhost:3000/api/v1
body: none
auth: inherit
}

View File

@@ -0,0 +1,11 @@
meta {
name: traefik-config
type: http
seq: 1
}
get {
url: http://localhost:3001/api/v1/traefik-config
body: none
auth: none
}

View File

@@ -0,0 +1,11 @@
meta {
name: adminListUsers
type: http
seq: 2
}
get {
url: http://localhost:3000/api/v1/users
body: none
auth: none
}

View File

@@ -0,0 +1,11 @@
meta {
name: adminRemoveUser
type: http
seq: 3
}
delete {
url: http://localhost:3000/api/v1/user/ky5r7ivqs8wc7u4
body: none
auth: none
}

11
bruno/Users/getUser.bru Normal file
View File

@@ -0,0 +1,11 @@
meta {
name: getUser
type: http
seq: 1
}
get {
url:
body: none
auth: none
}

13
bruno/bruno.json Normal file
View File

@@ -0,0 +1,13 @@
{
"version": "1",
"name": "Pangolin",
"type": "collection",
"ignore": [
"node_modules",
".git"
],
"presets": {
"requestType": "http",
"requestUrl": "http://localhost:3000/api/v1"
}
}

View File

@@ -1,121 +0,0 @@
import { CommandModule } from "yargs";
import { db, orgs } from "@server/db";
import { eq } from "drizzle-orm";
import { encrypt } from "@server/lib/crypto";
import { configFilePath1, configFilePath2 } from "@server/lib/consts";
import { generateCA } from "@server/lib/sshCA";
import fs from "fs";
import yaml from "js-yaml";
// CLI arguments for the `generate-org-ca-keys` command (parsed by yargs).
type GenerateOrgCaKeysArgs = {
    // The organization whose SSH CA key pair is generated.
    orgId: string;
    // Server secret used to encrypt the CA private key; when omitted the
    // handler falls back to reading server.secret from config.yml/config.yaml.
    secret?: string;
    // When true, overwrite CA keys that already exist for the org.
    force?: boolean;
};
export const generateOrgCaKeys: CommandModule<{}, GenerateOrgCaKeysArgs> = {
command: "generate-org-ca-keys",
describe:
"Generate SSH CA public/private key pair for an organization and store them in the database (private key encrypted with server secret)",
builder: (yargs) => {
return yargs
.option("orgId", {
type: "string",
demandOption: true,
describe: "The organization ID"
})
.option("secret", {
type: "string",
describe:
"Server secret used to encrypt the CA private key. If omitted, read from config file (config.yml or config.yaml)."
})
.option("force", {
type: "boolean",
default: false,
describe:
"Overwrite existing CA keys for the org if they already exist"
});
},
handler: async (argv: {
orgId: string;
secret?: string;
force?: boolean;
}) => {
try {
const { orgId, force } = argv;
let secret = argv.secret;
if (!secret) {
const configPath = fs.existsSync(configFilePath1)
? configFilePath1
: fs.existsSync(configFilePath2)
? configFilePath2
: null;
if (!configPath) {
console.error(
"Error: No server secret provided and config file not found. " +
"Expected config.yml or config.yaml in the config directory, or pass --secret."
);
process.exit(1);
}
const configContent = fs.readFileSync(configPath, "utf8");
const config = yaml.load(configContent) as {
server?: { secret?: string };
};
if (!config?.server?.secret) {
console.error(
"Error: No server.secret in config file. Pass --secret or set server.secret in config."
);
process.exit(1);
}
secret = config.server.secret;
}
const [org] = await db
.select({
orgId: orgs.orgId,
sshCaPrivateKey: orgs.sshCaPrivateKey,
sshCaPublicKey: orgs.sshCaPublicKey
})
.from(orgs)
.where(eq(orgs.orgId, orgId))
.limit(1);
if (!org) {
console.error(`Error: Organization with orgId "${orgId}" not found.`);
process.exit(1);
}
if (org.sshCaPrivateKey != null || org.sshCaPublicKey != null) {
if (!force) {
console.error(
"Error: This organization already has CA keys. Use --force to overwrite."
);
process.exit(1);
}
}
const ca = generateCA(`pangolin-ssh-ca-${orgId}`);
const encryptedPrivateKey = encrypt(ca.privateKeyPem, secret);
await db
.update(orgs)
.set({
sshCaPrivateKey: encryptedPrivateKey,
sshCaPublicKey: ca.publicKeyOpenSSH
})
.where(eq(orgs.orgId, orgId));
console.log("SSH CA keys generated and stored for org:", orgId);
console.log("\nPublic key (OpenSSH format):");
console.log(ca.publicKeyOpenSSH);
process.exit(0);
} catch (error) {
console.error("Error generating org CA keys:", error);
process.exit(1);
}
}
};

View File

@@ -8,7 +8,6 @@ import { clearExitNodes } from "./commands/clearExitNodes";
import { rotateServerSecret } from "./commands/rotateServerSecret";
import { clearLicenseKeys } from "./commands/clearLicenseKeys";
import { deleteClient } from "./commands/deleteClient";
import { generateOrgCaKeys } from "./commands/generateOrgCaKeys";
yargs(hideBin(process.argv))
.scriptName("pangctl")
@@ -18,6 +17,5 @@ yargs(hideBin(process.argv))
.command(rotateServerSecret)
.command(clearLicenseKeys)
.command(deleteClient)
.command(generateOrgCaKeys)
.demandCommand()
.help().argv;

View File

@@ -1 +0,0 @@
*-journal

View File

@@ -4,12 +4,6 @@ services:
image: fosrl/pangolin:latest
container_name: pangolin
restart: unless-stopped
deploy:
resources:
limits:
memory: 1g
reservations:
memory: 256m
volumes:
- ./config:/app/config
healthcheck:

View File

@@ -6,12 +6,6 @@ import path from "path";
import fs from "fs";
// import { glob } from "glob";
// Read default build type from server/build.ts
let build = "oss";
const buildFile = fs.readFileSync(path.resolve("server/build.ts"), "utf8");
const m = buildFile.match(/export\s+const\s+build\s*=\s*["'](oss|saas|enterprise)["']/);
if (m) build = m[1];
const banner = `
// patch __dirname
// import { fileURLToPath } from "url";
@@ -43,7 +37,7 @@ const argv = yargs(hideBin(process.argv))
describe: "Build type (oss, saas, enterprise)",
type: "string",
choices: ["oss", "saas", "enterprise"],
default: build
default: "oss"
})
.help()
.alias("help", "h").argv;
@@ -281,7 +275,7 @@ esbuild
})
],
sourcemap: "inline",
target: "node24"
target: "node22"
})
.then((result) => {
// Check if there were any errors in the build result

View File

@@ -1,24 +1,41 @@
all: go-build-release
# Build with version injection via ldflags
# Versions can be passed via: make go-build-release PANGOLIN_VERSION=x.x.x GERBIL_VERSION=x.x.x BADGER_VERSION=x.x.x
# Or fetched automatically if not provided (requires curl and jq)
PANGOLIN_VERSION ?= $(shell curl -s https://api.github.com/repos/fosrl/pangolin/tags | jq -r '.[0].name')
GERBIL_VERSION ?= $(shell curl -s https://api.github.com/repos/fosrl/gerbil/tags | jq -r '.[0].name')
BADGER_VERSION ?= $(shell curl -s https://api.github.com/repos/fosrl/badger/tags | jq -r '.[0].name')
LDFLAGS = -X main.pangolinVersion=$(PANGOLIN_VERSION) \
-X main.gerbilVersion=$(GERBIL_VERSION) \
-X main.badgerVersion=$(BADGER_VERSION)
all: update-versions go-build-release put-back
dev-all: dev-update-versions dev-build dev-clean
go-build-release:
@echo "Building with versions - Pangolin: $(PANGOLIN_VERSION), Gerbil: $(GERBIL_VERSION), Badger: $(BADGER_VERSION)"
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags "$(LDFLAGS)" -o bin/installer_linux_amd64
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -ldflags "$(LDFLAGS)" -o bin/installer_linux_arm64
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o bin/installer_linux_amd64
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -o bin/installer_linux_arm64
clean:
rm -f bin/installer_linux_amd64
rm -f bin/installer_linux_arm64
.PHONY: all go-build-release clean
update-versions:
@echo "Fetching latest versions..."
cp main.go main.go.bak && \
$(MAKE) dev-update-versions
put-back:
mv main.go.bak main.go
dev-update-versions:
if [ -z "$(tag)" ]; then \
PANGOLIN_VERSION=$$(curl -s https://api.github.com/repos/fosrl/pangolin/tags | jq -r '.[0].name'); \
else \
PANGOLIN_VERSION=$(tag); \
fi && \
GERBIL_VERSION=$$(curl -s https://api.github.com/repos/fosrl/gerbil/tags | jq -r '.[0].name') && \
BADGER_VERSION=$$(curl -s https://api.github.com/repos/fosrl/badger/tags | jq -r '.[0].name') && \
echo "Latest versions - Pangolin: $$PANGOLIN_VERSION, Gerbil: $$GERBIL_VERSION, Badger: $$BADGER_VERSION" && \
sed -i "s/config.PangolinVersion = \".*\"/config.PangolinVersion = \"$$PANGOLIN_VERSION\"/" main.go && \
sed -i "s/config.GerbilVersion = \".*\"/config.GerbilVersion = \"$$GERBIL_VERSION\"/" main.go && \
sed -i "s/config.BadgerVersion = \".*\"/config.BadgerVersion = \"$$BADGER_VERSION\"/" main.go && \
echo "Updated main.go with latest versions"
dev-build: go-build-release
dev-clean:
@echo "Restoring version values ..."
sed -i "s/config.PangolinVersion = \".*\"/config.PangolinVersion = \"replaceme\"/" main.go && \
sed -i "s/config.GerbilVersion = \".*\"/config.GerbilVersion = \"replaceme\"/" main.go && \
sed -i "s/config.BadgerVersion = \".*\"/config.BadgerVersion = \"replaceme\"/" main.go
@echo "Restored version strings in main.go"

View File

@@ -99,6 +99,11 @@ func ReadAppConfig(configPath string) (*AppConfigValues, error) {
return values, nil
}
// findPattern returns the byte offset of the first occurrence of pattern
// within s, or -1 when pattern does not occur (same contract as bytes.Index).
func findPattern(s, pattern string) int {
	haystack := []byte(s)
	needle := []byte(pattern)
	return bytes.Index(haystack, needle)
}
func copyDockerService(sourceFile, destFile, serviceName string) error {
// Read source file
sourceData, err := os.ReadFile(sourceFile)
@@ -113,19 +118,19 @@ func copyDockerService(sourceFile, destFile, serviceName string) error {
}
// Parse source Docker Compose YAML
var sourceCompose map[string]any
var sourceCompose map[string]interface{}
if err := yaml.Unmarshal(sourceData, &sourceCompose); err != nil {
return fmt.Errorf("error parsing source Docker Compose file: %w", err)
}
// Parse destination Docker Compose YAML
var destCompose map[string]any
var destCompose map[string]interface{}
if err := yaml.Unmarshal(destData, &destCompose); err != nil {
return fmt.Errorf("error parsing destination Docker Compose file: %w", err)
}
// Get services section from source
sourceServices, ok := sourceCompose["services"].(map[string]any)
sourceServices, ok := sourceCompose["services"].(map[string]interface{})
if !ok {
return fmt.Errorf("services section not found in source file or has invalid format")
}
@@ -137,10 +142,10 @@ func copyDockerService(sourceFile, destFile, serviceName string) error {
}
// Get or create services section in destination
destServices, ok := destCompose["services"].(map[string]any)
destServices, ok := destCompose["services"].(map[string]interface{})
if !ok {
// If services section doesn't exist, create it
destServices = make(map[string]any)
destServices = make(map[string]interface{})
destCompose["services"] = destServices
}
@@ -182,21 +187,17 @@ func backupConfig() error {
return nil
}
func MarshalYAMLWithIndent(data any, indent int) (resp []byte, err error) {
func MarshalYAMLWithIndent(data interface{}, indent int) ([]byte, error) {
buffer := new(bytes.Buffer)
encoder := yaml.NewEncoder(buffer)
encoder.SetIndent(indent)
if err := encoder.Encode(data); err != nil {
err := encoder.Encode(data)
if err != nil {
return nil, err
}
defer func() {
if cerr := encoder.Close(); cerr != nil && err == nil {
err = cerr
}
}()
defer encoder.Close()
return buffer.Bytes(), nil
}
@@ -208,7 +209,7 @@ func replaceInFile(filepath, oldStr, newStr string) error {
}
// Replace the string
newContent := strings.ReplaceAll(string(content), oldStr, newStr)
newContent := strings.Replace(string(content), oldStr, newStr, -1)
// Write the modified content back to the file
err = os.WriteFile(filepath, []byte(newContent), 0644)
@@ -227,28 +228,28 @@ func CheckAndAddTraefikLogVolume(composePath string) error {
}
// Parse YAML into a generic map
var compose map[string]any
var compose map[string]interface{}
if err := yaml.Unmarshal(data, &compose); err != nil {
return fmt.Errorf("error parsing compose file: %w", err)
}
// Get services section
services, ok := compose["services"].(map[string]any)
services, ok := compose["services"].(map[string]interface{})
if !ok {
return fmt.Errorf("services section not found or invalid")
}
// Get traefik service
traefik, ok := services["traefik"].(map[string]any)
traefik, ok := services["traefik"].(map[string]interface{})
if !ok {
return fmt.Errorf("traefik service not found or invalid")
}
// Check volumes
logVolume := "./config/traefik/logs:/var/log/traefik"
var volumes []any
var volumes []interface{}
if existingVolumes, ok := traefik["volumes"].([]any); ok {
if existingVolumes, ok := traefik["volumes"].([]interface{}); ok {
// Check if volume already exists
for _, v := range existingVolumes {
if v.(string) == logVolume {
@@ -294,13 +295,13 @@ func MergeYAML(baseFile, overlayFile string) error {
}
// Parse base YAML into a map
var baseMap map[string]any
var baseMap map[string]interface{}
if err := yaml.Unmarshal(baseContent, &baseMap); err != nil {
return fmt.Errorf("error parsing base YAML: %v", err)
}
// Parse overlay YAML into a map
var overlayMap map[string]any
var overlayMap map[string]interface{}
if err := yaml.Unmarshal(overlayContent, &overlayMap); err != nil {
return fmt.Errorf("error parsing overlay YAML: %v", err)
}
@@ -323,8 +324,8 @@ func MergeYAML(baseFile, overlayFile string) error {
}
// mergeMap recursively merges two maps
func mergeMap(base, overlay map[string]any) map[string]any {
result := make(map[string]any)
func mergeMap(base, overlay map[string]interface{}) map[string]interface{} {
result := make(map[string]interface{})
// Copy all key-values from base map
for k, v := range base {
@@ -335,8 +336,8 @@ func mergeMap(base, overlay map[string]any) map[string]any {
for k, v := range overlay {
// If both maps have the same key and both values are maps, merge recursively
if baseVal, ok := base[k]; ok {
if baseMap, isBaseMap := baseVal.(map[string]any); isBaseMap {
if overlayMap, isOverlayMap := v.(map[string]any); isOverlayMap {
if baseMap, isBaseMap := baseVal.(map[string]interface{}); isBaseMap {
if overlayMap, isOverlayMap := v.(map[string]interface{}); isOverlayMap {
result[k] = mergeMap(baseMap, overlayMap)
continue
}

View File

@@ -81,19 +81,11 @@ entryPoints:
transport:
respondingTimeouts:
readTimeout: "30m"
http3:
advertisedPort: 443
http:
tls:
certResolver: "letsencrypt"
middlewares:
middlewares:
- crowdsec@file
encodedCharacters:
allowEncodedSlash: true
allowEncodedQuestionMark: true
serversTransport:
insecureSkipVerify: true
ping:
entryPoint: "web"
insecureSkipVerify: true

View File

@@ -4,12 +4,6 @@ services:
image: docker.io/fosrl/pangolin:{{if .IsEnterprise}}ee-{{end}}{{.PangolinVersion}}
container_name: pangolin
restart: unless-stopped
deploy:
resources:
limits:
memory: 1g
reservations:
memory: 256m
volumes:
- ./config:/app/config
healthcheck:
@@ -38,14 +32,15 @@ services:
- 51820:51820/udp
- 21820:21820/udp
- 443:443
- 443:443/udp # For http3 QUIC if desired
- 80:80
{{end}}
traefik:
image: docker.io/traefik:v3.6
container_name: traefik
restart: unless-stopped
{{if .InstallGerbil}} network_mode: service:gerbil # Ports appear on the gerbil service{{end}}{{if not .InstallGerbil}}
{{if .InstallGerbil}}
network_mode: service:gerbil # Ports appear on the gerbil service
{{end}}{{if not .InstallGerbil}}
ports:
- 443:443
- 80:80

View File

@@ -40,8 +40,6 @@ entryPoints:
transport:
respondingTimeouts:
readTimeout: "30m"
http3:
advertisedPort: 443
http:
tls:
certResolver: "letsencrypt"

View File

@@ -144,13 +144,12 @@ func installDocker() error {
}
func startDockerService() error {
switch runtime.GOOS {
case "linux":
if runtime.GOOS == "linux" {
cmd := exec.Command("systemctl", "enable", "--now", "docker")
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
return cmd.Run()
case "darwin":
} else if runtime.GOOS == "darwin" {
// On macOS, Docker is usually started via the Docker Desktop application
fmt.Println("Please start Docker Desktop manually on macOS.")
return nil
@@ -303,7 +302,7 @@ func pullContainers(containerType SupportedContainer) error {
return nil
}
return fmt.Errorf("unsupported container type: %s", containerType)
return fmt.Errorf("Unsupported container type: %s", containerType)
}
// startContainers starts the containers using the appropriate command.
@@ -326,7 +325,7 @@ func startContainers(containerType SupportedContainer) error {
return nil
}
return fmt.Errorf("unsupported container type: %s", containerType)
return fmt.Errorf("Unsupported container type: %s", containerType)
}
// stopContainers stops the containers using the appropriate command.
@@ -348,7 +347,7 @@ func stopContainers(containerType SupportedContainer) error {
return nil
}
return fmt.Errorf("unsupported container type: %s", containerType)
return fmt.Errorf("Unsupported container type: %s", containerType)
}
// restartContainer restarts a specific container using the appropriate command.
@@ -370,5 +369,5 @@ func restartContainer(container string, containerType SupportedContainer) error
return nil
}
return fmt.Errorf("unsupported container type: %s", containerType)
return fmt.Errorf("Unsupported container type: %s", containerType)
}

View File

@@ -6,13 +6,12 @@ import (
"log"
"os"
"os/exec"
"path/filepath"
"strings"
"gopkg.in/yaml.v3"
)
func installCrowdsec(config Config, installDir string) error {
func installCrowdsec(config Config) error {
if err := stopContainers(config.InstallationContainerType); err != nil {
return fmt.Errorf("failed to stop containers: %v", err)
@@ -28,20 +27,9 @@ func installCrowdsec(config Config, installDir string) error {
os.Exit(1)
}
if err := os.MkdirAll("config/crowdsec/db", 0755); err != nil {
fmt.Printf("Error creating config files: %v\n", err)
os.Exit(1)
}
if err := os.MkdirAll("config/crowdsec/acquis.d", 0755); err != nil {
fmt.Printf("Error creating config files: %v\n", err)
os.Exit(1)
}
if err := os.MkdirAll("config/traefik/logs", 0755); err != nil {
fmt.Printf("Error creating config files: %v\n", err)
os.Exit(1)
}
setupTraefikLogRotate(installDir)
os.MkdirAll("config/crowdsec/db", 0755)
os.MkdirAll("config/crowdsec/acquis.d", 0755)
os.MkdirAll("config/traefik/logs", 0755)
if err := copyDockerService("config/crowdsec/docker-compose.yml", "docker-compose.yml", "crowdsec"); err != nil {
fmt.Printf("Error copying docker service: %v\n", err)
@@ -165,34 +153,34 @@ func CheckAndAddCrowdsecDependency(composePath string) error {
}
// Parse YAML into a generic map
var compose map[string]any
var compose map[string]interface{}
if err := yaml.Unmarshal(data, &compose); err != nil {
return fmt.Errorf("error parsing compose file: %w", err)
}
// Get services section
services, ok := compose["services"].(map[string]any)
services, ok := compose["services"].(map[string]interface{})
if !ok {
return fmt.Errorf("services section not found or invalid")
}
// Get traefik service
traefik, ok := services["traefik"].(map[string]any)
traefik, ok := services["traefik"].(map[string]interface{})
if !ok {
return fmt.Errorf("traefik service not found or invalid")
}
// Get dependencies
dependsOn, ok := traefik["depends_on"].(map[string]any)
dependsOn, ok := traefik["depends_on"].(map[string]interface{})
if ok {
// Append the new block for crowdsec
dependsOn["crowdsec"] = map[string]any{
dependsOn["crowdsec"] = map[string]interface{}{
"condition": "service_healthy",
}
} else {
// No dependencies exist, create it
traefik["depends_on"] = map[string]any{
"crowdsec": map[string]any{
traefik["depends_on"] = map[string]interface{}{
"crowdsec": map[string]interface{}{
"condition": "service_healthy",
},
}
@@ -211,69 +199,3 @@ func CheckAndAddCrowdsecDependency(composePath string) error {
fmt.Println("Added dependency of crowdsec to traefik")
return nil
}
// setupTraefikLogRotate writes a logrotate config for the Traefik access log
// that CrowdSec depends on. This is only needed when CrowdSec is installed
// because the default Pangolin install does not enable Traefik access logs.
//
// copytruncate is used so Traefik does not need to be restarted or sent a
// signal after rotation — it keeps writing to the same file descriptor while
// the rotated copy is made and the original is truncated in place.
func setupTraefikLogRotate(installDir string) {
const logrotateDir = "/etc/logrotate.d"
const logrotateFile = "/etc/logrotate.d/pangolin-traefik"
logPath := filepath.Join(installDir, "config/traefik/logs/access.log")
if os.Geteuid() != 0 {
fmt.Println("\n[logrotate] Skipping automatic logrotate setup: not running as root.")
fmt.Println("[logrotate] To prevent unbounded growth of the Traefik access log used by CrowdSec,")
fmt.Println("[logrotate] create the file /etc/logrotate.d/pangolin-traefik manually with:")
printLogrotateConfig(logPath)
return
}
config := fmt.Sprintf(`# Logrotate config for Traefik access logs used by CrowdSec.
# Generated by the Pangolin installer. Safe to edit.
%s {
daily
rotate 7
compress
delaycompress
missingok
notifempty
copytruncate
}
`, logPath)
if err := os.MkdirAll(logrotateDir, 0755); err != nil {
fmt.Printf("[logrotate] Warning: could not create %s: %v\n", logrotateDir, err)
return
}
if err := os.WriteFile(logrotateFile, []byte(config), 0644); err != nil {
fmt.Printf("[logrotate] Warning: could not write %s: %v\n", logrotateFile, err)
fmt.Println("[logrotate] Set it up manually:")
printLogrotateConfig(logPath)
return
}
fmt.Printf("[logrotate] Wrote logrotate config to %s\n", logrotateFile)
fmt.Println("[logrotate] Traefik access logs will be rotated daily, keeping 7 compressed copies.")
}
// printLogrotateConfig prints a logrotate config block to stdout so users can
// set it up manually when the installer cannot write to /etc.
func printLogrotateConfig(logPath string) {
fmt.Printf(`
%s {
daily
rotate 7
compress
delaycompress
missingok
notifempty
copytruncate
}
`, logPath)
}

View File

@@ -1,38 +1,10 @@
module installer
go 1.25.0
go 1.24.0
require (
github.com/charmbracelet/huh v1.0.0
github.com/charmbracelet/lipgloss v1.1.0
golang.org/x/term v0.42.0
golang.org/x/term v0.39.0
gopkg.in/yaml.v3 v3.0.1
)
require (
github.com/atotto/clipboard v0.1.4 // indirect
github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
github.com/catppuccin/go v0.3.0 // indirect
github.com/charmbracelet/bubbles v0.21.1-0.20250623103423-23b8fd6302d7 // indirect
github.com/charmbracelet/bubbletea v1.3.6 // indirect
github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect
github.com/charmbracelet/x/ansi v0.9.3 // indirect
github.com/charmbracelet/x/cellbuf v0.0.13 // indirect
github.com/charmbracelet/x/exp/strings v0.0.0-20240722160745-212f7b056ed0 // indirect
github.com/charmbracelet/x/term v0.2.1 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-localereader v0.0.1 // indirect
github.com/mattn/go-runewidth v0.0.16 // indirect
github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect
github.com/muesli/cancelreader v0.2.2 // indirect
github.com/muesli/termenv v0.16.0 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
golang.org/x/sync v0.15.0 // indirect
golang.org/x/sys v0.43.0 // indirect
golang.org/x/text v0.23.0 // indirect
)
require golang.org/x/sys v0.40.0 // indirect

View File

@@ -1,80 +1,7 @@
github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=
github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4=
github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI=
github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
github.com/aymanbagabas/go-udiff v0.3.1 h1:LV+qyBQ2pqe0u42ZsUEtPiCaUoqgA9gYRDs3vj1nolY=
github.com/aymanbagabas/go-udiff v0.3.1/go.mod h1:G0fsKmG+P6ylD0r6N/KgQD/nWzgfnl8ZBcNLgcbrw8E=
github.com/catppuccin/go v0.3.0 h1:d+0/YicIq+hSTo5oPuRi5kOpqkVA5tAsU6dNhvRu+aY=
github.com/catppuccin/go v0.3.0/go.mod h1:8IHJuMGaUUjQM82qBrGNBv7LFq6JI3NnQCF6MOlZjpc=
github.com/charmbracelet/bubbles v0.21.1-0.20250623103423-23b8fd6302d7 h1:JFgG/xnwFfbezlUnFMJy0nusZvytYysV4SCS2cYbvws=
github.com/charmbracelet/bubbles v0.21.1-0.20250623103423-23b8fd6302d7/go.mod h1:ISC1gtLcVilLOf23wvTfoQuYbW2q0JevFxPfUzZ9Ybw=
github.com/charmbracelet/bubbletea v1.3.6 h1:VkHIxPJQeDt0aFJIsVxw8BQdh/F/L2KKZGsK6et5taU=
github.com/charmbracelet/bubbletea v1.3.6/go.mod h1:oQD9VCRQFF8KplacJLo28/jofOI2ToOfGYeFgBBxHOc=
github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc h1:4pZI35227imm7yK2bGPcfpFEmuY1gc2YSTShr4iJBfs=
github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc/go.mod h1:X4/0JoqgTIPSFcRA/P6INZzIuyqdFY5rm8tb41s9okk=
github.com/charmbracelet/huh v1.0.0 h1:wOnedH8G4qzJbmhftTqrpppyqHakl/zbbNdXIWJyIxw=
github.com/charmbracelet/huh v1.0.0/go.mod h1:5YVc+SlZ1IhQALxRPpkGwwEKftN/+OlJlnJYlDRFqN4=
github.com/charmbracelet/lipgloss v1.1.0 h1:vYXsiLHVkK7fp74RkV7b2kq9+zDLoEU4MZoFqR/noCY=
github.com/charmbracelet/lipgloss v1.1.0/go.mod h1:/6Q8FR2o+kj8rz4Dq0zQc3vYf7X+B0binUUBwA0aL30=
github.com/charmbracelet/x/ansi v0.9.3 h1:BXt5DHS/MKF+LjuK4huWrC6NCvHtexww7dMayh6GXd0=
github.com/charmbracelet/x/ansi v0.9.3/go.mod h1:3RQDQ6lDnROptfpWuUVIUG64bD2g2BgntdxH0Ya5TeE=
github.com/charmbracelet/x/cellbuf v0.0.13 h1:/KBBKHuVRbq1lYx5BzEHBAFBP8VcQzJejZ/IA3iR28k=
github.com/charmbracelet/x/cellbuf v0.0.13/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs=
github.com/charmbracelet/x/conpty v0.1.0 h1:4zc8KaIcbiL4mghEON8D72agYtSeIgq8FSThSPQIb+U=
github.com/charmbracelet/x/conpty v0.1.0/go.mod h1:rMFsDJoDwVmiYM10aD4bH2XiRgwI7NYJtQgl5yskjEQ=
github.com/charmbracelet/x/errors v0.0.0-20240508181413-e8d8b6e2de86 h1:JSt3B+U9iqk37QUU2Rvb6DSBYRLtWqFqfxf8l5hOZUA=
github.com/charmbracelet/x/errors v0.0.0-20240508181413-e8d8b6e2de86/go.mod h1:2P0UgXMEa6TsToMSuFqKFQR+fZTO9CNGUNokkPatT/0=
github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91 h1:payRxjMjKgx2PaCWLZ4p3ro9y97+TVLZNaRZgJwSVDQ=
github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91/go.mod h1:wDlXFlCrmJ8J+swcL/MnGUuYnqgQdW9rhSD61oNMb6U=
github.com/charmbracelet/x/exp/strings v0.0.0-20240722160745-212f7b056ed0 h1:qko3AQ4gK1MTS/de7F5hPGx6/k1u0w4TeYmBFwzYVP4=
github.com/charmbracelet/x/exp/strings v0.0.0-20240722160745-212f7b056ed0/go.mod h1:pBhA0ybfXv6hDjQUZ7hk1lVxBiUbupdw5R31yPUViVQ=
github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ=
github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg=
github.com/charmbracelet/x/termios v0.1.1 h1:o3Q2bT8eqzGnGPOYheoYS8eEleT5ZVNYNy8JawjaNZY=
github.com/charmbracelet/x/termios v0.1.1/go.mod h1:rB7fnv1TgOPOyyKRJ9o+AsTU/vK5WHJ2ivHeut/Pcwo=
github.com/charmbracelet/x/xpty v0.1.2 h1:Pqmu4TEJ8KeA9uSkISKMU3f+C1F6OGBn8ABuGlqCbtI=
github.com/charmbracelet/x/xpty v0.1.2/go.mod h1:XK2Z0id5rtLWcpeNiMYBccNNBrP2IJnzHI0Lq13Xzq4=
github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s=
github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM=
github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4=
github.com/mattn/go-localereader v0.0.1/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88=
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4=
github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/zz4kQkprJgF2EVszyDE=
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 h1:ZK8zHtRHOkbHy6Mmr5D264iyp3TiX5OmNcI5cIARiQI=
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6/go.mod h1:CJlz5H+gyd6CUWT45Oy4q24RdLyn7Md9Vj2/ldJBSIo=
github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA=
github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo=
github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc=
github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no=
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=
golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI=
golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo=
golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.43.0 h1:Rlag2XtaFTxp19wS8MXlJwTvoh8ArU6ezoyFsMyCTNI=
golang.org/x/sys v0.43.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw=
golang.org/x/term v0.42.0 h1:UiKe+zDFmJobeJ5ggPwOshJIVt6/Ft0rcfrXZDLWAWY=
golang.org/x/term v0.42.0/go.mod h1:Dq/D+snpsbazcBG5+F9Q1n2rXV8Ma+71xEjTRufARgY=
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY=
golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=

View File

@@ -1,208 +1,92 @@
package main
import (
"errors"
"bufio"
"fmt"
"os"
"strconv"
"strings"
"syscall"
"github.com/charmbracelet/huh"
"golang.org/x/term"
)
// pangolinTheme is the custom theme using brand colors
var pangolinTheme = ThemePangolin()
// isAccessibleMode checks if we should use accessible mode (simple prompts)
// This is true for: non-TTY, TERM=dumb, or ACCESSIBLE env var set
func isAccessibleMode() bool {
// Check if stdin is not a terminal (piped input, CI, etc.)
if !term.IsTerminal(int(os.Stdin.Fd())) {
return true
}
// Check for dumb terminal
if os.Getenv("TERM") == "dumb" {
return true
}
// Check for explicit accessible mode request
if os.Getenv("ACCESSIBLE") != "" {
return true
}
return false
}
// handleAbort checks if the error is a user abort (Ctrl+C) and exits if so
func handleAbort(err error) {
if err != nil && errors.Is(err, huh.ErrUserAborted) {
fmt.Println("\nInstallation cancelled.")
os.Exit(0)
}
}
// runField runs a single field with the Pangolin theme, handling accessible mode
func runField(field huh.Field) error {
if isAccessibleMode() {
return field.RunAccessible(os.Stdout, os.Stdin)
}
form := huh.NewForm(huh.NewGroup(field)).WithTheme(pangolinTheme)
return form.Run()
}
func readString(prompt string, defaultValue string) string {
var value string
title := prompt
func readString(reader *bufio.Reader, prompt string, defaultValue string) string {
if defaultValue != "" {
title = fmt.Sprintf("%s (default: %s)", prompt, defaultValue)
fmt.Printf("%s (default: %s): ", prompt, defaultValue)
} else {
fmt.Print(prompt + ": ")
}
input := huh.NewInput().
Title(title).
Value(&value)
// If no default value, this field is required
if defaultValue == "" {
input = input.Validate(func(s string) error {
if s == "" {
return fmt.Errorf("this field is required")
}
return nil
})
input, _ := reader.ReadString('\n')
input = strings.TrimSpace(input)
if input == "" {
return defaultValue
}
err := runField(input)
handleAbort(err)
if value == "" {
value = defaultValue
}
// Print the answer so it remains visible in terminal history (skip in accessible mode as it already shows)
if !isAccessibleMode() {
fmt.Printf("%s: %s\n", prompt, value)
}
return value
return input
}
func readPassword(prompt string) string {
var value string
func readStringNoDefault(reader *bufio.Reader, prompt string) string {
fmt.Print(prompt + ": ")
input, _ := reader.ReadString('\n')
return strings.TrimSpace(input)
}
func readPassword(prompt string, reader *bufio.Reader) string {
if term.IsTerminal(int(syscall.Stdin)) {
fmt.Print(prompt + ": ")
// Read password without echo if we're in a terminal
password, err := term.ReadPassword(int(syscall.Stdin))
fmt.Println() // Add a newline since ReadPassword doesn't add one
if err != nil {
return ""
}
input := strings.TrimSpace(string(password))
if input == "" {
return readPassword(prompt, reader)
}
return input
} else {
// Fallback to reading from stdin if not in a terminal
return readString(reader, prompt, "")
}
}
func readBool(reader *bufio.Reader, prompt string, defaultValue bool) bool {
defaultStr := "no"
if defaultValue {
defaultStr = "yes"
}
for {
input := huh.NewInput().
Title(prompt).
Value(&value).
EchoMode(huh.EchoModePassword).
Validate(func(s string) error {
if s == "" {
return fmt.Errorf("password is required")
}
return nil
})
err := runField(input)
handleAbort(err)
if value != "" {
// Print confirmation without revealing the password
if !isAccessibleMode() {
fmt.Printf("%s: %s\n", prompt, "********")
}
return value
input := readString(reader, prompt+" (yes/no)", defaultStr)
lower := strings.ToLower(input)
if lower == "yes" {
return true
} else if lower == "no" {
return false
} else {
fmt.Println("Please enter 'yes' or 'no'.")
}
}
}
func readBool(prompt string, defaultValue bool) bool {
var value = defaultValue
confirm := huh.NewConfirm().
Title(prompt).
Value(&value).
Affirmative("Yes").
Negative("No")
err := runField(confirm)
handleAbort(err)
// Print the answer so it remains visible in terminal history
if !isAccessibleMode() {
answer := "No"
if value {
answer = "Yes"
func readBoolNoDefault(reader *bufio.Reader, prompt string) bool {
for {
input := readStringNoDefault(reader, prompt+" (yes/no)")
lower := strings.ToLower(input)
if lower == "yes" {
return true
} else if lower == "no" {
return false
} else {
fmt.Println("Please enter 'yes' or 'no'.")
}
fmt.Printf("%s: %s\n", prompt, answer)
}
return value
}
func readBoolNoDefault(prompt string) bool {
var value bool
confirm := huh.NewConfirm().
Title(prompt).
Value(&value).
Affirmative("Yes").
Negative("No")
err := runField(confirm)
handleAbort(err)
// Print the answer so it remains visible in terminal history
if !isAccessibleMode() {
answer := "No"
if value {
answer = "Yes"
}
fmt.Printf("%s: %s\n", prompt, answer)
}
return value
}
func readInt(prompt string, defaultValue int) int {
var value string
title := fmt.Sprintf("%s (default: %d)", prompt, defaultValue)
input := huh.NewInput().
Title(title).
Value(&value).
Validate(func(s string) error {
if s == "" {
return nil
}
_, err := strconv.Atoi(s)
if err != nil {
return fmt.Errorf("please enter a valid number")
}
return nil
})
err := runField(input)
handleAbort(err)
if value == "" {
// Print the answer so it remains visible in terminal history
if !isAccessibleMode() {
fmt.Printf("%s: %d\n", prompt, defaultValue)
}
func readInt(reader *bufio.Reader, prompt string, defaultValue int) int {
input := readString(reader, prompt, fmt.Sprintf("%d", defaultValue))
if input == "" {
return defaultValue
}
result, err := strconv.Atoi(value)
if err != nil {
if !isAccessibleMode() {
fmt.Printf("%s: %d\n", prompt, defaultValue)
}
return defaultValue
}
// Print the answer so it remains visible in terminal history
if !isAccessibleMode() {
fmt.Printf("%s: %d\n", prompt, result)
}
return result
value := defaultValue
fmt.Sscanf(input, "%d", &value)
return value
}

View File

@@ -1,35 +1,29 @@
package main
import (
"crypto/rand"
"bufio"
"embed"
"encoding/base64"
"fmt"
"io"
"io/fs"
"math/rand"
"net"
"net/http"
"net/url"
"os"
"os/exec"
"path/filepath"
"runtime"
"strconv"
"strings"
"text/template"
"time"
)
// Version variables injected at build time via -ldflags
var (
pangolinVersion string
gerbilVersion string
badgerVersion string
)
// DO NOT EDIT THIS FUNCTION; IT MATCHED BY REGEX IN CICD
func loadVersions(config *Config) {
config.PangolinVersion = pangolinVersion
config.GerbilVersion = gerbilVersion
config.BadgerVersion = badgerVersion
config.PangolinVersion = "replaceme"
config.GerbilVersion = "replaceme"
config.BadgerVersion = "replaceme"
}
//go:embed config/*
@@ -87,19 +81,14 @@ func main() {
}
}
reader := bufio.NewReader(os.Stdin)
var config Config
var alreadyInstalled = false
// Determine installation directory
installDir := findOrSelectInstallDirectory()
if err := os.Chdir(installDir); err != nil {
fmt.Printf("Error changing to installation directory: %v\n", err)
os.Exit(1)
}
// check if there is already a config file
if _, err := os.Stat("config/config.yml"); err != nil {
config = collectUserInput()
config = collectUserInput(reader)
loadVersions(&config)
config.DoCrowdsecInstall = false
@@ -112,10 +101,7 @@ func main() {
os.Exit(1)
}
if err := moveFile("config/docker-compose.yml", "docker-compose.yml"); err != nil {
fmt.Printf("Error moving docker-compose.yml: %v\n", err)
os.Exit(1)
}
moveFile("config/docker-compose.yml", "docker-compose.yml")
fmt.Println("\nConfiguration files created successfully!")
@@ -130,17 +116,13 @@ func main() {
fmt.Println("\n=== Starting installation ===")
if readBool("Would you like to install and start the containers?", true) {
if readBool(reader, "Would you like to install and start the containers?", true) {
config.InstallationContainerType = podmanOrDocker()
config.InstallationContainerType = podmanOrDocker(reader)
if !isDockerInstalled() && runtime.GOOS == "linux" && config.InstallationContainerType == Docker {
if readBool("Docker is not installed. Would you like to install it?", true) {
if err := installDocker(); err != nil {
fmt.Printf("Error installing Docker: %v\n", err)
return
}
if readBool(reader, "Docker is not installed. Would you like to install it?", true) {
installDocker()
// try to start docker service but ignore errors
if err := startDockerService(); err != nil {
fmt.Println("Error starting Docker service:", err)
@@ -149,7 +131,7 @@ func main() {
}
// wait 10 seconds for docker to start checking if docker is running every 2 seconds
fmt.Println("Waiting for Docker to start...")
for range 5 {
for i := 0; i < 5; i++ {
if isDockerRunning() {
fmt.Println("Docker is running!")
break
@@ -184,7 +166,7 @@ func main() {
fmt.Println("\n=== MaxMind Database Update ===")
if _, err := os.Stat("config/GeoLite2-Country.mmdb"); err == nil {
fmt.Println("MaxMind GeoLite2 Country database found.")
if readBool("Would you like to update the MaxMind database to the latest version?", false) {
if readBool(reader, "Would you like to update the MaxMind database to the latest version?", false) {
if err := downloadMaxMindDatabase(); err != nil {
fmt.Printf("Error updating MaxMind database: %v\n", err)
fmt.Println("You can try updating it manually later if needed.")
@@ -192,7 +174,7 @@ func main() {
}
} else {
fmt.Println("MaxMind GeoLite2 Country database not found.")
if readBool("Would you like to download the MaxMind GeoLite2 database for geoblocking functionality?", false) {
if readBool(reader, "Would you like to download the MaxMind GeoLite2 database for geoblocking functionality?", false) {
if err := downloadMaxMindDatabase(); err != nil {
fmt.Printf("Error downloading MaxMind database: %v\n", err)
fmt.Println("You can try downloading it manually later if needed.")
@@ -209,11 +191,11 @@ func main() {
if !checkIsCrowdsecInstalledInCompose() {
fmt.Println("\n=== CrowdSec Install ===")
// check if crowdsec is installed
if readBool("Would you like to install CrowdSec?", false) {
if readBool(reader, "Would you like to install CrowdSec?", false) {
fmt.Println("This installer constitutes a minimal viable CrowdSec deployment. CrowdSec will add extra complexity to your Pangolin installation and may not work to the best of its abilities out of the box. Users are expected to implement configuration adjustments on their own to achieve the best security posture. Consult the CrowdSec documentation for detailed configuration instructions.")
// BUG: crowdsec installation will be skipped if the user chooses to install on the first installation.
if readBool("Are you willing to manage CrowdSec?", false) {
if readBool(reader, "Are you willing to manage CrowdSec?", false) {
if config.DashboardDomain == "" {
traefikConfig, err := ReadTraefikConfig("config/traefik/traefik_config.yml")
if err != nil {
@@ -242,8 +224,8 @@ func main() {
fmt.Printf("Let's Encrypt Email: %s\n", config.LetsEncryptEmail)
fmt.Printf("Badger Version: %s\n", config.BadgerVersion)
if !readBool("Are these values correct?", true) {
config = collectUserInput()
if !readBool(reader, "Are these values correct?", true) {
config = collectUserInput(reader)
}
}
@@ -252,14 +234,14 @@ func main() {
if detectedType == Undefined {
// If detection fails, prompt the user
fmt.Println("Unable to detect container type from existing installation.")
config.InstallationContainerType = podmanOrDocker()
config.InstallationContainerType = podmanOrDocker(reader)
} else {
config.InstallationContainerType = detectedType
fmt.Printf("Detected container type: %s\n", config.InstallationContainerType)
}
config.DoCrowdsecInstall = true
err := installCrowdsec(config, installDir)
err := installCrowdsec(config)
if err != nil {
fmt.Printf("Error installing CrowdSec: %v\n", err)
return
@@ -294,119 +276,8 @@ func main() {
fmt.Printf("\nTo complete the initial setup, please visit:\nhttps://%s/auth/initial-setup\n", config.DashboardDomain)
}
func hasExistingInstall(dir string) bool {
configPath := filepath.Join(dir, "config", "config.yml")
_, err := os.Stat(configPath)
return err == nil
}
func findOrSelectInstallDirectory() string {
const defaultInstallDir = "/opt/pangolin"
// Get current working directory
cwd, err := os.Getwd()
if err != nil {
fmt.Printf("Error getting current directory: %v\n", err)
os.Exit(1)
}
// 1. Check current directory for existing install
if hasExistingInstall(cwd) {
fmt.Printf("Found existing Pangolin installation in current directory: %s\n", cwd)
return cwd
}
// 2. Check default location (/opt/pangolin) for existing install
if cwd != defaultInstallDir && hasExistingInstall(defaultInstallDir) {
fmt.Printf("\nFound existing Pangolin installation at: %s\n", defaultInstallDir)
if readBool(fmt.Sprintf("Would you like to use the existing installation at %s?", defaultInstallDir), true) {
return defaultInstallDir
}
}
// 3. No existing install found, prompt for installation directory
fmt.Println("\n=== Installation Directory ===")
fmt.Println("No existing Pangolin installation detected.")
installDir := readString("Enter the installation directory", defaultInstallDir)
// Expand ~ to home directory if present
if strings.HasPrefix(installDir, "~") {
home, err := os.UserHomeDir()
if err != nil {
fmt.Printf("Error getting home directory: %v\n", err)
os.Exit(1)
}
installDir = filepath.Join(home, installDir[1:])
}
// Convert to absolute path
absPath, err := filepath.Abs(installDir)
if err != nil {
fmt.Printf("Error resolving path: %v\n", err)
os.Exit(1)
}
installDir = absPath
// Check if directory exists
if _, err := os.Stat(installDir); os.IsNotExist(err) {
// Directory doesn't exist, create it
if readBool(fmt.Sprintf("Directory %s does not exist. Create it?", installDir), true) {
if err := os.MkdirAll(installDir, 0755); err != nil {
fmt.Printf("Error creating directory: %v\n", err)
os.Exit(1)
}
fmt.Printf("Created directory: %s\n", installDir)
// Offer to change ownership if running via sudo
changeDirectoryOwnership(installDir)
} else {
fmt.Println("Installation cancelled.")
os.Exit(0)
}
}
fmt.Printf("Installation directory: %s\n", installDir)
return installDir
}
func changeDirectoryOwnership(dir string) {
// Check if we're running via sudo by looking for SUDO_USER
sudoUser := os.Getenv("SUDO_USER")
if sudoUser == "" || os.Geteuid() != 0 {
return
}
sudoUID := os.Getenv("SUDO_UID")
sudoGID := os.Getenv("SUDO_GID")
if sudoUID == "" || sudoGID == "" {
return
}
fmt.Printf("\nRunning as root via sudo (original user: %s)\n", sudoUser)
if readBool(fmt.Sprintf("Would you like to change ownership of %s to user '%s'? This makes it easier to manage config files without sudo.", dir, sudoUser), true) {
uid, err := strconv.Atoi(sudoUID)
if err != nil {
fmt.Printf("Warning: Could not parse SUDO_UID: %v\n", err)
return
}
gid, err := strconv.Atoi(sudoGID)
if err != nil {
fmt.Printf("Warning: Could not parse SUDO_GID: %v\n", err)
return
}
if err := os.Chown(dir, uid, gid); err != nil {
fmt.Printf("Warning: Could not change ownership: %v\n", err)
} else {
fmt.Printf("Changed ownership of %s to %s\n", dir, sudoUser)
}
}
}
func podmanOrDocker() SupportedContainer {
inputContainer := readString("Would you like to run Pangolin as Docker or Podman containers?", "docker")
func podmanOrDocker(reader *bufio.Reader) SupportedContainer {
inputContainer := readString(reader, "Would you like to run Pangolin as Docker or Podman containers?", "docker")
chosenContainer := Docker
if strings.EqualFold(inputContainer, "docker") {
@@ -418,8 +289,7 @@ func podmanOrDocker() SupportedContainer {
os.Exit(1)
}
switch chosenContainer {
case Podman:
if chosenContainer == Podman {
if !isPodmanInstalled() {
fmt.Println("Podman or podman-compose is not installed. Please install both manually. Automated installation will be available in a later release.")
os.Exit(1)
@@ -428,7 +298,7 @@ func podmanOrDocker() SupportedContainer {
if err := exec.Command("bash", "-c", "cat /etc/sysctl.d/99-podman.conf 2>/dev/null | grep 'net.ipv4.ip_unprivileged_port_start=' || cat /etc/sysctl.conf 2>/dev/null | grep 'net.ipv4.ip_unprivileged_port_start='").Run(); err != nil {
fmt.Println("Would you like to configure ports >= 80 as unprivileged ports? This enables podman containers to listen on low-range ports.")
fmt.Println("Pangolin will experience startup issues if this is not configured, because it needs to listen on port 80/443 by default.")
approved := readBool("The installer is about to execute \"echo 'net.ipv4.ip_unprivileged_port_start=80' > /etc/sysctl.d/99-podman.conf && sysctl --system\". Approve?", true)
approved := readBool(reader, "The installer is about to execute \"echo 'net.ipv4.ip_unprivileged_port_start=80' > /etc/sysctl.d/99-podman.conf && sysctl --system\". Approve?", true)
if approved {
if os.Geteuid() != 0 {
fmt.Println("You need to run the installer as root for such a configuration.")
@@ -440,7 +310,7 @@ func podmanOrDocker() SupportedContainer {
// Linux only.
if err := run("bash", "-c", "echo 'net.ipv4.ip_unprivileged_port_start=80' > /etc/sysctl.d/99-podman.conf && sysctl --system"); err != nil {
fmt.Printf("Error configuring unprivileged ports: %v\n", err)
fmt.Printf("Error configuring unprivileged ports: %v\n", err)
os.Exit(1)
}
} else {
@@ -450,7 +320,7 @@ func podmanOrDocker() SupportedContainer {
fmt.Println("Unprivileged ports have been configured.")
}
case Docker:
} else if chosenContainer == Docker {
// check if docker is not installed and the user is root
if !isDockerInstalled() {
if os.Geteuid() != 0 {
@@ -465,7 +335,7 @@ func podmanOrDocker() SupportedContainer {
fmt.Println("The installer will not be able to run docker commands without running it as root.")
os.Exit(1)
}
default:
} else {
// This shouldn't happen unless there's a third container runtime.
os.Exit(1)
}
@@ -473,35 +343,35 @@ func podmanOrDocker() SupportedContainer {
return chosenContainer
}
func collectUserInput() Config {
func collectUserInput(reader *bufio.Reader) Config {
config := Config{}
// Basic configuration
fmt.Println("\n=== Basic Configuration ===")
config.IsEnterprise = readBoolNoDefault("Do you want to install the Enterprise version of Pangolin? The EE is free for personal use or for businesses making less than 100k USD annually.")
config.IsEnterprise = readBoolNoDefault(reader, "Do you want to install the Enterprise version of Pangolin? The EE is free for personal use or for businesses making less than 100k USD annually.")
config.BaseDomain = readString("Enter your base domain (no subdomain e.g. example.com)", "")
config.BaseDomain = readString(reader, "Enter your base domain (no subdomain e.g. example.com)", "")
// Set default dashboard domain after base domain is collected
defaultDashboardDomain := ""
if config.BaseDomain != "" {
defaultDashboardDomain = "pangolin." + config.BaseDomain
}
config.DashboardDomain = readString("Enter the domain for the Pangolin dashboard", defaultDashboardDomain)
config.LetsEncryptEmail = readString("Enter email for Let's Encrypt certificates", "")
config.InstallGerbil = readBool("Do you want to use Gerbil to allow tunneled connections", true)
config.DashboardDomain = readString(reader, "Enter the domain for the Pangolin dashboard", defaultDashboardDomain)
config.LetsEncryptEmail = readString(reader, "Enter email for Let's Encrypt certificates", "")
config.InstallGerbil = readBool(reader, "Do you want to use Gerbil to allow tunneled connections", true)
// Email configuration
fmt.Println("\n=== Email Configuration ===")
config.EnableEmail = readBool("Enable email functionality (SMTP)", false)
config.EnableEmail = readBool(reader, "Enable email functionality (SMTP)", false)
if config.EnableEmail {
config.EmailSMTPHost = readString("Enter SMTP host", "")
config.EmailSMTPPort = readInt("Enter SMTP port (default 587)", 587)
config.EmailSMTPUser = readString("Enter SMTP username", "")
config.EmailSMTPPass = readPassword("Enter SMTP password")
config.EmailNoReply = readString("Enter no-reply email address (often the same as SMTP username)", "")
config.EmailSMTPHost = readString(reader, "Enter SMTP host", "")
config.EmailSMTPPort = readInt(reader, "Enter SMTP port (default 587)", 587)
config.EmailSMTPUser = readString(reader, "Enter SMTP username", "")
config.EmailSMTPPass = readString(reader, "Enter SMTP password", "") // Should this be readPassword?
config.EmailNoReply = readString(reader, "Enter no-reply email address (often the same as SMTP username)", "")
}
// Validate required fields
@@ -522,8 +392,8 @@ func collectUserInput() Config {
fmt.Println("\n=== Advanced Configuration ===")
config.EnableIPv6 = readBool("Is your server IPv6 capable?", true)
config.EnableGeoblocking = readBool("Do you want to download the MaxMind GeoLite2 database for geoblocking functionality?", true)
config.EnableIPv6 = readBool(reader, "Is your server IPv6 capable?", true)
config.EnableGeoblocking = readBool(reader, "Do you want to download the MaxMind GeoLite2 database for geoblocking functionality?", true)
if config.DashboardDomain == "" {
fmt.Println("Error: Dashboard Domain name is required")
@@ -534,23 +404,15 @@ func collectUserInput() Config {
}
func createConfigFiles(config Config) error {
if err := os.MkdirAll("config", 0755); err != nil {
return fmt.Errorf("failed to create config directory: %v", err)
}
if err := os.MkdirAll("config/letsencrypt", 0755); err != nil {
return fmt.Errorf("failed to create letsencrypt directory: %v", err)
}
if err := os.MkdirAll("config/db", 0755); err != nil {
return fmt.Errorf("failed to create db directory: %v", err)
}
if err := os.MkdirAll("config/logs", 0755); err != nil {
return fmt.Errorf("failed to create logs directory: %v", err)
}
os.MkdirAll("config", 0755)
os.MkdirAll("config/letsencrypt", 0755)
os.MkdirAll("config/db", 0755)
os.MkdirAll("config/logs", 0755)
// Walk through all embedded files
err := fs.WalkDir(configFiles, "config", func(path string, d fs.DirEntry, walkErr error) (err error) {
if walkErr != nil {
return walkErr
err := fs.WalkDir(configFiles, "config", func(path string, d fs.DirEntry, err error) error {
if err != nil {
return err
}
// Skip the root fs directory itself
@@ -601,11 +463,7 @@ func createConfigFiles(config Config) error {
if err != nil {
return fmt.Errorf("failed to create %s: %v", path, err)
}
defer func() {
if cerr := outFile.Close(); cerr != nil && err == nil {
err = cerr
}
}()
defer outFile.Close()
// Execute template
if err := tmpl.Execute(outFile, config); err != nil {
@@ -621,26 +479,18 @@ func createConfigFiles(config Config) error {
return nil
}
func copyFile(src, dst string) (err error) {
func copyFile(src, dst string) error {
source, err := os.Open(src)
if err != nil {
return err
}
defer func() {
if cerr := source.Close(); cerr != nil && err == nil {
err = cerr
}
}()
defer source.Close()
destination, err := os.Create(dst)
if err != nil {
return err
}
defer func() {
if cerr := destination.Close(); cerr != nil && err == nil {
err = cerr
}
}()
defer destination.Close()
_, err = io.Copy(destination, source)
return err
@@ -711,24 +561,22 @@ func showSetupTokenInstructions(containerType SupportedContainer, dashboardDomai
fmt.Println("To get your setup token, you need to:")
fmt.Println("")
fmt.Println("1. Start the containers")
switch containerType {
case Docker:
if containerType == Docker {
fmt.Println(" docker compose up -d")
case Podman:
} else if containerType == Podman {
fmt.Println(" podman-compose up -d")
} else {
}
fmt.Println("")
fmt.Println("2. Wait for the Pangolin container to start and generate the token")
fmt.Println("")
fmt.Println("3. Check the container logs for the setup token")
switch containerType {
case Docker:
if containerType == Docker {
fmt.Println(" docker logs pangolin | grep -A 2 -B 2 'SETUP TOKEN'")
case Podman:
} else if containerType == Podman {
fmt.Println(" podman logs pangolin | grep -A 2 -B 2 'SETUP TOKEN'")
} else {
}
fmt.Println("")
fmt.Println("4. Look for output like")
fmt.Println(" === SETUP TOKEN GENERATED ===")
@@ -744,12 +592,43 @@ func showSetupTokenInstructions(containerType SupportedContainer, dashboardDomai
}
// generateRandomSecretKey returns a base64-encoded 32-byte secret produced
// by the operating system's cryptographically secure random source.
//
// NOTE(review): a math/rand generator seeded with time.Now().UnixNano() is
// predictable and must never be used for secrets — an attacker who can
// estimate the install time can reconstruct the key. crypto/rand is required.
func generateRandomSecretKey() string {
	secret := make([]byte, 32)
	// rand.Read only fails when the OS entropy source is unavailable; there
	// is no safe way to proceed with secret generation in that case.
	if _, err := rand.Read(secret); err != nil {
		panic(fmt.Sprintf("Failed to generate random secret key: %v", err))
	}
	return base64.StdEncoding.EncodeToString(secret)
}
// getPublicIP asks https://ifconfig.io/ip for this machine's public IP
// address. On success it returns the trimmed address; on any network, read,
// or parse failure it returns the empty string.
func getPublicIP() string {
	httpClient := &http.Client{Timeout: 10 * time.Second}

	resp, err := httpClient.Get("https://ifconfig.io/ip")
	if err != nil {
		return ""
	}
	defer resp.Body.Close()

	raw, readErr := io.ReadAll(resp.Body)
	if readErr != nil {
		return ""
	}

	candidate := strings.TrimSpace(string(raw))
	// Only report the value if it parses as a valid IPv4/IPv6 address;
	// anything else (error page, HTML, garbage) is discarded.
	if net.ParseIP(candidate) == nil {
		return ""
	}
	return candidate
}
// Run external commands with stdio/stderr attached.
@@ -764,7 +643,10 @@ func checkPortsAvailable(port int) error {
addr := fmt.Sprintf(":%d", port)
ln, err := net.Listen("tcp", addr)
if err != nil {
return fmt.Errorf("ERROR: port %d is occupied or cannot be bound: %w", port, err)
return fmt.Errorf(
"ERROR: port %d is occupied or cannot be bound: %w\n\n",
port, err,
)
}
if closeErr := ln.Close(); closeErr != nil {
fmt.Fprintf(os.Stderr,

View File

@@ -1,51 +0,0 @@
package main
import (
"github.com/charmbracelet/huh"
"github.com/charmbracelet/lipgloss"
)
// Pangolin brand colors (converted from oklch to hex).
// lipgloss.AdaptiveColor selects the Light or Dark hex value based on the
// detected terminal background, so each color carries one variant per scheme.
var (
	// Primary orange/amber - oklch(0.6717 0.1946 41.93)
	primaryColor = lipgloss.AdaptiveColor{Light: "#D97706", Dark: "#F59E0B"}
	// Muted foreground — used for descriptions and de-emphasized text
	mutedColor = lipgloss.AdaptiveColor{Light: "#737373", Dark: "#A3A3A3"}
	// Success green — used for the selected-option "✓ " prefix
	successColor = lipgloss.AdaptiveColor{Light: "#16A34A", Dark: "#22C55E"}
	// Error red - oklch(0.577 0.245 27.325)
	errorColor = lipgloss.AdaptiveColor{Light: "#DC2626", Dark: "#EF4444"}
	// Normal text foreground
	normalFg = lipgloss.AdaptiveColor{Light: "#171717", Dark: "#FAFAFA"}
)
// ThemePangolin assembles a huh form theme that applies the Pangolin brand
// palette declared above to every interactive element.
func ThemePangolin() *huh.Theme {
	theme := huh.ThemeBase()

	// Appearance of the field that currently has focus.
	theme.Focused.Base = theme.Focused.Base.BorderForeground(primaryColor)
	theme.Focused.Title = theme.Focused.Title.Foreground(primaryColor).Bold(true)
	theme.Focused.Description = theme.Focused.Description.Foreground(mutedColor)
	theme.Focused.ErrorIndicator = theme.Focused.ErrorIndicator.Foreground(errorColor)
	theme.Focused.ErrorMessage = theme.Focused.ErrorMessage.Foreground(errorColor)
	theme.Focused.SelectSelector = theme.Focused.SelectSelector.Foreground(primaryColor)
	theme.Focused.NextIndicator = theme.Focused.NextIndicator.Foreground(primaryColor)
	theme.Focused.PrevIndicator = theme.Focused.PrevIndicator.Foreground(primaryColor)
	theme.Focused.Option = theme.Focused.Option.Foreground(normalFg)
	theme.Focused.SelectedOption = theme.Focused.SelectedOption.Foreground(primaryColor)
	theme.Focused.SelectedPrefix = lipgloss.NewStyle().Foreground(successColor).SetString("✓ ")
	theme.Focused.UnselectedPrefix = lipgloss.NewStyle().Foreground(mutedColor).SetString(" ")
	theme.Focused.FocusedButton = theme.Focused.FocusedButton.Foreground(lipgloss.Color("#FFFFFF")).Background(primaryColor)
	theme.Focused.BlurredButton = theme.Focused.BlurredButton.Foreground(normalFg).Background(lipgloss.AdaptiveColor{Light: "#E5E5E5", Dark: "#404040"})
	theme.Focused.TextInput.Cursor = theme.Focused.TextInput.Cursor.Foreground(primaryColor)
	theme.Focused.TextInput.Prompt = theme.Focused.TextInput.Prompt.Foreground(primaryColor)

	// Blurred fields start as a copy of the focused styles, then hide the
	// border and mute the title/prompt colors.
	theme.Blurred = theme.Focused
	theme.Blurred.Base = theme.Focused.Base.BorderStyle(lipgloss.HiddenBorder())
	theme.Blurred.Title = theme.Blurred.Title.Foreground(mutedColor).Bold(false)
	theme.Blurred.TextInput.Prompt = theme.Blurred.TextInput.Prompt.Foreground(mutedColor)

	return theme
}

View File

@@ -1,137 +0,0 @@
import os
import sys
# --- Configuration ---
# The header text to be added to the files.
HEADER_TEXT = """/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
"""
HEADER_NORMALIZED = HEADER_TEXT.strip()
def extract_leading_block_comment(content):
    """Return the leading ``/* ... */`` block comment of *content*.

    Returns:
        A ``(comment, rest_start)`` tuple, where ``comment`` is the comment
        text including its delimiters (whitespace-stripped) and ``rest_start``
        is the index of the first character after the comment's trailing
        newlines. When the file does not open with a block comment, or the
        comment is unterminated, the result is ``(None, 0)``.
    """
    # The file must begin with '/*', ignoring any leading whitespace.
    if not content.lstrip().startswith('/*'):
        return None, 0

    opener = content.index('/*')
    closer = content.find('*/', opener + 2)
    if closer == -1:
        # Unterminated comment: treat the file as having no header.
        return None, 0

    after_comment = closer + 2  # index just past the closing '*/'
    header = content[opener:after_comment].strip()

    # Skip newline characters that immediately follow the comment so the
    # caller receives the start of the real file body.
    body_index = after_comment
    while body_index < len(content) and content[body_index] in '\n\r':
        body_index += 1
    return header, body_index
def should_add_header(file_path):
    """Decide whether *file_path* should carry the commercial license header.

    Args:
        file_path: Path of the candidate source file.

    Returns:
        True when the path (case-insensitively) contains the
        ``server/private`` directory, False otherwise.
    """
    # Normalize separators so Windows-style 'server\\private\\x.ts' paths
    # also match; the original '/'-only check silently missed them.
    normalized = file_path.replace(os.sep, '/').replace('\\', '/').lower()
    return 'server/private' in normalized
def process_directory(root_dir):
    """Walk *root_dir* and reconcile license headers in TypeScript sources.

    Files whose path qualifies via ``should_add_header`` get the commercial
    header added or refreshed; all other ``.ts``/``.tsx`` files have a stale
    leading block comment removed. ``node_modules`` directories are never
    descended into. Per-file results and a summary are printed to stdout.
    """
    print(f"Scanning directory: {root_dir}")
    seen = 0
    changed = 0

    for current_dir, subdirs, filenames in os.walk(root_dir):
        # Prune node_modules in place so os.walk never enters it.
        if 'node_modules' in subdirs:
            subdirs.remove('node_modules')

        for name in filenames:
            if not name.endswith(('.ts', '.tsx')):
                continue

            file_path = os.path.join(current_dir, name)
            seen += 1
            try:
                with open(file_path, 'r', encoding='utf-8') as handle:
                    text = handle.read()

                comment, body_start = extract_leading_block_comment(text)
                header_present = comment is not None
                header_current = comment == HEADER_NORMALIZED
                remainder = text[body_start:] if header_present else text

                if should_add_header(file_path):
                    if header_current:
                        print(f"Header up-to-date: {file_path}")
                    else:
                        # Header is missing or stale — (re)write the
                        # canonical one above the file body.
                        verb = "Replaced header in" if header_present else "Added header to"
                        with open(file_path, 'w', encoding='utf-8') as handle:
                            handle.write(HEADER_NORMALIZED + '\n\n' + remainder)
                        print(f"{verb}: {file_path}")
                        changed += 1
                elif header_present:
                    # File is outside the licensed tree: strip the header.
                    with open(file_path, 'w', encoding='utf-8') as handle:
                        handle.write(remainder)
                    print(f"Removed header from: {file_path}")
                    changed += 1
                else:
                    print(f"No header needed: {file_path}")
            except Exception as e:
                print(f"Error processing file {file_path}: {e}")

    print("\n--- Scan Complete ---")
    print(f"Total .ts or .tsx files found: {seen}")
    print(f"Files modified (added/replaced/removed): {changed}")
if __name__ == "__main__":
# Get the target directory from the command line arguments.
# If no directory is provided, it uses the current directory ('.').
if len(sys.argv) > 1:
target_directory = sys.argv[1]
else:
target_directory = '.' # Default to current directory
if not os.path.isdir(target_directory):
print(f"Error: Directory '{target_directory}' not found.")
sys.exit(1)
process_directory(os.path.abspath(target_directory))

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

9310
package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@@ -12,29 +12,30 @@
"license": "SEE LICENSE IN LICENSE AND README.md",
"scripts": {
"dev": "NODE_ENV=development ENVIRONMENT=dev tsx watch server/index.ts",
"dev:check": "npx tsc --noEmit && npm run format:check",
"dev:setup": "cp config/config.example.yml config/config.yml && npm run set:oss && npm run set:sqlite && npm run db:sqlite:generate && npm run db:sqlite:push",
"db:generate": "drizzle-kit generate --config=./drizzle.config.ts",
"db:push": "npx tsx server/db/migrate.ts",
"db:studio": "drizzle-kit studio --config=./drizzle.config.ts",
"db:pg:generate": "drizzle-kit generate --config=./drizzle.pg.config.ts",
"db:sqlite:generate": "drizzle-kit generate --config=./drizzle.sqlite.config.ts",
"db:pg:push": "npx tsx server/db/pg/migrate.ts",
"db:sqlite:push": "npx tsx server/db/sqlite/migrate.ts",
"db:sqlite:studio": "drizzle-kit studio --config=./drizzle.sqlite.config.ts",
"db:pg:studio": "drizzle-kit studio --config=./drizzle.pg.config.ts",
"db:clear-migrations": "rm -rf server/migrations",
"set:oss": "echo 'export const build = \"oss\" as \"saas\" | \"enterprise\" | \"oss\";' > server/build.ts && cp tsconfig.oss.json tsconfig.json",
"set:saas": "echo 'export const build = \"saas\" as \"saas\" | \"enterprise\" | \"oss\";' > server/build.ts && cp tsconfig.saas.json tsconfig.json",
"set:enterprise": "echo 'export const build = \"enterprise\" as \"saas\" | \"enterprise\" | \"oss\";' > server/build.ts && cp tsconfig.enterprise.json tsconfig.json",
"set:sqlite": "echo 'export * from \"./sqlite\";\nexport const driver: \"pg\" | \"sqlite\" = \"sqlite\";' > server/db/index.ts && cp drizzle.sqlite.config.ts drizzle.config.ts && cp server/setup/migrationsSqlite.ts server/setup/migrations.ts",
"set:pg": "echo 'export * from \"./pg\";\nexport const driver: \"pg\" | \"sqlite\" = \"pg\";' > server/db/index.ts && cp drizzle.pg.config.ts drizzle.config.ts && cp server/setup/migrationsPg.ts server/setup/migrations.ts",
"build:next": "next build",
"build": "mkdir -p dist && next build && node esbuild.mjs -e server/index.ts -o dist/server.mjs && node esbuild.mjs -e server/setup/migrations.ts -o dist/migrations.mjs",
"set:sqlite": "echo 'export * from \"./sqlite\";\nexport const driver: \"pg\" | \"sqlite\" = \"sqlite\";' > server/db/index.ts",
"set:pg": "echo 'export * from \"./pg\";\nexport const driver: \"pg\" | \"sqlite\" = \"pg\";' > server/db/index.ts",
"next:build": "next build",
"build:sqlite": "mkdir -p dist && next build && node esbuild.mjs -e server/index.ts -o dist/server.mjs && node esbuild.mjs -e server/setup/migrationsSqlite.ts -o dist/migrations.mjs",
"build:pg": "mkdir -p dist && next build && node esbuild.mjs -e server/index.ts -o dist/server.mjs && node esbuild.mjs -e server/setup/migrationsPg.ts -o dist/migrations.mjs",
"start": "ENVIRONMENT=prod node dist/migrations.mjs && ENVIRONMENT=prod NODE_ENV=development node --enable-source-maps dist/server.mjs",
"email": "email dev --dir server/emails/templates --port 3005",
"build:cli": "node esbuild.mjs -e cli/index.ts -o dist/cli.mjs",
"format:check": "prettier --check .",
"format": "prettier --write ."
},
"dependencies": {
"@asteasolutions/zod-to-openapi": "8.4.1",
"@aws-sdk/client-s3": "3.1011.0",
"@faker-js/faker": "10.3.0",
"@asteasolutions/zod-to-openapi": "8.4.0",
"@aws-sdk/client-s3": "3.971.0",
"@faker-js/faker": "10.2.0",
"@headlessui/react": "2.2.9",
"@hookform/resolvers": "5.2.2",
"@monaco-editor/react": "4.7.0",
@@ -59,83 +60,90 @@
"@radix-ui/react-tabs": "1.1.13",
"@radix-ui/react-toast": "1.2.15",
"@radix-ui/react-tooltip": "1.2.8",
"@react-email/components": "1.0.8",
"@react-email/render": "2.0.4",
"@react-email/tailwind": "2.0.5",
"@simplewebauthn/browser": "13.3.0",
"@simplewebauthn/server": "13.3.0",
"@react-email/components": "1.0.2",
"@react-email/render": "2.0.0",
"@react-email/tailwind": "2.0.2",
"@simplewebauthn/browser": "13.2.2",
"@simplewebauthn/server": "13.2.2",
"@tailwindcss/forms": "0.5.11",
"@tanstack/react-query": "5.90.21",
"@tanstack/react-query": "5.90.12",
"@tanstack/react-table": "8.21.3",
"arctic": "3.7.0",
"axios": "1.15.0",
"axios": "1.13.2",
"better-sqlite3": "11.9.1",
"canvas-confetti": "1.9.4",
"class-variance-authority": "0.7.1",
"clsx": "2.1.1",
"cmdk": "1.1.1",
"cookie": "1.1.1",
"cookie-parser": "1.4.7",
"cors": "2.8.6",
"cookies": "0.9.1",
"cors": "2.8.5",
"crypto-js": "4.2.0",
"d3": "7.9.0",
"drizzle-orm": "0.45.2",
"date-fns": "4.1.0",
"drizzle-orm": "0.45.1",
"eslint": "9.39.2",
"eslint-config-next": "16.1.0",
"express": "5.2.1",
"express-rate-limit": "8.3.0",
"glob": "13.0.6",
"express-rate-limit": "8.2.1",
"glob": "13.0.0",
"helmet": "8.1.0",
"http-errors": "2.0.1",
"i": "0.3.7",
"input-otp": "1.4.2",
"ioredis": "5.10.0",
"ioredis": "5.9.2",
"jmespath": "0.16.0",
"js-yaml": "4.1.1",
"jsonwebtoken": "9.0.3",
"lucide-react": "0.577.0",
"maxmind": "5.0.5",
"lucide-react": "0.562.0",
"maxmind": "5.0.1",
"moment": "2.30.1",
"next": "15.5.15",
"next-intl": "4.8.3",
"next": "15.5.9",
"next-intl": "4.7.0",
"next-themes": "0.4.6",
"nextjs-toploader": "3.9.17",
"node-cache": "5.1.2",
"nodemailer": "8.0.5",
"node-fetch": "3.3.2",
"nodemailer": "7.0.11",
"npm": "11.7.0",
"nprogress": "0.2.0",
"oslo": "1.2.1",
"pg": "8.20.0",
"posthog-node": "5.28.0",
"pg": "8.17.1",
"posthog-node": "5.23.0",
"qrcode.react": "4.2.0",
"react": "19.2.4",
"react-day-picker": "9.14.0",
"react-dom": "19.2.4",
"react": "19.2.3",
"react-day-picker": "9.13.0",
"react-dom": "19.2.3",
"react-easy-sort": "1.8.0",
"react-hook-form": "7.71.2",
"react-icons": "5.6.0",
"react-hook-form": "7.71.1",
"react-icons": "5.5.0",
"rebuild": "0.1.2",
"recharts": "2.15.4",
"reodotdev": "1.1.0",
"resend": "6.9.2",
"semver": "7.7.4",
"sshpk": "1.18.0",
"stripe": "20.4.1",
"reodotdev": "1.0.0",
"resend": "6.8.0",
"semver": "7.7.3",
"stripe": "20.2.0",
"swagger-ui-express": "5.0.1",
"tailwind-merge": "3.5.0",
"tailwind-merge": "3.4.0",
"topojson-client": "3.1.0",
"tw-animate-css": "1.4.0",
"use-debounce": "10.1.0",
"uuid": "13.0.0",
"vaul": "1.1.2",
"visionscarto-world-atlas": "1.0.0",
"winston": "3.19.0",
"winston-daily-rotate-file": "5.0.0",
"ws": "8.19.0",
"yaml": "2.8.3",
"yaml": "2.8.2",
"yargs": "18.0.0",
"zod": "4.3.6",
"zod": "4.3.5",
"zod-validation-error": "5.0.0"
},
"devDependencies": {
"@dotenvx/dotenvx": "1.54.1",
"@dotenvx/dotenvx": "1.51.2",
"@esbuild-plugins/tsconfig-paths": "0.1.2",
"@react-email/preview-server": "5.2.10",
"@tailwindcss/postcss": "4.2.2",
"@tanstack/react-query-devtools": "5.91.3",
"@tailwindcss/postcss": "4.1.18",
"@tanstack/react-query-devtools": "5.91.1",
"@types/better-sqlite3": "7.6.13",
"@types/cookie-parser": "1.4.10",
"@types/cors": "2.8.19",
@@ -144,37 +152,30 @@
"@types/express": "5.0.6",
"@types/express-session": "1.18.2",
"@types/jmespath": "0.15.2",
"@types/js-yaml": "4.0.9",
"@types/jsonwebtoken": "9.0.10",
"@types/node": "25.3.5",
"@types/nodemailer": "7.0.11",
"@types/node": "24.10.2",
"@types/nodemailer": "7.0.4",
"@types/nprogress": "0.2.3",
"@types/pg": "8.18.0",
"@types/react": "19.2.14",
"@types/pg": "8.16.0",
"@types/react": "19.2.7",
"@types/react-dom": "19.2.3",
"@types/semver": "7.7.1",
"@types/sshpk": "1.17.4",
"@types/swagger-ui-express": "4.1.8",
"@types/topojson-client": "3.1.5",
"@types/ws": "8.18.1",
"@types/yargs": "17.0.35",
"@types/js-yaml": "4.0.9",
"babel-plugin-react-compiler": "1.0.0",
"drizzle-kit": "0.31.10",
"esbuild": "0.27.4",
"drizzle-kit": "0.31.8",
"esbuild": "0.27.2",
"esbuild-node-externals": "1.20.1",
"eslint": "10.0.3",
"eslint-config-next": "16.1.7",
"postcss": "8.5.8",
"prettier": "3.8.1",
"react-email": "5.2.10",
"tailwindcss": "4.2.2",
"postcss": "8.5.6",
"prettier": "3.8.0",
"react-email": "5.2.5",
"tailwindcss": "4.1.18",
"tsc-alias": "1.8.16",
"tsx": "4.21.0",
"typescript": "5.9.3",
"typescript-eslint": "8.56.1"
},
"overrides": {
"esbuild": "0.27.4",
"dompurify": "3.3.2"
"typescript-eslint": "8.53.1"
}
}

Some files were not shown because too many files have changed in this diff Show More