Compare commits: s3...1.15.0 (1 commit)

Owen Schwartz · 40f2262f3e · Merge pull request #2309 from fosrl/dev (1.15.0) · 2026-01-23 10:40:16 -08:00

972 changed files with 29687 additions and 88974 deletions


@@ -1,5 +0,0 @@
---
alwaysApply: true
---
Always localize strings and use the `t` function to convert keys to strings. Add the keys to the en-us.json file. Never edit the other language files, as en-us.json is the single source of truth.
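The rule removed here prescribes a concrete convention. A minimal TypeScript sketch of what it asks for — the key name is illustrative and `t` stands in for whatever i18n helper the app actually exposes; only "resolve keys through `t` and add them to en-us.json" comes from the rule itself:

// Hypothetical sketch of the localization rule; the key and the `t` binding are assumptions.
declare function t(key: string): string;

// en-us.json (single source of truth — never hand-edit the other locale files):
// { "siteCreateButton": "Create Site" }

const label = t("siteCreateButton"); // correct: the key is resolved through t
// const label = "Create Site";      // wrong: hard-coded English bypasses en-us.json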


@@ -1,7 +0,0 @@
---
description:
alwaysApply: true
---
Proxy resources = public resources
Private resources = client resources = site resources


@@ -28,9 +28,7 @@ LICENSE
 CONTRIBUTING.md
 dist
 .git
-server/migrations/
+migrations/
 config/
 build.ts
 tsconfig.json
-Dockerfile*
-drizzle.config.ts

.github/CODEOWNERS

@@ -1 +0,0 @@
* @oschwartz10612 @miloschwartz


@@ -44,9 +44,19 @@ updates:
     schedule:
       interval: "daily"
     groups:
-      patch-updates:
+      dev-patch-updates:
+        dependency-type: "development"
         update-types:
           - "patch"
-      minor-updates:
+      dev-minor-updates:
+        dependency-type: "development"
         update-types:
           - "minor"
+      prod-patch-updates:
+        dependency-type: "production"
+        update-types:
+          - "patch"
+      prod-minor-updates:
+        dependency-type: "production"
+        update-types:
+          - "minor"


@@ -1,4 +1,4 @@
-name: Public CICD Pipeline
+name: CI/CD Pipeline
 # CI/CD workflow for building, publishing, mirroring, signing container images and building release binaries.
 # Actions are pinned to specific SHAs to reduce supply-chain risk. This workflow triggers on tag push events.
@@ -29,7 +29,7 @@ jobs:
     permissions: write-all
     steps:
       - name: Configure AWS credentials
-        uses: aws-actions/configure-aws-credentials@v6
+        uses: aws-actions/configure-aws-credentials@v5
         with:
           role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/${{ secrets.AWS_ROLE_NAME }}
           role-duration-seconds: 3600
@@ -62,7 +62,7 @@ jobs:
     steps:
       - name: Checkout code
-        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
       - name: Monitor storage space
         run: |
@@ -77,7 +77,7 @@
           fi
       - name: Log in to Docker Hub
-        uses: docker/login-action@4907a6ddec9925e35a0a9e82d7399ccc52663121 # v4.1.0
+        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
         with:
           registry: docker.io
           username: ${{ secrets.DOCKER_HUB_USERNAME }}
@@ -134,7 +134,7 @@
     steps:
       - name: Checkout code
-        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
       - name: Monitor storage space
         run: |
@@ -149,7 +149,7 @@
           fi
       - name: Log in to Docker Hub
-        uses: docker/login-action@4907a6ddec9925e35a0a9e82d7399ccc52663121 # v4.1.0
+        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
         with:
           registry: docker.io
           username: ${{ secrets.DOCKER_HUB_USERNAME }}
@@ -201,10 +201,10 @@
     timeout-minutes: 30
     steps:
       - name: Checkout code
-        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
       - name: Log in to Docker Hub
-        uses: docker/login-action@4907a6ddec9925e35a0a9e82d7399ccc52663121 # v4.1.0
+        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
         with:
           registry: docker.io
           username: ${{ secrets.DOCKER_HUB_USERNAME }}
@@ -256,7 +256,7 @@
     steps:
       - name: Checkout code
-        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
       - name: Extract tag name
         id: get-tag
@@ -264,9 +264,9 @@
         shell: bash
       - name: Install Go
-        uses: actions/setup-go@4a3601121dd01d1626a1e23e37211e3254c1c06c # v6.4.0
+        uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
         with:
-          go-version: 1.25
+          go-version: 1.24
       - name: Update version in package.json
         run: |
@@ -289,17 +289,25 @@
           echo "LATEST_BADGER_TAG=$LATEST_TAG" >> $GITHUB_ENV
         shell: bash
+      - name: Update install/main.go
+        run: |
+          PANGOLIN_VERSION=${{ env.TAG }}
+          GERBIL_VERSION=${{ env.LATEST_GERBIL_TAG }}
+          BADGER_VERSION=${{ env.LATEST_BADGER_TAG }}
+          sed -i "s/config.PangolinVersion = \".*\"/config.PangolinVersion = \"$PANGOLIN_VERSION\"/" install/main.go
+          sed -i "s/config.GerbilVersion = \".*\"/config.GerbilVersion = \"$GERBIL_VERSION\"/" install/main.go
+          sed -i "s/config.BadgerVersion = \".*\"/config.BadgerVersion = \"$BADGER_VERSION\"/" install/main.go
+          echo "Updated install/main.go with Pangolin version $PANGOLIN_VERSION, Gerbil version $GERBIL_VERSION, and Badger version $BADGER_VERSION"
+          cat install/main.go
+        shell: bash
       - name: Build installer
         working-directory: install
         run: |
-          make go-build-release \
-            PANGOLIN_VERSION=${{ env.TAG }} \
-            GERBIL_VERSION=${{ env.LATEST_GERBIL_TAG }} \
-            BADGER_VERSION=${{ env.LATEST_BADGER_TAG }}
-        shell: bash
+          make go-build-release
       - name: Upload artifacts from /install/bin
-        uses: actions/upload-artifact@043fb46d1a93c77aae656e7c1c64a875d1fc6a0a # v7.0.1
+        uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
         with:
           name: install-bin
           path: install/bin/
@@ -407,25 +415,31 @@
         shell: bash
       - name: Login to GitHub Container Registry (for cosign)
-        uses: docker/login-action@4907a6ddec9925e35a0a9e82d7399ccc52663121 # v4.1.0
+        uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
         with:
           registry: ghcr.io
           username: ${{ github.actor }}
           password: ${{ secrets.GITHUB_TOKEN }}
       - name: Install cosign
-        # cosign is used to sign container images using keyless (OIDC) signing
-        uses: sigstore/cosign-installer@cad07c2e89fa2edd6e2d7bab4c1aa38e53f76003 # v4.1.1
-      - name: Sign (GHCR, keyless)
-        # Sign each GHCR image by digest using keyless (OIDC) signing via Sigstore/Rekor.
-        # Signatures are stored in the registry alongside the image.
+        # cosign is used to sign and verify container images (key and keyless)
+        uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
+      - name: Dual-sign and verify (GHCR & Docker Hub)
+        # Sign each image by digest using keyless (OIDC) and key-based signing,
+        # then verify both the public key signature and the keyless OIDC signature.
         env:
           TAG: ${{ env.TAG }}
+          COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
+          COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
+          COSIGN_PUBLIC_KEY: ${{ secrets.COSIGN_PUBLIC_KEY }}
           COSIGN_YES: "true"
         run: |
           set -euo pipefail
+          issuer="https://token.actions.githubusercontent.com"
+          id_regex="^https://github.com/${{ github.repository }}/.+" # accept this repo (all workflows/refs)
           # Determine if this is an RC release
           IS_RC="false"
           if [[ "$TAG" == *"-rc."* ]]; then
@@ -453,48 +467,35 @@
           )
           fi
-          FAILED_TAGS=()
-          SUCCESSFUL_TAGS=()
-          for IMAGE_TAG in "${IMAGE_TAGS[@]}"; do
-            echo "Processing ${GHCR_IMAGE}:${IMAGE_TAG}"
-            TAG_FAILED=false
-            (
-              set -e
-              DIGEST="$(skopeo inspect --retry-times 3 docker://${GHCR_IMAGE}:${IMAGE_TAG} | jq -r '.Digest')"
-              REF="${GHCR_IMAGE}@${DIGEST}"
+          # Sign each image variant for both registries
+          for BASE_IMAGE in "${GHCR_IMAGE}" "${DOCKERHUB_IMAGE}"; do
+            for IMAGE_TAG in "${IMAGE_TAGS[@]}"; do
+              echo "Processing ${BASE_IMAGE}:${IMAGE_TAG}"
+              DIGEST="$(skopeo inspect --retry-times 3 docker://${BASE_IMAGE}:${IMAGE_TAG} | jq -r '.Digest')"
+              REF="${BASE_IMAGE}@${DIGEST}"
               echo "Resolved digest: ${REF}"
               echo "==> cosign sign (keyless) --recursive ${REF}"
               cosign sign --recursive "${REF}"
-            ) || TAG_FAILED=true
-            if [ "$TAG_FAILED" = "true" ]; then
-              echo "⚠️ WARNING: Failed to sign ${GHCR_IMAGE}:${IMAGE_TAG}"
-              FAILED_TAGS+=("${GHCR_IMAGE}:${IMAGE_TAG}")
-            else
-              echo "✓ Successfully signed ${GHCR_IMAGE}:${IMAGE_TAG}"
-              SUCCESSFUL_TAGS+=("${GHCR_IMAGE}:${IMAGE_TAG}")
-            fi
+              echo "==> cosign sign (key) --recursive ${REF}"
+              cosign sign --key env://COSIGN_PRIVATE_KEY --recursive "${REF}"
+              echo "==> cosign verify (public key) ${REF}"
+              cosign verify --key env://COSIGN_PUBLIC_KEY "${REF}" -o text
+              echo "==> cosign verify (keyless policy) ${REF}"
+              cosign verify \
+                --certificate-oidc-issuer "${issuer}" \
+                --certificate-identity-regexp "${id_regex}" \
+                "${REF}" -o text
+              echo "✓ Successfully signed and verified ${BASE_IMAGE}:${IMAGE_TAG}"
+            done
           done
-          echo ""
-          echo "=========================================="
-          echo "Sign Summary"
-          echo "=========================================="
-          echo "Successful: ${#SUCCESSFUL_TAGS[@]}"
-          echo "Failed: ${#FAILED_TAGS[@]}"
-          if [ ${#FAILED_TAGS[@]} -gt 0 ]; then
-            echo "Failed tags:"
-            for tag in "${FAILED_TAGS[@]}"; do
-              echo " - $tag"
-            done
-            echo "⚠️ WARNING: Some tags failed to sign, but continuing anyway"
-          else
-            echo "✓ All images signed successfully!"
-          fi
+          echo "All images signed and verified successfully!"
         shell: bash
   post-run:
@@ -512,7 +513,7 @@ jobs:
     permissions: write-all
     steps:
      - name: Configure AWS credentials
-        uses: aws-actions/configure-aws-credentials@v6
+        uses: aws-actions/configure-aws-credentials@v5
         with:
           role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/${{ secrets.AWS_ROLE_NAME }}
           role-duration-seconds: 3600

.github/workflows/cicd.yml.backup (new file)

@@ -0,0 +1,426 @@
name: CI/CD Pipeline
# CI/CD workflow for building, publishing, mirroring, signing container images and building release binaries.
# Actions are pinned to specific SHAs to reduce supply-chain risk. This workflow triggers on tag push events.
permissions:
contents: read
packages: write # for GHCR push
id-token: write # for Cosign Keyless (OIDC) Signing
# Required secrets:
# - DOCKER_HUB_USERNAME / DOCKER_HUB_ACCESS_TOKEN: push to Docker Hub
# - GITHUB_TOKEN: used for GHCR login and OIDC keyless signing
# - COSIGN_PRIVATE_KEY / COSIGN_PASSWORD / COSIGN_PUBLIC_KEY: for key-based signing
on:
push:
tags:
- "[0-9]+.[0-9]+.[0-9]+"
- "[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+"
concurrency:
group: ${{ github.ref }}
cancel-in-progress: true
jobs:
pre-run:
runs-on: ubuntu-latest
permissions: write-all
steps:
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v2
with:
role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/${{ secrets.AWS_ROLE_NAME }}
role-duration-seconds: 3600
aws-region: ${{ secrets.AWS_REGION }}
- name: Verify AWS identity
run: aws sts get-caller-identity
- name: Start EC2 instances
run: |
aws ec2 start-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_ARM_RUNNER }}
aws ec2 start-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_AMD_RUNNER }}
echo "EC2 instances started"
release-arm:
name: Build and Release (ARM64)
runs-on: [self-hosted, linux, arm64, us-east-1]
needs: [pre-run]
if: >-
${{
needs.pre-run.result == 'success'
}}
# Job-level timeout to avoid runaway or stuck runs
timeout-minutes: 120
env:
# Target images
DOCKERHUB_IMAGE: docker.io/fosrl/${{ github.event.repository.name }}
GHCR_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}
steps:
- name: Checkout code
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Monitor storage space
run: |
THRESHOLD=75
USED_SPACE=$(df / | grep / | awk '{ print $5 }' | sed 's/%//g')
echo "Used space: $USED_SPACE%"
if [ "$USED_SPACE" -ge "$THRESHOLD" ]; then
echo "Used space is below the threshold of 75% free. Running Docker system prune."
echo y | docker system prune -a
else
echo "Storage space is above the threshold. No action needed."
fi
- name: Log in to Docker Hub
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
with:
registry: docker.io
username: ${{ secrets.DOCKER_HUB_USERNAME }}
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
- name: Extract tag name
id: get-tag
run: echo "TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
shell: bash
- name: Update version in package.json
run: |
TAG=${{ env.TAG }}
sed -i "s/export const APP_VERSION = \".*\";/export const APP_VERSION = \"$TAG\";/" server/lib/consts.ts
cat server/lib/consts.ts
shell: bash
- name: Check if release candidate
id: check-rc
run: |
TAG=${{ env.TAG }}
if [[ "$TAG" == *"-rc."* ]]; then
echo "IS_RC=true" >> $GITHUB_ENV
else
echo "IS_RC=false" >> $GITHUB_ENV
fi
shell: bash
- name: Build and push Docker images (Docker Hub - ARM64)
run: |
TAG=${{ env.TAG }}
if [ "$IS_RC" = "true" ]; then
make build-rc-arm tag=$TAG
else
make build-release-arm tag=$TAG
fi
echo "Built & pushed ARM64 images to: ${{ env.DOCKERHUB_IMAGE }}:${TAG}"
shell: bash
release-amd:
name: Build and Release (AMD64)
runs-on: [self-hosted, linux, x64, us-east-1]
needs: [pre-run]
if: >-
${{
needs.pre-run.result == 'success'
}}
# Job-level timeout to avoid runaway or stuck runs
timeout-minutes: 120
env:
# Target images
DOCKERHUB_IMAGE: docker.io/fosrl/${{ github.event.repository.name }}
GHCR_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}
steps:
- name: Checkout code
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Monitor storage space
run: |
THRESHOLD=75
USED_SPACE=$(df / | grep / | awk '{ print $5 }' | sed 's/%//g')
echo "Used space: $USED_SPACE%"
if [ "$USED_SPACE" -ge "$THRESHOLD" ]; then
echo "Used space is below the threshold of 75% free. Running Docker system prune."
echo y | docker system prune -a
else
echo "Storage space is above the threshold. No action needed."
fi
- name: Log in to Docker Hub
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
with:
registry: docker.io
username: ${{ secrets.DOCKER_HUB_USERNAME }}
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
- name: Extract tag name
id: get-tag
run: echo "TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
shell: bash
- name: Update version in package.json
run: |
TAG=${{ env.TAG }}
sed -i "s/export const APP_VERSION = \".*\";/export const APP_VERSION = \"$TAG\";/" server/lib/consts.ts
cat server/lib/consts.ts
shell: bash
- name: Check if release candidate
id: check-rc
run: |
TAG=${{ env.TAG }}
if [[ "$TAG" == *"-rc."* ]]; then
echo "IS_RC=true" >> $GITHUB_ENV
else
echo "IS_RC=false" >> $GITHUB_ENV
fi
shell: bash
- name: Build and push Docker images (Docker Hub - AMD64)
run: |
TAG=${{ env.TAG }}
if [ "$IS_RC" = "true" ]; then
make build-rc-amd tag=$TAG
else
make build-release-amd tag=$TAG
fi
echo "Built & pushed AMD64 images to: ${{ env.DOCKERHUB_IMAGE }}:${TAG}"
shell: bash
create-manifest:
name: Create Multi-Arch Manifests
runs-on: [self-hosted, linux, x64, us-east-1]
needs: [release-arm, release-amd]
if: >-
${{
needs.release-arm.result == 'success' &&
needs.release-amd.result == 'success'
}}
timeout-minutes: 30
steps:
- name: Checkout code
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Log in to Docker Hub
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
with:
registry: docker.io
username: ${{ secrets.DOCKER_HUB_USERNAME }}
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
- name: Extract tag name
id: get-tag
run: echo "TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
shell: bash
- name: Check if release candidate
id: check-rc
run: |
TAG=${{ env.TAG }}
if [[ "$TAG" == *"-rc."* ]]; then
echo "IS_RC=true" >> $GITHUB_ENV
else
echo "IS_RC=false" >> $GITHUB_ENV
fi
shell: bash
- name: Create multi-arch manifests
run: |
TAG=${{ env.TAG }}
if [ "$IS_RC" = "true" ]; then
make create-manifests-rc tag=$TAG
else
make create-manifests tag=$TAG
fi
echo "Created multi-arch manifests for tag: ${TAG}"
shell: bash
sign-and-package:
name: Sign and Package
runs-on: [self-hosted, linux, x64, us-east-1]
needs: [release-arm, release-amd, create-manifest]
if: >-
${{
needs.release-arm.result == 'success' &&
needs.release-amd.result == 'success' &&
needs.create-manifest.result == 'success'
}}
# Job-level timeout to avoid runaway or stuck runs
timeout-minutes: 120
env:
# Target images
DOCKERHUB_IMAGE: docker.io/fosrl/${{ github.event.repository.name }}
GHCR_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}
steps:
- name: Checkout code
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Extract tag name
id: get-tag
run: echo "TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
shell: bash
- name: Install Go
uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
with:
go-version: 1.24
- name: Update version in package.json
run: |
TAG=${{ env.TAG }}
sed -i "s/export const APP_VERSION = \".*\";/export const APP_VERSION = \"$TAG\";/" server/lib/consts.ts
cat server/lib/consts.ts
shell: bash
- name: Pull latest Gerbil version
id: get-gerbil-tag
run: |
LATEST_TAG=$(curl -s https://api.github.com/repos/fosrl/gerbil/tags | jq -r '.[0].name')
echo "LATEST_GERBIL_TAG=$LATEST_TAG" >> $GITHUB_ENV
shell: bash
- name: Pull latest Badger version
id: get-badger-tag
run: |
LATEST_TAG=$(curl -s https://api.github.com/repos/fosrl/badger/tags | jq -r '.[0].name')
echo "LATEST_BADGER_TAG=$LATEST_TAG" >> $GITHUB_ENV
shell: bash
- name: Update install/main.go
run: |
PANGOLIN_VERSION=${{ env.TAG }}
GERBIL_VERSION=${{ env.LATEST_GERBIL_TAG }}
BADGER_VERSION=${{ env.LATEST_BADGER_TAG }}
sed -i "s/config.PangolinVersion = \".*\"/config.PangolinVersion = \"$PANGOLIN_VERSION\"/" install/main.go
sed -i "s/config.GerbilVersion = \".*\"/config.GerbilVersion = \"$GERBIL_VERSION\"/" install/main.go
sed -i "s/config.BadgerVersion = \".*\"/config.BadgerVersion = \"$BADGER_VERSION\"/" install/main.go
echo "Updated install/main.go with Pangolin version $PANGOLIN_VERSION, Gerbil version $GERBIL_VERSION, and Badger version $BADGER_VERSION"
cat install/main.go
shell: bash
- name: Build installer
working-directory: install
run: |
make go-build-release
- name: Upload artifacts from /install/bin
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
with:
name: install-bin
path: install/bin/
- name: Install skopeo + jq
# skopeo: copy/inspect images between registries
# jq: JSON parsing tool used to extract digest values
run: |
sudo apt-get update -y
sudo apt-get install -y skopeo jq
skopeo --version
shell: bash
- name: Login to GHCR
env:
REGISTRY_AUTH_FILE: ${{ runner.temp }}/containers/auth.json
run: |
mkdir -p "$(dirname "$REGISTRY_AUTH_FILE")"
skopeo login ghcr.io -u "${{ github.actor }}" -p "${{ secrets.GITHUB_TOKEN }}"
shell: bash
- name: Copy tag from Docker Hub to GHCR
# Mirror the already-built image (all architectures) to GHCR so we can sign it
# Wait a bit for both architectures to be available in Docker Hub manifest
env:
REGISTRY_AUTH_FILE: ${{ runner.temp }}/containers/auth.json
run: |
set -euo pipefail
TAG=${{ env.TAG }}
echo "Waiting for multi-arch manifest to be ready..."
sleep 30
echo "Copying ${{ env.DOCKERHUB_IMAGE }}:${TAG} -> ${{ env.GHCR_IMAGE }}:${TAG}"
skopeo copy --all --retry-times 3 \
docker://$DOCKERHUB_IMAGE:$TAG \
docker://$GHCR_IMAGE:$TAG
shell: bash
- name: Login to GitHub Container Registry (for cosign)
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Install cosign
# cosign is used to sign and verify container images (key and keyless)
uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
- name: Dual-sign and verify (GHCR & Docker Hub)
# Sign each image by digest using keyless (OIDC) and key-based signing,
# then verify both the public key signature and the keyless OIDC signature.
env:
TAG: ${{ env.TAG }}
COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
COSIGN_PUBLIC_KEY: ${{ secrets.COSIGN_PUBLIC_KEY }}
COSIGN_YES: "true"
run: |
set -euo pipefail
issuer="https://token.actions.githubusercontent.com"
id_regex="^https://github.com/${{ github.repository }}/.+" # accept this repo (all workflows/refs)
for IMAGE in "${GHCR_IMAGE}" "${DOCKERHUB_IMAGE}"; do
echo "Processing ${IMAGE}:${TAG}"
DIGEST="$(skopeo inspect --retry-times 3 docker://${IMAGE}:${TAG} | jq -r '.Digest')"
REF="${IMAGE}@${DIGEST}"
echo "Resolved digest: ${REF}"
echo "==> cosign sign (keyless) --recursive ${REF}"
cosign sign --recursive "${REF}"
echo "==> cosign sign (key) --recursive ${REF}"
cosign sign --key env://COSIGN_PRIVATE_KEY --recursive "${REF}"
echo "==> cosign verify (public key) ${REF}"
cosign verify --key env://COSIGN_PUBLIC_KEY "${REF}" -o text
echo "==> cosign verify (keyless policy) ${REF}"
cosign verify \
--certificate-oidc-issuer "${issuer}" \
--certificate-identity-regexp "${id_regex}" \
"${REF}" -o text
done
shell: bash
post-run:
needs: [pre-run, release-arm, release-amd, create-manifest, sign-and-package]
if: >-
${{
always() &&
needs.pre-run.result == 'success' &&
(needs.release-arm.result == 'success' || needs.release-arm.result == 'skipped' || needs.release-arm.result == 'failure') &&
(needs.release-amd.result == 'success' || needs.release-amd.result == 'skipped' || needs.release-amd.result == 'failure') &&
(needs.create-manifest.result == 'success' || needs.create-manifest.result == 'skipped' || needs.create-manifest.result == 'failure') &&
(needs.sign-and-package.result == 'success' || needs.sign-and-package.result == 'skipped' || needs.sign-and-package.result == 'failure')
}}
runs-on: ubuntu-latest
permissions: write-all
steps:
- name: Configure AWS credentials
uses: aws-actions/configure-aws-credentials@v2
with:
role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/${{ secrets.AWS_ROLE_NAME }}
role-duration-seconds: 3600
aws-region: ${{ secrets.AWS_REGION }}
- name: Verify AWS identity
run: aws sts get-caller-identity
- name: Stop EC2 instances
run: |
aws ec2 stop-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_ARM_RUNNER }}
aws ec2 stop-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_AMD_RUNNER }}
echo "EC2 instances stopped"


@@ -21,12 +21,12 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
       - name: Set up Node.js
-        uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6.3.0
+        uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6.2.0
         with:
-          node-version: '24'
+          node-version: '22'
       - name: Install dependencies
         run: npm ci


@@ -23,7 +23,7 @@ jobs:
           skopeo --version
       - name: Install cosign
-        uses: sigstore/cosign-installer@cad07c2e89fa2edd6e2d7bab4c1aa38e53f76003 # v4.1.1
+        uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
       - name: Input check
         run: |


@@ -14,7 +14,7 @@ jobs:
     permissions: write-all
     steps:
       - name: Configure AWS credentials
-        uses: aws-actions/configure-aws-credentials@v6
+        uses: aws-actions/configure-aws-credentials@v5
         with:
           role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/${{ secrets.AWS_ROLE_NAME }}
           role-duration-seconds: 3600


@@ -1,4 +1,4 @@
-name: SAAS Pipeline
+name: CI/CD Pipeline
 # CI/CD workflow for building, publishing, mirroring, signing container images and building release binaries.
 # Actions are pinned to specific SHAs to reduce supply-chain risk. This workflow triggers on tag push events.
@@ -23,7 +23,7 @@ jobs:
     permissions: write-all
     steps:
       - name: Configure AWS credentials
-        uses: aws-actions/configure-aws-credentials@v6
+        uses: aws-actions/configure-aws-credentials@v5
         with:
           role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/${{ secrets.AWS_ROLE_NAME }}
           role-duration-seconds: 3600
@@ -54,42 +54,7 @@ jobs:
     steps:
       - name: Checkout code
-        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
-      - name: Download MaxMind GeoLite2 databases
-        env:
-          MAXMIND_LICENSE_KEY: ${{ secrets.MAXMIND_LICENSE_KEY }}
-        run: |
-          echo "Downloading MaxMind GeoLite2 databases..."
-          # Download GeoLite2-Country
-          curl -L "https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-Country&license_key=${MAXMIND_LICENSE_KEY}&suffix=tar.gz" \
-            -o GeoLite2-Country.tar.gz
-          # Download GeoLite2-ASN
-          curl -L "https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-ASN&license_key=${MAXMIND_LICENSE_KEY}&suffix=tar.gz" \
-            -o GeoLite2-ASN.tar.gz
-          # Extract the .mmdb files
-          tar -xzf GeoLite2-Country.tar.gz --strip-components=1 --wildcards '*.mmdb'
-          tar -xzf GeoLite2-ASN.tar.gz --strip-components=1 --wildcards '*.mmdb'
-          # Verify files exist
-          if [ ! -f "GeoLite2-Country.mmdb" ]; then
-            echo "ERROR: Failed to download GeoLite2-Country.mmdb"
-            exit 1
-          fi
-          if [ ! -f "GeoLite2-ASN.mmdb" ]; then
-            echo "ERROR: Failed to download GeoLite2-ASN.mmdb"
-            exit 1
-          fi
-          # Clean up tar files
-          rm -f GeoLite2-Country.tar.gz GeoLite2-ASN.tar.gz
-          echo "MaxMind databases downloaded successfully"
-          ls -lh GeoLite2-*.mmdb
+        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
       - name: Monitor storage space
         run: |
@@ -104,7 +69,7 @@
           fi
       - name: Configure AWS credentials
-        uses: aws-actions/configure-aws-credentials@v6
+        uses: aws-actions/configure-aws-credentials@v5
         with:
           role-to-assume: arn:aws:iam::${{ secrets.aws_account_id }}:role/${{ secrets.AWS_ROLE_NAME }}
           role-duration-seconds: 3600
@@ -145,7 +110,7 @@
     permissions: write-all
     steps:
       - name: Configure AWS credentials
-        uses: aws-actions/configure-aws-credentials@v6
+        uses: aws-actions/configure-aws-credentials@v5
         with:
           role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/${{ secrets.AWS_ROLE_NAME }}
           role-duration-seconds: 3600


@@ -14,7 +14,7 @@ jobs:
   stale:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/stale@b5d41d4e1d5dceea10e7104786b73624c18a190f # v10.2.0
+      - uses: actions/stale@997185467fa4f803885201cee163a9f38240193d # v10.1.1
         with:
           days-before-stale: 14
           days-before-close: 14


@@ -14,12 +14,12 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout repository
-        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
       - name: Install Node
-        uses: actions/setup-node@53b83947a5a98c8d113130e565377fae1a50d02f # v6.3.0
+        uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6.2.0
         with:
-          node-version: '24'
+          node-version: '22'
       - name: Copy config file
         run: cp config/config.example.yml config/config.yml
@@ -34,10 +34,10 @@ jobs:
         run: npm run set:oss
       - name: Generate database migrations
-        run: npm run db:generate
+        run: npm run db:sqlite:generate
       - name: Apply database migrations
-        run: npm run db:push
+        run: npm run db:sqlite:push
       - name: Test with tsc
         run: npx tsc --noEmit
@@ -62,7 +62,10 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout repository
-        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
+      - name: Copy config file
+        run: cp config/config.example.yml config/config.yml
       - name: Build Docker image sqlite
         run: make dev-build-sqlite
@@ -71,7 +74,10 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout repository
-        uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
+        uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
+      - name: Copy config file
+        run: cp config/config.example.yml config/config.yml
       - name: Build Docker image pg
         run: make dev-build-pg

.gitignore

@@ -51,6 +51,4 @@ dynamic/
 scratch/
 tsconfig.json
 hydrateSaas.ts
 CLAUDE.md
-drizzle.config.ts
-server/setup/migrations.ts


@@ -10,7 +10,7 @@
         "editor.defaultFormatter": "esbenp.prettier-vscode"
     },
     "[typescript]": {
-        "editor.defaultFormatter": "esbenp.prettier-vscode"
+        "editor.defaultFormatter": "vscode.typescript-language-features"
     },
     "[typescriptreact]": {
         "editor.defaultFormatter": "esbenp.prettier-vscode"


@@ -1,51 +1,77 @@
-# FROM node:24-slim AS base
-FROM public.ecr.aws/docker/library/node:24-slim AS base
-# OCI Image Labels - Build Args for dynamic values
-ARG VERSION="dev"
-ARG REVISION=""
-ARG CREATED=""
-ARG LICENSE="AGPL-3.0"
+FROM node:24-alpine AS builder
 WORKDIR /app
-RUN apt-get update && apt-get install -y python3 make g++ && rm -rf /var/lib/apt/lists/*
-COPY package*.json ./
-FROM base AS builder-dev
-RUN npm ci
-COPY . .
 ARG BUILD=oss
 ARG DATABASE=sqlite
-RUN if [ "$BUILD" = "oss" ]; then rm -rf server/private; fi && \
-    npm run set:$DATABASE && \
-    npm run set:$BUILD && \
-    npm run db:generate && \
-    npm run build && \
-    npm run build:cli && \
-    test -f dist/server.mjs
-# Create placeholder files for MaxMind databases to avoid COPY errors
-# Real files should be present for saas builds, placeholders for oss builds
-RUN touch /app/GeoLite2-Country.mmdb /app/GeoLite2-ASN.mmdb
-FROM base AS builder
-RUN npm ci --omit=dev
-# FROM node:24-slim AS runner
-FROM public.ecr.aws/docker/library/node:24-slim AS runner
+# Derive title and description based on BUILD type
+ARG IMAGE_TITLE="Pangolin"
+ARG IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere"
+RUN apk add --no-cache curl tzdata python3 make g++
+# COPY package.json package-lock.json ./
+COPY package*.json ./
+RUN npm ci
+COPY . .
+RUN echo "export * from \"./$DATABASE\";" > server/db/index.ts
+RUN echo "export const driver: \"pg\" | \"sqlite\" = \"$DATABASE\";" >> server/db/index.ts
+RUN echo "export const build = \"$BUILD\" as \"saas\" | \"enterprise\" | \"oss\";" > server/build.ts
+# Copy the appropriate TypeScript configuration based on build type
+RUN if [ "$BUILD" = "oss" ]; then cp tsconfig.oss.json tsconfig.json; \
+    elif [ "$BUILD" = "saas" ]; then cp tsconfig.saas.json tsconfig.json; \
+    elif [ "$BUILD" = "enterprise" ]; then cp tsconfig.enterprise.json tsconfig.json; \
+    fi
+# if the build is oss then remove the server/private directory
+RUN if [ "$BUILD" = "oss" ]; then rm -rf server/private; fi
+RUN if [ "$DATABASE" = "pg" ]; then npx drizzle-kit generate --dialect postgresql --schema ./server/db/pg/schema --out init; else npx drizzle-kit generate --dialect $DATABASE --schema ./server/db/$DATABASE/schema --out init; fi
+RUN mkdir -p dist
+RUN npm run next:build
+RUN node esbuild.mjs -e server/index.ts -o dist/server.mjs -b $BUILD
+RUN if [ "$DATABASE" = "pg" ]; then \
+    node esbuild.mjs -e server/setup/migrationsPg.ts -o dist/migrations.mjs; \
+    else \
+    node esbuild.mjs -e server/setup/migrationsSqlite.ts -o dist/migrations.mjs; \
+    fi
+# test to make sure the build output is there and error if not
+RUN test -f dist/server.mjs
+RUN npm run build:cli
+# Prune dev dependencies and clean up to prepare for copy to runner
+RUN npm prune --omit=dev && npm cache clean --force
+FROM node:24-alpine AS runner
 WORKDIR /app
-RUN apt-get update && apt-get install -y curl tzdata && rm -rf /var/lib/apt/lists/*
+# Only curl and tzdata needed at runtime - no build tools!
+RUN apk add --no-cache curl tzdata
+# Copy pre-built node_modules from builder (already pruned to production only)
+# This includes the compiled native modules like better-sqlite3
 COPY --from=builder /app/node_modules ./node_modules
-COPY --from=builder /app/package.json ./package.json
-COPY --from=builder-dev /app/.next/standalone ./
-COPY --from=builder-dev /app/.next/static ./.next/static
-COPY --from=builder-dev /app/dist ./dist
-COPY --from=builder-dev /app/server/migrations ./dist/init
+COPY --from=builder /app/.next/standalone ./
+COPY --from=builder /app/.next/static ./.next/static
+COPY --from=builder /app/dist ./dist
+COPY --from=builder /app/init ./dist/init
+COPY --from=builder /app/package.json ./package.json
 COPY ./cli/wrapper.sh /usr/local/bin/pangctl
 RUN chmod +x /usr/local/bin/pangctl ./dist/cli.mjs
@@ -55,28 +81,6 @@ COPY server/db/ios_models.json ./dist/ios_models.json
 COPY server/db/mac_models.json ./dist/mac_models.json
 COPY public ./public
-# Copy MaxMind databases for SaaS builds
-ARG BUILD=oss
-RUN mkdir -p ./maxmind
-# Copy MaxMind databases (placeholders exist for oss builds, real files for saas)
-COPY --from=builder-dev /app/GeoLite2-Country.mmdb ./maxmind/GeoLite2-Country.mmdb
-COPY --from=builder-dev /app/GeoLite2-ASN.mmdb ./maxmind/GeoLite2-ASN.mmdb
-# Remove MaxMind databases for non-saas builds (keep only for saas)
-RUN if [ "$BUILD" != "saas" ]; then rm -rf ./maxmind; fi
-# OCI Image Labels - Build Args for dynamic values
-ARG VERSION="dev"
-ARG REVISION=""
-ARG CREATED=""
-ARG LICENSE="AGPL-3.0"
-# Derive title and description based on BUILD type
-ARG IMAGE_TITLE="Pangolin"
-ARG IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere"
 # OCI Image Labels
 # https://github.com/opencontainers/image-spec/blob/main/annotations.md
 LABEL org.opencontainers.image.source="https://github.com/fosrl/pangolin" \


@@ -1,9 +1,7 @@
-FROM node:24-alpine
+FROM node:22-alpine
 WORKDIR /app
-RUN apk add --no-cache python3 make g++
 COPY package*.json ./
 # Install dependencies

@@ -35,53 +35,43 @@
 </div>
-<p align="center">
-  <a href="https://docs.pangolin.net/careers/join-us">
-    <img src="https://img.shields.io/badge/🚀_We're_Hiring!-Join_Our_Team-brightgreen?style=for-the-badge" alt="We're Hiring!" />
-  </a>
-</p>
 <p align="center">
   <strong>
-    Get started with Pangolin at <a href="https://app.pangolin.net/auth/signup">app.pangolin.net</a>
+    Start testing Pangolin at <a href="https://app.pangolin.net/auth/signup">app.pangolin.net</a>
   </strong>
 </p>
-Pangolin is an open-source, identity-based remote access platform built on WireGuard® that enables secure, seamless connectivity to private and public resources. Pangolin combines reverse proxy and VPN capabilities into one platform, providing browser-based access to web applications and client-based access to any private resources with NAT traversal, all with granular access controls.
+Pangolin is an open-source, identity-based remote access platform built on WireGuard that enables secure, seamless connectivity to private and public resources. Pangolin combines reverse proxy and VPN capabilities into one platform, providing browser-based access to web applications and client-based access to any private resources, all with zero-trust security and granular access control.
 ## Installation
-- Get started for free with [Pangolin Cloud](https://app.pangolin.net/).
-- Or, check out the [quick install guide](https://docs.pangolin.net/self-host/quick-install) for how to self-host Pangolin.
+- Check out the [quick install guide](https://docs.pangolin.net/self-host/quick-install) for how to install and set up Pangolin.
 - Install from the [DigitalOcean marketplace](https://marketplace.digitalocean.com/apps/pangolin-ce-1?refcode=edf0480eeb81) for a one-click pre-configured installer.
-<img src="public/screenshots/hero.png" alt="Pangolin" width="100%" />
+<img src="public/screenshots/hero.png" />
 ## Deployment Options
-- **Pangolin Cloud** - Fully managed service - no infrastructure required.
-- **Self-Host: Community Edition** - Free, open source, and licensed under AGPL-3.
-- **Self-Host: Enterprise Edition** - Licensed under Fossorial Commercial License. Free for personal and hobbyist use, and for businesses making less than \$100K USD gross annual revenue.
+| <img width=500 /> | Description |
+|---|---|
+| **Self-Host: Community Edition** | Free, open source, and licensed under AGPL-3. |
+| **Self-Host: Enterprise Edition** | Licensed under Fossorial Commercial License. Free for personal and hobbyist use, and for businesses earning under \$100K USD annually. |
+| **Pangolin Cloud** | Fully managed service with instant setup and pay-as-you-go pricing — no infrastructure required. Or, self-host your own [remote node](https://docs.pangolin.net/manage/remote-node/nodes) and connect to our control plane. |
 ## Key Features
-### Connect remote networks with sites and NAT traversal
-Pangolin's site connectors provide gateways into networks so you can access any networked resources. Sites use outbound tunnels and intelligent NAT traversal to make networks behind restrictive firewalls available for authorized access without public IPs or open ports. Easily deploy a site as a binary or container on any platform.
-<img src="public/screenshots/sites.png" alt="Sites" width="100%" />
-### Browser-based reverse proxy access
-Expose web applications through identity and context-aware tunneled reverse proxies. Users access applications through any web browser with authentication and granular access control without installing a client. Pangolin handles routing, load balancing, health checking, and automatic SSL certificates without exposing your network directly to the internet.
-<img src="public/clip.gif" alt="Reverse proxy access" width="100%" />
-### Client-based private resource access
-Access private resources like SSH servers, databases, RDP, and entire network ranges through Pangolin clients. Intelligent NAT traversal enables connections even through restrictive firewalls, while DNS aliases provide friendly names and fast connections to resources across all your sites. Add redundancy by routing traffic through multiple connectors in your network.
-<img src="public/screenshots/private-resources.png" alt="Private resources" width="100%" />
-### Give users and roles access to resources
-Use Pangolin's built in users or bring your own identity provider and set up role based access control (RBAC). Grant users access to specific resources, not entire networks. Unlike traditional VPNs that expose full network access, Pangolin's zero-trust model ensures users can only reach the applications, services, and routes you explicitly define.
-<img src="public/screenshots/users.png" alt="Users from identity provider with roles" width="100%" />
+| <img width=500 /> | <img width=500 /> |
+|---|---|
+| **Connect remote networks with sites**<br /><br />Pangolin's lightweight site connectors create secure tunnels from remote networks without requiring public IP addresses or open ports. Sites make any network anywhere available for authorized access. | <img src="public/screenshots/sites.png" width=500 /><tr></tr> |
+| **Browser-based reverse proxy access**<br /><br />Expose web applications through identity and context-aware tunneled reverse proxies. Pangolin handles routing, load balancing, health checking, and automatic SSL certificates without exposing your network directly to the internet. Users access applications through any web browser with authentication and granular access control. | <img src="public/clip.gif" width=500 /><tr></tr> |
+| **Client-based private resource access**<br /><br />Access private resources like SSH servers, databases, RDP, and entire network ranges through Pangolin clients. Intelligent NAT traversal enables connections even through restrictive firewalls, while DNS aliases provide friendly names and fast connections to resources across all your sites. | <img src="public/screenshots/private-resources.png" width=500 /><tr></tr> |
+| **Zero-trust granular access**<br /><br />Grant users access to specific resources, not entire networks. Unlike traditional VPNs that expose full network access, Pangolin's zero-trust model ensures users can only reach the applications and services you explicitly define, reducing security risk and attack surface. | <img src="public/screenshots/user-devices.png" width=500 /><tr></tr> |
 ## Download Clients
@@ -95,16 +85,17 @@ Download the Pangolin client for your platform:
 ## Get Started
-### Sign up now
-Create a free account at [app.pangolin.net](https://app.pangolin.net) to get started with Pangolin Cloud.
 ### Check out the docs
 We encourage everyone to read the full documentation first, which is
 available at [docs.pangolin.net](https://docs.pangolin.net). This README provides only a very brief subset of
 the docs to illustrate some basic ideas.
+### Sign up and try now
+For Pangolin's managed service, you will first need to create an account at
+[app.pangolin.net](https://app.pangolin.net). We have a generous free tier to get started.
 ## Licensing
 Pangolin is dual licensed under the AGPL-3 and the [Fossorial Commercial License](https://pangolin.net/fcl.html). For inquiries about commercial licensing, please contact us at [contact@pangolin.net](mailto:contact@pangolin.net).
@@ -112,3 +103,7 @@ Pangolin is dual licensed under the AGPL-3 and the [Fossorial Commercial License
 ## Contributions
 Please see [CONTRIBUTING](./CONTRIBUTING.md) in the repository for guidelines and best practices.
+---
+WireGuard® is a registered trademark of Jason A. Donenfeld.


@@ -3,7 +3,7 @@
 If you discover a security vulnerability, please follow the steps below to responsibly disclose it to us:
 1. **Do not create a public GitHub issue or discussion post.** This could put the security of other users at risk.
-2. Send a detailed report to [security@pangolin.net](mailto:security@pangolin.net) with the following information:
+2. Send a detailed report to [security@pangolin.net](mailto:security@pangolin.net) or send a **private** message to a maintainer on [Discord](https://discord.gg/HCJR8Xhme4). Include:
    - Description and location of the vulnerability.
    - Potential impact of the vulnerability.


@@ -0,0 +1,17 @@
meta {
name: Create API Key
type: http
seq: 1
}
put {
url: http://localhost:3000/api/v1/api-key
body: json
auth: inherit
}
body:json {
{
"isRoot": true
}
}


@@ -0,0 +1,11 @@
meta {
name: Delete API Key
type: http
seq: 2
}
delete {
url: http://localhost:3000/api/v1/api-key/dm47aacqxxn3ubj
body: none
auth: inherit
}


@@ -0,0 +1,11 @@
meta {
name: List API Key Actions
type: http
seq: 6
}
get {
url: http://localhost:3000/api/v1/api-key/ex0izu2c37fjz9x/actions
body: none
auth: inherit
}


@@ -0,0 +1,11 @@
meta {
name: List Org API Keys
type: http
seq: 4
}
get {
url: http://localhost:3000/api/v1/org/home-lab/api-keys
body: none
auth: inherit
}


@@ -0,0 +1,11 @@
meta {
name: List Root API Keys
type: http
seq: 3
}
get {
url: http://localhost:3000/api/v1/root/api-keys
body: none
auth: inherit
}


@@ -0,0 +1,17 @@
meta {
name: Set API Key Actions
type: http
seq: 5
}
post {
url: http://localhost:3000/api/v1/api-key/ex0izu2c37fjz9x/actions
body: json
auth: inherit
}
body:json {
{
"actionIds": ["listSites"]
}
}


@@ -0,0 +1,17 @@
meta {
name: Set API Key Orgs
type: http
seq: 7
}
post {
url: http://localhost:3000/api/v1/api-key/ex0izu2c37fjz9x/orgs
body: json
auth: inherit
}
body:json {
{
"orgIds": ["home-lab"]
}
}


@@ -0,0 +1,3 @@
meta {
name: API Keys
}


@@ -0,0 +1,18 @@
meta {
name: 2fa-disable
type: http
seq: 6
}
post {
url: http://localhost:3000/api/v1/auth/2fa/disable
body: json
auth: none
}
body:json {
{
"password": "aaaaa-1A",
"code": "377289"
}
}

bruno/Auth/2fa-enable.bru (new file)

@@ -0,0 +1,17 @@
meta {
name: 2fa-enable
type: http
seq: 4
}
post {
url: http://localhost:3000/api/v1/auth/2fa/enable
body: json
auth: none
}
body:json {
{
"code": "374138"
}
}


@@ -0,0 +1,17 @@
meta {
name: 2fa-request
type: http
seq: 5
}
post {
url: http://localhost:3000/api/v1/auth/2fa/request
body: json
auth: none
}
body:json {
{
"password": "aaaaa-1A"
}
}


@@ -0,0 +1,18 @@
meta {
name: change-password
type: http
seq: 9
}
post {
url: http://localhost:3000/api/v1/auth/change-password
body: json
auth: none
}
body:json {
{
"oldPassword": "",
"newPassword": ""
}
}

bruno/Auth/login.bru (new file)

@@ -0,0 +1,18 @@
meta {
name: login
type: http
seq: 1
}
post {
url: http://localhost:3000/api/v1/auth/login
body: json
auth: none
}
body:json {
{
"email": "admin@fosrl.io",
"password": "Password123!"
}
}

bruno/Auth/logout.bru (new file)

@@ -0,0 +1,11 @@
meta {
name: logout
type: http
seq: 3
}
post {
url: http://localhost:4000/api/v1/auth/logout
body: none
auth: none
}


@@ -0,0 +1,17 @@
meta {
name: reset-password-request
type: http
seq: 10
}
post {
url: http://localhost:3000/api/v1/auth/reset-password/request
body: json
auth: none
}
body:json {
{
"email": "milo@pangolin.net"
}
}


@@ -0,0 +1,19 @@
meta {
name: reset-password
type: http
seq: 11
}
post {
url: http://localhost:3000/api/v1/auth/reset-password
body: json
auth: none
}
body:json {
{
"token": "3uhsbom72dwdhboctwrtntyd6jrlg4jtf5oaxy4k",
"newPassword": "aaaaa-1A",
"code": "6irqCGR3"
}
}

bruno/Auth/signup.bru (new file)

@@ -0,0 +1,18 @@
meta {
name: signup
type: http
seq: 2
}
put {
url: http://localhost:3000/api/v1/auth/signup
body: json
auth: none
}
body:json {
{
"email": "numbat@pangolin.net",
"password": "Password123!"
}
}


@@ -0,0 +1,11 @@
meta {
name: verify-email-request
type: http
seq: 8
}
post {
url: http://localhost:3000/api/v1/auth/verify-email/request
body: none
auth: none
}


@@ -0,0 +1,17 @@
meta {
name: verify-email
type: http
seq: 7
}
post {
url: http://localhost:3000/api/v1/auth/verify-email
body: json
auth: none
}
body:json {
{
"code": "50317187"
}
}


@@ -0,0 +1,15 @@
meta {
name: verify-user
type: http
seq: 4
}
get {
url: http://localhost:3001/api/v1/badger/verify-user?sessionId=mb52273jkb6t3oys2bx6ur5x7rcrkl26c7warg3e
body: none
auth: none
}
params:query {
sessionId: mb52273jkb6t3oys2bx6ur5x7rcrkl26c7warg3e
}


@@ -0,0 +1,22 @@
meta {
name: createClient
type: http
seq: 1
}
put {
url: http://localhost:3000/api/v1/site/1/client
body: json
auth: none
}
body:json {
{
"siteId": 1,
"name": "test",
"type": "olm",
"subnet": "100.90.129.4/30",
"olmId": "029yzunhx6nh3y5",
"secret": "l0ymp075y3d4rccb25l6sqpgar52k09etunui970qq5gj7x6"
}
}


@@ -0,0 +1,11 @@
meta {
name: pickClientDefaults
type: http
seq: 2
}
get {
url: http://localhost:3000/api/v1/site/1/pick-client-defaults
body: none
auth: none
}


@@ -0,0 +1,22 @@
meta {
name: Create OIDC Provider
type: http
seq: 1
}
put {
url: http://localhost:3000/api/v1/org/home-lab/idp/oidc
body: json
auth: inherit
}
body:json {
{
"clientId": "JJoSvHCZcxnXT2sn6CObj6a21MuKNRXs3kN5wbys",
"clientSecret": "2SlGL2wOGgMEWLI9yUuMAeFxre7qSNJVnXMzyepdNzH1qlxYnC4lKhhQ6a157YQEkYH3vm40KK4RCqbYiF8QIweuPGagPX3oGxEj2exwutoXFfOhtq4hHybQKoFq01Z3",
"authUrl": "http://localhost:9000/application/o/authorize/",
"tokenUrl": "http://localhost:9000/application/o/token/",
"scopes": ["email", "openid", "profile"],
"userIdentifier": "email"
}
}


@@ -0,0 +1,11 @@
meta {
name: Generate OIDC URL
type: http
seq: 2
}
get {
url: http://localhost:3000/api/v1
body: none
auth: inherit
}

bruno/IDP/folder.bru (new file)

@@ -0,0 +1,3 @@
meta {
name: IDP
}


@@ -0,0 +1,11 @@
meta {
name: Traefik Config
type: http
seq: 1
}
get {
url: http://localhost:3001/api/v1/traefik-config
body: none
auth: inherit
}


@@ -0,0 +1,3 @@
meta {
name: Internal
}


@@ -0,0 +1,11 @@
meta {
name: Create Newt
type: http
seq: 2
}
get {
url: http://localhost:3000/api/v1/newt
body: none
auth: none
}

bruno/Newt/Get Token.bru (new file)

@@ -0,0 +1,18 @@
meta {
name: Get Token
type: http
seq: 1
}
get {
url: http://localhost:3000/api/v1/auth/newt/get-token
body: json
auth: none
}
body:json {
{
"newtId": "o0d4rdxq3stnz7b",
"secret": "sy7l09fnaesd03iwrfp9m3qf0ryn19g0zf3dqieaazb4k7vk"
}
}

bruno/Olm/createOlm.bru (new file)

@@ -0,0 +1,15 @@
meta {
name: createOlm
type: http
seq: 1
}
put {
url: http://localhost:3000/api/v1/olm
body: none
auth: inherit
}
settings {
encodeUrl: true
}

bruno/Olm/folder.bru (new file)

@@ -0,0 +1,8 @@
meta {
name: Olm
seq: 15
}
auth {
mode: inherit
}

bruno/Orgs/Check Id.bru (new file)

@@ -0,0 +1,11 @@
meta {
name: Check Id
type: http
seq: 2
}
get {
url: http://localhost:3000/api/v1/org/checkId
body: none
auth: none
}

bruno/Orgs/listOrgs.bru (new file)

@@ -0,0 +1,11 @@
meta {
name: listOrgs
type: http
seq: 1
}
get {
url:
body: none
auth: none
}


@@ -0,0 +1,11 @@
meta {
name: createRemoteExitNode
type: http
seq: 1
}
put {
url: http://localhost:4000/api/v1/org/org_i21aifypnlyxur2/remote-exit-node
body: none
auth: none
}


@@ -0,0 +1,11 @@
meta {
name: listResourcesByOrg
type: http
seq: 1
}
get {
url:
body: none
auth: none
}


@@ -0,0 +1,16 @@
meta {
name: listResourcesBySite
type: http
seq: 2
}
get {
url: http://localhost:3000/api/v1/site/1/resources?limit=10&offset=0
body: none
auth: none
}
params:query {
limit: 10
offset: 0
}

bruno/Sites/Get Site.bru (new file)

@@ -0,0 +1,11 @@
meta {
name: Get Site
type: http
seq: 2
}
get {
url: http://localhost:3000/api/v1/org/test/sites/mexican-mole-lizard-windy
body: none
auth: none
}

bruno/Sites/listSites.bru (new file)

@@ -0,0 +1,11 @@
meta {
name: listSites
type: http
seq: 1
}
get {
url:
body: none
auth: none
}


@@ -0,0 +1,16 @@
meta {
name: listTargets
type: http
seq: 1
}
get {
url: http://localhost:3000/api/v1/resource/web.main.localhost/targets?limit=10&offset=0
body: none
auth: none
}
params:query {
limit: 10
offset: 0
}

bruno/Test.bru (new file)

@@ -0,0 +1,11 @@
meta {
name: Test
type: http
seq: 2
}
get {
url: http://localhost:3000/api/v1
body: none
auth: inherit
}


@@ -0,0 +1,11 @@
meta {
name: traefik-config
type: http
seq: 1
}
get {
url: http://localhost:3001/api/v1/traefik-config
body: none
auth: none
}


@@ -0,0 +1,11 @@
meta {
name: adminListUsers
type: http
seq: 2
}
get {
url: http://localhost:3000/api/v1/users
body: none
auth: none
}


@@ -0,0 +1,11 @@
meta {
name: adminRemoveUser
type: http
seq: 3
}
delete {
url: http://localhost:3000/api/v1/user/ky5r7ivqs8wc7u4
body: none
auth: none
}

bruno/Users/getUser.bru (new file)

@@ -0,0 +1,11 @@
meta {
name: getUser
type: http
seq: 1
}
get {
url:
body: none
auth: none
}

bruno/bruno.json (new file)

@@ -0,0 +1,13 @@
{
"version": "1",
"name": "Pangolin",
"type": "collection",
"ignore": [
"node_modules",
".git"
],
"presets": {
"requestType": "http",
"requestUrl": "http://localhost:3000/api/v1"
}
}
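The requests in this collection are plain HTTP against the local API. For reference, the login request above (bruno/Auth/login.bru), expressed as a self-contained TypeScript sketch — the credentials are the sample values from the collection, not real secrets:

// Sketch of the bruno/Auth/login.bru request as plain TypeScript.
async function login(): Promise<unknown> {
    const res = await fetch("http://localhost:3000/api/v1/auth/login", {
        method: "POST",
        headers: { "Content-Type": "application/json" },
        // Sample credentials from the collection, not real secrets.
        body: JSON.stringify({ email: "admin@fosrl.io", password: "Password123!" })
    });
    if (!res.ok) throw new Error(`Login failed with status ${res.status}`);
    return res.json();
}

login().then(console.log).catch(console.error);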


@@ -1,28 +0,0 @@
import { CommandModule } from "yargs";
import { db, certificates } from "@server/db";
type ClearCertificatesArgs = {};
export const clearCertificates: CommandModule<{}, ClearCertificatesArgs> = {
command: "clear-certificates",
describe: "Delete all entries from the certificates table",
builder: (yargs) => {
return yargs;
},
handler: async (argv: {}) => {
try {
console.log("Clearing all certificates from the database...");
const deleted = await db.delete(certificates).returning();
console.log(
`Deleted ${deleted.length} certificate(s) from the database`
);
process.exit(0);
} catch (error) {
console.error("Error:", error);
process.exit(1);
}
}
};


@@ -1,121 +0,0 @@
import { CommandModule } from "yargs";
import { db, orgs } from "@server/db";
import { eq } from "drizzle-orm";
import { encrypt } from "@server/lib/crypto";
import { configFilePath1, configFilePath2 } from "@server/lib/consts";
import { generateCA } from "@server/lib/sshCA";
import fs from "fs";
import yaml from "js-yaml";
type GenerateOrgCaKeysArgs = {
orgId: string;
secret?: string;
force?: boolean;
};
export const generateOrgCaKeys: CommandModule<{}, GenerateOrgCaKeysArgs> = {
command: "generate-org-ca-keys",
describe:
"Generate SSH CA public/private key pair for an organization and store them in the database (private key encrypted with server secret)",
builder: (yargs) => {
return yargs
.option("orgId", {
type: "string",
demandOption: true,
describe: "The organization ID"
})
.option("secret", {
type: "string",
describe:
"Server secret used to encrypt the CA private key. If omitted, read from config file (config.yml or config.yaml)."
})
.option("force", {
type: "boolean",
default: false,
describe:
"Overwrite existing CA keys for the org if they already exist"
});
},
handler: async (argv: {
orgId: string;
secret?: string;
force?: boolean;
}) => {
try {
const { orgId, force } = argv;
let secret = argv.secret;
if (!secret) {
const configPath = fs.existsSync(configFilePath1)
? configFilePath1
: fs.existsSync(configFilePath2)
? configFilePath2
: null;
if (!configPath) {
console.error(
"Error: No server secret provided and config file not found. " +
"Expected config.yml or config.yaml in the config directory, or pass --secret."
);
process.exit(1);
}
const configContent = fs.readFileSync(configPath, "utf8");
const config = yaml.load(configContent) as {
server?: { secret?: string };
};
if (!config?.server?.secret) {
console.error(
"Error: No server.secret in config file. Pass --secret or set server.secret in config."
);
process.exit(1);
}
secret = config.server.secret;
}
const [org] = await db
.select({
orgId: orgs.orgId,
sshCaPrivateKey: orgs.sshCaPrivateKey,
sshCaPublicKey: orgs.sshCaPublicKey
})
.from(orgs)
.where(eq(orgs.orgId, orgId))
.limit(1);
if (!org) {
console.error(`Error: Organization with orgId "${orgId}" not found.`);
process.exit(1);
}
if (org.sshCaPrivateKey != null || org.sshCaPublicKey != null) {
if (!force) {
console.error(
"Error: This organization already has CA keys. Use --force to overwrite."
);
process.exit(1);
}
}
const ca = generateCA(`pangolin-ssh-ca-${orgId}`);
const encryptedPrivateKey = encrypt(ca.privateKeyPem, secret);
await db
.update(orgs)
.set({
sshCaPrivateKey: encryptedPrivateKey,
sshCaPublicKey: ca.publicKeyOpenSSH
})
.where(eq(orgs.orgId, orgId));
console.log("SSH CA keys generated and stored for org:", orgId);
console.log("\nPublic key (OpenSSH format):");
console.log(ca.publicKeyOpenSSH);
process.exit(0);
} catch (error) {
console.error("Error generating org CA keys:", error);
process.exit(1);
}
}
};
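
The asymmetry worth noticing in this deleted command: only the CA private key is encrypted with the server secret before storage, while the OpenSSH public key is written in the clear. The scheme behind @server/lib/crypto's encrypt is not visible in this diff, so the sketch below assumes AES-256-GCM over a SHA-256-derived key purely for illustration:

// encrypt_sketch.go - hedged sketch of "encrypt the private key with the
// server secret before persisting it". Assumes AES-256-GCM; the real
// helper may differ.
package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"crypto/sha256"
	"encoding/base64"
	"fmt"
)

func encrypt(plaintext, secret string) (string, error) {
	key := sha256.Sum256([]byte(secret)) // derive a 32-byte key from the secret
	block, err := aes.NewCipher(key[:])
	if err != nil {
		return "", err
	}
	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return "", err
	}
	nonce := make([]byte, gcm.NonceSize())
	if _, err := rand.Read(nonce); err != nil {
		return "", err
	}
	// Prepend the nonce so decryption can recover it from the ciphertext.
	sealed := gcm.Seal(nonce, nonce, []byte(plaintext), nil)
	return base64.StdEncoding.EncodeToString(sealed), nil
}

func main() {
	out, err := encrypt("-----BEGIN PRIVATE KEY-----...", "server-secret")
	if err != nil {
		fmt.Println("encrypt failed:", err)
		return
	}
	fmt.Println(out)
}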

View File

@@ -1,5 +1,5 @@
import { CommandModule } from "yargs"; import { CommandModule } from "yargs";
import { db, idpOidcConfig, licenseKey, certificates, eventStreamingDestinations, alertWebhookActions } from "@server/db"; import { db, idpOidcConfig, licenseKey } from "@server/db";
import { encrypt, decrypt } from "@server/lib/crypto"; import { encrypt, decrypt } from "@server/lib/crypto";
import { configFilePath1, configFilePath2 } from "@server/lib/consts"; import { configFilePath1, configFilePath2 } from "@server/lib/consts";
import { eq } from "drizzle-orm"; import { eq } from "drizzle-orm";
@@ -129,15 +129,9 @@ export const rotateServerSecret: CommandModule<
console.log("\nReading encrypted data from database..."); console.log("\nReading encrypted data from database...");
const idpConfigs = await db.select().from(idpOidcConfig); const idpConfigs = await db.select().from(idpOidcConfig);
const licenseKeys = await db.select().from(licenseKey); const licenseKeys = await db.select().from(licenseKey);
const certs = await db.select().from(certificates);
const streamingDestinations = await db.select().from(eventStreamingDestinations);
const webhookActions = await db.select().from(alertWebhookActions);
console.log(`Found ${idpConfigs.length} OIDC IdP configuration(s)`); console.log(`Found ${idpConfigs.length} OIDC IdP configuration(s)`);
console.log(`Found ${licenseKeys.length} license key(s)`); console.log(`Found ${licenseKeys.length} license key(s)`);
console.log(`Found ${certs.length} certificate(s)`);
console.log(`Found ${streamingDestinations.length} event streaming destination(s)`);
console.log(`Found ${webhookActions.length} alert webhook action(s)`);
// Prepare all decrypted and re-encrypted values // Prepare all decrypted and re-encrypted values
console.log("\nDecrypting and re-encrypting values..."); console.log("\nDecrypting and re-encrypting values...");
@@ -155,27 +149,8 @@ export const rotateServerSecret: CommandModule<
encryptedInstanceId: string; encryptedInstanceId: string;
}; };
type CertUpdate = {
certId: number;
encryptedCertFile: string | null;
encryptedKeyFile: string | null;
};
type StreamingDestinationUpdate = {
destinationId: number;
encryptedConfig: string;
};
type WebhookActionUpdate = {
webhookActionId: number;
encryptedConfig: string;
};
const idpUpdates: IdpUpdate[] = []; const idpUpdates: IdpUpdate[] = [];
const licenseKeyUpdates: LicenseKeyUpdate[] = []; const licenseKeyUpdates: LicenseKeyUpdate[] = [];
const certUpdates: CertUpdate[] = [];
const streamingDestinationUpdates: StreamingDestinationUpdate[] = [];
const webhookActionUpdates: WebhookActionUpdate[] = [];
// Process idpOidcConfig entries // Process idpOidcConfig entries
for (const idpConfig of idpConfigs) { for (const idpConfig of idpConfigs) {
@@ -242,70 +217,6 @@ export const rotateServerSecret: CommandModule<
} }
} }
// Process certificate entries
for (const cert of certs) {
try {
const encryptedCertFile = cert.certFile
? encrypt(decrypt(cert.certFile, oldSecret), newSecret)
: null;
const encryptedKeyFile = cert.keyFile
? encrypt(decrypt(cert.keyFile, oldSecret), newSecret)
: null;
certUpdates.push({
certId: cert.certId,
encryptedCertFile,
encryptedKeyFile
});
} catch (error) {
console.error(
`Error processing certificate ${cert.certId} (${cert.domain}):`,
error
);
throw error;
}
}
// Process eventStreamingDestinations entries
for (const dest of streamingDestinations) {
try {
const decryptedConfig = decrypt(dest.config, oldSecret);
const encryptedConfig = encrypt(decryptedConfig, newSecret);
streamingDestinationUpdates.push({
destinationId: dest.destinationId,
encryptedConfig
});
} catch (error) {
console.error(
`Error processing event streaming destination ${dest.destinationId}:`,
error
);
throw error;
}
}
// Process alertWebhookActions entries
for (const webhook of webhookActions) {
try {
if (webhook.config == null) continue;
const decryptedConfig = decrypt(webhook.config, oldSecret);
const encryptedConfig = encrypt(decryptedConfig, newSecret);
webhookActionUpdates.push({
webhookActionId: webhook.webhookActionId,
encryptedConfig
});
} catch (error) {
console.error(
`Error processing alert webhook action ${webhook.webhookActionId}:`,
error
);
throw error;
}
}
// Perform all database updates in a single transaction // Perform all database updates in a single transaction
console.log("\nUpdating database in transaction..."); console.log("\nUpdating database in transaction...");
await db.transaction(async (trx) => { await db.transaction(async (trx) => {
@@ -339,50 +250,10 @@ export const rotateServerSecret: CommandModule<
instanceId: update.encryptedInstanceId instanceId: update.encryptedInstanceId
}); });
} }
// Update certificate entries
for (const update of certUpdates) {
await trx
.update(certificates)
.set({
certFile: update.encryptedCertFile,
keyFile: update.encryptedKeyFile
})
.where(eq(certificates.certId, update.certId));
}
// Update event streaming destination entries
for (const update of streamingDestinationUpdates) {
await trx
.update(eventStreamingDestinations)
.set({ config: update.encryptedConfig })
.where(
eq(
eventStreamingDestinations.destinationId,
update.destinationId
)
);
}
// Update alert webhook action entries
for (const update of webhookActionUpdates) {
await trx
.update(alertWebhookActions)
.set({ config: update.encryptedConfig })
.where(
eq(
alertWebhookActions.webhookActionId,
update.webhookActionId
)
);
}
}); });
console.log(`Rotated ${idpUpdates.length} OIDC IdP configuration(s)`); console.log(`Rotated ${idpUpdates.length} OIDC IdP configuration(s)`);
console.log(`Rotated ${licenseKeyUpdates.length} license key(s)`); console.log(`Rotated ${licenseKeyUpdates.length} license key(s)`);
console.log(`Rotated ${certUpdates.length} certificate(s)`);
console.log(`Rotated ${streamingDestinationUpdates.length} event streaming destination(s)`);
console.log(`Rotated ${webhookActionUpdates.length} alert webhook action(s)`);
// Update config file with new secret // Update config file with new secret
console.log("\nUpdating config file..."); console.log("\nUpdating config file...");
@@ -399,9 +270,6 @@ export const rotateServerSecret: CommandModule<
console.log(`\nSummary:`); console.log(`\nSummary:`);
console.log(` - OIDC IdP configurations: ${idpUpdates.length}`); console.log(` - OIDC IdP configurations: ${idpUpdates.length}`);
console.log(` - License keys: ${licenseKeyUpdates.length}`); console.log(` - License keys: ${licenseKeyUpdates.length}`);
console.log(` - Certificates: ${certUpdates.length}`);
console.log(` - Event streaming destinations: ${streamingDestinationUpdates.length}`);
console.log(` - Alert webhook actions: ${webhookActionUpdates.length}`);
console.log( console.log(
`\n IMPORTANT: Restart the server for the new secret to take effect.` `\n IMPORTANT: Restart the server for the new secret to take effect.`
); );
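
With the certificate, event-streaming-destination, and webhook-action branches stripped out, the remaining rotation logic keeps one shape per table: decrypt with the old secret, re-encrypt with the new one, stage the updates, then write them all inside a single transaction so a mid-rotation failure never leaves a half-rotated database. Reduced to one slice of values, with decryptWith/encryptWith as stand-ins for the real crypto helpers:

// rotate_sketch.go - the stage-then-commit rotation pattern in miniature.
package main

import "fmt"

// Placeholders for the real decrypt/encrypt helpers.
func decryptWith(ciphertext, secret string) (string, error) { return ciphertext, nil }
func encryptWith(plaintext, secret string) (string, error)  { return plaintext, nil }

// rotateAll fails fast: nothing is persisted unless every value both
// decrypts under the old secret and re-encrypts under the new one.
func rotateAll(values []string, oldSecret, newSecret string) ([]string, error) {
	rotated := make([]string, 0, len(values))
	for i, v := range values {
		plain, err := decryptWith(v, oldSecret)
		if err != nil {
			return nil, fmt.Errorf("value %d failed to decrypt: %w", i, err)
		}
		enc, err := encryptWith(plain, newSecret)
		if err != nil {
			return nil, fmt.Errorf("value %d failed to re-encrypt: %w", i, err)
		}
		rotated = append(rotated, enc)
	}
	return rotated, nil // caller writes these back in one transaction
}

func main() {
	out, err := rotateAll([]string{"enc1", "enc2"}, "old-secret", "new-secret")
	fmt.Println(out, err)
}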

View File

@@ -8,8 +8,6 @@ import { clearExitNodes } from "./commands/clearExitNodes";
import { rotateServerSecret } from "./commands/rotateServerSecret"; import { rotateServerSecret } from "./commands/rotateServerSecret";
import { clearLicenseKeys } from "./commands/clearLicenseKeys"; import { clearLicenseKeys } from "./commands/clearLicenseKeys";
import { deleteClient } from "./commands/deleteClient"; import { deleteClient } from "./commands/deleteClient";
import { generateOrgCaKeys } from "./commands/generateOrgCaKeys";
import { clearCertificates } from "./commands/clearCertificates";
yargs(hideBin(process.argv)) yargs(hideBin(process.argv))
.scriptName("pangctl") .scriptName("pangctl")
@@ -19,7 +17,5 @@ yargs(hideBin(process.argv))
.command(rotateServerSecret) .command(rotateServerSecret)
.command(clearLicenseKeys) .command(clearLicenseKeys)
.command(deleteClient) .command(deleteClient)
.command(generateOrgCaKeys)
.command(clearCertificates)
.demandCommand() .demandCommand()
.help().argv; .help().argv;

View File

@@ -1 +0,0 @@
*-journal

View File

@@ -4,12 +4,6 @@ services:
image: fosrl/pangolin:latest image: fosrl/pangolin:latest
container_name: pangolin container_name: pangolin
restart: unless-stopped restart: unless-stopped
deploy:
resources:
limits:
memory: 1g
reservations:
memory: 256m
volumes: volumes:
- ./config:/app/config - ./config:/app/config
healthcheck: healthcheck:

View File

@@ -1,12 +0,0 @@
services:
mailer:
image: axllent/mailpit
ports:
- 8025:8025
- 1025:1025
volumes:
- mailpit-storage:/data
environment:
- MP_DATABASE=/data/mailpit.db
volumes:
mailpit-storage:

View File

@@ -6,12 +6,6 @@ import path from "path";
import fs from "fs"; import fs from "fs";
// import { glob } from "glob"; // import { glob } from "glob";
// Read default build type from server/build.ts
let build = "oss";
const buildFile = fs.readFileSync(path.resolve("server/build.ts"), "utf8");
const m = buildFile.match(/export\s+const\s+build\s*=\s*["'](oss|saas|enterprise)["']/);
if (m) build = m[1];
const banner = ` const banner = `
// patch __dirname // patch __dirname
// import { fileURLToPath } from "url"; // import { fileURLToPath } from "url";
@@ -43,7 +37,7 @@ const argv = yargs(hideBin(process.argv))
describe: "Build type (oss, saas, enterprise)", describe: "Build type (oss, saas, enterprise)",
type: "string", type: "string",
choices: ["oss", "saas", "enterprise"], choices: ["oss", "saas", "enterprise"],
default: build default: "oss"
}) })
.help() .help()
.alias("help", "h").argv; .alias("help", "h").argv;
@@ -281,7 +275,7 @@ esbuild
}) })
], ],
sourcemap: "inline", sourcemap: "inline",
target: "node24" target: "node22"
}) })
.then((result) => { .then((result) => {
// Check if there were any errors in the build result // Check if there were any errors in the build result
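
One side of this hunk derives the CLI's default --build flavor by regex-matching server/build.ts, falling back to "oss"; the other hardcodes "oss" (and targets node22 rather than node24). The read-with-fallback idea translates directly, shown here as a hedged Go rendition of the same logic:

// build_default.go - read a build flavor out of server/build.ts,
// falling back to "oss" when the file or the match is missing.
package main

import (
	"fmt"
	"os"
	"regexp"
)

func defaultBuild(path string) string {
	data, err := os.ReadFile(path)
	if err != nil {
		return "oss" // file missing: keep the safe default
	}
	re := regexp.MustCompile(`export\s+const\s+build\s*=\s*["'](oss|saas|enterprise)["']`)
	if m := re.FindSubmatch(data); m != nil {
		return string(m[1])
	}
	return "oss" // no match: keep the safe default
}

func main() {
	fmt.Println(defaultBuild("server/build.ts"))
}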

View File

@@ -1,24 +1,41 @@
all: go-build-release all: update-versions go-build-release put-back
dev-all: dev-update-versions dev-build dev-clean
# Build with version injection via ldflags
# Versions can be passed via: make go-build-release PANGOLIN_VERSION=x.x.x GERBIL_VERSION=x.x.x BADGER_VERSION=x.x.x
# Or fetched automatically if not provided (requires curl and jq)
PANGOLIN_VERSION ?= $(shell curl -s https://api.github.com/repos/fosrl/pangolin/tags | jq -r '.[0].name')
GERBIL_VERSION ?= $(shell curl -s https://api.github.com/repos/fosrl/gerbil/tags | jq -r '.[0].name')
BADGER_VERSION ?= $(shell curl -s https://api.github.com/repos/fosrl/badger/tags | jq -r '.[0].name')
LDFLAGS = -X main.pangolinVersion=$(PANGOLIN_VERSION) \
-X main.gerbilVersion=$(GERBIL_VERSION) \
-X main.badgerVersion=$(BADGER_VERSION)
go-build-release: go-build-release:
@echo "Building with versions - Pangolin: $(PANGOLIN_VERSION), Gerbil: $(GERBIL_VERSION), Badger: $(BADGER_VERSION)" CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o bin/installer_linux_amd64
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags "$(LDFLAGS)" -o bin/installer_linux_amd64 CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -o bin/installer_linux_arm64
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -ldflags "$(LDFLAGS)" -o bin/installer_linux_arm64
clean: clean:
rm -f bin/installer_linux_amd64 rm -f bin/installer_linux_amd64
rm -f bin/installer_linux_arm64 rm -f bin/installer_linux_arm64
.PHONY: all go-build-release clean update-versions:
@echo "Fetching latest versions..."
cp main.go main.go.bak && \
$(MAKE) dev-update-versions
put-back:
mv main.go.bak main.go
dev-update-versions:
if [ -z "$(tag)" ]; then \
PANGOLIN_VERSION=$$(curl -s https://api.github.com/repos/fosrl/pangolin/tags | jq -r '.[0].name'); \
else \
PANGOLIN_VERSION=$(tag); \
fi && \
GERBIL_VERSION=$$(curl -s https://api.github.com/repos/fosrl/gerbil/tags | jq -r '.[0].name') && \
BADGER_VERSION=$$(curl -s https://api.github.com/repos/fosrl/badger/tags | jq -r '.[0].name') && \
echo "Latest versions - Pangolin: $$PANGOLIN_VERSION, Gerbil: $$GERBIL_VERSION, Badger: $$BADGER_VERSION" && \
sed -i "s/config.PangolinVersion = \".*\"/config.PangolinVersion = \"$$PANGOLIN_VERSION\"/" main.go && \
sed -i "s/config.GerbilVersion = \".*\"/config.GerbilVersion = \"$$GERBIL_VERSION\"/" main.go && \
sed -i "s/config.BadgerVersion = \".*\"/config.BadgerVersion = \"$$BADGER_VERSION\"/" main.go && \
echo "Updated main.go with latest versions"
dev-build: go-build-release
dev-clean:
@echo "Restoring version values ..."
sed -i "s/config.PangolinVersion = \".*\"/config.PangolinVersion = \"replaceme\"/" main.go && \
sed -i "s/config.GerbilVersion = \".*\"/config.GerbilVersion = \"replaceme\"/" main.go && \
sed -i "s/config.BadgerVersion = \".*\"/config.BadgerVersion = \"replaceme\"/" main.go
@echo "Restored version strings in main.go"
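
The two Makefiles embody two version-injection strategies: one passes versions at link time with -ldflags "-X main.var=value", the other sed-rewrites "replaceme" placeholders in main.go before building and restores them afterwards. The Go half of the ldflags contract is just package-level string variables for the linker to fill, exactly the shape the main.go hunk further down declares:

// versions.go - the consumer side of -ldflags "-X" injection.
// Build with, e.g.:
//   go build -ldflags "-X main.pangolinVersion=x.y.z" .
package main

import "fmt"

// Set at link time via -ldflags; empty when built without them.
var (
	pangolinVersion string
	gerbilVersion   string
	badgerVersion   string
)

func main() {
	fmt.Printf("pangolin=%q gerbil=%q badger=%q\n", pangolinVersion, gerbilVersion, badgerVersion)
}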

View File

@@ -99,6 +99,11 @@ func ReadAppConfig(configPath string) (*AppConfigValues, error) {
return values, nil return values, nil
} }
// findPattern finds the start of a pattern in a string
func findPattern(s, pattern string) int {
return bytes.Index([]byte(s), []byte(pattern))
}
func copyDockerService(sourceFile, destFile, serviceName string) error { func copyDockerService(sourceFile, destFile, serviceName string) error {
// Read source file // Read source file
sourceData, err := os.ReadFile(sourceFile) sourceData, err := os.ReadFile(sourceFile)
@@ -113,19 +118,19 @@ func copyDockerService(sourceFile, destFile, serviceName string) error {
} }
// Parse source Docker Compose YAML // Parse source Docker Compose YAML
var sourceCompose map[string]any var sourceCompose map[string]interface{}
if err := yaml.Unmarshal(sourceData, &sourceCompose); err != nil { if err := yaml.Unmarshal(sourceData, &sourceCompose); err != nil {
return fmt.Errorf("error parsing source Docker Compose file: %w", err) return fmt.Errorf("error parsing source Docker Compose file: %w", err)
} }
// Parse destination Docker Compose YAML // Parse destination Docker Compose YAML
var destCompose map[string]any var destCompose map[string]interface{}
if err := yaml.Unmarshal(destData, &destCompose); err != nil { if err := yaml.Unmarshal(destData, &destCompose); err != nil {
return fmt.Errorf("error parsing destination Docker Compose file: %w", err) return fmt.Errorf("error parsing destination Docker Compose file: %w", err)
} }
// Get services section from source // Get services section from source
sourceServices, ok := sourceCompose["services"].(map[string]any) sourceServices, ok := sourceCompose["services"].(map[string]interface{})
if !ok { if !ok {
return fmt.Errorf("services section not found in source file or has invalid format") return fmt.Errorf("services section not found in source file or has invalid format")
} }
@@ -137,10 +142,10 @@ func copyDockerService(sourceFile, destFile, serviceName string) error {
} }
// Get or create services section in destination // Get or create services section in destination
destServices, ok := destCompose["services"].(map[string]any) destServices, ok := destCompose["services"].(map[string]interface{})
if !ok { if !ok {
// If services section doesn't exist, create it // If services section doesn't exist, create it
destServices = make(map[string]any) destServices = make(map[string]interface{})
destCompose["services"] = destServices destCompose["services"] = destServices
} }
@@ -182,21 +187,17 @@ func backupConfig() error {
return nil return nil
} }
func MarshalYAMLWithIndent(data any, indent int) (resp []byte, err error) { func MarshalYAMLWithIndent(data interface{}, indent int) ([]byte, error) {
buffer := new(bytes.Buffer) buffer := new(bytes.Buffer)
encoder := yaml.NewEncoder(buffer) encoder := yaml.NewEncoder(buffer)
encoder.SetIndent(indent) encoder.SetIndent(indent)
if err := encoder.Encode(data); err != nil { err := encoder.Encode(data)
if err != nil {
return nil, err return nil, err
} }
defer func() { defer encoder.Close()
if cerr := encoder.Close(); cerr != nil && err == nil {
err = cerr
}
}()
return buffer.Bytes(), nil return buffer.Bytes(), nil
} }
@@ -208,7 +209,7 @@ func replaceInFile(filepath, oldStr, newStr string) error {
} }
// Replace the string // Replace the string
newContent := strings.ReplaceAll(string(content), oldStr, newStr) newContent := strings.Replace(string(content), oldStr, newStr, -1)
// Write the modified content back to the file // Write the modified content back to the file
err = os.WriteFile(filepath, []byte(newContent), 0644) err = os.WriteFile(filepath, []byte(newContent), 0644)
@@ -227,28 +228,28 @@ func CheckAndAddTraefikLogVolume(composePath string) error {
} }
// Parse YAML into a generic map // Parse YAML into a generic map
var compose map[string]any var compose map[string]interface{}
if err := yaml.Unmarshal(data, &compose); err != nil { if err := yaml.Unmarshal(data, &compose); err != nil {
return fmt.Errorf("error parsing compose file: %w", err) return fmt.Errorf("error parsing compose file: %w", err)
} }
// Get services section // Get services section
services, ok := compose["services"].(map[string]any) services, ok := compose["services"].(map[string]interface{})
if !ok { if !ok {
return fmt.Errorf("services section not found or invalid") return fmt.Errorf("services section not found or invalid")
} }
// Get traefik service // Get traefik service
traefik, ok := services["traefik"].(map[string]any) traefik, ok := services["traefik"].(map[string]interface{})
if !ok { if !ok {
return fmt.Errorf("traefik service not found or invalid") return fmt.Errorf("traefik service not found or invalid")
} }
// Check volumes // Check volumes
logVolume := "./config/traefik/logs:/var/log/traefik" logVolume := "./config/traefik/logs:/var/log/traefik"
var volumes []any var volumes []interface{}
if existingVolumes, ok := traefik["volumes"].([]any); ok { if existingVolumes, ok := traefik["volumes"].([]interface{}); ok {
// Check if volume already exists // Check if volume already exists
for _, v := range existingVolumes { for _, v := range existingVolumes {
if v.(string) == logVolume { if v.(string) == logVolume {
@@ -294,13 +295,13 @@ func MergeYAML(baseFile, overlayFile string) error {
} }
// Parse base YAML into a map // Parse base YAML into a map
var baseMap map[string]any var baseMap map[string]interface{}
if err := yaml.Unmarshal(baseContent, &baseMap); err != nil { if err := yaml.Unmarshal(baseContent, &baseMap); err != nil {
return fmt.Errorf("error parsing base YAML: %v", err) return fmt.Errorf("error parsing base YAML: %v", err)
} }
// Parse overlay YAML into a map // Parse overlay YAML into a map
var overlayMap map[string]any var overlayMap map[string]interface{}
if err := yaml.Unmarshal(overlayContent, &overlayMap); err != nil { if err := yaml.Unmarshal(overlayContent, &overlayMap); err != nil {
return fmt.Errorf("error parsing overlay YAML: %v", err) return fmt.Errorf("error parsing overlay YAML: %v", err)
} }
@@ -323,8 +324,8 @@ func MergeYAML(baseFile, overlayFile string) error {
} }
// mergeMap recursively merges two maps // mergeMap recursively merges two maps
func mergeMap(base, overlay map[string]any) map[string]any { func mergeMap(base, overlay map[string]interface{}) map[string]interface{} {
result := make(map[string]any) result := make(map[string]interface{})
// Copy all key-values from base map // Copy all key-values from base map
for k, v := range base { for k, v := range base {
@@ -335,8 +336,8 @@ func mergeMap(base, overlay map[string]any) map[string]any {
for k, v := range overlay { for k, v := range overlay {
// If both maps have the same key and both values are maps, merge recursively // If both maps have the same key and both values are maps, merge recursively
if baseVal, ok := base[k]; ok { if baseVal, ok := base[k]; ok {
if baseMap, isBaseMap := baseVal.(map[string]any); isBaseMap { if baseMap, isBaseMap := baseVal.(map[string]interface{}); isBaseMap {
if overlayMap, isOverlayMap := v.(map[string]any); isOverlayMap { if overlayMap, isOverlayMap := v.(map[string]interface{}); isOverlayMap {
result[k] = mergeMap(baseMap, overlayMap) result[k] = mergeMap(baseMap, overlayMap)
continue continue
} }
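
Beyond the mechanical map[string]interface{} to map[string]any renames, the two MarshalYAMLWithIndent variants differ in how they surface the encoder's Close error: one captures it through a named return inside a deferred function (Close flushes, so its error matters), the other defers Close and discards the result. The named-return pattern standalone, using gopkg.in/yaml.v3 as the file already does; the sample map in main is illustrative:

// yaml_indent.go - capture a deferred Close error via a named return.
package main

import (
	"bytes"
	"fmt"

	"gopkg.in/yaml.v3"
)

func marshalIndent(data any, indent int) (out []byte, err error) {
	buf := new(bytes.Buffer)
	enc := yaml.NewEncoder(buf)
	enc.SetIndent(indent)
	if err = enc.Encode(data); err != nil {
		return nil, err
	}
	defer func() {
		// Close flushes the encoder; report its error only if nothing
		// failed earlier.
		if cerr := enc.Close(); cerr != nil && err == nil {
			err = cerr
		}
	}()
	return buf.Bytes(), nil
}

func main() {
	out, err := marshalIndent(map[string]any{"services": map[string]any{"traefik": map[string]any{"restart": "unless-stopped"}}}, 2)
	if err != nil {
		fmt.Println("marshal failed:", err)
		return
	}
	fmt.Print(string(out))
}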

View File

@@ -81,19 +81,11 @@ entryPoints:
transport: transport:
respondingTimeouts: respondingTimeouts:
readTimeout: "30m" readTimeout: "30m"
http3:
advertisedPort: 443
http: http:
tls: tls:
certResolver: "letsencrypt" certResolver: "letsencrypt"
middlewares: middlewares:
- crowdsec@file - crowdsec@file
encodedCharacters:
allowEncodedSlash: true
allowEncodedQuestionMark: true
serversTransport: serversTransport:
insecureSkipVerify: true insecureSkipVerify: true
ping:
entryPoint: "web"

View File

@@ -4,12 +4,6 @@ services:
image: docker.io/fosrl/pangolin:{{if .IsEnterprise}}ee-{{end}}{{.PangolinVersion}} image: docker.io/fosrl/pangolin:{{if .IsEnterprise}}ee-{{end}}{{.PangolinVersion}}
container_name: pangolin container_name: pangolin
restart: unless-stopped restart: unless-stopped
deploy:
resources:
limits:
memory: 1g
reservations:
memory: 256m
volumes: volumes:
- ./config:/app/config - ./config:/app/config
healthcheck: healthcheck:
@@ -38,14 +32,15 @@ services:
- 51820:51820/udp - 51820:51820/udp
- 21820:21820/udp - 21820:21820/udp
- 443:443 - 443:443
- 443:443/udp # For http3 QUIC if desired
- 80:80 - 80:80
{{end}} {{end}}
traefik: traefik:
image: docker.io/traefik:v3.6 image: docker.io/traefik:v3.6
container_name: traefik container_name: traefik
restart: unless-stopped restart: unless-stopped
{{if .InstallGerbil}} network_mode: service:gerbil # Ports appear on the gerbil service{{end}}{{if not .InstallGerbil}} {{if .InstallGerbil}}
network_mode: service:gerbil # Ports appear on the gerbil service
{{end}}{{if not .InstallGerbil}}
ports: ports:
- 443:443 - 443:443
- 80:80 - 80:80

View File

@@ -40,8 +40,6 @@ entryPoints:
transport: transport:
respondingTimeouts: respondingTimeouts:
readTimeout: "30m" readTimeout: "30m"
http3:
advertisedPort: 443
http: http:
tls: tls:
certResolver: "letsencrypt" certResolver: "letsencrypt"

View File

@@ -144,13 +144,12 @@ func installDocker() error {
} }
func startDockerService() error { func startDockerService() error {
switch runtime.GOOS { if runtime.GOOS == "linux" {
case "linux":
cmd := exec.Command("systemctl", "enable", "--now", "docker") cmd := exec.Command("systemctl", "enable", "--now", "docker")
cmd.Stdout = os.Stdout cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr cmd.Stderr = os.Stderr
return cmd.Run() return cmd.Run()
case "darwin": } else if runtime.GOOS == "darwin" {
// On macOS, Docker is usually started via the Docker Desktop application // On macOS, Docker is usually started via the Docker Desktop application
fmt.Println("Please start Docker Desktop manually on macOS.") fmt.Println("Please start Docker Desktop manually on macOS.")
return nil return nil
@@ -303,7 +302,7 @@ func pullContainers(containerType SupportedContainer) error {
return nil return nil
} }
return fmt.Errorf("unsupported container type: %s", containerType) return fmt.Errorf("Unsupported container type: %s", containerType)
} }
// startContainers starts the containers using the appropriate command. // startContainers starts the containers using the appropriate command.
@@ -326,7 +325,7 @@ func startContainers(containerType SupportedContainer) error {
return nil return nil
} }
return fmt.Errorf("unsupported container type: %s", containerType) return fmt.Errorf("Unsupported container type: %s", containerType)
} }
// stopContainers stops the containers using the appropriate command. // stopContainers stops the containers using the appropriate command.
@@ -348,7 +347,7 @@ func stopContainers(containerType SupportedContainer) error {
return nil return nil
} }
return fmt.Errorf("unsupported container type: %s", containerType) return fmt.Errorf("Unsupported container type: %s", containerType)
} }
// restartContainer restarts a specific container using the appropriate command. // restartContainer restarts a specific container using the appropriate command.
@@ -370,5 +369,5 @@ func restartContainer(container string, containerType SupportedContainer) error
return nil return nil
} }
return fmt.Errorf("unsupported container type: %s", containerType) return fmt.Errorf("Unsupported container type: %s", containerType)
} }
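
Two idiomatic cleanups run through this file: a switch on runtime.GOOS instead of an if/else chain, and lowercase error strings, following the Go convention that error messages get wrapped mid-sentence and so should not be capitalized. In miniature:

// container_cmds.go - switch on GOOS, lowercase error strings.
package main

import (
	"fmt"
	"os"
	"os/exec"
	"runtime"
)

func startDocker() error {
	switch runtime.GOOS {
	case "linux":
		cmd := exec.Command("systemctl", "enable", "--now", "docker")
		cmd.Stdout = os.Stdout
		cmd.Stderr = os.Stderr
		return cmd.Run()
	case "darwin":
		fmt.Println("Please start Docker Desktop manually on macOS.")
		return nil
	default:
		// lowercase so it reads cleanly when wrapped by a caller
		return fmt.Errorf("unsupported platform: %s", runtime.GOOS)
	}
}

func main() {
	if err := startDocker(); err != nil {
		fmt.Println(err)
	}
}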

View File

@@ -6,13 +6,12 @@ import (
"log" "log"
"os" "os"
"os/exec" "os/exec"
"path/filepath"
"strings" "strings"
"gopkg.in/yaml.v3" "gopkg.in/yaml.v3"
) )
func installCrowdsec(config Config, installDir string) error { func installCrowdsec(config Config) error {
if err := stopContainers(config.InstallationContainerType); err != nil { if err := stopContainers(config.InstallationContainerType); err != nil {
return fmt.Errorf("failed to stop containers: %v", err) return fmt.Errorf("failed to stop containers: %v", err)
@@ -28,20 +27,9 @@ func installCrowdsec(config Config, installDir string) error {
os.Exit(1) os.Exit(1)
} }
if err := os.MkdirAll("config/crowdsec/db", 0755); err != nil { os.MkdirAll("config/crowdsec/db", 0755)
fmt.Printf("Error creating config files: %v\n", err) os.MkdirAll("config/crowdsec/acquis.d", 0755)
os.Exit(1) os.MkdirAll("config/traefik/logs", 0755)
}
if err := os.MkdirAll("config/crowdsec/acquis.d", 0755); err != nil {
fmt.Printf("Error creating config files: %v\n", err)
os.Exit(1)
}
if err := os.MkdirAll("config/traefik/logs", 0755); err != nil {
fmt.Printf("Error creating config files: %v\n", err)
os.Exit(1)
}
setupTraefikLogRotate(installDir)
if err := copyDockerService("config/crowdsec/docker-compose.yml", "docker-compose.yml", "crowdsec"); err != nil { if err := copyDockerService("config/crowdsec/docker-compose.yml", "docker-compose.yml", "crowdsec"); err != nil {
fmt.Printf("Error copying docker service: %v\n", err) fmt.Printf("Error copying docker service: %v\n", err)
@@ -165,34 +153,34 @@ func CheckAndAddCrowdsecDependency(composePath string) error {
} }
// Parse YAML into a generic map // Parse YAML into a generic map
var compose map[string]any var compose map[string]interface{}
if err := yaml.Unmarshal(data, &compose); err != nil { if err := yaml.Unmarshal(data, &compose); err != nil {
return fmt.Errorf("error parsing compose file: %w", err) return fmt.Errorf("error parsing compose file: %w", err)
} }
// Get services section // Get services section
services, ok := compose["services"].(map[string]any) services, ok := compose["services"].(map[string]interface{})
if !ok { if !ok {
return fmt.Errorf("services section not found or invalid") return fmt.Errorf("services section not found or invalid")
} }
// Get traefik service // Get traefik service
traefik, ok := services["traefik"].(map[string]any) traefik, ok := services["traefik"].(map[string]interface{})
if !ok { if !ok {
return fmt.Errorf("traefik service not found or invalid") return fmt.Errorf("traefik service not found or invalid")
} }
// Get dependencies // Get dependencies
dependsOn, ok := traefik["depends_on"].(map[string]any) dependsOn, ok := traefik["depends_on"].(map[string]interface{})
if ok { if ok {
// Append the new block for crowdsec // Append the new block for crowdsec
dependsOn["crowdsec"] = map[string]any{ dependsOn["crowdsec"] = map[string]interface{}{
"condition": "service_healthy", "condition": "service_healthy",
} }
} else { } else {
// No dependencies exist, create it // No dependencies exist, create it
traefik["depends_on"] = map[string]any{ traefik["depends_on"] = map[string]interface{}{
"crowdsec": map[string]any{ "crowdsec": map[string]interface{}{
"condition": "service_healthy", "condition": "service_healthy",
}, },
} }
@@ -211,69 +199,3 @@ func CheckAndAddCrowdsecDependency(composePath string) error {
fmt.Println("Added dependency of crowdsec to traefik") fmt.Println("Added dependency of crowdsec to traefik")
return nil return nil
} }
// setupTraefikLogRotate writes a logrotate config for the Traefik access log
// that CrowdSec depends on. This is only needed when CrowdSec is installed
// because the default Pangolin install does not enable Traefik access logs.
//
// copytruncate is used so Traefik does not need to be restarted or sent a
// signal after rotation — it keeps writing to the same file descriptor while
// the rotated copy is made and the original is truncated in place.
func setupTraefikLogRotate(installDir string) {
const logrotateDir = "/etc/logrotate.d"
const logrotateFile = "/etc/logrotate.d/pangolin-traefik"
logPath := filepath.Join(installDir, "config/traefik/logs/access.log")
if os.Geteuid() != 0 {
fmt.Println("\n[logrotate] Skipping automatic logrotate setup: not running as root.")
fmt.Println("[logrotate] To prevent unbounded growth of the Traefik access log used by CrowdSec,")
fmt.Println("[logrotate] create the file /etc/logrotate.d/pangolin-traefik manually with:")
printLogrotateConfig(logPath)
return
}
config := fmt.Sprintf(`# Logrotate config for Traefik access logs used by CrowdSec.
# Generated by the Pangolin installer. Safe to edit.
%s {
daily
rotate 7
compress
delaycompress
missingok
notifempty
copytruncate
}
`, logPath)
if err := os.MkdirAll(logrotateDir, 0755); err != nil {
fmt.Printf("[logrotate] Warning: could not create %s: %v\n", logrotateDir, err)
return
}
if err := os.WriteFile(logrotateFile, []byte(config), 0644); err != nil {
fmt.Printf("[logrotate] Warning: could not write %s: %v\n", logrotateFile, err)
fmt.Println("[logrotate] Set it up manually:")
printLogrotateConfig(logPath)
return
}
fmt.Printf("[logrotate] Wrote logrotate config to %s\n", logrotateFile)
fmt.Println("[logrotate] Traefik access logs will be rotated daily, keeping 7 compressed copies.")
}
// printLogrotateConfig prints a logrotate config block to stdout so users can
// set it up manually when the installer cannot write to /etc.
func printLogrotateConfig(logPath string) {
fmt.Printf(`
%s {
daily
rotate 7
compress
delaycompress
missingok
notifempty
copytruncate
}
`, logPath)
}
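
One side of the directory-creation hunk near the top of this file checks every MkdirAll and exits on failure; the other ignores the errors entirely. The checked version repeats the same four lines three times, which collapses naturally into a small helper. A sketch only, this helper exists in neither branch:

// mkdirs.go - collapse the repeated MkdirAll-or-exit pattern.
package main

import (
	"fmt"
	"os"
)

// mustMkdirAll creates each directory (and parents), exiting on the
// first failure, mirroring the error handling in the hunk above.
func mustMkdirAll(paths ...string) {
	for _, p := range paths {
		if err := os.MkdirAll(p, 0755); err != nil {
			fmt.Printf("Error creating %s: %v\n", p, err)
			os.Exit(1)
		}
	}
}

func main() {
	mustMkdirAll("config/crowdsec/db", "config/crowdsec/acquis.d", "config/traefik/logs")
}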

View File

@@ -1,38 +1,10 @@
module installer module installer
go 1.25.0 go 1.24.0
require ( require (
github.com/charmbracelet/huh v1.0.0 golang.org/x/term v0.39.0
github.com/charmbracelet/lipgloss v1.1.0
golang.org/x/term v0.42.0
gopkg.in/yaml.v3 v3.0.1 gopkg.in/yaml.v3 v3.0.1
) )
require ( require golang.org/x/sys v0.40.0 // indirect
github.com/atotto/clipboard v0.1.4 // indirect
github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
github.com/catppuccin/go v0.3.0 // indirect
github.com/charmbracelet/bubbles v0.21.1-0.20250623103423-23b8fd6302d7 // indirect
github.com/charmbracelet/bubbletea v1.3.6 // indirect
github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect
github.com/charmbracelet/x/ansi v0.9.3 // indirect
github.com/charmbracelet/x/cellbuf v0.0.13 // indirect
github.com/charmbracelet/x/exp/strings v0.0.0-20240722160745-212f7b056ed0 // indirect
github.com/charmbracelet/x/term v0.2.1 // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/mattn/go-localereader v0.0.1 // indirect
github.com/mattn/go-runewidth v0.0.16 // indirect
github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect
github.com/muesli/cancelreader v0.2.2 // indirect
github.com/muesli/termenv v0.16.0 // indirect
github.com/rivo/uniseg v0.4.7 // indirect
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
golang.org/x/sync v0.15.0 // indirect
golang.org/x/sys v0.43.0 // indirect
golang.org/x/text v0.23.0 // indirect
)

View File

@@ -1,80 +1,7 @@
github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ= golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE= golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4= golang.org/x/term v0.39.0 h1:RclSuaJf32jOqZz74CkPA9qFuVTX7vhLlpfj/IGWlqY=
github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI= golang.org/x/term v0.39.0/go.mod h1:yxzUCTP/U+FzoxfdKmLaA0RV1WgE0VY7hXBwKtY/4ww=
github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
github.com/aymanbagabas/go-udiff v0.3.1 h1:LV+qyBQ2pqe0u42ZsUEtPiCaUoqgA9gYRDs3vj1nolY=
github.com/aymanbagabas/go-udiff v0.3.1/go.mod h1:G0fsKmG+P6ylD0r6N/KgQD/nWzgfnl8ZBcNLgcbrw8E=
github.com/catppuccin/go v0.3.0 h1:d+0/YicIq+hSTo5oPuRi5kOpqkVA5tAsU6dNhvRu+aY=
github.com/catppuccin/go v0.3.0/go.mod h1:8IHJuMGaUUjQM82qBrGNBv7LFq6JI3NnQCF6MOlZjpc=
github.com/charmbracelet/bubbles v0.21.1-0.20250623103423-23b8fd6302d7 h1:JFgG/xnwFfbezlUnFMJy0nusZvytYysV4SCS2cYbvws=
github.com/charmbracelet/bubbles v0.21.1-0.20250623103423-23b8fd6302d7/go.mod h1:ISC1gtLcVilLOf23wvTfoQuYbW2q0JevFxPfUzZ9Ybw=
github.com/charmbracelet/bubbletea v1.3.6 h1:VkHIxPJQeDt0aFJIsVxw8BQdh/F/L2KKZGsK6et5taU=
github.com/charmbracelet/bubbletea v1.3.6/go.mod h1:oQD9VCRQFF8KplacJLo28/jofOI2ToOfGYeFgBBxHOc=
github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc h1:4pZI35227imm7yK2bGPcfpFEmuY1gc2YSTShr4iJBfs=
github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc/go.mod h1:X4/0JoqgTIPSFcRA/P6INZzIuyqdFY5rm8tb41s9okk=
github.com/charmbracelet/huh v1.0.0 h1:wOnedH8G4qzJbmhftTqrpppyqHakl/zbbNdXIWJyIxw=
github.com/charmbracelet/huh v1.0.0/go.mod h1:5YVc+SlZ1IhQALxRPpkGwwEKftN/+OlJlnJYlDRFqN4=
github.com/charmbracelet/lipgloss v1.1.0 h1:vYXsiLHVkK7fp74RkV7b2kq9+zDLoEU4MZoFqR/noCY=
github.com/charmbracelet/lipgloss v1.1.0/go.mod h1:/6Q8FR2o+kj8rz4Dq0zQc3vYf7X+B0binUUBwA0aL30=
github.com/charmbracelet/x/ansi v0.9.3 h1:BXt5DHS/MKF+LjuK4huWrC6NCvHtexww7dMayh6GXd0=
github.com/charmbracelet/x/ansi v0.9.3/go.mod h1:3RQDQ6lDnROptfpWuUVIUG64bD2g2BgntdxH0Ya5TeE=
github.com/charmbracelet/x/cellbuf v0.0.13 h1:/KBBKHuVRbq1lYx5BzEHBAFBP8VcQzJejZ/IA3iR28k=
github.com/charmbracelet/x/cellbuf v0.0.13/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs=
github.com/charmbracelet/x/conpty v0.1.0 h1:4zc8KaIcbiL4mghEON8D72agYtSeIgq8FSThSPQIb+U=
github.com/charmbracelet/x/conpty v0.1.0/go.mod h1:rMFsDJoDwVmiYM10aD4bH2XiRgwI7NYJtQgl5yskjEQ=
github.com/charmbracelet/x/errors v0.0.0-20240508181413-e8d8b6e2de86 h1:JSt3B+U9iqk37QUU2Rvb6DSBYRLtWqFqfxf8l5hOZUA=
github.com/charmbracelet/x/errors v0.0.0-20240508181413-e8d8b6e2de86/go.mod h1:2P0UgXMEa6TsToMSuFqKFQR+fZTO9CNGUNokkPatT/0=
github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91 h1:payRxjMjKgx2PaCWLZ4p3ro9y97+TVLZNaRZgJwSVDQ=
github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91/go.mod h1:wDlXFlCrmJ8J+swcL/MnGUuYnqgQdW9rhSD61oNMb6U=
github.com/charmbracelet/x/exp/strings v0.0.0-20240722160745-212f7b056ed0 h1:qko3AQ4gK1MTS/de7F5hPGx6/k1u0w4TeYmBFwzYVP4=
github.com/charmbracelet/x/exp/strings v0.0.0-20240722160745-212f7b056ed0/go.mod h1:pBhA0ybfXv6hDjQUZ7hk1lVxBiUbupdw5R31yPUViVQ=
github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ=
github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg=
github.com/charmbracelet/x/termios v0.1.1 h1:o3Q2bT8eqzGnGPOYheoYS8eEleT5ZVNYNy8JawjaNZY=
github.com/charmbracelet/x/termios v0.1.1/go.mod h1:rB7fnv1TgOPOyyKRJ9o+AsTU/vK5WHJ2ivHeut/Pcwo=
github.com/charmbracelet/x/xpty v0.1.2 h1:Pqmu4TEJ8KeA9uSkISKMU3f+C1F6OGBn8ABuGlqCbtI=
github.com/charmbracelet/x/xpty v0.1.2/go.mod h1:XK2Z0id5rtLWcpeNiMYBccNNBrP2IJnzHI0Lq13Xzq4=
github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s=
github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE=
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM=
github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4=
github.com/mattn/go-localereader v0.0.1/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88=
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4=
github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/zz4kQkprJgF2EVszyDE=
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 h1:ZK8zHtRHOkbHy6Mmr5D264iyp3TiX5OmNcI5cIARiQI=
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6/go.mod h1:CJlz5H+gyd6CUWT45Oy4q24RdLyn7Md9Vj2/ldJBSIo=
github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA=
github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo=
github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc=
github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no=
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=
golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI=
golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo=
golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.43.0 h1:Rlag2XtaFTxp19wS8MXlJwTvoh8ArU6ezoyFsMyCTNI=
golang.org/x/sys v0.43.0/go.mod h1:4GL1E5IUh+htKOUEOaiffhrAeqysfVGipDYzABqnCmw=
golang.org/x/term v0.42.0 h1:UiKe+zDFmJobeJ5ggPwOshJIVt6/Ft0rcfrXZDLWAWY=
golang.org/x/term v0.42.0/go.mod h1:Dq/D+snpsbazcBG5+F9Q1n2rXV8Ma+71xEjTRufARgY=
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=

View File

@@ -1,208 +1,92 @@
package main package main
import ( import (
"errors" "bufio"
"fmt" "fmt"
"os" "strings"
"strconv" "syscall"
"github.com/charmbracelet/huh"
"golang.org/x/term" "golang.org/x/term"
) )
// pangolinTheme is the custom theme using brand colors func readString(reader *bufio.Reader, prompt string, defaultValue string) string {
var pangolinTheme = ThemePangolin()
// isAccessibleMode checks if we should use accessible mode (simple prompts)
// This is true for: non-TTY, TERM=dumb, or ACCESSIBLE env var set
func isAccessibleMode() bool {
// Check if stdin is not a terminal (piped input, CI, etc.)
if !term.IsTerminal(int(os.Stdin.Fd())) {
return true
}
// Check for dumb terminal
if os.Getenv("TERM") == "dumb" {
return true
}
// Check for explicit accessible mode request
if os.Getenv("ACCESSIBLE") != "" {
return true
}
return false
}
// handleAbort checks if the error is a user abort (Ctrl+C) and exits if so
func handleAbort(err error) {
if err != nil && errors.Is(err, huh.ErrUserAborted) {
fmt.Println("\nInstallation cancelled.")
os.Exit(0)
}
}
// runField runs a single field with the Pangolin theme, handling accessible mode
func runField(field huh.Field) error {
if isAccessibleMode() {
return field.RunAccessible(os.Stdout, os.Stdin)
}
form := huh.NewForm(huh.NewGroup(field)).WithTheme(pangolinTheme)
return form.Run()
}
func readString(prompt string, defaultValue string) string {
var value string
title := prompt
if defaultValue != "" { if defaultValue != "" {
title = fmt.Sprintf("%s (default: %s)", prompt, defaultValue) fmt.Printf("%s (default: %s): ", prompt, defaultValue)
} else {
fmt.Print(prompt + ": ")
} }
input, _ := reader.ReadString('\n')
input := huh.NewInput(). input = strings.TrimSpace(input)
Title(title). if input == "" {
Value(&value) return defaultValue
// If no default value, this field is required
if defaultValue == "" {
input = input.Validate(func(s string) error {
if s == "" {
return fmt.Errorf("this field is required")
}
return nil
})
} }
return input
err := runField(input)
handleAbort(err)
if value == "" {
value = defaultValue
}
// Print the answer so it remains visible in terminal history (skip in accessible mode as it already shows)
if !isAccessibleMode() {
fmt.Printf("%s: %s\n", prompt, value)
}
return value
} }
func readPassword(prompt string) string { func readStringNoDefault(reader *bufio.Reader, prompt string) string {
var value string fmt.Print(prompt + ": ")
input, _ := reader.ReadString('\n')
return strings.TrimSpace(input)
}
func readPassword(prompt string, reader *bufio.Reader) string {
if term.IsTerminal(int(syscall.Stdin)) {
fmt.Print(prompt + ": ")
// Read password without echo if we're in a terminal
password, err := term.ReadPassword(int(syscall.Stdin))
fmt.Println() // Add a newline since ReadPassword doesn't add one
if err != nil {
return ""
}
input := strings.TrimSpace(string(password))
if input == "" {
return readPassword(prompt, reader)
}
return input
} else {
// Fallback to reading from stdin if not in a terminal
return readString(reader, prompt, "")
}
}
func readBool(reader *bufio.Reader, prompt string, defaultValue bool) bool {
defaultStr := "no"
if defaultValue {
defaultStr = "yes"
}
for { for {
input := huh.NewInput(). input := readString(reader, prompt+" (yes/no)", defaultStr)
Title(prompt). lower := strings.ToLower(input)
Value(&value). if lower == "yes" {
EchoMode(huh.EchoModePassword). return true
Validate(func(s string) error { } else if lower == "no" {
if s == "" { return false
return fmt.Errorf("password is required") } else {
} fmt.Println("Please enter 'yes' or 'no'.")
return nil
})
err := runField(input)
handleAbort(err)
if value != "" {
// Print confirmation without revealing the password
if !isAccessibleMode() {
fmt.Printf("%s: %s\n", prompt, "********")
}
return value
} }
} }
} }
func readBool(prompt string, defaultValue bool) bool { func readBoolNoDefault(reader *bufio.Reader, prompt string) bool {
var value = defaultValue for {
input := readStringNoDefault(reader, prompt+" (yes/no)")
confirm := huh.NewConfirm(). lower := strings.ToLower(input)
Title(prompt). if lower == "yes" {
Value(&value). return true
Affirmative("Yes"). } else if lower == "no" {
Negative("No") return false
} else {
err := runField(confirm) fmt.Println("Please enter 'yes' or 'no'.")
handleAbort(err)
// Print the answer so it remains visible in terminal history
if !isAccessibleMode() {
answer := "No"
if value {
answer = "Yes"
} }
fmt.Printf("%s: %s\n", prompt, answer)
} }
return value
} }
func readBoolNoDefault(prompt string) bool { func readInt(reader *bufio.Reader, prompt string, defaultValue int) int {
var value bool input := readString(reader, prompt, fmt.Sprintf("%d", defaultValue))
if input == "" {
confirm := huh.NewConfirm().
Title(prompt).
Value(&value).
Affirmative("Yes").
Negative("No")
err := runField(confirm)
handleAbort(err)
// Print the answer so it remains visible in terminal history
if !isAccessibleMode() {
answer := "No"
if value {
answer = "Yes"
}
fmt.Printf("%s: %s\n", prompt, answer)
}
return value
}
func readInt(prompt string, defaultValue int) int {
var value string
title := fmt.Sprintf("%s (default: %d)", prompt, defaultValue)
input := huh.NewInput().
Title(title).
Value(&value).
Validate(func(s string) error {
if s == "" {
return nil
}
_, err := strconv.Atoi(s)
if err != nil {
return fmt.Errorf("please enter a valid number")
}
return nil
})
err := runField(input)
handleAbort(err)
if value == "" {
// Print the answer so it remains visible in terminal history
if !isAccessibleMode() {
fmt.Printf("%s: %d\n", prompt, defaultValue)
}
return defaultValue return defaultValue
} }
value := defaultValue
result, err := strconv.Atoi(value) fmt.Sscanf(input, "%d", &value)
if err != nil { return value
if !isAccessibleMode() {
fmt.Printf("%s: %d\n", prompt, defaultValue)
}
return defaultValue
}
// Print the answer so it remains visible in terminal history
if !isAccessibleMode() {
fmt.Printf("%s: %d\n", prompt, result)
}
return result
} }
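
The huh-based variant of input.go gates its TUI behind an accessible-mode check so that piped input, CI runs, dumb terminals, and explicit ACCESSIBLE requests all fall back to plain prompts. That check stands alone as a complete program (golang.org/x/term is already in the installer's go.mod above):

// tty_check.go - the accessible-mode detection from the hunk above,
// extracted standalone.
package main

import (
	"fmt"
	"os"

	"golang.org/x/term"
)

func isAccessibleMode() bool {
	// Piped input, CI, etc.: stdin is not a terminal.
	if !term.IsTerminal(int(os.Stdin.Fd())) {
		return true
	}
	// Dumb terminal or explicit opt-in.
	if os.Getenv("TERM") == "dumb" {
		return true
	}
	return os.Getenv("ACCESSIBLE") != ""
}

func main() {
	fmt.Println("accessible mode:", isAccessibleMode())
}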

View File

@@ -1,35 +1,29 @@
package main package main
import ( import (
"crypto/rand" "bufio"
"embed" "embed"
"encoding/base64"
"fmt" "fmt"
"io" "io"
"io/fs" "io/fs"
"math/rand"
"net" "net"
"net/http"
"net/url" "net/url"
"os" "os"
"os/exec" "os/exec"
"path/filepath" "path/filepath"
"runtime" "runtime"
"strconv"
"strings" "strings"
"text/template" "text/template"
"time" "time"
) )
// Version variables injected at build time via -ldflags // DO NOT EDIT THIS FUNCTION; IT IS MATCHED BY REGEX IN CI/CD
var (
pangolinVersion string
gerbilVersion string
badgerVersion string
)
func loadVersions(config *Config) { func loadVersions(config *Config) {
config.PangolinVersion = pangolinVersion config.PangolinVersion = "replaceme"
config.GerbilVersion = gerbilVersion config.GerbilVersion = "replaceme"
config.BadgerVersion = badgerVersion config.BadgerVersion = "replaceme"
} }
//go:embed config/* //go:embed config/*
@@ -87,19 +81,14 @@ func main() {
} }
} }
reader := bufio.NewReader(os.Stdin)
var config Config var config Config
var alreadyInstalled = false var alreadyInstalled = false
// Determine installation directory
installDir := findOrSelectInstallDirectory()
if err := os.Chdir(installDir); err != nil {
fmt.Printf("Error changing to installation directory: %v\n", err)
os.Exit(1)
}
// check if there is already a config file // check if there is already a config file
if _, err := os.Stat("config/config.yml"); err != nil { if _, err := os.Stat("config/config.yml"); err != nil {
config = collectUserInput() config = collectUserInput(reader)
loadVersions(&config) loadVersions(&config)
config.DoCrowdsecInstall = false config.DoCrowdsecInstall = false
@@ -112,10 +101,7 @@ func main() {
os.Exit(1) os.Exit(1)
} }
if err := moveFile("config/docker-compose.yml", "docker-compose.yml"); err != nil { moveFile("config/docker-compose.yml", "docker-compose.yml")
fmt.Printf("Error moving docker-compose.yml: %v\n", err)
os.Exit(1)
}
fmt.Println("\nConfiguration files created successfully!") fmt.Println("\nConfiguration files created successfully!")
@@ -130,17 +116,13 @@ func main() {
fmt.Println("\n=== Starting installation ===") fmt.Println("\n=== Starting installation ===")
if readBool("Would you like to install and start the containers?", true) { if readBool(reader, "Would you like to install and start the containers?", true) {
config.InstallationContainerType = podmanOrDocker() config.InstallationContainerType = podmanOrDocker(reader)
if !isDockerInstalled() && runtime.GOOS == "linux" && config.InstallationContainerType == Docker { if !isDockerInstalled() && runtime.GOOS == "linux" && config.InstallationContainerType == Docker {
if readBool("Docker is not installed. Would you like to install it?", true) { if readBool(reader, "Docker is not installed. Would you like to install it?", true) {
if err := installDocker(); err != nil { installDocker()
fmt.Printf("Error installing Docker: %v\n", err)
return
}
// try to start docker service but ignore errors // try to start docker service but ignore errors
if err := startDockerService(); err != nil { if err := startDockerService(); err != nil {
fmt.Println("Error starting Docker service:", err) fmt.Println("Error starting Docker service:", err)
@@ -149,7 +131,7 @@ func main() {
} }
// wait 10 seconds for docker to start checking if docker is running every 2 seconds // wait 10 seconds for docker to start checking if docker is running every 2 seconds
fmt.Println("Waiting for Docker to start...") fmt.Println("Waiting for Docker to start...")
for range 5 { for i := 0; i < 5; i++ {
if isDockerRunning() { if isDockerRunning() {
fmt.Println("Docker is running!") fmt.Println("Docker is running!")
break break
@@ -184,7 +166,7 @@ func main() {
fmt.Println("\n=== MaxMind Database Update ===") fmt.Println("\n=== MaxMind Database Update ===")
if _, err := os.Stat("config/GeoLite2-Country.mmdb"); err == nil { if _, err := os.Stat("config/GeoLite2-Country.mmdb"); err == nil {
fmt.Println("MaxMind GeoLite2 Country database found.") fmt.Println("MaxMind GeoLite2 Country database found.")
if readBool("Would you like to update the MaxMind database to the latest version?", false) { if readBool(reader, "Would you like to update the MaxMind database to the latest version?", false) {
if err := downloadMaxMindDatabase(); err != nil { if err := downloadMaxMindDatabase(); err != nil {
fmt.Printf("Error updating MaxMind database: %v\n", err) fmt.Printf("Error updating MaxMind database: %v\n", err)
fmt.Println("You can try updating it manually later if needed.") fmt.Println("You can try updating it manually later if needed.")
@@ -192,7 +174,7 @@ func main() {
} }
} else { } else {
fmt.Println("MaxMind GeoLite2 Country database not found.") fmt.Println("MaxMind GeoLite2 Country database not found.")
if readBool("Would you like to download the MaxMind GeoLite2 database for geoblocking functionality?", false) { if readBool(reader, "Would you like to download the MaxMind GeoLite2 database for geoblocking functionality?", false) {
if err := downloadMaxMindDatabase(); err != nil { if err := downloadMaxMindDatabase(); err != nil {
fmt.Printf("Error downloading MaxMind database: %v\n", err) fmt.Printf("Error downloading MaxMind database: %v\n", err)
fmt.Println("You can try downloading it manually later if needed.") fmt.Println("You can try downloading it manually later if needed.")
@@ -209,11 +191,11 @@ func main() {
if !checkIsCrowdsecInstalledInCompose() { if !checkIsCrowdsecInstalledInCompose() {
fmt.Println("\n=== CrowdSec Install ===") fmt.Println("\n=== CrowdSec Install ===")
// check if crowdsec is installed // check if crowdsec is installed
if readBool("Would you like to install CrowdSec?", false) { if readBool(reader, "Would you like to install CrowdSec?", false) {
fmt.Println("This installer constitutes a minimal viable CrowdSec deployment. CrowdSec will add extra complexity to your Pangolin installation and may not work to the best of its abilities out of the box. Users are expected to implement configuration adjustments on their own to achieve the best security posture. Consult the CrowdSec documentation for detailed configuration instructions.") fmt.Println("This installer constitutes a minimal viable CrowdSec deployment. CrowdSec will add extra complexity to your Pangolin installation and may not work to the best of its abilities out of the box. Users are expected to implement configuration adjustments on their own to achieve the best security posture. Consult the CrowdSec documentation for detailed configuration instructions.")
// BUG: crowdsec installation will be skipped if the user chooses to install on the first installation. // BUG: crowdsec installation will be skipped if the user chooses to install on the first installation.
if readBool("Are you willing to manage CrowdSec?", false) { if readBool(reader, "Are you willing to manage CrowdSec?", false) {
if config.DashboardDomain == "" { if config.DashboardDomain == "" {
traefikConfig, err := ReadTraefikConfig("config/traefik/traefik_config.yml") traefikConfig, err := ReadTraefikConfig("config/traefik/traefik_config.yml")
if err != nil { if err != nil {
@@ -242,8 +224,8 @@ func main() {
fmt.Printf("Let's Encrypt Email: %s\n", config.LetsEncryptEmail) fmt.Printf("Let's Encrypt Email: %s\n", config.LetsEncryptEmail)
fmt.Printf("Badger Version: %s\n", config.BadgerVersion) fmt.Printf("Badger Version: %s\n", config.BadgerVersion)
if !readBool("Are these values correct?", true) { if !readBool(reader, "Are these values correct?", true) {
config = collectUserInput() config = collectUserInput(reader)
} }
} }
@@ -252,14 +234,14 @@ func main() {
if detectedType == Undefined { if detectedType == Undefined {
// If detection fails, prompt the user // If detection fails, prompt the user
fmt.Println("Unable to detect container type from existing installation.") fmt.Println("Unable to detect container type from existing installation.")
config.InstallationContainerType = podmanOrDocker() config.InstallationContainerType = podmanOrDocker(reader)
} else { } else {
config.InstallationContainerType = detectedType config.InstallationContainerType = detectedType
fmt.Printf("Detected container type: %s\n", config.InstallationContainerType) fmt.Printf("Detected container type: %s\n", config.InstallationContainerType)
} }
config.DoCrowdsecInstall = true config.DoCrowdsecInstall = true
err := installCrowdsec(config, installDir) err := installCrowdsec(config)
if err != nil { if err != nil {
fmt.Printf("Error installing CrowdSec: %v\n", err) fmt.Printf("Error installing CrowdSec: %v\n", err)
return return
@@ -294,119 +276,8 @@ func main() {
fmt.Printf("\nTo complete the initial setup, please visit:\nhttps://%s/auth/initial-setup\n", config.DashboardDomain) fmt.Printf("\nTo complete the initial setup, please visit:\nhttps://%s/auth/initial-setup\n", config.DashboardDomain)
} }
func hasExistingInstall(dir string) bool { func podmanOrDocker(reader *bufio.Reader) SupportedContainer {
configPath := filepath.Join(dir, "config", "config.yml") inputContainer := readString(reader, "Would you like to run Pangolin as Docker or Podman containers?", "docker")
_, err := os.Stat(configPath)
return err == nil
}
func findOrSelectInstallDirectory() string {
const defaultInstallDir = "/opt/pangolin"
// Get current working directory
cwd, err := os.Getwd()
if err != nil {
fmt.Printf("Error getting current directory: %v\n", err)
os.Exit(1)
}
// 1. Check current directory for existing install
if hasExistingInstall(cwd) {
fmt.Printf("Found existing Pangolin installation in current directory: %s\n", cwd)
return cwd
}
// 2. Check default location (/opt/pangolin) for existing install
if cwd != defaultInstallDir && hasExistingInstall(defaultInstallDir) {
fmt.Printf("\nFound existing Pangolin installation at: %s\n", defaultInstallDir)
if readBool(fmt.Sprintf("Would you like to use the existing installation at %s?", defaultInstallDir), true) {
return defaultInstallDir
}
}
// 3. No existing install found, prompt for installation directory
fmt.Println("\n=== Installation Directory ===")
fmt.Println("No existing Pangolin installation detected.")
installDir := readString("Enter the installation directory", defaultInstallDir)
// Expand ~ to home directory if present
if strings.HasPrefix(installDir, "~") {
home, err := os.UserHomeDir()
if err != nil {
fmt.Printf("Error getting home directory: %v\n", err)
os.Exit(1)
}
installDir = filepath.Join(home, installDir[1:])
}
// Convert to absolute path
absPath, err := filepath.Abs(installDir)
if err != nil {
fmt.Printf("Error resolving path: %v\n", err)
os.Exit(1)
}
installDir = absPath
// Check if directory exists
if _, err := os.Stat(installDir); os.IsNotExist(err) {
// Directory doesn't exist, create it
if readBool(fmt.Sprintf("Directory %s does not exist. Create it?", installDir), true) {
if err := os.MkdirAll(installDir, 0755); err != nil {
fmt.Printf("Error creating directory: %v\n", err)
os.Exit(1)
}
fmt.Printf("Created directory: %s\n", installDir)
// Offer to change ownership if running via sudo
changeDirectoryOwnership(installDir)
} else {
fmt.Println("Installation cancelled.")
os.Exit(0)
}
}
fmt.Printf("Installation directory: %s\n", installDir)
return installDir
}
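The `~` handling above joins everything after the tilde onto the invoking user's home directory, so a path like ~alice/pangolin would silently become $HOME/alice/pangolin. A minimal sketch of a stricter variant, assuming a hypothetical expandHome helper (not part of the installer) that only expands a bare ~ or a ~/ prefix:

// expandHome is a hypothetical helper: it expands "~" and "~/..."
// against the current user's home directory and deliberately leaves
// "~otheruser/..." untouched rather than mis-resolving it.
func expandHome(path string) (string, error) {
    if path == "~" || strings.HasPrefix(path, "~/") {
        home, err := os.UserHomeDir()
        if err != nil {
            return "", err
        }
        return filepath.Join(home, strings.TrimPrefix(path, "~")), nil
    }
    return path, nil
}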
func changeDirectoryOwnership(dir string) {
// Check if we're running via sudo by looking for SUDO_USER
sudoUser := os.Getenv("SUDO_USER")
if sudoUser == "" || os.Geteuid() != 0 {
return
}
sudoUID := os.Getenv("SUDO_UID")
sudoGID := os.Getenv("SUDO_GID")
if sudoUID == "" || sudoGID == "" {
return
}
fmt.Printf("\nRunning as root via sudo (original user: %s)\n", sudoUser)
if readBool(fmt.Sprintf("Would you like to change ownership of %s to user '%s'? This makes it easier to manage config files without sudo.", dir, sudoUser), true) {
uid, err := strconv.Atoi(sudoUID)
if err != nil {
fmt.Printf("Warning: Could not parse SUDO_UID: %v\n", err)
return
}
gid, err := strconv.Atoi(sudoGID)
if err != nil {
fmt.Printf("Warning: Could not parse SUDO_GID: %v\n", err)
return
}
if err := os.Chown(dir, uid, gid); err != nil {
fmt.Printf("Warning: Could not change ownership: %v\n", err)
} else {
fmt.Printf("Changed ownership of %s to %s\n", dir, sudoUser)
}
}
}
-func podmanOrDocker() SupportedContainer {
-inputContainer := readString("Would you like to run Pangolin as Docker or Podman containers?", "docker")
+func podmanOrDocker(reader *bufio.Reader) SupportedContainer {
+inputContainer := readString(reader, "Would you like to run Pangolin as Docker or Podman containers?", "docker")
chosenContainer := Docker
if strings.EqualFold(inputContainer, "docker") {
@@ -418,8 +289,7 @@ func podmanOrDocker() SupportedContainer {
os.Exit(1)
}
-switch chosenContainer {
-case Podman:
+if chosenContainer == Podman {
if !isPodmanInstalled() {
fmt.Println("Podman or podman-compose is not installed. Please install both manually. Automated installation will be available in a later release.")
os.Exit(1)
@@ -428,7 +298,7 @@ func podmanOrDocker() SupportedContainer {
if err := exec.Command("bash", "-c", "cat /etc/sysctl.d/99-podman.conf 2>/dev/null | grep 'net.ipv4.ip_unprivileged_port_start=' || cat /etc/sysctl.conf 2>/dev/null | grep 'net.ipv4.ip_unprivileged_port_start='").Run(); err != nil {
fmt.Println("Would you like to configure ports >= 80 as unprivileged ports? This enables podman containers to listen on low-range ports.")
fmt.Println("Pangolin will experience startup issues if this is not configured, because it needs to listen on port 80/443 by default.")
-approved := readBool("The installer is about to execute \"echo 'net.ipv4.ip_unprivileged_port_start=80' > /etc/sysctl.d/99-podman.conf && sysctl --system\". Approve?", true)
+approved := readBool(reader, "The installer is about to execute \"echo 'net.ipv4.ip_unprivileged_port_start=80' > /etc/sysctl.d/99-podman.conf && sysctl --system\". Approve?", true)
if approved {
if os.Geteuid() != 0 {
fmt.Println("You need to run the installer as root for such a configuration.")
@@ -440,7 +310,7 @@ func podmanOrDocker() SupportedContainer {
// Linux only.
if err := run("bash", "-c", "echo 'net.ipv4.ip_unprivileged_port_start=80' > /etc/sysctl.d/99-podman.conf && sysctl --system"); err != nil {
fmt.Printf("Error configuring unprivileged ports: %v\n", err)
os.Exit(1)
}
} else {
@@ -450,7 +320,7 @@ func podmanOrDocker() SupportedContainer {
fmt.Println("Unprivileged ports have been configured.") fmt.Println("Unprivileged ports have been configured.")
} }
case Docker: } else if chosenContainer == Docker {
// check if docker is not installed and the user is root // check if docker is not installed and the user is root
if !isDockerInstalled() { if !isDockerInstalled() {
if os.Geteuid() != 0 { if os.Geteuid() != 0 {
@@ -465,7 +335,7 @@ func podmanOrDocker() SupportedContainer {
fmt.Println("The installer will not be able to run docker commands without running it as root.") fmt.Println("The installer will not be able to run docker commands without running it as root.")
os.Exit(1) os.Exit(1)
} }
default: } else {
// This shouldn't happen unless there's a third container runtime. // This shouldn't happen unless there's a third container runtime.
os.Exit(1) os.Exit(1)
} }
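isPodmanInstalled and isDockerInstalled are referenced here but defined outside this hunk. A plausible sketch, assuming they only need to check PATH (the project's real helpers may do more, such as verifying the daemon is reachable):

// Sketch only: exec.LookPath reports whether a binary resolves on PATH.
func isPodmanInstalled() bool {
    _, podmanErr := exec.LookPath("podman")
    _, composeErr := exec.LookPath("podman-compose")
    return podmanErr == nil && composeErr == nil
}

func isDockerInstalled() bool {
    _, err := exec.LookPath("docker")
    return err == nil
}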
@@ -473,35 +343,35 @@ func podmanOrDocker() SupportedContainer {
return chosenContainer
}
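Both sides of this diff lean on small prompt helpers (readString, readBool, readInt, readPassword) whose definitions fall outside these hunks. A minimal sketch of the reader-threaded readString used on the new side, assuming a shared *bufio.Reader over stdin:

// Assumed shape only; the project's actual helper may differ.
func readString(reader *bufio.Reader, prompt string, defaultValue string) string {
    fmt.Printf("%s [%s]: ", prompt, defaultValue)
    line, err := reader.ReadString('\n')
    if err != nil {
        return defaultValue
    }
    if line = strings.TrimSpace(line); line != "" {
        return line
    }
    return defaultValue
}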
-func collectUserInput() Config {
+func collectUserInput(reader *bufio.Reader) Config {
config := Config{}
// Basic configuration
fmt.Println("\n=== Basic Configuration ===")
-config.IsEnterprise = readBoolNoDefault("Do you want to install the Enterprise version of Pangolin? The EE is free for personal use or for businesses making less than 100k USD annually.")
-config.BaseDomain = readString("Enter your base domain (no subdomain e.g. example.com)", "")
+config.IsEnterprise = readBoolNoDefault(reader, "Do you want to install the Enterprise version of Pangolin? The EE is free for personal use or for businesses making less than 100k USD annually.")
+config.BaseDomain = readString(reader, "Enter your base domain (no subdomain e.g. example.com)", "")
// Set default dashboard domain after base domain is collected
defaultDashboardDomain := ""
if config.BaseDomain != "" {
defaultDashboardDomain = "pangolin." + config.BaseDomain
}
-config.DashboardDomain = readString("Enter the domain for the Pangolin dashboard", defaultDashboardDomain)
-config.LetsEncryptEmail = readString("Enter email for Let's Encrypt certificates", "")
-config.InstallGerbil = readBool("Do you want to use Gerbil to allow tunneled connections", true)
+config.DashboardDomain = readString(reader, "Enter the domain for the Pangolin dashboard", defaultDashboardDomain)
+config.LetsEncryptEmail = readString(reader, "Enter email for Let's Encrypt certificates", "")
+config.InstallGerbil = readBool(reader, "Do you want to use Gerbil to allow tunneled connections", true)
// Email configuration
fmt.Println("\n=== Email Configuration ===")
-config.EnableEmail = readBool("Enable email functionality (SMTP)", false)
+config.EnableEmail = readBool(reader, "Enable email functionality (SMTP)", false)
if config.EnableEmail {
-config.EmailSMTPHost = readString("Enter SMTP host", "")
-config.EmailSMTPPort = readInt("Enter SMTP port (default 587)", 587)
-config.EmailSMTPUser = readString("Enter SMTP username", "")
-config.EmailSMTPPass = readPassword("Enter SMTP password")
-config.EmailNoReply = readString("Enter no-reply email address (often the same as SMTP username)", "")
+config.EmailSMTPHost = readString(reader, "Enter SMTP host", "")
+config.EmailSMTPPort = readInt(reader, "Enter SMTP port (default 587)", 587)
+config.EmailSMTPUser = readString(reader, "Enter SMTP username", "")
+config.EmailSMTPPass = readString(reader, "Enter SMTP password", "") // Should this be readPassword?
+config.EmailNoReply = readString(reader, "Enter no-reply email address (often the same as SMTP username)", "")
}
// Validate required fields
@@ -522,8 +392,8 @@ func collectUserInput() Config {
fmt.Println("\n=== Advanced Configuration ===") fmt.Println("\n=== Advanced Configuration ===")
config.EnableIPv6 = readBool("Is your server IPv6 capable?", true) config.EnableIPv6 = readBool(reader, "Is your server IPv6 capable?", true)
config.EnableGeoblocking = readBool("Do you want to download the MaxMind GeoLite2 database for geoblocking functionality?", true) config.EnableGeoblocking = readBool(reader, "Do you want to download the MaxMind GeoLite2 database for geoblocking functionality?", true)
if config.DashboardDomain == "" { if config.DashboardDomain == "" {
fmt.Println("Error: Dashboard Domain name is required") fmt.Println("Error: Dashboard Domain name is required")
@@ -534,23 +404,15 @@ func collectUserInput() Config {
}
func createConfigFiles(config Config) error {
-if err := os.MkdirAll("config", 0755); err != nil {
-return fmt.Errorf("failed to create config directory: %v", err)
-}
-if err := os.MkdirAll("config/letsencrypt", 0755); err != nil {
-return fmt.Errorf("failed to create letsencrypt directory: %v", err)
-}
-if err := os.MkdirAll("config/db", 0755); err != nil {
-return fmt.Errorf("failed to create db directory: %v", err)
-}
-if err := os.MkdirAll("config/logs", 0755); err != nil {
-return fmt.Errorf("failed to create logs directory: %v", err)
-}
+os.MkdirAll("config", 0755)
+os.MkdirAll("config/letsencrypt", 0755)
+os.MkdirAll("config/db", 0755)
+os.MkdirAll("config/logs", 0755)
// Walk through all embedded files
-err := fs.WalkDir(configFiles, "config", func(path string, d fs.DirEntry, walkErr error) (err error) {
-if walkErr != nil {
-return walkErr
+err := fs.WalkDir(configFiles, "config", func(path string, d fs.DirEntry, err error) error {
+if err != nil {
+return err
}
// Skip the root fs directory itself
@@ -601,11 +463,7 @@ func createConfigFiles(config Config) error {
if err != nil {
return fmt.Errorf("failed to create %s: %v", path, err)
}
-defer func() {
-if cerr := outFile.Close(); cerr != nil && err == nil {
-err = cerr
-}
-}()
+defer outFile.Close()
// Execute template
if err := tmpl.Execute(outFile, config); err != nil {
@@ -621,26 +479,18 @@ func createConfigFiles(config Config) error {
return nil
}
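createConfigFiles walks an embed.FS named configFiles. A self-contained sketch of that embed plus fs.WalkDir pattern, stripped down to just listing the embedded files (the directory name is assumed to match the installer's config tree):

package main

import (
    "embed"
    "fmt"
    "io/fs"
)

//go:embed config
var configFiles embed.FS

func listEmbeddedConfig() error {
    return fs.WalkDir(configFiles, "config", func(path string, d fs.DirEntry, err error) error {
        if err != nil {
            return err // propagate walk errors, as both sides of the diff do
        }
        if !d.IsDir() {
            fmt.Println(path) // one line per embedded template
        }
        return nil
    })
}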
-func copyFile(src, dst string) (err error) {
+func copyFile(src, dst string) error {
source, err := os.Open(src)
if err != nil {
return err
}
-defer func() {
-if cerr := source.Close(); cerr != nil && err == nil {
-err = cerr
-}
-}()
+defer source.Close()
destination, err := os.Create(dst)
if err != nil {
return err
}
-defer func() {
-if cerr := destination.Close(); cerr != nil && err == nil {
-err = cerr
-}
-}()
+defer destination.Close()
_, err = io.Copy(destination, source)
return err
@@ -711,24 +561,22 @@ func showSetupTokenInstructions(containerType SupportedContainer, dashboardDomai
fmt.Println("To get your setup token, you need to:") fmt.Println("To get your setup token, you need to:")
fmt.Println("") fmt.Println("")
fmt.Println("1. Start the containers") fmt.Println("1. Start the containers")
switch containerType { if containerType == Docker {
case Docker:
fmt.Println(" docker compose up -d") fmt.Println(" docker compose up -d")
case Podman: } else if containerType == Podman {
fmt.Println(" podman-compose up -d") fmt.Println(" podman-compose up -d")
} else {
} }
fmt.Println("") fmt.Println("")
fmt.Println("2. Wait for the Pangolin container to start and generate the token") fmt.Println("2. Wait for the Pangolin container to start and generate the token")
fmt.Println("") fmt.Println("")
fmt.Println("3. Check the container logs for the setup token") fmt.Println("3. Check the container logs for the setup token")
switch containerType { if containerType == Docker {
case Docker:
fmt.Println(" docker logs pangolin | grep -A 2 -B 2 'SETUP TOKEN'") fmt.Println(" docker logs pangolin | grep -A 2 -B 2 'SETUP TOKEN'")
case Podman: } else if containerType == Podman {
fmt.Println(" podman logs pangolin | grep -A 2 -B 2 'SETUP TOKEN'") fmt.Println(" podman logs pangolin | grep -A 2 -B 2 'SETUP TOKEN'")
} else {
} }
fmt.Println("") fmt.Println("")
fmt.Println("4. Look for output like") fmt.Println("4. Look for output like")
fmt.Println(" === SETUP TOKEN GENERATED ===") fmt.Println(" === SETUP TOKEN GENERATED ===")
@@ -744,12 +592,43 @@ func showSetupTokenInstructions(containerType SupportedContainer, dashboardDomai
}
func generateRandomSecretKey() string {
-secret := make([]byte, 32)
-_, err := rand.Read(secret)
-if err != nil {
-panic(fmt.Sprintf("Failed to generate random secret key: %v", err))
-}
-return base64.StdEncoding.EncodeToString(secret)
+const charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
+const length = 32
+var seededRand *rand.Rand = rand.New(
+rand.NewSource(time.Now().UnixNano()))
+b := make([]byte, length)
+for i := range b {
+b[i] = charset[seededRand.Intn(len(charset))]
+}
+return string(b)
}
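The replacement on the new side draws from math/rand seeded with the wall clock, which is fast but predictable. If the value guards anything security-sensitive, a crypto/rand variant (a sketch close to the removed code above, returning an error instead of panicking) is the safer default:

import (
    "crypto/rand"
    "encoding/base64"
)

// Sketch: crypto/rand needs no seeding and is suitable for secrets.
func generateSecretKeySecure() (string, error) {
    secret := make([]byte, 32)
    if _, err := rand.Read(secret); err != nil {
        return "", err
    }
    return base64.StdEncoding.EncodeToString(secret), nil
}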
func getPublicIP() string {
client := &http.Client{
Timeout: 10 * time.Second,
}
resp, err := client.Get("https://ifconfig.io/ip")
if err != nil {
return ""
}
defer resp.Body.Close()
body, err := io.ReadAll(resp.Body)
if err != nil {
return ""
}
ip := strings.TrimSpace(string(body))
// Validate that it's a valid IP address
if net.ParseIP(ip) != nil {
return ip
}
return ""
}
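getPublicIP depends on a single echo service. A hedged sketch that falls through a few well-known endpoints in order (the extra URLs are assumptions, not services the installer actually uses):

// Sketch: try each IP echo service in turn; "" means all failed.
func publicIPWithFallback() string {
    client := &http.Client{Timeout: 5 * time.Second}
    endpoints := []string{"https://ifconfig.io/ip", "https://api.ipify.org", "https://icanhazip.com"}
    for _, url := range endpoints {
        resp, err := client.Get(url)
        if err != nil {
            continue
        }
        body, err := io.ReadAll(resp.Body)
        resp.Body.Close()
        if err != nil {
            continue
        }
        if ip := strings.TrimSpace(string(body)); net.ParseIP(ip) != nil {
            return ip
        }
    }
    return ""
}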
// Run external commands with stdio/stderr attached.
@@ -764,7 +643,10 @@ func checkPortsAvailable(port int) error {
addr := fmt.Sprintf(":%d", port)
ln, err := net.Listen("tcp", addr)
if err != nil {
-return fmt.Errorf("ERROR: port %d is occupied or cannot be bound: %w", port, err)
+return fmt.Errorf(
+"ERROR: port %d is occupied or cannot be bound: %w\n\n",
+port, err,
+)
}
if closeErr := ln.Close(); closeErr != nil {
fmt.Fprintf(os.Stderr,

View File

@@ -1,51 +0,0 @@
package main
import (
"github.com/charmbracelet/huh"
"github.com/charmbracelet/lipgloss"
)
// Pangolin brand colors (converted from oklch to hex)
var (
// Primary orange/amber - oklch(0.6717 0.1946 41.93)
primaryColor = lipgloss.AdaptiveColor{Light: "#D97706", Dark: "#F59E0B"}
// Muted foreground
mutedColor = lipgloss.AdaptiveColor{Light: "#737373", Dark: "#A3A3A3"}
// Success green
successColor = lipgloss.AdaptiveColor{Light: "#16A34A", Dark: "#22C55E"}
// Error red - oklch(0.577 0.245 27.325)
errorColor = lipgloss.AdaptiveColor{Light: "#DC2626", Dark: "#EF4444"}
// Normal text
normalFg = lipgloss.AdaptiveColor{Light: "#171717", Dark: "#FAFAFA"}
)
// ThemePangolin returns a huh theme using Pangolin brand colors
func ThemePangolin() *huh.Theme {
t := huh.ThemeBase()
// Focused state styles
t.Focused.Base = t.Focused.Base.BorderForeground(primaryColor)
t.Focused.Title = t.Focused.Title.Foreground(primaryColor).Bold(true)
t.Focused.Description = t.Focused.Description.Foreground(mutedColor)
t.Focused.ErrorIndicator = t.Focused.ErrorIndicator.Foreground(errorColor)
t.Focused.ErrorMessage = t.Focused.ErrorMessage.Foreground(errorColor)
t.Focused.SelectSelector = t.Focused.SelectSelector.Foreground(primaryColor)
t.Focused.NextIndicator = t.Focused.NextIndicator.Foreground(primaryColor)
t.Focused.PrevIndicator = t.Focused.PrevIndicator.Foreground(primaryColor)
t.Focused.Option = t.Focused.Option.Foreground(normalFg)
t.Focused.SelectedOption = t.Focused.SelectedOption.Foreground(primaryColor)
t.Focused.SelectedPrefix = lipgloss.NewStyle().Foreground(successColor).SetString("✓ ")
t.Focused.UnselectedPrefix = lipgloss.NewStyle().Foreground(mutedColor).SetString(" ")
t.Focused.FocusedButton = t.Focused.FocusedButton.Foreground(lipgloss.Color("#FFFFFF")).Background(primaryColor)
t.Focused.BlurredButton = t.Focused.BlurredButton.Foreground(normalFg).Background(lipgloss.AdaptiveColor{Light: "#E5E5E5", Dark: "#404040"})
t.Focused.TextInput.Cursor = t.Focused.TextInput.Cursor.Foreground(primaryColor)
t.Focused.TextInput.Prompt = t.Focused.TextInput.Prompt.Foreground(primaryColor)
// Blurred state inherits from focused but with hidden border
t.Blurred = t.Focused
t.Blurred.Base = t.Focused.Base.BorderStyle(lipgloss.HiddenBorder())
t.Blurred.Title = t.Blurred.Title.Foreground(mutedColor).Bold(false)
t.Blurred.TextInput.Prompt = t.Blurred.TextInput.Prompt.Foreground(mutedColor)
return t
}
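A theme like this is attached when a form is built. A minimal usage sketch, assuming the standard charmbracelet/huh API (the form field is illustrative, not taken from the installer):

// Sketch: run a single confirm prompt styled with the Pangolin theme.
var proceed bool
form := huh.NewForm(
    huh.NewGroup(
        huh.NewConfirm().
            Title("Continue installation?").
            Value(&proceed),
    ),
).WithTheme(ThemePangolin())
if err := form.Run(); err != nil {
    // user aborted the prompt (e.g. ctrl+c)
}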

View File

@@ -1,137 +0,0 @@
import os
import sys
# --- Configuration ---
# The header text to be added to the files.
HEADER_TEXT = """/*
* This file is part of a proprietary work.
*
* Copyright (c) 2025-2026 Fossorial, Inc.
* All rights reserved.
*
* This file is licensed under the Fossorial Commercial License.
* You may not use this file except in compliance with the License.
* Unauthorized use, copying, modification, or distribution is strictly prohibited.
*
* This file is not licensed under the AGPLv3.
*/
"""
HEADER_NORMALIZED = HEADER_TEXT.strip()
def extract_leading_block_comment(content):
"""
If the file content begins with a /* ... */ block comment, return the
full text of that comment (including the delimiters) and the index at
which the rest of the file starts (after any trailing newlines).
Returns (None, 0) when no such comment is found.
"""
stripped = content.lstrip()
if not stripped.startswith('/*'):
return None, 0
# Account for any leading whitespace before the comment
comment_start = content.index('/*')
end_marker = content.find('*/', comment_start + 2)
if end_marker == -1:
return None, 0
comment_end = end_marker + 2 # position just after '*/'
comment_text = content[comment_start:comment_end].strip()
# Advance past any whitespace / newlines that follow the closing */
rest_start = comment_end
while rest_start < len(content) and content[rest_start] in '\n\r':
rest_start += 1
return comment_text, rest_start
def should_add_header(file_path):
"""
Checks if a file should receive the commercial license header.
Returns True if 'server/private' is in the path.
"""
if 'server/private' in file_path.lower():
return True
return False
def process_directory(root_dir):
"""
Recursively scans a directory and adds/replaces/removes headers in
qualifying .ts or .tsx files, skipping any 'node_modules' directories.
"""
print(f"Scanning directory: {root_dir}")
files_processed = 0
files_modified = 0
for root, dirs, files in os.walk(root_dir):
# Exclude 'node_modules' directories from the scan.
if 'node_modules' in dirs:
dirs.remove('node_modules')
for file in files:
if not (file.endswith('.ts') or file.endswith('.tsx')):
continue
file_path = os.path.join(root, file)
files_processed += 1
try:
with open(file_path, 'r', encoding='utf-8') as f:
original_content = f.read()
existing_comment, body_start = extract_leading_block_comment(
original_content
)
has_any_header = existing_comment is not None
has_correct_header = existing_comment == HEADER_NORMALIZED
body = original_content[body_start:] if has_any_header else original_content
if should_add_header(file_path):
if has_correct_header:
print(f"Header up-to-date: {file_path}")
else:
# Either no header exists or the header is outdated - write
# the correct one.
action = "Replaced header in" if has_any_header else "Added header to"
new_content = HEADER_NORMALIZED + '\n\n' + body
with open(file_path, 'w', encoding='utf-8') as f:
f.write(new_content)
print(f"{action}: {file_path}")
files_modified += 1
else:
if has_any_header:
# Remove the header - it shouldn't be here.
with open(file_path, 'w', encoding='utf-8') as f:
f.write(body)
print(f"Removed header from: {file_path}")
files_modified += 1
else:
print(f"No header needed: {file_path}")
except Exception as e:
print(f"Error processing file {file_path}: {e}")
print("\n--- Scan Complete ---")
print(f"Total .ts or .tsx files found: {files_processed}")
print(f"Files modified (added/replaced/removed): {files_modified}")
if __name__ == "__main__":
# Get the target directory from the command line arguments.
# If no directory is provided, it uses the current directory ('.').
if len(sys.argv) > 1:
target_directory = sys.argv[1]
else:
target_directory = '.' # Default to current directory
if not os.path.isdir(target_directory):
print(f"Error: Directory '{target_directory}' not found.")
sys.exit(1)
process_directory(os.path.abspath(target_directory))
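The script takes the scan root as its only optional argument, so a typical invocation (script filename hypothetical) would be `python add_license_headers.py server/`; run from the repository root, it adds the header to files under server/private and strips it from everything else.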

15 file diffs suppressed because they are too large.

Some files were not shown because too many files have changed in this diff.