Compare commits
7 Commits
dependabot
...
policies
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
7fa44981cf | ||
|
|
1bacad7854 | ||
|
|
75cec731e8 | ||
|
|
16a88281bb | ||
|
|
1574cbc5df | ||
|
|
2cb2a115b0 | ||
|
|
9dce7b2cde |
@@ -28,9 +28,5 @@ LICENSE
|
||||
CONTRIBUTING.md
|
||||
dist
|
||||
.git
|
||||
server/migrations/
|
||||
config/
|
||||
build.ts
|
||||
tsconfig.json
|
||||
Dockerfile*
|
||||
drizzle.config.ts
|
||||
migrations/
|
||||
config/
|
||||
@@ -1,3 +1,6 @@
|
||||
{
|
||||
"extends": ["next/core-web-vitals", "next/typescript"]
|
||||
"extends": [
|
||||
"next/core-web-vitals",
|
||||
"next/typescript"
|
||||
]
|
||||
}
|
||||
|
||||
14
.github/dependabot.yml
vendored
@@ -44,9 +44,19 @@ updates:
|
||||
schedule:
|
||||
interval: "daily"
|
||||
groups:
|
||||
patch-updates:
|
||||
dev-patch-updates:
|
||||
dependency-type: "development"
|
||||
update-types:
|
||||
- "patch"
|
||||
minor-updates:
|
||||
dev-minor-updates:
|
||||
dependency-type: "development"
|
||||
update-types:
|
||||
- "minor"
|
||||
prod-patch-updates:
|
||||
dependency-type: "production"
|
||||
update-types:
|
||||
- "patch"
|
||||
prod-minor-updates:
|
||||
dependency-type: "production"
|
||||
update-types:
|
||||
- "minor"
|
||||
554
.github/workflows/cicd.yml
vendored
@@ -1,270 +1,34 @@
|
||||
name: Public CICD Pipeline
|
||||
|
||||
# CI/CD workflow for building, publishing, mirroring, signing container images and building release binaries.
|
||||
# Actions are pinned to specific SHAs to reduce supply-chain risk. This workflow triggers on tag push events.
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write # for GHCR push
|
||||
id-token: write # for Cosign Keyless (OIDC) Signing
|
||||
|
||||
# Required secrets:
|
||||
# - DOCKER_HUB_USERNAME / DOCKER_HUB_ACCESS_TOKEN: push to Docker Hub
|
||||
# - GITHUB_TOKEN: used for GHCR login and OIDC keyless signing
|
||||
# - COSIGN_PRIVATE_KEY / COSIGN_PASSWORD / COSIGN_PUBLIC_KEY: for key-based signing
|
||||
name: CI/CD Pipeline
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- "[0-9]+.[0-9]+.[0-9]+"
|
||||
- "[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+"
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
pre-run:
|
||||
release:
|
||||
name: Build and Release
|
||||
runs-on: ubuntu-latest
|
||||
permissions: write-all
|
||||
steps:
|
||||
- name: Configure AWS credentials
|
||||
uses: aws-actions/configure-aws-credentials@v6
|
||||
with:
|
||||
role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/${{ secrets.AWS_ROLE_NAME }}
|
||||
role-duration-seconds: 3600
|
||||
aws-region: ${{ secrets.AWS_REGION }}
|
||||
|
||||
- name: Verify AWS identity
|
||||
run: aws sts get-caller-identity
|
||||
|
||||
- name: Start EC2 instances
|
||||
run: |
|
||||
aws ec2 start-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_ARM_RUNNER }}
|
||||
aws ec2 start-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_AMD_RUNNER }}
|
||||
echo "EC2 instances started"
|
||||
|
||||
|
||||
release-arm:
|
||||
name: Build and Release (ARM64)
|
||||
runs-on: [self-hosted, linux, arm64, us-east-1]
|
||||
needs: [pre-run]
|
||||
if: >-
|
||||
${{
|
||||
needs.pre-run.result == 'success'
|
||||
}}
|
||||
# Job-level timeout to avoid runaway or stuck runs
|
||||
timeout-minutes: 120
|
||||
env:
|
||||
# Target images
|
||||
DOCKERHUB_IMAGE: docker.io/fosrl/${{ github.event.repository.name }}
|
||||
GHCR_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
uses: actions/checkout@v5
|
||||
|
||||
- name: Monitor storage space
|
||||
run: |
|
||||
THRESHOLD=75
|
||||
USED_SPACE=$(df / | grep / | awk '{ print $5 }' | sed 's/%//g')
|
||||
echo "Used space: $USED_SPACE%"
|
||||
if [ "$USED_SPACE" -ge "$THRESHOLD" ]; then
|
||||
echo "Used space is below the threshold of 75% free. Running Docker system prune."
|
||||
echo y | docker system prune -a
|
||||
else
|
||||
echo "Storage space is above the threshold. No action needed."
|
||||
fi
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: docker.io
|
||||
username: ${{ secrets.DOCKER_HUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
|
||||
|
||||
- name: Extract tag name
|
||||
id: get-tag
|
||||
run: echo "TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
|
||||
shell: bash
|
||||
|
||||
- name: Update version in package.json
|
||||
run: |
|
||||
TAG=${{ env.TAG }}
|
||||
sed -i "s/export const APP_VERSION = \".*\";/export const APP_VERSION = \"$TAG\";/" server/lib/consts.ts
|
||||
cat server/lib/consts.ts
|
||||
shell: bash
|
||||
|
||||
- name: Check if release candidate
|
||||
id: check-rc
|
||||
run: |
|
||||
TAG=${{ env.TAG }}
|
||||
if [[ "$TAG" == *"-rc."* ]]; then
|
||||
echo "IS_RC=true" >> $GITHUB_ENV
|
||||
else
|
||||
echo "IS_RC=false" >> $GITHUB_ENV
|
||||
fi
|
||||
shell: bash
|
||||
|
||||
- name: Build and push Docker images (Docker Hub - ARM64)
|
||||
run: |
|
||||
TAG=${{ env.TAG }}
|
||||
if [ "$IS_RC" = "true" ]; then
|
||||
make build-rc-arm tag=$TAG
|
||||
else
|
||||
make build-release-arm tag=$TAG
|
||||
fi
|
||||
echo "Built & pushed ARM64 images to: ${{ env.DOCKERHUB_IMAGE }}:${TAG}"
|
||||
shell: bash
|
||||
|
||||
release-amd:
|
||||
name: Build and Release (AMD64)
|
||||
runs-on: [self-hosted, linux, x64, us-east-1]
|
||||
needs: [pre-run]
|
||||
if: >-
|
||||
${{
|
||||
needs.pre-run.result == 'success'
|
||||
}}
|
||||
# Job-level timeout to avoid runaway or stuck runs
|
||||
timeout-minutes: 120
|
||||
env:
|
||||
# Target images
|
||||
DOCKERHUB_IMAGE: docker.io/fosrl/${{ github.event.repository.name }}
|
||||
GHCR_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
|
||||
- name: Monitor storage space
|
||||
run: |
|
||||
THRESHOLD=75
|
||||
USED_SPACE=$(df / | grep / | awk '{ print $5 }' | sed 's/%//g')
|
||||
echo "Used space: $USED_SPACE%"
|
||||
if [ "$USED_SPACE" -ge "$THRESHOLD" ]; then
|
||||
echo "Used space is below the threshold of 75% free. Running Docker system prune."
|
||||
echo y | docker system prune -a
|
||||
else
|
||||
echo "Storage space is above the threshold. No action needed."
|
||||
fi
|
||||
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
|
||||
with:
|
||||
registry: docker.io
|
||||
username: ${{ secrets.DOCKER_HUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
|
||||
|
||||
- name: Extract tag name
|
||||
id: get-tag
|
||||
run: echo "TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
|
||||
shell: bash
|
||||
|
||||
- name: Update version in package.json
|
||||
run: |
|
||||
TAG=${{ env.TAG }}
|
||||
sed -i "s/export const APP_VERSION = \".*\";/export const APP_VERSION = \"$TAG\";/" server/lib/consts.ts
|
||||
cat server/lib/consts.ts
|
||||
shell: bash
|
||||
|
||||
- name: Check if release candidate
|
||||
id: check-rc
|
||||
run: |
|
||||
TAG=${{ env.TAG }}
|
||||
if [[ "$TAG" == *"-rc."* ]]; then
|
||||
echo "IS_RC=true" >> $GITHUB_ENV
|
||||
else
|
||||
echo "IS_RC=false" >> $GITHUB_ENV
|
||||
fi
|
||||
shell: bash
|
||||
|
||||
- name: Build and push Docker images (Docker Hub - AMD64)
|
||||
run: |
|
||||
TAG=${{ env.TAG }}
|
||||
if [ "$IS_RC" = "true" ]; then
|
||||
make build-rc-amd tag=$TAG
|
||||
else
|
||||
make build-release-amd tag=$TAG
|
||||
fi
|
||||
echo "Built & pushed AMD64 images to: ${{ env.DOCKERHUB_IMAGE }}:${TAG}"
|
||||
shell: bash
|
||||
|
||||
create-manifest:
|
||||
name: Create Multi-Arch Manifests
|
||||
runs-on: [self-hosted, linux, x64, us-east-1]
|
||||
needs: [release-arm, release-amd]
|
||||
if: >-
|
||||
${{
|
||||
needs.release-arm.result == 'success' &&
|
||||
needs.release-amd.result == 'success'
|
||||
}}
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
|
||||
- name: Log in to Docker Hub
|
||||
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
|
||||
with:
|
||||
registry: docker.io
|
||||
username: ${{ secrets.DOCKER_HUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
|
||||
|
||||
- name: Extract tag name
|
||||
id: get-tag
|
||||
run: echo "TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
|
||||
shell: bash
|
||||
|
||||
- name: Check if release candidate
|
||||
id: check-rc
|
||||
run: |
|
||||
TAG=${{ env.TAG }}
|
||||
if [[ "$TAG" == *"-rc."* ]]; then
|
||||
echo "IS_RC=true" >> $GITHUB_ENV
|
||||
else
|
||||
echo "IS_RC=false" >> $GITHUB_ENV
|
||||
fi
|
||||
shell: bash
|
||||
|
||||
- name: Create multi-arch manifests
|
||||
run: |
|
||||
TAG=${{ env.TAG }}
|
||||
if [ "$IS_RC" = "true" ]; then
|
||||
make create-manifests-rc tag=$TAG
|
||||
else
|
||||
make create-manifests tag=$TAG
|
||||
fi
|
||||
echo "Created multi-arch manifests for tag: ${TAG}"
|
||||
shell: bash
|
||||
|
||||
sign-and-package:
|
||||
name: Sign and Package
|
||||
runs-on: [self-hosted, linux, x64, us-east-1]
|
||||
needs: [release-arm, release-amd, create-manifest]
|
||||
if: >-
|
||||
${{
|
||||
needs.release-arm.result == 'success' &&
|
||||
needs.release-amd.result == 'success' &&
|
||||
needs.create-manifest.result == 'success'
|
||||
}}
|
||||
# Job-level timeout to avoid runaway or stuck runs
|
||||
timeout-minutes: 120
|
||||
env:
|
||||
# Target images
|
||||
DOCKERHUB_IMAGE: docker.io/fosrl/${{ github.event.repository.name }}
|
||||
GHCR_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
|
||||
- name: Extract tag name
|
||||
id: get-tag
|
||||
run: echo "TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
|
||||
shell: bash
|
||||
|
||||
- name: Install Go
|
||||
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
|
||||
uses: actions/setup-go@v6
|
||||
with:
|
||||
go-version: 1.24
|
||||
|
||||
@@ -273,314 +37,42 @@ jobs:
|
||||
TAG=${{ env.TAG }}
|
||||
sed -i "s/export const APP_VERSION = \".*\";/export const APP_VERSION = \"$TAG\";/" server/lib/consts.ts
|
||||
cat server/lib/consts.ts
|
||||
shell: bash
|
||||
|
||||
- name: Pull latest Gerbil version
|
||||
id: get-gerbil-tag
|
||||
run: |
|
||||
LATEST_TAG=$(curl -s https://api.github.com/repos/fosrl/gerbil/tags | jq -r '.[0].name')
|
||||
echo "LATEST_GERBIL_TAG=$LATEST_TAG" >> $GITHUB_ENV
|
||||
shell: bash
|
||||
|
||||
- name: Pull latest Badger version
|
||||
id: get-badger-tag
|
||||
run: |
|
||||
LATEST_TAG=$(curl -s https://api.github.com/repos/fosrl/badger/tags | jq -r '.[0].name')
|
||||
echo "LATEST_BADGER_TAG=$LATEST_TAG" >> $GITHUB_ENV
|
||||
shell: bash
|
||||
|
||||
- name: Update install/main.go
|
||||
run: |
|
||||
PANGOLIN_VERSION=${{ env.TAG }}
|
||||
GERBIL_VERSION=${{ env.LATEST_GERBIL_TAG }}
|
||||
BADGER_VERSION=${{ env.LATEST_BADGER_TAG }}
|
||||
sed -i "s/config.PangolinVersion = \".*\"/config.PangolinVersion = \"$PANGOLIN_VERSION\"/" install/main.go
|
||||
sed -i "s/config.GerbilVersion = \".*\"/config.GerbilVersion = \"$GERBIL_VERSION\"/" install/main.go
|
||||
sed -i "s/config.BadgerVersion = \".*\"/config.BadgerVersion = \"$BADGER_VERSION\"/" install/main.go
|
||||
echo "Updated install/main.go with Pangolin version $PANGOLIN_VERSION, Gerbil version $GERBIL_VERSION, and Badger version $BADGER_VERSION"
|
||||
cat install/main.go
|
||||
|
||||
- name: Build installer
|
||||
working-directory: install
|
||||
run: |
|
||||
make go-build-release \
|
||||
PANGOLIN_VERSION=${{ env.TAG }} \
|
||||
GERBIL_VERSION=${{ env.LATEST_GERBIL_TAG }} \
|
||||
BADGER_VERSION=${{ env.LATEST_BADGER_TAG }}
|
||||
shell: bash
|
||||
make go-build-release
|
||||
|
||||
- name: Upload artifacts from /install/bin
|
||||
uses: actions/upload-artifact@b7c566a772e6b6bfb58ed0dc250532a479d7789f # v6.0.0
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: install-bin
|
||||
path: install/bin/
|
||||
|
||||
- name: Install skopeo + jq
|
||||
# skopeo: copy/inspect images between registries
|
||||
# jq: JSON parsing tool used to extract digest values
|
||||
- name: Build and push Docker images
|
||||
run: |
|
||||
sudo apt-get update -y
|
||||
sudo apt-get install -y skopeo jq
|
||||
skopeo --version
|
||||
shell: bash
|
||||
|
||||
- name: Login to GHCR
|
||||
env:
|
||||
REGISTRY_AUTH_FILE: ${{ runner.temp }}/containers/auth.json
|
||||
run: |
|
||||
mkdir -p "$(dirname "$REGISTRY_AUTH_FILE")"
|
||||
skopeo login ghcr.io -u "${{ github.actor }}" -p "${{ secrets.GITHUB_TOKEN }}"
|
||||
shell: bash
|
||||
|
||||
- name: Copy tags from Docker Hub to GHCR
|
||||
# Mirror the already-built images (all architectures) to GHCR so we can sign them
|
||||
# Wait a bit for both architectures to be available in Docker Hub manifest
|
||||
env:
|
||||
REGISTRY_AUTH_FILE: ${{ runner.temp }}/containers/auth.json
|
||||
run: |
|
||||
set -euo pipefail
|
||||
TAG=${{ env.TAG }}
|
||||
MAJOR_TAG=$(echo $TAG | cut -d. -f1)
|
||||
MINOR_TAG=$(echo $TAG | cut -d. -f1,2)
|
||||
|
||||
echo "Waiting for multi-arch manifests to be ready..."
|
||||
sleep 30
|
||||
|
||||
# Determine if this is an RC release
|
||||
IS_RC="false"
|
||||
if [[ "$TAG" == *"-rc."* ]]; then
|
||||
IS_RC="true"
|
||||
fi
|
||||
|
||||
if [ "$IS_RC" = "true" ]; then
|
||||
echo "RC release detected - copying version-specific tags only"
|
||||
|
||||
# SQLite OSS
|
||||
echo "Copying ${{ env.DOCKERHUB_IMAGE }}:${TAG} -> ${{ env.GHCR_IMAGE }}:${TAG}"
|
||||
skopeo copy --all --retry-times 3 \
|
||||
docker://$DOCKERHUB_IMAGE:$TAG \
|
||||
docker://$GHCR_IMAGE:$TAG
|
||||
|
||||
# PostgreSQL OSS
|
||||
echo "Copying ${{ env.DOCKERHUB_IMAGE }}:postgresql-${TAG} -> ${{ env.GHCR_IMAGE }}:postgresql-${TAG}"
|
||||
skopeo copy --all --retry-times 3 \
|
||||
docker://$DOCKERHUB_IMAGE:postgresql-$TAG \
|
||||
docker://$GHCR_IMAGE:postgresql-$TAG
|
||||
|
||||
# SQLite Enterprise
|
||||
echo "Copying ${{ env.DOCKERHUB_IMAGE }}:ee-${TAG} -> ${{ env.GHCR_IMAGE }}:ee-${TAG}"
|
||||
skopeo copy --all --retry-times 3 \
|
||||
docker://$DOCKERHUB_IMAGE:ee-$TAG \
|
||||
docker://$GHCR_IMAGE:ee-$TAG
|
||||
|
||||
# PostgreSQL Enterprise
|
||||
echo "Copying ${{ env.DOCKERHUB_IMAGE }}:ee-postgresql-${TAG} -> ${{ env.GHCR_IMAGE }}:ee-postgresql-${TAG}"
|
||||
skopeo copy --all --retry-times 3 \
|
||||
docker://$DOCKERHUB_IMAGE:ee-postgresql-$TAG \
|
||||
docker://$GHCR_IMAGE:ee-postgresql-$TAG
|
||||
else
|
||||
echo "Regular release detected - copying all tags (latest, major, minor, full version)"
|
||||
|
||||
# SQLite OSS - all tags
|
||||
for TAG_SUFFIX in "latest" "$MAJOR_TAG" "$MINOR_TAG" "$TAG"; do
|
||||
echo "Copying ${{ env.DOCKERHUB_IMAGE }}:${TAG_SUFFIX} -> ${{ env.GHCR_IMAGE }}:${TAG_SUFFIX}"
|
||||
skopeo copy --all --retry-times 3 \
|
||||
docker://$DOCKERHUB_IMAGE:$TAG_SUFFIX \
|
||||
docker://$GHCR_IMAGE:$TAG_SUFFIX
|
||||
done
|
||||
|
||||
# PostgreSQL OSS - all tags
|
||||
for TAG_SUFFIX in "latest" "$MAJOR_TAG" "$MINOR_TAG" "$TAG"; do
|
||||
echo "Copying ${{ env.DOCKERHUB_IMAGE }}:postgresql-${TAG_SUFFIX} -> ${{ env.GHCR_IMAGE }}:postgresql-${TAG_SUFFIX}"
|
||||
skopeo copy --all --retry-times 3 \
|
||||
docker://$DOCKERHUB_IMAGE:postgresql-$TAG_SUFFIX \
|
||||
docker://$GHCR_IMAGE:postgresql-$TAG_SUFFIX
|
||||
done
|
||||
|
||||
# SQLite Enterprise - all tags
|
||||
for TAG_SUFFIX in "latest" "$MAJOR_TAG" "$MINOR_TAG" "$TAG"; do
|
||||
echo "Copying ${{ env.DOCKERHUB_IMAGE }}:ee-${TAG_SUFFIX} -> ${{ env.GHCR_IMAGE }}:ee-${TAG_SUFFIX}"
|
||||
skopeo copy --all --retry-times 3 \
|
||||
docker://$DOCKERHUB_IMAGE:ee-$TAG_SUFFIX \
|
||||
docker://$GHCR_IMAGE:ee-$TAG_SUFFIX
|
||||
done
|
||||
|
||||
# PostgreSQL Enterprise - all tags
|
||||
for TAG_SUFFIX in "latest" "$MAJOR_TAG" "$MINOR_TAG" "$TAG"; do
|
||||
echo "Copying ${{ env.DOCKERHUB_IMAGE }}:ee-postgresql-${TAG_SUFFIX} -> ${{ env.GHCR_IMAGE }}:ee-postgresql-${TAG_SUFFIX}"
|
||||
skopeo copy --all --retry-times 3 \
|
||||
docker://$DOCKERHUB_IMAGE:ee-postgresql-$TAG_SUFFIX \
|
||||
docker://$GHCR_IMAGE:ee-postgresql-$TAG_SUFFIX
|
||||
done
|
||||
fi
|
||||
|
||||
echo "All images copied successfully to GHCR!"
|
||||
shell: bash
|
||||
|
||||
- name: Login to GitHub Container Registry (for cosign)
|
||||
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Install cosign
|
||||
# cosign is used to sign and verify container images (key and keyless)
|
||||
uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
|
||||
|
||||
- name: Dual-sign and verify (GHCR & Docker Hub)
|
||||
# Sign each image by digest using keyless (OIDC) and key-based signing,
|
||||
# then verify both the public key signature and the keyless OIDC signature.
|
||||
env:
|
||||
TAG: ${{ env.TAG }}
|
||||
COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
|
||||
COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
|
||||
COSIGN_PUBLIC_KEY: ${{ secrets.COSIGN_PUBLIC_KEY }}
|
||||
COSIGN_YES: "true"
|
||||
run: |
|
||||
set -euo pipefail
|
||||
|
||||
issuer="https://token.actions.githubusercontent.com"
|
||||
id_regex="^https://github.com/${{ github.repository }}/.+" # accept this repo (all workflows/refs)
|
||||
|
||||
# Track failures
|
||||
FAILED_TAGS=()
|
||||
SUCCESSFUL_TAGS=()
|
||||
|
||||
# Determine if this is an RC release
|
||||
IS_RC="false"
|
||||
if [[ "$TAG" == *"-rc."* ]]; then
|
||||
IS_RC="true"
|
||||
fi
|
||||
|
||||
# Define image variants to sign
|
||||
if [ "$IS_RC" = "true" ]; then
|
||||
echo "RC release - signing version-specific tags only"
|
||||
IMAGE_TAGS=(
|
||||
"${TAG}"
|
||||
"postgresql-${TAG}"
|
||||
"ee-${TAG}"
|
||||
"ee-postgresql-${TAG}"
|
||||
)
|
||||
else
|
||||
echo "Regular release - signing all tags"
|
||||
MAJOR_TAG=$(echo $TAG | cut -d. -f1)
|
||||
MINOR_TAG=$(echo $TAG | cut -d. -f1,2)
|
||||
IMAGE_TAGS=(
|
||||
"latest" "$MAJOR_TAG" "$MINOR_TAG" "$TAG"
|
||||
"postgresql-latest" "postgresql-$MAJOR_TAG" "postgresql-$MINOR_TAG" "postgresql-$TAG"
|
||||
"ee-latest" "ee-$MAJOR_TAG" "ee-$MINOR_TAG" "ee-$TAG"
|
||||
"ee-postgresql-latest" "ee-postgresql-$MAJOR_TAG" "ee-postgresql-$MINOR_TAG" "ee-postgresql-$TAG"
|
||||
)
|
||||
fi
|
||||
|
||||
# Sign each image variant for both registries
|
||||
for BASE_IMAGE in "${GHCR_IMAGE}" "${DOCKERHUB_IMAGE}"; do
|
||||
for IMAGE_TAG in "${IMAGE_TAGS[@]}"; do
|
||||
echo "Processing ${BASE_IMAGE}:${IMAGE_TAG}"
|
||||
TAG_FAILED=false
|
||||
|
||||
# Wrap the entire tag processing in error handling
|
||||
(
|
||||
set -e
|
||||
DIGEST="$(skopeo inspect --retry-times 3 docker://${BASE_IMAGE}:${IMAGE_TAG} | jq -r '.Digest')"
|
||||
REF="${BASE_IMAGE}@${DIGEST}"
|
||||
echo "Resolved digest: ${REF}"
|
||||
|
||||
echo "==> cosign sign (keyless) --recursive ${REF}"
|
||||
cosign sign --recursive "${REF}"
|
||||
|
||||
echo "==> cosign sign (key) --recursive ${REF}"
|
||||
cosign sign --key env://COSIGN_PRIVATE_KEY --recursive "${REF}"
|
||||
|
||||
# Retry wrapper for verification to handle registry propagation delays
|
||||
retry_verify() {
|
||||
local cmd="$1"
|
||||
local attempts=6
|
||||
local delay=5
|
||||
local i=1
|
||||
until eval "$cmd"; do
|
||||
if [ $i -ge $attempts ]; then
|
||||
echo "Verification failed after $attempts attempts"
|
||||
return 1
|
||||
fi
|
||||
echo "Verification not yet available. Retry $i/$attempts after ${delay}s..."
|
||||
sleep $delay
|
||||
i=$((i+1))
|
||||
delay=$((delay*2))
|
||||
# Cap the delay to avoid very long waits
|
||||
if [ $delay -gt 60 ]; then delay=60; fi
|
||||
done
|
||||
return 0
|
||||
}
|
||||
|
||||
echo "==> cosign verify (public key) ${REF}"
|
||||
if retry_verify "cosign verify --key env://COSIGN_PUBLIC_KEY '${REF}' -o text"; then
|
||||
VERIFIED_INDEX=true
|
||||
else
|
||||
VERIFIED_INDEX=false
|
||||
fi
|
||||
|
||||
echo "==> cosign verify (keyless policy) ${REF}"
|
||||
if retry_verify "cosign verify --certificate-oidc-issuer '${issuer}' --certificate-identity-regexp '${id_regex}' '${REF}' -o text"; then
|
||||
VERIFIED_INDEX_KEYLESS=true
|
||||
else
|
||||
VERIFIED_INDEX_KEYLESS=false
|
||||
fi
|
||||
|
||||
# Check if verification succeeded
|
||||
if [ "${VERIFIED_INDEX}" != "true" ] && [ "${VERIFIED_INDEX_KEYLESS}" != "true" ]; then
|
||||
echo "⚠️ WARNING: Verification not available for ${BASE_IMAGE}:${IMAGE_TAG}"
|
||||
echo "This may be due to registry propagation delays. Continuing anyway."
|
||||
fi
|
||||
) || TAG_FAILED=true
|
||||
|
||||
if [ "$TAG_FAILED" = "true" ]; then
|
||||
echo "⚠️ WARNING: Failed to sign/verify ${BASE_IMAGE}:${IMAGE_TAG}"
|
||||
FAILED_TAGS+=("${BASE_IMAGE}:${IMAGE_TAG}")
|
||||
else
|
||||
echo "✓ Successfully signed and verified ${BASE_IMAGE}:${IMAGE_TAG}"
|
||||
SUCCESSFUL_TAGS+=("${BASE_IMAGE}:${IMAGE_TAG}")
|
||||
fi
|
||||
done
|
||||
done
|
||||
|
||||
# Report summary
|
||||
echo ""
|
||||
echo "=========================================="
|
||||
echo "Sign and Verify Summary"
|
||||
echo "=========================================="
|
||||
echo "Successful: ${#SUCCESSFUL_TAGS[@]}"
|
||||
echo "Failed: ${#FAILED_TAGS[@]}"
|
||||
echo ""
|
||||
|
||||
if [ ${#FAILED_TAGS[@]} -gt 0 ]; then
|
||||
echo "Failed tags:"
|
||||
for tag in "${FAILED_TAGS[@]}"; do
|
||||
echo " - $tag"
|
||||
done
|
||||
echo ""
|
||||
echo "⚠️ WARNING: Some tags failed to sign/verify, but continuing anyway"
|
||||
else
|
||||
echo "✓ All images signed and verified successfully!"
|
||||
fi
|
||||
shell: bash
|
||||
|
||||
post-run:
|
||||
needs: [pre-run, release-arm, release-amd, create-manifest, sign-and-package]
|
||||
if: >-
|
||||
${{
|
||||
always() &&
|
||||
needs.pre-run.result == 'success' &&
|
||||
(needs.release-arm.result == 'success' || needs.release-arm.result == 'skipped' || needs.release-arm.result == 'failure') &&
|
||||
(needs.release-amd.result == 'success' || needs.release-amd.result == 'skipped' || needs.release-amd.result == 'failure') &&
|
||||
(needs.create-manifest.result == 'success' || needs.create-manifest.result == 'skipped' || needs.create-manifest.result == 'failure') &&
|
||||
(needs.sign-and-package.result == 'success' || needs.sign-and-package.result == 'skipped' || needs.sign-and-package.result == 'failure')
|
||||
}}
|
||||
runs-on: ubuntu-latest
|
||||
permissions: write-all
|
||||
steps:
|
||||
- name: Configure AWS credentials
|
||||
uses: aws-actions/configure-aws-credentials@v6
|
||||
with:
|
||||
role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/${{ secrets.AWS_ROLE_NAME }}
|
||||
role-duration-seconds: 3600
|
||||
aws-region: ${{ secrets.AWS_REGION }}
|
||||
|
||||
- name: Verify AWS identity
|
||||
run: aws sts get-caller-identity
|
||||
|
||||
- name: Stop EC2 instances
|
||||
run: |
|
||||
aws ec2 stop-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_ARM_RUNNER }}
|
||||
aws ec2 stop-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_AMD_RUNNER }}
|
||||
echo "EC2 instances stopped"
|
||||
make build-release tag=$TAG
|
||||
|
||||
11
.github/workflows/linting.yml
vendored
@@ -1,8 +1,5 @@
|
||||
name: ESLint
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
paths:
|
||||
@@ -21,12 +18,12 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
uses: actions/checkout@v5
|
||||
|
||||
- name: Set up Node.js
|
||||
uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6.2.0
|
||||
uses: actions/setup-node@v5
|
||||
with:
|
||||
node-version: '24'
|
||||
node-version: '22'
|
||||
|
||||
- name: Install dependencies
|
||||
run: npm ci
|
||||
@@ -35,4 +32,4 @@ jobs:
|
||||
run: npm run set:oss
|
||||
|
||||
- name: Run ESLint
|
||||
run: npx eslint . --ext .js,.jsx,.ts,.tsx
|
||||
run: npx eslint . --ext .js,.jsx,.ts,.tsx
|
||||
132
.github/workflows/mirror.yaml
vendored
@@ -1,132 +0,0 @@
|
||||
name: Mirror & Sign (Docker Hub to GHCR)
|
||||
|
||||
on:
|
||||
workflow_dispatch: {}
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
id-token: write # for keyless OIDC
|
||||
|
||||
env:
|
||||
SOURCE_IMAGE: docker.io/fosrl/pangolin
|
||||
DEST_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}
|
||||
|
||||
jobs:
|
||||
mirror-and-dual-sign:
|
||||
runs-on: amd64-runner
|
||||
steps:
|
||||
- name: Install skopeo + jq
|
||||
run: |
|
||||
sudo apt-get update -y
|
||||
sudo apt-get install -y skopeo jq
|
||||
skopeo --version
|
||||
|
||||
- name: Install cosign
|
||||
uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
|
||||
|
||||
- name: Input check
|
||||
run: |
|
||||
test -n "${SOURCE_IMAGE}" || (echo "SOURCE_IMAGE is empty" && exit 1)
|
||||
echo "Source : ${SOURCE_IMAGE}"
|
||||
echo "Target : ${DEST_IMAGE}"
|
||||
|
||||
# Auth for skopeo (containers-auth)
|
||||
- name: Skopeo login to GHCR
|
||||
run: |
|
||||
skopeo login ghcr.io -u "${{ github.actor }}" -p "${{ secrets.GITHUB_TOKEN }}"
|
||||
|
||||
# Auth for cosign (docker-config)
|
||||
- name: Docker login to GHCR (for cosign)
|
||||
run: |
|
||||
echo "${{ secrets.GITHUB_TOKEN }}" | docker login ghcr.io -u "${{ github.actor }}" --password-stdin
|
||||
|
||||
- name: List source tags
|
||||
run: |
|
||||
set -euo pipefail
|
||||
skopeo list-tags --retry-times 3 docker://"${SOURCE_IMAGE}" \
|
||||
| jq -r '.Tags[]' | grep -v -e '-arm64' -e '-amd64' | sort -u > src-tags.txt
|
||||
echo "Found source tags: $(wc -l < src-tags.txt)"
|
||||
head -n 20 src-tags.txt || true
|
||||
|
||||
- name: List destination tags (skip existing)
|
||||
run: |
|
||||
set -euo pipefail
|
||||
if skopeo list-tags --retry-times 3 docker://"${DEST_IMAGE}" >/tmp/dst.json 2>/dev/null; then
|
||||
jq -r '.Tags[]' /tmp/dst.json | sort -u > dst-tags.txt
|
||||
else
|
||||
: > dst-tags.txt
|
||||
fi
|
||||
echo "Existing destination tags: $(wc -l < dst-tags.txt)"
|
||||
|
||||
- name: Mirror, dual-sign, and verify
|
||||
env:
|
||||
# keyless
|
||||
COSIGN_YES: "true"
|
||||
# key-based
|
||||
COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
|
||||
COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
|
||||
# verify
|
||||
COSIGN_PUBLIC_KEY: ${{ secrets.COSIGN_PUBLIC_KEY }}
|
||||
run: |
|
||||
set -euo pipefail
|
||||
copied=0; skipped=0; v_ok=0; errs=0
|
||||
|
||||
issuer="https://token.actions.githubusercontent.com"
|
||||
id_regex="^https://github.com/${{ github.repository }}/.+"
|
||||
|
||||
while read -r tag; do
|
||||
[ -z "$tag" ] && continue
|
||||
|
||||
if grep -Fxq "$tag" dst-tags.txt; then
|
||||
echo "::notice ::Skip (exists) ${DEST_IMAGE}:${tag}"
|
||||
skipped=$((skipped+1))
|
||||
continue
|
||||
fi
|
||||
|
||||
echo "==> Copy ${SOURCE_IMAGE}:${tag} → ${DEST_IMAGE}:${tag}"
|
||||
if ! skopeo copy --all --retry-times 3 \
|
||||
docker://"${SOURCE_IMAGE}:${tag}" docker://"${DEST_IMAGE}:${tag}"; then
|
||||
echo "::warning title=Copy failed::${SOURCE_IMAGE}:${tag}"
|
||||
errs=$((errs+1)); continue
|
||||
fi
|
||||
copied=$((copied+1))
|
||||
|
||||
digest="$(skopeo inspect --retry-times 3 docker://"${DEST_IMAGE}:${tag}" | jq -r '.Digest')"
|
||||
ref="${DEST_IMAGE}@${digest}"
|
||||
|
||||
echo "==> cosign sign (keyless) --recursive ${ref}"
|
||||
if ! cosign sign --recursive "${ref}"; then
|
||||
echo "::warning title=Keyless sign failed::${ref}"
|
||||
errs=$((errs+1))
|
||||
fi
|
||||
|
||||
echo "==> cosign sign (key) --recursive ${ref}"
|
||||
if ! cosign sign --key env://COSIGN_PRIVATE_KEY --recursive "${ref}"; then
|
||||
echo "::warning title=Key sign failed::${ref}"
|
||||
errs=$((errs+1))
|
||||
fi
|
||||
|
||||
echo "==> cosign verify (public key) ${ref}"
|
||||
if ! cosign verify --key env://COSIGN_PUBLIC_KEY "${ref}" -o text; then
|
||||
echo "::warning title=Verify(pubkey) failed::${ref}"
|
||||
errs=$((errs+1))
|
||||
fi
|
||||
|
||||
echo "==> cosign verify (keyless policy) ${ref}"
|
||||
if ! cosign verify \
|
||||
--certificate-oidc-issuer "${issuer}" \
|
||||
--certificate-identity-regexp "${id_regex}" \
|
||||
"${ref}" -o text; then
|
||||
echo "::warning title=Verify(keyless) failed::${ref}"
|
||||
errs=$((errs+1))
|
||||
else
|
||||
v_ok=$((v_ok+1))
|
||||
fi
|
||||
done < src-tags.txt
|
||||
|
||||
echo "---- Summary ----"
|
||||
echo "Copied : $copied"
|
||||
echo "Skipped : $skipped"
|
||||
echo "Verified OK : $v_ok"
|
||||
echo "Errors : $errs"
|
||||
39
.github/workflows/restart-runners.yml
vendored
@@ -1,39 +0,0 @@
|
||||
name: Restart Runners
|
||||
|
||||
on:
|
||||
schedule:
|
||||
- cron: '0 0 */7 * *'
|
||||
|
||||
permissions:
|
||||
id-token: write
|
||||
contents: read
|
||||
|
||||
jobs:
|
||||
ec2-maintenance-prod:
|
||||
runs-on: ubuntu-latest
|
||||
permissions: write-all
|
||||
steps:
|
||||
- name: Configure AWS credentials
|
||||
uses: aws-actions/configure-aws-credentials@v6
|
||||
with:
|
||||
role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/${{ secrets.AWS_ROLE_NAME }}
|
||||
role-duration-seconds: 3600
|
||||
aws-region: ${{ secrets.AWS_REGION }}
|
||||
|
||||
- name: Verify AWS identity
|
||||
run: aws sts get-caller-identity
|
||||
|
||||
- name: Start EC2 instance
|
||||
run: |
|
||||
aws ec2 start-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_ARM_RUNNER }}
|
||||
aws ec2 start-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_AMD_RUNNER }}
|
||||
echo "EC2 instances started"
|
||||
|
||||
- name: Wait
|
||||
run: sleep 600
|
||||
|
||||
- name: Stop EC2 instance
|
||||
run: |
|
||||
aws ec2 stop-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_ARM_RUNNER }}
|
||||
aws ec2 stop-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_AMD_RUNNER }}
|
||||
echo "EC2 instances stopped"
|
||||
160
.github/workflows/saas.yml
vendored
@@ -1,160 +0,0 @@
|
||||
name: SAAS Pipeline
|
||||
|
||||
# CI/CD workflow for building, publishing, mirroring, signing container images and building release binaries.
|
||||
# Actions are pinned to specific SHAs to reduce supply-chain risk. This workflow triggers on tag push events.
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write # for GHCR push
|
||||
id-token: write # for Cosign Keyless (OIDC) Signing
|
||||
|
||||
on:
|
||||
push:
|
||||
tags:
|
||||
- "[0-9]+.[0-9]+.[0-9]+-s.[0-9]+"
|
||||
|
||||
concurrency:
|
||||
group: ${{ github.ref }}
|
||||
cancel-in-progress: true
|
||||
|
||||
jobs:
|
||||
pre-run:
|
||||
runs-on: ubuntu-latest
|
||||
permissions: write-all
|
||||
steps:
|
||||
- name: Configure AWS credentials
|
||||
uses: aws-actions/configure-aws-credentials@v6
|
||||
with:
|
||||
role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/${{ secrets.AWS_ROLE_NAME }}
|
||||
role-duration-seconds: 3600
|
||||
aws-region: ${{ secrets.AWS_REGION }}
|
||||
|
||||
- name: Verify AWS identity
|
||||
run: aws sts get-caller-identity
|
||||
|
||||
- name: Start EC2 instances
|
||||
run: |
|
||||
aws ec2 start-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_ARM_RUNNER }}
|
||||
echo "EC2 instances started"
|
||||
|
||||
|
||||
release-arm:
|
||||
name: Build and Release (ARM64)
|
||||
runs-on: [self-hosted, linux, arm64, us-east-1]
|
||||
needs: [pre-run]
|
||||
if: >-
|
||||
${{
|
||||
needs.pre-run.result == 'success'
|
||||
}}
|
||||
# Job-level timeout to avoid runaway or stuck runs
|
||||
timeout-minutes: 120
|
||||
env:
|
||||
# Target images
|
||||
AWS_IMAGE: ${{ secrets.aws_account_id }}.dkr.ecr.us-east-1.amazonaws.com/${{ github.event.repository.name }}
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
|
||||
- name: Download MaxMind GeoLite2 databases
|
||||
env:
|
||||
MAXMIND_LICENSE_KEY: ${{ secrets.MAXMIND_LICENSE_KEY }}
|
||||
run: |
|
||||
echo "Downloading MaxMind GeoLite2 databases..."
|
||||
|
||||
# Download GeoLite2-Country
|
||||
curl -L "https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-Country&license_key=${MAXMIND_LICENSE_KEY}&suffix=tar.gz" \
|
||||
-o GeoLite2-Country.tar.gz
|
||||
|
||||
# Download GeoLite2-ASN
|
||||
curl -L "https://download.maxmind.com/app/geoip_download?edition_id=GeoLite2-ASN&license_key=${MAXMIND_LICENSE_KEY}&suffix=tar.gz" \
|
||||
-o GeoLite2-ASN.tar.gz
|
||||
|
||||
# Extract the .mmdb files
|
||||
tar -xzf GeoLite2-Country.tar.gz --strip-components=1 --wildcards '*.mmdb'
|
||||
tar -xzf GeoLite2-ASN.tar.gz --strip-components=1 --wildcards '*.mmdb'
|
||||
|
||||
# Verify files exist
|
||||
if [ ! -f "GeoLite2-Country.mmdb" ]; then
|
||||
echo "ERROR: Failed to download GeoLite2-Country.mmdb"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ ! -f "GeoLite2-ASN.mmdb" ]; then
|
||||
echo "ERROR: Failed to download GeoLite2-ASN.mmdb"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Clean up tar files
|
||||
rm -f GeoLite2-Country.tar.gz GeoLite2-ASN.tar.gz
|
||||
|
||||
echo "MaxMind databases downloaded successfully"
|
||||
ls -lh GeoLite2-*.mmdb
|
||||
|
||||
- name: Monitor storage space
|
||||
run: |
|
||||
THRESHOLD=75
|
||||
USED_SPACE=$(df / | grep / | awk '{ print $5 }' | sed 's/%//g')
|
||||
echo "Used space: $USED_SPACE%"
|
||||
if [ "$USED_SPACE" -ge "$THRESHOLD" ]; then
|
||||
echo "Used space is below the threshold of 75% free. Running Docker system prune."
|
||||
echo y | docker system prune -a
|
||||
else
|
||||
echo "Storage space is above the threshold. No action needed."
|
||||
fi
|
||||
|
||||
- name: Configure AWS credentials
|
||||
uses: aws-actions/configure-aws-credentials@v6
|
||||
with:
|
||||
role-to-assume: arn:aws:iam::${{ secrets.aws_account_id }}:role/${{ secrets.AWS_ROLE_NAME }}
|
||||
role-duration-seconds: 3600
|
||||
aws-region: ${{ secrets.AWS_REGION }}
|
||||
|
||||
- name: Login to Amazon ECR
|
||||
id: login-ecr
|
||||
uses: aws-actions/amazon-ecr-login@v2
|
||||
|
||||
- name: Extract tag name
|
||||
id: get-tag
|
||||
run: echo "TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
|
||||
shell: bash
|
||||
|
||||
- name: Update version in package.json
|
||||
run: |
|
||||
TAG=${{ env.TAG }}
|
||||
sed -i "s/export const APP_VERSION = \".*\";/export const APP_VERSION = \"$TAG\";/" server/lib/consts.ts
|
||||
cat server/lib/consts.ts
|
||||
shell: bash
|
||||
|
||||
- name: Build and push Docker images (Docker Hub - ARM64)
|
||||
run: |
|
||||
TAG=${{ env.TAG }}
|
||||
make build-saas tag=$TAG
|
||||
echo "Built & pushed ARM64 images to: ${{ env.AWS_IMAGE }}:${TAG}"
|
||||
shell: bash
|
||||
|
||||
post-run:
|
||||
needs: [pre-run, release-arm]
|
||||
if: >-
|
||||
${{
|
||||
always() &&
|
||||
needs.pre-run.result == 'success' &&
|
||||
(needs.release-arm.result == 'success' || needs.release-arm.result == 'skipped' || needs.release-arm.result == 'failure')
|
||||
}}
|
||||
runs-on: ubuntu-latest
|
||||
permissions: write-all
|
||||
steps:
|
||||
- name: Configure AWS credentials
|
||||
uses: aws-actions/configure-aws-credentials@v6
|
||||
with:
|
||||
role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/${{ secrets.AWS_ROLE_NAME }}
|
||||
role-duration-seconds: 3600
|
||||
aws-region: ${{ secrets.AWS_REGION }}
|
||||
|
||||
- name: Verify AWS identity
|
||||
run: aws sts get-caller-identity
|
||||
|
||||
- name: Stop EC2 instances
|
||||
run: |
|
||||
aws ec2 stop-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_ARM_RUNNER }}
|
||||
echo "EC2 instances stopped"
|
||||
4
.github/workflows/stale-bot.yml
vendored
@@ -14,7 +14,7 @@ jobs:
|
||||
stale:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/stale@b5d41d4e1d5dceea10e7104786b73624c18a190f # v10.2.0
|
||||
- uses: actions/stale@v10
|
||||
with:
|
||||
days-before-stale: 14
|
||||
days-before-close: 14
|
||||
@@ -34,4 +34,4 @@ jobs:
|
||||
operations-per-run: 100
|
||||
remove-stale-when-updated: true
|
||||
delete-branch: false
|
||||
enable-statistics: true
|
||||
enable-statistics: true
|
||||
34
.github/workflows/test.yml
vendored
@@ -1,8 +1,5 @@
|
||||
name: Run Tests
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
on:
|
||||
pull_request:
|
||||
branches:
|
||||
@@ -12,14 +9,13 @@ on:
|
||||
jobs:
|
||||
test:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
|
||||
- name: Install Node
|
||||
uses: actions/setup-node@6044e13b5dc448c55e2357c09f80417699197238 # v6.2.0
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
|
||||
- uses: actions/setup-node@v5
|
||||
with:
|
||||
node-version: '24'
|
||||
node-version: '22'
|
||||
|
||||
- name: Copy config file
|
||||
run: cp config/config.example.yml config/config.yml
|
||||
@@ -34,10 +30,10 @@ jobs:
|
||||
run: npm run set:oss
|
||||
|
||||
- name: Generate database migrations
|
||||
run: npm run db:generate
|
||||
run: npm run db:sqlite:generate
|
||||
|
||||
- name: Apply database migrations
|
||||
run: npm run db:push
|
||||
run: npm run db:sqlite:push
|
||||
|
||||
- name: Test with tsc
|
||||
run: npx tsc --noEmit
|
||||
@@ -58,20 +54,8 @@ jobs:
|
||||
echo "App failed to start"
|
||||
exit 1
|
||||
|
||||
build-sqlite:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
|
||||
- name: Build Docker image sqlite
|
||||
run: make dev-build-sqlite
|
||||
|
||||
build-postgres:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@de0fac2e4500dabe0009e67214ff5f5447ce83dd # v6.0.2
|
||||
run: make build-sqlite
|
||||
|
||||
- name: Build Docker image pg
|
||||
run: make dev-build-pg
|
||||
run: make build-pg
|
||||
|
||||
8
.gitignore
vendored
@@ -47,10 +47,4 @@ server/db/index.ts
|
||||
server/build.ts
|
||||
postgres/
|
||||
dynamic/
|
||||
*.mmdb
|
||||
scratch/
|
||||
tsconfig.json
|
||||
hydrateSaas.ts
|
||||
CLAUDE.md
|
||||
drizzle.config.ts
|
||||
server/setup/migrations.ts
|
||||
*.mmdb
|
||||
@@ -1,12 +0,0 @@
|
||||
.github/
|
||||
bruno/
|
||||
cli/
|
||||
config/
|
||||
messages/
|
||||
next.config.mjs/
|
||||
public/
|
||||
tailwind.config.js/
|
||||
test/
|
||||
**/*.yml
|
||||
**/*.yaml
|
||||
**/*.md
|
||||
3
.vscode/extensions.json
vendored
@@ -1,3 +0,0 @@
|
||||
{
|
||||
"recommendations": ["esbenp.prettier-vscode"]
|
||||
}
|
||||
22
.vscode/settings.json
vendored
@@ -1,22 +0,0 @@
|
||||
{
|
||||
"editor.codeActionsOnSave": {
|
||||
"source.addMissingImports.ts": "always"
|
||||
},
|
||||
"editor.defaultFormatter": "esbenp.prettier-vscode",
|
||||
"[jsonc]": {
|
||||
"editor.defaultFormatter": "vscode.json-language-features"
|
||||
},
|
||||
"[javascript]": {
|
||||
"editor.defaultFormatter": "esbenp.prettier-vscode"
|
||||
},
|
||||
"[typescript]": {
|
||||
"editor.defaultFormatter": "esbenp.prettier-vscode"
|
||||
},
|
||||
"[typescriptreact]": {
|
||||
"editor.defaultFormatter": "esbenp.prettier-vscode"
|
||||
},
|
||||
"[json]": {
|
||||
"editor.defaultFormatter": "esbenp.prettier-vscode"
|
||||
},
|
||||
"editor.formatOnSave": true
|
||||
}
|
||||
@@ -4,7 +4,7 @@ Contributions are welcome!
|
||||
|
||||
Please see the contribution and local development guide on the docs page before getting started:
|
||||
|
||||
https://docs.pangolin.net/development/contributing
|
||||
https://docs.digpangolin.com/development/contributing
|
||||
|
||||
### Licensing Considerations
|
||||
|
||||
|
||||
91
Dockerfile
@@ -1,91 +1,46 @@
|
||||
FROM node:24-slim AS base
|
||||
FROM node:22-alpine AS builder
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
RUN apt-get update && apt-get install -y python3 make g++ && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
COPY package*.json ./
|
||||
|
||||
FROM base AS builder-dev
|
||||
|
||||
RUN npm ci
|
||||
|
||||
COPY . .
|
||||
|
||||
ARG BUILD=oss
|
||||
ARG DATABASE=sqlite
|
||||
|
||||
RUN if [ "$BUILD" = "oss" ]; then rm -rf server/private; fi && \
|
||||
npm run set:$DATABASE && \
|
||||
npm run set:$BUILD && \
|
||||
npm run db:generate && \
|
||||
npm run build && \
|
||||
npm run build:cli && \
|
||||
test -f dist/server.mjs
|
||||
# COPY package.json package-lock.json ./
|
||||
COPY package*.json ./
|
||||
RUN npm ci
|
||||
|
||||
# Create placeholder files for MaxMind databases to avoid COPY errors
|
||||
# Real files should be present for saas builds, placeholders for oss builds
|
||||
RUN touch /app/GeoLite2-Country.mmdb /app/GeoLite2-ASN.mmdb
|
||||
COPY . .
|
||||
|
||||
FROM base AS builder
|
||||
RUN echo "export * from \"./$DATABASE\";" > server/db/index.ts
|
||||
|
||||
RUN npm ci --omit=dev
|
||||
RUN echo "export const build = \"$BUILD\" as any;" > server/build.ts
|
||||
|
||||
FROM node:24-slim AS runner
|
||||
RUN if [ "$DATABASE" = "pg" ]; then npx drizzle-kit generate --dialect postgresql --schema ./server/db/pg/schema.ts --out init; else npx drizzle-kit generate --dialect $DATABASE --schema ./server/db/$DATABASE/schema.ts --out init; fi
|
||||
|
||||
RUN npm run build:$DATABASE
|
||||
RUN npm run build:cli
|
||||
|
||||
FROM node:22-alpine AS runner
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
RUN apt-get update && apt-get install -y curl tzdata && rm -rf /var/lib/apt/lists/*
|
||||
# Curl used for the health checks
|
||||
RUN apk add --no-cache curl tzdata
|
||||
|
||||
COPY --from=builder /app/node_modules ./node_modules
|
||||
COPY --from=builder /app/package.json ./package.json
|
||||
# COPY package.json package-lock.json ./
|
||||
COPY package*.json ./
|
||||
|
||||
COPY --from=builder-dev /app/.next/standalone ./
|
||||
COPY --from=builder-dev /app/.next/static ./.next/static
|
||||
COPY --from=builder-dev /app/dist ./dist
|
||||
COPY --from=builder-dev /app/server/migrations ./dist/init
|
||||
RUN npm ci --omit=dev && npm cache clean --force
|
||||
|
||||
COPY --from=builder /app/.next/standalone ./
|
||||
COPY --from=builder /app/.next/static ./.next/static
|
||||
COPY --from=builder /app/dist ./dist
|
||||
COPY --from=builder /app/init ./dist/init
|
||||
|
||||
COPY ./cli/wrapper.sh /usr/local/bin/pangctl
|
||||
RUN chmod +x /usr/local/bin/pangctl ./dist/cli.mjs
|
||||
|
||||
COPY server/db/names.json ./dist/names.json
|
||||
COPY server/db/ios_models.json ./dist/ios_models.json
|
||||
COPY server/db/mac_models.json ./dist/mac_models.json
|
||||
COPY public ./public
|
||||
|
||||
# Copy MaxMind databases for SaaS builds
|
||||
ARG BUILD=oss
|
||||
|
||||
RUN mkdir -p ./maxmind
|
||||
|
||||
# Copy MaxMind databases (placeholders exist for oss builds, real files for saas)
|
||||
COPY --from=builder-dev /app/GeoLite2-Country.mmdb ./maxmind/GeoLite2-Country.mmdb
|
||||
COPY --from=builder-dev /app/GeoLite2-ASN.mmdb ./maxmind/GeoLite2-ASN.mmdb
|
||||
|
||||
# Remove MaxMind databases for non-saas builds (keep only for saas)
|
||||
RUN if [ "$BUILD" != "saas" ]; then rm -rf ./maxmind; fi
|
||||
|
||||
# OCI Image Labels - Build Args for dynamic values
|
||||
ARG VERSION="dev"
|
||||
ARG REVISION=""
|
||||
ARG CREATED=""
|
||||
ARG LICENSE="AGPL-3.0"
|
||||
|
||||
# Derive title and description based on BUILD type
|
||||
ARG IMAGE_TITLE="Pangolin"
|
||||
ARG IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere"
|
||||
|
||||
# OCI Image Labels
|
||||
# https://github.com/opencontainers/image-spec/blob/main/annotations.md
|
||||
LABEL org.opencontainers.image.source="https://github.com/fosrl/pangolin" \
|
||||
org.opencontainers.image.url="https://github.com/fosrl/pangolin" \
|
||||
org.opencontainers.image.documentation="https://docs.pangolin.net" \
|
||||
org.opencontainers.image.vendor="Fossorial" \
|
||||
org.opencontainers.image.licenses="${LICENSE}" \
|
||||
org.opencontainers.image.title="${IMAGE_TITLE}" \
|
||||
org.opencontainers.image.description="${IMAGE_DESCRIPTION}" \
|
||||
org.opencontainers.image.version="${VERSION}" \
|
||||
org.opencontainers.image.revision="${REVISION}" \
|
||||
org.opencontainers.image.created="${CREATED}"
|
||||
|
||||
CMD ["npm", "run", "start"]
|
||||
|
||||
@@ -1,9 +1,7 @@
|
||||
FROM node:24-alpine
|
||||
FROM node:22-alpine
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
RUN apk add --no-cache python3 make g++
|
||||
|
||||
COPY package*.json ./
|
||||
|
||||
# Install dependencies
|
||||
|
||||
496
Makefile
@@ -1,56 +1,22 @@
|
||||
.PHONY: build build-pg build-release build-release-arm build-release-amd create-manifests build-arm build-x86 test clean
|
||||
.PHONY: build build-pg build-release build-arm build-x86 test clean
|
||||
|
||||
major_tag := $(shell echo $(tag) | cut -d. -f1)
|
||||
minor_tag := $(shell echo $(tag) | cut -d. -f1,2)
|
||||
|
||||
# OCI label variables
|
||||
CREATED := $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
|
||||
REVISION := $(shell git rev-parse HEAD 2>/dev/null || echo "unknown")
|
||||
|
||||
# Common OCI build args for OSS builds
|
||||
OCI_ARGS_OSS = --build-arg VERSION=$(tag) \
|
||||
--build-arg REVISION=$(REVISION) \
|
||||
--build-arg CREATED=$(CREATED) \
|
||||
--build-arg IMAGE_TITLE="Pangolin" \
|
||||
--build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere"
|
||||
|
||||
# Common OCI build args for Enterprise builds
|
||||
OCI_ARGS_EE = --build-arg VERSION=$(tag) \
|
||||
--build-arg REVISION=$(REVISION) \
|
||||
--build-arg CREATED=$(CREATED) \
|
||||
--build-arg LICENSE="Fossorial Commercial" \
|
||||
--build-arg IMAGE_TITLE="Pangolin EE" \
|
||||
--build-arg IMAGE_DESCRIPTION="Pangolin Enterprise Edition - Identity-aware VPN and proxy for remote access to anything, anywhere"
|
||||
|
||||
.PHONY: build-release build-sqlite build-postgresql build-ee-sqlite build-ee-postgresql
|
||||
|
||||
build-release: build-sqlite build-postgresql build-ee-sqlite build-ee-postgresql
|
||||
|
||||
build-sqlite:
|
||||
build-release:
|
||||
@if [ -z "$(tag)" ]; then \
|
||||
echo "Error: tag is required. Usage: make build-release tag=<tag>"; \
|
||||
exit 1; \
|
||||
fi
|
||||
docker buildx build \
|
||||
--build-arg BUILD=oss \
|
||||
--build-arg DATABASE=sqlite \
|
||||
$(OCI_ARGS_OSS) \
|
||||
--platform linux/arm64,linux/amd64 \
|
||||
--tag fosrl/pangolin:latest \
|
||||
--tag fosrl/pangolin:$(major_tag) \
|
||||
--tag fosrl/pangolin:$(minor_tag) \
|
||||
--tag fosrl/pangolin:$(tag) \
|
||||
--push .
|
||||
|
||||
build-postgresql:
|
||||
@if [ -z "$(tag)" ]; then \
|
||||
echo "Error: tag is required. Usage: make build-release tag=<tag>"; \
|
||||
exit 1; \
|
||||
fi
|
||||
docker buildx build \
|
||||
--build-arg BUILD=oss \
|
||||
--build-arg DATABASE=pg \
|
||||
$(OCI_ARGS_OSS) \
|
||||
--platform linux/arm64,linux/amd64 \
|
||||
--tag fosrl/pangolin:postgresql-latest \
|
||||
--tag fosrl/pangolin:postgresql-$(major_tag) \
|
||||
@@ -58,463 +24,17 @@ build-postgresql:
|
||||
--tag fosrl/pangolin:postgresql-$(tag) \
|
||||
--push .
|
||||
|
||||
build-ee-sqlite:
|
||||
@if [ -z "$(tag)" ]; then \
|
||||
echo "Error: tag is required. Usage: make build-release tag=<tag>"; \
|
||||
exit 1; \
|
||||
fi
|
||||
docker buildx build \
|
||||
--build-arg BUILD=enterprise \
|
||||
--build-arg DATABASE=sqlite \
|
||||
$(OCI_ARGS_EE) \
|
||||
--platform linux/arm64,linux/amd64 \
|
||||
--tag fosrl/pangolin:ee-latest \
|
||||
--tag fosrl/pangolin:ee-$(major_tag) \
|
||||
--tag fosrl/pangolin:ee-$(minor_tag) \
|
||||
--tag fosrl/pangolin:ee-$(tag) \
|
||||
--push .
|
||||
|
||||
build-ee-postgresql:
|
||||
@if [ -z "$(tag)" ]; then \
|
||||
echo "Error: tag is required. Usage: make build-release tag=<tag>"; \
|
||||
exit 1; \
|
||||
fi
|
||||
docker buildx build \
|
||||
--build-arg BUILD=enterprise \
|
||||
--build-arg DATABASE=pg \
|
||||
$(OCI_ARGS_EE) \
|
||||
--platform linux/arm64,linux/amd64 \
|
||||
--tag fosrl/pangolin:ee-postgresql-latest \
|
||||
--tag fosrl/pangolin:ee-postgresql-$(major_tag) \
|
||||
--tag fosrl/pangolin:ee-postgresql-$(minor_tag) \
|
||||
--tag fosrl/pangolin:ee-postgresql-$(tag) \
|
||||
--push .
|
||||
|
||||
build-saas:
|
||||
@if [ -z "$(tag)" ]; then \
|
||||
echo "Error: tag is required. Usage: make build-release tag=<tag>"; \
|
||||
exit 1; \
|
||||
fi
|
||||
docker buildx build \
|
||||
--build-arg BUILD=saas \
|
||||
--build-arg DATABASE=pg \
|
||||
--platform linux/arm64 \
|
||||
--tag $(AWS_IMAGE):$(tag) \
|
||||
--push .
|
||||
|
||||
build-release-arm:
|
||||
@if [ -z "$(tag)" ]; then \
|
||||
echo "Error: tag is required. Usage: make build-release-arm tag=<tag>"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@MAJOR_TAG=$$(echo $(tag) | cut -d. -f1); \
|
||||
MINOR_TAG=$$(echo $(tag) | cut -d. -f1,2); \
|
||||
CREATED=$$(date -u +"%Y-%m-%dT%H:%M:%SZ"); \
|
||||
REVISION=$$(git rev-parse HEAD 2>/dev/null || echo "unknown"); \
|
||||
docker buildx build \
|
||||
--build-arg BUILD=oss \
|
||||
--build-arg DATABASE=sqlite \
|
||||
--build-arg VERSION=$(tag) \
|
||||
--build-arg REVISION=$$REVISION \
|
||||
--build-arg CREATED=$$CREATED \
|
||||
--build-arg IMAGE_TITLE="Pangolin" \
|
||||
--build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
|
||||
--platform linux/arm64 \
|
||||
--tag fosrl/pangolin:latest-arm64 \
|
||||
--tag fosrl/pangolin:$$MAJOR_TAG-arm64 \
|
||||
--tag fosrl/pangolin:$$MINOR_TAG-arm64 \
|
||||
--tag fosrl/pangolin:$(tag)-arm64 \
|
||||
--push . && \
|
||||
docker buildx build \
|
||||
--build-arg BUILD=oss \
|
||||
--build-arg DATABASE=pg \
|
||||
--build-arg VERSION=$(tag) \
|
||||
--build-arg REVISION=$$REVISION \
|
||||
--build-arg CREATED=$$CREATED \
|
||||
--build-arg IMAGE_TITLE="Pangolin" \
|
||||
--build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
|
||||
--platform linux/arm64 \
|
||||
--tag fosrl/pangolin:postgresql-latest-arm64 \
|
||||
--tag fosrl/pangolin:postgresql-$$MAJOR_TAG-arm64 \
|
||||
--tag fosrl/pangolin:postgresql-$$MINOR_TAG-arm64 \
|
||||
--tag fosrl/pangolin:postgresql-$(tag)-arm64 \
|
||||
--push . && \
|
||||
docker buildx build \
|
||||
--build-arg BUILD=enterprise \
|
||||
--build-arg DATABASE=sqlite \
|
||||
--build-arg VERSION=$(tag) \
|
||||
--build-arg REVISION=$$REVISION \
|
||||
--build-arg CREATED=$$CREATED \
|
||||
--build-arg LICENSE="Fossorial Commercial" \
|
||||
--build-arg IMAGE_TITLE="Pangolin EE" \
|
||||
--build-arg IMAGE_DESCRIPTION="Pangolin Enterprise Edition - Identity-aware VPN and proxy for remote access to anything, anywhere" \
|
||||
--platform linux/arm64 \
|
||||
--tag fosrl/pangolin:ee-latest-arm64 \
|
||||
--tag fosrl/pangolin:ee-$$MAJOR_TAG-arm64 \
|
||||
--tag fosrl/pangolin:ee-$$MINOR_TAG-arm64 \
|
||||
--tag fosrl/pangolin:ee-$(tag)-arm64 \
|
||||
--push . && \
|
||||
docker buildx build \
|
||||
--build-arg BUILD=enterprise \
|
||||
--build-arg DATABASE=pg \
|
||||
--build-arg VERSION=$(tag) \
|
||||
--build-arg REVISION=$$REVISION \
|
||||
--build-arg CREATED=$$CREATED \
|
||||
--build-arg LICENSE="Fossorial Commercial" \
|
||||
--build-arg IMAGE_TITLE="Pangolin EE" \
|
||||
--build-arg IMAGE_DESCRIPTION="Pangolin Enterprise Edition - Identity-aware VPN and proxy for remote access to anything, anywhere" \
|
||||
--platform linux/arm64 \
|
||||
--tag fosrl/pangolin:ee-postgresql-latest-arm64 \
|
||||
--tag fosrl/pangolin:ee-postgresql-$$MAJOR_TAG-arm64 \
|
||||
--tag fosrl/pangolin:ee-postgresql-$$MINOR_TAG-arm64 \
|
||||
--tag fosrl/pangolin:ee-postgresql-$(tag)-arm64 \
|
||||
--push .
|
||||
|
||||
build-release-amd:
|
||||
@if [ -z "$(tag)" ]; then \
|
||||
echo "Error: tag is required. Usage: make build-release-amd tag=<tag>"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@MAJOR_TAG=$$(echo $(tag) | cut -d. -f1); \
|
||||
MINOR_TAG=$$(echo $(tag) | cut -d. -f1,2); \
|
||||
CREATED=$$(date -u +"%Y-%m-%dT%H:%M:%SZ"); \
|
||||
REVISION=$$(git rev-parse HEAD 2>/dev/null || echo "unknown"); \
|
||||
docker buildx build \
|
||||
--build-arg BUILD=oss \
|
||||
--build-arg DATABASE=sqlite \
|
||||
--build-arg VERSION=$(tag) \
|
||||
--build-arg REVISION=$$REVISION \
|
||||
--build-arg CREATED=$$CREATED \
|
||||
--build-arg IMAGE_TITLE="Pangolin" \
|
||||
--build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
|
||||
--platform linux/amd64 \
|
||||
--tag fosrl/pangolin:latest-amd64 \
|
||||
--tag fosrl/pangolin:$$MAJOR_TAG-amd64 \
|
||||
--tag fosrl/pangolin:$$MINOR_TAG-amd64 \
|
||||
--tag fosrl/pangolin:$(tag)-amd64 \
|
||||
--push . && \
|
||||
docker buildx build \
|
||||
--build-arg BUILD=oss \
|
||||
--build-arg DATABASE=pg \
|
||||
--build-arg VERSION=$(tag) \
|
||||
--build-arg REVISION=$$REVISION \
|
||||
--build-arg CREATED=$$CREATED \
|
||||
--build-arg IMAGE_TITLE="Pangolin" \
|
||||
--build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
|
||||
--platform linux/amd64 \
|
||||
--tag fosrl/pangolin:postgresql-latest-amd64 \
|
||||
--tag fosrl/pangolin:postgresql-$$MAJOR_TAG-amd64 \
|
||||
--tag fosrl/pangolin:postgresql-$$MINOR_TAG-amd64 \
|
||||
--tag fosrl/pangolin:postgresql-$(tag)-amd64 \
|
||||
--push . && \
|
||||
docker buildx build \
|
||||
--build-arg BUILD=enterprise \
|
||||
--build-arg DATABASE=sqlite \
|
||||
--build-arg VERSION=$(tag) \
|
||||
--build-arg REVISION=$$REVISION \
|
||||
--build-arg CREATED=$$CREATED \
|
||||
--build-arg LICENSE="Fossorial Commercial" \
|
||||
--build-arg IMAGE_TITLE="Pangolin EE" \
|
||||
--build-arg IMAGE_DESCRIPTION="Pangolin Enterprise Edition - Identity-aware VPN and proxy for remote access to anything, anywhere" \
|
||||
--platform linux/amd64 \
|
||||
--tag fosrl/pangolin:ee-latest-amd64 \
|
||||
--tag fosrl/pangolin:ee-$$MAJOR_TAG-amd64 \
|
||||
--tag fosrl/pangolin:ee-$$MINOR_TAG-amd64 \
|
||||
--tag fosrl/pangolin:ee-$(tag)-amd64 \
|
||||
--push . && \
|
||||
docker buildx build \
|
||||
--build-arg BUILD=enterprise \
|
||||
--build-arg DATABASE=pg \
|
||||
--build-arg VERSION=$(tag) \
|
||||
--build-arg REVISION=$$REVISION \
|
||||
--build-arg CREATED=$$CREATED \
|
||||
--build-arg LICENSE="Fossorial Commercial" \
|
||||
--build-arg IMAGE_TITLE="Pangolin EE" \
|
||||
--build-arg IMAGE_DESCRIPTION="Pangolin Enterprise Edition - Identity-aware VPN and proxy for remote access to anything, anywhere" \
|
||||
--platform linux/amd64 \
|
||||
--tag fosrl/pangolin:ee-postgresql-latest-amd64 \
|
||||
--tag fosrl/pangolin:ee-postgresql-$$MAJOR_TAG-amd64 \
|
||||
--tag fosrl/pangolin:ee-postgresql-$$MINOR_TAG-amd64 \
|
||||
--tag fosrl/pangolin:ee-postgresql-$(tag)-amd64 \
|
||||
--push .
|
||||
|
||||
create-manifests:
|
||||
@if [ -z "$(tag)" ]; then \
|
||||
echo "Error: tag is required. Usage: make create-manifests tag=<tag>"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@MAJOR_TAG=$$(echo $(tag) | cut -d. -f1); \
|
||||
MINOR_TAG=$$(echo $(tag) | cut -d. -f1,2); \
|
||||
echo "Creating multi-arch manifests for sqlite (oss)..." && \
|
||||
docker buildx imagetools create \
|
||||
--tag fosrl/pangolin:latest \
|
||||
--tag fosrl/pangolin:$$MAJOR_TAG \
|
||||
--tag fosrl/pangolin:$$MINOR_TAG \
|
||||
--tag fosrl/pangolin:$(tag) \
|
||||
fosrl/pangolin:latest-arm64 \
|
||||
fosrl/pangolin:latest-amd64 && \
|
||||
echo "Creating multi-arch manifests for postgresql (oss)..." && \
|
||||
docker buildx imagetools create \
|
||||
--tag fosrl/pangolin:postgresql-latest \
|
||||
--tag fosrl/pangolin:postgresql-$$MAJOR_TAG \
|
||||
--tag fosrl/pangolin:postgresql-$$MINOR_TAG \
|
||||
--tag fosrl/pangolin:postgresql-$(tag) \
|
||||
fosrl/pangolin:postgresql-latest-arm64 \
|
||||
fosrl/pangolin:postgresql-latest-amd64 && \
|
||||
echo "Creating multi-arch manifests for sqlite (enterprise)..." && \
|
||||
docker buildx imagetools create \
|
||||
--tag fosrl/pangolin:ee-latest \
|
||||
--tag fosrl/pangolin:ee-$$MAJOR_TAG \
|
||||
--tag fosrl/pangolin:ee-$$MINOR_TAG \
|
||||
--tag fosrl/pangolin:ee-$(tag) \
|
||||
fosrl/pangolin:ee-latest-arm64 \
|
||||
fosrl/pangolin:ee-latest-amd64 && \
|
||||
echo "Creating multi-arch manifests for postgresql (enterprise)..." && \
|
||||
docker buildx imagetools create \
|
||||
--tag fosrl/pangolin:ee-postgresql-latest \
|
||||
--tag fosrl/pangolin:ee-postgresql-$$MAJOR_TAG \
|
||||
--tag fosrl/pangolin:ee-postgresql-$$MINOR_TAG \
|
||||
--tag fosrl/pangolin:ee-postgresql-$(tag) \
|
||||
fosrl/pangolin:ee-postgresql-latest-arm64 \
|
||||
fosrl/pangolin:ee-postgresql-latest-amd64 && \
|
||||
echo "All multi-arch manifests created successfully!"
|
||||
|
||||
build-rc:
|
||||
@if [ -z "$(tag)" ]; then \
|
||||
echo "Error: tag is required. Usage: make build-release tag=<tag>"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@CREATED=$$(date -u +"%Y-%m-%dT%H:%M:%SZ"); \
|
||||
REVISION=$$(git rev-parse HEAD 2>/dev/null || echo "unknown"); \
|
||||
docker buildx build \
|
||||
--build-arg BUILD=oss \
|
||||
--build-arg DATABASE=sqlite \
|
||||
--build-arg VERSION=$(tag) \
|
||||
--build-arg REVISION=$$REVISION \
|
||||
--build-arg CREATED=$$CREATED \
|
||||
--build-arg IMAGE_TITLE="Pangolin" \
|
||||
--build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
|
||||
--platform linux/arm64,linux/amd64 \
|
||||
--tag fosrl/pangolin:$(tag) \
|
||||
--push . && \
|
||||
docker buildx build \
|
||||
--build-arg BUILD=oss \
|
||||
--build-arg DATABASE=pg \
|
||||
--build-arg VERSION=$(tag) \
|
||||
--build-arg REVISION=$$REVISION \
|
||||
--build-arg CREATED=$$CREATED \
|
||||
--build-arg IMAGE_TITLE="Pangolin" \
|
||||
--build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
|
||||
--platform linux/arm64,linux/amd64 \
|
||||
--tag fosrl/pangolin:postgresql-$(tag) \
|
||||
--push . && \
|
||||
docker buildx build \
|
||||
--build-arg BUILD=enterprise \
|
||||
--build-arg DATABASE=sqlite \
|
||||
--build-arg VERSION=$(tag) \
|
||||
--build-arg REVISION=$$REVISION \
|
||||
--build-arg CREATED=$$CREATED \
|
||||
--build-arg LICENSE="Fossorial Commercial" \
|
||||
--build-arg IMAGE_TITLE="Pangolin EE" \
|
||||
--build-arg IMAGE_DESCRIPTION="Pangolin Enterprise Edition - Identity-aware VPN and proxy for remote access to anything, anywhere" \
|
||||
--platform linux/arm64,linux/amd64 \
|
||||
--tag fosrl/pangolin:ee-$(tag) \
|
||||
--push . && \
|
||||
docker buildx build \
|
||||
--build-arg BUILD=enterprise \
|
||||
--build-arg DATABASE=pg \
|
||||
--build-arg VERSION=$(tag) \
|
||||
--build-arg REVISION=$$REVISION \
|
||||
--build-arg CREATED=$$CREATED \
|
||||
--build-arg LICENSE="Fossorial Commercial" \
|
||||
--build-arg IMAGE_TITLE="Pangolin EE" \
|
||||
--build-arg IMAGE_DESCRIPTION="Pangolin Enterprise Edition - Identity-aware VPN and proxy for remote access to anything, anywhere" \
|
||||
--platform linux/arm64,linux/amd64 \
|
||||
--tag fosrl/pangolin:ee-postgresql-$(tag) \
|
||||
--push .
|
||||
|
||||
build-rc-arm:
|
||||
@if [ -z "$(tag)" ]; then \
|
||||
echo "Error: tag is required. Usage: make build-rc-arm tag=<tag>"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@CREATED=$$(date -u +"%Y-%m-%dT%H:%M:%SZ"); \
|
||||
REVISION=$$(git rev-parse HEAD 2>/dev/null || echo "unknown"); \
|
||||
docker buildx build \
|
||||
--build-arg BUILD=oss \
|
||||
--build-arg DATABASE=sqlite \
|
||||
--build-arg VERSION=$(tag) \
|
||||
--build-arg REVISION=$$REVISION \
|
||||
--build-arg CREATED=$$CREATED \
|
||||
--build-arg IMAGE_TITLE="Pangolin" \
|
||||
--build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
|
||||
--platform linux/arm64 \
|
||||
--tag fosrl/pangolin:$(tag)-arm64 \
|
||||
--push . && \
|
||||
docker buildx build \
|
||||
--build-arg BUILD=oss \
|
||||
--build-arg DATABASE=pg \
|
||||
--build-arg VERSION=$(tag) \
|
||||
--build-arg REVISION=$$REVISION \
|
||||
--build-arg CREATED=$$CREATED \
|
||||
--build-arg IMAGE_TITLE="Pangolin" \
|
||||
--build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
|
||||
--platform linux/arm64 \
|
||||
--tag fosrl/pangolin:postgresql-$(tag)-arm64 \
|
||||
--push . && \
|
||||
docker buildx build \
|
||||
--build-arg BUILD=enterprise \
|
||||
--build-arg DATABASE=sqlite \
|
||||
--build-arg VERSION=$(tag) \
|
||||
--build-arg REVISION=$$REVISION \
|
||||
--build-arg CREATED=$$CREATED \
|
||||
--build-arg LICENSE="Fossorial Commercial" \
|
||||
--build-arg IMAGE_TITLE="Pangolin EE" \
|
||||
--build-arg IMAGE_DESCRIPTION="Pangolin Enterprise Edition - Identity-aware VPN and proxy for remote access to anything, anywhere" \
|
||||
--platform linux/arm64 \
|
||||
--tag fosrl/pangolin:ee-$(tag)-arm64 \
|
||||
--push . && \
|
||||
docker buildx build \
|
||||
--build-arg BUILD=enterprise \
|
||||
--build-arg DATABASE=pg \
|
||||
--build-arg VERSION=$(tag) \
|
||||
--build-arg REVISION=$$REVISION \
|
||||
--build-arg CREATED=$$CREATED \
|
||||
--build-arg LICENSE="Fossorial Commercial" \
|
||||
--build-arg IMAGE_TITLE="Pangolin EE" \
|
||||
--build-arg IMAGE_DESCRIPTION="Pangolin Enterprise Edition - Identity-aware VPN and proxy for remote access to anything, anywhere" \
|
||||
--platform linux/arm64 \
|
||||
--tag fosrl/pangolin:ee-postgresql-$(tag)-arm64 \
|
||||
--push .
|
||||
|
||||
# Build and push all four RC amd64 image variants (oss/enterprise x sqlite/pg).
# Requires tag=<tag>; each image is tagged fosrl/pangolin:<variant><tag>-amd64.
# REVISION falls back to "unknown" when not run inside a git checkout.
build-rc-amd:
	@if [ -z "$(tag)" ]; then \
		echo "Error: tag is required. Usage: make build-rc-amd tag=<tag>"; \
		exit 1; \
	fi
	@CREATED=$$(date -u +"%Y-%m-%dT%H:%M:%SZ"); \
	REVISION=$$(git rev-parse HEAD 2>/dev/null || echo "unknown"); \
	docker buildx build \
		--build-arg BUILD=oss \
		--build-arg DATABASE=sqlite \
		--build-arg VERSION=$(tag) \
		--build-arg REVISION=$$REVISION \
		--build-arg CREATED=$$CREATED \
		--build-arg IMAGE_TITLE="Pangolin" \
		--build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
		--platform linux/amd64 \
		--tag fosrl/pangolin:$(tag)-amd64 \
		--push . && \
	docker buildx build \
		--build-arg BUILD=oss \
		--build-arg DATABASE=pg \
		--build-arg VERSION=$(tag) \
		--build-arg REVISION=$$REVISION \
		--build-arg CREATED=$$CREATED \
		--build-arg IMAGE_TITLE="Pangolin" \
		--build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
		--platform linux/amd64 \
		--tag fosrl/pangolin:postgresql-$(tag)-amd64 \
		--push . && \
	docker buildx build \
		--build-arg BUILD=enterprise \
		--build-arg DATABASE=sqlite \
		--build-arg VERSION=$(tag) \
		--build-arg REVISION=$$REVISION \
		--build-arg CREATED=$$CREATED \
		--build-arg LICENSE="Fossorial Commercial" \
		--build-arg IMAGE_TITLE="Pangolin EE" \
		--build-arg IMAGE_DESCRIPTION="Pangolin Enterprise Edition - Identity-aware VPN and proxy for remote access to anything, anywhere" \
		--platform linux/amd64 \
		--tag fosrl/pangolin:ee-$(tag)-amd64 \
		--push . && \
	docker buildx build \
		--build-arg BUILD=enterprise \
		--build-arg DATABASE=pg \
		--build-arg VERSION=$(tag) \
		--build-arg REVISION=$$REVISION \
		--build-arg CREATED=$$CREATED \
		--build-arg LICENSE="Fossorial Commercial" \
		--build-arg IMAGE_TITLE="Pangolin EE" \
		--build-arg IMAGE_DESCRIPTION="Pangolin Enterprise Edition - Identity-aware VPN and proxy for remote access to anything, anywhere" \
		--platform linux/amd64 \
		--tag fosrl/pangolin:ee-postgresql-$(tag)-amd64 \
		--push .
|
||||
|
||||
# Stitch the previously pushed -arm64 and -amd64 RC images into multi-arch
# manifests, one per variant. Requires tag=<tag>; the per-arch images must
# already exist in the registry (see build-rc-arm / build-rc-amd).
create-manifests-rc:
	@if [ -z "$(tag)" ]; then \
		echo "Error: tag is required. Usage: make create-manifests-rc tag=<tag>"; \
		exit 1; \
	fi
	@echo "Creating multi-arch manifests for RC sqlite (oss)..." && \
	docker buildx imagetools create \
		--tag fosrl/pangolin:$(tag) \
		fosrl/pangolin:$(tag)-arm64 \
		fosrl/pangolin:$(tag)-amd64 && \
	echo "Creating multi-arch manifests for RC postgresql (oss)..." && \
	docker buildx imagetools create \
		--tag fosrl/pangolin:postgresql-$(tag) \
		fosrl/pangolin:postgresql-$(tag)-arm64 \
		fosrl/pangolin:postgresql-$(tag)-amd64 && \
	echo "Creating multi-arch manifests for RC sqlite (enterprise)..." && \
	docker buildx imagetools create \
		--tag fosrl/pangolin:ee-$(tag) \
		fosrl/pangolin:ee-$(tag)-arm64 \
		fosrl/pangolin:ee-$(tag)-amd64 && \
	echo "Creating multi-arch manifests for RC postgresql (enterprise)..." && \
	docker buildx imagetools create \
		--tag fosrl/pangolin:ee-postgresql-$(tag) \
		fosrl/pangolin:ee-postgresql-$(tag)-arm64 \
		fosrl/pangolin:ee-postgresql-$(tag)-amd64 && \
	echo "All RC multi-arch manifests created successfully!"
|
||||
|
||||
# Build a dev arm64 image with OCI metadata (version/revision/created labels).
# Fix: the original target ran a second, bare `docker buildx build` for the
# same tag immediately afterwards, which rebuilt the image WITHOUT any of the
# metadata build args and overwrote the labeled result — the redundant
# rebuild is removed.
build-arm:
	@CREATED=$$(date -u +"%Y-%m-%dT%H:%M:%SZ"); \
	REVISION=$$(git rev-parse HEAD 2>/dev/null || echo "unknown"); \
	docker buildx build \
		--build-arg VERSION=dev \
		--build-arg REVISION=$$REVISION \
		--build-arg CREATED=$$CREATED \
		--build-arg IMAGE_TITLE="Pangolin" \
		--build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
		--platform linux/arm64 \
		-t fosrl/pangolin:latest .
|
||||
|
||||
# Build a dev amd64 image with OCI metadata (version/revision/created labels).
# Fix: the original target ran a second, bare `docker buildx build` for the
# same tag immediately afterwards, which rebuilt the image WITHOUT any of the
# metadata build args and overwrote the labeled result — the redundant
# rebuild is removed.
build-x86:
	@CREATED=$$(date -u +"%Y-%m-%dT%H:%M:%SZ"); \
	REVISION=$$(git rev-parse HEAD 2>/dev/null || echo "unknown"); \
	docker buildx build \
		--build-arg VERSION=dev \
		--build-arg REVISION=$$REVISION \
		--build-arg CREATED=$$CREATED \
		--build-arg IMAGE_TITLE="Pangolin" \
		--build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
		--platform linux/amd64 \
		-t fosrl/pangolin:latest .
|
||||
|
||||
# Dev build of the sqlite variant for the host architecture, with full
# OCI metadata labels baked in.
dev-build-sqlite:
	@CREATED=$$(date -u +"%Y-%m-%dT%H:%M:%SZ"); \
	REVISION=$$(git rev-parse HEAD 2>/dev/null || echo "unknown"); \
	docker build \
		--build-arg DATABASE=sqlite \
		--build-arg VERSION=dev \
		--build-arg REVISION=$$REVISION \
		--build-arg CREATED=$$CREATED \
		--build-arg IMAGE_TITLE="Pangolin" \
		--build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
		-t fosrl/pangolin:latest .

# Minimal sqlite build with no metadata labels (quick local builds).
build-sqlite:
	docker build --build-arg DATABASE=sqlite -t fosrl/pangolin:latest .
|
||||
|
||||
# Dev build of the PostgreSQL variant for the host architecture, with full
# OCI metadata labels baked in.
dev-build-pg:
	@CREATED=$$(date -u +"%Y-%m-%dT%H:%M:%SZ"); \
	REVISION=$$(git rev-parse HEAD 2>/dev/null || echo "unknown"); \
	docker build \
		--build-arg DATABASE=pg \
		--build-arg VERSION=dev \
		--build-arg REVISION=$$REVISION \
		--build-arg CREATED=$$CREATED \
		--build-arg IMAGE_TITLE="Pangolin" \
		--build-arg IMAGE_DESCRIPTION="Identity-aware VPN and proxy for remote access to anything, anywhere" \
		-t fosrl/pangolin:postgresql-latest .

# Minimal PostgreSQL build with no metadata labels (quick local builds).
build-pg:
	docker build --build-arg DATABASE=pg -t fosrl/pangolin:postgresql-latest .
|
||||
|
||||
# Run the locally built image interactively, exposing ports 3000-3002 and
# mounting ./config for persistent configuration.
# NOTE(review): consider declaring this target .PHONY — a file named "test"
# in the repo root would shadow it.
test:
	docker run -it -p 3000:3000 -p 3001:3001 -p 3002:3002 -v ./config:/app/config fosrl/pangolin:latest
|
||||
|
||||
168
README.md
@@ -1,109 +1,157 @@
|
||||
<div align="center">
|
||||
<h2>
|
||||
<a href="https://pangolin.net/">
|
||||
<picture>
|
||||
<source media="(prefers-color-scheme: dark)" srcset="public/logo/word_mark_white.png">
|
||||
<img alt="Pangolin Logo" src="public/logo/word_mark_black.png" width="350">
|
||||
<picture>
|
||||
<source media="(prefers-color-scheme: dark)" srcset="public/logo/word_mark_white.png">
|
||||
<img alt="Pangolin Logo" src="public/logo/word_mark_black.png" width="250">
|
||||
</picture>
|
||||
</a>
|
||||
</h2>
|
||||
</div>
|
||||
|
||||
<h4 align="center">Secure gateway to your private networks</h4>
|
||||
<div align="center">
|
||||
|
||||
_Pangolin tunnels your services to the internet so you can access anything from anywhere._
|
||||
|
||||
</div>
|
||||
|
||||
<div align="center">
|
||||
<h5>
|
||||
<a href="https://pangolin.net/">
|
||||
<a href="https://digpangolin.com">
|
||||
Website
|
||||
</a>
|
||||
<span> | </span>
|
||||
<a href="https://docs.pangolin.net/">
|
||||
Documentation
|
||||
<a href="https://docs.digpangolin.com/self-host/quick-install-managed">
|
||||
Quick Install Guide
|
||||
</a>
|
||||
<span> | </span>
|
||||
<a href="mailto:contact@pangolin.net">
|
||||
<a href="mailto:contact@fossorial.io">
|
||||
Contact Us
|
||||
</a>
|
||||
<span> | </span>
|
||||
<a href="https://digpangolin.com/slack">
|
||||
Slack
|
||||
</a>
|
||||
<span> | </span>
|
||||
<a href="https://discord.gg/HCJR8Xhme4">
|
||||
Discord
|
||||
</a>
|
||||
</h5>
|
||||
</div>
|
||||
|
||||
<div align="center">
|
||||
|
||||
[](https://discord.gg/HCJR8Xhme4)
|
||||
[](https://pangolin.net/slack)
|
||||
[](https://digpangolin.com/slack)
|
||||
[](https://hub.docker.com/r/fosrl/pangolin)
|
||||

|
||||
[](https://www.youtube.com/@pangolin-net)
|
||||
[](https://discord.gg/HCJR8Xhme4)
|
||||
[](https://www.youtube.com/@fossorial-app)
|
||||
|
||||
</div>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://docs.pangolin.net/careers/join-us">
|
||||
<img src="https://img.shields.io/badge/🚀_We're_Hiring!-Join_Our_Team-brightgreen?style=for-the-badge" alt="We're Hiring!" />
|
||||
</a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<strong>
|
||||
Start testing Pangolin at <a href="https://app.pangolin.net/auth/signup">app.pangolin.net</a>
|
||||
Start testing Pangolin at <a href="https://pangolin.fossorial.io/auth/signup">pangolin.fossorial.io</a>
|
||||
</strong>
|
||||
</p>
|
||||
|
||||
Pangolin is an open-source, identity-based remote access platform built on WireGuard that enables secure, seamless connectivity to private and public resources. Pangolin combines reverse proxy and VPN capabilities into one platform, providing browser-based access to web applications and client-based access to any private resources, all with zero-trust security and granular access control.
|
||||
Pangolin is a self-hosted tunneled reverse proxy server with identity and access control, designed to securely expose private resources on distributed networks. Acting as a central hub, it connects isolated networks — even those behind restrictive firewalls — through encrypted tunnels, enabling easy access to remote services without opening ports.
|
||||
|
||||
## Installation
|
||||
<img src="public/screenshots/hero.png" alt="Preview"/>
|
||||
|
||||
- Check out the [quick install guide](https://docs.pangolin.net/self-host/quick-install) for how to install and set up Pangolin.
|
||||
- Install from the [DigitalOcean marketplace](https://marketplace.digitalocean.com/apps/pangolin-ce-1?refcode=edf0480eeb81) for a one-click pre-configured installer.
|
||||
|
||||
<img src="public/screenshots/hero.png" />
|
||||
|
||||
## Deployment Options
|
||||
|
||||
| <img width=500 /> | Description |
|
||||
|-----------------|--------------|
|
||||
| **Self-Host: Community Edition** | Free, open source, and licensed under AGPL-3. |
|
||||
| **Self-Host: Enterprise Edition** | Licensed under Fossorial Commercial License. Free for personal and hobbyist use, and for businesses earning under \$100K USD annually. |
|
||||
| **Pangolin Cloud** | Fully managed service with instant setup and pay-as-you-go pricing — no infrastructure required. Or, self-host your own [remote node](https://docs.pangolin.net/manage/remote-node/nodes) and connect to our control plane. |
|
||||

|
||||
|
||||
## Key Features
|
||||
|
||||
| <img width=500 /> | <img width=500 /> |
|
||||
|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------|
|
||||
| **Connect remote networks with sites**<br /><br />Pangolin's lightweight site connectors create secure tunnels from remote networks without requiring public IP addresses or open ports. Sites make any network anywhere available for authorized access. | <img src="public/screenshots/sites.png" width=500 /><tr></tr> |
|
||||
| **Browser-based reverse proxy access**<br /><br />Expose web applications through identity and context-aware tunneled reverse proxies. Pangolin handles routing, load balancing, health checking, and automatic SSL certificates without exposing your network directly to the internet. Users access applications through any web browser with authentication and granular access control. | <img src="public/clip.gif" width=500 /><tr></tr> |
|
||||
| **Client-based private resource access**<br /><br />Access private resources like SSH servers, databases, RDP, and entire network ranges through Pangolin clients. Intelligent NAT traversal enables connections even through restrictive firewalls, while DNS aliases provide friendly names and fast connections to resources across all your sites. | <img src="public/screenshots/private-resources.png" width=500 /><tr></tr> |
|
||||
| **Zero-trust granular access**<br /><br />Grant users access to specific resources, not entire networks. Unlike traditional VPNs that expose full network access, Pangolin's zero-trust model ensures users can only reach the applications and services you explicitly define, reducing security risk and attack surface. | <img src="public/screenshots/user-devices.png" width=500 /><tr></tr> |
|
||||
### Reverse Proxy Through WireGuard Tunnel
|
||||
|
||||
## Download Clients
|
||||
- Expose private resources on your network **without opening ports** (firewall punching).
|
||||
- Secure and easy to configure private connectivity via a custom **user space WireGuard client**, [Newt](https://github.com/fosrl/newt).
|
||||
- Built-in support for any WireGuard client.
|
||||
- Automated **SSL certificates** (https) via [LetsEncrypt](https://letsencrypt.org/).
|
||||
- Support for HTTP/HTTPS and **raw TCP/UDP services**.
|
||||
- Load balancing.
|
||||
- Extend functionality with existing [Traefik](https://github.com/traefik/traefik) plugins, such as [CrowdSec](https://plugins.traefik.io/plugins/6335346ca4caa9ddeffda116/crowdsec-bouncer-traefik-plugin) and [Geoblock](https://github.com/PascalMinder/geoblock).
|
||||
- **Automatically install and configure Crowdsec via Pangolin's installer script.**
|
||||
- Attach as many sites to the central server as you wish.
|
||||
|
||||
Download the Pangolin client for your platform:
|
||||
### Identity & Access Management
|
||||
|
||||
- [Mac](https://pangolin.net/downloads/mac)
|
||||
- [Windows](https://pangolin.net/downloads/windows)
|
||||
- [Linux](https://pangolin.net/downloads/linux)
|
||||
- [iOS](https://pangolin.net/downloads/ios)
|
||||
- [Android](https://pangolin.net/downloads/android)
|
||||
- Centralized authentication system using platform SSO. **Users will only have to manage one login.**
|
||||
- **Define access control rules for IPs, IP ranges, and URL paths per resource.**
|
||||
- TOTP with backup codes for two-factor authentication.
|
||||
- Create organizations, each with multiple sites, users, and roles.
|
||||
- **Role-based access control** to manage resource access permissions.
|
||||
- Additional authentication options include:
|
||||
- Email whitelisting with **one-time passcodes.**
|
||||
- **Temporary, self-destructing share links.**
|
||||
- Resource specific pin codes.
|
||||
- Resource specific passwords.
|
||||
- Passkeys
|
||||
- External identity provider (IdP) support with OAuth2/OIDC, such as Authentik, Keycloak, Okta, and others.
|
||||
- Auto-provision users and roles from your IdP.
|
||||
|
||||
## Get Started
|
||||
<img src="public/auth-diagram1.png" alt="Auth and diagram"/>
|
||||
|
||||
### Check out the docs
|
||||
## Use Cases
|
||||
|
||||
We encourage everyone to read the full documentation first, which is
|
||||
available at [docs.pangolin.net](https://docs.pangolin.net). This README provides only a very brief subset of
|
||||
the docs to illustrate some basic ideas.
|
||||
### Manage Access to Internal Apps
|
||||
|
||||
### Sign up and try now
|
||||
- Grant users access to your apps from anywhere using just a web browser. No client software required.
|
||||
|
||||
For Pangolin's managed service, you will first need to create an account at
|
||||
[app.pangolin.net](https://app.pangolin.net). We have a generous free tier to get started.
|
||||
### Developers and DevOps
|
||||
|
||||
- Expose and test internal tools and dashboards like **Grafana**. Bring localhost or private IPs online for easy access.
|
||||
|
||||
### Secure API Gateway
|
||||
|
||||
- One application load balancer across multiple clouds and on-premises.
|
||||
|
||||
### IoT and Edge Devices
|
||||
|
||||
- Easily expose **IoT devices**, **edge servers**, or **Raspberry Pi** to the internet for field equipment monitoring.
|
||||
|
||||
<img src="public/screenshots/sites.png" alt="Sites"/>
|
||||
|
||||
## Deployment Options
|
||||
|
||||
### Fully Self Hosted
|
||||
|
||||
Host the full application on your own server or on the cloud with a VPS. Take a look at the [documentation](https://docs.digpangolin.com/self-host/quick-install) to get started.
|
||||
|
||||
> Many of our users have had a great experience with [RackNerd](https://my.racknerd.com/aff.php?aff=13788). Depending on promotions, you can get a [**VPS with 1 vCPU, 1GB RAM, and ~20GB SSD for just around $12/year**](https://my.racknerd.com/aff.php?aff=13788&pid=912). That's a great deal!
|
||||
|
||||
### Pangolin Cloud
|
||||
|
||||
Easy to use with simple [pay as you go pricing](https://digpangolin.com/pricing). [Check it out here](https://pangolin.fossorial.io/auth/signup).
|
||||
|
||||
- Everything you get with self hosted Pangolin, but fully managed for you.
|
||||
|
||||
### Managed & High Availability
|
||||
|
||||
Managed control plane, your infrastructure
|
||||
|
||||
- We manage database and control plane.
|
||||
- You self-host lightweight exit-node.
|
||||
- Traffic flows through your infra.
|
||||
- We coordinate failover between your nodes or to Cloud when things go bad.
|
||||
|
||||
Try it out using [Pangolin Cloud](https://pangolin.fossorial.io)
|
||||
|
||||
### Full Enterprise On-Premises
|
||||
|
||||
[Contact us](mailto:numbat@fossorial.io) for a full distributed and enterprise deployments on your infrastructure controlled by your team.
|
||||
|
||||
## Project Development / Roadmap
|
||||
|
||||
We want to hear your feature requests! Add them to the [discussion board](https://github.com/orgs/fosrl/discussions/categories/feature-requests).
|
||||
|
||||
## Licensing
|
||||
|
||||
Pangolin is dual licensed under the AGPL-3 and the [Fossorial Commercial License](https://pangolin.net/fcl.html). For inquiries about commercial licensing, please contact us at [contact@pangolin.net](mailto:contact@pangolin.net).
|
||||
Pangolin is dual licensed under the AGPL-3 and the Fossorial Commercial license. For inquiries about commercial licensing, please contact us at [numbat@fossorial.io](mailto:numbat@fossorial.io).
|
||||
|
||||
## Contributions
|
||||
|
||||
Looking for something to contribute? Take a look at issues marked with [help wanted](https://github.com/fosrl/pangolin/issues?q=is%3Aissue%20state%3Aopen%20label%3A%22help%20wanted%22). Also take a look through the feature requests in Discussions — many are available, and some are marked as a good first issue.
|
||||
|
||||
Please see [CONTRIBUTING](./CONTRIBUTING.md) in the repository for guidelines and best practices.
|
||||
|
||||
---
|
||||
Please post bug reports and other functional issues in the [Issues](https://github.com/fosrl/pangolin/issues) section of the repository.
|
||||
|
||||
WireGuard® is a registered trademark of Jason A. Donenfeld.
|
||||
If you are looking to help with translations, please contribute [on Crowdin](https://crowdin.com/project/fossorial-pangolin) or open a PR with changes to the translations files found in `messages/`.
|
||||
@@ -3,7 +3,7 @@
|
||||
If you discover a security vulnerability, please follow the steps below to responsibly disclose it to us:
|
||||
|
||||
1. **Do not create a public GitHub issue or discussion post.** This could put the security of other users at risk.
|
||||
2. Send a detailed report to [security@pangolin.net](mailto:security@pangolin.net) or send a **private** message to a maintainer on [Discord](https://discord.gg/HCJR8Xhme4). Include:
|
||||
2. Send a detailed report to [security@fossorial.io](mailto:security@fossorial.io) or send a **private** message to a maintainer on [Discord](https://discord.gg/HCJR8Xhme4). Include:
|
||||
|
||||
- Description and location of the vulnerability.
|
||||
- Potential impact of the vulnerability.
|
||||
|
||||
72
blueprint.py
Normal file
@@ -0,0 +1,72 @@
|
||||
import requests
|
||||
import yaml
|
||||
import json
|
||||
import base64
|
||||
|
||||
# The file path for the YAML file to be read.
# Change this to point at your own blueprint YAML file.
YAML_FILE_PATH = 'blueprint.yaml'

# The API endpoint and headers mirrored from the original curl request.
# NOTE(review): the endpoint is plain http — confirm whether https is
# required before sending real credentials.
API_URL = 'http://api.pangolin.fossorial.io/v1/org/test/blueprint'
HEADERS = {
    'accept': '*/*',
    # Placeholder — replace with a real API token before running.
    'Authorization': 'Bearer <your_token_here>',
    'Content-Type': 'application/json'
}
|
||||
|
||||
def convert_and_send(file_path, url, headers):
    """Load a YAML blueprint, wrap it as base64-encoded JSON, and PUT it.

    The YAML document at ``file_path`` is parsed (which also validates it),
    serialized to JSON, base64-encoded, and sent to ``url`` as the payload
    ``{"blueprint": <base64 string>}``. Errors are printed rather than
    propagated, except HTTP 4xx/5xx which raise via ``raise_for_status``
    and are then caught and reported here.
    """
    try:
        # Parse the YAML file; a parse failure is caught below.
        with open(file_path, 'r') as fh:
            document = yaml.safe_load(fh.read())

        # Serialize the parsed document to a JSON string.
        payload_json = json.dumps(document)
        print("Converted JSON payload:")
        print(payload_json)

        # Base64-encode the JSON text for transport.
        blueprint_b64 = base64.b64encode(payload_json.encode('utf-8')).decode('utf-8')

        # The API expects the encoded blueprint under a single key.
        request_body = {
            "blueprint": blueprint_b64
        }

        print("Sending the following Base64 encoded JSON payload:")
        print(request_body)
        print("-" * 20)

        # Issue the PUT request with the wrapped payload.
        response = requests.put(url, headers=headers, json=request_body)

        # Surface the raw response for debugging.
        print(f"API Response Status Code: {response.status_code}")
        print("API Response Content:")
        print(response.text)

        # Turn 4xx/5xx responses into an exception (caught below).
        response.raise_for_status()

    except FileNotFoundError:
        print(f"Error: The file '{file_path}' was not found.")
    except yaml.YAMLError as e:
        print(f"Error parsing YAML file: {e}")
    except requests.exceptions.RequestException as e:
        print(f"An error occurred during the API request: {e}")
    except Exception as e:
        print(f"An unexpected error occurred: {e}")
||||
|
||||
# Entry point: push the configured blueprint YAML to the API when run as a script.
if __name__ == "__main__":
    convert_and_send(YAML_FILE_PATH, API_URL, HEADERS)
|
||||
|
||||
69
blueprint.yaml
Normal file
@@ -0,0 +1,69 @@
|
||||
# Example Pangolin blueprint, consumed by blueprint.py to exercise the
# /v1/org/<org>/blueprint API endpoint.

# Client resources: private services reached through Pangolin clients,
# keyed by a human-friendly resource id.
client-resources:
  client-resource-nice-id-uno:
    name: this is my resource
    protocol: tcp
    proxy-port: 3001
    hostname: localhost
    internal-port: 3000
    site: lively-yosemite-toad
  client-resource-nice-id-duce:
    name: this is my resource
    protocol: udp
    proxy-port: 3000
    hostname: localhost
    internal-port: 3000
    site: lively-yosemite-toad

# Proxy resources: resources exposed through the tunneled reverse proxy.
proxy-resources:
  resource-nice-id-uno:
    name: this is my resource
    protocol: http
    full-domain: duce.test.example.com
    host-header: example.com
    tls-server-name: example.com
    # Optional authentication settings (disabled in this example):
    # auth:
    #   pincode: 123456
    #   password: sadfasdfadsf
    #   sso-enabled: true
    #   sso-roles:
    #     - Member
    #   sso-users:
    #     - owen@fossorial.io
    #   whitelist-users:
    #     - owen@fossorial.io
    # Extra headers added to proxied requests.
    headers:
      - name: X-Example-Header
        value: example-value
      - name: X-Another-Header
        value: another-value
    # Access rules evaluated against incoming requests.
    rules:
      - action: allow
        match: ip
        value: 1.1.1.1
      - action: deny
        match: cidr
        value: 2.2.2.2/32
      - action: pass
        match: path
        value: /admin
    # Backend targets for this resource.
    # NOTE(review): `pathMatchType` is camelCase while every other key is
    # kebab-case, and `method: http` reads like a scheme rather than an
    # HTTP verb — confirm both against the blueprint schema.
    targets:
      - site: lively-yosemite-toad
        path: /path
        pathMatchType: prefix
        hostname: localhost
        method: http
        port: 8000
      - site: slim-alpine-chipmunk
        hostname: localhost
        path: /yoman
        pathMatchType: exact
        method: http
        port: 8001
  resource-nice-id-duce:
    name: this is other resource
    protocol: tcp
    proxy-port: 3000
    targets:
      - site: lively-yosemite-toad
        hostname: localhost
        port: 3000
||||
@@ -5,14 +5,14 @@ meta {
|
||||
}
|
||||
|
||||
post {
|
||||
url: http://localhost:3000/api/v1/auth/login
|
||||
url: http://localhost:4000/api/v1/auth/login
|
||||
body: json
|
||||
auth: none
|
||||
}
|
||||
|
||||
body:json {
|
||||
{
|
||||
"email": "admin@fosrl.io",
|
||||
"email": "owen@fossorial.io",
|
||||
"password": "Password123!"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,6 +12,6 @@ post {
|
||||
|
||||
body:json {
|
||||
{
|
||||
"email": "milo@pangolin.net"
|
||||
"email": "milo@fossorial.io"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,7 +12,7 @@ put {
|
||||
|
||||
body:json {
|
||||
{
|
||||
"email": "numbat@pangolin.net",
|
||||
"email": "numbat@fossorial.io",
|
||||
"password": "Password123!"
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,15 +0,0 @@
|
||||
meta {
|
||||
name: createOlm
|
||||
type: http
|
||||
seq: 1
|
||||
}
|
||||
|
||||
put {
|
||||
url: http://localhost:3000/api/v1/olm
|
||||
body: none
|
||||
auth: inherit
|
||||
}
|
||||
|
||||
settings {
|
||||
encodeUrl: true
|
||||
}
|
||||
@@ -1,8 +0,0 @@
|
||||
meta {
|
||||
name: Olm
|
||||
seq: 15
|
||||
}
|
||||
|
||||
auth {
|
||||
mode: inherit
|
||||
}
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"version": "1",
|
||||
"name": "Pangolin",
|
||||
"name": "Pangolin Saas",
|
||||
"type": "collection",
|
||||
"ignore": [
|
||||
"node_modules",
|
||||
|
||||
@@ -1,36 +0,0 @@
|
||||
import { CommandModule } from "yargs";
|
||||
import { db, exitNodes } from "@server/db";
|
||||
import { eq } from "drizzle-orm";
|
||||
|
||||
type ClearExitNodesArgs = { };
|
||||
|
||||
export const clearExitNodes: CommandModule<
|
||||
{},
|
||||
ClearExitNodesArgs
|
||||
> = {
|
||||
command: "clear-exit-nodes",
|
||||
describe:
|
||||
"Clear all exit nodes from the database",
|
||||
// no args
|
||||
builder: (yargs) => {
|
||||
return yargs;
|
||||
},
|
||||
handler: async (argv: {}) => {
|
||||
try {
|
||||
|
||||
console.log(`Clearing all exit nodes from the database`);
|
||||
|
||||
// Delete all exit nodes
|
||||
const deletedCount = await db
|
||||
.delete(exitNodes)
|
||||
.where(eq(exitNodes.exitNodeId, exitNodes.exitNodeId)) .returning();; // delete all
|
||||
|
||||
console.log(`Deleted ${deletedCount.length} exit node(s) from the database`);
|
||||
|
||||
process.exit(0);
|
||||
} catch (error) {
|
||||
console.error("Error:", error);
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
};
|
||||
@@ -1,36 +0,0 @@
|
||||
import { CommandModule } from "yargs";
|
||||
import { db, licenseKey } from "@server/db";
|
||||
import { eq } from "drizzle-orm";
|
||||
|
||||
type ClearLicenseKeysArgs = { };
|
||||
|
||||
export const clearLicenseKeys: CommandModule<
|
||||
{},
|
||||
ClearLicenseKeysArgs
|
||||
> = {
|
||||
command: "clear-license-keys",
|
||||
describe:
|
||||
"Clear all license keys from the database",
|
||||
// no args
|
||||
builder: (yargs) => {
|
||||
return yargs;
|
||||
},
|
||||
handler: async (argv: {}) => {
|
||||
try {
|
||||
|
||||
console.log(`Clearing all license keys from the database`);
|
||||
|
||||
// Delete all license keys
|
||||
const deletedCount = await db
|
||||
.delete(licenseKey)
|
||||
.where(eq(licenseKey.licenseKeyId, licenseKey.licenseKeyId)) .returning();; // delete all
|
||||
|
||||
console.log(`Deleted ${deletedCount.length} license key(s) from the database`);
|
||||
|
||||
process.exit(0);
|
||||
} catch (error) {
|
||||
console.error("Error:", error);
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
};
|
||||
@@ -1,123 +0,0 @@
|
||||
import { CommandModule } from "yargs";
|
||||
import { db, clients, olms, currentFingerprint, userClients, approvals } from "@server/db";
|
||||
import { eq, and, inArray } from "drizzle-orm";
|
||||
|
||||
// CLI argument shape for the delete-client command.
type DeleteClientArgs = {
    orgId: string;
    niceId: string;
};

/**
 * Yargs command that deletes one client (looked up by orgId + niceId)
 * together with its dependent rows: OLMs, their current fingerprints,
 * approvals, and userClient associations. Snapshot rows are intentionally
 * preserved. All deletions run in a single transaction; the process exits
 * with code 0 on success and 1 on any failure (including client-not-found).
 */
export const deleteClient: CommandModule<{}, DeleteClientArgs> = {
    command: "delete-client",
    describe:
        "Delete a client and all associated data (OLMs, current fingerprint, userClients, approvals). Snapshots are preserved.",
    builder: (yargs) => {
        return yargs
            .option("orgId", {
                type: "string",
                demandOption: true,
                describe: "The organization ID"
            })
            .option("niceId", {
                type: "string",
                demandOption: true,
                describe: "The client niceId (identifier)"
            });
    },
    handler: async (argv: { orgId: string; niceId: string }) => {
        try {
            const { orgId, niceId } = argv;

            console.log(
                `Deleting client with orgId: ${orgId}, niceId: ${niceId}...`
            );

            // Resolve the (orgId, niceId) pair to the internal clientId.
            const [client] = await db
                .select()
                .from(clients)
                .where(and(eq(clients.orgId, orgId), eq(clients.niceId, niceId)))
                .limit(1);

            if (!client) {
                console.error(
                    `Error: Client with orgId "${orgId}" and niceId "${niceId}" not found.`
                );
                process.exit(1);
            }

            const clientId = client.clientId;
            console.log(`Found client with clientId: ${clientId}`);

            // Collect the OLMs tied to this client up front: their ids are
            // needed for the fingerprint cleanup and the final summary.
            const associatedOlms = await db
                .select()
                .from(olms)
                .where(eq(olms.clientId, clientId));

            console.log(`Found ${associatedOlms.length} OLM(s) associated with this client`);

            // Delete in a transaction to ensure atomicity
            await db.transaction(async (trx) => {
                // Delete currentFingerprint entries for the associated OLMs
                // Note: We delete these explicitly before deleting OLMs to ensure
                // we have control, even though cascade would handle it
                let fingerprintCount = 0;
                if (associatedOlms.length > 0) {
                    const olmIds = associatedOlms.map((olm) => olm.olmId);
                    const deletedFingerprints = await trx
                        .delete(currentFingerprint)
                        .where(inArray(currentFingerprint.olmId, olmIds))
                        .returning();
                    fingerprintCount = deletedFingerprints.length;
                }
                console.log(`Deleted ${fingerprintCount} current fingerprint(s)`);

                // Delete OLMs
                // Note: OLMs have onDelete: "set null" for clientId, so we need to delete them explicitly
                const deletedOlms = await trx
                    .delete(olms)
                    .where(eq(olms.clientId, clientId))
                    .returning();
                console.log(`Deleted ${deletedOlms.length} OLM(s)`);

                // Delete approvals
                // Note: Approvals have onDelete: "cascade" but we delete explicitly for clarity
                const deletedApprovals = await trx
                    .delete(approvals)
                    .where(eq(approvals.clientId, clientId))
                    .returning();
                console.log(`Deleted ${deletedApprovals.length} approval(s)`);

                // Delete userClients
                // Note: userClients have onDelete: "cascade" but we delete explicitly for clarity
                const deletedUserClients = await trx
                    .delete(userClients)
                    .where(eq(userClients.clientId, clientId))
                    .returning();
                console.log(`Deleted ${deletedUserClients.length} userClient association(s)`);

                // Finally, delete the client itself
                const deletedClients = await trx
                    .delete(clients)
                    .where(eq(clients.clientId, clientId))
                    .returning();
                console.log(`Deleted client: ${deletedClients[0]?.name || niceId}`);
            });

            console.log("\nClient deletion completed successfully!");
            console.log("\nSummary:");
            console.log(` - Client: ${niceId} (clientId: ${clientId})`);
            console.log(` - Olm(s): ${associatedOlms.length}`);
            console.log(` - Current fingerprints: deleted`);
            console.log(` - Approvals: deleted`);
            console.log(` - UserClients: deleted`);
            console.log(` - Snapshots: preserved (not deleted)`);

            process.exit(0);
        } catch (error) {
            console.error("Error deleting client:", error);
            process.exit(1);
        }
    }
};
|
||||
@@ -1,121 +0,0 @@
|
||||
import { CommandModule } from "yargs";
|
||||
import { db, orgs } from "@server/db";
|
||||
import { eq } from "drizzle-orm";
|
||||
import { encrypt } from "@server/lib/crypto";
|
||||
import { configFilePath1, configFilePath2 } from "@server/lib/consts";
|
||||
import { generateCA } from "@server/lib/sshCA";
|
||||
import fs from "fs";
|
||||
import yaml from "js-yaml";
|
||||
|
||||
type GenerateOrgCaKeysArgs = {
|
||||
orgId: string;
|
||||
secret?: string;
|
||||
force?: boolean;
|
||||
};
|
||||
|
||||
export const generateOrgCaKeys: CommandModule<{}, GenerateOrgCaKeysArgs> = {
|
||||
command: "generate-org-ca-keys",
|
||||
describe:
|
||||
"Generate SSH CA public/private key pair for an organization and store them in the database (private key encrypted with server secret)",
|
||||
builder: (yargs) => {
|
||||
return yargs
|
||||
.option("orgId", {
|
||||
type: "string",
|
||||
demandOption: true,
|
||||
describe: "The organization ID"
|
||||
})
|
||||
.option("secret", {
|
||||
type: "string",
|
||||
describe:
|
||||
"Server secret used to encrypt the CA private key. If omitted, read from config file (config.yml or config.yaml)."
|
||||
})
|
||||
.option("force", {
|
||||
type: "boolean",
|
||||
default: false,
|
||||
describe:
|
||||
"Overwrite existing CA keys for the org if they already exist"
|
||||
});
|
||||
},
|
||||
handler: async (argv: {
|
||||
orgId: string;
|
||||
secret?: string;
|
||||
force?: boolean;
|
||||
}) => {
|
||||
try {
|
||||
const { orgId, force } = argv;
|
||||
let secret = argv.secret;
|
||||
|
||||
if (!secret) {
|
||||
const configPath = fs.existsSync(configFilePath1)
|
||||
? configFilePath1
|
||||
: fs.existsSync(configFilePath2)
|
||||
? configFilePath2
|
||||
: null;
|
||||
|
||||
if (!configPath) {
|
||||
console.error(
|
||||
"Error: No server secret provided and config file not found. " +
|
||||
"Expected config.yml or config.yaml in the config directory, or pass --secret."
|
||||
);
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
const configContent = fs.readFileSync(configPath, "utf8");
|
||||
const config = yaml.load(configContent) as {
|
||||
server?: { secret?: string };
|
||||
};
|
||||
|
||||
if (!config?.server?.secret) {
|
||||
console.error(
|
||||
"Error: No server.secret in config file. Pass --secret or set server.secret in config."
|
||||
);
|
||||
process.exit(1);
|
||||
}
|
||||
secret = config.server.secret;
|
||||
}
|
||||
|
||||
const [org] = await db
|
||||
.select({
|
||||
orgId: orgs.orgId,
|
||||
sshCaPrivateKey: orgs.sshCaPrivateKey,
|
||||
sshCaPublicKey: orgs.sshCaPublicKey
|
||||
})
|
||||
.from(orgs)
|
||||
.where(eq(orgs.orgId, orgId))
|
||||
.limit(1);
|
||||
|
||||
if (!org) {
|
||||
console.error(`Error: Organization with orgId "${orgId}" not found.`);
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
if (org.sshCaPrivateKey != null || org.sshCaPublicKey != null) {
|
||||
if (!force) {
|
||||
console.error(
|
||||
"Error: This organization already has CA keys. Use --force to overwrite."
|
||||
);
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
const ca = generateCA(`pangolin-ssh-ca-${orgId}`);
|
||||
const encryptedPrivateKey = encrypt(ca.privateKeyPem, secret);
|
||||
|
||||
await db
|
||||
.update(orgs)
|
||||
.set({
|
||||
sshCaPrivateKey: encryptedPrivateKey,
|
||||
sshCaPublicKey: ca.publicKeyOpenSSH
|
||||
})
|
||||
.where(eq(orgs.orgId, orgId));
|
||||
|
||||
console.log("SSH CA keys generated and stored for org:", orgId);
|
||||
console.log("\nPublic key (OpenSSH format):");
|
||||
console.log(ca.publicKeyOpenSSH);
|
||||
process.exit(0);
|
||||
} catch (error) {
|
||||
console.error("Error generating org CA keys:", error);
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
};
|
||||
@@ -1,284 +0,0 @@
|
||||
import { CommandModule } from "yargs";
|
||||
import { db, idpOidcConfig, licenseKey } from "@server/db";
|
||||
import { encrypt, decrypt } from "@server/lib/crypto";
|
||||
import { configFilePath1, configFilePath2 } from "@server/lib/consts";
|
||||
import { eq } from "drizzle-orm";
|
||||
import fs from "fs";
|
||||
import yaml from "js-yaml";
|
||||
|
||||
type RotateServerSecretArgs = {
|
||||
"old-secret": string;
|
||||
"new-secret": string;
|
||||
force?: boolean;
|
||||
};
|
||||
|
||||
export const rotateServerSecret: CommandModule<
|
||||
{},
|
||||
RotateServerSecretArgs
|
||||
> = {
|
||||
command: "rotate-server-secret",
|
||||
describe:
|
||||
"Rotate the server secret by decrypting all encrypted values with the old secret and re-encrypting with a new secret",
|
||||
builder: (yargs) => {
|
||||
return yargs
|
||||
.option("old-secret", {
|
||||
type: "string",
|
||||
demandOption: true,
|
||||
describe: "The current server secret (for verification)"
|
||||
})
|
||||
.option("new-secret", {
|
||||
type: "string",
|
||||
demandOption: true,
|
||||
describe: "The new server secret to use"
|
||||
})
|
||||
.option("force", {
|
||||
type: "boolean",
|
||||
default: false,
|
||||
describe:
|
||||
"Force rotation even if the old secret doesn't match the config file. " +
|
||||
"Use this if you know the old secret is correct but the config file is out of sync. " +
|
||||
"WARNING: This will attempt to decrypt all values with the provided old secret. " +
|
||||
"If the old secret is incorrect, the rotation will fail or corrupt data."
|
||||
});
|
||||
},
|
||||
handler: async (argv: {
|
||||
"old-secret": string;
|
||||
"new-secret": string;
|
||||
force?: boolean;
|
||||
}) => {
|
||||
try {
|
||||
// Determine which config file exists
|
||||
const configPath = fs.existsSync(configFilePath1)
|
||||
? configFilePath1
|
||||
: fs.existsSync(configFilePath2)
|
||||
? configFilePath2
|
||||
: null;
|
||||
|
||||
if (!configPath) {
|
||||
console.error(
|
||||
"Error: Config file not found. Expected config.yml or config.yaml in the config directory."
|
||||
);
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
// Read current config
|
||||
const configContent = fs.readFileSync(configPath, "utf8");
|
||||
const config = yaml.load(configContent) as any;
|
||||
|
||||
if (!config?.server?.secret) {
|
||||
console.error(
|
||||
"Error: No server secret found in config file. Cannot rotate."
|
||||
);
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
const configSecret = config.server.secret;
|
||||
const oldSecret = argv["old-secret"];
|
||||
const newSecret = argv["new-secret"];
|
||||
const force = argv.force || false;
|
||||
|
||||
// Verify that the provided old secret matches the one in config
|
||||
if (configSecret !== oldSecret) {
|
||||
if (!force) {
|
||||
console.error(
|
||||
"Error: The provided old secret does not match the secret in the config file."
|
||||
);
|
||||
console.error(
|
||||
"\nIf you are certain the old secret is correct and the config file is out of sync,"
|
||||
);
|
||||
console.error(
|
||||
"you can use the --force flag to bypass this check."
|
||||
);
|
||||
console.error(
|
||||
"\nWARNING: Using --force with an incorrect old secret will cause the rotation to fail"
|
||||
);
|
||||
console.error(
|
||||
"or corrupt encrypted data. Only use --force if you are absolutely certain."
|
||||
);
|
||||
process.exit(1);
|
||||
} else {
|
||||
console.warn(
|
||||
"\nWARNING: Using --force flag. Bypassing old secret verification."
|
||||
);
|
||||
console.warn(
|
||||
"The provided old secret does not match the config file, but proceeding anyway."
|
||||
);
|
||||
console.warn(
|
||||
"If the old secret is incorrect, this operation will fail or corrupt data.\n"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Validate new secret
|
||||
if (newSecret.length < 8) {
|
||||
console.error(
|
||||
"Error: New secret must be at least 8 characters long"
|
||||
);
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
if (oldSecret === newSecret) {
|
||||
console.error("Error: New secret must be different from old secret");
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
console.log("Starting server secret rotation...");
|
||||
console.log("This will decrypt and re-encrypt all encrypted values in the database.");
|
||||
|
||||
// Read all data first
|
||||
console.log("\nReading encrypted data from database...");
|
||||
const idpConfigs = await db.select().from(idpOidcConfig);
|
||||
const licenseKeys = await db.select().from(licenseKey);
|
||||
|
||||
console.log(`Found ${idpConfigs.length} OIDC IdP configuration(s)`);
|
||||
console.log(`Found ${licenseKeys.length} license key(s)`);
|
||||
|
||||
// Prepare all decrypted and re-encrypted values
|
||||
console.log("\nDecrypting and re-encrypting values...");
|
||||
|
||||
type IdpUpdate = {
|
||||
idpOauthConfigId: number;
|
||||
encryptedClientId: string;
|
||||
encryptedClientSecret: string;
|
||||
};
|
||||
|
||||
type LicenseKeyUpdate = {
|
||||
oldLicenseKeyId: string;
|
||||
newLicenseKeyId: string;
|
||||
encryptedToken: string;
|
||||
encryptedInstanceId: string;
|
||||
};
|
||||
|
||||
const idpUpdates: IdpUpdate[] = [];
|
||||
const licenseKeyUpdates: LicenseKeyUpdate[] = [];
|
||||
|
||||
// Process idpOidcConfig entries
|
||||
for (const idpConfig of idpConfigs) {
|
||||
try {
|
||||
// Decrypt with old secret
|
||||
const decryptedClientId = decrypt(idpConfig.clientId, oldSecret);
|
||||
const decryptedClientSecret = decrypt(
|
||||
idpConfig.clientSecret,
|
||||
oldSecret
|
||||
);
|
||||
|
||||
// Re-encrypt with new secret
|
||||
const encryptedClientId = encrypt(decryptedClientId, newSecret);
|
||||
const encryptedClientSecret = encrypt(
|
||||
decryptedClientSecret,
|
||||
newSecret
|
||||
);
|
||||
|
||||
idpUpdates.push({
|
||||
idpOauthConfigId: idpConfig.idpOauthConfigId,
|
||||
encryptedClientId,
|
||||
encryptedClientSecret
|
||||
});
|
||||
} catch (error) {
|
||||
console.error(
|
||||
`Error processing IdP config ${idpConfig.idpOauthConfigId}:`,
|
||||
error
|
||||
);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
// Process licenseKey entries
|
||||
for (const key of licenseKeys) {
|
||||
try {
|
||||
// Decrypt with old secret
|
||||
const decryptedLicenseKeyId = decrypt(key.licenseKeyId, oldSecret);
|
||||
const decryptedToken = decrypt(key.token, oldSecret);
|
||||
const decryptedInstanceId = decrypt(key.instanceId, oldSecret);
|
||||
|
||||
// Re-encrypt with new secret
|
||||
const encryptedLicenseKeyId = encrypt(
|
||||
decryptedLicenseKeyId,
|
||||
newSecret
|
||||
);
|
||||
const encryptedToken = encrypt(decryptedToken, newSecret);
|
||||
const encryptedInstanceId = encrypt(
|
||||
decryptedInstanceId,
|
||||
newSecret
|
||||
);
|
||||
|
||||
licenseKeyUpdates.push({
|
||||
oldLicenseKeyId: key.licenseKeyId,
|
||||
newLicenseKeyId: encryptedLicenseKeyId,
|
||||
encryptedToken,
|
||||
encryptedInstanceId
|
||||
});
|
||||
} catch (error) {
|
||||
console.error(
|
||||
`Error processing license key ${key.licenseKeyId}:`,
|
||||
error
|
||||
);
|
||||
throw error;
|
||||
}
|
||||
}
|
||||
|
||||
// Perform all database updates in a single transaction
|
||||
console.log("\nUpdating database in transaction...");
|
||||
await db.transaction(async (trx) => {
|
||||
// Update idpOidcConfig entries
|
||||
for (const update of idpUpdates) {
|
||||
await trx
|
||||
.update(idpOidcConfig)
|
||||
.set({
|
||||
clientId: update.encryptedClientId,
|
||||
clientSecret: update.encryptedClientSecret
|
||||
})
|
||||
.where(
|
||||
eq(
|
||||
idpOidcConfig.idpOauthConfigId,
|
||||
update.idpOauthConfigId
|
||||
)
|
||||
);
|
||||
}
|
||||
|
||||
// Update licenseKey entries (delete old, insert new)
|
||||
for (const update of licenseKeyUpdates) {
|
||||
// Delete old entry
|
||||
await trx
|
||||
.delete(licenseKey)
|
||||
.where(eq(licenseKey.licenseKeyId, update.oldLicenseKeyId));
|
||||
|
||||
// Insert new entry with re-encrypted values
|
||||
await trx.insert(licenseKey).values({
|
||||
licenseKeyId: update.newLicenseKeyId,
|
||||
token: update.encryptedToken,
|
||||
instanceId: update.encryptedInstanceId
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
console.log(`Rotated ${idpUpdates.length} OIDC IdP configuration(s)`);
|
||||
console.log(`Rotated ${licenseKeyUpdates.length} license key(s)`);
|
||||
|
||||
// Update config file with new secret
|
||||
console.log("\nUpdating config file...");
|
||||
config.server.secret = newSecret;
|
||||
const newConfigContent = yaml.dump(config, {
|
||||
indent: 2,
|
||||
lineWidth: -1
|
||||
});
|
||||
fs.writeFileSync(configPath, newConfigContent, "utf8");
|
||||
|
||||
console.log(`Updated config file: ${configPath}`);
|
||||
|
||||
console.log("\nServer secret rotation completed successfully!");
|
||||
console.log(`\nSummary:`);
|
||||
console.log(` - OIDC IdP configurations: ${idpUpdates.length}`);
|
||||
console.log(` - License keys: ${licenseKeyUpdates.length}`);
|
||||
console.log(
|
||||
`\n IMPORTANT: Restart the server for the new secret to take effect.`
|
||||
);
|
||||
|
||||
process.exit(0);
|
||||
} catch (error) {
|
||||
console.error("Error rotating server secret:", error);
|
||||
process.exit(1);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
@@ -90,8 +90,7 @@ export const setAdminCredentials: CommandModule<{}, SetAdminCredentialsArgs> = {
|
||||
passwordHash,
|
||||
dateCreated: moment().toISOString(),
|
||||
serverAdmin: true,
|
||||
emailVerified: true,
|
||||
lastPasswordChange: new Date().getTime()
|
||||
emailVerified: true
|
||||
});
|
||||
|
||||
console.log("Server admin created");
|
||||
|
||||
10
cli/index.ts
@@ -4,20 +4,10 @@ import yargs from "yargs";
|
||||
import { hideBin } from "yargs/helpers";
|
||||
import { setAdminCredentials } from "@cli/commands/setAdminCredentials";
|
||||
import { resetUserSecurityKeys } from "@cli/commands/resetUserSecurityKeys";
|
||||
import { clearExitNodes } from "./commands/clearExitNodes";
|
||||
import { rotateServerSecret } from "./commands/rotateServerSecret";
|
||||
import { clearLicenseKeys } from "./commands/clearLicenseKeys";
|
||||
import { deleteClient } from "./commands/deleteClient";
|
||||
import { generateOrgCaKeys } from "./commands/generateOrgCaKeys";
|
||||
|
||||
yargs(hideBin(process.argv))
|
||||
.scriptName("pangctl")
|
||||
.command(setAdminCredentials)
|
||||
.command(resetUserSecurityKeys)
|
||||
.command(clearExitNodes)
|
||||
.command(rotateServerSecret)
|
||||
.command(clearLicenseKeys)
|
||||
.command(deleteClient)
|
||||
.command(generateOrgCaKeys)
|
||||
.demandCommand()
|
||||
.help().argv;
|
||||
|
||||
@@ -17,4 +17,4 @@
|
||||
"lib": "@/lib",
|
||||
"hooks": "@/hooks"
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,30 +1,28 @@
|
||||
# To see all available options, please visit the docs:
|
||||
# https://docs.pangolin.net/
|
||||
|
||||
gerbil:
|
||||
start_port: 51820
|
||||
base_endpoint: "{{.DashboardDomain}}"
|
||||
# https://docs.digpangolin.com/self-host/advanced/config-file
|
||||
|
||||
app:
|
||||
dashboard_url: "https://{{.DashboardDomain}}"
|
||||
log_level: "info"
|
||||
telemetry:
|
||||
anonymous_usage: true
|
||||
dashboard_url: http://localhost:3002
|
||||
log_level: debug
|
||||
|
||||
domains:
|
||||
domain1:
|
||||
base_domain: "{{.BaseDomain}}"
|
||||
domain1:
|
||||
base_domain: example.com
|
||||
|
||||
server:
|
||||
secret: "{{.Secret}}"
|
||||
cors:
|
||||
origins: ["https://{{.DashboardDomain}}"]
|
||||
methods: ["GET", "POST", "PUT", "DELETE", "PATCH"]
|
||||
allowed_headers: ["X-CSRF-Token", "Content-Type"]
|
||||
credentials: false
|
||||
secret: my_secret_key
|
||||
|
||||
gerbil:
|
||||
base_endpoint: example.com
|
||||
|
||||
orgs:
|
||||
block_size: 24
|
||||
subnet_group: 100.90.137.0/20
|
||||
|
||||
flags:
|
||||
require_email_verification: false
|
||||
disable_signup_without_invite: true
|
||||
disable_user_create_org: false
|
||||
allow_raw_resources: true
|
||||
require_email_verification: false
|
||||
disable_signup_without_invite: true
|
||||
disable_user_create_org: true
|
||||
allow_raw_resources: true
|
||||
enable_integration_api: true
|
||||
enable_clients: true
|
||||
|
||||
@@ -1,9 +1,5 @@
|
||||
http:
|
||||
middlewares:
|
||||
badger:
|
||||
plugin:
|
||||
badger:
|
||||
disableForwardAuth: true
|
||||
redirect-to-https:
|
||||
redirectScheme:
|
||||
scheme: https
|
||||
@@ -17,16 +13,14 @@ http:
|
||||
- web
|
||||
middlewares:
|
||||
- redirect-to-https
|
||||
- badger
|
||||
|
||||
# Next.js router (handles everything except API and WebSocket paths)
|
||||
next-router:
|
||||
rule: "Host(`{{.DashboardDomain}}`) && !PathPrefix(`/api/v1`)"
|
||||
rule: "Host(`{{.DashboardDomain}}`)"
|
||||
service: next-service
|
||||
priority: 10
|
||||
entryPoints:
|
||||
- websecure
|
||||
middlewares:
|
||||
- badger
|
||||
tls:
|
||||
certResolver: letsencrypt
|
||||
|
||||
@@ -34,10 +28,9 @@ http:
|
||||
api-router:
|
||||
rule: "Host(`{{.DashboardDomain}}`) && PathPrefix(`/api/v1`)"
|
||||
service: api-service
|
||||
priority: 100
|
||||
entryPoints:
|
||||
- websecure
|
||||
middlewares:
|
||||
- badger
|
||||
tls:
|
||||
certResolver: letsencrypt
|
||||
|
||||
@@ -51,12 +44,3 @@ http:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: "http://pangolin:3000" # API/WebSocket server
|
||||
|
||||
tcp:
|
||||
serversTransports:
|
||||
pp-transport-v1:
|
||||
proxyProtocol:
|
||||
version: 1
|
||||
pp-transport-v2:
|
||||
proxyProtocol:
|
||||
version: 2
|
||||
|
||||
@@ -3,52 +3,32 @@ api:
|
||||
dashboard: true
|
||||
|
||||
providers:
|
||||
http:
|
||||
endpoint: "http://pangolin:3001/api/v1/traefik-config"
|
||||
pollInterval: "5s"
|
||||
file:
|
||||
filename: "/etc/traefik/dynamic_config.yml"
|
||||
directory: "/var/dynamic"
|
||||
watch: true
|
||||
|
||||
experimental:
|
||||
plugins:
|
||||
badger:
|
||||
moduleName: "github.com/fosrl/badger"
|
||||
version: "{{.BadgerVersion}}"
|
||||
version: "v1.2.0"
|
||||
|
||||
log:
|
||||
level: "INFO"
|
||||
level: "DEBUG"
|
||||
format: "common"
|
||||
maxSize: 100
|
||||
maxBackups: 3
|
||||
maxAge: 3
|
||||
compress: true
|
||||
|
||||
certificatesResolvers:
|
||||
letsencrypt:
|
||||
acme:
|
||||
httpChallenge:
|
||||
entryPoint: web
|
||||
email: "{{.LetsEncryptEmail}}"
|
||||
storage: "/letsencrypt/acme.json"
|
||||
caServer: "https://acme-v02.api.letsencrypt.org/directory"
|
||||
|
||||
entryPoints:
|
||||
web:
|
||||
address: ":80"
|
||||
websecure:
|
||||
address: ":443"
|
||||
address: ":9443"
|
||||
transport:
|
||||
respondingTimeouts:
|
||||
readTimeout: "30m"
|
||||
http:
|
||||
tls:
|
||||
certResolver: "letsencrypt"
|
||||
encodedCharacters:
|
||||
allowEncodedSlash: true
|
||||
allowEncodedQuestionMark: true
|
||||
|
||||
serversTransport:
|
||||
insecureSkipVerify: true
|
||||
|
||||
ping:
|
||||
entryPoint: "web"
|
||||
|
||||
@@ -1,15 +0,0 @@
|
||||
services:
|
||||
drizzle-gateway:
|
||||
image: ghcr.io/drizzle-team/gateway:latest
|
||||
ports:
|
||||
- "4984:4983"
|
||||
depends_on:
|
||||
- db
|
||||
environment:
|
||||
- STORE_PATH=/app
|
||||
- DATABASE_URL=postgresql://postgres:password@db:5432/postgres
|
||||
volumes:
|
||||
- drizzle-gateway-data:/app
|
||||
|
||||
volumes:
|
||||
drizzle-gateway-data:
|
||||
@@ -20,7 +20,7 @@ services:
|
||||
pangolin:
|
||||
condition: service_healthy
|
||||
command:
|
||||
- --reachableAt=http://gerbil:3004
|
||||
- --reachableAt=http://gerbil:3003
|
||||
- --generateAndSaveKeyTo=/var/config/key
|
||||
- --remoteConfig=http://pangolin:3001/api/v1/
|
||||
volumes:
|
||||
@@ -35,7 +35,7 @@ services:
|
||||
- 80:80 # Port for traefik because of the network_mode
|
||||
|
||||
traefik:
|
||||
image: traefik:v3.6
|
||||
image: traefik:v3.5
|
||||
container_name: traefik
|
||||
restart: unless-stopped
|
||||
network_mode: service:gerbil # Ports appear on the gerbil service
|
||||
@@ -52,4 +52,4 @@ networks:
|
||||
default:
|
||||
driver: bridge
|
||||
name: pangolin
|
||||
enable_ipv6: true
|
||||
enable_ipv6: true
|
||||
@@ -7,11 +7,11 @@ services:
|
||||
POSTGRES_DB: postgres # Default database name
|
||||
POSTGRES_USER: postgres # Default user
|
||||
POSTGRES_PASSWORD: password # Default password (change for production!)
|
||||
# volumes:
|
||||
# - ./config/postgres:/var/lib/postgresql/data
|
||||
volumes:
|
||||
- ./config/postgres:/var/lib/postgresql/data
|
||||
ports:
|
||||
- "5432:5432" # Map host port 5432 to container port 5432
|
||||
restart: no
|
||||
restart: no
|
||||
|
||||
redis:
|
||||
image: redis:latest # Use the latest Redis image
|
||||
|
||||
@@ -1,7 +1,16 @@
|
||||
import { defineConfig } from "drizzle-kit";
|
||||
import path from "path";
|
||||
import { build } from "@server/build";
|
||||
|
||||
const schema = [path.join("server", "db", "pg", "schema")];
|
||||
let schema;
|
||||
if (build === "oss") {
|
||||
schema = [path.join("server", "db", "pg", "schema.ts")];
|
||||
} else {
|
||||
schema = [
|
||||
path.join("server", "db", "pg", "schema.ts"),
|
||||
path.join("server", "db", "pg", "privateSchema.ts")
|
||||
];
|
||||
}
|
||||
|
||||
export default defineConfig({
|
||||
dialect: "postgresql",
|
||||
|
||||
@@ -1,8 +1,17 @@
|
||||
import { build } from "@server/build";
|
||||
import { APP_PATH } from "@server/lib/consts";
|
||||
import { defineConfig } from "drizzle-kit";
|
||||
import path from "path";
|
||||
|
||||
const schema = [path.join("server", "db", "sqlite", "schema")];
|
||||
let schema;
|
||||
if (build === "oss") {
|
||||
schema = [path.join("server", "db", "sqlite", "schema.ts")];
|
||||
} else {
|
||||
schema = [
|
||||
path.join("server", "db", "sqlite", "schema.ts"),
|
||||
path.join("server", "db", "sqlite", "privateSchema.ts")
|
||||
];
|
||||
}
|
||||
|
||||
export default defineConfig({
|
||||
dialect: "sqlite",
|
||||
|
||||
257
esbuild.mjs
@@ -2,15 +2,8 @@ import esbuild from "esbuild";
|
||||
import yargs from "yargs";
|
||||
import { hideBin } from "yargs/helpers";
|
||||
import { nodeExternalsPlugin } from "esbuild-node-externals";
|
||||
import path from "path";
|
||||
import fs from "fs";
|
||||
// import { glob } from "glob";
|
||||
|
||||
// Read default build type from server/build.ts
|
||||
let build = "oss";
|
||||
const buildFile = fs.readFileSync(path.resolve("server/build.ts"), "utf8");
|
||||
const m = buildFile.match(/export\s+const\s+build\s*=\s*["'](oss|saas|enterprise)["']/);
|
||||
if (m) build = m[1];
|
||||
// import path from "path";
|
||||
|
||||
const banner = `
|
||||
// patch __dirname
|
||||
@@ -25,25 +18,18 @@ const require = topLevelCreateRequire(import.meta.url);
|
||||
`;
|
||||
|
||||
const argv = yargs(hideBin(process.argv))
|
||||
.usage("Usage: $0 -entry [string] -out [string] -build [string]")
|
||||
.usage("Usage: $0 -entry [string] -out [string]")
|
||||
.option("entry", {
|
||||
alias: "e",
|
||||
describe: "Entry point file",
|
||||
type: "string",
|
||||
demandOption: true
|
||||
demandOption: true,
|
||||
})
|
||||
.option("out", {
|
||||
alias: "o",
|
||||
describe: "Output file path",
|
||||
type: "string",
|
||||
demandOption: true
|
||||
})
|
||||
.option("build", {
|
||||
alias: "b",
|
||||
describe: "Build type (oss, saas, enterprise)",
|
||||
type: "string",
|
||||
choices: ["oss", "saas", "enterprise"],
|
||||
default: build
|
||||
demandOption: true,
|
||||
})
|
||||
.help()
|
||||
.alias("help", "h").argv;
|
||||
@@ -60,206 +46,6 @@ function getPackagePaths() {
|
||||
return ["package.json"];
|
||||
}
|
||||
|
||||
// Plugin to guard against bad imports from #private
|
||||
function privateImportGuardPlugin() {
|
||||
return {
|
||||
name: "private-import-guard",
|
||||
setup(build) {
|
||||
const violations = [];
|
||||
|
||||
build.onResolve({ filter: /^#private\// }, (args) => {
|
||||
const importingFile = args.importer;
|
||||
|
||||
// Check if the importing file is NOT in server/private
|
||||
const normalizedImporter = path.normalize(importingFile);
|
||||
const isInServerPrivate = normalizedImporter.includes(
|
||||
path.normalize("server/private")
|
||||
);
|
||||
|
||||
if (!isInServerPrivate) {
|
||||
const violation = {
|
||||
file: importingFile,
|
||||
importPath: args.path,
|
||||
resolveDir: args.resolveDir
|
||||
};
|
||||
violations.push(violation);
|
||||
|
||||
console.log(`PRIVATE IMPORT VIOLATION:`);
|
||||
console.log(` File: ${importingFile}`);
|
||||
console.log(` Import: ${args.path}`);
|
||||
console.log(` Resolve dir: ${args.resolveDir || "N/A"}`);
|
||||
console.log("");
|
||||
}
|
||||
|
||||
// Return null to let the default resolver handle it
|
||||
return null;
|
||||
});
|
||||
|
||||
build.onEnd((result) => {
|
||||
if (violations.length > 0) {
|
||||
console.log(
|
||||
`\nSUMMARY: Found ${violations.length} private import violation(s):`
|
||||
);
|
||||
violations.forEach((v, i) => {
|
||||
console.log(
|
||||
` ${i + 1}. ${path.relative(process.cwd(), v.file)} imports ${v.importPath}`
|
||||
);
|
||||
});
|
||||
console.log("");
|
||||
|
||||
result.errors.push({
|
||||
text: `Private import violations detected: ${violations.length} violation(s) found`,
|
||||
location: null,
|
||||
notes: violations.map((v) => ({
|
||||
text: `${path.relative(process.cwd(), v.file)} imports ${v.importPath}`,
|
||||
location: null
|
||||
}))
|
||||
});
|
||||
}
|
||||
});
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// Plugin to guard against bad imports from #private
|
||||
function dynamicImportGuardPlugin() {
|
||||
return {
|
||||
name: "dynamic-import-guard",
|
||||
setup(build) {
|
||||
const violations = [];
|
||||
|
||||
build.onResolve({ filter: /^#dynamic\// }, (args) => {
|
||||
const importingFile = args.importer;
|
||||
|
||||
// Check if the importing file is NOT in server/private
|
||||
const normalizedImporter = path.normalize(importingFile);
|
||||
const isInServerPrivate = normalizedImporter.includes(
|
||||
path.normalize("server/private")
|
||||
);
|
||||
|
||||
if (isInServerPrivate) {
|
||||
const violation = {
|
||||
file: importingFile,
|
||||
importPath: args.path,
|
||||
resolveDir: args.resolveDir
|
||||
};
|
||||
violations.push(violation);
|
||||
|
||||
console.log(`DYNAMIC IMPORT VIOLATION:`);
|
||||
console.log(` File: ${importingFile}`);
|
||||
console.log(` Import: ${args.path}`);
|
||||
console.log(` Resolve dir: ${args.resolveDir || "N/A"}`);
|
||||
console.log("");
|
||||
}
|
||||
|
||||
// Return null to let the default resolver handle it
|
||||
return null;
|
||||
});
|
||||
|
||||
build.onEnd((result) => {
|
||||
if (violations.length > 0) {
|
||||
console.log(
|
||||
`\nSUMMARY: Found ${violations.length} dynamic import violation(s):`
|
||||
);
|
||||
violations.forEach((v, i) => {
|
||||
console.log(
|
||||
` ${i + 1}. ${path.relative(process.cwd(), v.file)} imports ${v.importPath}`
|
||||
);
|
||||
});
|
||||
console.log("");
|
||||
|
||||
result.errors.push({
|
||||
text: `Dynamic import violations detected: ${violations.length} violation(s) found`,
|
||||
location: null,
|
||||
notes: violations.map((v) => ({
|
||||
text: `${path.relative(process.cwd(), v.file)} imports ${v.importPath}`,
|
||||
location: null
|
||||
}))
|
||||
});
|
||||
}
|
||||
});
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
// Plugin to dynamically switch imports based on build type
|
||||
function dynamicImportSwitcherPlugin(buildValue) {
|
||||
return {
|
||||
name: "dynamic-import-switcher",
|
||||
setup(build) {
|
||||
const switches = [];
|
||||
|
||||
build.onStart(() => {
|
||||
console.log(
|
||||
`Dynamic import switcher using build type: ${buildValue}`
|
||||
);
|
||||
});
|
||||
|
||||
build.onResolve({ filter: /^#dynamic\// }, (args) => {
|
||||
// Extract the path after #dynamic/
|
||||
const dynamicPath = args.path.replace(/^#dynamic\//, "");
|
||||
|
||||
// Determine the replacement based on build type
|
||||
let replacement;
|
||||
if (buildValue === "oss") {
|
||||
replacement = `#open/${dynamicPath}`;
|
||||
} else if (
|
||||
buildValue === "saas" ||
|
||||
buildValue === "enterprise"
|
||||
) {
|
||||
replacement = `#closed/${dynamicPath}`; // We use #closed here so that the route guards dont complain after its been changed but this is the same as #private
|
||||
} else {
|
||||
console.warn(
|
||||
`Unknown build type '${buildValue}', defaulting to #open/`
|
||||
);
|
||||
replacement = `#open/${dynamicPath}`;
|
||||
}
|
||||
|
||||
const switchInfo = {
|
||||
file: args.importer,
|
||||
originalPath: args.path,
|
||||
replacementPath: replacement,
|
||||
buildType: buildValue
|
||||
};
|
||||
switches.push(switchInfo);
|
||||
|
||||
console.log(`DYNAMIC IMPORT SWITCH:`);
|
||||
console.log(` File: ${args.importer}`);
|
||||
console.log(` Original: ${args.path}`);
|
||||
console.log(
|
||||
` Switched to: ${replacement} (build: ${buildValue})`
|
||||
);
|
||||
console.log("");
|
||||
|
||||
// Rewrite the import path and let the normal resolution continue
|
||||
return build.resolve(replacement, {
|
||||
importer: args.importer,
|
||||
namespace: args.namespace,
|
||||
resolveDir: args.resolveDir,
|
||||
kind: args.kind
|
||||
});
|
||||
});
|
||||
|
||||
build.onEnd((result) => {
|
||||
if (switches.length > 0) {
|
||||
console.log(
|
||||
`\nDYNAMIC IMPORT SUMMARY: Switched ${switches.length} import(s) for build type '${buildValue}':`
|
||||
);
|
||||
switches.forEach((s, i) => {
|
||||
console.log(
|
||||
` ${i + 1}. ${path.relative(process.cwd(), s.file)}`
|
||||
);
|
||||
console.log(
|
||||
` ${s.originalPath} → ${s.replacementPath}`
|
||||
);
|
||||
});
|
||||
console.log("");
|
||||
}
|
||||
});
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
esbuild
|
||||
.build({
|
||||
entryPoints: [argv.entry],
|
||||
@@ -268,44 +54,19 @@ esbuild
|
||||
format: "esm",
|
||||
minify: false,
|
||||
banner: {
|
||||
js: banner
|
||||
js: banner,
|
||||
},
|
||||
platform: "node",
|
||||
external: ["body-parser"],
|
||||
plugins: [
|
||||
privateImportGuardPlugin(),
|
||||
dynamicImportGuardPlugin(),
|
||||
dynamicImportSwitcherPlugin(argv.build),
|
||||
nodeExternalsPlugin({
|
||||
packagePath: getPackagePaths()
|
||||
})
|
||||
packagePath: getPackagePaths(),
|
||||
}),
|
||||
],
|
||||
sourcemap: "inline",
|
||||
target: "node24"
|
||||
target: "node22",
|
||||
})
|
||||
.then((result) => {
|
||||
// Check if there were any errors in the build result
|
||||
if (result.errors && result.errors.length > 0) {
|
||||
console.error(
|
||||
`Build failed with ${result.errors.length} error(s):`
|
||||
);
|
||||
result.errors.forEach((error, i) => {
|
||||
console.error(`${i + 1}. ${error.text}`);
|
||||
if (error.notes) {
|
||||
error.notes.forEach((note) => {
|
||||
console.error(` - ${note.text}`);
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
// remove the output file if it was created
|
||||
if (fs.existsSync(argv.out)) {
|
||||
fs.unlinkSync(argv.out);
|
||||
}
|
||||
|
||||
process.exit(1);
|
||||
}
|
||||
|
||||
.then(() => {
|
||||
console.log("Build completed successfully");
|
||||
})
|
||||
.catch((error) => {
|
||||
|
||||
@@ -1,19 +1,19 @@
|
||||
import tseslint from "typescript-eslint";
|
||||
import tseslint from 'typescript-eslint';
|
||||
|
||||
export default tseslint.config({
|
||||
files: ["**/*.{ts,tsx,js,jsx}"],
|
||||
languageOptions: {
|
||||
parser: tseslint.parser,
|
||||
parserOptions: {
|
||||
ecmaVersion: "latest",
|
||||
sourceType: "module",
|
||||
ecmaFeatures: {
|
||||
jsx: true
|
||||
}
|
||||
}
|
||||
},
|
||||
rules: {
|
||||
semi: "error",
|
||||
"prefer-const": "warn"
|
||||
files: ["**/*.{ts,tsx,js,jsx}"],
|
||||
languageOptions: {
|
||||
parser: tseslint.parser,
|
||||
parserOptions: {
|
||||
ecmaVersion: "latest",
|
||||
sourceType: "module",
|
||||
ecmaFeatures: {
|
||||
jsx: true
|
||||
}
|
||||
}
|
||||
});
|
||||
},
|
||||
rules: {
|
||||
"semi": "error",
|
||||
"prefer-const": "warn"
|
||||
}
|
||||
});
|
||||
@@ -1,24 +1,37 @@
|
||||
all: go-build-release
|
||||
|
||||
# Build with version injection via ldflags
|
||||
# Versions can be passed via: make go-build-release PANGOLIN_VERSION=x.x.x GERBIL_VERSION=x.x.x BADGER_VERSION=x.x.x
|
||||
# Or fetched automatically if not provided (requires curl and jq)
|
||||
|
||||
PANGOLIN_VERSION ?= $(shell curl -s https://api.github.com/repos/fosrl/pangolin/tags | jq -r '.[0].name')
|
||||
GERBIL_VERSION ?= $(shell curl -s https://api.github.com/repos/fosrl/gerbil/tags | jq -r '.[0].name')
|
||||
BADGER_VERSION ?= $(shell curl -s https://api.github.com/repos/fosrl/badger/tags | jq -r '.[0].name')
|
||||
|
||||
LDFLAGS = -X main.pangolinVersion=$(PANGOLIN_VERSION) \
|
||||
-X main.gerbilVersion=$(GERBIL_VERSION) \
|
||||
-X main.badgerVersion=$(BADGER_VERSION)
|
||||
all: update-versions go-build-release put-back
|
||||
dev-all: dev-update-versions dev-build dev-clean
|
||||
|
||||
go-build-release:
|
||||
@echo "Building with versions - Pangolin: $(PANGOLIN_VERSION), Gerbil: $(GERBIL_VERSION), Badger: $(BADGER_VERSION)"
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags "$(LDFLAGS)" -o bin/installer_linux_amd64
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -ldflags "$(LDFLAGS)" -o bin/installer_linux_arm64
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o bin/installer_linux_amd64
|
||||
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -o bin/installer_linux_arm64
|
||||
|
||||
clean:
|
||||
rm -f bin/installer_linux_amd64
|
||||
rm -f bin/installer_linux_arm64
|
||||
|
||||
.PHONY: all go-build-release clean
|
||||
update-versions:
|
||||
@echo "Fetching latest versions..."
|
||||
cp main.go main.go.bak && \
|
||||
$(MAKE) dev-update-versions
|
||||
|
||||
put-back:
|
||||
mv main.go.bak main.go
|
||||
|
||||
dev-update-versions:
|
||||
PANGOLIN_VERSION=$$(curl -s https://api.github.com/repos/fosrl/pangolin/tags | jq -r '.[0].name') && \
|
||||
GERBIL_VERSION=$$(curl -s https://api.github.com/repos/fosrl/gerbil/tags | jq -r '.[0].name') && \
|
||||
BADGER_VERSION=$$(curl -s https://api.github.com/repos/fosrl/badger/tags | jq -r '.[0].name') && \
|
||||
echo "Latest versions - Pangolin: $$PANGOLIN_VERSION, Gerbil: $$GERBIL_VERSION, Badger: $$BADGER_VERSION" && \
|
||||
sed -i "s/config.PangolinVersion = \".*\"/config.PangolinVersion = \"$$PANGOLIN_VERSION\"/" main.go && \
|
||||
sed -i "s/config.GerbilVersion = \".*\"/config.GerbilVersion = \"$$GERBIL_VERSION\"/" main.go && \
|
||||
sed -i "s/config.BadgerVersion = \".*\"/config.BadgerVersion = \"$$BADGER_VERSION\"/" main.go && \
|
||||
echo "Updated main.go with latest versions"
|
||||
|
||||
dev-build: go-build-release
|
||||
|
||||
dev-clean:
|
||||
@echo "Restoring version values ..."
|
||||
sed -i "s/config.PangolinVersion = \".*\"/config.PangolinVersion = \"replaceme\"/" main.go && \
|
||||
sed -i "s/config.GerbilVersion = \".*\"/config.GerbilVersion = \"replaceme\"/" main.go && \
|
||||
sed -i "s/config.BadgerVersion = \".*\"/config.BadgerVersion = \"replaceme\"/" main.go
|
||||
@echo "Restored version strings in main.go"
|
||||
|
||||
@@ -118,19 +118,19 @@ func copyDockerService(sourceFile, destFile, serviceName string) error {
|
||||
}
|
||||
|
||||
// Parse source Docker Compose YAML
|
||||
var sourceCompose map[string]any
|
||||
var sourceCompose map[string]interface{}
|
||||
if err := yaml.Unmarshal(sourceData, &sourceCompose); err != nil {
|
||||
return fmt.Errorf("error parsing source Docker Compose file: %w", err)
|
||||
}
|
||||
|
||||
// Parse destination Docker Compose YAML
|
||||
var destCompose map[string]any
|
||||
var destCompose map[string]interface{}
|
||||
if err := yaml.Unmarshal(destData, &destCompose); err != nil {
|
||||
return fmt.Errorf("error parsing destination Docker Compose file: %w", err)
|
||||
}
|
||||
|
||||
// Get services section from source
|
||||
sourceServices, ok := sourceCompose["services"].(map[string]any)
|
||||
sourceServices, ok := sourceCompose["services"].(map[string]interface{})
|
||||
if !ok {
|
||||
return fmt.Errorf("services section not found in source file or has invalid format")
|
||||
}
|
||||
@@ -142,10 +142,10 @@ func copyDockerService(sourceFile, destFile, serviceName string) error {
|
||||
}
|
||||
|
||||
// Get or create services section in destination
|
||||
destServices, ok := destCompose["services"].(map[string]any)
|
||||
destServices, ok := destCompose["services"].(map[string]interface{})
|
||||
if !ok {
|
||||
// If services section doesn't exist, create it
|
||||
destServices = make(map[string]any)
|
||||
destServices = make(map[string]interface{})
|
||||
destCompose["services"] = destServices
|
||||
}
|
||||
|
||||
@@ -187,12 +187,13 @@ func backupConfig() error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func MarshalYAMLWithIndent(data any, indent int) ([]byte, error) {
|
||||
func MarshalYAMLWithIndent(data interface{}, indent int) ([]byte, error) {
|
||||
buffer := new(bytes.Buffer)
|
||||
encoder := yaml.NewEncoder(buffer)
|
||||
encoder.SetIndent(indent)
|
||||
|
||||
if err := encoder.Encode(data); err != nil {
|
||||
err := encoder.Encode(data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -208,7 +209,7 @@ func replaceInFile(filepath, oldStr, newStr string) error {
|
||||
}
|
||||
|
||||
// Replace the string
|
||||
newContent := strings.ReplaceAll(string(content), oldStr, newStr)
|
||||
newContent := strings.Replace(string(content), oldStr, newStr, -1)
|
||||
|
||||
// Write the modified content back to the file
|
||||
err = os.WriteFile(filepath, []byte(newContent), 0644)
|
||||
@@ -227,28 +228,28 @@ func CheckAndAddTraefikLogVolume(composePath string) error {
|
||||
}
|
||||
|
||||
// Parse YAML into a generic map
|
||||
var compose map[string]any
|
||||
var compose map[string]interface{}
|
||||
if err := yaml.Unmarshal(data, &compose); err != nil {
|
||||
return fmt.Errorf("error parsing compose file: %w", err)
|
||||
}
|
||||
|
||||
// Get services section
|
||||
services, ok := compose["services"].(map[string]any)
|
||||
services, ok := compose["services"].(map[string]interface{})
|
||||
if !ok {
|
||||
return fmt.Errorf("services section not found or invalid")
|
||||
}
|
||||
|
||||
// Get traefik service
|
||||
traefik, ok := services["traefik"].(map[string]any)
|
||||
traefik, ok := services["traefik"].(map[string]interface{})
|
||||
if !ok {
|
||||
return fmt.Errorf("traefik service not found or invalid")
|
||||
}
|
||||
|
||||
// Check volumes
|
||||
logVolume := "./config/traefik/logs:/var/log/traefik"
|
||||
var volumes []any
|
||||
var volumes []interface{}
|
||||
|
||||
if existingVolumes, ok := traefik["volumes"].([]any); ok {
|
||||
if existingVolumes, ok := traefik["volumes"].([]interface{}); ok {
|
||||
// Check if volume already exists
|
||||
for _, v := range existingVolumes {
|
||||
if v.(string) == logVolume {
|
||||
@@ -294,13 +295,13 @@ func MergeYAML(baseFile, overlayFile string) error {
|
||||
}
|
||||
|
||||
// Parse base YAML into a map
|
||||
var baseMap map[string]any
|
||||
var baseMap map[string]interface{}
|
||||
if err := yaml.Unmarshal(baseContent, &baseMap); err != nil {
|
||||
return fmt.Errorf("error parsing base YAML: %v", err)
|
||||
}
|
||||
|
||||
// Parse overlay YAML into a map
|
||||
var overlayMap map[string]any
|
||||
var overlayMap map[string]interface{}
|
||||
if err := yaml.Unmarshal(overlayContent, &overlayMap); err != nil {
|
||||
return fmt.Errorf("error parsing overlay YAML: %v", err)
|
||||
}
|
||||
@@ -323,8 +324,8 @@ func MergeYAML(baseFile, overlayFile string) error {
|
||||
}
|
||||
|
||||
// mergeMap recursively merges two maps
|
||||
func mergeMap(base, overlay map[string]any) map[string]any {
|
||||
result := make(map[string]any)
|
||||
func mergeMap(base, overlay map[string]interface{}) map[string]interface{} {
|
||||
result := make(map[string]interface{})
|
||||
|
||||
// Copy all key-values from base map
|
||||
for k, v := range base {
|
||||
@@ -335,8 +336,8 @@ func mergeMap(base, overlay map[string]any) map[string]any {
|
||||
for k, v := range overlay {
|
||||
// If both maps have the same key and both values are maps, merge recursively
|
||||
if baseVal, ok := base[k]; ok {
|
||||
if baseMap, isBaseMap := baseVal.(map[string]any); isBaseMap {
|
||||
if overlayMap, isOverlayMap := v.(map[string]any); isOverlayMap {
|
||||
if baseMap, isBaseMap := baseVal.(map[string]interface{}); isBaseMap {
|
||||
if overlayMap, isOverlayMap := v.(map[string]interface{}); isOverlayMap {
|
||||
result[k] = mergeMap(baseMap, overlayMap)
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -1,10 +1,15 @@
|
||||
# To see all available options, please visit the docs:
|
||||
# https://docs.pangolin.net/
|
||||
# https://docs.digpangolin.com/self-host/advanced/config-file
|
||||
|
||||
gerbil:
|
||||
start_port: 51820
|
||||
base_endpoint: "{{.DashboardDomain}}"
|
||||
|
||||
{{if .HybridMode}}
|
||||
managed:
|
||||
id: "{{.HybridId}}"
|
||||
secret: "{{.HybridSecret}}"
|
||||
|
||||
{{else}}
|
||||
app:
|
||||
dashboard_url: "https://{{.DashboardDomain}}"
|
||||
log_level: "info"
|
||||
@@ -14,6 +19,7 @@ app:
|
||||
domains:
|
||||
domain1:
|
||||
base_domain: "{{.BaseDomain}}"
|
||||
cert_resolver: "letsencrypt"
|
||||
|
||||
server:
|
||||
secret: "{{.Secret}}"
|
||||
@@ -22,7 +28,6 @@ server:
|
||||
methods: ["GET", "POST", "PUT", "DELETE", "PATCH"]
|
||||
allowed_headers: ["X-CSRF-Token", "Content-Type"]
|
||||
credentials: false
|
||||
{{if .EnableGeoblocking}}maxmind_db_path: "./config/GeoLite2-Country.mmdb"{{end}}
|
||||
{{if .EnableEmail}}
|
||||
email:
|
||||
smtp_host: "{{.EmailSMTPHost}}"
|
||||
@@ -36,3 +41,4 @@ flags:
|
||||
disable_signup_without_invite: true
|
||||
disable_user_create_org: false
|
||||
allow_raw_resources: true
|
||||
{{end}}
|
||||
@@ -9,15 +9,10 @@ services:
|
||||
PARSERS: crowdsecurity/whitelists
|
||||
ENROLL_TAGS: docker
|
||||
healthcheck:
|
||||
test:
|
||||
- CMD
|
||||
- cscli
|
||||
- lapi
|
||||
- status
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
start_period: 30s
|
||||
interval: 10s
|
||||
retries: 15
|
||||
timeout: 10s
|
||||
test: ["CMD", "cscli", "capi", "status"]
|
||||
labels:
|
||||
- "traefik.enable=false" # Disable traefik for crowdsec
|
||||
volumes:
|
||||
|
||||
@@ -1,9 +1,5 @@
|
||||
http:
|
||||
middlewares:
|
||||
badger:
|
||||
plugin:
|
||||
badger:
|
||||
disableForwardAuth: true
|
||||
redirect-to-https:
|
||||
redirectScheme:
|
||||
scheme: https
|
||||
@@ -48,7 +44,7 @@ http:
|
||||
crowdsecAppsecUnreachableBlock: true # Block on unreachable
|
||||
crowdsecAppsecBodyLimit: 10485760
|
||||
crowdsecLapiKey: "PUT_YOUR_BOUNCER_KEY_HERE_OR_IT_WILL_NOT_WORK" # CrowdSec API key which you noted down later
|
||||
crowdsecLapiHost: crowdsec:8080 # CrowdSec
|
||||
crowdsecLapiHost: crowdsec:8080 # CrowdSec
|
||||
crowdsecLapiScheme: http # CrowdSec API scheme
|
||||
forwardedHeadersTrustedIPs: # Forwarded headers trusted IPs
|
||||
- "0.0.0.0/0" # All IP addresses are trusted for forwarded headers (CHANGE MADE HERE)
|
||||
@@ -67,7 +63,6 @@ http:
|
||||
- web
|
||||
middlewares:
|
||||
- redirect-to-https
|
||||
- badger
|
||||
|
||||
# Next.js router (handles everything except API and WebSocket paths)
|
||||
next-router:
|
||||
@@ -77,7 +72,6 @@ http:
|
||||
- websecure
|
||||
middlewares:
|
||||
- security-headers # Add security headers middleware
|
||||
- badger
|
||||
tls:
|
||||
certResolver: letsencrypt
|
||||
|
||||
@@ -89,7 +83,6 @@ http:
|
||||
- websecure
|
||||
middlewares:
|
||||
- security-headers # Add security headers middleware
|
||||
- badger
|
||||
tls:
|
||||
certResolver: letsencrypt
|
||||
|
||||
@@ -101,7 +94,6 @@ http:
|
||||
- websecure
|
||||
middlewares:
|
||||
- security-headers # Add security headers middleware
|
||||
- badger
|
||||
tls:
|
||||
certResolver: letsencrypt
|
||||
|
||||
@@ -114,13 +106,4 @@ http:
|
||||
api-service:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: "http://pangolin:3000" # API/WebSocket server
|
||||
|
||||
tcp:
|
||||
serversTransports:
|
||||
pp-transport-v1:
|
||||
proxyProtocol:
|
||||
version: 1
|
||||
pp-transport-v2:
|
||||
proxyProtocol:
|
||||
version: 2
|
||||
- url: "http://pangolin:3000" # API/WebSocket server
|
||||
@@ -1,11 +1,13 @@
|
||||
name: pangolin
|
||||
services:
|
||||
pangolin:
|
||||
image: docker.io/fosrl/pangolin:{{if .IsEnterprise}}ee-{{end}}{{.PangolinVersion}}
|
||||
image: docker.io/fosrl/pangolin:{{.PangolinVersion}}
|
||||
container_name: pangolin
|
||||
restart: unless-stopped
|
||||
volumes:
|
||||
- ./config:/app/config
|
||||
- pangolin-data:/var/certificates
|
||||
- pangolin-data:/var/dynamic
|
||||
healthcheck:
|
||||
test: ["CMD", "curl", "-f", "http://localhost:3001/api/v1/"]
|
||||
interval: "10s"
|
||||
@@ -20,7 +22,7 @@ services:
|
||||
pangolin:
|
||||
condition: service_healthy
|
||||
command:
|
||||
- --reachableAt=http://gerbil:3004
|
||||
- --reachableAt=http://gerbil:3003
|
||||
- --generateAndSaveKeyTo=/var/config/key
|
||||
- --remoteConfig=http://pangolin:3001/api/v1/
|
||||
volumes:
|
||||
@@ -31,14 +33,16 @@ services:
|
||||
ports:
|
||||
- 51820:51820/udp
|
||||
- 21820:21820/udp
|
||||
- 443:443
|
||||
- 443:{{if .HybridMode}}8443{{else}}443{{end}}
|
||||
- 80:80
|
||||
{{end}}
|
||||
traefik:
|
||||
image: docker.io/traefik:v3.6
|
||||
image: docker.io/traefik:v3.5
|
||||
container_name: traefik
|
||||
restart: unless-stopped
|
||||
{{if .InstallGerbil}} network_mode: service:gerbil # Ports appear on the gerbil service{{end}}{{if not .InstallGerbil}}
|
||||
{{if .InstallGerbil}}
|
||||
network_mode: service:gerbil # Ports appear on the gerbil service
|
||||
{{end}}{{if not .InstallGerbil}}
|
||||
ports:
|
||||
- 443:443
|
||||
- 80:80
|
||||
@@ -52,9 +56,15 @@ services:
|
||||
- ./config/traefik:/etc/traefik:ro # Volume to store the Traefik configuration
|
||||
- ./config/letsencrypt:/letsencrypt # Volume to store the Let's Encrypt certificates
|
||||
- ./config/traefik/logs:/var/log/traefik # Volume to store Traefik logs
|
||||
# Shared volume for certificates and dynamic config in file mode
|
||||
- pangolin-data:/var/certificates:ro
|
||||
- pangolin-data:/var/dynamic:ro
|
||||
|
||||
networks:
|
||||
default:
|
||||
driver: bridge
|
||||
name: pangolin
|
||||
{{if .EnableIPv6}} enable_ipv6: true{{end}}
|
||||
|
||||
volumes:
|
||||
pangolin-data:
|
||||
|
||||
@@ -1,9 +1,5 @@
|
||||
http:
|
||||
middlewares:
|
||||
badger:
|
||||
plugin:
|
||||
badger:
|
||||
disableForwardAuth: true
|
||||
redirect-to-https:
|
||||
redirectScheme:
|
||||
scheme: https
|
||||
@@ -17,7 +13,6 @@ http:
|
||||
- web
|
||||
middlewares:
|
||||
- redirect-to-https
|
||||
- badger
|
||||
|
||||
# Next.js router (handles everything except API and WebSocket paths)
|
||||
next-router:
|
||||
@@ -25,8 +20,6 @@ http:
|
||||
service: next-service
|
||||
entryPoints:
|
||||
- websecure
|
||||
middlewares:
|
||||
- badger
|
||||
tls:
|
||||
certResolver: letsencrypt
|
||||
|
||||
@@ -36,8 +29,6 @@ http:
|
||||
service: api-service
|
||||
entryPoints:
|
||||
- websecure
|
||||
middlewares:
|
||||
- badger
|
||||
tls:
|
||||
certResolver: letsencrypt
|
||||
|
||||
@@ -47,8 +38,6 @@ http:
|
||||
service: api-service
|
||||
entryPoints:
|
||||
- websecure
|
||||
middlewares:
|
||||
- badger
|
||||
tls:
|
||||
certResolver: letsencrypt
|
||||
|
||||
@@ -62,12 +51,3 @@ http:
|
||||
loadBalancer:
|
||||
servers:
|
||||
- url: "http://pangolin:3000" # API/WebSocket server
|
||||
|
||||
tcp:
|
||||
serversTransports:
|
||||
pp-transport-v1:
|
||||
proxyProtocol:
|
||||
version: 1
|
||||
pp-transport-v2:
|
||||
proxyProtocol:
|
||||
version: 2
|
||||
|
||||
@@ -3,12 +3,17 @@ api:
|
||||
dashboard: true
|
||||
|
||||
providers:
|
||||
{{if not .HybridMode}}
|
||||
http:
|
||||
endpoint: "http://pangolin:3001/api/v1/traefik-config"
|
||||
pollInterval: "5s"
|
||||
file:
|
||||
filename: "/etc/traefik/dynamic_config.yml"
|
||||
|
||||
{{else}}
|
||||
file:
|
||||
directory: "/var/dynamic"
|
||||
watch: true
|
||||
{{end}}
|
||||
experimental:
|
||||
plugins:
|
||||
badger:
|
||||
@@ -22,7 +27,7 @@ log:
|
||||
maxBackups: 3
|
||||
maxAge: 3
|
||||
compress: true
|
||||
|
||||
{{if not .HybridMode}}
|
||||
certificatesResolvers:
|
||||
letsencrypt:
|
||||
acme:
|
||||
@@ -31,24 +36,25 @@ certificatesResolvers:
|
||||
email: "{{.LetsEncryptEmail}}"
|
||||
storage: "/letsencrypt/acme.json"
|
||||
caServer: "https://acme-v02.api.letsencrypt.org/directory"
|
||||
|
||||
{{end}}
|
||||
entryPoints:
|
||||
web:
|
||||
address: ":80"
|
||||
websecure:
|
||||
address: ":443"
|
||||
{{if .HybridMode}} proxyProtocol:
|
||||
trustedIPs:
|
||||
- 0.0.0.0/0
|
||||
- ::1/128{{end}}
|
||||
transport:
|
||||
respondingTimeouts:
|
||||
readTimeout: "30m"
|
||||
http:
|
||||
{{if not .HybridMode}} http:
|
||||
tls:
|
||||
certResolver: "letsencrypt"
|
||||
encodedCharacters:
|
||||
allowEncodedSlash: true
|
||||
allowEncodedQuestionMark: true
|
||||
certResolver: "letsencrypt"{{end}}
|
||||
|
||||
serversTransport:
|
||||
insecureSkipVerify: true
|
||||
|
||||
ping:
|
||||
entryPoint: "web"
|
||||
entryPoint: "web"
|
||||
@@ -73,7 +73,7 @@ func installDocker() error {
|
||||
case strings.Contains(osRelease, "ID=ubuntu"):
|
||||
installCmd = exec.Command("bash", "-c", fmt.Sprintf(`
|
||||
apt-get update &&
|
||||
apt-get install -y apt-transport-https ca-certificates curl gpg &&
|
||||
apt-get install -y apt-transport-https ca-certificates curl software-properties-common &&
|
||||
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg &&
|
||||
echo "deb [arch=%s signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable" > /etc/apt/sources.list.d/docker.list &&
|
||||
apt-get update &&
|
||||
@@ -82,7 +82,7 @@ func installDocker() error {
|
||||
case strings.Contains(osRelease, "ID=debian"):
|
||||
installCmd = exec.Command("bash", "-c", fmt.Sprintf(`
|
||||
apt-get update &&
|
||||
apt-get install -y apt-transport-https ca-certificates curl gpg &&
|
||||
apt-get install -y apt-transport-https ca-certificates curl software-properties-common &&
|
||||
curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg &&
|
||||
echo "deb [arch=%s signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian $(lsb_release -cs) stable" > /etc/apt/sources.list.d/docker.list &&
|
||||
apt-get update &&
|
||||
@@ -144,13 +144,12 @@ func installDocker() error {
|
||||
}
|
||||
|
||||
func startDockerService() error {
|
||||
switch runtime.GOOS {
|
||||
case "linux":
|
||||
if runtime.GOOS == "linux" {
|
||||
cmd := exec.Command("systemctl", "enable", "--now", "docker")
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
return cmd.Run()
|
||||
case "darwin":
|
||||
} else if runtime.GOOS == "darwin" {
|
||||
// On macOS, Docker is usually started via the Docker Desktop application
|
||||
fmt.Println("Please start Docker Desktop manually on macOS.")
|
||||
return nil
|
||||
@@ -211,47 +210,6 @@ func isDockerRunning() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func isPodmanRunning() bool {
|
||||
cmd := exec.Command("podman", "info")
|
||||
if err := cmd.Run(); err != nil {
|
||||
return false
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// detectContainerType detects whether the system is currently using Docker or Podman
|
||||
// by checking which container runtime is running and has containers
|
||||
func detectContainerType() SupportedContainer {
|
||||
// Check if we have running containers with podman
|
||||
if isPodmanRunning() {
|
||||
cmd := exec.Command("podman", "ps", "-q")
|
||||
output, err := cmd.Output()
|
||||
if err == nil && len(strings.TrimSpace(string(output))) > 0 {
|
||||
return Podman
|
||||
}
|
||||
}
|
||||
|
||||
// Check if we have running containers with docker
|
||||
if isDockerRunning() {
|
||||
cmd := exec.Command("docker", "ps", "-q")
|
||||
output, err := cmd.Output()
|
||||
if err == nil && len(strings.TrimSpace(string(output))) > 0 {
|
||||
return Docker
|
||||
}
|
||||
}
|
||||
|
||||
// If no containers are running, check which one is installed and running
|
||||
if isPodmanRunning() && isPodmanInstalled() {
|
||||
return Podman
|
||||
}
|
||||
|
||||
if isDockerRunning() && isDockerInstalled() {
|
||||
return Docker
|
||||
}
|
||||
|
||||
return Undefined
|
||||
}
|
||||
|
||||
// executeDockerComposeCommandWithArgs executes the appropriate docker command with arguments supplied
|
||||
func executeDockerComposeCommandWithArgs(args ...string) error {
|
||||
var cmd *exec.Cmd
|
||||
@@ -303,7 +261,7 @@ func pullContainers(containerType SupportedContainer) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("unsupported container type: %s", containerType)
|
||||
return fmt.Errorf("Unsupported container type: %s", containerType)
|
||||
}
|
||||
|
||||
// startContainers starts the containers using the appropriate command.
|
||||
@@ -326,7 +284,7 @@ func startContainers(containerType SupportedContainer) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("unsupported container type: %s", containerType)
|
||||
return fmt.Errorf("Unsupported container type: %s", containerType)
|
||||
}
|
||||
|
||||
// stopContainers stops the containers using the appropriate command.
|
||||
@@ -348,7 +306,7 @@ func stopContainers(containerType SupportedContainer) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("unsupported container type: %s", containerType)
|
||||
return fmt.Errorf("Unsupported container type: %s", containerType)
|
||||
}
|
||||
|
||||
// restartContainer restarts a specific container using the appropriate command.
|
||||
@@ -370,5 +328,5 @@ func restartContainer(container string, containerType SupportedContainer) error
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf("unsupported container type: %s", containerType)
|
||||
return fmt.Errorf("Unsupported container type: %s", containerType)
|
||||
}
|
||||
|
||||
@@ -27,18 +27,9 @@ func installCrowdsec(config Config) error {
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if err := os.MkdirAll("config/crowdsec/db", 0755); err != nil {
|
||||
fmt.Printf("Error creating config files: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
if err := os.MkdirAll("config/crowdsec/acquis.d", 0755); err != nil {
|
||||
fmt.Printf("Error creating config files: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
if err := os.MkdirAll("config/traefik/logs", 0755); err != nil {
|
||||
fmt.Printf("Error creating config files: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
os.MkdirAll("config/crowdsec/db", 0755)
|
||||
os.MkdirAll("config/crowdsec/acquis.d", 0755)
|
||||
os.MkdirAll("config/traefik/logs", 0755)
|
||||
|
||||
if err := copyDockerService("config/crowdsec/docker-compose.yml", "docker-compose.yml", "crowdsec"); err != nil {
|
||||
fmt.Printf("Error copying docker service: %v\n", err)
|
||||
@@ -102,7 +93,7 @@ func installCrowdsec(config Config) error {
|
||||
|
||||
if checkIfTextInFile("config/traefik/dynamic_config.yml", "PUT_YOUR_BOUNCER_KEY_HERE_OR_IT_WILL_NOT_WORK") {
|
||||
fmt.Println("Failed to replace bouncer key! Please retrieve the key and replace it in the config/traefik/dynamic_config.yml file using the following command:")
|
||||
fmt.Printf(" %s exec crowdsec cscli bouncers add traefik-bouncer\n", config.InstallationContainerType)
|
||||
fmt.Println(" docker exec crowdsec cscli bouncers add traefik-bouncer")
|
||||
}
|
||||
|
||||
return nil
|
||||
@@ -126,7 +117,7 @@ func GetCrowdSecAPIKey(containerType SupportedContainer) (string, error) {
|
||||
}
|
||||
|
||||
// Execute the command to get the API key
|
||||
cmd := exec.Command(string(containerType), "exec", "crowdsec", "cscli", "bouncers", "add", "traefik-bouncer", "-o", "raw")
|
||||
cmd := exec.Command("docker", "exec", "crowdsec", "cscli", "bouncers", "add", "traefik-bouncer", "-o", "raw")
|
||||
var out bytes.Buffer
|
||||
cmd.Stdout = &out
|
||||
|
||||
@@ -162,34 +153,34 @@ func CheckAndAddCrowdsecDependency(composePath string) error {
|
||||
}
|
||||
|
||||
// Parse YAML into a generic map
|
||||
var compose map[string]any
|
||||
var compose map[string]interface{}
|
||||
if err := yaml.Unmarshal(data, &compose); err != nil {
|
||||
return fmt.Errorf("error parsing compose file: %w", err)
|
||||
}
|
||||
|
||||
// Get services section
|
||||
services, ok := compose["services"].(map[string]any)
|
||||
services, ok := compose["services"].(map[string]interface{})
|
||||
if !ok {
|
||||
return fmt.Errorf("services section not found or invalid")
|
||||
}
|
||||
|
||||
// Get traefik service
|
||||
traefik, ok := services["traefik"].(map[string]any)
|
||||
traefik, ok := services["traefik"].(map[string]interface{})
|
||||
if !ok {
|
||||
return fmt.Errorf("traefik service not found or invalid")
|
||||
}
|
||||
|
||||
// Get dependencies
|
||||
dependsOn, ok := traefik["depends_on"].(map[string]any)
|
||||
dependsOn, ok := traefik["depends_on"].(map[string]interface{})
|
||||
if ok {
|
||||
// Append the new block for crowdsec
|
||||
dependsOn["crowdsec"] = map[string]any{
|
||||
dependsOn["crowdsec"] = map[string]interface{}{
|
||||
"condition": "service_healthy",
|
||||
}
|
||||
} else {
|
||||
// No dependencies exist, create it
|
||||
traefik["depends_on"] = map[string]any{
|
||||
"crowdsec": map[string]any{
|
||||
traefik["depends_on"] = map[string]interface{}{
|
||||
"crowdsec": map[string]interface{}{
|
||||
"condition": "service_healthy",
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1,180 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Get installer - Cross-platform installation script
|
||||
# Usage: curl -fsSL https://raw.githubusercontent.com/fosrl/installer/refs/heads/main/get-installer.sh | bash
|
||||
|
||||
set -e
|
||||
|
||||
# Colors for output
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
# GitHub repository info
|
||||
REPO="fosrl/pangolin"
|
||||
GITHUB_API_URL="https://api.github.com/repos/${REPO}/releases/latest"
|
||||
|
||||
# Function to print colored output
|
||||
print_status() {
|
||||
echo -e "${GREEN}[INFO]${NC} $1"
|
||||
}
|
||||
|
||||
print_warning() {
|
||||
echo -e "${YELLOW}[WARN]${NC} $1"
|
||||
}
|
||||
|
||||
print_error() {
|
||||
echo -e "${RED}[ERROR]${NC} $1"
|
||||
}
|
||||
|
||||
# Function to get latest version from GitHub API
|
||||
get_latest_version() {
|
||||
local latest_info
|
||||
|
||||
if command -v curl >/dev/null 2>&1; then
|
||||
latest_info=$(curl -fsSL "$GITHUB_API_URL" 2>/dev/null)
|
||||
elif command -v wget >/dev/null 2>&1; then
|
||||
latest_info=$(wget -qO- "$GITHUB_API_URL" 2>/dev/null)
|
||||
else
|
||||
print_error "Neither curl nor wget is available. Please install one of them." >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [ -z "$latest_info" ]; then
|
||||
print_error "Failed to fetch latest version information" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Extract version from JSON response (works without jq)
|
||||
local version=$(echo "$latest_info" | grep '"tag_name"' | head -1 | sed 's/.*"tag_name": *"\([^"]*\)".*/\1/')
|
||||
|
||||
if [ -z "$version" ]; then
|
||||
print_error "Could not parse version from GitHub API response" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Remove 'v' prefix if present
|
||||
version=$(echo "$version" | sed 's/^v//')
|
||||
|
||||
echo "$version"
|
||||
}
|
||||
|
||||
# Detect OS and architecture
|
||||
detect_platform() {
|
||||
local os arch
|
||||
|
||||
# Detect OS - only support Linux
|
||||
case "$(uname -s)" in
|
||||
Linux*) os="linux" ;;
|
||||
*)
|
||||
print_error "Unsupported operating system: $(uname -s). Only Linux is supported."
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
# Detect architecture - only support amd64 and arm64
|
||||
case "$(uname -m)" in
|
||||
x86_64|amd64) arch="amd64" ;;
|
||||
arm64|aarch64) arch="arm64" ;;
|
||||
*)
|
||||
print_error "Unsupported architecture: $(uname -m). Only amd64 and arm64 are supported on Linux."
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
echo "${os}_${arch}"
|
||||
}
|
||||
|
||||
# Get installation directory
|
||||
get_install_dir() {
|
||||
# Install to the current directory
|
||||
local install_dir="$(pwd)"
|
||||
if [ ! -d "$install_dir" ]; then
|
||||
print_error "Installation directory does not exist: $install_dir"
|
||||
exit 1
|
||||
fi
|
||||
echo "$install_dir"
|
||||
}
|
||||
|
||||
# Download and install installer
|
||||
install_installer() {
|
||||
local platform="$1"
|
||||
local install_dir="$2"
|
||||
local binary_name="installer_${platform}"
|
||||
|
||||
local download_url="${BASE_URL}/${binary_name}"
|
||||
local temp_file="/tmp/installer"
|
||||
local final_path="${install_dir}/installer"
|
||||
|
||||
print_status "Downloading installer from ${download_url}"
|
||||
|
||||
# Download the binary
|
||||
if command -v curl >/dev/null 2>&1; then
|
||||
curl -fsSL "$download_url" -o "$temp_file"
|
||||
elif command -v wget >/dev/null 2>&1; then
|
||||
wget -q "$download_url" -O "$temp_file"
|
||||
else
|
||||
print_error "Neither curl nor wget is available. Please install one of them."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Create install directory if it doesn't exist
|
||||
mkdir -p "$install_dir"
|
||||
|
||||
# Move binary to install directory
|
||||
mv "$temp_file" "$final_path"
|
||||
|
||||
# Make executable
|
||||
chmod +x "$final_path"
|
||||
|
||||
print_status "Installer downloaded to ${final_path}"
|
||||
}
|
||||
|
||||
# Verify installation
|
||||
verify_installation() {
|
||||
local install_dir="$1"
|
||||
local installer_path="${install_dir}/installer"
|
||||
|
||||
if [ -f "$installer_path" ] && [ -x "$installer_path" ]; then
|
||||
print_status "Installation successful!"
|
||||
return 0
|
||||
else
|
||||
print_error "Installation failed. Binary not found or not executable."
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Main installation process
|
||||
main() {
|
||||
print_status "Installing latest version of installer..."
|
||||
|
||||
# Get latest version
|
||||
print_status "Fetching latest version from GitHub..."
|
||||
VERSION=$(get_latest_version)
|
||||
print_status "Latest version: v${VERSION}"
|
||||
|
||||
# Set base URL with the fetched version
|
||||
BASE_URL="https://github.com/${REPO}/releases/download/${VERSION}"
|
||||
|
||||
# Detect platform
|
||||
PLATFORM=$(detect_platform)
|
||||
print_status "Detected platform: ${PLATFORM}"
|
||||
|
||||
# Get install directory
|
||||
INSTALL_DIR=$(get_install_dir)
|
||||
print_status "Install directory: ${INSTALL_DIR}"
|
||||
|
||||
# Install installer
|
||||
install_installer "$PLATFORM" "$INSTALL_DIR"
|
||||
|
||||
# Verify installation
|
||||
if verify_installation "$INSTALL_DIR"; then
|
||||
print_status "Installer is ready to use!"
|
||||
else
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Run main function
|
||||
main "$@"
|
||||
@@ -3,36 +3,8 @@ module installer
|
||||
go 1.24.0
|
||||
|
||||
require (
|
||||
github.com/charmbracelet/huh v0.8.0
|
||||
github.com/charmbracelet/lipgloss v1.1.0
|
||||
golang.org/x/term v0.40.0
|
||||
golang.org/x/term v0.35.0
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/atotto/clipboard v0.1.4 // indirect
|
||||
github.com/aymanbagabas/go-osc52/v2 v2.0.1 // indirect
|
||||
github.com/catppuccin/go v0.3.0 // indirect
|
||||
github.com/charmbracelet/bubbles v0.21.1-0.20250623103423-23b8fd6302d7 // indirect
|
||||
github.com/charmbracelet/bubbletea v1.3.6 // indirect
|
||||
github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect
|
||||
github.com/charmbracelet/x/ansi v0.9.3 // indirect
|
||||
github.com/charmbracelet/x/cellbuf v0.0.13 // indirect
|
||||
github.com/charmbracelet/x/exp/strings v0.0.0-20240722160745-212f7b056ed0 // indirect
|
||||
github.com/charmbracelet/x/term v0.2.1 // indirect
|
||||
github.com/dustin/go-humanize v1.0.1 // indirect
|
||||
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f // indirect
|
||||
github.com/lucasb-eyer/go-colorful v1.2.0 // indirect
|
||||
github.com/mattn/go-isatty v0.0.20 // indirect
|
||||
github.com/mattn/go-localereader v0.0.1 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.16 // indirect
|
||||
github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect
|
||||
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 // indirect
|
||||
github.com/muesli/cancelreader v0.2.2 // indirect
|
||||
github.com/muesli/termenv v0.16.0 // indirect
|
||||
github.com/rivo/uniseg v0.4.7 // indirect
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect
|
||||
golang.org/x/sync v0.15.0 // indirect
|
||||
golang.org/x/sys v0.41.0 // indirect
|
||||
golang.org/x/text v0.23.0 // indirect
|
||||
)
|
||||
require golang.org/x/sys v0.36.0 // indirect
|
||||
|
||||
@@ -1,80 +1,7 @@
|
||||
github.com/MakeNowJust/heredoc v1.0.0 h1:cXCdzVdstXyiTqTvfqk9SDHpKNjxuom+DOlyEeQ4pzQ=
|
||||
github.com/MakeNowJust/heredoc v1.0.0/go.mod h1:mG5amYoWBHf8vpLOuehzbGGw0EHxpZZ6lCpQ4fNJ8LE=
|
||||
github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4=
|
||||
github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI=
|
||||
github.com/aymanbagabas/go-osc52/v2 v2.0.1 h1:HwpRHbFMcZLEVr42D4p7XBqjyuxQH5SMiErDT4WkJ2k=
|
||||
github.com/aymanbagabas/go-osc52/v2 v2.0.1/go.mod h1:uYgXzlJ7ZpABp8OJ+exZzJJhRNQ2ASbcXHWsFqH8hp8=
|
||||
github.com/aymanbagabas/go-udiff v0.3.1 h1:LV+qyBQ2pqe0u42ZsUEtPiCaUoqgA9gYRDs3vj1nolY=
|
||||
github.com/aymanbagabas/go-udiff v0.3.1/go.mod h1:G0fsKmG+P6ylD0r6N/KgQD/nWzgfnl8ZBcNLgcbrw8E=
|
||||
github.com/catppuccin/go v0.3.0 h1:d+0/YicIq+hSTo5oPuRi5kOpqkVA5tAsU6dNhvRu+aY=
|
||||
github.com/catppuccin/go v0.3.0/go.mod h1:8IHJuMGaUUjQM82qBrGNBv7LFq6JI3NnQCF6MOlZjpc=
|
||||
github.com/charmbracelet/bubbles v0.21.1-0.20250623103423-23b8fd6302d7 h1:JFgG/xnwFfbezlUnFMJy0nusZvytYysV4SCS2cYbvws=
|
||||
github.com/charmbracelet/bubbles v0.21.1-0.20250623103423-23b8fd6302d7/go.mod h1:ISC1gtLcVilLOf23wvTfoQuYbW2q0JevFxPfUzZ9Ybw=
|
||||
github.com/charmbracelet/bubbletea v1.3.6 h1:VkHIxPJQeDt0aFJIsVxw8BQdh/F/L2KKZGsK6et5taU=
|
||||
github.com/charmbracelet/bubbletea v1.3.6/go.mod h1:oQD9VCRQFF8KplacJLo28/jofOI2ToOfGYeFgBBxHOc=
|
||||
github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc h1:4pZI35227imm7yK2bGPcfpFEmuY1gc2YSTShr4iJBfs=
|
||||
github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc/go.mod h1:X4/0JoqgTIPSFcRA/P6INZzIuyqdFY5rm8tb41s9okk=
|
||||
github.com/charmbracelet/huh v0.8.0 h1:Xz/Pm2h64cXQZn/Jvele4J3r7DDiqFCNIVteYukxDvY=
|
||||
github.com/charmbracelet/huh v0.8.0/go.mod h1:5YVc+SlZ1IhQALxRPpkGwwEKftN/+OlJlnJYlDRFqN4=
|
||||
github.com/charmbracelet/lipgloss v1.1.0 h1:vYXsiLHVkK7fp74RkV7b2kq9+zDLoEU4MZoFqR/noCY=
|
||||
github.com/charmbracelet/lipgloss v1.1.0/go.mod h1:/6Q8FR2o+kj8rz4Dq0zQc3vYf7X+B0binUUBwA0aL30=
|
||||
github.com/charmbracelet/x/ansi v0.9.3 h1:BXt5DHS/MKF+LjuK4huWrC6NCvHtexww7dMayh6GXd0=
|
||||
github.com/charmbracelet/x/ansi v0.9.3/go.mod h1:3RQDQ6lDnROptfpWuUVIUG64bD2g2BgntdxH0Ya5TeE=
|
||||
github.com/charmbracelet/x/cellbuf v0.0.13 h1:/KBBKHuVRbq1lYx5BzEHBAFBP8VcQzJejZ/IA3iR28k=
|
||||
github.com/charmbracelet/x/cellbuf v0.0.13/go.mod h1:xe0nKWGd3eJgtqZRaN9RjMtK7xUYchjzPr7q6kcvCCs=
|
||||
github.com/charmbracelet/x/conpty v0.1.0 h1:4zc8KaIcbiL4mghEON8D72agYtSeIgq8FSThSPQIb+U=
|
||||
github.com/charmbracelet/x/conpty v0.1.0/go.mod h1:rMFsDJoDwVmiYM10aD4bH2XiRgwI7NYJtQgl5yskjEQ=
|
||||
github.com/charmbracelet/x/errors v0.0.0-20240508181413-e8d8b6e2de86 h1:JSt3B+U9iqk37QUU2Rvb6DSBYRLtWqFqfxf8l5hOZUA=
|
||||
github.com/charmbracelet/x/errors v0.0.0-20240508181413-e8d8b6e2de86/go.mod h1:2P0UgXMEa6TsToMSuFqKFQR+fZTO9CNGUNokkPatT/0=
|
||||
github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91 h1:payRxjMjKgx2PaCWLZ4p3ro9y97+TVLZNaRZgJwSVDQ=
|
||||
github.com/charmbracelet/x/exp/golden v0.0.0-20241011142426-46044092ad91/go.mod h1:wDlXFlCrmJ8J+swcL/MnGUuYnqgQdW9rhSD61oNMb6U=
|
||||
github.com/charmbracelet/x/exp/strings v0.0.0-20240722160745-212f7b056ed0 h1:qko3AQ4gK1MTS/de7F5hPGx6/k1u0w4TeYmBFwzYVP4=
|
||||
github.com/charmbracelet/x/exp/strings v0.0.0-20240722160745-212f7b056ed0/go.mod h1:pBhA0ybfXv6hDjQUZ7hk1lVxBiUbupdw5R31yPUViVQ=
|
||||
github.com/charmbracelet/x/term v0.2.1 h1:AQeHeLZ1OqSXhrAWpYUtZyX1T3zVxfpZuEQMIQaGIAQ=
|
||||
github.com/charmbracelet/x/term v0.2.1/go.mod h1:oQ4enTYFV7QN4m0i9mzHrViD7TQKvNEEkHUMCmsxdUg=
|
||||
github.com/charmbracelet/x/termios v0.1.1 h1:o3Q2bT8eqzGnGPOYheoYS8eEleT5ZVNYNy8JawjaNZY=
|
||||
github.com/charmbracelet/x/termios v0.1.1/go.mod h1:rB7fnv1TgOPOyyKRJ9o+AsTU/vK5WHJ2ivHeut/Pcwo=
|
||||
github.com/charmbracelet/x/xpty v0.1.2 h1:Pqmu4TEJ8KeA9uSkISKMU3f+C1F6OGBn8ABuGlqCbtI=
|
||||
github.com/charmbracelet/x/xpty v0.1.2/go.mod h1:XK2Z0id5rtLWcpeNiMYBccNNBrP2IJnzHI0Lq13Xzq4=
|
||||
github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s=
|
||||
github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE=
|
||||
github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
|
||||
github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
|
||||
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6baUTXGLOoWe4PQhGxaX0KpnayAqC48p4=
|
||||
github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM=
|
||||
github.com/lucasb-eyer/go-colorful v1.2.0 h1:1nnpGOrhyZZuNyfu1QjKiUICQ74+3FNCN69Aj6K7nkY=
|
||||
github.com/lucasb-eyer/go-colorful v1.2.0/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
|
||||
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
|
||||
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4=
|
||||
github.com/mattn/go-localereader v0.0.1/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88=
|
||||
github.com/mattn/go-runewidth v0.0.16 h1:E5ScNMtiwvlvB5paMFdw9p4kSQzbXFikJ5SQO6TULQc=
|
||||
github.com/mattn/go-runewidth v0.0.16/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4=
|
||||
github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/zz4kQkprJgF2EVszyDE=
|
||||
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6 h1:ZK8zHtRHOkbHy6Mmr5D264iyp3TiX5OmNcI5cIARiQI=
|
||||
github.com/muesli/ansi v0.0.0-20230316100256-276c6243b2f6/go.mod h1:CJlz5H+gyd6CUWT45Oy4q24RdLyn7Md9Vj2/ldJBSIo=
|
||||
github.com/muesli/cancelreader v0.2.2 h1:3I4Kt4BQjOR54NavqnDogx/MIoWBFa0StPA8ELUXHmA=
|
||||
github.com/muesli/cancelreader v0.2.2/go.mod h1:3XuTXfFS2VjM+HTLZY9Ak0l6eUKfijIfMUZ4EgX0QYo=
|
||||
github.com/muesli/termenv v0.16.0 h1:S5AlUN9dENB57rsbnkPyfdGuWIlkmzJjbFf0Tf5FWUc=
|
||||
github.com/muesli/termenv v0.16.0/go.mod h1:ZRfOIKPFDYQoDFF4Olj7/QJbW60Ol/kL1pU3VfY/Cnk=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ=
|
||||
github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88=
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no=
|
||||
github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM=
|
||||
golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI=
|
||||
golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo=
|
||||
golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
|
||||
golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
|
||||
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k=
|
||||
golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.40.0 h1:36e4zGLqU4yhjlmxEaagx2KuYbJq3EwY8K943ZsHcvg=
|
||||
golang.org/x/term v0.40.0/go.mod h1:w2P8uVp06p2iyKKuvXIm7N/y0UCRt3UfJTfZ7oOpglM=
|
||||
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
|
||||
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
|
||||
golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k=
|
||||
golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||
golang.org/x/term v0.35.0 h1:bZBVKBudEyhRcajGcNc3jIfWPqV4y/Kt2XcoigOWtDQ=
|
||||
golang.org/x/term v0.35.0/go.mod h1:TPGtkTLesOwf2DE8CgVYiZinHAOuy5AYUYT1lENIZnA=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
|
||||
275
install/input.go
@@ -1,235 +1,74 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"bufio"
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"syscall"
|
||||
|
||||
"github.com/charmbracelet/huh"
|
||||
"golang.org/x/term"
|
||||
)
|
||||
|
||||
// pangolinTheme is the custom theme using brand colors
|
||||
var pangolinTheme = ThemePangolin()
|
||||
|
||||
// isAccessibleMode checks if we should use accessible mode (simple prompts)
|
||||
// This is true for: non-TTY, TERM=dumb, or ACCESSIBLE env var set
|
||||
func isAccessibleMode() bool {
|
||||
// Check if stdin is not a terminal (piped input, CI, etc.)
|
||||
if !term.IsTerminal(int(os.Stdin.Fd())) {
|
||||
return true
|
||||
}
|
||||
// Check for dumb terminal
|
||||
if os.Getenv("TERM") == "dumb" {
|
||||
return true
|
||||
}
|
||||
// Check for explicit accessible mode request
|
||||
if os.Getenv("ACCESSIBLE") != "" {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// handleAbort checks if the error is a user abort (Ctrl+C) and exits if so
|
||||
func handleAbort(err error) {
|
||||
if err != nil && errors.Is(err, huh.ErrUserAborted) {
|
||||
fmt.Println("\nInstallation cancelled.")
|
||||
os.Exit(0)
|
||||
}
|
||||
}
|
||||
|
||||
// runField runs a single field with the Pangolin theme, handling accessible mode
|
||||
func runField(field huh.Field) error {
|
||||
if isAccessibleMode() {
|
||||
return field.RunAccessible(os.Stdout, os.Stdin)
|
||||
}
|
||||
form := huh.NewForm(huh.NewGroup(field)).WithTheme(pangolinTheme)
|
||||
return form.Run()
|
||||
}
|
||||
|
||||
func readString(prompt string, defaultValue string) string {
|
||||
var value string
|
||||
|
||||
title := prompt
|
||||
func readString(reader *bufio.Reader, prompt string, defaultValue string) string {
|
||||
if defaultValue != "" {
|
||||
title = fmt.Sprintf("%s (default: %s)", prompt, defaultValue)
|
||||
fmt.Printf("%s (default: %s): ", prompt, defaultValue)
|
||||
} else {
|
||||
fmt.Print(prompt + ": ")
|
||||
}
|
||||
|
||||
input := huh.NewInput().
|
||||
Title(title).
|
||||
Value(&value)
|
||||
|
||||
// If no default value, this field is required
|
||||
if defaultValue == "" {
|
||||
input = input.Validate(func(s string) error {
|
||||
if s == "" {
|
||||
return fmt.Errorf("this field is required")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
err := runField(input)
|
||||
handleAbort(err)
|
||||
|
||||
if value == "" {
|
||||
value = defaultValue
|
||||
}
|
||||
|
||||
// Print the answer so it remains visible in terminal history (skip in accessible mode as it already shows)
|
||||
if !isAccessibleMode() {
|
||||
fmt.Printf("%s: %s\n", prompt, value)
|
||||
}
|
||||
|
||||
return value
|
||||
}
|
||||
|
||||
func readStringNoDefault(prompt string) string {
|
||||
var value string
|
||||
|
||||
for {
|
||||
input := huh.NewInput().
|
||||
Title(prompt).
|
||||
Value(&value).
|
||||
Validate(func(s string) error {
|
||||
if s == "" {
|
||||
return fmt.Errorf("this field is required")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
err := runField(input)
|
||||
handleAbort(err)
|
||||
|
||||
if value != "" {
|
||||
// Print the answer so it remains visible in terminal history
|
||||
if !isAccessibleMode() {
|
||||
fmt.Printf("%s: %s\n", prompt, value)
|
||||
}
|
||||
return value
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func readPassword(prompt string) string {
|
||||
var value string
|
||||
|
||||
for {
|
||||
input := huh.NewInput().
|
||||
Title(prompt).
|
||||
Value(&value).
|
||||
EchoMode(huh.EchoModePassword).
|
||||
Validate(func(s string) error {
|
||||
if s == "" {
|
||||
return fmt.Errorf("password is required")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
err := runField(input)
|
||||
handleAbort(err)
|
||||
|
||||
if value != "" {
|
||||
// Print confirmation without revealing the password
|
||||
if !isAccessibleMode() {
|
||||
fmt.Printf("%s: %s\n", prompt, "********")
|
||||
}
|
||||
return value
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func readBool(prompt string, defaultValue bool) bool {
|
||||
var value = defaultValue
|
||||
|
||||
confirm := huh.NewConfirm().
|
||||
Title(prompt).
|
||||
Value(&value).
|
||||
Affirmative("Yes").
|
||||
Negative("No")
|
||||
|
||||
err := runField(confirm)
|
||||
handleAbort(err)
|
||||
|
||||
// Print the answer so it remains visible in terminal history
|
||||
if !isAccessibleMode() {
|
||||
answer := "No"
|
||||
if value {
|
||||
answer = "Yes"
|
||||
}
|
||||
fmt.Printf("%s: %s\n", prompt, answer)
|
||||
}
|
||||
|
||||
return value
|
||||
}
|
||||
|
||||
func readBoolNoDefault(prompt string) bool {
|
||||
var value bool
|
||||
|
||||
confirm := huh.NewConfirm().
|
||||
Title(prompt).
|
||||
Value(&value).
|
||||
Affirmative("Yes").
|
||||
Negative("No")
|
||||
|
||||
err := runField(confirm)
|
||||
handleAbort(err)
|
||||
|
||||
// Print the answer so it remains visible in terminal history
|
||||
if !isAccessibleMode() {
|
||||
answer := "No"
|
||||
if value {
|
||||
answer = "Yes"
|
||||
}
|
||||
fmt.Printf("%s: %s\n", prompt, answer)
|
||||
}
|
||||
|
||||
return value
|
||||
}
|
||||
|
||||
func readInt(prompt string, defaultValue int) int {
|
||||
var value string
|
||||
|
||||
title := fmt.Sprintf("%s (default: %d)", prompt, defaultValue)
|
||||
|
||||
input := huh.NewInput().
|
||||
Title(title).
|
||||
Value(&value).
|
||||
Validate(func(s string) error {
|
||||
if s == "" {
|
||||
return nil
|
||||
}
|
||||
_, err := strconv.Atoi(s)
|
||||
if err != nil {
|
||||
return fmt.Errorf("please enter a valid number")
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
err := runField(input)
|
||||
handleAbort(err)
|
||||
|
||||
if value == "" {
|
||||
// Print the answer so it remains visible in terminal history
|
||||
if !isAccessibleMode() {
|
||||
fmt.Printf("%s: %d\n", prompt, defaultValue)
|
||||
}
|
||||
input, _ := reader.ReadString('\n')
|
||||
input = strings.TrimSpace(input)
|
||||
if input == "" {
|
||||
return defaultValue
|
||||
}
|
||||
return input
|
||||
}
|
||||
|
||||
result, err := strconv.Atoi(value)
|
||||
if err != nil {
|
||||
if !isAccessibleMode() {
|
||||
fmt.Printf("%s: %d\n", prompt, defaultValue)
|
||||
func readStringNoDefault(reader *bufio.Reader, prompt string) string {
|
||||
fmt.Print(prompt + ": ")
|
||||
input, _ := reader.ReadString('\n')
|
||||
return strings.TrimSpace(input)
|
||||
}
|
||||
|
||||
func readPassword(prompt string, reader *bufio.Reader) string {
|
||||
if term.IsTerminal(int(syscall.Stdin)) {
|
||||
fmt.Print(prompt + ": ")
|
||||
// Read password without echo if we're in a terminal
|
||||
password, err := term.ReadPassword(int(syscall.Stdin))
|
||||
fmt.Println() // Add a newline since ReadPassword doesn't add one
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
input := strings.TrimSpace(string(password))
|
||||
if input == "" {
|
||||
return readPassword(prompt, reader)
|
||||
}
|
||||
return input
|
||||
} else {
|
||||
// Fallback to reading from stdin if not in a terminal
|
||||
return readString(reader, prompt, "")
|
||||
}
|
||||
}
|
||||
|
||||
func readBool(reader *bufio.Reader, prompt string, defaultValue bool) bool {
|
||||
defaultStr := "no"
|
||||
if defaultValue {
|
||||
defaultStr = "yes"
|
||||
}
|
||||
input := readString(reader, prompt+" (yes/no)", defaultStr)
|
||||
return strings.ToLower(input) == "yes"
|
||||
}
|
||||
|
||||
func readBoolNoDefault(reader *bufio.Reader, prompt string) bool {
|
||||
input := readStringNoDefault(reader, prompt+" (yes/no)")
|
||||
return strings.ToLower(input) == "yes"
|
||||
}
|
||||
|
||||
func readInt(reader *bufio.Reader, prompt string, defaultValue int) int {
|
||||
input := readString(reader, prompt, fmt.Sprintf("%d", defaultValue))
|
||||
if input == "" {
|
||||
return defaultValue
|
||||
}
|
||||
|
||||
// Print the answer so it remains visible in terminal history
|
||||
if !isAccessibleMode() {
|
||||
fmt.Printf("%s: %d\n", prompt, result)
|
||||
}
|
||||
|
||||
return result
|
||||
value := defaultValue
|
||||
fmt.Sscanf(input, "%d", &value)
|
||||
return value
|
||||
}
|
||||
|
||||
340
install/main.go
@@ -1,12 +1,13 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"bufio"
|
||||
"bytes"
|
||||
"embed"
|
||||
"encoding/base64"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/fs"
|
||||
"math/rand"
|
||||
"net"
|
||||
"net/http"
|
||||
"net/url"
|
||||
@@ -19,17 +20,11 @@ import (
|
||||
"time"
|
||||
)
|
||||
|
||||
// Version variables injected at build time via -ldflags
|
||||
var (
|
||||
pangolinVersion string
|
||||
gerbilVersion string
|
||||
badgerVersion string
|
||||
)
|
||||
|
||||
// DO NOT EDIT THIS FUNCTION; IT MATCHED BY REGEX IN CICD
|
||||
func loadVersions(config *Config) {
|
||||
config.PangolinVersion = pangolinVersion
|
||||
config.GerbilVersion = gerbilVersion
|
||||
config.BadgerVersion = badgerVersion
|
||||
config.PangolinVersion = "replaceme"
|
||||
config.GerbilVersion = "replaceme"
|
||||
config.BadgerVersion = "replaceme"
|
||||
}
|
||||
|
||||
//go:embed config/*
|
||||
@@ -53,16 +48,17 @@ type Config struct {
|
||||
InstallGerbil bool
|
||||
TraefikBouncerKey string
|
||||
DoCrowdsecInstall bool
|
||||
EnableGeoblocking bool
|
||||
Secret string
|
||||
IsEnterprise bool
|
||||
HybridMode bool
|
||||
HybridId string
|
||||
HybridSecret string
|
||||
}
|
||||
|
||||
type SupportedContainer string
|
||||
|
||||
const (
|
||||
Docker SupportedContainer = "docker"
|
||||
Podman SupportedContainer = "podman"
|
||||
Docker SupportedContainer = "docker"
|
||||
Podman SupportedContainer = "podman"
|
||||
Undefined SupportedContainer = "undefined"
|
||||
)
|
||||
|
||||
@@ -87,12 +83,14 @@ func main() {
|
||||
}
|
||||
}
|
||||
|
||||
reader := bufio.NewReader(os.Stdin)
|
||||
|
||||
var config Config
|
||||
var alreadyInstalled = false
|
||||
|
||||
// check if there is already a config file
|
||||
if _, err := os.Stat("config/config.yml"); err != nil {
|
||||
config = collectUserInput()
|
||||
config = collectUserInput(reader)
|
||||
|
||||
loadVersions(&config)
|
||||
config.DoCrowdsecInstall = false
|
||||
@@ -100,40 +98,42 @@ func main() {
|
||||
|
||||
fmt.Println("\n=== Generating Configuration Files ===")
|
||||
|
||||
// If the secret and id are not generated then generate them
|
||||
if config.HybridMode && (config.HybridId == "" || config.HybridSecret == "") {
|
||||
// fmt.Println("Requesting hybrid credentials from cloud...")
|
||||
credentials, err := requestHybridCredentials()
|
||||
if err != nil {
|
||||
fmt.Printf("Error requesting hybrid credentials: %v\n", err)
|
||||
fmt.Println("Please obtain credentials manually from the dashboard and run the installer again.")
|
||||
os.Exit(1)
|
||||
}
|
||||
config.HybridId = credentials.RemoteExitNodeId
|
||||
config.HybridSecret = credentials.Secret
|
||||
fmt.Printf("Your managed credentials have been obtained successfully.\n")
|
||||
fmt.Printf(" ID: %s\n", config.HybridId)
|
||||
fmt.Printf(" Secret: %s\n", config.HybridSecret)
|
||||
fmt.Println("Take these to the Pangolin dashboard https://pangolin.fossorial.io to adopt your node.")
|
||||
readBool(reader, "Have you adopted your node?", true)
|
||||
}
|
||||
|
||||
if err := createConfigFiles(config); err != nil {
|
||||
fmt.Printf("Error creating config files: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if err := moveFile("config/docker-compose.yml", "docker-compose.yml"); err != nil {
|
||||
fmt.Printf("Error moving docker-compose.yml: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
moveFile("config/docker-compose.yml", "docker-compose.yml")
|
||||
|
||||
fmt.Println("\nConfiguration files created successfully!")
|
||||
|
||||
// Download MaxMind database if requested
|
||||
if config.EnableGeoblocking {
|
||||
fmt.Println("\n=== Downloading MaxMind Database ===")
|
||||
if err := downloadMaxMindDatabase(); err != nil {
|
||||
fmt.Printf("Error downloading MaxMind database: %v\n", err)
|
||||
fmt.Println("You can download it manually later if needed.")
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Println("\n=== Starting installation ===")
|
||||
|
||||
if readBool("Would you like to install and start the containers?", true) {
|
||||
if readBool(reader, "Would you like to install and start the containers?", true) {
|
||||
|
||||
config.InstallationContainerType = podmanOrDocker()
|
||||
config.InstallationContainerType = podmanOrDocker(reader)
|
||||
|
||||
if !isDockerInstalled() && runtime.GOOS == "linux" && config.InstallationContainerType == Docker {
|
||||
if readBool("Docker is not installed. Would you like to install it?", true) {
|
||||
if err := installDocker(); err != nil {
|
||||
fmt.Printf("Error installing Docker: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
if readBool(reader, "Docker is not installed. Would you like to install it?", true) {
|
||||
installDocker()
|
||||
// try to start docker service but ignore errors
|
||||
if err := startDockerService(); err != nil {
|
||||
fmt.Println("Error starting Docker service:", err)
|
||||
@@ -142,7 +142,7 @@ func main() {
|
||||
}
|
||||
// wait 10 seconds for docker to start checking if docker is running every 2 seconds
|
||||
fmt.Println("Waiting for Docker to start...")
|
||||
for range 5 {
|
||||
for i := 0; i < 5; i++ {
|
||||
if isDockerRunning() {
|
||||
fmt.Println("Docker is running!")
|
||||
break
|
||||
@@ -172,41 +172,16 @@ func main() {
|
||||
} else {
|
||||
alreadyInstalled = true
|
||||
fmt.Println("Looks like you already installed Pangolin!")
|
||||
|
||||
// Check if MaxMind database exists and offer to update it
|
||||
fmt.Println("\n=== MaxMind Database Update ===")
|
||||
if _, err := os.Stat("config/GeoLite2-Country.mmdb"); err == nil {
|
||||
fmt.Println("MaxMind GeoLite2 Country database found.")
|
||||
if readBool("Would you like to update the MaxMind database to the latest version?", false) {
|
||||
if err := downloadMaxMindDatabase(); err != nil {
|
||||
fmt.Printf("Error updating MaxMind database: %v\n", err)
|
||||
fmt.Println("You can try updating it manually later if needed.")
|
||||
}
|
||||
}
|
||||
} else {
|
||||
fmt.Println("MaxMind GeoLite2 Country database not found.")
|
||||
if readBool("Would you like to download the MaxMind GeoLite2 database for geoblocking functionality?", false) {
|
||||
if err := downloadMaxMindDatabase(); err != nil {
|
||||
fmt.Printf("Error downloading MaxMind database: %v\n", err)
|
||||
fmt.Println("You can try downloading it manually later if needed.")
|
||||
}
|
||||
// Now you need to update your config file accordingly to enable geoblocking
|
||||
fmt.Print("Please remember to update your config/config.yml file to enable geoblocking! \n\n")
|
||||
// add maxmind_db_path: "./config/GeoLite2-Country.mmdb" under server
|
||||
fmt.Println("Add the following line under the 'server' section:")
|
||||
fmt.Println(" maxmind_db_path: \"./config/GeoLite2-Country.mmdb\"")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !checkIsCrowdsecInstalledInCompose() {
|
||||
if !checkIsCrowdsecInstalledInCompose() && !checkIsPangolinInstalledWithHybrid() {
|
||||
fmt.Println("\n=== CrowdSec Install ===")
|
||||
// check if crowdsec is installed
|
||||
if readBool("Would you like to install CrowdSec?", false) {
|
||||
if readBool(reader, "Would you like to install CrowdSec?", false) {
|
||||
fmt.Println("This installer constitutes a minimal viable CrowdSec deployment. CrowdSec will add extra complexity to your Pangolin installation and may not work to the best of its abilities out of the box. Users are expected to implement configuration adjustments on their own to achieve the best security posture. Consult the CrowdSec documentation for detailed configuration instructions.")
|
||||
|
||||
// BUG: crowdsec installation will be skipped if the user chooses to install on the first installation.
|
||||
if readBool("Are you willing to manage CrowdSec?", false) {
|
||||
if readBool(reader, "Are you willing to manage CrowdSec?", false) {
|
||||
if config.DashboardDomain == "" {
|
||||
traefikConfig, err := ReadTraefikConfig("config/traefik/traefik_config.yml")
|
||||
if err != nil {
|
||||
@@ -221,8 +196,8 @@ func main() {
|
||||
|
||||
parsedURL, err := url.Parse(appConfig.DashboardURL)
|
||||
if err != nil {
|
||||
fmt.Printf("Error parsing URL: %v\n", err)
|
||||
return
|
||||
fmt.Printf("Error parsing URL: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
config.DashboardDomain = parsedURL.Hostname()
|
||||
@@ -235,21 +210,12 @@ func main() {
|
||||
fmt.Printf("Let's Encrypt Email: %s\n", config.LetsEncryptEmail)
|
||||
fmt.Printf("Badger Version: %s\n", config.BadgerVersion)
|
||||
|
||||
if !readBool("Are these values correct?", true) {
|
||||
config = collectUserInput()
|
||||
if !readBool(reader, "Are these values correct?", true) {
|
||||
config = collectUserInput(reader)
|
||||
}
|
||||
}
|
||||
|
||||
// Try to detect container type from existing installation
|
||||
detectedType := detectContainerType()
|
||||
if detectedType == Undefined {
|
||||
// If detection fails, prompt the user
|
||||
fmt.Println("Unable to detect container type from existing installation.")
|
||||
config.InstallationContainerType = podmanOrDocker()
|
||||
} else {
|
||||
config.InstallationContainerType = detectedType
|
||||
fmt.Printf("Detected container type: %s\n", config.InstallationContainerType)
|
||||
}
|
||||
config.InstallationContainerType = podmanOrDocker(reader)
|
||||
|
||||
config.DoCrowdsecInstall = true
|
||||
err := installCrowdsec(config)
|
||||
@@ -259,11 +225,12 @@ func main() {
|
||||
}
|
||||
|
||||
fmt.Println("CrowdSec installed successfully!")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !alreadyInstalled || config.DoCrowdsecInstall {
|
||||
if !config.HybridMode && !alreadyInstalled {
|
||||
// Setup Token Section
|
||||
fmt.Println("\n=== Setup Token ===")
|
||||
|
||||
@@ -284,11 +251,13 @@ func main() {
|
||||
|
||||
fmt.Println("\nInstallation complete!")
|
||||
|
||||
fmt.Printf("\nTo complete the initial setup, please visit:\nhttps://%s/auth/initial-setup\n", config.DashboardDomain)
|
||||
if !config.HybridMode && !checkIsPangolinInstalledWithHybrid() {
|
||||
fmt.Printf("\nTo complete the initial setup, please visit:\nhttps://%s/auth/initial-setup\n", config.DashboardDomain)
|
||||
}
|
||||
}
|
||||
|
||||
func podmanOrDocker() SupportedContainer {
|
||||
inputContainer := readString("Would you like to run Pangolin as Docker or Podman containers?", "docker")
|
||||
func podmanOrDocker(reader *bufio.Reader) SupportedContainer {
|
||||
inputContainer := readString(reader, "Would you like to run Pangolin as Docker or Podman containers?", "docker")
|
||||
|
||||
chosenContainer := Docker
|
||||
if strings.EqualFold(inputContainer, "docker") {
|
||||
@@ -300,17 +269,16 @@ func podmanOrDocker() SupportedContainer {
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
switch chosenContainer {
|
||||
case Podman:
|
||||
if chosenContainer == Podman {
|
||||
if !isPodmanInstalled() {
|
||||
fmt.Println("Podman or podman-compose is not installed. Please install both manually. Automated installation will be available in a later release.")
|
||||
os.Exit(1)
|
||||
}
|
||||
|
||||
if err := exec.Command("bash", "-c", "cat /etc/sysctl.d/99-podman.conf 2>/dev/null | grep 'net.ipv4.ip_unprivileged_port_start=' || cat /etc/sysctl.conf 2>/dev/null | grep 'net.ipv4.ip_unprivileged_port_start='").Run(); err != nil {
|
||||
if err := exec.Command("bash", "-c", "cat /etc/sysctl.conf | grep 'net.ipv4.ip_unprivileged_port_start='").Run(); err != nil {
|
||||
fmt.Println("Would you like to configure ports >= 80 as unprivileged ports? This enables podman containers to listen on low-range ports.")
|
||||
fmt.Println("Pangolin will experience startup issues if this is not configured, because it needs to listen on port 80/443 by default.")
|
||||
approved := readBool("The installer is about to execute \"echo 'net.ipv4.ip_unprivileged_port_start=80' > /etc/sysctl.d/99-podman.conf && sysctl --system\". Approve?", true)
|
||||
approved := readBool(reader, "The installer is about to execute \"echo 'net.ipv4.ip_unprivileged_port_start=80' >> /etc/sysctl.conf && sysctl -p\". Approve?", true)
|
||||
if approved {
|
||||
if os.Geteuid() != 0 {
|
||||
fmt.Println("You need to run the installer as root for such a configuration.")
|
||||
@@ -321,8 +289,8 @@ func podmanOrDocker() SupportedContainer {
|
||||
// container low-range ports as unprivileged ports.
|
||||
// Linux only.
|
||||
|
||||
if err := run("bash", "-c", "echo 'net.ipv4.ip_unprivileged_port_start=80' > /etc/sysctl.d/99-podman.conf && sysctl --system"); err != nil {
|
||||
fmt.Printf("Error configuring unprivileged ports: %v\n", err)
|
||||
if err := run("bash", "-c", "echo 'net.ipv4.ip_unprivileged_port_start=80' >> /etc/sysctl.conf && sysctl -p"); err != nil {
|
||||
fmt.Sprintf("failed to configure unprivileged ports: %v.\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
} else {
|
||||
@@ -332,7 +300,7 @@ func podmanOrDocker() SupportedContainer {
|
||||
fmt.Println("Unprivileged ports have been configured.")
|
||||
}
|
||||
|
||||
case Docker:
|
||||
} else if chosenContainer == Docker {
|
||||
// check if docker is not installed and the user is root
|
||||
if !isDockerInstalled() {
|
||||
if os.Geteuid() != 0 {
|
||||
@@ -347,7 +315,7 @@ func podmanOrDocker() SupportedContainer {
|
||||
fmt.Println("The installer will not be able to run docker commands without running it as root.")
|
||||
os.Exit(1)
|
||||
}
|
||||
default:
|
||||
} else {
|
||||
// This shouldn't happen unless there's a third container runtime.
|
||||
os.Exit(1)
|
||||
}
|
||||
@@ -355,57 +323,78 @@ func podmanOrDocker() SupportedContainer {
|
||||
return chosenContainer
|
||||
}
|
||||
|
||||
func collectUserInput() Config {
|
||||
func collectUserInput(reader *bufio.Reader) Config {
|
||||
config := Config{}
|
||||
|
||||
// Basic configuration
|
||||
fmt.Println("\n=== Basic Configuration ===")
|
||||
|
||||
config.IsEnterprise = readBoolNoDefault("Do you want to install the Enterprise version of Pangolin? The EE is free for personal use or for businesses making less than 100k USD annually.")
|
||||
|
||||
config.BaseDomain = readString("Enter your base domain (no subdomain e.g. example.com)", "")
|
||||
|
||||
// Set default dashboard domain after base domain is collected
|
||||
defaultDashboardDomain := ""
|
||||
if config.BaseDomain != "" {
|
||||
defaultDashboardDomain = "pangolin." + config.BaseDomain
|
||||
}
|
||||
config.DashboardDomain = readString("Enter the domain for the Pangolin dashboard", defaultDashboardDomain)
|
||||
config.LetsEncryptEmail = readString("Enter email for Let's Encrypt certificates", "")
|
||||
config.InstallGerbil = readBool("Do you want to use Gerbil to allow tunneled connections", true)
|
||||
|
||||
// Email configuration
|
||||
fmt.Println("\n=== Email Configuration ===")
|
||||
config.EnableEmail = readBool("Enable email functionality (SMTP)", false)
|
||||
|
||||
if config.EnableEmail {
|
||||
config.EmailSMTPHost = readString("Enter SMTP host", "")
|
||||
config.EmailSMTPPort = readInt("Enter SMTP port (default 587)", 587)
|
||||
config.EmailSMTPUser = readString("Enter SMTP username", "")
|
||||
config.EmailSMTPPass = readPassword("Enter SMTP password")
|
||||
config.EmailNoReply = readString("Enter no-reply email address (often the same as SMTP username)", "")
|
||||
for {
|
||||
response := readString(reader, "Do you want to install Pangolin as a cloud-managed (beta) node? (yes/no)", "")
|
||||
if strings.EqualFold(response, "yes") || strings.EqualFold(response, "y") {
|
||||
config.HybridMode = true
|
||||
break
|
||||
} else if strings.EqualFold(response, "no") || strings.EqualFold(response, "n") {
|
||||
config.HybridMode = false
|
||||
break
|
||||
}
|
||||
fmt.Println("Please answer 'yes' or 'no'")
|
||||
}
|
||||
|
||||
// Validate required fields
|
||||
if config.BaseDomain == "" {
|
||||
fmt.Println("Error: Domain name is required")
|
||||
os.Exit(1)
|
||||
}
|
||||
if config.LetsEncryptEmail == "" {
|
||||
fmt.Println("Error: Let's Encrypt email is required")
|
||||
os.Exit(1)
|
||||
}
|
||||
if config.EnableEmail && config.EmailNoReply == "" {
|
||||
fmt.Println("Error: No-reply email address is required when email is enabled")
|
||||
os.Exit(1)
|
||||
if config.HybridMode {
|
||||
alreadyHaveCreds := readBool(reader, "Do you already have credentials from the dashboard? If not, we will create them later", false)
|
||||
|
||||
if alreadyHaveCreds {
|
||||
config.HybridId = readString(reader, "Enter your ID", "")
|
||||
config.HybridSecret = readString(reader, "Enter your secret", "")
|
||||
}
|
||||
|
||||
// Try to get public IP as default
|
||||
publicIP := getPublicIP()
|
||||
if publicIP != "" {
|
||||
fmt.Printf("Detected public IP: %s\n", publicIP)
|
||||
}
|
||||
config.DashboardDomain = readString(reader, "The public addressable IP address for this node or a domain pointing to it", publicIP)
|
||||
config.InstallGerbil = true
|
||||
} else {
|
||||
config.BaseDomain = readString(reader, "Enter your base domain (no subdomain e.g. example.com)", "")
|
||||
|
||||
// Set default dashboard domain after base domain is collected
|
||||
defaultDashboardDomain := ""
|
||||
if config.BaseDomain != "" {
|
||||
defaultDashboardDomain = "pangolin." + config.BaseDomain
|
||||
}
|
||||
config.DashboardDomain = readString(reader, "Enter the domain for the Pangolin dashboard", defaultDashboardDomain)
|
||||
config.LetsEncryptEmail = readString(reader, "Enter email for Let's Encrypt certificates", "")
|
||||
config.InstallGerbil = readBool(reader, "Do you want to use Gerbil to allow tunneled connections", true)
|
||||
|
||||
// Email configuration
|
||||
fmt.Println("\n=== Email Configuration ===")
|
||||
config.EnableEmail = readBool(reader, "Enable email functionality (SMTP)", false)
|
||||
|
||||
if config.EnableEmail {
|
||||
config.EmailSMTPHost = readString(reader, "Enter SMTP host", "")
|
||||
config.EmailSMTPPort = readInt(reader, "Enter SMTP port (default 587)", 587)
|
||||
config.EmailSMTPUser = readString(reader, "Enter SMTP username", "")
|
||||
config.EmailSMTPPass = readString(reader, "Enter SMTP password", "") // Should this be readPassword?
|
||||
config.EmailNoReply = readString(reader, "Enter no-reply email address", "")
|
||||
}
|
||||
|
||||
// Validate required fields
|
||||
if config.BaseDomain == "" {
|
||||
fmt.Println("Error: Domain name is required")
|
||||
os.Exit(1)
|
||||
}
|
||||
if config.LetsEncryptEmail == "" {
|
||||
fmt.Println("Error: Let's Encrypt email is required")
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
// Advanced configuration
|
||||
|
||||
fmt.Println("\n=== Advanced Configuration ===")
|
||||
|
||||
config.EnableIPv6 = readBool("Is your server IPv6 capable?", true)
|
||||
config.EnableGeoblocking = readBool("Do you want to download the MaxMind GeoLite2 database for geoblocking functionality?", true)
|
||||
config.EnableIPv6 = readBool(reader, "Is your server IPv6 capable?", true)
|
||||
|
||||
if config.DashboardDomain == "" {
|
||||
fmt.Println("Error: Dashboard Domain name is required")
|
||||
@@ -416,18 +405,10 @@ func collectUserInput() Config {
|
||||
}
|
||||
|
||||
func createConfigFiles(config Config) error {
|
||||
if err := os.MkdirAll("config", 0755); err != nil {
|
||||
return fmt.Errorf("failed to create config directory: %v", err)
|
||||
}
|
||||
if err := os.MkdirAll("config/letsencrypt", 0755); err != nil {
|
||||
return fmt.Errorf("failed to create letsencrypt directory: %v", err)
|
||||
}
|
||||
if err := os.MkdirAll("config/db", 0755); err != nil {
|
||||
return fmt.Errorf("failed to create db directory: %v", err)
|
||||
}
|
||||
if err := os.MkdirAll("config/logs", 0755); err != nil {
|
||||
return fmt.Errorf("failed to create logs directory: %v", err)
|
||||
}
|
||||
os.MkdirAll("config", 0755)
|
||||
os.MkdirAll("config/letsencrypt", 0755)
|
||||
os.MkdirAll("config/db", 0755)
|
||||
os.MkdirAll("config/logs", 0755)
|
||||
|
||||
// Walk through all embedded files
|
||||
err := fs.WalkDir(configFiles, "config", func(path string, d fs.DirEntry, err error) error {
|
||||
@@ -448,6 +429,11 @@ func createConfigFiles(config Config) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// the hybrid does not need the dynamic config
|
||||
if config.HybridMode && strings.Contains(path, "dynamic_config.yml") {
|
||||
return nil
|
||||
}
|
||||
|
||||
// skip .DS_Store
|
||||
if strings.Contains(path, ".DS_Store") {
|
||||
return nil
|
||||
@@ -581,24 +567,22 @@ func showSetupTokenInstructions(containerType SupportedContainer, dashboardDomai
|
||||
fmt.Println("To get your setup token, you need to:")
|
||||
fmt.Println("")
|
||||
fmt.Println("1. Start the containers")
|
||||
switch containerType {
|
||||
case Docker:
|
||||
if containerType == Docker {
|
||||
fmt.Println(" docker compose up -d")
|
||||
case Podman:
|
||||
} else if containerType == Podman {
|
||||
fmt.Println(" podman-compose up -d")
|
||||
} else {
|
||||
}
|
||||
|
||||
fmt.Println("")
|
||||
fmt.Println("2. Wait for the Pangolin container to start and generate the token")
|
||||
fmt.Println("")
|
||||
fmt.Println("3. Check the container logs for the setup token")
|
||||
switch containerType {
|
||||
case Docker:
|
||||
if containerType == Docker {
|
||||
fmt.Println(" docker logs pangolin | grep -A 2 -B 2 'SETUP TOKEN'")
|
||||
case Podman:
|
||||
} else if containerType == Podman {
|
||||
fmt.Println(" podman logs pangolin | grep -A 2 -B 2 'SETUP TOKEN'")
|
||||
} else {
|
||||
}
|
||||
|
||||
fmt.Println("")
|
||||
fmt.Println("4. Look for output like")
|
||||
fmt.Println(" === SETUP TOKEN GENERATED ===")
|
||||
@@ -614,12 +598,17 @@ func showSetupTokenInstructions(containerType SupportedContainer, dashboardDomai
|
||||
}
|
||||
|
||||
func generateRandomSecretKey() string {
|
||||
secret := make([]byte, 32)
|
||||
_, err := rand.Read(secret)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Failed to generate random secret key: %v", err))
|
||||
const charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
|
||||
const length = 32
|
||||
|
||||
var seededRand *rand.Rand = rand.New(
|
||||
rand.NewSource(time.Now().UnixNano()))
|
||||
|
||||
b := make([]byte, length)
|
||||
for i := range b {
|
||||
b[i] = charset[seededRand.Intn(len(charset))]
|
||||
}
|
||||
return base64.StdEncoding.EncodeToString(secret)
|
||||
return string(b)
|
||||
}
|
||||
|
||||
func getPublicIP() string {
|
||||
@@ -660,7 +649,10 @@ func checkPortsAvailable(port int) error {
|
||||
addr := fmt.Sprintf(":%d", port)
|
||||
ln, err := net.Listen("tcp", addr)
|
||||
if err != nil {
|
||||
return fmt.Errorf("ERROR: port %d is occupied or cannot be bound: %w", port, err)
|
||||
return fmt.Errorf(
|
||||
"ERROR: port %d is occupied or cannot be bound: %w\n\n",
|
||||
port, err,
|
||||
)
|
||||
}
|
||||
if closeErr := ln.Close(); closeErr != nil {
|
||||
fmt.Fprintf(os.Stderr,
|
||||
@@ -671,30 +663,18 @@ func checkPortsAvailable(port int) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func downloadMaxMindDatabase() error {
|
||||
fmt.Println("Downloading MaxMind GeoLite2 Country database...")
|
||||
|
||||
// Download the GeoLite2 Country database
|
||||
if err := run("curl", "-L", "-o", "GeoLite2-Country.tar.gz",
|
||||
"https://github.com/GitSquared/node-geolite2-redist/raw/refs/heads/master/redist/GeoLite2-Country.tar.gz"); err != nil {
|
||||
return fmt.Errorf("failed to download GeoLite2 database: %v", err)
|
||||
func checkIsPangolinInstalledWithHybrid() bool {
|
||||
// Check if config/config.yml exists and contains hybrid section
|
||||
if _, err := os.Stat("config/config.yml"); err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// Extract the database
|
||||
if err := run("tar", "-xzf", "GeoLite2-Country.tar.gz"); err != nil {
|
||||
return fmt.Errorf("failed to extract GeoLite2 database: %v", err)
|
||||
// Read config file to check for hybrid section
|
||||
content, err := os.ReadFile("config/config.yml")
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
|
||||
// Find the .mmdb file and move it to the config directory
|
||||
if err := run("bash", "-c", "mv GeoLite2-Country_*/GeoLite2-Country.mmdb config/"); err != nil {
|
||||
return fmt.Errorf("failed to move GeoLite2 database to config directory: %v", err)
|
||||
}
|
||||
|
||||
// Clean up the downloaded files
|
||||
if err := run("rm", "-rf", "GeoLite2-Country.tar.gz", "GeoLite2-Country_*"); err != nil {
|
||||
fmt.Printf("Warning: failed to clean up temporary files: %v\n", err)
|
||||
}
|
||||
|
||||
fmt.Println("MaxMind GeoLite2 Country database downloaded successfully!")
|
||||
return nil
|
||||
// Check for hybrid section
|
||||
return bytes.Contains(content, []byte("managed:"))
|
||||
}
|
||||
|
||||
110
install/quickStart.go
Normal file
@@ -0,0 +1,110 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
FRONTEND_SECRET_KEY = "af4e4785-7e09-11f0-b93a-74563c4e2a7e"
|
||||
// CLOUD_API_URL = "https://pangolin.fossorial.io/api/v1/remote-exit-node/quick-start"
|
||||
CLOUD_API_URL = "https://pangolin.fossorial.io/api/v1/remote-exit-node/quick-start"
|
||||
)
|
||||
|
||||
// HybridCredentials represents the response from the cloud API
|
||||
type HybridCredentials struct {
|
||||
RemoteExitNodeId string `json:"remoteExitNodeId"`
|
||||
Secret string `json:"secret"`
|
||||
}
|
||||
|
||||
// APIResponse represents the full response structure from the cloud API
|
||||
type APIResponse struct {
|
||||
Data HybridCredentials `json:"data"`
|
||||
}
|
||||
|
||||
// RequestPayload represents the request body structure
|
||||
type RequestPayload struct {
|
||||
Token string `json:"token"`
|
||||
}
|
||||
|
||||
func generateValidationToken() string {
|
||||
timestamp := time.Now().UnixMilli()
|
||||
data := fmt.Sprintf("%s|%d", FRONTEND_SECRET_KEY, timestamp)
|
||||
obfuscated := make([]byte, len(data))
|
||||
for i, char := range []byte(data) {
|
||||
obfuscated[i] = char + 5
|
||||
}
|
||||
return base64.StdEncoding.EncodeToString(obfuscated)
|
||||
}
|
||||
|
||||
// requestHybridCredentials makes an HTTP POST request to the cloud API
|
||||
// to get hybrid credentials (ID and secret)
|
||||
func requestHybridCredentials() (*HybridCredentials, error) {
|
||||
// Generate validation token
|
||||
token := generateValidationToken()
|
||||
|
||||
// Create request payload
|
||||
payload := RequestPayload{
|
||||
Token: token,
|
||||
}
|
||||
|
||||
// Marshal payload to JSON
|
||||
jsonData, err := json.Marshal(payload)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal request payload: %v", err)
|
||||
}
|
||||
|
||||
// Create HTTP request
|
||||
req, err := http.NewRequest("POST", CLOUD_API_URL, bytes.NewBuffer(jsonData))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create HTTP request: %v", err)
|
||||
}
|
||||
|
||||
// Set headers
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
req.Header.Set("X-CSRF-Token", "x-csrf-protection")
|
||||
|
||||
// Create HTTP client with timeout
|
||||
client := &http.Client{
|
||||
Timeout: 30 * time.Second,
|
||||
}
|
||||
|
||||
// Make the request
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to make HTTP request: %v", err)
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
// Check response status
|
||||
if resp.StatusCode != http.StatusOK {
|
||||
return nil, fmt.Errorf("API request failed with status code: %d", resp.StatusCode)
|
||||
}
|
||||
|
||||
// Read response body for debugging
|
||||
body, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to read response body: %v", err)
|
||||
}
|
||||
|
||||
// Print the raw JSON response for debugging
|
||||
// fmt.Printf("Raw JSON response: %s\n", string(body))
|
||||
|
||||
// Parse response
|
||||
var apiResponse APIResponse
|
||||
if err := json.Unmarshal(body, &apiResponse); err != nil {
|
||||
return nil, fmt.Errorf("failed to decode API response: %v", err)
|
||||
}
|
||||
|
||||
// Validate response data
|
||||
if apiResponse.Data.RemoteExitNodeId == "" || apiResponse.Data.Secret == "" {
|
||||
return nil, fmt.Errorf("invalid response: missing remoteExitNodeId or secret")
|
||||
}
|
||||
|
||||
return &apiResponse.Data, nil
|
||||
}
|
||||
@@ -1,51 +0,0 @@
|
||||
package main
|
||||
|
||||
import (
|
||||
"github.com/charmbracelet/huh"
|
||||
"github.com/charmbracelet/lipgloss"
|
||||
)
|
||||
|
||||
// Pangolin brand colors (converted from oklch to hex)
|
||||
var (
|
||||
// Primary orange/amber - oklch(0.6717 0.1946 41.93)
|
||||
primaryColor = lipgloss.AdaptiveColor{Light: "#D97706", Dark: "#F59E0B"}
|
||||
// Muted foreground
|
||||
mutedColor = lipgloss.AdaptiveColor{Light: "#737373", Dark: "#A3A3A3"}
|
||||
// Success green
|
||||
successColor = lipgloss.AdaptiveColor{Light: "#16A34A", Dark: "#22C55E"}
|
||||
// Error red - oklch(0.577 0.245 27.325)
|
||||
errorColor = lipgloss.AdaptiveColor{Light: "#DC2626", Dark: "#EF4444"}
|
||||
// Normal text
|
||||
normalFg = lipgloss.AdaptiveColor{Light: "#171717", Dark: "#FAFAFA"}
|
||||
)
|
||||
|
||||
// ThemePangolin returns a huh theme using Pangolin brand colors
|
||||
func ThemePangolin() *huh.Theme {
|
||||
t := huh.ThemeBase()
|
||||
|
||||
// Focused state styles
|
||||
t.Focused.Base = t.Focused.Base.BorderForeground(primaryColor)
|
||||
t.Focused.Title = t.Focused.Title.Foreground(primaryColor).Bold(true)
|
||||
t.Focused.Description = t.Focused.Description.Foreground(mutedColor)
|
||||
t.Focused.ErrorIndicator = t.Focused.ErrorIndicator.Foreground(errorColor)
|
||||
t.Focused.ErrorMessage = t.Focused.ErrorMessage.Foreground(errorColor)
|
||||
t.Focused.SelectSelector = t.Focused.SelectSelector.Foreground(primaryColor)
|
||||
t.Focused.NextIndicator = t.Focused.NextIndicator.Foreground(primaryColor)
|
||||
t.Focused.PrevIndicator = t.Focused.PrevIndicator.Foreground(primaryColor)
|
||||
t.Focused.Option = t.Focused.Option.Foreground(normalFg)
|
||||
t.Focused.SelectedOption = t.Focused.SelectedOption.Foreground(primaryColor)
|
||||
t.Focused.SelectedPrefix = lipgloss.NewStyle().Foreground(successColor).SetString("✓ ")
|
||||
t.Focused.UnselectedPrefix = lipgloss.NewStyle().Foreground(mutedColor).SetString(" ")
|
||||
t.Focused.FocusedButton = t.Focused.FocusedButton.Foreground(lipgloss.Color("#FFFFFF")).Background(primaryColor)
|
||||
t.Focused.BlurredButton = t.Focused.BlurredButton.Foreground(normalFg).Background(lipgloss.AdaptiveColor{Light: "#E5E5E5", Dark: "#404040"})
|
||||
t.Focused.TextInput.Cursor = t.Focused.TextInput.Cursor.Foreground(primaryColor)
|
||||
t.Focused.TextInput.Prompt = t.Focused.TextInput.Prompt.Foreground(primaryColor)
|
||||
|
||||
// Blurred state inherits from focused but with hidden border
|
||||
t.Blurred = t.Focused
|
||||
t.Blurred.Base = t.Focused.Base.BorderStyle(lipgloss.HiddenBorder())
|
||||
t.Blurred.Title = t.Blurred.Title.Foreground(mutedColor).Bold(false)
|
||||
t.Blurred.TextInput.Prompt = t.Blurred.TextInput.Prompt.Foreground(mutedColor)
|
||||
|
||||
return t
|
||||
}
|
||||
1294
messages/bg-BG.json
1276
messages/cs-CZ.json
1410
messages/de-DE.json
1324
messages/en-US.json
1286
messages/es-ES.json
1478
messages/fr-FR.json
1280
messages/it-IT.json
1194
messages/ko-KR.json
1288
messages/nb-NO.json
1286
messages/nl-NL.json
1302
messages/pl-PL.json
1290
messages/pt-PT.json
1282
messages/ru-RU.json
1206
messages/tr-TR.json
1278
messages/zh-CN.json
2398
messages/zh-TW.json
@@ -1,16 +1,12 @@
|
||||
import type { NextConfig } from "next";
|
||||
import createNextIntlPlugin from "next-intl/plugin";
|
||||
|
||||
const withNextIntl = createNextIntlPlugin();
|
||||
|
||||
const nextConfig: NextConfig = {
|
||||
reactStrictMode: false,
|
||||
/** @type {import("next").NextConfig} */
|
||||
const nextConfig = {
|
||||
eslint: {
|
||||
ignoreDuringBuilds: true
|
||||
},
|
||||
experimental: {
|
||||
reactCompiler: true
|
||||
},
|
||||
output: "standalone"
|
||||
};
|
||||
|
||||
18010
package-lock.json
generated
232
package.json
@@ -3,7 +3,7 @@
|
||||
"version": "0.0.0",
|
||||
"private": true,
|
||||
"type": "module",
|
||||
"description": "Identity-aware VPN and proxy for remote access to anything, anywhere and Dashboard UI",
|
||||
"description": "Tunneled Reverse Proxy Management Server with Identity and Access Control and Dashboard UI",
|
||||
"homepage": "https://github.com/fosrl/pangolin",
|
||||
"repository": {
|
||||
"type": "git",
|
||||
@@ -12,165 +12,157 @@
|
||||
"license": "SEE LICENSE IN LICENSE AND README.md",
|
||||
"scripts": {
|
||||
"dev": "NODE_ENV=development ENVIRONMENT=dev tsx watch server/index.ts",
|
||||
"dev:check": "npx tsc --noEmit && npm run format:check",
|
||||
"dev:setup": "cp config/config.example.yml config/config.yml && npm run set:oss && npm run set:sqlite && npm run db:sqlite:generate && npm run db:sqlite:push",
|
||||
"db:generate": "drizzle-kit generate --config=./drizzle.config.ts",
|
||||
"db:push": "npx tsx server/db/migrate.ts",
|
||||
"db:studio": "drizzle-kit studio --config=./drizzle.config.ts",
|
||||
"db:pg:generate": "drizzle-kit generate --config=./drizzle.pg.config.ts",
|
||||
"db:sqlite:generate": "drizzle-kit generate --config=./drizzle.sqlite.config.ts",
|
||||
"db:pg:push": "npx tsx server/db/pg/migrate.ts",
|
||||
"db:sqlite:push": "npx tsx server/db/sqlite/migrate.ts",
|
||||
"db:sqlite:studio": "drizzle-kit studio --config=./drizzle.sqlite.config.ts",
|
||||
"db:pg:studio": "drizzle-kit studio --config=./drizzle.pg.config.ts",
|
||||
"db:clear-migrations": "rm -rf server/migrations",
|
||||
"set:oss": "echo 'export const build = \"oss\" as \"saas\" | \"enterprise\" | \"oss\";' > server/build.ts && cp tsconfig.oss.json tsconfig.json",
|
||||
"set:saas": "echo 'export const build = \"saas\" as \"saas\" | \"enterprise\" | \"oss\";' > server/build.ts && cp tsconfig.saas.json tsconfig.json",
|
||||
"set:enterprise": "echo 'export const build = \"enterprise\" as \"saas\" | \"enterprise\" | \"oss\";' > server/build.ts && cp tsconfig.enterprise.json tsconfig.json",
|
||||
"set:sqlite": "echo 'export * from \"./sqlite\";\nexport const driver: \"pg\" | \"sqlite\" = \"sqlite\";' > server/db/index.ts && cp drizzle.sqlite.config.ts drizzle.config.ts && cp server/setup/migrationsSqlite.ts server/setup/migrations.ts",
|
||||
"set:pg": "echo 'export * from \"./pg\";\nexport const driver: \"pg\" | \"sqlite\" = \"pg\";' > server/db/index.ts && cp drizzle.pg.config.ts drizzle.config.ts && cp server/setup/migrationsPg.ts server/setup/migrations.ts",
|
||||
"build:next": "next build",
|
||||
"build": "mkdir -p dist && next build && node esbuild.mjs -e server/index.ts -o dist/server.mjs && node esbuild.mjs -e server/setup/migrations.ts -o dist/migrations.mjs",
|
||||
"set:oss": "echo 'export const build = \"oss\" as any;' > server/build.ts",
|
||||
"set:saas": "echo 'export const build = \"saas\" as any;' > server/build.ts",
|
||||
"set:enterprise": "echo 'export const build = \"enterprise\" as any;' > server/build.ts",
|
||||
"set:sqlite": "echo 'export * from \"./sqlite\";' > server/db/index.ts",
|
||||
"set:pg": "echo 'export * from \"./pg\";' > server/db/index.ts",
|
||||
"build:sqlite": "mkdir -p dist && next build && node esbuild.mjs -e server/index.ts -o dist/server.mjs && node esbuild.mjs -e server/setup/migrationsSqlite.ts -o dist/migrations.mjs",
|
||||
"build:pg": "mkdir -p dist && next build && node esbuild.mjs -e server/index.ts -o dist/server.mjs && node esbuild.mjs -e server/setup/migrationsPg.ts -o dist/migrations.mjs",
|
||||
"start": "ENVIRONMENT=prod node dist/migrations.mjs && ENVIRONMENT=prod NODE_ENV=development node --enable-source-maps dist/server.mjs",
|
||||
"email": "email dev --dir server/emails/templates --port 3005",
|
||||
"build:cli": "node esbuild.mjs -e cli/index.ts -o dist/cli.mjs",
|
||||
"format:check": "prettier --check .",
|
||||
"format": "prettier --write ."
|
||||
"db:sqlite:seed-exit-node": "sqlite3 config/db/db.sqlite \"INSERT INTO exitNodes (exitNodeId, name, address, endpoint, publicKey, listenPort, reachableAt, maxConnections, online, lastPing, type, region) VALUES (null, 'test', '10.0.0.1/24', 'localhost', 'MJ44MpnWGxMZURgxW/fWXDFsejhabnEFYDo60LQwK3A=', 1234, 'http://localhost:3003', 123, 1, null, 'gerbil', null);\""
|
||||
},
|
||||
"dependencies": {
|
||||
"@asteasolutions/zod-to-openapi": "8.4.1",
|
||||
"@aws-sdk/client-s3": "3.989.0",
|
||||
"@faker-js/faker": "10.3.0",
|
||||
"@headlessui/react": "2.2.9",
|
||||
"@asteasolutions/zod-to-openapi": "^7.3.4",
|
||||
"@aws-sdk/client-s3": "3.837.0",
|
||||
"@hookform/resolvers": "5.2.2",
|
||||
"@monaco-editor/react": "4.7.0",
|
||||
"@node-rs/argon2": "2.0.2",
|
||||
"@node-rs/argon2": "^2.0.2",
|
||||
"@oslojs/crypto": "1.0.1",
|
||||
"@oslojs/encoding": "1.1.0",
|
||||
"@radix-ui/react-avatar": "1.1.11",
|
||||
"@radix-ui/react-avatar": "1.1.10",
|
||||
"@radix-ui/react-checkbox": "1.3.3",
|
||||
"@radix-ui/react-collapsible": "1.1.12",
|
||||
"@radix-ui/react-dialog": "1.1.15",
|
||||
"@radix-ui/react-dropdown-menu": "2.1.16",
|
||||
"@radix-ui/react-icons": "1.3.2",
|
||||
"@radix-ui/react-label": "2.1.8",
|
||||
"@radix-ui/react-label": "2.1.7",
|
||||
"@radix-ui/react-popover": "1.1.15",
|
||||
"@radix-ui/react-progress": "1.1.8",
|
||||
"@radix-ui/react-progress": "^1.1.7",
|
||||
"@radix-ui/react-radio-group": "1.3.8",
|
||||
"@radix-ui/react-scroll-area": "1.2.10",
|
||||
"@radix-ui/react-scroll-area": "^1.2.10",
|
||||
"@radix-ui/react-select": "2.2.6",
|
||||
"@radix-ui/react-separator": "1.1.8",
|
||||
"@radix-ui/react-slot": "1.2.4",
|
||||
"@radix-ui/react-separator": "1.1.7",
|
||||
"@radix-ui/react-slot": "1.2.3",
|
||||
"@radix-ui/react-switch": "1.2.6",
|
||||
"@radix-ui/react-tabs": "1.1.13",
|
||||
"@radix-ui/react-toast": "1.2.15",
|
||||
"@radix-ui/react-tooltip": "1.2.8",
|
||||
"@react-email/components": "1.0.8",
|
||||
"@react-email/render": "2.0.4",
|
||||
"@react-email/tailwind": "2.0.5",
|
||||
"@simplewebauthn/browser": "13.2.2",
|
||||
"@simplewebauthn/server": "13.2.3",
|
||||
"@tailwindcss/forms": "0.5.11",
|
||||
"@tanstack/react-query": "5.90.21",
|
||||
"@radix-ui/react-tooltip": "^1.2.8",
|
||||
"@react-email/components": "0.5.5",
|
||||
"@react-email/render": "^1.2.0",
|
||||
"@react-email/tailwind": "1.2.2",
|
||||
"@simplewebauthn/browser": "^13.2.0",
|
||||
"@simplewebauthn/server": "^13.2.1",
|
||||
"@tailwindcss/forms": "^0.5.10",
|
||||
"@tanstack/react-table": "8.21.3",
|
||||
"arctic": "3.7.0",
|
||||
"axios": "1.13.5",
|
||||
"better-sqlite3": "11.9.1",
|
||||
"canvas-confetti": "1.9.4",
|
||||
"class-variance-authority": "0.7.1",
|
||||
"arctic": "^3.7.0",
|
||||
"axios": "^1.12.2",
|
||||
"better-sqlite3": "11.7.0",
|
||||
"canvas-confetti": "1.9.3",
|
||||
"class-variance-authority": "^0.7.1",
|
||||
"clsx": "2.1.1",
|
||||
"cmdk": "1.1.1",
|
||||
"cookie": "^1.0.2",
|
||||
"cookie-parser": "1.4.7",
|
||||
"cors": "2.8.6",
|
||||
"crypto-js": "4.2.0",
|
||||
"d3": "7.9.0",
|
||||
"drizzle-orm": "0.45.1",
|
||||
"express": "5.2.1",
|
||||
"express-rate-limit": "8.2.1",
|
||||
"glob": "13.0.6",
|
||||
"cookies": "^0.9.1",
|
||||
"cors": "2.8.5",
|
||||
"crypto-js": "^4.2.0",
|
||||
"drizzle-orm": "0.44.6",
|
||||
"eslint": "9.35.0",
|
||||
"eslint-config-next": "15.5.4",
|
||||
"express": "5.1.0",
|
||||
"express-rate-limit": "8.1.0",
|
||||
"glob": "11.0.3",
|
||||
"helmet": "8.1.0",
|
||||
"http-errors": "2.0.1",
|
||||
"http-errors": "2.0.0",
|
||||
"i": "^0.3.7",
|
||||
"input-otp": "1.4.2",
|
||||
"ioredis": "5.9.3",
|
||||
"jmespath": "0.16.0",
|
||||
"js-yaml": "4.1.1",
|
||||
"jsonwebtoken": "9.0.3",
|
||||
"lucide-react": "0.563.0",
|
||||
"maxmind": "5.0.5",
|
||||
"ioredis": "5.6.1",
|
||||
"jmespath": "^0.16.0",
|
||||
"js-yaml": "4.1.0",
|
||||
"jsonwebtoken": "^9.0.2",
|
||||
"lucide-react": "^0.544.0",
|
||||
"maxmind": "5.0.0",
|
||||
"moment": "2.30.1",
|
||||
"next": "15.5.12",
|
||||
"next-intl": "4.8.3",
|
||||
"next": "15.5.4",
|
||||
"next-intl": "^4.3.9",
|
||||
"next-themes": "0.4.6",
|
||||
"nextjs-toploader": "3.9.17",
|
||||
"node-cache": "5.1.2",
|
||||
"nodemailer": "8.0.1",
|
||||
"node-fetch": "3.3.2",
|
||||
"nodemailer": "7.0.6",
|
||||
"npm": "^11.6.1",
|
||||
"oslo": "1.2.1",
|
||||
"pg": "8.19.0",
|
||||
"posthog-node": "5.26.0",
|
||||
"pg": "^8.16.2",
|
||||
"posthog-node": "^5.8.4",
|
||||
"qrcode.react": "4.2.0",
|
||||
"react": "19.2.4",
|
||||
"react-day-picker": "9.13.2",
|
||||
"react-dom": "19.2.4",
|
||||
"react-easy-sort": "1.8.0",
|
||||
"react-hook-form": "7.71.2",
|
||||
"react-icons": "5.5.0",
|
||||
"recharts": "2.15.4",
|
||||
"reodotdev": "1.0.0",
|
||||
"resend": "6.9.2",
|
||||
"semver": "7.7.4",
|
||||
"sshpk": "^1.18.0",
|
||||
"stripe": "20.3.1",
|
||||
"swagger-ui-express": "5.0.1",
|
||||
"tailwind-merge": "3.5.0",
|
||||
"topojson-client": "3.1.0",
|
||||
"tw-animate-css": "1.4.0",
|
||||
"use-debounce": "^10.1.0",
|
||||
"uuid": "13.0.0",
|
||||
"react": "19.1.1",
|
||||
"react-dom": "19.1.1",
|
||||
"react-easy-sort": "^1.7.0",
|
||||
"react-hook-form": "7.62.0",
|
||||
"react-icons": "^5.5.0",
|
||||
"rebuild": "0.1.2",
|
||||
"reodotdev": "^1.0.0",
|
||||
"resend": "^6.1.1",
|
||||
"semver": "^7.7.2",
|
||||
"stripe": "18.2.1",
|
||||
"swagger-ui-express": "^5.0.1",
|
||||
"tailwind-merge": "3.3.1",
|
||||
"tw-animate-css": "^1.3.8",
|
||||
"uuid": "^13.0.0",
|
||||
"vaul": "1.1.2",
|
||||
"visionscarto-world-atlas": "1.0.0",
|
||||
"winston": "3.19.0",
|
||||
"winston": "3.17.0",
|
||||
"winston-daily-rotate-file": "5.0.0",
|
||||
"ws": "8.19.0",
|
||||
"yaml": "2.8.2",
|
||||
"ws": "8.18.3",
|
||||
"yargs": "18.0.0",
|
||||
"zod": "4.3.6",
|
||||
"zod-validation-error": "5.0.0"
|
||||
"zod": "3.25.76",
|
||||
"zod-validation-error": "3.5.2"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@dotenvx/dotenvx": "1.52.0",
|
||||
"@dotenvx/dotenvx": "1.51.0",
|
||||
"@esbuild-plugins/tsconfig-paths": "0.1.2",
|
||||
"@react-email/preview-server": "5.2.8",
|
||||
"@tailwindcss/postcss": "4.1.18",
|
||||
"@tanstack/react-query-devtools": "5.91.3",
|
||||
"@types/better-sqlite3": "7.6.13",
|
||||
"@types/cookie-parser": "1.4.10",
|
||||
"@react-email/preview-server": "4.2.12",
|
||||
"@tailwindcss/postcss": "^4.1.14",
|
||||
"@types/better-sqlite3": "7.6.12",
|
||||
"@types/cookie-parser": "1.4.9",
|
||||
"@types/cors": "2.8.19",
|
||||
"@types/crypto-js": "4.2.2",
|
||||
"@types/d3": "7.4.3",
|
||||
"@types/express": "5.0.6",
|
||||
"@types/express-session": "1.18.2",
|
||||
"@types/jmespath": "0.15.2",
|
||||
"@types/crypto-js": "^4.2.2",
|
||||
"@types/express": "5.0.3",
|
||||
"@types/express-session": "^1.18.2",
|
||||
"@types/jmespath": "^0.15.2",
|
||||
"@types/js-yaml": "4.0.9",
|
||||
"@types/jsonwebtoken": "9.0.10",
|
||||
"@types/node": "25.2.3",
|
||||
"@types/nodemailer": "7.0.11",
|
||||
"@types/nprogress": "0.2.3",
|
||||
"@types/pg": "8.16.0",
|
||||
"@types/react": "19.2.14",
|
||||
"@types/react-dom": "19.2.3",
|
||||
"@types/semver": "7.7.1",
|
||||
"@types/sshpk": "^1.17.4",
|
||||
"@types/swagger-ui-express": "4.1.8",
|
||||
"@types/topojson-client": "3.1.5",
|
||||
"@types/jsonwebtoken": "^9.0.10",
|
||||
"@types/node": "24.6.2",
|
||||
"@types/nodemailer": "7.0.2",
|
||||
"@types/pg": "8.15.5",
|
||||
"@types/react": "19.1.16",
|
||||
"@types/react-dom": "19.1.9",
|
||||
"@types/semver": "^7.7.1",
|
||||
"@types/swagger-ui-express": "^4.1.8",
|
||||
"@types/ws": "8.18.1",
|
||||
"@types/yargs": "17.0.35",
|
||||
"babel-plugin-react-compiler": "1.0.0",
|
||||
"drizzle-kit": "0.31.9",
|
||||
"esbuild": "0.27.3",
|
||||
"esbuild-node-externals": "1.20.1",
|
||||
"eslint": "10.0.2",
|
||||
"eslint-config-next": "16.1.6",
|
||||
"postcss": "8.5.6",
|
||||
"prettier": "3.8.1",
|
||||
"react-email": "5.2.8",
|
||||
"tailwindcss": "4.1.18",
|
||||
"@types/yargs": "17.0.33",
|
||||
"drizzle-kit": "0.31.5",
|
||||
"esbuild": "0.25.10",
|
||||
"esbuild-node-externals": "1.18.0",
|
||||
"postcss": "^8",
|
||||
"react-email": "4.2.12",
|
||||
"tailwindcss": "^4.1.4",
|
||||
"tsc-alias": "1.8.16",
|
||||
"tsx": "4.21.0",
|
||||
"typescript": "5.9.3",
|
||||
"typescript-eslint": "8.55.0"
|
||||
"tsx": "4.20.6",
|
||||
"typescript": "^5",
|
||||
"typescript-eslint": "^8.45.0"
|
||||
},
|
||||
"overrides": {
|
||||
"emblor": {
|
||||
"react": "19.0.0",
|
||||
"react-dom": "19.0.0"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
/** @type {import('postcss-load-config').Config} */
|
||||
const config = {
|
||||
plugins: {
|
||||
"@tailwindcss/postcss": {}
|
||||
}
|
||||
"@tailwindcss/postcss": {},
|
||||
},
|
||||
};
|
||||
|
||||
export default config;
|
||||
|
||||
|
Before Width: | Height: | Size: 13 KiB After Width: | Height: | Size: 34 KiB |
|
Before Width: | Height: | Size: 13 KiB After Width: | Height: | Size: 33 KiB |
BIN
public/screenshots/create-resource.png
Normal file
|
After Width: | Height: | Size: 687 KiB |
|
Before Width: | Height: | Size: 493 KiB After Width: | Height: | Size: 713 KiB |
BIN
public/screenshots/edit-resource.png
Normal file
|
After Width: | Height: | Size: 636 KiB |
|
Before Width: | Height: | Size: 484 KiB After Width: | Height: | Size: 713 KiB |
|
Before Width: | Height: | Size: 421 KiB |
|
Before Width: | Height: | Size: 484 KiB |
BIN
public/screenshots/resources.png
Normal file
|
After Width: | Height: | Size: 713 KiB |
BIN
public/screenshots/sites-fade.png
Normal file
|
After Width: | Height: | Size: 456 KiB |
|
Before Width: | Height: | Size: 396 KiB After Width: | Height: | Size: 674 KiB |
|
Before Width: | Height: | Size: 434 KiB |
@@ -7,21 +7,21 @@ import {
|
||||
errorHandlerMiddleware,
|
||||
notFoundMiddleware
|
||||
} from "@server/middlewares";
|
||||
import { authenticated, unauthenticated } from "#dynamic/routers/external";
|
||||
import { router as wsRouter, handleWSUpgrade } from "#dynamic/routers/ws";
|
||||
import { corsWithLoginPageSupport } from "@server/middlewares/private/corsWithLoginPage";
|
||||
import { authenticated, unauthenticated } from "@server/routers/external";
|
||||
import { router as wsRouter, handleWSUpgrade } from "@server/routers/ws";
|
||||
import { logIncomingMiddleware } from "./middlewares/logIncoming";
|
||||
import { csrfProtectionMiddleware } from "./middlewares/csrfProtection";
|
||||
import helmet from "helmet";
|
||||
import { stripeWebhookHandler } from "@server/routers/private/billing/webhooks";
|
||||
import { build } from "./build";
|
||||
import rateLimit, { ipKeyGenerator } from "express-rate-limit";
|
||||
import createHttpError from "http-errors";
|
||||
import HttpCode from "./types/HttpCode";
|
||||
import requestTimeoutMiddleware from "./middlewares/requestTimeout";
|
||||
import { createStore } from "#dynamic/lib/rateLimitStore";
|
||||
import { createStore } from "@server/lib/private/rateLimitStore";
|
||||
import hybridRouter from "@server/routers/private/hybrid";
|
||||
import { stripDuplicateSesions } from "./middlewares/stripDuplicateSessions";
|
||||
import { corsWithLoginPageSupport } from "@server/lib/corsWithLoginPage";
|
||||
import { hybridRouter } from "#dynamic/routers/hybrid";
|
||||
import { billingWebhookHandler } from "#dynamic/routers/billing/webhooks";
|
||||
|
||||
const dev = config.isDev;
|
||||
const externalPort = config.getRawConfig().server.external_port;
|
||||
@@ -39,30 +39,32 @@ export function createApiServer() {
|
||||
apiServer.post(
|
||||
`${prefix}/billing/webhooks`,
|
||||
express.raw({ type: "application/json" }),
|
||||
billingWebhookHandler
|
||||
stripeWebhookHandler
|
||||
);
|
||||
}
|
||||
|
||||
const corsConfig = config.getRawConfig().server.cors;
|
||||
const options = {
|
||||
...(corsConfig?.origins
|
||||
? { origin: corsConfig.origins }
|
||||
: {
|
||||
origin: (origin: any, callback: any) => {
|
||||
callback(null, true);
|
||||
}
|
||||
}),
|
||||
...(corsConfig?.methods && { methods: corsConfig.methods }),
|
||||
...(corsConfig?.allowed_headers && {
|
||||
allowedHeaders: corsConfig.allowed_headers
|
||||
}),
|
||||
credentials: !(corsConfig?.credentials === false)
|
||||
};
|
||||
|
||||
if (build == "oss" || !corsConfig) {
|
||||
if (build == "oss") {
|
||||
const options = {
|
||||
...(corsConfig?.origins
|
||||
? { origin: corsConfig.origins }
|
||||
: {
|
||||
origin: (origin: any, callback: any) => {
|
||||
callback(null, true);
|
||||
}
|
||||
}),
|
||||
...(corsConfig?.methods && { methods: corsConfig.methods }),
|
||||
...(corsConfig?.allowed_headers && {
|
||||
allowedHeaders: corsConfig.allowed_headers
|
||||
}),
|
||||
credentials: !(corsConfig?.credentials === false)
|
||||
};
|
||||
|
||||
logger.debug("Using CORS options", options);
|
||||
|
||||
apiServer.use(cors(options));
|
||||
} else if (corsConfig) {
|
||||
} else {
|
||||
// Use the custom CORS middleware with loginPage support
|
||||
apiServer.use(corsWithLoginPageSupport(corsConfig));
|
||||
}
|
||||
@@ -79,12 +81,6 @@ export function createApiServer() {
|
||||
// Add request timeout middleware
|
||||
apiServer.use(requestTimeoutMiddleware(60000)); // 60 second timeout
|
||||
|
||||
apiServer.use(logIncomingMiddleware);
|
||||
|
||||
if (build !== "oss") {
|
||||
apiServer.use(`${prefix}/hybrid`, hybridRouter); // put before rate limiting because we will rate limit there separately because some of the routes are heavily used
|
||||
}
|
||||
|
||||
if (!dev) {
|
||||
apiServer.use(
|
||||
rateLimit({
|
||||
@@ -107,7 +103,11 @@ export function createApiServer() {
|
||||
}
|
||||
|
||||
// API routes
|
||||
apiServer.use(logIncomingMiddleware);
|
||||
apiServer.use(prefix, unauthenticated);
|
||||
if (build !== "oss") {
|
||||
apiServer.use(`${prefix}/hybrid`, hybridRouter);
|
||||
}
|
||||
apiServer.use(prefix, authenticated);
|
||||
|
||||
// WebSocket routes
|
||||
|
||||
@@ -4,6 +4,7 @@ import { userActions, roleActions, userOrgs } from "@server/db";
|
||||
import { and, eq } from "drizzle-orm";
|
||||
import createHttpError from "http-errors";
|
||||
import HttpCode from "@server/types/HttpCode";
|
||||
import { sendUsageNotification } from "@server/routers/org";
|
||||
|
||||
export enum ActionsEnum {
|
||||
createOrgUser = "createOrgUser",
|
||||
@@ -19,7 +20,6 @@ export enum ActionsEnum {
|
||||
getSite = "getSite",
|
||||
listSites = "listSites",
|
||||
updateSite = "updateSite",
|
||||
reGenerateSecret = "reGenerateSecret",
|
||||
createResource = "createResource",
|
||||
deleteResource = "deleteResource",
|
||||
getResource = "getResource",
|
||||
@@ -61,7 +61,6 @@ export enum ActionsEnum {
|
||||
getUser = "getUser",
|
||||
setResourcePassword = "setResourcePassword",
|
||||
setResourcePincode = "setResourcePincode",
|
||||
setResourceHeaderAuth = "setResourceHeaderAuth",
|
||||
setResourceWhitelist = "setResourceWhitelist",
|
||||
getResourceWhitelist = "getResourceWhitelist",
|
||||
generateAccessToken = "generateAccessToken",
|
||||
@@ -78,19 +77,11 @@ export enum ActionsEnum {
|
||||
updateSiteResource = "updateSiteResource",
|
||||
createClient = "createClient",
|
||||
deleteClient = "deleteClient",
|
||||
archiveClient = "archiveClient",
|
||||
unarchiveClient = "unarchiveClient",
|
||||
blockClient = "blockClient",
|
||||
unblockClient = "unblockClient",
|
||||
updateClient = "updateClient",
|
||||
listClients = "listClients",
|
||||
getClient = "getClient",
|
||||
listOrgDomains = "listOrgDomains",
|
||||
getDomain = "getDomain",
|
||||
updateOrgDomain = "updateOrgDomain",
|
||||
getDNSRecords = "getDNSRecords",
|
||||
createNewt = "createNewt",
|
||||
createOlm = "createOlm",
|
||||
createIdp = "createIdp",
|
||||
updateIdp = "updateIdp",
|
||||
deleteIdp = "deleteIdp",
|
||||
@@ -125,14 +116,7 @@ export enum ActionsEnum {
|
||||
updateLoginPage = "updateLoginPage",
|
||||
getLoginPage = "getLoginPage",
|
||||
deleteLoginPage = "deleteLoginPage",
|
||||
listBlueprints = "listBlueprints",
|
||||
getBlueprint = "getBlueprint",
|
||||
applyBlueprint = "applyBlueprint",
|
||||
viewLogs = "viewLogs",
|
||||
exportLogs = "exportLogs",
|
||||
listApprovals = "listApprovals",
|
||||
updateApprovals = "updateApprovals",
|
||||
signSshKey = "signSshKey"
|
||||
applyBlueprint = "applyBlueprint"
|
||||
}
|
||||
|
||||
export async function checkUserActionPermission(
|
||||
@@ -209,6 +193,8 @@ export async function checkUserActionPermission(
|
||||
.limit(1);
|
||||
|
||||
return roleActionPermission.length > 0;
|
||||
|
||||
return false;
|
||||
} catch (error) {
|
||||
console.error("Error checking user action permission:", error);
|
||||
throw createHttpError(
|
||||
|
||||
@@ -1,45 +0,0 @@
|
||||
import { db } from "@server/db";
|
||||
import { and, eq } from "drizzle-orm";
|
||||
import { roleSiteResources, userSiteResources } from "@server/db";
|
||||
|
||||
export async function canUserAccessSiteResource({
|
||||
userId,
|
||||
resourceId,
|
||||
roleId
|
||||
}: {
|
||||
userId: string;
|
||||
resourceId: number;
|
||||
roleId: number;
|
||||
}): Promise<boolean> {
|
||||
const roleResourceAccess = await db
|
||||
.select()
|
||||
.from(roleSiteResources)
|
||||
.where(
|
||||
and(
|
||||
eq(roleSiteResources.siteResourceId, resourceId),
|
||||
eq(roleSiteResources.roleId, roleId)
|
||||
)
|
||||
)
|
||||
.limit(1);
|
||||
|
||||
if (roleResourceAccess.length > 0) {
|
||||
return true;
|
||||
}
|
||||
|
||||
const userResourceAccess = await db
|
||||
.select()
|
||||
.from(userSiteResources)
|
||||
.where(
|
||||
and(
|
||||
eq(userSiteResources.userId, userId),
|
||||
eq(userSiteResources.siteResourceId, resourceId)
|
||||
)
|
||||
)
|
||||
.limit(1);
|
||||
|
||||
if (userResourceAccess.length > 0) {
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
@@ -2,13 +2,13 @@ import { hash, verify } from "@node-rs/argon2";
|
||||
|
||||
export async function verifyPassword(
|
||||
password: string,
|
||||
hash: string
|
||||
hash: string,
|
||||
): Promise<boolean> {
|
||||
const validPassword = await verify(hash, password, {
|
||||
memoryCost: 19456,
|
||||
timeCost: 2,
|
||||
outputLen: 32,
|
||||
parallelism: 1
|
||||
parallelism: 1,
|
||||
});
|
||||
return validPassword;
|
||||
}
|
||||
@@ -18,7 +18,7 @@ export async function hashPassword(password: string): Promise<string> {
|
||||
memoryCost: 19456,
|
||||
timeCost: 2,
|
||||
outputLen: 32,
|
||||
parallelism: 1
|
||||
parallelism: 1,
|
||||
});
|
||||
|
||||
return passwordHash;
|
||||
|
||||