mirror of
https://github.com/fosrl/newt.git
synced 2026-03-26 20:46:41 +00:00
Compare commits
14 Commits
dependabot
...
1.9.5
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
cdd8a132f2 | ||
|
|
6618bb4483 | ||
|
|
08952c20c5 | ||
|
|
5e60da37d1 | ||
|
|
53d79aea5a | ||
|
|
0f6852b681 | ||
|
|
2b8e280f2e | ||
|
|
3a377d43de | ||
|
|
792057cf6c | ||
|
|
57afe91e85 | ||
|
|
3389088c43 | ||
|
|
e73150c187 | ||
|
|
18556f34b2 | ||
|
|
66c235624a |
908
.github/workflows/cicd.yml
vendored
908
.github/workflows/cicd.yml
vendored
@@ -1,19 +1,9 @@
|
|||||||
name: CI/CD Pipeline (AWS Self-Hosted Runners)
|
name: CI/CD Pipeline
|
||||||
|
|
||||||
# CI/CD workflow for building, publishing, attesting, signing container images and building release binaries.
|
|
||||||
# Native multi-arch pipeline using two AWS EC2 self-hosted runners (x86_64 + arm64) to build and push architecture-specific images in parallel, then create multi-arch manifests.
|
|
||||||
#
|
|
||||||
# Required secrets:
|
|
||||||
# - AWS_ACCOUNT_ID, AWS_ROLE_NAME, AWS_REGION
|
|
||||||
# - EC2_INSTANCE_ID_AMD_RUNNER, EC2_INSTANCE_ID_ARM_RUNNER
|
|
||||||
# - DOCKER_HUB_USERNAME / DOCKER_HUB_ACCESS_TOKEN
|
|
||||||
# - GITHUB_TOKEN
|
|
||||||
# - COSIGN_PRIVATE_KEY / COSIGN_PASSWORD / COSIGN_PUBLIC_KEY
|
|
||||||
|
|
||||||
permissions:
|
permissions:
|
||||||
contents: write # gh-release
|
contents: write # gh-release
|
||||||
packages: write # GHCR push
|
packages: write # GHCR push
|
||||||
id-token: write # Keyless-Signatures & Attestations (OIDC)
|
id-token: write # Keyless-Signatures & Attestations
|
||||||
attestations: write # actions/attest-build-provenance
|
attestations: write # actions/attest-build-provenance
|
||||||
security-events: write # upload-sarif
|
security-events: write # upload-sarif
|
||||||
actions: read
|
actions: read
|
||||||
@@ -27,19 +17,9 @@ on:
|
|||||||
workflow_dispatch:
|
workflow_dispatch:
|
||||||
inputs:
|
inputs:
|
||||||
version:
|
version:
|
||||||
description: "Version to release (X.Y.Z or X.Y.Z-rc.N)"
|
description: "SemVer version to release (e.g., 1.2.3, no leading 'v')"
|
||||||
required: true
|
required: true
|
||||||
type: string
|
type: string
|
||||||
publish_latest:
|
|
||||||
description: "Publish latest tag (non-RC only)"
|
|
||||||
required: true
|
|
||||||
type: boolean
|
|
||||||
default: false
|
|
||||||
publish_minor:
|
|
||||||
description: "Publish minor tag (X.Y) (non-RC only)"
|
|
||||||
required: true
|
|
||||||
type: boolean
|
|
||||||
default: false
|
|
||||||
target_branch:
|
target_branch:
|
||||||
description: "Branch to tag"
|
description: "Branch to tag"
|
||||||
required: false
|
required: false
|
||||||
@@ -50,47 +30,10 @@ concurrency:
|
|||||||
cancel-in-progress: true
|
cancel-in-progress: true
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
# 1) Start AWS EC2 runner instances
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
pre-run:
|
|
||||||
name: Start AWS EC2 runners
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
permissions: write-all
|
|
||||||
outputs:
|
|
||||||
image_created: ${{ steps.created.outputs.image_created }}
|
|
||||||
steps:
|
|
||||||
- name: Capture created timestamp (shared)
|
|
||||||
id: created
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
set -euo pipefail
|
|
||||||
echo "image_created=$(date -u +%Y-%m-%dT%H:%M:%SZ)" >> "$GITHUB_OUTPUT"
|
|
||||||
|
|
||||||
- name: Configure AWS credentials (OIDC)
|
|
||||||
uses: aws-actions/configure-aws-credentials@8df5847569e6427dd6c4fb1cf565c83acfa8afa7 # v6.0.0
|
|
||||||
with:
|
|
||||||
role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/${{ secrets.AWS_ROLE_NAME }}
|
|
||||||
role-duration-seconds: 3600
|
|
||||||
aws-region: ${{ secrets.AWS_REGION }}
|
|
||||||
|
|
||||||
- name: Verify AWS identity
|
|
||||||
run: aws sts get-caller-identity
|
|
||||||
|
|
||||||
- name: Start EC2 instances
|
|
||||||
run: |
|
|
||||||
aws ec2 start-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_AMD_RUNNER }}
|
|
||||||
aws ec2 start-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_ARM_RUNNER }}
|
|
||||||
echo "EC2 instances started"
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
# 2) Prepare release
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
prepare:
|
prepare:
|
||||||
if: github.event_name == 'workflow_dispatch'
|
if: github.event_name == 'workflow_dispatch'
|
||||||
name: Prepare release (create tag)
|
name: Prepare release (create tag)
|
||||||
needs: [pre-run]
|
runs-on: ubuntu-24.04
|
||||||
runs-on: [self-hosted, linux, x64]
|
|
||||||
permissions:
|
permissions:
|
||||||
contents: write
|
contents: write
|
||||||
steps:
|
steps:
|
||||||
@@ -109,7 +52,6 @@ jobs:
|
|||||||
echo "Invalid version: $INPUT_VERSION (expected X.Y.Z or X.Y.Z-rc.N)" >&2
|
echo "Invalid version: $INPUT_VERSION (expected X.Y.Z or X.Y.Z-rc.N)" >&2
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
- name: Create and push tag
|
- name: Create and push tag
|
||||||
shell: bash
|
shell: bash
|
||||||
env:
|
env:
|
||||||
@@ -129,27 +71,11 @@ jobs:
|
|||||||
fi
|
fi
|
||||||
git tag -a "$VERSION" -m "Release $VERSION"
|
git tag -a "$VERSION" -m "Release $VERSION"
|
||||||
git push origin "refs/tags/$VERSION"
|
git push origin "refs/tags/$VERSION"
|
||||||
|
release:
|
||||||
# ---------------------------------------------------------------------------
|
if: ${{ github.event_name == 'workflow_dispatch' || (github.event_name == 'push' && github.actor != 'github-actions[bot]') }}
|
||||||
# 3) Build and Release (x86 job)
|
name: Build and Release
|
||||||
# ---------------------------------------------------------------------------
|
runs-on: ubuntu-24.04
|
||||||
build-amd:
|
|
||||||
name: Build image (linux/amd64)
|
|
||||||
needs: [pre-run, prepare]
|
|
||||||
if: ${{ needs.pre-run.result == 'success' && ((github.event_name == 'push' && github.actor != 'github-actions[bot]' && needs.prepare.result == 'skipped') || (github.event_name == 'workflow_dispatch' && (needs.prepare.result == 'success' || needs.prepare.result == 'skipped'))) }}
|
|
||||||
runs-on: [self-hosted, linux, x64]
|
|
||||||
timeout-minutes: 120
|
timeout-minutes: 120
|
||||||
env:
|
|
||||||
DOCKERHUB_IMAGE: docker.io/${{ github.repository_owner }}/${{ github.event.repository.name }}
|
|
||||||
GHCR_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}
|
|
||||||
IMAGE_LICENSE: ${{ github.event.repository.license.spdx_id || 'NOASSERTION' }}
|
|
||||||
IMAGE_CREATED: ${{ needs.pre-run.outputs.image_created }}
|
|
||||||
|
|
||||||
outputs:
|
|
||||||
tag: ${{ steps.tag.outputs.tag }}
|
|
||||||
is_rc: ${{ steps.tag.outputs.is_rc }}
|
|
||||||
major: ${{ steps.tag.outputs.major }}
|
|
||||||
minor: ${{ steps.tag.outputs.minor }}
|
|
||||||
|
|
||||||
steps:
|
steps:
|
||||||
- name: Checkout code
|
- name: Checkout code
|
||||||
@@ -157,59 +83,33 @@ jobs:
|
|||||||
with:
|
with:
|
||||||
fetch-depth: 0
|
fetch-depth: 0
|
||||||
|
|
||||||
- name: Monitor storage space
|
- name: Extract tag name
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
THRESHOLD=75
|
|
||||||
USED_SPACE=$(df / | grep / | awk '{ print $5 }' | sed 's/%//g')
|
|
||||||
echo "Used space: $USED_SPACE%"
|
|
||||||
if [ "$USED_SPACE" -ge "$THRESHOLD" ]; then
|
|
||||||
echo "Disk usage >= ${THRESHOLD}%, pruning docker..."
|
|
||||||
echo y | docker system prune -a || true
|
|
||||||
else
|
|
||||||
echo "Disk usage < ${THRESHOLD}%, no action needed."
|
|
||||||
fi
|
|
||||||
|
|
||||||
- name: Determine tag + rc/major/minor
|
|
||||||
id: tag
|
|
||||||
shell: bash
|
|
||||||
env:
|
env:
|
||||||
EVENT_NAME: ${{ github.event_name }}
|
EVENT_NAME: ${{ github.event_name }}
|
||||||
INPUT_VERSION: ${{ inputs.version }}
|
INPUT_VERSION: ${{ inputs.version }}
|
||||||
run: |
|
run: |
|
||||||
set -euo pipefail
|
|
||||||
if [ "$EVENT_NAME" = "workflow_dispatch" ]; then
|
if [ "$EVENT_NAME" = "workflow_dispatch" ]; then
|
||||||
TAG="$INPUT_VERSION"
|
echo "TAG=${INPUT_VERSION}" >> $GITHUB_ENV
|
||||||
else
|
else
|
||||||
TAG="${{ github.ref_name }}"
|
echo "TAG=${{ github.ref_name }}" >> $GITHUB_ENV
|
||||||
fi
|
fi
|
||||||
|
shell: bash
|
||||||
|
|
||||||
if ! [[ "$TAG" =~ ^[0-9]+\.[0-9]+\.[0-9]+(-rc\.[0-9]+)?$ ]]; then
|
- name: Validate pushed tag format (no leading 'v')
|
||||||
echo "Invalid tag: $TAG" >&2
|
if: ${{ github.event_name == 'push' }}
|
||||||
exit 1
|
shell: bash
|
||||||
|
env:
|
||||||
|
TAG_GOT: ${{ env.TAG }}
|
||||||
|
run: |
|
||||||
|
set -euo pipefail
|
||||||
|
if [[ "$TAG_GOT" =~ ^[0-9]+\.[0-9]+\.[0-9]+(-rc\.[0-9]+)?$ ]]; then
|
||||||
|
echo "Tag OK: $TAG_GOT"
|
||||||
|
exit 0
|
||||||
fi
|
fi
|
||||||
|
echo "ERROR: Tag '$TAG_GOT' is not allowed. Use 'X.Y.Z' or 'X.Y.Z-rc.N' (no leading 'v')." >&2
|
||||||
IS_RC="false"
|
exit 1
|
||||||
if [[ "$TAG" =~ -rc\.[0-9]+$ ]]; then
|
|
||||||
IS_RC="true"
|
|
||||||
fi
|
|
||||||
|
|
||||||
MAJOR="$(echo "$TAG" | cut -d. -f1)"
|
|
||||||
MINOR="$(echo "$TAG" | cut -d. -f1,2)"
|
|
||||||
|
|
||||||
echo "tag=$TAG" >> "$GITHUB_OUTPUT"
|
|
||||||
echo "is_rc=$IS_RC" >> "$GITHUB_OUTPUT"
|
|
||||||
echo "major=$MAJOR" >> "$GITHUB_OUTPUT"
|
|
||||||
echo "minor=$MINOR" >> "$GITHUB_OUTPUT"
|
|
||||||
|
|
||||||
echo "TAG=$TAG" >> $GITHUB_ENV
|
|
||||||
echo "IS_RC=$IS_RC" >> $GITHUB_ENV
|
|
||||||
echo "MAJOR_TAG=$MAJOR" >> $GITHUB_ENV
|
|
||||||
echo "MINOR_TAG=$MINOR" >> $GITHUB_ENV
|
|
||||||
|
|
||||||
- name: Wait for tag to be visible (dispatch only)
|
- name: Wait for tag to be visible (dispatch only)
|
||||||
if: ${{ github.event_name == 'workflow_dispatch' }}
|
if: ${{ github.event_name == 'workflow_dispatch' }}
|
||||||
shell: bash
|
|
||||||
run: |
|
run: |
|
||||||
set -euo pipefail
|
set -euo pipefail
|
||||||
for i in {1..90}; do
|
for i in {1..90}; do
|
||||||
@@ -219,718 +119,110 @@ jobs:
|
|||||||
echo "Tag not yet visible, retrying... ($i/90)"
|
echo "Tag not yet visible, retrying... ($i/90)"
|
||||||
sleep 2
|
sleep 2
|
||||||
done
|
done
|
||||||
echo "Tag ${TAG} not visible after waiting" >&2
|
echo "Tag ${TAG} not visible after waiting"; exit 1
|
||||||
exit 1
|
shell: bash
|
||||||
|
|
||||||
- name: Ensure repository is at the tagged commit (dispatch only)
|
- name: Ensure repository is at the tagged commit (dispatch only)
|
||||||
if: ${{ github.event_name == 'workflow_dispatch' }}
|
if: ${{ github.event_name == 'workflow_dispatch' }}
|
||||||
shell: bash
|
|
||||||
run: |
|
run: |
|
||||||
set -euo pipefail
|
set -euo pipefail
|
||||||
git fetch --tags --force
|
git fetch --tags --force
|
||||||
git checkout "refs/tags/${TAG}"
|
git checkout "refs/tags/${TAG}"
|
||||||
echo "Checked out $(git rev-parse --short HEAD) for tag ${TAG}"
|
echo "Checked out $(git rev-parse --short HEAD) for tag ${TAG}"
|
||||||
|
|
||||||
#- name: Set up QEMU
|
|
||||||
# uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0
|
|
||||||
|
|
||||||
#- name: Set up Docker Buildx
|
|
||||||
# uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0
|
|
||||||
|
|
||||||
- name: Log in to Docker Hub
|
|
||||||
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
|
|
||||||
with:
|
|
||||||
registry: docker.io
|
|
||||||
username: ${{ secrets.DOCKER_HUB_USERNAME }}
|
|
||||||
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
|
|
||||||
|
|
||||||
- name: Log in to GHCR
|
|
||||||
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
|
|
||||||
with:
|
|
||||||
registry: ghcr.io
|
|
||||||
username: ${{ github.actor }}
|
|
||||||
password: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
|
|
||||||
- name: Normalize image names to lowercase
|
|
||||||
shell: bash
|
shell: bash
|
||||||
|
|
||||||
|
- name: Detect release candidate (rc)
|
||||||
run: |
|
run: |
|
||||||
set -euo pipefail
|
set -euo pipefail
|
||||||
echo "GHCR_IMAGE=${GHCR_IMAGE,,}" >> "$GITHUB_ENV"
|
if [[ "${TAG}" =~ ^[0-9]+\.[0-9]+\.[0-9]+-rc\.[0-9]+$ ]]; then
|
||||||
echo "DOCKERHUB_IMAGE=${DOCKERHUB_IMAGE,,}" >> "$GITHUB_ENV"
|
echo "IS_RC=true" >> $GITHUB_ENV
|
||||||
|
|
||||||
- name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0
|
|
||||||
|
|
||||||
# Build ONLY amd64 and push arch-specific tag suffixes used later for manifest creation.
|
|
||||||
- name: Build and push (amd64 -> *:amd64-TAG)
|
|
||||||
id: build_amd
|
|
||||||
uses: docker/build-push-action@10e90e3645eae34f1e60eeb005ba3a3d33f178e8 # v6.19.2
|
|
||||||
with:
|
|
||||||
context: .
|
|
||||||
push: true
|
|
||||||
platforms: linux/amd64
|
|
||||||
build-args: VERSION=${{ env.TAG }}
|
|
||||||
tags: |
|
|
||||||
${{ env.GHCR_IMAGE }}:amd64-${{ env.TAG }}
|
|
||||||
${{ env.DOCKERHUB_IMAGE }}:amd64-${{ env.TAG }}
|
|
||||||
labels: |
|
|
||||||
org.opencontainers.image.title=${{ github.event.repository.name }}
|
|
||||||
org.opencontainers.image.version=${{ env.TAG }}
|
|
||||||
org.opencontainers.image.revision=${{ github.sha }}
|
|
||||||
org.opencontainers.image.source=${{ github.event.repository.html_url }}
|
|
||||||
org.opencontainers.image.url=${{ github.event.repository.html_url }}
|
|
||||||
org.opencontainers.image.documentation=${{ github.event.repository.html_url }}
|
|
||||||
org.opencontainers.image.description=${{ github.event.repository.description }}
|
|
||||||
org.opencontainers.image.licenses=${{ env.IMAGE_LICENSE }}
|
|
||||||
org.opencontainers.image.created=${{ env.IMAGE_CREATED }}
|
|
||||||
org.opencontainers.image.ref.name=${{ env.TAG }}
|
|
||||||
org.opencontainers.image.authors=${{ github.repository_owner }}
|
|
||||||
cache-from: type=gha,scope=${{ github.repository }}-amd64
|
|
||||||
cache-to: type=gha,mode=max,scope=${{ github.repository }}-amd64
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
# 4) Build ARM64 image natively on ARM runner
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
build-arm:
|
|
||||||
name: Build image (linux/arm64)
|
|
||||||
needs: [pre-run, prepare]
|
|
||||||
if: ${{ needs.pre-run.result == 'success' && ((github.event_name == 'push' && github.actor != 'github-actions[bot]' && needs.prepare.result == 'skipped') || (github.event_name == 'workflow_dispatch' && (needs.prepare.result == 'success' || needs.prepare.result == 'skipped'))) }}
|
|
||||||
runs-on: [self-hosted, linux, arm64] # NOTE: ensure label exists on runner
|
|
||||||
timeout-minutes: 120
|
|
||||||
env:
|
|
||||||
DOCKERHUB_IMAGE: docker.io/${{ github.repository_owner }}/${{ github.event.repository.name }}
|
|
||||||
GHCR_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}
|
|
||||||
IMAGE_LICENSE: ${{ github.event.repository.license.spdx_id || 'NOASSERTION' }}
|
|
||||||
IMAGE_CREATED: ${{ needs.pre-run.outputs.image_created }}
|
|
||||||
steps:
|
|
||||||
- name: Checkout code
|
|
||||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
|
||||||
with:
|
|
||||||
fetch-depth: 0
|
|
||||||
|
|
||||||
- name: Monitor storage space
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
THRESHOLD=75
|
|
||||||
USED_SPACE=$(df / | grep / | awk '{ print $5 }' | sed 's/%//g')
|
|
||||||
echo "Used space: $USED_SPACE%"
|
|
||||||
if [ "$USED_SPACE" -ge "$THRESHOLD" ]; then
|
|
||||||
echo y | docker system prune -a || true
|
|
||||||
fi
|
|
||||||
|
|
||||||
- name: Determine tag + validate format
|
|
||||||
shell: bash
|
|
||||||
env:
|
|
||||||
EVENT_NAME: ${{ github.event_name }}
|
|
||||||
INPUT_VERSION: ${{ inputs.version }}
|
|
||||||
run: |
|
|
||||||
set -euo pipefail
|
|
||||||
if [ "$EVENT_NAME" = "workflow_dispatch" ]; then
|
|
||||||
TAG="$INPUT_VERSION"
|
|
||||||
else
|
else
|
||||||
TAG="${{ github.ref_name }}"
|
echo "IS_RC=false" >> $GITHUB_ENV
|
||||||
fi
|
fi
|
||||||
|
|
||||||
if ! [[ "$TAG" =~ ^[0-9]+\.[0-9]+\.[0-9]+(-rc\.[0-9]+)?$ ]]; then
|
|
||||||
echo "Invalid tag: $TAG" >&2
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo "TAG=$TAG" >> $GITHUB_ENV
|
|
||||||
|
|
||||||
- name: Wait for tag to be visible (dispatch only)
|
|
||||||
if: ${{ github.event_name == 'workflow_dispatch' }}
|
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
|
||||||
set -euo pipefail
|
|
||||||
for i in {1..90}; do
|
|
||||||
if git ls-remote --tags origin "refs/tags/${TAG}" | grep -qE "refs/tags/${TAG}$"; then
|
|
||||||
echo "Tag ${TAG} is visible on origin"; exit 0
|
|
||||||
fi
|
|
||||||
echo "Tag not yet visible, retrying... ($i/90)"
|
|
||||||
sleep 2
|
|
||||||
done
|
|
||||||
echo "Tag ${TAG} not visible after waiting" >&2
|
|
||||||
exit 1
|
|
||||||
|
|
||||||
- name: Ensure repository is at the tagged commit (dispatch only)
|
|
||||||
if: ${{ github.event_name == 'workflow_dispatch' }}
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
set -euo pipefail
|
|
||||||
git fetch --tags --force
|
|
||||||
git checkout "refs/tags/${TAG}"
|
|
||||||
echo "Checked out $(git rev-parse --short HEAD) for tag ${TAG}"
|
|
||||||
|
|
||||||
- name: Log in to Docker Hub
|
|
||||||
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
|
|
||||||
with:
|
|
||||||
registry: docker.io
|
|
||||||
username: ${{ secrets.DOCKER_HUB_USERNAME }}
|
|
||||||
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
|
|
||||||
|
|
||||||
- name: Log in to GHCR
|
|
||||||
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
|
|
||||||
with:
|
|
||||||
registry: ghcr.io
|
|
||||||
username: ${{ github.actor }}
|
|
||||||
password: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
|
|
||||||
- name: Normalize image names to lowercase
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
set -euo pipefail
|
|
||||||
echo "GHCR_IMAGE=${GHCR_IMAGE,,}" >> "$GITHUB_ENV"
|
|
||||||
echo "DOCKERHUB_IMAGE=${DOCKERHUB_IMAGE,,}" >> "$GITHUB_ENV"
|
|
||||||
|
|
||||||
- name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0
|
|
||||||
|
|
||||||
# Build ONLY arm64 and push arch-specific tag suffixes used later for manifest creation.
|
|
||||||
- name: Build and push (arm64 -> *:arm64-TAG)
|
|
||||||
id: build_arm
|
|
||||||
uses: docker/build-push-action@10e90e3645eae34f1e60eeb005ba3a3d33f178e8 # v6.19.2
|
|
||||||
with:
|
|
||||||
context: .
|
|
||||||
push: true
|
|
||||||
platforms: linux/arm64
|
|
||||||
build-args: VERSION=${{ env.TAG }}
|
|
||||||
tags: |
|
|
||||||
${{ env.GHCR_IMAGE }}:arm64-${{ env.TAG }}
|
|
||||||
${{ env.DOCKERHUB_IMAGE }}:arm64-${{ env.TAG }}
|
|
||||||
labels: |
|
|
||||||
org.opencontainers.image.title=${{ github.event.repository.name }}
|
|
||||||
org.opencontainers.image.version=${{ env.TAG }}
|
|
||||||
org.opencontainers.image.revision=${{ github.sha }}
|
|
||||||
org.opencontainers.image.source=${{ github.event.repository.html_url }}
|
|
||||||
org.opencontainers.image.url=${{ github.event.repository.html_url }}
|
|
||||||
org.opencontainers.image.documentation=${{ github.event.repository.html_url }}
|
|
||||||
org.opencontainers.image.description=${{ github.event.repository.description }}
|
|
||||||
org.opencontainers.image.licenses=${{ env.IMAGE_LICENSE }}
|
|
||||||
org.opencontainers.image.created=${{ env.IMAGE_CREATED }}
|
|
||||||
org.opencontainers.image.ref.name=${{ env.TAG }}
|
|
||||||
org.opencontainers.image.authors=${{ github.repository_owner }}
|
|
||||||
cache-from: type=gha,scope=${{ github.repository }}-arm64
|
|
||||||
cache-to: type=gha,mode=max,scope=${{ github.repository }}-arm64
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
# 4b) Build ARMv7 image (linux/arm/v7) on arm runner via QEMU
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
build-armv7:
|
|
||||||
name: Build image (linux/arm/v7)
|
|
||||||
needs: [pre-run, prepare]
|
|
||||||
if: ${{ needs.pre-run.result == 'success' && ((github.event_name == 'push' && github.actor != 'github-actions[bot]' && needs.prepare.result == 'skipped') || (github.event_name == 'workflow_dispatch' && (needs.prepare.result == 'success' || needs.prepare.result == 'skipped'))) }}
|
|
||||||
runs-on: [self-hosted, linux, arm64]
|
|
||||||
timeout-minutes: 120
|
|
||||||
env:
|
|
||||||
DOCKERHUB_IMAGE: docker.io/${{ github.repository_owner }}/${{ github.event.repository.name }}
|
|
||||||
GHCR_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}
|
|
||||||
IMAGE_LICENSE: ${{ github.event.repository.license.spdx_id || 'NOASSERTION' }}
|
|
||||||
IMAGE_CREATED: ${{ needs.pre-run.outputs.image_created }}
|
|
||||||
steps:
|
|
||||||
- name: Checkout code
|
|
||||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
|
||||||
with:
|
|
||||||
fetch-depth: 0
|
|
||||||
|
|
||||||
- name: Determine tag + validate format
|
|
||||||
shell: bash
|
|
||||||
env:
|
|
||||||
EVENT_NAME: ${{ github.event_name }}
|
|
||||||
INPUT_VERSION: ${{ inputs.version }}
|
|
||||||
run: |
|
|
||||||
set -euo pipefail
|
|
||||||
if [ "$EVENT_NAME" = "workflow_dispatch" ]; then
|
|
||||||
TAG="$INPUT_VERSION"
|
|
||||||
else
|
|
||||||
TAG="${{ github.ref_name }}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
if ! [[ "$TAG" =~ ^[0-9]+\.[0-9]+\.[0-9]+(-rc\.[0-9]+)?$ ]]; then
|
|
||||||
echo "Invalid tag: $TAG" >&2
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo "TAG=$TAG" >> $GITHUB_ENV
|
|
||||||
|
|
||||||
- name: Wait for tag to be visible (dispatch only)
|
|
||||||
if: ${{ github.event_name == 'workflow_dispatch' }}
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
set -euo pipefail
|
|
||||||
for i in {1..90}; do
|
|
||||||
if git ls-remote --tags origin "refs/tags/${TAG}" | grep -qE "refs/tags/${TAG}$"; then
|
|
||||||
echo "Tag ${TAG} is visible on origin"; exit 0
|
|
||||||
fi
|
|
||||||
echo "Tag not yet visible, retrying... ($i/90)"
|
|
||||||
sleep 2
|
|
||||||
done
|
|
||||||
echo "Tag ${TAG} not visible after waiting" >&2
|
|
||||||
exit 1
|
|
||||||
|
|
||||||
- name: Ensure repository is at the tagged commit (dispatch only)
|
|
||||||
if: ${{ github.event_name == 'workflow_dispatch' }}
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
set -euo pipefail
|
|
||||||
git fetch --tags --force
|
|
||||||
git checkout "refs/tags/${TAG}"
|
|
||||||
echo "Checked out $(git rev-parse --short HEAD) for tag ${TAG}"
|
|
||||||
|
|
||||||
- name: Log in to Docker Hub
|
|
||||||
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
|
|
||||||
with:
|
|
||||||
registry: docker.io
|
|
||||||
username: ${{ secrets.DOCKER_HUB_USERNAME }}
|
|
||||||
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
|
|
||||||
|
|
||||||
- name: Log in to GHCR
|
|
||||||
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
|
|
||||||
with:
|
|
||||||
registry: ghcr.io
|
|
||||||
username: ${{ github.actor }}
|
|
||||||
password: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
|
|
||||||
- name: Normalize image names to lowercase
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
set -euo pipefail
|
|
||||||
echo "GHCR_IMAGE=${GHCR_IMAGE,,}" >> "$GITHUB_ENV"
|
|
||||||
echo "DOCKERHUB_IMAGE=${DOCKERHUB_IMAGE,,}" >> "$GITHUB_ENV"
|
|
||||||
|
|
||||||
- name: Set up QEMU
|
|
||||||
uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0
|
|
||||||
|
|
||||||
- name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0
|
|
||||||
|
|
||||||
- name: Build and push (arm/v7 -> *:armv7-TAG)
|
|
||||||
id: build_armv7
|
|
||||||
uses: docker/build-push-action@10e90e3645eae34f1e60eeb005ba3a3d33f178e8 # v6.19.2
|
|
||||||
with:
|
|
||||||
context: .
|
|
||||||
push: true
|
|
||||||
platforms: linux/arm/v7
|
|
||||||
build-args: VERSION=${{ env.TAG }}
|
|
||||||
tags: |
|
|
||||||
${{ env.GHCR_IMAGE }}:armv7-${{ env.TAG }}
|
|
||||||
${{ env.DOCKERHUB_IMAGE }}:armv7-${{ env.TAG }}
|
|
||||||
labels: |
|
|
||||||
org.opencontainers.image.title=${{ github.event.repository.name }}
|
|
||||||
org.opencontainers.image.version=${{ env.TAG }}
|
|
||||||
org.opencontainers.image.revision=${{ github.sha }}
|
|
||||||
org.opencontainers.image.source=${{ github.event.repository.html_url }}
|
|
||||||
org.opencontainers.image.url=${{ github.event.repository.html_url }}
|
|
||||||
org.opencontainers.image.documentation=${{ github.event.repository.html_url }}
|
|
||||||
org.opencontainers.image.description=${{ github.event.repository.description }}
|
|
||||||
org.opencontainers.image.licenses=${{ env.IMAGE_LICENSE }}
|
|
||||||
org.opencontainers.image.created=${{ env.IMAGE_CREATED }}
|
|
||||||
org.opencontainers.image.ref.name=${{ env.TAG }}
|
|
||||||
org.opencontainers.image.authors=${{ github.repository_owner }}
|
|
||||||
cache-from: type=gha,scope=${{ github.repository }}-armv7
|
|
||||||
cache-to: type=gha,mode=max,scope=${{ github.repository }}-armv7
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
# 5) Create and push multi-arch manifests (TAG, plus optional latest/major/minor)
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
create-manifest:
|
|
||||||
name: Create multi-arch manifests
|
|
||||||
needs: [build-amd, build-arm, build-armv7]
|
|
||||||
if: ${{ needs.build-amd.result == 'success' && needs.build-arm.result == 'success' && needs.build-armv7.result == 'success' }}
|
|
||||||
runs-on: [self-hosted, linux, x64] # NOTE: ensure label exists on runner
|
|
||||||
timeout-minutes: 30
|
|
||||||
env:
|
|
||||||
DOCKERHUB_IMAGE: docker.io/${{ github.repository_owner }}/${{ github.event.repository.name }}
|
|
||||||
GHCR_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}
|
|
||||||
TAG: ${{ needs.build-amd.outputs.tag }}
|
|
||||||
IS_RC: ${{ needs.build-amd.outputs.is_rc }}
|
|
||||||
MAJOR_TAG: ${{ needs.build-amd.outputs.major }}
|
|
||||||
MINOR_TAG: ${{ needs.build-amd.outputs.minor }}
|
|
||||||
# workflow_dispatch controls are respected only here (tagging policy)
|
|
||||||
#PUBLISH_LATEST: ${{ github.event_name == 'workflow_dispatch' && inputs.publish_latest || vars.PUBLISH_LATEST }}
|
|
||||||
#PUBLISH_MINOR: ${{ github.event_name == 'workflow_dispatch' && inputs.publish_minor || vars.PUBLISH_MINOR }}
|
|
||||||
steps:
|
|
||||||
- name: Log in to Docker Hub
|
|
||||||
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
|
|
||||||
with:
|
|
||||||
registry: docker.io
|
|
||||||
username: ${{ secrets.DOCKER_HUB_USERNAME }}
|
|
||||||
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
|
|
||||||
|
|
||||||
- name: Log in to GHCR
|
|
||||||
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
|
|
||||||
with:
|
|
||||||
registry: ghcr.io
|
|
||||||
username: ${{ github.actor }}
|
|
||||||
password: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
|
|
||||||
- name: Normalize image names to lowercase
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
set -euo pipefail
|
|
||||||
echo "GHCR_IMAGE=${GHCR_IMAGE,,}" >> "$GITHUB_ENV"
|
|
||||||
echo "DOCKERHUB_IMAGE=${DOCKERHUB_IMAGE,,}" >> "$GITHUB_ENV"
|
|
||||||
|
|
||||||
- name: Set up Docker Buildx (needed for imagetools)
|
|
||||||
uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0
|
|
||||||
|
|
||||||
- name: Create & push multi-arch index (GHCR :TAG) via imagetools
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
set -euo pipefail
|
|
||||||
docker buildx imagetools create \
|
|
||||||
-t "${GHCR_IMAGE}:${TAG}" \
|
|
||||||
"${GHCR_IMAGE}:amd64-${TAG}" \
|
|
||||||
"${GHCR_IMAGE}:arm64-${TAG}" \
|
|
||||||
"${GHCR_IMAGE}:armv7-${TAG}"
|
|
||||||
|
|
||||||
- name: Create & push multi-arch index (Docker Hub :TAG) via imagetools
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
set -euo pipefail
|
|
||||||
docker buildx imagetools create \
|
|
||||||
-t "${DOCKERHUB_IMAGE}:${TAG}" \
|
|
||||||
"${DOCKERHUB_IMAGE}:amd64-${TAG}" \
|
|
||||||
"${DOCKERHUB_IMAGE}:arm64-${TAG}" \
|
|
||||||
"${DOCKERHUB_IMAGE}:armv7-${TAG}"
|
|
||||||
|
|
||||||
# Additional tags for non-RC releases: latest, major, minor (always)
|
|
||||||
- name: Publish additional tags (non-RC only) via imagetools
|
|
||||||
if: ${{ env.IS_RC != 'true' }}
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
set -euo pipefail
|
|
||||||
|
|
||||||
tags_to_publish=("${MAJOR_TAG}" "${MINOR_TAG}" "latest")
|
|
||||||
|
|
||||||
for t in "${tags_to_publish[@]}"; do
|
|
||||||
echo "Publishing GHCR tag ${t} -> ${TAG}"
|
|
||||||
docker buildx imagetools create \
|
|
||||||
-t "${GHCR_IMAGE}:${t}" \
|
|
||||||
"${GHCR_IMAGE}:amd64-${TAG}" \
|
|
||||||
"${GHCR_IMAGE}:arm64-${TAG}" \
|
|
||||||
"${GHCR_IMAGE}:armv7-${TAG}"
|
|
||||||
|
|
||||||
echo "Publishing Docker Hub tag ${t} -> ${TAG}"
|
|
||||||
docker buildx imagetools create \
|
|
||||||
-t "${DOCKERHUB_IMAGE}:${t}" \
|
|
||||||
"${DOCKERHUB_IMAGE}:amd64-${TAG}" \
|
|
||||||
"${DOCKERHUB_IMAGE}:arm64-${TAG}" \
|
|
||||||
"${DOCKERHUB_IMAGE}:armv7-${TAG}"
|
|
||||||
done
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
# 6) Sign/attest + build binaries + draft release (x86 runner)
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
sign-and-release:
|
|
||||||
name: Sign, attest, and release
|
|
||||||
needs: [create-manifest, build-amd]
|
|
||||||
if: ${{ needs.create-manifest.result == 'success' && needs.build-amd.result == 'success' }}
|
|
||||||
runs-on: [self-hosted, linux, x64] # NOTE: ensure label exists on runner
|
|
||||||
timeout-minutes: 120
|
|
||||||
env:
|
|
||||||
DOCKERHUB_IMAGE: docker.io/${{ github.repository_owner }}/${{ github.event.repository.name }}
|
|
||||||
GHCR_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}
|
|
||||||
TAG: ${{ needs.build-amd.outputs.tag }}
|
|
||||||
IS_RC: ${{ needs.build-amd.outputs.is_rc }}
|
|
||||||
IMAGE_LICENSE: ${{ github.event.repository.license.spdx_id || 'NOASSERTION' }}
|
|
||||||
IMAGE_CREATED: ${{ needs.pre-run.outputs.image_created }}
|
|
||||||
steps:
|
|
||||||
- name: Checkout code
|
|
||||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
|
||||||
with:
|
|
||||||
fetch-depth: 0
|
|
||||||
|
|
||||||
- name: Ensure repository is at the tagged commit (dispatch only)
|
|
||||||
if: ${{ github.event_name == 'workflow_dispatch' }}
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
set -euo pipefail
|
|
||||||
git fetch --tags --force
|
|
||||||
git checkout "refs/tags/${TAG}"
|
|
||||||
echo "Checked out $(git rev-parse --short HEAD) for tag ${TAG}"
|
|
||||||
|
|
||||||
- name: Install Go
|
- name: Install Go
|
||||||
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
|
uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
|
||||||
with:
|
with:
|
||||||
go-version-file: go.mod
|
go-version-file: go.mod
|
||||||
|
|
||||||
- name: Log in to Docker Hub
|
- name: Cache Go modules
|
||||||
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
|
if: ${{ hashFiles('**/go.sum') != '' }}
|
||||||
|
uses: actions/cache@9255dc7a253b0ccc959486e2bca901246202afeb # v5.0.1
|
||||||
with:
|
with:
|
||||||
registry: docker.io
|
path: |
|
||||||
username: ${{ secrets.DOCKER_HUB_USERNAME }}
|
~/.cache/go-build
|
||||||
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
|
~/go/pkg/mod
|
||||||
|
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
||||||
|
restore-keys: |
|
||||||
|
${{ runner.os }}-go-
|
||||||
|
- name: Go vet & test
|
||||||
|
if: ${{ hashFiles('**/go.mod') != '' }}
|
||||||
|
run: |
|
||||||
|
go version
|
||||||
|
go vet ./...
|
||||||
|
go test ./... -race -covermode=atomic
|
||||||
|
shell: bash
|
||||||
|
|
||||||
- name: Log in to GHCR
|
# - name: Trivy scan (GHCR image)
|
||||||
uses: docker/login-action@c94ce9fb468520275223c153574b00df6fe4bcc9 # v3.7.0
|
# id: trivy
|
||||||
with:
|
# uses: aquasecurity/trivy-action@b6643a29fecd7f34b3597bc6acb0a98b03d33ff8 # v0.33.1
|
||||||
registry: ghcr.io
|
# with:
|
||||||
username: ${{ github.actor }}
|
# image-ref: ${{ env.GHCR_IMAGE }}@${{ steps.build.outputs.digest }}
|
||||||
password: ${{ secrets.GITHUB_TOKEN }}
|
# format: sarif
|
||||||
|
# output: trivy-ghcr.sarif
|
||||||
|
# ignore-unfixed: true
|
||||||
|
# vuln-type: os,library
|
||||||
|
# severity: CRITICAL,HIGH
|
||||||
|
# exit-code: ${{ (vars.TRIVY_FAIL || '0') }}
|
||||||
|
|
||||||
- name: Normalize image names to lowercase
|
# - name: Upload SARIF,trivy
|
||||||
|
# if: ${{ always() && hashFiles('trivy-ghcr.sarif') != '' }}
|
||||||
|
# uses: github/codeql-action/upload-sarif@fdbfb4d2750291e159f0156def62b853c2798ca2 # v4.31.5
|
||||||
|
# with:
|
||||||
|
# sarif_file: trivy-ghcr.sarif
|
||||||
|
# category: Image Vulnerability Scan
|
||||||
|
|
||||||
|
#- name: Build binaries
|
||||||
|
# env:
|
||||||
|
# CGO_ENABLED: "0"
|
||||||
|
# GOFLAGS: "-trimpath"
|
||||||
|
# run: |
|
||||||
|
# set -euo pipefail
|
||||||
|
# TAG_VAR="${TAG}"
|
||||||
|
# make -j 10 go-build-release tag=$TAG_VAR
|
||||||
|
# shell: bash
|
||||||
|
|
||||||
|
- name: Ensure clean git state for GoReleaser
|
||||||
shell: bash
|
shell: bash
|
||||||
run: |
|
run: |
|
||||||
set -euo pipefail
|
set -euo pipefail
|
||||||
echo "GHCR_IMAGE=${GHCR_IMAGE,,}" >> "$GITHUB_ENV"
|
echo "Checking git status before GoReleaser..."
|
||||||
echo "DOCKERHUB_IMAGE=${DOCKERHUB_IMAGE,,}" >> "$GITHUB_ENV"
|
git status --porcelain || true
|
||||||
|
if [ -n "$(git status --porcelain)" ]; then
|
||||||
- name: Ensure jq is installed
|
echo "Repository contains local changes. Listing files and diff:"
|
||||||
shell: bash
|
git status --porcelain
|
||||||
run: |
|
git --no-pager diff --name-status || true
|
||||||
set -euo pipefail
|
echo "Resetting tracked files to HEAD to ensure a clean release state"
|
||||||
if command -v jq >/dev/null 2>&1; then
|
git restore --source=HEAD --worktree --staged -- .
|
||||||
exit 0
|
echo "After reset git status:"
|
||||||
fi
|
git status --porcelain || true
|
||||||
sudo apt-get update -y
|
else
|
||||||
sudo apt-get install -y jq
|
echo "Repository clean."
|
||||||
|
|
||||||
- name: Set up Docker Buildx (needed for imagetools)
|
|
||||||
uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0
|
|
||||||
|
|
||||||
- name: Resolve multi-arch digest refs (by TAG)
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
set -euo pipefail
|
|
||||||
|
|
||||||
get_digest() {
|
|
||||||
local ref="$1"
|
|
||||||
local d=""
|
|
||||||
# Primary: buildx format output
|
|
||||||
d="$(docker buildx imagetools inspect "$ref" --format '{{.Manifest.Digest}}' 2>/dev/null || true)"
|
|
||||||
|
|
||||||
# Fallback: parse from plain text if format fails
|
|
||||||
if ! [[ "$d" =~ ^sha256:[0-9a-f]{64}$ ]]; then
|
|
||||||
d="$(docker buildx imagetools inspect "$ref" 2>/dev/null | awk '/^Digest:/ {print $2; exit}' || true)"
|
|
||||||
fi
|
|
||||||
|
|
||||||
if ! [[ "$d" =~ ^sha256:[0-9a-f]{64}$ ]]; then
|
|
||||||
echo "ERROR: Could not extract digest for $ref" >&2
|
|
||||||
docker buildx imagetools inspect "$ref" || true
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
|
|
||||||
echo "$d"
|
|
||||||
}
|
|
||||||
|
|
||||||
GHCR_DIGEST="$(get_digest "${GHCR_IMAGE}:${TAG}")"
|
|
||||||
echo "GHCR_REF=${GHCR_IMAGE}@${GHCR_DIGEST}" >> "$GITHUB_ENV"
|
|
||||||
echo "GHCR_DIGEST=${GHCR_DIGEST}" >> "$GITHUB_ENV"
|
|
||||||
echo "Resolved GHCR_REF=${GHCR_IMAGE}@${GHCR_DIGEST}"
|
|
||||||
|
|
||||||
if [ -n "${{ secrets.DOCKER_HUB_USERNAME }}" ] && [ -n "${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}" ]; then
|
|
||||||
DH_DIGEST="$(get_digest "${DOCKERHUB_IMAGE}:${TAG}")"
|
|
||||||
echo "DH_REF=${DOCKERHUB_IMAGE}@${DH_DIGEST}" >> "$GITHUB_ENV"
|
|
||||||
echo "DH_DIGEST=${DH_DIGEST}" >> "$GITHUB_ENV"
|
|
||||||
echo "Resolved DH_REF=${DOCKERHUB_IMAGE}@${DH_DIGEST}"
|
|
||||||
fi
|
fi
|
||||||
|
|
||||||
- name: Attest build provenance (GHCR) (digest)
|
- name: Run GoReleaser config check
|
||||||
uses: actions/attest-build-provenance@96278af6caaf10aea03fd8d33a09a777ca52d62f # v3.2.0
|
uses: goreleaser/goreleaser-action@v6
|
||||||
with:
|
with:
|
||||||
subject-name: ${{ env.GHCR_IMAGE }}
|
version: 2.14.0
|
||||||
subject-digest: ${{ env.GHCR_DIGEST }}
|
args: check
|
||||||
push-to-registry: true
|
env:
|
||||||
show-summary: true
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
|
||||||
- name: Attest build provenance (Docker Hub)
|
- name: Run GoReleaser (binaries + deb/rpm/apk)
|
||||||
continue-on-error: true
|
uses: goreleaser/goreleaser-action@v6
|
||||||
if: ${{ env.DH_DIGEST != '' }}
|
|
||||||
uses: actions/attest-build-provenance@96278af6caaf10aea03fd8d33a09a777ca52d62f # v3.2.0
|
|
||||||
with:
|
with:
|
||||||
subject-name: index.docker.io/${{ github.repository_owner }}/${{ github.event.repository.name }}
|
version: 2.14.0
|
||||||
subject-digest: ${{ env.DH_DIGEST }}
|
args: release --clean
|
||||||
push-to-registry: true
|
|
||||||
show-summary: true
|
|
||||||
|
|
||||||
- name: Install cosign
|
|
||||||
uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
|
|
||||||
with:
|
|
||||||
cosign-release: "v3.0.2"
|
|
||||||
|
|
||||||
- name: Sanity check cosign private key
|
|
||||||
env:
|
env:
|
||||||
COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
|
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
set -euo pipefail
|
|
||||||
cosign public-key --key env://COSIGN_PRIVATE_KEY >/dev/null
|
|
||||||
|
|
||||||
- name: Generate SBOM (SPDX JSON) from GHCR digest
|
|
||||||
uses: aquasecurity/trivy-action@97e0b3872f55f89b95b2f65b3dbab56962816478 # v0.34.2
|
|
||||||
with:
|
|
||||||
image-ref: ${{ env.GHCR_REF }}
|
|
||||||
format: spdx-json
|
|
||||||
output: sbom.spdx.json
|
|
||||||
version: v0.69.3
|
|
||||||
|
|
||||||
- name: Validate + minify SBOM JSON
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
set -euo pipefail
|
|
||||||
jq -e . sbom.spdx.json >/dev/null
|
|
||||||
jq -c . sbom.spdx.json > sbom.min.json && mv sbom.min.json sbom.spdx.json
|
|
||||||
|
|
||||||
- name: Sign GHCR digest (key, recursive)
|
|
||||||
env:
|
|
||||||
COSIGN_YES: "true"
|
|
||||||
COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
|
|
||||||
COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
set -euo pipefail
|
|
||||||
cosign sign --key env://COSIGN_PRIVATE_KEY --recursive "${GHCR_REF}"
|
|
||||||
sleep 20
|
|
||||||
|
|
||||||
- name: Create SBOM attestation (GHCR, key)
|
|
||||||
env:
|
|
||||||
COSIGN_YES: "true"
|
|
||||||
COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
|
|
||||||
COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
set -euo pipefail
|
|
||||||
cosign attest \
|
|
||||||
--key env://COSIGN_PRIVATE_KEY \
|
|
||||||
--type spdxjson \
|
|
||||||
--predicate sbom.spdx.json \
|
|
||||||
"${GHCR_REF}"
|
|
||||||
|
|
||||||
- name: Create SBOM attestation (Docker Hub, key)
|
|
||||||
continue-on-error: true
|
|
||||||
env:
|
|
||||||
COSIGN_YES: "true"
|
|
||||||
COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
|
|
||||||
COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
|
|
||||||
COSIGN_DOCKER_MEDIA_TYPES: "1"
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
set -euo pipefail
|
|
||||||
cosign attest \
|
|
||||||
--key env://COSIGN_PRIVATE_KEY \
|
|
||||||
--type spdxjson \
|
|
||||||
--predicate sbom.spdx.json \
|
|
||||||
"${DH_REF}"
|
|
||||||
|
|
||||||
- name: Keyless sign & verify GHCR digest (OIDC)
|
|
||||||
env:
|
|
||||||
COSIGN_YES: "true"
|
|
||||||
WORKFLOW_REF: ${{ github.workflow_ref }}
|
|
||||||
ISSUER: https://token.actions.githubusercontent.com
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
set -euo pipefail
|
|
||||||
cosign sign --rekor-url https://rekor.sigstore.dev --recursive "${GHCR_REF}"
|
|
||||||
cosign verify \
|
|
||||||
--certificate-oidc-issuer "${ISSUER}" \
|
|
||||||
--certificate-identity "https://github.com/${WORKFLOW_REF}" \
|
|
||||||
"${GHCR_REF}" -o text
|
|
||||||
|
|
||||||
- name: Verify signature (public key) GHCR digest + tag
|
|
||||||
env:
|
|
||||||
COSIGN_PUBLIC_KEY: ${{ secrets.COSIGN_PUBLIC_KEY }}
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
set -euo pipefail
|
|
||||||
cosign verify --key env://COSIGN_PUBLIC_KEY "${GHCR_REF}" -o text
|
|
||||||
cosign verify --key env://COSIGN_PUBLIC_KEY "${GHCR_IMAGE}:${TAG}" -o text
|
|
||||||
|
|
||||||
- name: Verify SBOM attestation (GHCR)
|
|
||||||
env:
|
|
||||||
COSIGN_PUBLIC_KEY: ${{ secrets.COSIGN_PUBLIC_KEY }}
|
|
||||||
run: cosign verify-attestation --key env://COSIGN_PUBLIC_KEY --type spdxjson "${GHCR_REF}" -o text
|
|
||||||
shell: bash
|
|
||||||
|
|
||||||
- name: Sign Docker Hub digest (key, recursive)
|
|
||||||
continue-on-error: true
|
|
||||||
env:
|
|
||||||
COSIGN_YES: "true"
|
|
||||||
COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
|
|
||||||
COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
|
|
||||||
COSIGN_DOCKER_MEDIA_TYPES: "1"
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
set -euo pipefail
|
|
||||||
cosign sign --key env://COSIGN_PRIVATE_KEY --recursive "${DH_REF}"
|
|
||||||
|
|
||||||
- name: Keyless sign & verify Docker Hub digest (OIDC)
|
|
||||||
continue-on-error: true
|
|
||||||
if: ${{ env.DH_REF != '' }}
|
|
||||||
env:
|
|
||||||
COSIGN_YES: "true"
|
|
||||||
ISSUER: https://token.actions.githubusercontent.com
|
|
||||||
COSIGN_DOCKER_MEDIA_TYPES: "1"
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
set -euo pipefail
|
|
||||||
cosign sign --rekor-url https://rekor.sigstore.dev --recursive "${DH_REF}"
|
|
||||||
cosign verify \
|
|
||||||
--certificate-oidc-issuer "${ISSUER}" \
|
|
||||||
--certificate-identity "https://github.com/${{ github.workflow_ref }}" \
|
|
||||||
"${DH_REF}" -o text
|
|
||||||
|
|
||||||
- name: Verify signature (public key) Docker Hub digest + tag
|
|
||||||
continue-on-error: true
|
|
||||||
if: ${{ env.DH_REF != '' }}
|
|
||||||
env:
|
|
||||||
COSIGN_PUBLIC_KEY: ${{ secrets.COSIGN_PUBLIC_KEY }}
|
|
||||||
COSIGN_DOCKER_MEDIA_TYPES: "1"
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
set -euo pipefail
|
|
||||||
cosign verify --key env://COSIGN_PUBLIC_KEY "${DH_REF}" -o text
|
|
||||||
cosign verify --key env://COSIGN_PUBLIC_KEY "${DOCKERHUB_IMAGE}:${TAG}" -o text
|
|
||||||
|
|
||||||
- name: Build binaries
|
|
||||||
env:
|
|
||||||
CGO_ENABLED: "0"
|
|
||||||
GOFLAGS: "-trimpath"
|
|
||||||
shell: bash
|
|
||||||
run: |
|
|
||||||
set -euo pipefail
|
|
||||||
make -j 10 go-build-release VERSION="${TAG}"
|
|
||||||
|
|
||||||
- name: Create GitHub Release (draft)
|
|
||||||
uses: softprops/action-gh-release@5be0e66d93ac7ed76da52eca8bb058f665c3a5fe # v2.4.2
|
|
||||||
with:
|
|
||||||
tag_name: ${{ env.TAG }}
|
|
||||||
generate_release_notes: true
|
|
||||||
prerelease: ${{ env.IS_RC == 'true' }}
|
|
||||||
files: |
|
|
||||||
bin/*
|
|
||||||
fail_on_unmatched_files: true
|
|
||||||
draft: true
|
|
||||||
body: |
|
|
||||||
## Container Images
|
|
||||||
- GHCR: `${{ env.GHCR_REF }}`
|
|
||||||
- Docker Hub: `${{ env.DH_REF || 'N/A' }}`
|
|
||||||
**Tag:** `${{ env.TAG }}`
|
|
||||||
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
# 7) Stop AWS EC2 runner instances
|
|
||||||
# ---------------------------------------------------------------------------
|
|
||||||
|
|
||||||
post-run:
|
|
||||||
name: Stop AWS EC2 runners
|
|
||||||
needs: [pre-run, prepare, build-amd, build-arm, build-armv7, create-manifest, sign-and-release]
|
|
||||||
if: ${{ always() && needs.pre-run.result == 'success' }}
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
permissions: write-all
|
|
||||||
steps:
|
|
||||||
- name: Configure AWS credentials (OIDC)
|
|
||||||
uses: aws-actions/configure-aws-credentials@8df5847569e6427dd6c4fb1cf565c83acfa8afa7 # v6.0.0
|
|
||||||
with:
|
|
||||||
role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/${{ secrets.AWS_ROLE_NAME }}
|
|
||||||
role-duration-seconds: 3600
|
|
||||||
aws-region: ${{ secrets.AWS_REGION }}
|
|
||||||
|
|
||||||
- name: Verify AWS identity
|
|
||||||
run: aws sts get-caller-identity
|
|
||||||
|
|
||||||
- name: Stop EC2 instances
|
|
||||||
run: |
|
|
||||||
aws ec2 stop-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_AMD_RUNNER }}
|
|
||||||
aws ec2 stop-instances --instance-ids ${{ secrets.EC2_INSTANCE_ID_ARM_RUNNER }}
|
|
||||||
echo "EC2 instances stopped"
|
|
||||||
|
|||||||
20
.github/workflows/mirror.yaml
vendored
20
.github/workflows/mirror.yaml
vendored
@@ -1,20 +1,28 @@
|
|||||||
name: Mirror & Sign (Docker Hub to GHCR)
|
name: Mirror & Sign (Docker Hub to GHCR)
|
||||||
|
|
||||||
on:
|
on:
|
||||||
workflow_dispatch: {}
|
workflow_dispatch:
|
||||||
|
inputs:
|
||||||
|
source_image:
|
||||||
|
description: "Source image (e.g., docker.io/owner/newt)"
|
||||||
|
required: true
|
||||||
|
type: string
|
||||||
|
dest_image:
|
||||||
|
description: "Destination image (e.g., ghcr.io/owner/newt)"
|
||||||
|
required: true
|
||||||
|
type: string
|
||||||
|
|
||||||
permissions:
|
permissions:
|
||||||
contents: read
|
contents: read
|
||||||
packages: write
|
packages: write
|
||||||
id-token: write # for keyless OIDC
|
id-token: write # for keyless OIDC
|
||||||
|
|
||||||
env:
|
|
||||||
SOURCE_IMAGE: docker.io/fosrl/newt
|
|
||||||
DEST_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}
|
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
mirror-and-dual-sign:
|
mirror-and-dual-sign:
|
||||||
runs-on: amd64-runner
|
runs-on: ubuntu-24.04
|
||||||
|
env:
|
||||||
|
SOURCE_IMAGE: ${{ inputs.source_image }}
|
||||||
|
DEST_IMAGE: ${{ inputs.dest_image }}
|
||||||
steps:
|
steps:
|
||||||
- name: Install skopeo + jq
|
- name: Install skopeo + jq
|
||||||
run: |
|
run: |
|
||||||
|
|||||||
64
.github/workflows/publish-apt.yml
vendored
Normal file
64
.github/workflows/publish-apt.yml
vendored
Normal file
@@ -0,0 +1,64 @@
|
|||||||
|
name: Publish APT repo to S3/CloudFront
|
||||||
|
|
||||||
|
on:
|
||||||
|
release:
|
||||||
|
types: [published]
|
||||||
|
push:
|
||||||
|
tags:
|
||||||
|
- "[0-9]+.[0-9]+.[0-9]+"
|
||||||
|
- "[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+"
|
||||||
|
workflow_dispatch:
|
||||||
|
inputs:
|
||||||
|
tag:
|
||||||
|
description: "Tag to publish (e.g. 1.9.0). Leave empty to use latest release."
|
||||||
|
required: false
|
||||||
|
type: string
|
||||||
|
backfill_all:
|
||||||
|
description: "Build/publish repo for ALL releases."
|
||||||
|
required: false
|
||||||
|
default: false
|
||||||
|
type: boolean
|
||||||
|
|
||||||
|
permissions:
|
||||||
|
id-token: write
|
||||||
|
contents: read
|
||||||
|
|
||||||
|
jobs:
|
||||||
|
publish:
|
||||||
|
runs-on: ubuntu-24.04
|
||||||
|
env:
|
||||||
|
PKG_NAME: newt
|
||||||
|
SUITE: stable
|
||||||
|
COMPONENT: main
|
||||||
|
REPO_BASE_URL: https://repo.dev.fosrl.io/apt
|
||||||
|
|
||||||
|
AWS_REGION: ${{ vars.AWS_REGION }}
|
||||||
|
S3_BUCKET: ${{ vars.S3_BUCKET }}
|
||||||
|
S3_PREFIX: ${{ vars.S3_PREFIX }}
|
||||||
|
CLOUDFRONT_DISTRIBUTION_ID: ${{ vars.CLOUDFRONT_DISTRIBUTION_ID }}
|
||||||
|
|
||||||
|
INPUT_TAG: ${{ inputs.tag }}
|
||||||
|
BACKFILL_ALL: ${{ inputs.backfill_all }}
|
||||||
|
EVENT_TAG: ${{ github.event.release.tag_name }}
|
||||||
|
PUSH_TAG: ${{ github.ref_name }}
|
||||||
|
GH_REPO: ${{ github.repository }}
|
||||||
|
|
||||||
|
steps:
|
||||||
|
- name: Checkout repository
|
||||||
|
uses: actions/checkout@v4
|
||||||
|
|
||||||
|
- name: Configure AWS credentials (OIDC)
|
||||||
|
uses: aws-actions/configure-aws-credentials@v4
|
||||||
|
with:
|
||||||
|
role-to-assume: ${{ secrets.AWS_ROLE_ARN }}
|
||||||
|
aws-region: ${{ vars.AWS_REGION }}
|
||||||
|
|
||||||
|
- name: Install dependencies
|
||||||
|
run: sudo apt-get update && sudo apt-get install -y dpkg-dev apt-utils gnupg curl jq gh
|
||||||
|
|
||||||
|
- name: Publish APT repo
|
||||||
|
env:
|
||||||
|
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||||
|
APT_GPG_PRIVATE_KEY: ${{ secrets.APT_GPG_PRIVATE_KEY }}
|
||||||
|
APT_GPG_PASSPHRASE: ${{ secrets.APT_GPG_PASSPHRASE }}
|
||||||
|
run: ./scripts/publish-apt.sh
|
||||||
4
.github/workflows/test.yml
vendored
4
.github/workflows/test.yml
vendored
@@ -16,7 +16,7 @@ jobs:
|
|||||||
matrix:
|
matrix:
|
||||||
target:
|
target:
|
||||||
- local
|
- local
|
||||||
- docker-build
|
#- docker-build
|
||||||
- go-build-release-darwin-amd64
|
- go-build-release-darwin-amd64
|
||||||
- go-build-release-darwin-arm64
|
- go-build-release-darwin-arm64
|
||||||
- go-build-release-freebsd-amd64
|
- go-build-release-freebsd-amd64
|
||||||
@@ -31,7 +31,7 @@ jobs:
|
|||||||
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
|
||||||
|
|
||||||
- name: Set up Go
|
- name: Set up Go
|
||||||
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
|
uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0
|
||||||
with:
|
with:
|
||||||
go-version: 1.25
|
go-version: 1.25
|
||||||
|
|
||||||
|
|||||||
44
.goreleaser.yaml
Normal file
44
.goreleaser.yaml
Normal file
@@ -0,0 +1,44 @@
|
|||||||
|
version: 2
|
||||||
|
project_name: newt
|
||||||
|
|
||||||
|
release:
|
||||||
|
draft: true
|
||||||
|
prerelease: "{{ contains .Tag \"-rc.\" }}"
|
||||||
|
name_template: "{{ .Tag }}"
|
||||||
|
|
||||||
|
builds:
|
||||||
|
- id: newt
|
||||||
|
# build the package directory (include all .go files) instead of a single file
|
||||||
|
main: .
|
||||||
|
binary: newt
|
||||||
|
env:
|
||||||
|
- CGO_ENABLED=0
|
||||||
|
goos:
|
||||||
|
- linux
|
||||||
|
goarch:
|
||||||
|
- amd64
|
||||||
|
- arm64
|
||||||
|
flags:
|
||||||
|
- -trimpath
|
||||||
|
ldflags:
|
||||||
|
- -s -w -X main.newtVersion={{ .Tag }}
|
||||||
|
|
||||||
|
checksum:
|
||||||
|
name_template: "checksums.txt"
|
||||||
|
|
||||||
|
nfpms:
|
||||||
|
- id: packages
|
||||||
|
package_name: newt
|
||||||
|
vendor: fosrl
|
||||||
|
maintainer: fosrl <repo@fosrl.io>
|
||||||
|
description: Newt - userspace tunnel client and TCP/UDP proxy
|
||||||
|
license: AGPL-3.0-or-later
|
||||||
|
formats:
|
||||||
|
- deb
|
||||||
|
- rpm
|
||||||
|
- apk
|
||||||
|
bindir: /usr/bin
|
||||||
|
file_name_template: "newt_{{ .Version }}_{{ .Arch }}"
|
||||||
|
contents:
|
||||||
|
- src: LICENSE
|
||||||
|
dst: /usr/share/doc/newt/LICENSE
|
||||||
@@ -1,5 +1,4 @@
|
|||||||
# FROM golang:1.25-alpine AS builder
|
FROM golang:1.25-alpine AS builder
|
||||||
FROM public.ecr.aws/docker/library/golang:1.26-alpine AS builder
|
|
||||||
|
|
||||||
# Install git and ca-certificates
|
# Install git and ca-certificates
|
||||||
RUN apk --no-cache add ca-certificates git tzdata
|
RUN apk --no-cache add ca-certificates git tzdata
|
||||||
@@ -17,10 +16,9 @@ RUN go mod download
|
|||||||
COPY . .
|
COPY . .
|
||||||
|
|
||||||
# Build the application
|
# Build the application
|
||||||
ARG VERSION=dev
|
RUN CGO_ENABLED=0 GOOS=linux go build -ldflags="-s -w" -o /newt
|
||||||
RUN CGO_ENABLED=0 GOOS=linux go build -ldflags="-s -w -X main.newtVersion=${VERSION}" -o /newt
|
|
||||||
|
|
||||||
FROM public.ecr.aws/docker/library/alpine:3.23 AS runner
|
FROM alpine:3.23 AS runner
|
||||||
|
|
||||||
RUN apk --no-cache add ca-certificates tzdata iputils
|
RUN apk --no-cache add ca-certificates tzdata iputils
|
||||||
|
|
||||||
|
|||||||
23
Makefile
23
Makefile
@@ -2,9 +2,6 @@
|
|||||||
|
|
||||||
all: local
|
all: local
|
||||||
|
|
||||||
VERSION ?= dev
|
|
||||||
LDFLAGS = -X main.newtVersion=$(VERSION)
|
|
||||||
|
|
||||||
local:
|
local:
|
||||||
CGO_ENABLED=0 go build -o ./bin/newt
|
CGO_ENABLED=0 go build -o ./bin/newt
|
||||||
|
|
||||||
@@ -43,31 +40,31 @@ go-build-release: \
|
|||||||
go-build-release-freebsd-arm64
|
go-build-release-freebsd-arm64
|
||||||
|
|
||||||
go-build-release-linux-arm64:
|
go-build-release-linux-arm64:
|
||||||
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -ldflags "$(LDFLAGS)" -o bin/newt_linux_arm64
|
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -o bin/newt_linux_arm64
|
||||||
|
|
||||||
go-build-release-linux-arm32-v7:
|
go-build-release-linux-arm32-v7:
|
||||||
CGO_ENABLED=0 GOOS=linux GOARCH=arm GOARM=7 go build -ldflags "$(LDFLAGS)" -o bin/newt_linux_arm32
|
CGO_ENABLED=0 GOOS=linux GOARCH=arm GOARM=7 go build -o bin/newt_linux_arm32
|
||||||
|
|
||||||
go-build-release-linux-arm32-v6:
|
go-build-release-linux-arm32-v6:
|
||||||
CGO_ENABLED=0 GOOS=linux GOARCH=arm GOARM=6 go build -ldflags "$(LDFLAGS)" -o bin/newt_linux_arm32v6
|
CGO_ENABLED=0 GOOS=linux GOARCH=arm GOARM=6 go build -o bin/newt_linux_arm32v6
|
||||||
|
|
||||||
go-build-release-linux-amd64:
|
go-build-release-linux-amd64:
|
||||||
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -ldflags "$(LDFLAGS)" -o bin/newt_linux_amd64
|
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o bin/newt_linux_amd64
|
||||||
|
|
||||||
go-build-release-linux-riscv64:
|
go-build-release-linux-riscv64:
|
||||||
CGO_ENABLED=0 GOOS=linux GOARCH=riscv64 go build -ldflags "$(LDFLAGS)" -o bin/newt_linux_riscv64
|
CGO_ENABLED=0 GOOS=linux GOARCH=riscv64 go build -o bin/newt_linux_riscv64
|
||||||
|
|
||||||
go-build-release-darwin-arm64:
|
go-build-release-darwin-arm64:
|
||||||
CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 go build -ldflags "$(LDFLAGS)" -o bin/newt_darwin_arm64
|
CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 go build -o bin/newt_darwin_arm64
|
||||||
|
|
||||||
go-build-release-darwin-amd64:
|
go-build-release-darwin-amd64:
|
||||||
CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 go build -ldflags "$(LDFLAGS)" -o bin/newt_darwin_amd64
|
CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 go build -o bin/newt_darwin_amd64
|
||||||
|
|
||||||
go-build-release-windows-amd64:
|
go-build-release-windows-amd64:
|
||||||
CGO_ENABLED=0 GOOS=windows GOARCH=amd64 go build -ldflags "$(LDFLAGS)" -o bin/newt_windows_amd64.exe
|
CGO_ENABLED=0 GOOS=windows GOARCH=amd64 go build -o bin/newt_windows_amd64.exe
|
||||||
|
|
||||||
go-build-release-freebsd-amd64:
|
go-build-release-freebsd-amd64:
|
||||||
CGO_ENABLED=0 GOOS=freebsd GOARCH=amd64 go build -ldflags "$(LDFLAGS)" -o bin/newt_freebsd_amd64
|
CGO_ENABLED=0 GOOS=freebsd GOARCH=amd64 go build -o bin/newt_freebsd_amd64
|
||||||
|
|
||||||
go-build-release-freebsd-arm64:
|
go-build-release-freebsd-arm64:
|
||||||
CGO_ENABLED=0 GOOS=freebsd GOARCH=arm64 go build -ldflags "$(LDFLAGS)" -o bin/newt_freebsd_arm64
|
CGO_ENABLED=0 GOOS=freebsd GOARCH=arm64 go build -o bin/newt_freebsd_arm64
|
||||||
|
|||||||
13
README.md
13
README.md
@@ -1,15 +1,24 @@
|
|||||||
# Newt
|
# Newt
|
||||||
|
|
||||||
[](https://pkg.go.dev/github.com/fosrl/newt)
|
[](https://pkg.go.dev/github.com/fosrl/newt)
|
||||||
[](https://github.com/fosrl/newt/blob/main/LICENSE)
|
[](https://github.com/fosrl/newt/blob/main/LICENSE)
|
||||||
[](https://goreportcard.com/report/github.com/fosrl/newt)
|
[](https://goreportcard.com/report/github.com/fosrl/newt)
|
||||||
|
|
||||||
Newt is a fully user space [WireGuard](https://www.wireguard.com/) tunnel client and TCP/UDP proxy, designed to securely expose private resources controlled by Pangolin. By using Newt, you don't need to manage complex WireGuard tunnels and NATing.
|
Newt is a fully user space [WireGuard](https://www.wireguard.com/) tunnel client and TCP/UDP proxy, designed to securely expose private resources controlled by Pangolin. By using Newt, you don't need to manage complex WireGuard tunnels and NATing.
|
||||||
|
|
||||||
### Installation and Documentation
|
## Installation and Documentation
|
||||||
|
|
||||||
Newt is used with Pangolin and Gerbil as part of the larger system. See documentation below:
|
Newt is used with Pangolin and Gerbil as part of the larger system. See documentation below:
|
||||||
|
|
||||||
- [Full Documentation](https://docs.pangolin.net/manage/sites/understanding-sites)
|
- [Full Documentation](https://docs.pangolin.net/manage/sites/understanding-sites)
|
||||||
|
|
||||||
|
### Install via APT (Debian/Ubuntu)
|
||||||
|
|
||||||
|
```bash
|
||||||
|
curl -fsSL https://repo.dev.fosrl.io/apt/public.key | sudo gpg --dearmor -o /usr/share/keyrings/newt-archive-keyring.gpg
|
||||||
|
echo "deb [signed-by=/usr/share/keyrings/newt-archive-keyring.gpg] https://repo.dev.fosrl.io/apt stable main" | sudo tee /etc/apt/sources.list.d/newt.list
|
||||||
|
sudo apt update && sudo apt install newt
|
||||||
|
```
|
||||||
|
|
||||||
## Key Functions
|
## Key Functions
|
||||||
|
|
||||||
|
|||||||
@@ -46,12 +46,11 @@ func startAuthDaemon(ctx context.Context) error {
|
|||||||
|
|
||||||
// Create auth daemon server
|
// Create auth daemon server
|
||||||
cfg := authdaemon.Config{
|
cfg := authdaemon.Config{
|
||||||
DisableHTTPS: true, // We run without HTTP server in newt
|
DisableHTTPS: true, // We run without HTTP server in newt
|
||||||
PresharedKey: "this-key-is-not-used", // Not used in embedded mode, but set to non-empty to satisfy validation
|
PresharedKey: "this-key-is-not-used", // Not used in embedded mode, but set to non-empty to satisfy validation
|
||||||
PrincipalsFilePath: principalsFile,
|
PrincipalsFilePath: principalsFile,
|
||||||
CACertPath: caCertPath,
|
CACertPath: caCertPath,
|
||||||
Force: true,
|
Force: true,
|
||||||
GenerateRandomPassword: authDaemonGenerateRandomPassword,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
srv, err := authdaemon.NewServer(cfg)
|
srv, err := authdaemon.NewServer(cfg)
|
||||||
@@ -73,6 +72,8 @@ func startAuthDaemon(ctx context.Context) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
// runPrincipalsCmd executes the principals subcommand logic
|
// runPrincipalsCmd executes the principals subcommand logic
|
||||||
func runPrincipalsCmd(args []string) {
|
func runPrincipalsCmd(args []string) {
|
||||||
opts := struct {
|
opts := struct {
|
||||||
@@ -147,4 +148,4 @@ Example:
|
|||||||
newt principals --username alice
|
newt principals --username alice
|
||||||
|
|
||||||
`, defaultPrincipalsPath)
|
`, defaultPrincipalsPath)
|
||||||
}
|
}
|
||||||
@@ -7,8 +7,8 @@ import (
|
|||||||
// ProcessConnection runs the same logic as POST /connection: CA cert, user create/reconcile, principals.
|
// ProcessConnection runs the same logic as POST /connection: CA cert, user create/reconcile, principals.
|
||||||
// Use this when DisableHTTPS is true (e.g. embedded in Newt) instead of calling the API.
|
// Use this when DisableHTTPS is true (e.g. embedded in Newt) instead of calling the API.
|
||||||
func (s *Server) ProcessConnection(req ConnectionRequest) {
|
func (s *Server) ProcessConnection(req ConnectionRequest) {
|
||||||
logger.Info("connection: niceId=%q username=%q metadata.sudoMode=%q metadata.sudoCommands=%v metadata.homedir=%v metadata.groups=%v",
|
logger.Info("connection: niceId=%q username=%q metadata.sudo=%v metadata.homedir=%v",
|
||||||
req.NiceId, req.Username, req.Metadata.SudoMode, req.Metadata.SudoCommands, req.Metadata.Homedir, req.Metadata.Groups)
|
req.NiceId, req.Username, req.Metadata.Sudo, req.Metadata.Homedir)
|
||||||
|
|
||||||
cfg := &s.cfg
|
cfg := &s.cfg
|
||||||
if cfg.CACertPath != "" {
|
if cfg.CACertPath != "" {
|
||||||
@@ -16,7 +16,7 @@ func (s *Server) ProcessConnection(req ConnectionRequest) {
|
|||||||
logger.Warn("auth-daemon: write CA cert: %v", err)
|
logger.Warn("auth-daemon: write CA cert: %v", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if err := ensureUser(req.Username, req.Metadata, s.cfg.GenerateRandomPassword); err != nil {
|
if err := ensureUser(req.Username, req.Metadata); err != nil {
|
||||||
logger.Warn("auth-daemon: ensure user: %v", err)
|
logger.Warn("auth-daemon: ensure user: %v", err)
|
||||||
}
|
}
|
||||||
if cfg.PrincipalsFilePath != "" {
|
if cfg.PrincipalsFilePath != "" {
|
||||||
|
|||||||
@@ -4,8 +4,6 @@ package authdaemon
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bufio"
|
"bufio"
|
||||||
"crypto/rand"
|
|
||||||
"encoding/hex"
|
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
@@ -124,73 +122,8 @@ func sudoGroup() string {
|
|||||||
return "sudo"
|
return "sudo"
|
||||||
}
|
}
|
||||||
|
|
||||||
// setRandomPassword generates a random password and sets it for username via chpasswd.
|
|
||||||
// Used when GenerateRandomPassword is true so SSH with PermitEmptyPasswords no can accept the user.
|
|
||||||
func setRandomPassword(username string) error {
|
|
||||||
b := make([]byte, 16)
|
|
||||||
if _, err := rand.Read(b); err != nil {
|
|
||||||
return fmt.Errorf("generate password: %w", err)
|
|
||||||
}
|
|
||||||
password := hex.EncodeToString(b)
|
|
||||||
cmd := exec.Command("chpasswd")
|
|
||||||
cmd.Stdin = strings.NewReader(username + ":" + password)
|
|
||||||
if out, err := cmd.CombinedOutput(); err != nil {
|
|
||||||
return fmt.Errorf("chpasswd: %w (output: %s)", err, string(out))
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
const skelDir = "/etc/skel"
|
|
||||||
|
|
||||||
// copySkelInto copies files from srcDir (e.g. /etc/skel) into dstDir (e.g. user's home).
|
|
||||||
// Only creates files that don't already exist. All created paths are chowned to uid:gid.
|
|
||||||
func copySkelInto(srcDir, dstDir string, uid, gid int) {
|
|
||||||
entries, err := os.ReadDir(srcDir)
|
|
||||||
if err != nil {
|
|
||||||
if !os.IsNotExist(err) {
|
|
||||||
logger.Warn("auth-daemon: read %s: %v", srcDir, err)
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
|
||||||
for _, e := range entries {
|
|
||||||
name := e.Name()
|
|
||||||
src := filepath.Join(srcDir, name)
|
|
||||||
dst := filepath.Join(dstDir, name)
|
|
||||||
if e.IsDir() {
|
|
||||||
if st, err := os.Stat(dst); err == nil && st.IsDir() {
|
|
||||||
copySkelInto(src, dst, uid, gid)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if err := os.MkdirAll(dst, 0755); err != nil {
|
|
||||||
logger.Warn("auth-daemon: mkdir %s: %v", dst, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if err := os.Chown(dst, uid, gid); err != nil {
|
|
||||||
logger.Warn("auth-daemon: chown %s: %v", dst, err)
|
|
||||||
}
|
|
||||||
copySkelInto(src, dst, uid, gid)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if _, err := os.Stat(dst); err == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
data, err := os.ReadFile(src)
|
|
||||||
if err != nil {
|
|
||||||
logger.Warn("auth-daemon: read %s: %v", src, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if err := os.WriteFile(dst, data, 0644); err != nil {
|
|
||||||
logger.Warn("auth-daemon: write %s: %v", dst, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if err := os.Chown(dst, uid, gid); err != nil {
|
|
||||||
logger.Warn("auth-daemon: chown %s: %v", dst, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ensureUser creates the system user if missing, or reconciles sudo and homedir to match meta.
|
// ensureUser creates the system user if missing, or reconciles sudo and homedir to match meta.
|
||||||
func ensureUser(username string, meta ConnectionMetadata, generateRandomPassword bool) error {
|
func ensureUser(username string, meta ConnectionMetadata) error {
|
||||||
if username == "" {
|
if username == "" {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@@ -199,49 +132,12 @@ func ensureUser(username string, meta ConnectionMetadata, generateRandomPassword
|
|||||||
if _, ok := err.(user.UnknownUserError); !ok {
|
if _, ok := err.(user.UnknownUserError); !ok {
|
||||||
return fmt.Errorf("lookup user %s: %w", username, err)
|
return fmt.Errorf("lookup user %s: %w", username, err)
|
||||||
}
|
}
|
||||||
return createUser(username, meta, generateRandomPassword)
|
return createUser(username, meta)
|
||||||
}
|
}
|
||||||
return reconcileUser(u, meta)
|
return reconcileUser(u, meta)
|
||||||
}
|
}
|
||||||
|
|
||||||
// desiredGroups returns the exact list of supplementary groups the user should have:
|
func createUser(username string, meta ConnectionMetadata) error {
|
||||||
// meta.Groups plus the sudo group when meta.SudoMode is "full" (deduped).
|
|
||||||
func desiredGroups(meta ConnectionMetadata) []string {
|
|
||||||
seen := make(map[string]struct{})
|
|
||||||
var out []string
|
|
||||||
for _, g := range meta.Groups {
|
|
||||||
g = strings.TrimSpace(g)
|
|
||||||
if g == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
if _, ok := seen[g]; ok {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
seen[g] = struct{}{}
|
|
||||||
out = append(out, g)
|
|
||||||
}
|
|
||||||
if meta.SudoMode == "full" {
|
|
||||||
sg := sudoGroup()
|
|
||||||
if _, ok := seen[sg]; !ok {
|
|
||||||
out = append(out, sg)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return out
|
|
||||||
}
|
|
||||||
|
|
||||||
// setUserGroups sets the user's supplementary groups to exactly groups (local mirrors metadata).
|
|
||||||
// When groups is empty, clears all supplementary groups (usermod -G "").
|
|
||||||
func setUserGroups(username string, groups []string) {
|
|
||||||
list := strings.Join(groups, ",")
|
|
||||||
cmd := exec.Command("usermod", "-G", list, username)
|
|
||||||
if out, err := cmd.CombinedOutput(); err != nil {
|
|
||||||
logger.Warn("auth-daemon: usermod -G %s: %v (output: %s)", list, err, string(out))
|
|
||||||
} else {
|
|
||||||
logger.Info("auth-daemon: set %s supplementary groups to %s", username, list)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func createUser(username string, meta ConnectionMetadata, generateRandomPassword bool) error {
|
|
||||||
args := []string{"-s", "/bin/bash"}
|
args := []string{"-s", "/bin/bash"}
|
||||||
if meta.Homedir {
|
if meta.Homedir {
|
||||||
args = append(args, "-m")
|
args = append(args, "-m")
|
||||||
@@ -254,143 +150,75 @@ func createUser(username string, meta ConnectionMetadata, generateRandomPassword
|
|||||||
return fmt.Errorf("useradd %s: %w (output: %s)", username, err, string(out))
|
return fmt.Errorf("useradd %s: %w (output: %s)", username, err, string(out))
|
||||||
}
|
}
|
||||||
logger.Info("auth-daemon: created user %s (homedir=%v)", username, meta.Homedir)
|
logger.Info("auth-daemon: created user %s (homedir=%v)", username, meta.Homedir)
|
||||||
if generateRandomPassword {
|
if meta.Sudo {
|
||||||
if err := setRandomPassword(username); err != nil {
|
group := sudoGroup()
|
||||||
logger.Warn("auth-daemon: set random password for %s: %v", username, err)
|
cmd := exec.Command("usermod", "-aG", group, username)
|
||||||
|
if out, err := cmd.CombinedOutput(); err != nil {
|
||||||
|
logger.Warn("auth-daemon: usermod -aG %s %s: %v (output: %s)", group, username, err, string(out))
|
||||||
} else {
|
} else {
|
||||||
logger.Info("auth-daemon: set random password for %s (PermitEmptyPasswords no)", username)
|
logger.Info("auth-daemon: added %s to %s", username, group)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if meta.Homedir {
|
|
||||||
if u, err := user.Lookup(username); err == nil && u.HomeDir != "" {
|
|
||||||
uid, gid := mustAtoi(u.Uid), mustAtoi(u.Gid)
|
|
||||||
copySkelInto(skelDir, u.HomeDir, uid, gid)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
setUserGroups(username, desiredGroups(meta))
|
|
||||||
switch meta.SudoMode {
|
|
||||||
case "full":
|
|
||||||
if err := configurePasswordlessSudo(username); err != nil {
|
|
||||||
logger.Warn("auth-daemon: configure passwordless sudo for %s: %v", username, err)
|
|
||||||
}
|
|
||||||
case "commands":
|
|
||||||
if len(meta.SudoCommands) > 0 {
|
|
||||||
if err := configureSudoCommands(username, meta.SudoCommands); err != nil {
|
|
||||||
logger.Warn("auth-daemon: configure sudo commands for %s: %v", username, err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
default:
|
|
||||||
removeSudoers(username)
|
|
||||||
}
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
const sudoersFilePrefix = "90-pangolin-"
|
|
||||||
|
|
||||||
func sudoersPath(username string) string {
|
|
||||||
return filepath.Join("/etc/sudoers.d", sudoersFilePrefix+username)
|
|
||||||
}
|
|
||||||
|
|
||||||
// writeSudoersFile writes content to the user's sudoers.d file and validates with visudo.
|
|
||||||
func writeSudoersFile(username, content string) error {
|
|
||||||
sudoersFile := sudoersPath(username)
|
|
||||||
tmpFile := sudoersFile + ".tmp"
|
|
||||||
if err := os.WriteFile(tmpFile, []byte(content), 0440); err != nil {
|
|
||||||
return fmt.Errorf("write temp sudoers file: %w", err)
|
|
||||||
}
|
|
||||||
cmd := exec.Command("visudo", "-c", "-f", tmpFile)
|
|
||||||
if out, err := cmd.CombinedOutput(); err != nil {
|
|
||||||
os.Remove(tmpFile)
|
|
||||||
return fmt.Errorf("visudo validation failed: %w (output: %s)", err, string(out))
|
|
||||||
}
|
|
||||||
if err := os.Rename(tmpFile, sudoersFile); err != nil {
|
|
||||||
os.Remove(tmpFile)
|
|
||||||
return fmt.Errorf("move sudoers file: %w", err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// configurePasswordlessSudo creates a sudoers.d file to allow passwordless sudo for the user.
|
|
||||||
func configurePasswordlessSudo(username string) error {
|
|
||||||
content := fmt.Sprintf("# Created by Pangolin auth-daemon\n%s ALL=(ALL) NOPASSWD:ALL\n", username)
|
|
||||||
if err := writeSudoersFile(username, content); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
logger.Info("auth-daemon: configured passwordless sudo for %s", username)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// configureSudoCommands creates a sudoers.d file allowing only the listed commands (NOPASSWD).
|
|
||||||
// Each command should be a full path (e.g. /usr/bin/systemctl).
|
|
||||||
func configureSudoCommands(username string, commands []string) error {
|
|
||||||
var b strings.Builder
|
|
||||||
b.WriteString("# Created by Pangolin auth-daemon (restricted commands)\n")
|
|
||||||
n := 0
|
|
||||||
for _, c := range commands {
|
|
||||||
c = strings.TrimSpace(c)
|
|
||||||
if c == "" {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
fmt.Fprintf(&b, "%s ALL=(ALL) NOPASSWD: %s\n", username, c)
|
|
||||||
n++
|
|
||||||
}
|
|
||||||
if n == 0 {
|
|
||||||
return fmt.Errorf("no valid sudo commands")
|
|
||||||
}
|
|
||||||
if err := writeSudoersFile(username, b.String()); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
logger.Info("auth-daemon: configured restricted sudo for %s (%d commands)", username, len(commands))
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// removeSudoers removes the sudoers.d file for the user.
|
|
||||||
func removeSudoers(username string) {
|
|
||||||
sudoersFile := sudoersPath(username)
|
|
||||||
if err := os.Remove(sudoersFile); err != nil && !os.IsNotExist(err) {
|
|
||||||
logger.Warn("auth-daemon: remove sudoers for %s: %v", username, err)
|
|
||||||
} else if err == nil {
|
|
||||||
logger.Info("auth-daemon: removed sudoers for %s", username)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func mustAtoi(s string) int {
|
func mustAtoi(s string) int {
|
||||||
n, _ := strconv.Atoi(s)
|
n, _ := strconv.Atoi(s)
|
||||||
return n
|
return n
|
||||||
}
|
}
|
||||||
|
|
||||||
func reconcileUser(u *user.User, meta ConnectionMetadata) error {
|
func reconcileUser(u *user.User, meta ConnectionMetadata) error {
|
||||||
setUserGroups(u.Username, desiredGroups(meta))
|
group := sudoGroup()
|
||||||
switch meta.SudoMode {
|
inGroup, err := userInGroup(u.Username, group)
|
||||||
case "full":
|
if err != nil {
|
||||||
if err := configurePasswordlessSudo(u.Username); err != nil {
|
logger.Warn("auth-daemon: check group %s: %v", group, err)
|
||||||
logger.Warn("auth-daemon: configure passwordless sudo for %s: %v", u.Username, err)
|
inGroup = false
|
||||||
}
|
}
|
||||||
case "commands":
|
if meta.Sudo && !inGroup {
|
||||||
if len(meta.SudoCommands) > 0 {
|
cmd := exec.Command("usermod", "-aG", group, u.Username)
|
||||||
if err := configureSudoCommands(u.Username, meta.SudoCommands); err != nil {
|
if out, err := cmd.CombinedOutput(); err != nil {
|
||||||
logger.Warn("auth-daemon: configure sudo commands for %s: %v", u.Username, err)
|
logger.Warn("auth-daemon: usermod -aG %s %s: %v (output: %s)", group, u.Username, err, string(out))
|
||||||
}
|
|
||||||
} else {
|
} else {
|
||||||
removeSudoers(u.Username)
|
logger.Info("auth-daemon: added %s to %s", u.Username, group)
|
||||||
|
}
|
||||||
|
} else if !meta.Sudo && inGroup {
|
||||||
|
cmd := exec.Command("gpasswd", "-d", u.Username, group)
|
||||||
|
if out, err := cmd.CombinedOutput(); err != nil {
|
||||||
|
logger.Warn("auth-daemon: gpasswd -d %s %s: %v (output: %s)", u.Username, group, err, string(out))
|
||||||
|
} else {
|
||||||
|
logger.Info("auth-daemon: removed %s from %s", u.Username, group)
|
||||||
}
|
}
|
||||||
default:
|
|
||||||
removeSudoers(u.Username)
|
|
||||||
}
|
}
|
||||||
if meta.Homedir && u.HomeDir != "" {
|
if meta.Homedir && u.HomeDir != "" {
|
||||||
uid, gid := mustAtoi(u.Uid), mustAtoi(u.Gid)
|
|
||||||
if st, err := os.Stat(u.HomeDir); err != nil || !st.IsDir() {
|
if st, err := os.Stat(u.HomeDir); err != nil || !st.IsDir() {
|
||||||
if err := os.MkdirAll(u.HomeDir, 0755); err != nil {
|
if err := os.MkdirAll(u.HomeDir, 0755); err != nil {
|
||||||
logger.Warn("auth-daemon: mkdir %s: %v", u.HomeDir, err)
|
logger.Warn("auth-daemon: mkdir %s: %v", u.HomeDir, err)
|
||||||
} else {
|
} else {
|
||||||
|
uid, gid := mustAtoi(u.Uid), mustAtoi(u.Gid)
|
||||||
_ = os.Chown(u.HomeDir, uid, gid)
|
_ = os.Chown(u.HomeDir, uid, gid)
|
||||||
copySkelInto(skelDir, u.HomeDir, uid, gid)
|
|
||||||
logger.Info("auth-daemon: created home %s for %s", u.HomeDir, u.Username)
|
logger.Info("auth-daemon: created home %s for %s", u.HomeDir, u.Username)
|
||||||
}
|
}
|
||||||
} else {
|
|
||||||
// Ensure .bashrc etc. exist (e.g. home existed but was empty or skel was minimal)
|
|
||||||
copySkelInto(skelDir, u.HomeDir, uid, gid)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func userInGroup(username, groupName string) (bool, error) {
|
||||||
|
// getent group wheel returns "wheel:x:10:user1,user2"
|
||||||
|
cmd := exec.Command("getent", "group", groupName)
|
||||||
|
out, err := cmd.Output()
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
parts := strings.SplitN(strings.TrimSpace(string(out)), ":", 4)
|
||||||
|
if len(parts) < 4 {
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
members := strings.Split(parts[3], ",")
|
||||||
|
for _, m := range members {
|
||||||
|
if strings.TrimSpace(m) == username {
|
||||||
|
return true, nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false, nil
|
||||||
|
}
|
||||||
|
|||||||
@@ -12,7 +12,7 @@ func writeCACertIfNotExists(path, contents string, force bool) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// ensureUser returns an error on non-Linux.
|
// ensureUser returns an error on non-Linux.
|
||||||
func ensureUser(username string, meta ConnectionMetadata, generateRandomPassword bool) error {
|
func ensureUser(username string, meta ConnectionMetadata) error {
|
||||||
return errLinuxOnly
|
return errLinuxOnly
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -13,10 +13,8 @@ func (s *Server) registerRoutes() {
|
|||||||
|
|
||||||
// ConnectionMetadata is the metadata object in POST /connection.
|
// ConnectionMetadata is the metadata object in POST /connection.
|
||||||
type ConnectionMetadata struct {
|
type ConnectionMetadata struct {
|
||||||
SudoMode string `json:"sudoMode"` // "none" | "full" | "commands"
|
Sudo bool `json:"sudo"`
|
||||||
SudoCommands []string `json:"sudoCommands"` // used when sudoMode is "commands"
|
Homedir bool `json:"homedir"`
|
||||||
Homedir bool `json:"homedir"`
|
|
||||||
Groups []string `json:"groups"` // system groups to add the user to
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// ConnectionRequest is the JSON body for POST /connection.
|
// ConnectionRequest is the JSON body for POST /connection.
|
||||||
|
|||||||
@@ -27,9 +27,8 @@ type Config struct {
|
|||||||
Port int // Required when DisableHTTPS is false. Listen port for the HTTPS server. No default.
|
Port int // Required when DisableHTTPS is false. Listen port for the HTTPS server. No default.
|
||||||
PresharedKey string // Required when DisableHTTPS is false. HTTP auth (Authorization: Bearer <key> or X-Preshared-Key: <key>). No default.
|
PresharedKey string // Required when DisableHTTPS is false. HTTP auth (Authorization: Bearer <key> or X-Preshared-Key: <key>). No default.
|
||||||
CACertPath string // Required. Where to write the CA cert (e.g. /etc/ssh/ca.pem). No default.
|
CACertPath string // Required. Where to write the CA cert (e.g. /etc/ssh/ca.pem). No default.
|
||||||
Force bool // If true, overwrite existing CA cert (and other items) when content differs. Default false.
|
Force bool // If true, overwrite existing CA cert (and other items) when content differs. Default false.
|
||||||
PrincipalsFilePath string // Required. Path to the principals data file (JSON: username -> array of principals). No default.
|
PrincipalsFilePath string // Required. Path to the principals data file (JSON: username -> array of principals). No default.
|
||||||
GenerateRandomPassword bool // If true, set a random password on users when they are provisioned (for SSH PermitEmptyPasswords no).
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type Server struct {
|
type Server struct {
|
||||||
|
|||||||
@@ -37,12 +37,11 @@ type WgConfig struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type Target struct {
|
type Target struct {
|
||||||
SourcePrefix string `json:"sourcePrefix"`
|
SourcePrefix string `json:"sourcePrefix"`
|
||||||
SourcePrefixes []string `json:"sourcePrefixes"`
|
DestPrefix string `json:"destPrefix"`
|
||||||
DestPrefix string `json:"destPrefix"`
|
RewriteTo string `json:"rewriteTo,omitempty"`
|
||||||
RewriteTo string `json:"rewriteTo,omitempty"`
|
DisableIcmp bool `json:"disableIcmp,omitempty"`
|
||||||
DisableIcmp bool `json:"disableIcmp,omitempty"`
|
PortRange []PortRange `json:"portRange,omitempty"`
|
||||||
PortRange []PortRange `json:"portRange,omitempty"`
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type PortRange struct {
|
type PortRange struct {
|
||||||
@@ -161,8 +160,9 @@ func NewWireGuardService(interfaceName string, port uint16, mtu int, host string
|
|||||||
useNativeInterface: useNativeInterface,
|
useNativeInterface: useNativeInterface,
|
||||||
}
|
}
|
||||||
|
|
||||||
// Create the holepunch manager
|
// Create the holepunch manager with ResolveDomain function
|
||||||
service.holePunchManager = holepunch.NewManager(sharedBind, newtId, "newt", key.PublicKey().String(), nil)
|
// We'll need to pass a domain resolver function
|
||||||
|
service.holePunchManager = holepunch.NewManager(sharedBind, newtId, "newt", key.PublicKey().String())
|
||||||
|
|
||||||
// Register websocket handlers
|
// Register websocket handlers
|
||||||
wsClient.RegisterHandler("newt/wg/receive-config", service.handleConfig)
|
wsClient.RegisterHandler("newt/wg/receive-config", service.handleConfig)
|
||||||
@@ -172,7 +172,6 @@ func NewWireGuardService(interfaceName string, port uint16, mtu int, host string
|
|||||||
wsClient.RegisterHandler("newt/wg/targets/add", service.handleAddTarget)
|
wsClient.RegisterHandler("newt/wg/targets/add", service.handleAddTarget)
|
||||||
wsClient.RegisterHandler("newt/wg/targets/remove", service.handleRemoveTarget)
|
wsClient.RegisterHandler("newt/wg/targets/remove", service.handleRemoveTarget)
|
||||||
wsClient.RegisterHandler("newt/wg/targets/update", service.handleUpdateTarget)
|
wsClient.RegisterHandler("newt/wg/targets/update", service.handleUpdateTarget)
|
||||||
wsClient.RegisterHandler("newt/wg/sync", service.handleSyncConfig)
|
|
||||||
|
|
||||||
return service, nil
|
return service, nil
|
||||||
}
|
}
|
||||||
@@ -278,7 +277,7 @@ func (s *WireGuardService) StartHolepunch(publicKey string, endpoint string, rel
|
|||||||
}
|
}
|
||||||
|
|
||||||
if relayPort == 0 {
|
if relayPort == 0 {
|
||||||
relayPort = 21820
|
relayPort = 21820
|
||||||
}
|
}
|
||||||
|
|
||||||
// Convert websocket.ExitNode to holepunch.ExitNode
|
// Convert websocket.ExitNode to holepunch.ExitNode
|
||||||
@@ -493,183 +492,6 @@ func (s *WireGuardService) handleConfig(msg websocket.WSMessage) {
|
|||||||
logger.Info("Client connectivity setup. Ready to accept connections from clients!")
|
logger.Info("Client connectivity setup. Ready to accept connections from clients!")
|
||||||
}
|
}
|
||||||
|
|
||||||
// SyncConfig represents the configuration sent from server for syncing
|
|
||||||
type SyncConfig struct {
|
|
||||||
Targets []Target `json:"targets"`
|
|
||||||
Peers []Peer `json:"peers"`
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *WireGuardService) handleSyncConfig(msg websocket.WSMessage) {
|
|
||||||
var syncConfig SyncConfig
|
|
||||||
|
|
||||||
logger.Debug("Received sync message: %v", msg)
|
|
||||||
logger.Info("Received sync configuration from remote server")
|
|
||||||
|
|
||||||
jsonData, err := json.Marshal(msg.Data)
|
|
||||||
if err != nil {
|
|
||||||
logger.Error("Error marshaling sync data: %v", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := json.Unmarshal(jsonData, &syncConfig); err != nil {
|
|
||||||
logger.Error("Error unmarshaling sync data: %v", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sync peers
|
|
||||||
if err := s.syncPeers(syncConfig.Peers); err != nil {
|
|
||||||
logger.Error("Failed to sync peers: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Sync targets
|
|
||||||
if err := s.syncTargets(syncConfig.Targets); err != nil {
|
|
||||||
logger.Error("Failed to sync targets: %v", err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// syncPeers synchronizes the current peers with the desired state
|
|
||||||
// It removes peers not in the desired list and adds missing ones
|
|
||||||
func (s *WireGuardService) syncPeers(desiredPeers []Peer) error {
|
|
||||||
if s.device == nil {
|
|
||||||
return fmt.Errorf("WireGuard device is not initialized")
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get current peers from the device
|
|
||||||
currentConfig, err := s.device.IpcGet()
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to get current device config: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Parse current peer public keys
|
|
||||||
lines := strings.Split(currentConfig, "\n")
|
|
||||||
currentPeerKeys := make(map[string]bool)
|
|
||||||
for _, line := range lines {
|
|
||||||
if strings.HasPrefix(line, "public_key=") {
|
|
||||||
pubKey := strings.TrimPrefix(line, "public_key=")
|
|
||||||
currentPeerKeys[pubKey] = true
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Build a map of desired peers by their public key (normalized)
|
|
||||||
desiredPeerMap := make(map[string]Peer)
|
|
||||||
for _, peer := range desiredPeers {
|
|
||||||
// Normalize the public key for comparison
|
|
||||||
pubKey, err := wgtypes.ParseKey(peer.PublicKey)
|
|
||||||
if err != nil {
|
|
||||||
logger.Warn("Invalid public key in desired peers: %s", peer.PublicKey)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
normalizedKey := util.FixKey(pubKey.String())
|
|
||||||
desiredPeerMap[normalizedKey] = peer
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove peers that are not in the desired list
|
|
||||||
for currentKey := range currentPeerKeys {
|
|
||||||
if _, exists := desiredPeerMap[currentKey]; !exists {
|
|
||||||
// Parse the key back to get the original format for removal
|
|
||||||
removeConfig := fmt.Sprintf("public_key=%s\nremove=true", currentKey)
|
|
||||||
if err := s.device.IpcSet(removeConfig); err != nil {
|
|
||||||
logger.Warn("Failed to remove peer %s during sync: %v", currentKey, err)
|
|
||||||
} else {
|
|
||||||
logger.Info("Removed peer %s during sync", currentKey)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add peers that are missing
|
|
||||||
for normalizedKey, peer := range desiredPeerMap {
|
|
||||||
if _, exists := currentPeerKeys[normalizedKey]; !exists {
|
|
||||||
if err := s.addPeerToDevice(peer); err != nil {
|
|
||||||
logger.Warn("Failed to add peer %s during sync: %v", peer.PublicKey, err)
|
|
||||||
} else {
|
|
||||||
logger.Info("Added peer %s during sync", peer.PublicKey)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// syncTargets synchronizes the current targets with the desired state
|
|
||||||
// It removes targets not in the desired list and adds missing ones
|
|
||||||
func (s *WireGuardService) syncTargets(desiredTargets []Target) error {
|
|
||||||
if s.tnet == nil {
|
|
||||||
// Native interface mode - proxy features not available, skip silently
|
|
||||||
logger.Debug("Skipping target sync - using native interface (no proxy support)")
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Get current rules from the proxy handler
|
|
||||||
currentRules := s.tnet.GetProxySubnetRules()
|
|
||||||
|
|
||||||
// Build a map of current rules by source+dest prefix
|
|
||||||
type ruleKey struct {
|
|
||||||
sourcePrefix string
|
|
||||||
destPrefix string
|
|
||||||
}
|
|
||||||
currentRuleMap := make(map[ruleKey]bool)
|
|
||||||
for _, rule := range currentRules {
|
|
||||||
key := ruleKey{
|
|
||||||
sourcePrefix: rule.SourcePrefix.String(),
|
|
||||||
destPrefix: rule.DestPrefix.String(),
|
|
||||||
}
|
|
||||||
currentRuleMap[key] = true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Build a map of desired targets
|
|
||||||
desiredTargetMap := make(map[ruleKey]Target)
|
|
||||||
for _, target := range desiredTargets {
|
|
||||||
key := ruleKey{
|
|
||||||
sourcePrefix: target.SourcePrefix,
|
|
||||||
destPrefix: target.DestPrefix,
|
|
||||||
}
|
|
||||||
desiredTargetMap[key] = target
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove targets that are not in the desired list
|
|
||||||
for _, rule := range currentRules {
|
|
||||||
key := ruleKey{
|
|
||||||
sourcePrefix: rule.SourcePrefix.String(),
|
|
||||||
destPrefix: rule.DestPrefix.String(),
|
|
||||||
}
|
|
||||||
if _, exists := desiredTargetMap[key]; !exists {
|
|
||||||
s.tnet.RemoveProxySubnetRule(rule.SourcePrefix, rule.DestPrefix)
|
|
||||||
logger.Info("Removed target %s -> %s during sync", rule.SourcePrefix.String(), rule.DestPrefix.String())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add targets that are missing
|
|
||||||
for key, target := range desiredTargetMap {
|
|
||||||
if _, exists := currentRuleMap[key]; !exists {
|
|
||||||
sourcePrefix, err := netip.ParsePrefix(target.SourcePrefix)
|
|
||||||
if err != nil {
|
|
||||||
logger.Warn("Invalid source prefix %s during sync: %v", target.SourcePrefix, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
destPrefix, err := netip.ParsePrefix(target.DestPrefix)
|
|
||||||
if err != nil {
|
|
||||||
logger.Warn("Invalid dest prefix %s during sync: %v", target.DestPrefix, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
var portRanges []netstack2.PortRange
|
|
||||||
for _, pr := range target.PortRange {
|
|
||||||
portRanges = append(portRanges, netstack2.PortRange{
|
|
||||||
Min: pr.Min,
|
|
||||||
Max: pr.Max,
|
|
||||||
Protocol: pr.Protocol,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
s.tnet.AddProxySubnetRule(sourcePrefix, destPrefix, target.RewriteTo, portRanges, target.DisableIcmp)
|
|
||||||
logger.Info("Added target %s -> %s during sync", target.SourcePrefix, target.DestPrefix)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *WireGuardService) ensureWireguardInterface(wgconfig WgConfig) error {
|
func (s *WireGuardService) ensureWireguardInterface(wgconfig WgConfig) error {
|
||||||
s.mu.Lock()
|
s.mu.Lock()
|
||||||
|
|
||||||
@@ -873,19 +695,6 @@ func (s *WireGuardService) ensureWireguardPeers(peers []Peer) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// resolveSourcePrefixes returns the effective list of source prefixes for a target,
|
|
||||||
// supporting both the legacy single SourcePrefix field and the new SourcePrefixes array.
|
|
||||||
// If SourcePrefixes is non-empty it takes precedence; otherwise SourcePrefix is used.
|
|
||||||
func resolveSourcePrefixes(target Target) []string {
|
|
||||||
if len(target.SourcePrefixes) > 0 {
|
|
||||||
return target.SourcePrefixes
|
|
||||||
}
|
|
||||||
if target.SourcePrefix != "" {
|
|
||||||
return []string{target.SourcePrefix}
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *WireGuardService) ensureTargets(targets []Target) error {
|
func (s *WireGuardService) ensureTargets(targets []Target) error {
|
||||||
if s.tnet == nil {
|
if s.tnet == nil {
|
||||||
// Native interface mode - proxy features not available, skip silently
|
// Native interface mode - proxy features not available, skip silently
|
||||||
@@ -894,6 +703,11 @@ func (s *WireGuardService) ensureTargets(targets []Target) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, target := range targets {
|
for _, target := range targets {
|
||||||
|
sourcePrefix, err := netip.ParsePrefix(target.SourcePrefix)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("invalid CIDR %s: %v", target.SourcePrefix, err)
|
||||||
|
}
|
||||||
|
|
||||||
destPrefix, err := netip.ParsePrefix(target.DestPrefix)
|
destPrefix, err := netip.ParsePrefix(target.DestPrefix)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("invalid CIDR %s: %v", target.DestPrefix, err)
|
return fmt.Errorf("invalid CIDR %s: %v", target.DestPrefix, err)
|
||||||
@@ -908,14 +722,9 @@ func (s *WireGuardService) ensureTargets(targets []Target) error {
|
|||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, sp := range resolveSourcePrefixes(target) {
|
s.tnet.AddProxySubnetRule(sourcePrefix, destPrefix, target.RewriteTo, portRanges, target.DisableIcmp)
|
||||||
sourcePrefix, err := netip.ParsePrefix(sp)
|
|
||||||
if err != nil {
|
logger.Info("Added target subnet from %s to %s rewrite to %s with port ranges: %v", target.SourcePrefix, target.DestPrefix, target.RewriteTo, target.PortRange)
|
||||||
return fmt.Errorf("invalid CIDR %s: %v", sp, err)
|
|
||||||
}
|
|
||||||
s.tnet.AddProxySubnetRule(sourcePrefix, destPrefix, target.RewriteTo, portRanges, target.DisableIcmp)
|
|
||||||
logger.Info("Added target subnet from %s to %s rewrite to %s with port ranges: %v", sp, target.DestPrefix, target.RewriteTo, target.PortRange)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
@@ -1234,7 +1043,7 @@ func (s *WireGuardService) processPeerBandwidth(publicKey string, rxBytes, txByt
|
|||||||
BytesOut: bytesOutMB,
|
BytesOut: bytesOutMB,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -1285,6 +1094,12 @@ func (s *WireGuardService) handleAddTarget(msg websocket.WSMessage) {
|
|||||||
|
|
||||||
// Process all targets
|
// Process all targets
|
||||||
for _, target := range targets {
|
for _, target := range targets {
|
||||||
|
sourcePrefix, err := netip.ParsePrefix(target.SourcePrefix)
|
||||||
|
if err != nil {
|
||||||
|
logger.Info("Invalid CIDR %s: %v", target.SourcePrefix, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
destPrefix, err := netip.ParsePrefix(target.DestPrefix)
|
destPrefix, err := netip.ParsePrefix(target.DestPrefix)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Info("Invalid CIDR %s: %v", target.DestPrefix, err)
|
logger.Info("Invalid CIDR %s: %v", target.DestPrefix, err)
|
||||||
@@ -1294,21 +1109,15 @@ func (s *WireGuardService) handleAddTarget(msg websocket.WSMessage) {
|
|||||||
var portRanges []netstack2.PortRange
|
var portRanges []netstack2.PortRange
|
||||||
for _, pr := range target.PortRange {
|
for _, pr := range target.PortRange {
|
||||||
portRanges = append(portRanges, netstack2.PortRange{
|
portRanges = append(portRanges, netstack2.PortRange{
|
||||||
Min: pr.Min,
|
Min: pr.Min,
|
||||||
Max: pr.Max,
|
Max: pr.Max,
|
||||||
Protocol: pr.Protocol,
|
Protocol: pr.Protocol,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, sp := range resolveSourcePrefixes(target) {
|
s.tnet.AddProxySubnetRule(sourcePrefix, destPrefix, target.RewriteTo, portRanges, target.DisableIcmp)
|
||||||
sourcePrefix, err := netip.ParsePrefix(sp)
|
|
||||||
if err != nil {
|
logger.Info("Added target subnet from %s to %s rewrite to %s with port ranges: %v", target.SourcePrefix, target.DestPrefix, target.RewriteTo, target.PortRange)
|
||||||
logger.Info("Invalid CIDR %s: %v", sp, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
s.tnet.AddProxySubnetRule(sourcePrefix, destPrefix, target.RewriteTo, portRanges, target.DisableIcmp)
|
|
||||||
logger.Info("Added target subnet from %s to %s rewrite to %s with port ranges: %v", sp, target.DestPrefix, target.RewriteTo, target.PortRange)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1337,21 +1146,21 @@ func (s *WireGuardService) handleRemoveTarget(msg websocket.WSMessage) {
|
|||||||
|
|
||||||
// Process all targets
|
// Process all targets
|
||||||
for _, target := range targets {
|
for _, target := range targets {
|
||||||
|
sourcePrefix, err := netip.ParsePrefix(target.SourcePrefix)
|
||||||
|
if err != nil {
|
||||||
|
logger.Info("Invalid CIDR %s: %v", target.SourcePrefix, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
destPrefix, err := netip.ParsePrefix(target.DestPrefix)
|
destPrefix, err := netip.ParsePrefix(target.DestPrefix)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Info("Invalid CIDR %s: %v", target.DestPrefix, err)
|
logger.Info("Invalid CIDR %s: %v", target.DestPrefix, err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, sp := range resolveSourcePrefixes(target) {
|
s.tnet.RemoveProxySubnetRule(sourcePrefix, destPrefix)
|
||||||
sourcePrefix, err := netip.ParsePrefix(sp)
|
|
||||||
if err != nil {
|
logger.Info("Removed target subnet %s with destination %s", target.SourcePrefix, target.DestPrefix)
|
||||||
logger.Info("Invalid CIDR %s: %v", sp, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
s.tnet.RemoveProxySubnetRule(sourcePrefix, destPrefix)
|
|
||||||
logger.Info("Removed target subnet %s with destination %s", sp, target.DestPrefix)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1385,24 +1194,30 @@ func (s *WireGuardService) handleUpdateTarget(msg websocket.WSMessage) {
|
|||||||
|
|
||||||
// Process all update requests
|
// Process all update requests
|
||||||
for _, target := range requests.OldTargets {
|
for _, target := range requests.OldTargets {
|
||||||
|
sourcePrefix, err := netip.ParsePrefix(target.SourcePrefix)
|
||||||
|
if err != nil {
|
||||||
|
logger.Info("Invalid CIDR %s: %v", target.SourcePrefix, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
destPrefix, err := netip.ParsePrefix(target.DestPrefix)
|
destPrefix, err := netip.ParsePrefix(target.DestPrefix)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Info("Invalid CIDR %s: %v", target.DestPrefix, err)
|
logger.Info("Invalid CIDR %s: %v", target.DestPrefix, err)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, sp := range resolveSourcePrefixes(target) {
|
s.tnet.RemoveProxySubnetRule(sourcePrefix, destPrefix)
|
||||||
sourcePrefix, err := netip.ParsePrefix(sp)
|
logger.Info("Removed target subnet %s with destination %s", target.SourcePrefix, target.DestPrefix)
|
||||||
if err != nil {
|
|
||||||
logger.Info("Invalid CIDR %s: %v", sp, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
s.tnet.RemoveProxySubnetRule(sourcePrefix, destPrefix)
|
|
||||||
logger.Info("Removed target subnet %s with destination %s", sp, target.DestPrefix)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, target := range requests.NewTargets {
|
for _, target := range requests.NewTargets {
|
||||||
|
// Now add the new target
|
||||||
|
sourcePrefix, err := netip.ParsePrefix(target.SourcePrefix)
|
||||||
|
if err != nil {
|
||||||
|
logger.Info("Invalid CIDR %s: %v", target.SourcePrefix, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
destPrefix, err := netip.ParsePrefix(target.DestPrefix)
|
destPrefix, err := netip.ParsePrefix(target.DestPrefix)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Info("Invalid CIDR %s: %v", target.DestPrefix, err)
|
logger.Info("Invalid CIDR %s: %v", target.DestPrefix, err)
|
||||||
@@ -1412,21 +1227,14 @@ func (s *WireGuardService) handleUpdateTarget(msg websocket.WSMessage) {
|
|||||||
var portRanges []netstack2.PortRange
|
var portRanges []netstack2.PortRange
|
||||||
for _, pr := range target.PortRange {
|
for _, pr := range target.PortRange {
|
||||||
portRanges = append(portRanges, netstack2.PortRange{
|
portRanges = append(portRanges, netstack2.PortRange{
|
||||||
Min: pr.Min,
|
Min: pr.Min,
|
||||||
Max: pr.Max,
|
Max: pr.Max,
|
||||||
Protocol: pr.Protocol,
|
Protocol: pr.Protocol,
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, sp := range resolveSourcePrefixes(target) {
|
s.tnet.AddProxySubnetRule(sourcePrefix, destPrefix, target.RewriteTo, portRanges, target.DisableIcmp)
|
||||||
sourcePrefix, err := netip.ParsePrefix(sp)
|
logger.Info("Added target subnet from %s to %s rewrite to %s with port ranges: %v", target.SourcePrefix, target.DestPrefix, target.RewriteTo, target.PortRange)
|
||||||
if err != nil {
|
|
||||||
logger.Info("Invalid CIDR %s: %v", sp, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
s.tnet.AddProxySubnetRule(sourcePrefix, destPrefix, target.RewriteTo, portRanges, target.DisableIcmp)
|
|
||||||
logger.Info("Added target subnet from %s to %s rewrite to %s with port ranges: %v", sp, target.DestPrefix, target.RewriteTo, target.PortRange)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
66
common.go
66
common.go
@@ -5,7 +5,6 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net"
|
|
||||||
"os"
|
"os"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"strings"
|
"strings"
|
||||||
@@ -364,62 +363,27 @@ func parseTargetData(data interface{}) (TargetData, error) {
|
|||||||
return targetData, nil
|
return targetData, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// parseTargetString parses a target string in the format "listenPort:host:targetPort"
|
|
||||||
// It properly handles IPv6 addresses which must be in brackets: "listenPort:[ipv6]:targetPort"
|
|
||||||
// Examples:
|
|
||||||
// - IPv4: "3001:192.168.1.1:80"
|
|
||||||
// - IPv6: "3001:[::1]:8080" or "3001:[fd70:1452:b736:4dd5:caca:7db9:c588:f5b3]:80"
|
|
||||||
//
|
|
||||||
// Returns listenPort, targetAddress (in host:port format suitable for net.Dial), and error
|
|
||||||
func parseTargetString(target string) (int, string, error) {
|
|
||||||
// Find the first colon to extract the listen port
|
|
||||||
firstColon := strings.Index(target, ":")
|
|
||||||
if firstColon == -1 {
|
|
||||||
return 0, "", fmt.Errorf("invalid target format, no colon found: %s", target)
|
|
||||||
}
|
|
||||||
|
|
||||||
listenPortStr := target[:firstColon]
|
|
||||||
var listenPort int
|
|
||||||
_, err := fmt.Sscanf(listenPortStr, "%d", &listenPort)
|
|
||||||
if err != nil {
|
|
||||||
return 0, "", fmt.Errorf("invalid listen port: %s", listenPortStr)
|
|
||||||
}
|
|
||||||
if listenPort <= 0 || listenPort > 65535 {
|
|
||||||
return 0, "", fmt.Errorf("listen port out of range: %d", listenPort)
|
|
||||||
}
|
|
||||||
|
|
||||||
// The remainder is host:targetPort - use net.SplitHostPort which handles IPv6 brackets
|
|
||||||
remainder := target[firstColon+1:]
|
|
||||||
host, targetPort, err := net.SplitHostPort(remainder)
|
|
||||||
if err != nil {
|
|
||||||
return 0, "", fmt.Errorf("invalid host:port format '%s': %w", remainder, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reject empty host or target port
|
|
||||||
if host == "" {
|
|
||||||
return 0, "", fmt.Errorf("empty host in target: %s", target)
|
|
||||||
}
|
|
||||||
if targetPort == "" {
|
|
||||||
return 0, "", fmt.Errorf("empty target port in target: %s", target)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Reconstruct the target address using JoinHostPort (handles IPv6 properly)
|
|
||||||
targetAddr := net.JoinHostPort(host, targetPort)
|
|
||||||
|
|
||||||
return listenPort, targetAddr, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func updateTargets(pm *proxy.ProxyManager, action string, tunnelIP string, proto string, targetData TargetData) error {
|
func updateTargets(pm *proxy.ProxyManager, action string, tunnelIP string, proto string, targetData TargetData) error {
|
||||||
for _, t := range targetData.Targets {
|
for _, t := range targetData.Targets {
|
||||||
// Parse the target string, handling both IPv4 and IPv6 addresses
|
// Split the first number off of the target with : separator and use as the port
|
||||||
port, target, err := parseTargetString(t)
|
parts := strings.Split(t, ":")
|
||||||
|
if len(parts) != 3 {
|
||||||
|
logger.Info("Invalid target format: %s", t)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get the port as an int
|
||||||
|
port := 0
|
||||||
|
_, err := fmt.Sscanf(parts[0], "%d", &port)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Info("Invalid target format: %s (%v)", t, err)
|
logger.Info("Invalid port: %s", parts[0])
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
switch action {
|
switch action {
|
||||||
case "add":
|
case "add":
|
||||||
|
target := parts[1] + ":" + parts[2]
|
||||||
|
|
||||||
// Call updown script if provided
|
// Call updown script if provided
|
||||||
processedTarget := target
|
processedTarget := target
|
||||||
if updownScript != "" {
|
if updownScript != "" {
|
||||||
@@ -446,6 +410,8 @@ func updateTargets(pm *proxy.ProxyManager, action string, tunnelIP string, proto
|
|||||||
case "remove":
|
case "remove":
|
||||||
logger.Info("Removing target with port %d", port)
|
logger.Info("Removing target with port %d", port)
|
||||||
|
|
||||||
|
target := parts[1] + ":" + parts[2]
|
||||||
|
|
||||||
// Call updown script if provided
|
// Call updown script if provided
|
||||||
if updownScript != "" {
|
if updownScript != "" {
|
||||||
_, err := executeUpdownScript(action, proto, target)
|
_, err := executeUpdownScript(action, proto, target)
|
||||||
@@ -454,7 +420,7 @@ func updateTargets(pm *proxy.ProxyManager, action string, tunnelIP string, proto
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
err = pm.RemoveTarget(proto, tunnelIP, port)
|
err := pm.RemoveTarget(proto, tunnelIP, port)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Error("Failed to remove target: %v", err)
|
logger.Error("Failed to remove target: %v", err)
|
||||||
return err
|
return err
|
||||||
|
|||||||
212
common_test.go
212
common_test.go
@@ -1,212 +0,0 @@
|
|||||||
package main
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net"
|
|
||||||
"testing"
|
|
||||||
)
|
|
||||||
|
|
||||||
func TestParseTargetString(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
input string
|
|
||||||
wantListenPort int
|
|
||||||
wantTargetAddr string
|
|
||||||
wantErr bool
|
|
||||||
}{
|
|
||||||
// IPv4 test cases
|
|
||||||
{
|
|
||||||
name: "valid IPv4 basic",
|
|
||||||
input: "3001:192.168.1.1:80",
|
|
||||||
wantListenPort: 3001,
|
|
||||||
wantTargetAddr: "192.168.1.1:80",
|
|
||||||
wantErr: false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "valid IPv4 localhost",
|
|
||||||
input: "8080:127.0.0.1:3000",
|
|
||||||
wantListenPort: 8080,
|
|
||||||
wantTargetAddr: "127.0.0.1:3000",
|
|
||||||
wantErr: false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "valid IPv4 same ports",
|
|
||||||
input: "443:10.0.0.1:443",
|
|
||||||
wantListenPort: 443,
|
|
||||||
wantTargetAddr: "10.0.0.1:443",
|
|
||||||
wantErr: false,
|
|
||||||
},
|
|
||||||
|
|
||||||
// IPv6 test cases
|
|
||||||
{
|
|
||||||
name: "valid IPv6 loopback",
|
|
||||||
input: "3001:[::1]:8080",
|
|
||||||
wantListenPort: 3001,
|
|
||||||
wantTargetAddr: "[::1]:8080",
|
|
||||||
wantErr: false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "valid IPv6 full address",
|
|
||||||
input: "80:[fd70:1452:b736:4dd5:caca:7db9:c588:f5b3]:8080",
|
|
||||||
wantListenPort: 80,
|
|
||||||
wantTargetAddr: "[fd70:1452:b736:4dd5:caca:7db9:c588:f5b3]:8080",
|
|
||||||
wantErr: false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "valid IPv6 link-local",
|
|
||||||
input: "443:[fe80::1]:443",
|
|
||||||
wantListenPort: 443,
|
|
||||||
wantTargetAddr: "[fe80::1]:443",
|
|
||||||
wantErr: false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "valid IPv6 all zeros compressed",
|
|
||||||
input: "8000:[::]:9000",
|
|
||||||
wantListenPort: 8000,
|
|
||||||
wantTargetAddr: "[::]:9000",
|
|
||||||
wantErr: false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "valid IPv6 mixed notation",
|
|
||||||
input: "5000:[::ffff:192.168.1.1]:6000",
|
|
||||||
wantListenPort: 5000,
|
|
||||||
wantTargetAddr: "[::ffff:192.168.1.1]:6000",
|
|
||||||
wantErr: false,
|
|
||||||
},
|
|
||||||
|
|
||||||
// Hostname test cases
|
|
||||||
{
|
|
||||||
name: "valid hostname",
|
|
||||||
input: "8080:example.com:80",
|
|
||||||
wantListenPort: 8080,
|
|
||||||
wantTargetAddr: "example.com:80",
|
|
||||||
wantErr: false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "valid hostname with subdomain",
|
|
||||||
input: "443:api.example.com:8443",
|
|
||||||
wantListenPort: 443,
|
|
||||||
wantTargetAddr: "api.example.com:8443",
|
|
||||||
wantErr: false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "valid localhost hostname",
|
|
||||||
input: "3000:localhost:3000",
|
|
||||||
wantListenPort: 3000,
|
|
||||||
wantTargetAddr: "localhost:3000",
|
|
||||||
wantErr: false,
|
|
||||||
},
|
|
||||||
|
|
||||||
// Error cases
|
|
||||||
{
|
|
||||||
name: "invalid - no colons",
|
|
||||||
input: "invalid",
|
|
||||||
wantErr: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "invalid - empty string",
|
|
||||||
input: "",
|
|
||||||
wantErr: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "invalid - non-numeric listen port",
|
|
||||||
input: "abc:192.168.1.1:80",
|
|
||||||
wantErr: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "invalid - missing target port",
|
|
||||||
input: "3001:192.168.1.1",
|
|
||||||
wantErr: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "invalid - IPv6 without brackets",
|
|
||||||
input: "3001:fd70:1452:b736:4dd5:caca:7db9:c588:f5b3:80",
|
|
||||||
wantErr: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "invalid - only listen port",
|
|
||||||
input: "3001:",
|
|
||||||
wantErr: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "invalid - missing host",
|
|
||||||
input: "3001::80",
|
|
||||||
wantErr: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "invalid - IPv6 unclosed bracket",
|
|
||||||
input: "3001:[::1:80",
|
|
||||||
wantErr: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "invalid - listen port zero",
|
|
||||||
input: "0:192.168.1.1:80",
|
|
||||||
wantErr: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "invalid - listen port negative",
|
|
||||||
input: "-1:192.168.1.1:80",
|
|
||||||
wantErr: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "invalid - listen port out of range",
|
|
||||||
input: "70000:192.168.1.1:80",
|
|
||||||
wantErr: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "invalid - empty target port",
|
|
||||||
input: "3001:192.168.1.1:",
|
|
||||||
wantErr: true,
|
|
||||||
},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
listenPort, targetAddr, err := parseTargetString(tt.input)
|
|
||||||
|
|
||||||
if (err != nil) != tt.wantErr {
|
|
||||||
t.Errorf("parseTargetString(%q) error = %v, wantErr %v", tt.input, err, tt.wantErr)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if tt.wantErr {
|
|
||||||
return // Don't check other values if we expected an error
|
|
||||||
}
|
|
||||||
|
|
||||||
if listenPort != tt.wantListenPort {
|
|
||||||
t.Errorf("parseTargetString(%q) listenPort = %d, want %d", tt.input, listenPort, tt.wantListenPort)
|
|
||||||
}
|
|
||||||
|
|
||||||
if targetAddr != tt.wantTargetAddr {
|
|
||||||
t.Errorf("parseTargetString(%q) targetAddr = %q, want %q", tt.input, targetAddr, tt.wantTargetAddr)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// TestParseTargetStringNetDialCompatibility verifies that the output is compatible with net.Dial
|
|
||||||
func TestParseTargetStringNetDialCompatibility(t *testing.T) {
|
|
||||||
tests := []struct {
|
|
||||||
name string
|
|
||||||
input string
|
|
||||||
}{
|
|
||||||
{"IPv4", "8080:127.0.0.1:80"},
|
|
||||||
{"IPv6 loopback", "8080:[::1]:80"},
|
|
||||||
{"IPv6 full", "8080:[2001:db8::1]:80"},
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, tt := range tests {
|
|
||||||
t.Run(tt.name, func(t *testing.T) {
|
|
||||||
_, targetAddr, err := parseTargetString(tt.input)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatalf("parseTargetString(%q) unexpected error: %v", tt.input, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Verify the format is valid for net.Dial by checking it can be split back
|
|
||||||
// This doesn't actually dial, just validates the format
|
|
||||||
_, _, err = net.SplitHostPort(targetAddr)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("parseTargetString(%q) produced invalid net.Dial format %q: %v", tt.input, targetAddr, err)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
@@ -35,7 +35,7 @@
|
|||||||
inherit version;
|
inherit version;
|
||||||
src = pkgs.nix-gitignore.gitignoreSource [ ] ./.;
|
src = pkgs.nix-gitignore.gitignoreSource [ ] ./.;
|
||||||
|
|
||||||
vendorHash = "sha256-vy6Dqjek7pLdASbCrM9snq5Dt9lbwNJ0AuQboy1JWNQ=";
|
vendorHash = "sha256-Sib6AUCpMgxlMpTc2Esvs+UU0yduVOxWUgT44FHAI+k=";
|
||||||
|
|
||||||
nativeInstallCheckInputs = [ pkgs.versionCheckHook ];
|
nativeInstallCheckInputs = [ pkgs.versionCheckHook ];
|
||||||
|
|
||||||
|
|||||||
119
get-newt.sh
119
get-newt.sh
@@ -1,7 +1,7 @@
|
|||||||
#!/bin/sh
|
#!/bin/bash
|
||||||
|
|
||||||
# Get Newt - Cross-platform installation script
|
# Get Newt - Cross-platform installation script
|
||||||
# Usage: curl -fsSL https://raw.githubusercontent.com/fosrl/newt/refs/heads/main/get-newt.sh | sh
|
# Usage: curl -fsSL https://raw.githubusercontent.com/fosrl/newt/refs/heads/main/get-newt.sh | bash
|
||||||
|
|
||||||
set -e
|
set -e
|
||||||
|
|
||||||
@@ -17,15 +17,15 @@ GITHUB_API_URL="https://api.github.com/repos/${REPO}/releases/latest"
|
|||||||
|
|
||||||
# Function to print colored output
|
# Function to print colored output
|
||||||
print_status() {
|
print_status() {
|
||||||
printf '%b[INFO]%b %s\n' "${GREEN}" "${NC}" "$1"
|
echo -e "${GREEN}[INFO]${NC} $1"
|
||||||
}
|
}
|
||||||
|
|
||||||
print_warning() {
|
print_warning() {
|
||||||
printf '%b[WARN]%b %s\n' "${YELLOW}" "${NC}" "$1"
|
echo -e "${YELLOW}[WARN]${NC} $1"
|
||||||
}
|
}
|
||||||
|
|
||||||
print_error() {
|
print_error() {
|
||||||
printf '%b[ERROR]%b %s\n' "${RED}" "${NC}" "$1"
|
echo -e "${RED}[ERROR]${NC} $1"
|
||||||
}
|
}
|
||||||
|
|
||||||
# Function to get latest version from GitHub API
|
# Function to get latest version from GitHub API
|
||||||
@@ -113,34 +113,16 @@ get_install_dir() {
|
|||||||
if [ "$OS" = "windows" ]; then
|
if [ "$OS" = "windows" ]; then
|
||||||
echo "$HOME/bin"
|
echo "$HOME/bin"
|
||||||
else
|
else
|
||||||
# Prefer /usr/local/bin for system-wide installation
|
# Try to use a directory in PATH, fallback to ~/.local/bin
|
||||||
echo "/usr/local/bin"
|
if echo "$PATH" | grep -q "/usr/local/bin"; then
|
||||||
fi
|
if [ -w "/usr/local/bin" ] 2>/dev/null; then
|
||||||
}
|
echo "/usr/local/bin"
|
||||||
|
else
|
||||||
# Check if we need sudo for installation
|
echo "$HOME/.local/bin"
|
||||||
needs_sudo() {
|
fi
|
||||||
local install_dir="$1"
|
|
||||||
if [ -w "$install_dir" ] 2>/dev/null; then
|
|
||||||
return 1 # No sudo needed
|
|
||||||
else
|
|
||||||
return 0 # Sudo needed
|
|
||||||
fi
|
|
||||||
}
|
|
||||||
|
|
||||||
# Get the appropriate command prefix (sudo or empty)
|
|
||||||
get_sudo_cmd() {
|
|
||||||
local install_dir="$1"
|
|
||||||
if needs_sudo "$install_dir"; then
|
|
||||||
if command -v sudo >/dev/null 2>&1; then
|
|
||||||
echo "sudo"
|
|
||||||
else
|
else
|
||||||
print_error "Cannot write to ${install_dir} and sudo is not available."
|
echo "$HOME/.local/bin"
|
||||||
print_error "Please run this script as root or install sudo."
|
|
||||||
exit 1
|
|
||||||
fi
|
fi
|
||||||
else
|
|
||||||
echo ""
|
|
||||||
fi
|
fi
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -148,24 +130,21 @@ get_sudo_cmd() {
|
|||||||
install_newt() {
|
install_newt() {
|
||||||
local platform="$1"
|
local platform="$1"
|
||||||
local install_dir="$2"
|
local install_dir="$2"
|
||||||
local sudo_cmd="$3"
|
|
||||||
local binary_name="newt_${platform}"
|
local binary_name="newt_${platform}"
|
||||||
local exe_suffix=""
|
local exe_suffix=""
|
||||||
|
|
||||||
# Add .exe suffix for Windows
|
# Add .exe suffix for Windows
|
||||||
case "$platform" in
|
if [[ "$platform" == *"windows"* ]]; then
|
||||||
*windows*)
|
binary_name="${binary_name}.exe"
|
||||||
binary_name="${binary_name}.exe"
|
exe_suffix=".exe"
|
||||||
exe_suffix=".exe"
|
fi
|
||||||
;;
|
|
||||||
esac
|
|
||||||
|
|
||||||
local download_url="${BASE_URL}/${binary_name}"
|
local download_url="${BASE_URL}/${binary_name}"
|
||||||
local temp_file="/tmp/newt${exe_suffix}"
|
local temp_file="/tmp/newt${exe_suffix}"
|
||||||
local final_path="${install_dir}/newt${exe_suffix}"
|
local final_path="${install_dir}/newt${exe_suffix}"
|
||||||
|
|
||||||
print_status "Downloading newt from ${download_url}"
|
print_status "Downloading newt from ${download_url}"
|
||||||
|
|
||||||
# Download the binary
|
# Download the binary
|
||||||
if command -v curl >/dev/null 2>&1; then
|
if command -v curl >/dev/null 2>&1; then
|
||||||
curl -fsSL "$download_url" -o "$temp_file"
|
curl -fsSL "$download_url" -o "$temp_file"
|
||||||
@@ -175,22 +154,18 @@ install_newt() {
|
|||||||
print_error "Neither curl nor wget is available. Please install one of them."
|
print_error "Neither curl nor wget is available. Please install one of them."
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# Make executable before moving
|
|
||||||
chmod +x "$temp_file"
|
|
||||||
|
|
||||||
# Create install directory if it doesn't exist
|
# Create install directory if it doesn't exist
|
||||||
if [ -n "$sudo_cmd" ]; then
|
mkdir -p "$install_dir"
|
||||||
$sudo_cmd mkdir -p "$install_dir"
|
|
||||||
print_status "Using sudo to install to ${install_dir}"
|
# Move binary to install directory
|
||||||
$sudo_cmd mv "$temp_file" "$final_path"
|
mv "$temp_file" "$final_path"
|
||||||
else
|
|
||||||
mkdir -p "$install_dir"
|
# Make executable (not needed on Windows, but doesn't hurt)
|
||||||
mv "$temp_file" "$final_path"
|
chmod +x "$final_path"
|
||||||
fi
|
|
||||||
|
|
||||||
print_status "newt installed to ${final_path}"
|
print_status "newt installed to ${final_path}"
|
||||||
|
|
||||||
# Check if install directory is in PATH
|
# Check if install directory is in PATH
|
||||||
if ! echo "$PATH" | grep -q "$install_dir"; then
|
if ! echo "$PATH" | grep -q "$install_dir"; then
|
||||||
print_warning "Install directory ${install_dir} is not in your PATH."
|
print_warning "Install directory ${install_dir} is not in your PATH."
|
||||||
@@ -204,9 +179,9 @@ verify_installation() {
|
|||||||
local install_dir="$1"
|
local install_dir="$1"
|
||||||
local exe_suffix=""
|
local exe_suffix=""
|
||||||
|
|
||||||
case "$PLATFORM" in
|
if [[ "$PLATFORM" == *"windows"* ]]; then
|
||||||
*windows*) exe_suffix=".exe" ;;
|
exe_suffix=".exe"
|
||||||
esac
|
fi
|
||||||
|
|
||||||
local newt_path="${install_dir}/newt${exe_suffix}"
|
local newt_path="${install_dir}/newt${exe_suffix}"
|
||||||
|
|
||||||
@@ -223,36 +198,34 @@ verify_installation() {
|
|||||||
# Main installation process
|
# Main installation process
|
||||||
main() {
|
main() {
|
||||||
print_status "Installing latest version of newt..."
|
print_status "Installing latest version of newt..."
|
||||||
|
|
||||||
# Get latest version
|
# Get latest version
|
||||||
print_status "Fetching latest version from GitHub..."
|
print_status "Fetching latest version from GitHub..."
|
||||||
VERSION=$(get_latest_version)
|
VERSION=$(get_latest_version)
|
||||||
print_status "Latest version: v${VERSION}"
|
print_status "Latest version: v${VERSION}"
|
||||||
|
|
||||||
# Set base URL with the fetched version
|
# Set base URL with the fetched version
|
||||||
BASE_URL="https://github.com/${REPO}/releases/download/${VERSION}"
|
BASE_URL="https://github.com/${REPO}/releases/download/${VERSION}"
|
||||||
|
|
||||||
# Detect platform
|
# Detect platform
|
||||||
PLATFORM=$(detect_platform)
|
PLATFORM=$(detect_platform)
|
||||||
print_status "Detected platform: ${PLATFORM}"
|
print_status "Detected platform: ${PLATFORM}"
|
||||||
|
|
||||||
# Get install directory
|
# Get install directory
|
||||||
INSTALL_DIR=$(get_install_dir)
|
INSTALL_DIR=$(get_install_dir)
|
||||||
print_status "Install directory: ${INSTALL_DIR}"
|
print_status "Install directory: ${INSTALL_DIR}"
|
||||||
|
|
||||||
# Check if we need sudo
|
|
||||||
SUDO_CMD=$(get_sudo_cmd "$INSTALL_DIR")
|
|
||||||
if [ -n "$SUDO_CMD" ]; then
|
|
||||||
print_status "Root privileges required for installation to ${INSTALL_DIR}"
|
|
||||||
fi
|
|
||||||
|
|
||||||
# Install newt
|
# Install newt
|
||||||
install_newt "$PLATFORM" "$INSTALL_DIR" "$SUDO_CMD"
|
install_newt "$PLATFORM" "$INSTALL_DIR"
|
||||||
|
|
||||||
# Verify installation
|
# Verify installation
|
||||||
if verify_installation "$INSTALL_DIR"; then
|
if verify_installation "$INSTALL_DIR"; then
|
||||||
print_status "newt is ready to use!"
|
print_status "newt is ready to use!"
|
||||||
print_status "Run 'newt --help' to get started"
|
if [[ "$PLATFORM" == *"windows"* ]]; then
|
||||||
|
print_status "Run 'newt --help' to get started"
|
||||||
|
else
|
||||||
|
print_status "Run 'newt --help' to get started"
|
||||||
|
fi
|
||||||
else
|
else
|
||||||
exit 1
|
exit 1
|
||||||
fi
|
fi
|
||||||
|
|||||||
49
go.mod
49
go.mod
@@ -1,30 +1,29 @@
|
|||||||
module github.com/fosrl/newt
|
module github.com/fosrl/newt
|
||||||
|
|
||||||
go 1.25.0
|
go 1.25
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/docker/docker v28.5.2+incompatible
|
github.com/docker/docker v28.5.2+incompatible
|
||||||
github.com/gaissmai/bart v0.26.0
|
|
||||||
github.com/gorilla/websocket v1.5.3
|
github.com/gorilla/websocket v1.5.3
|
||||||
github.com/prometheus/client_golang v1.23.2
|
github.com/prometheus/client_golang v1.23.2
|
||||||
github.com/vishvananda/netlink v1.3.1
|
github.com/vishvananda/netlink v1.3.1
|
||||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.66.0
|
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0
|
||||||
go.opentelemetry.io/contrib/instrumentation/runtime v0.66.0
|
go.opentelemetry.io/contrib/instrumentation/runtime v0.64.0
|
||||||
go.opentelemetry.io/otel v1.41.0
|
go.opentelemetry.io/otel v1.39.0
|
||||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.41.0
|
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.39.0
|
||||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.41.0
|
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.39.0
|
||||||
go.opentelemetry.io/otel/exporters/prometheus v0.63.0
|
go.opentelemetry.io/otel/exporters/prometheus v0.61.0
|
||||||
go.opentelemetry.io/otel/metric v1.41.0
|
go.opentelemetry.io/otel/metric v1.39.0
|
||||||
go.opentelemetry.io/otel/sdk v1.41.0
|
go.opentelemetry.io/otel/sdk v1.39.0
|
||||||
go.opentelemetry.io/otel/sdk/metric v1.41.0
|
go.opentelemetry.io/otel/sdk/metric v1.39.0
|
||||||
golang.org/x/crypto v0.48.0
|
golang.org/x/crypto v0.46.0
|
||||||
golang.org/x/exp v0.0.0-20251113190631-e25ba8c21ef6
|
golang.org/x/exp v0.0.0-20251113190631-e25ba8c21ef6
|
||||||
golang.org/x/net v0.51.0
|
golang.org/x/net v0.48.0
|
||||||
golang.org/x/sys v0.41.0
|
golang.org/x/sys v0.39.0
|
||||||
golang.zx2c4.com/wireguard v0.0.0-20250521234502-f333402bd9cb
|
golang.zx2c4.com/wireguard v0.0.0-20250521234502-f333402bd9cb
|
||||||
golang.zx2c4.com/wireguard/wgctrl v0.0.0-20241231184526-a9ab2273dd10
|
golang.zx2c4.com/wireguard/wgctrl v0.0.0-20241231184526-a9ab2273dd10
|
||||||
golang.zx2c4.com/wireguard/windows v0.5.3
|
golang.zx2c4.com/wireguard/windows v0.5.3
|
||||||
google.golang.org/grpc v1.79.1
|
google.golang.org/grpc v1.77.0
|
||||||
gopkg.in/yaml.v3 v3.0.1
|
gopkg.in/yaml.v3 v3.0.1
|
||||||
gvisor.dev/gvisor v0.0.0-20250503011706-39ed1f5ac29c
|
gvisor.dev/gvisor v0.0.0-20250503011706-39ed1f5ac29c
|
||||||
software.sslmate.com/src/go-pkcs12 v0.7.0
|
software.sslmate.com/src/go-pkcs12 v0.7.0
|
||||||
@@ -45,7 +44,7 @@ require (
|
|||||||
github.com/go-logr/stdr v1.2.2 // indirect
|
github.com/go-logr/stdr v1.2.2 // indirect
|
||||||
github.com/google/btree v1.1.3 // indirect
|
github.com/google/btree v1.1.3 // indirect
|
||||||
github.com/google/uuid v1.6.0 // indirect
|
github.com/google/uuid v1.6.0 // indirect
|
||||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 // indirect
|
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 // indirect
|
||||||
github.com/moby/docker-image-spec v1.3.1 // indirect
|
github.com/moby/docker-image-spec v1.3.1 // indirect
|
||||||
github.com/moby/sys/atomicwriter v0.1.0 // indirect
|
github.com/moby/sys/atomicwriter v0.1.0 // indirect
|
||||||
github.com/moby/term v0.5.2 // indirect
|
github.com/moby/term v0.5.2 // indirect
|
||||||
@@ -55,23 +54,23 @@ require (
|
|||||||
github.com/opencontainers/image-spec v1.1.0 // indirect
|
github.com/opencontainers/image-spec v1.1.0 // indirect
|
||||||
github.com/pkg/errors v0.9.1 // indirect
|
github.com/pkg/errors v0.9.1 // indirect
|
||||||
github.com/prometheus/client_model v0.6.2 // indirect
|
github.com/prometheus/client_model v0.6.2 // indirect
|
||||||
github.com/prometheus/common v0.67.5 // indirect
|
github.com/prometheus/common v0.67.4 // indirect
|
||||||
github.com/prometheus/otlptranslator v1.0.0 // indirect
|
github.com/prometheus/otlptranslator v1.0.0 // indirect
|
||||||
github.com/prometheus/procfs v0.19.2 // indirect
|
github.com/prometheus/procfs v0.19.2 // indirect
|
||||||
github.com/vishvananda/netns v0.0.5 // indirect
|
github.com/vishvananda/netns v0.0.5 // indirect
|
||||||
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
|
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
|
||||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.41.0 // indirect
|
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0 // indirect
|
||||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 // indirect
|
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 // indirect
|
||||||
go.opentelemetry.io/otel/trace v1.41.0 // indirect
|
go.opentelemetry.io/otel/trace v1.39.0 // indirect
|
||||||
go.opentelemetry.io/proto/otlp v1.9.0 // indirect
|
go.opentelemetry.io/proto/otlp v1.9.0 // indirect
|
||||||
go.yaml.in/yaml/v2 v2.4.3 // indirect
|
go.yaml.in/yaml/v2 v2.4.3 // indirect
|
||||||
golang.org/x/mod v0.32.0 // indirect
|
golang.org/x/mod v0.30.0 // indirect
|
||||||
golang.org/x/sync v0.19.0 // indirect
|
golang.org/x/sync v0.19.0 // indirect
|
||||||
golang.org/x/text v0.34.0 // indirect
|
golang.org/x/text v0.32.0 // indirect
|
||||||
golang.org/x/time v0.12.0 // indirect
|
golang.org/x/time v0.12.0 // indirect
|
||||||
golang.org/x/tools v0.41.0 // indirect
|
golang.org/x/tools v0.39.0 // indirect
|
||||||
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 // indirect
|
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 // indirect
|
||||||
google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57 // indirect
|
google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 // indirect
|
||||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57 // indirect
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 // indirect
|
||||||
google.golang.org/protobuf v1.36.11 // indirect
|
google.golang.org/protobuf v1.36.10 // indirect
|
||||||
)
|
)
|
||||||
|
|||||||
94
go.sum
94
go.sum
@@ -26,8 +26,6 @@ github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw
|
|||||||
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
|
||||||
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
|
||||||
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||||
github.com/gaissmai/bart v0.26.0 h1:xOZ57E9hJLBiQaSyeZa9wgWhGuzfGACgqp4BE77OkO0=
|
|
||||||
github.com/gaissmai/bart v0.26.0/go.mod h1:GREWQfTLRWz/c5FTOsIw+KkscuFkIV5t8Rp7Nd1Td5c=
|
|
||||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||||
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
|
||||||
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
|
||||||
@@ -43,8 +41,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
|
|||||||
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||||
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
|
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
|
||||||
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0 h1:HWRh5R2+9EifMyIHV7ZV+MIZqgz+PMpZ14Jynv3O2Zs=
|
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg=
|
||||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.28.0/go.mod h1:JfhWUomR1baixubs02l85lZYYOm7LV6om4ceouMv45c=
|
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4=
|
||||||
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
|
||||||
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
|
||||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||||
@@ -77,8 +75,8 @@ github.com/prometheus/client_golang v1.23.2 h1:Je96obch5RDVy3FDMndoUsjAhG5Edi49h
|
|||||||
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
|
github.com/prometheus/client_golang v1.23.2/go.mod h1:Tb1a6LWHB3/SPIzCoaDXI4I8UHKeFTEQ1YCr+0Gyqmg=
|
||||||
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
|
github.com/prometheus/client_model v0.6.2 h1:oBsgwpGs7iVziMvrGhE53c/GrLUsZdHnqNwqPLxwZyk=
|
||||||
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
|
github.com/prometheus/client_model v0.6.2/go.mod h1:y3m2F6Gdpfy6Ut/GBsUqTWZqCUvMVzSfMLjcu6wAwpE=
|
||||||
github.com/prometheus/common v0.67.5 h1:pIgK94WWlQt1WLwAC5j2ynLaBRDiinoAb86HZHTUGI4=
|
github.com/prometheus/common v0.67.4 h1:yR3NqWO1/UyO1w2PhUvXlGQs/PtFmoveVO0KZ4+Lvsc=
|
||||||
github.com/prometheus/common v0.67.5/go.mod h1:SjE/0MzDEEAyrdr5Gqc6G+sXI67maCxzaT3A2+HqjUw=
|
github.com/prometheus/common v0.67.4/go.mod h1:gP0fq6YjjNCLssJCQp0yk4M8W6ikLURwkdd/YKtTbyI=
|
||||||
github.com/prometheus/otlptranslator v1.0.0 h1:s0LJW/iN9dkIH+EnhiD3BlkkP5QVIUVEoIwkU+A6qos=
|
github.com/prometheus/otlptranslator v1.0.0 h1:s0LJW/iN9dkIH+EnhiD3BlkkP5QVIUVEoIwkU+A6qos=
|
||||||
github.com/prometheus/otlptranslator v1.0.0/go.mod h1:vRYWnXvI6aWGpsdY/mOT/cbeVRBlPWtBNDb7kGR3uKM=
|
github.com/prometheus/otlptranslator v1.0.0/go.mod h1:vRYWnXvI6aWGpsdY/mOT/cbeVRBlPWtBNDb7kGR3uKM=
|
||||||
github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws=
|
github.com/prometheus/procfs v0.19.2 h1:zUMhqEW66Ex7OXIiDkll3tl9a1ZdilUOd/F6ZXw4Vws=
|
||||||
@@ -95,56 +93,56 @@ github.com/vishvananda/netns v0.0.5 h1:DfiHV+j8bA32MFM7bfEunvT8IAqQ/NzSJHtcmW5zd
|
|||||||
github.com/vishvananda/netns v0.0.5/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
|
github.com/vishvananda/netns v0.0.5/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
|
||||||
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
|
go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
|
||||||
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
|
go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
|
||||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.66.0 h1:PnV4kVnw0zOmwwFkAzCN5O07fw1YOIQor120zrh0AVo=
|
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0 h1:ssfIgGNANqpVFCndZvcuyKbl0g+UAVcbBcqGkG28H0Y=
|
||||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.66.0/go.mod h1:ofAwF4uinaf8SXdVzzbL4OsxJ3VfeEg3f/F6CeF49/Y=
|
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.64.0/go.mod h1:GQ/474YrbE4Jx8gZ4q5I4hrhUzM6UPzyrqJYV2AqPoQ=
|
||||||
go.opentelemetry.io/contrib/instrumentation/runtime v0.66.0 h1:JruBNmrPELWjR+PU3fsQBFQRYtsMLQ/zPfbvwDz9I/w=
|
go.opentelemetry.io/contrib/instrumentation/runtime v0.64.0 h1:/+/+UjlXjFcdDlXxKL1PouzX8Z2Vl0OxolRKeBEgYDw=
|
||||||
go.opentelemetry.io/contrib/instrumentation/runtime v0.66.0/go.mod h1:vwNrfL6w1uAE3qX48KFii2Qoqf+NEDP5wNjus+RHz8Y=
|
go.opentelemetry.io/contrib/instrumentation/runtime v0.64.0/go.mod h1:Ldm/PDuzY2DP7IypudopCR3OCOW42NJlN9+mNEroevo=
|
||||||
go.opentelemetry.io/otel v1.41.0 h1:YlEwVsGAlCvczDILpUXpIpPSL/VPugt7zHThEMLce1c=
|
go.opentelemetry.io/otel v1.39.0 h1:8yPrr/S0ND9QEfTfdP9V+SiwT4E0G7Y5MO7p85nis48=
|
||||||
go.opentelemetry.io/otel v1.41.0/go.mod h1:Yt4UwgEKeT05QbLwbyHXEwhnjxNO6D8L5PQP51/46dE=
|
go.opentelemetry.io/otel v1.39.0/go.mod h1:kLlFTywNWrFyEdH0oj2xK0bFYZtHRYUdv1NklR/tgc8=
|
||||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.41.0 h1:VO3BL6OZXRQ1yQc8W6EVfJzINeJ35BkiHx4MYfoQf44=
|
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.39.0 h1:cEf8jF6WbuGQWUVcqgyWtTR0kOOAWY1DYZ+UhvdmQPw=
|
||||||
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.41.0/go.mod h1:qRDnJ2nv3CQXMK2HUd9K9VtvedsPAce3S+/4LZHjX/s=
|
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.39.0/go.mod h1:k1lzV5n5U3HkGvTCJHraTAGJ7MqsgL1wrGwTj1Isfiw=
|
||||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.41.0 h1:ao6Oe+wSebTlQ1OEht7jlYTzQKE+pnx/iNywFvTbuuI=
|
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0 h1:f0cb2XPmrqn4XMy9PNliTgRKJgS5WcL/u0/WRYGz4t0=
|
||||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.41.0/go.mod h1:u3T6vz0gh/NVzgDgiwkgLxpsSF6PaPmo2il0apGJbls=
|
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.39.0/go.mod h1:vnakAaFckOMiMtOIhFI2MNH4FYrZzXCYxmb1LlhoGz8=
|
||||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.41.0 h1:mq/Qcf28TWz719lE3/hMB4KkyDuLJIvgJnFGcd0kEUI=
|
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.39.0 h1:in9O8ESIOlwJAEGTkkf34DesGRAc/Pn8qJ7k3r/42LM=
|
||||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.41.0/go.mod h1:yk5LXEYhsL2htyDNJbEq7fWzNEigeEdV5xBF/Y+kAv0=
|
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.39.0/go.mod h1:Rp0EXBm5tfnv0WL+ARyO/PHBEaEAT8UUHQ6AGJcSq6c=
|
||||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 h1:aTL7F04bJHUlztTsNGJ2l+6he8c+y/b//eR0jjjemT4=
|
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 h1:aTL7F04bJHUlztTsNGJ2l+6he8c+y/b//eR0jjjemT4=
|
||||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0/go.mod h1:kldtb7jDTeol0l3ewcmd8SDvx3EmIE7lyvqbasU3QC4=
|
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0/go.mod h1:kldtb7jDTeol0l3ewcmd8SDvx3EmIE7lyvqbasU3QC4=
|
||||||
go.opentelemetry.io/otel/exporters/prometheus v0.63.0 h1:OLo1FNb0pBZykLqbKRZolKtGZd0Waqlr240YdMEnhhg=
|
go.opentelemetry.io/otel/exporters/prometheus v0.61.0 h1:cCyZS4dr67d30uDyh8etKM2QyDsQ4zC9ds3bdbrVoD0=
|
||||||
go.opentelemetry.io/otel/exporters/prometheus v0.63.0/go.mod h1:8yeQAdhrK5xsWuFehO13Dk/Xb9FuhZoVpJfpoNCfJnw=
|
go.opentelemetry.io/otel/exporters/prometheus v0.61.0/go.mod h1:iivMuj3xpR2DkUrUya3TPS/Z9h3dz7h01GxU+fQBRNg=
|
||||||
go.opentelemetry.io/otel/metric v1.41.0 h1:rFnDcs4gRzBcsO9tS8LCpgR0dxg4aaxWlJxCno7JlTQ=
|
go.opentelemetry.io/otel/metric v1.39.0 h1:d1UzonvEZriVfpNKEVmHXbdf909uGTOQjA0HF0Ls5Q0=
|
||||||
go.opentelemetry.io/otel/metric v1.41.0/go.mod h1:xPvCwd9pU0VN8tPZYzDZV/BMj9CM9vs00GuBjeKhJps=
|
go.opentelemetry.io/otel/metric v1.39.0/go.mod h1:jrZSWL33sD7bBxg1xjrqyDjnuzTUB0x1nBERXd7Ftcs=
|
||||||
go.opentelemetry.io/otel/sdk v1.41.0 h1:YPIEXKmiAwkGl3Gu1huk1aYWwtpRLeskpV+wPisxBp8=
|
go.opentelemetry.io/otel/sdk v1.39.0 h1:nMLYcjVsvdui1B/4FRkwjzoRVsMK8uL/cj0OyhKzt18=
|
||||||
go.opentelemetry.io/otel/sdk v1.41.0/go.mod h1:ahFdU0G5y8IxglBf0QBJXgSe7agzjE4GiTJ6HT9ud90=
|
go.opentelemetry.io/otel/sdk v1.39.0/go.mod h1:vDojkC4/jsTJsE+kh+LXYQlbL8CgrEcwmt1ENZszdJE=
|
||||||
go.opentelemetry.io/otel/sdk/metric v1.41.0 h1:siZQIYBAUd1rlIWQT2uCxWJxcCO7q3TriaMlf08rXw8=
|
go.opentelemetry.io/otel/sdk/metric v1.39.0 h1:cXMVVFVgsIf2YL6QkRF4Urbr/aMInf+2WKg+sEJTtB8=
|
||||||
go.opentelemetry.io/otel/sdk/metric v1.41.0/go.mod h1:HNBuSvT7ROaGtGI50ArdRLUnvRTRGniSUZbxiWxSO8Y=
|
go.opentelemetry.io/otel/sdk/metric v1.39.0/go.mod h1:xq9HEVH7qeX69/JnwEfp6fVq5wosJsY1mt4lLfYdVew=
|
||||||
go.opentelemetry.io/otel/trace v1.41.0 h1:Vbk2co6bhj8L59ZJ6/xFTskY+tGAbOnCtQGVVa9TIN0=
|
go.opentelemetry.io/otel/trace v1.39.0 h1:2d2vfpEDmCJ5zVYz7ijaJdOF59xLomrvj7bjt6/qCJI=
|
||||||
go.opentelemetry.io/otel/trace v1.41.0/go.mod h1:U1NU4ULCoxeDKc09yCWdWe+3QoyweJcISEVa1RBzOis=
|
go.opentelemetry.io/otel/trace v1.39.0/go.mod h1:88w4/PnZSazkGzz/w84VHpQafiU4EtqqlVdxWy+rNOA=
|
||||||
go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A=
|
go.opentelemetry.io/proto/otlp v1.9.0 h1:l706jCMITVouPOqEnii2fIAuO3IVGBRPV5ICjceRb/A=
|
||||||
go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4=
|
go.opentelemetry.io/proto/otlp v1.9.0/go.mod h1:xE+Cx5E/eEHw+ISFkwPLwCZefwVjY+pqKg1qcK03+/4=
|
||||||
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
|
||||||
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
|
||||||
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
|
go.yaml.in/yaml/v2 v2.4.3 h1:6gvOSjQoTB3vt1l+CU+tSyi/HOjfOjRLJ4YwYZGwRO0=
|
||||||
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
|
go.yaml.in/yaml/v2 v2.4.3/go.mod h1:zSxWcmIDjOzPXpjlTTbAsKokqkDNAVtZO0WOMiT90s8=
|
||||||
golang.org/x/crypto v0.48.0 h1:/VRzVqiRSggnhY7gNRxPauEQ5Drw9haKdM0jqfcCFts=
|
golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU=
|
||||||
golang.org/x/crypto v0.48.0/go.mod h1:r0kV5h3qnFPlQnBSrULhlsRfryS2pmewsg+XfMgkVos=
|
golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0=
|
||||||
golang.org/x/exp v0.0.0-20251113190631-e25ba8c21ef6 h1:zfMcR1Cs4KNuomFFgGefv5N0czO2XZpUbxGUy8i8ug0=
|
golang.org/x/exp v0.0.0-20251113190631-e25ba8c21ef6 h1:zfMcR1Cs4KNuomFFgGefv5N0czO2XZpUbxGUy8i8ug0=
|
||||||
golang.org/x/exp v0.0.0-20251113190631-e25ba8c21ef6/go.mod h1:46edojNIoXTNOhySWIWdix628clX9ODXwPsQuG6hsK0=
|
golang.org/x/exp v0.0.0-20251113190631-e25ba8c21ef6/go.mod h1:46edojNIoXTNOhySWIWdix628clX9ODXwPsQuG6hsK0=
|
||||||
golang.org/x/mod v0.32.0 h1:9F4d3PHLljb6x//jOyokMv3eX+YDeepZSEo3mFJy93c=
|
golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk=
|
||||||
golang.org/x/mod v0.32.0/go.mod h1:SgipZ/3h2Ci89DlEtEXWUk/HteuRin+HHhN+WbNhguU=
|
golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc=
|
||||||
golang.org/x/net v0.51.0 h1:94R/GTO7mt3/4wIKpcR5gkGmRLOuE/2hNGeWq/GBIFo=
|
golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU=
|
||||||
golang.org/x/net v0.51.0/go.mod h1:aamm+2QF5ogm02fjy5Bb7CQ0WMt1/WVM7FtyaTLlA9Y=
|
golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=
|
||||||
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
|
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
|
||||||
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
|
||||||
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||||
golang.org/x/sys v0.41.0 h1:Ivj+2Cp/ylzLiEU89QhWblYnOE9zerudt9Ftecq2C6k=
|
golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
|
||||||
golang.org/x/sys v0.41.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
|
||||||
golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk=
|
golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
|
||||||
golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA=
|
golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
|
||||||
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
|
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
|
||||||
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
||||||
golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc=
|
golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=
|
||||||
golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg=
|
golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
|
||||||
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 h1:B82qJJgjvYKsXS9jeunTOisW56dUokqW/FOteYJJ/yg=
|
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 h1:B82qJJgjvYKsXS9jeunTOisW56dUokqW/FOteYJJ/yg=
|
||||||
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2/go.mod h1:deeaetjYA+DHMHg+sMSMI58GrEteJUUzzw7en6TJQcI=
|
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2/go.mod h1:deeaetjYA+DHMHg+sMSMI58GrEteJUUzzw7en6TJQcI=
|
||||||
golang.zx2c4.com/wireguard v0.0.0-20250521234502-f333402bd9cb h1:whnFRlWMcXI9d+ZbWg+4sHnLp52d5yiIPUxMBSt4X9A=
|
golang.zx2c4.com/wireguard v0.0.0-20250521234502-f333402bd9cb h1:whnFRlWMcXI9d+ZbWg+4sHnLp52d5yiIPUxMBSt4X9A=
|
||||||
@@ -155,14 +153,14 @@ golang.zx2c4.com/wireguard/windows v0.5.3 h1:On6j2Rpn3OEMXqBq00QEDC7bWSZrPIHKIus
|
|||||||
golang.zx2c4.com/wireguard/windows v0.5.3/go.mod h1:9TEe8TJmtwyQebdFwAkEWOPr3prrtqm+REGFifP60hI=
|
golang.zx2c4.com/wireguard/windows v0.5.3/go.mod h1:9TEe8TJmtwyQebdFwAkEWOPr3prrtqm+REGFifP60hI=
|
||||||
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
|
gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk=
|
||||||
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
|
gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E=
|
||||||
google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57 h1:JLQynH/LBHfCTSbDWl+py8C+Rg/k1OVH3xfcaiANuF0=
|
google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217 h1:fCvbg86sFXwdrl5LgVcTEvNC+2txB5mgROGmRL5mrls=
|
||||||
google.golang.org/genproto/googleapis/api v0.0.0-20260209200024-4cfbd4190f57/go.mod h1:kSJwQxqmFXeo79zOmbrALdflXQeAYcUbgS7PbpMknCY=
|
google.golang.org/genproto/googleapis/api v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:+rXWjjaukWZun3mLfjmVnQi18E1AsFbDN9QdJ5YXLto=
|
||||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57 h1:mWPCjDEyshlQYzBpMNHaEof6UX1PmHcaUODUywQ0uac=
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217 h1:gRkg/vSppuSQoDjxyiGfN4Upv/h/DQmIR10ZU8dh4Ww=
|
||||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20260209200024-4cfbd4190f57/go.mod h1:j9x/tPzZkyxcgEFkiKEEGxfvyumM01BEtsW8xzOahRQ=
|
google.golang.org/genproto/googleapis/rpc v0.0.0-20251202230838-ff82c1b0f217/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk=
|
||||||
google.golang.org/grpc v1.79.1 h1:zGhSi45ODB9/p3VAawt9a+O/MULLl9dpizzNNpq7flY=
|
google.golang.org/grpc v1.77.0 h1:wVVY6/8cGA6vvffn+wWK5ToddbgdU3d8MNENr4evgXM=
|
||||||
google.golang.org/grpc v1.79.1/go.mod h1:KmT0Kjez+0dde/v2j9vzwoAScgEPx/Bw1CYChhHLrHQ=
|
google.golang.org/grpc v1.77.0/go.mod h1:z0BY1iVj0q8E1uSQCjL9cppRj+gnZjzDnzV0dHhrNig=
|
||||||
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
|
google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE=
|
||||||
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
|
google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
|
||||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||||
|
|||||||
@@ -521,82 +521,3 @@ func (m *Monitor) DisableTarget(id int) error {
|
|||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetTargetIDs returns a slice of all current target IDs
|
|
||||||
func (m *Monitor) GetTargetIDs() []int {
|
|
||||||
m.mutex.RLock()
|
|
||||||
defer m.mutex.RUnlock()
|
|
||||||
|
|
||||||
ids := make([]int, 0, len(m.targets))
|
|
||||||
for id := range m.targets {
|
|
||||||
ids = append(ids, id)
|
|
||||||
}
|
|
||||||
return ids
|
|
||||||
}
|
|
||||||
|
|
||||||
// SyncTargets synchronizes the current targets to match the desired set.
|
|
||||||
// It removes targets not in the desired set and adds targets that are missing.
|
|
||||||
func (m *Monitor) SyncTargets(desiredConfigs []Config) error {
|
|
||||||
m.mutex.Lock()
|
|
||||||
defer m.mutex.Unlock()
|
|
||||||
|
|
||||||
logger.Info("Syncing health check targets: %d desired targets", len(desiredConfigs))
|
|
||||||
|
|
||||||
// Build a set of desired target IDs
|
|
||||||
desiredIDs := make(map[int]Config)
|
|
||||||
for _, config := range desiredConfigs {
|
|
||||||
desiredIDs[config.ID] = config
|
|
||||||
}
|
|
||||||
|
|
||||||
// Find targets to remove (exist but not in desired set)
|
|
||||||
var toRemove []int
|
|
||||||
for id := range m.targets {
|
|
||||||
if _, exists := desiredIDs[id]; !exists {
|
|
||||||
toRemove = append(toRemove, id)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove targets that are not in the desired set
|
|
||||||
for _, id := range toRemove {
|
|
||||||
logger.Info("Sync: removing health check target %d", id)
|
|
||||||
if target, exists := m.targets[id]; exists {
|
|
||||||
target.cancel()
|
|
||||||
delete(m.targets, id)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Add or update targets from the desired set
|
|
||||||
var addedCount, updatedCount int
|
|
||||||
for id, config := range desiredIDs {
|
|
||||||
if existing, exists := m.targets[id]; exists {
|
|
||||||
// Target exists - check if config changed and update if needed
|
|
||||||
// For now, we'll replace it to ensure config is up to date
|
|
||||||
logger.Debug("Sync: updating health check target %d", id)
|
|
||||||
existing.cancel()
|
|
||||||
delete(m.targets, id)
|
|
||||||
if err := m.addTargetUnsafe(config); err != nil {
|
|
||||||
logger.Error("Sync: failed to update target %d: %v", id, err)
|
|
||||||
return fmt.Errorf("failed to update target %d: %v", id, err)
|
|
||||||
}
|
|
||||||
updatedCount++
|
|
||||||
} else {
|
|
||||||
// Target doesn't exist - add it
|
|
||||||
logger.Debug("Sync: adding health check target %d", id)
|
|
||||||
if err := m.addTargetUnsafe(config); err != nil {
|
|
||||||
logger.Error("Sync: failed to add target %d: %v", id, err)
|
|
||||||
return fmt.Errorf("failed to add target %d: %v", id, err)
|
|
||||||
}
|
|
||||||
addedCount++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
logger.Info("Sync complete: removed %d, added %d, updated %d targets",
|
|
||||||
len(toRemove), addedCount, updatedCount)
|
|
||||||
|
|
||||||
// Notify callback if any changes were made
|
|
||||||
if (len(toRemove) > 0 || addedCount > 0 || updatedCount > 0) && m.callback != nil {
|
|
||||||
go m.callback(m.getAllTargetsUnsafe())
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -27,17 +27,16 @@ type ExitNode struct {
|
|||||||
|
|
||||||
// Manager handles UDP hole punching operations
|
// Manager handles UDP hole punching operations
|
||||||
type Manager struct {
|
type Manager struct {
|
||||||
mu sync.Mutex
|
mu sync.Mutex
|
||||||
running bool
|
running bool
|
||||||
stopChan chan struct{}
|
stopChan chan struct{}
|
||||||
sharedBind *bind.SharedBind
|
sharedBind *bind.SharedBind
|
||||||
ID string
|
ID string
|
||||||
token string
|
token string
|
||||||
publicKey string
|
publicKey string
|
||||||
clientType string
|
clientType string
|
||||||
exitNodes map[string]ExitNode // key is endpoint
|
exitNodes map[string]ExitNode // key is endpoint
|
||||||
updateChan chan struct{} // signals the goroutine to refresh exit nodes
|
updateChan chan struct{} // signals the goroutine to refresh exit nodes
|
||||||
publicDNS []string
|
|
||||||
|
|
||||||
sendHolepunchInterval time.Duration
|
sendHolepunchInterval time.Duration
|
||||||
sendHolepunchIntervalMin time.Duration
|
sendHolepunchIntervalMin time.Duration
|
||||||
@@ -50,13 +49,12 @@ const defaultSendHolepunchIntervalMax = 60 * time.Second
|
|||||||
const defaultSendHolepunchIntervalMin = 1 * time.Second
|
const defaultSendHolepunchIntervalMin = 1 * time.Second
|
||||||
|
|
||||||
// NewManager creates a new hole punch manager
|
// NewManager creates a new hole punch manager
|
||||||
func NewManager(sharedBind *bind.SharedBind, ID string, clientType string, publicKey string, publicDNS []string) *Manager {
|
func NewManager(sharedBind *bind.SharedBind, ID string, clientType string, publicKey string) *Manager {
|
||||||
return &Manager{
|
return &Manager{
|
||||||
sharedBind: sharedBind,
|
sharedBind: sharedBind,
|
||||||
ID: ID,
|
ID: ID,
|
||||||
clientType: clientType,
|
clientType: clientType,
|
||||||
publicKey: publicKey,
|
publicKey: publicKey,
|
||||||
publicDNS: publicDNS,
|
|
||||||
exitNodes: make(map[string]ExitNode),
|
exitNodes: make(map[string]ExitNode),
|
||||||
sendHolepunchInterval: defaultSendHolepunchIntervalMin,
|
sendHolepunchInterval: defaultSendHolepunchIntervalMin,
|
||||||
sendHolepunchIntervalMin: defaultSendHolepunchIntervalMin,
|
sendHolepunchIntervalMin: defaultSendHolepunchIntervalMin,
|
||||||
@@ -283,13 +281,7 @@ func (m *Manager) TriggerHolePunch() error {
|
|||||||
// Send hole punch to all exit nodes
|
// Send hole punch to all exit nodes
|
||||||
successCount := 0
|
successCount := 0
|
||||||
for _, exitNode := range currentExitNodes {
|
for _, exitNode := range currentExitNodes {
|
||||||
var host string
|
host, err := util.ResolveDomain(exitNode.Endpoint)
|
||||||
var err error
|
|
||||||
if len(m.publicDNS) > 0 {
|
|
||||||
host, err = util.ResolveDomainUpstream(exitNode.Endpoint, m.publicDNS)
|
|
||||||
} else {
|
|
||||||
host, err = util.ResolveDomain(exitNode.Endpoint)
|
|
||||||
}
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Warn("Failed to resolve endpoint %s: %v", exitNode.Endpoint, err)
|
logger.Warn("Failed to resolve endpoint %s: %v", exitNode.Endpoint, err)
|
||||||
continue
|
continue
|
||||||
@@ -400,13 +392,7 @@ func (m *Manager) runMultipleExitNodes() {
|
|||||||
|
|
||||||
var resolvedNodes []resolvedExitNode
|
var resolvedNodes []resolvedExitNode
|
||||||
for _, exitNode := range currentExitNodes {
|
for _, exitNode := range currentExitNodes {
|
||||||
var host string
|
host, err := util.ResolveDomain(exitNode.Endpoint)
|
||||||
var err error
|
|
||||||
if len(m.publicDNS) > 0 {
|
|
||||||
host, err = util.ResolveDomainUpstream(exitNode.Endpoint, m.publicDNS)
|
|
||||||
} else {
|
|
||||||
host, err = util.ResolveDomain(exitNode.Endpoint)
|
|
||||||
}
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Warn("Failed to resolve endpoint %s: %v", exitNode.Endpoint, err)
|
logger.Warn("Failed to resolve endpoint %s: %v", exitNode.Endpoint, err)
|
||||||
continue
|
continue
|
||||||
|
|||||||
@@ -49,11 +49,10 @@ type cachedAddr struct {
|
|||||||
|
|
||||||
// HolepunchTester monitors holepunch connectivity using magic packets
|
// HolepunchTester monitors holepunch connectivity using magic packets
|
||||||
type HolepunchTester struct {
|
type HolepunchTester struct {
|
||||||
sharedBind *bind.SharedBind
|
sharedBind *bind.SharedBind
|
||||||
publicDNS []string
|
mu sync.RWMutex
|
||||||
mu sync.RWMutex
|
running bool
|
||||||
running bool
|
stopChan chan struct{}
|
||||||
stopChan chan struct{}
|
|
||||||
|
|
||||||
// Pending requests waiting for responses (key: echo data as string)
|
// Pending requests waiting for responses (key: echo data as string)
|
||||||
pendingRequests sync.Map // map[string]*pendingRequest
|
pendingRequests sync.Map // map[string]*pendingRequest
|
||||||
@@ -85,10 +84,9 @@ type pendingRequest struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewHolepunchTester creates a new holepunch tester using the given SharedBind
|
// NewHolepunchTester creates a new holepunch tester using the given SharedBind
|
||||||
func NewHolepunchTester(sharedBind *bind.SharedBind, publicDNS []string) *HolepunchTester {
|
func NewHolepunchTester(sharedBind *bind.SharedBind) *HolepunchTester {
|
||||||
return &HolepunchTester{
|
return &HolepunchTester{
|
||||||
sharedBind: sharedBind,
|
sharedBind: sharedBind,
|
||||||
publicDNS: publicDNS,
|
|
||||||
addrCache: make(map[string]*cachedAddr),
|
addrCache: make(map[string]*cachedAddr),
|
||||||
addrCacheTTL: 5 * time.Minute, // Cache addresses for 5 minutes
|
addrCacheTTL: 5 * time.Minute, // Cache addresses for 5 minutes
|
||||||
}
|
}
|
||||||
@@ -171,13 +169,7 @@ func (t *HolepunchTester) resolveEndpoint(endpoint string) (*net.UDPAddr, error)
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Resolve the endpoint
|
// Resolve the endpoint
|
||||||
var host string
|
host, err := util.ResolveDomain(endpoint)
|
||||||
var err error
|
|
||||||
if len(t.publicDNS) > 0 {
|
|
||||||
host, err = util.ResolveDomainUpstream(endpoint, t.publicDNS)
|
|
||||||
} else {
|
|
||||||
host, err = util.ResolveDomain(endpoint)
|
|
||||||
}
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
host = endpoint
|
host = endpoint
|
||||||
}
|
}
|
||||||
|
|||||||
237
main.go
237
main.go
@@ -116,7 +116,6 @@ var (
|
|||||||
logLevel string
|
logLevel string
|
||||||
interfaceName string
|
interfaceName string
|
||||||
port uint16
|
port uint16
|
||||||
portStr string
|
|
||||||
disableClients bool
|
disableClients bool
|
||||||
updownScript string
|
updownScript string
|
||||||
dockerSocket string
|
dockerSocket string
|
||||||
@@ -137,7 +136,6 @@ var (
|
|||||||
authDaemonPrincipalsFile string
|
authDaemonPrincipalsFile string
|
||||||
authDaemonCACertPath string
|
authDaemonCACertPath string
|
||||||
authDaemonEnabled bool
|
authDaemonEnabled bool
|
||||||
authDaemonGenerateRandomPassword bool
|
|
||||||
// Build/version (can be overridden via -ldflags "-X main.newtVersion=...")
|
// Build/version (can be overridden via -ldflags "-X main.newtVersion=...")
|
||||||
newtVersion = "version_replaceme"
|
newtVersion = "version_replaceme"
|
||||||
|
|
||||||
@@ -212,12 +210,11 @@ func runNewtMain(ctx context.Context) {
|
|||||||
logLevel = os.Getenv("LOG_LEVEL")
|
logLevel = os.Getenv("LOG_LEVEL")
|
||||||
updownScript = os.Getenv("UPDOWN_SCRIPT")
|
updownScript = os.Getenv("UPDOWN_SCRIPT")
|
||||||
interfaceName = os.Getenv("INTERFACE")
|
interfaceName = os.Getenv("INTERFACE")
|
||||||
portStr = os.Getenv("PORT")
|
portStr := os.Getenv("PORT")
|
||||||
authDaemonKey = os.Getenv("AD_KEY")
|
authDaemonKey = os.Getenv("AD_KEY")
|
||||||
authDaemonPrincipalsFile = os.Getenv("AD_PRINCIPALS_FILE")
|
authDaemonPrincipalsFile = os.Getenv("AD_PRINCIPALS_FILE")
|
||||||
authDaemonCACertPath = os.Getenv("AD_CA_CERT_PATH")
|
authDaemonCACertPath = os.Getenv("AD_CA_CERT_PATH")
|
||||||
authDaemonEnabledEnv := os.Getenv("AUTH_DAEMON_ENABLED")
|
authDaemonEnabledEnv := os.Getenv("AUTH_DAEMON_ENABLED")
|
||||||
authDaemonGenerateRandomPasswordEnv := os.Getenv("AD_GENERATE_RANDOM_PASSWORD")
|
|
||||||
|
|
||||||
// Metrics/observability env mirrors
|
// Metrics/observability env mirrors
|
||||||
metricsEnabledEnv := os.Getenv("NEWT_METRICS_PROMETHEUS_ENABLED")
|
metricsEnabledEnv := os.Getenv("NEWT_METRICS_PROMETHEUS_ENABLED")
|
||||||
@@ -302,10 +299,10 @@ func runNewtMain(ctx context.Context) {
|
|||||||
flag.StringVar(&dockerSocket, "docker-socket", "", "Path or address to Docker socket (typically unix:///var/run/docker.sock)")
|
flag.StringVar(&dockerSocket, "docker-socket", "", "Path or address to Docker socket (typically unix:///var/run/docker.sock)")
|
||||||
}
|
}
|
||||||
if pingIntervalStr == "" {
|
if pingIntervalStr == "" {
|
||||||
flag.StringVar(&pingIntervalStr, "ping-interval", "15s", "Interval for pinging the server (default 15s)")
|
flag.StringVar(&pingIntervalStr, "ping-interval", "3s", "Interval for pinging the server (default 3s)")
|
||||||
}
|
}
|
||||||
if pingTimeoutStr == "" {
|
if pingTimeoutStr == "" {
|
||||||
flag.StringVar(&pingTimeoutStr, "ping-timeout", "7s", " Timeout for each ping (default 7s)")
|
flag.StringVar(&pingTimeoutStr, "ping-timeout", "5s", " Timeout for each ping (default 5s)")
|
||||||
}
|
}
|
||||||
// load the prefer endpoint just as a flag
|
// load the prefer endpoint just as a flag
|
||||||
flag.StringVar(&preferEndpoint, "prefer-endpoint", "", "Prefer this endpoint for the connection (if set, will override the endpoint from the server)")
|
flag.StringVar(&preferEndpoint, "prefer-endpoint", "", "Prefer this endpoint for the connection (if set, will override the endpoint from the server)")
|
||||||
@@ -330,21 +327,30 @@ func runNewtMain(ctx context.Context) {
|
|||||||
if pingIntervalStr != "" {
|
if pingIntervalStr != "" {
|
||||||
pingInterval, err = time.ParseDuration(pingIntervalStr)
|
pingInterval, err = time.ParseDuration(pingIntervalStr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Printf("Invalid PING_INTERVAL value: %s, using default 15 seconds\n", pingIntervalStr)
|
fmt.Printf("Invalid PING_INTERVAL value: %s, using default 3 seconds\n", pingIntervalStr)
|
||||||
pingInterval = 15 * time.Second
|
pingInterval = 3 * time.Second
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
pingInterval = 15 * time.Second
|
pingInterval = 3 * time.Second
|
||||||
}
|
}
|
||||||
|
|
||||||
if pingTimeoutStr != "" {
|
if pingTimeoutStr != "" {
|
||||||
pingTimeout, err = time.ParseDuration(pingTimeoutStr)
|
pingTimeout, err = time.ParseDuration(pingTimeoutStr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
fmt.Printf("Invalid PING_TIMEOUT value: %s, using default 7 seconds\n", pingTimeoutStr)
|
fmt.Printf("Invalid PING_TIMEOUT value: %s, using default 5 seconds\n", pingTimeoutStr)
|
||||||
pingTimeout = 7 * time.Second
|
pingTimeout = 5 * time.Second
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
pingTimeout = 7 * time.Second
|
pingTimeout = 5 * time.Second
|
||||||
|
}
|
||||||
|
|
||||||
|
if portStr != "" {
|
||||||
|
portInt, err := strconv.Atoi(portStr)
|
||||||
|
if err != nil {
|
||||||
|
logger.Warn("Failed to parse PORT, choosing a random port")
|
||||||
|
} else {
|
||||||
|
port = uint16(portInt)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if dockerEnforceNetworkValidation == "" {
|
if dockerEnforceNetworkValidation == "" {
|
||||||
@@ -414,13 +420,6 @@ func runNewtMain(ctx context.Context) {
|
|||||||
authDaemonEnabled = v
|
authDaemonEnabled = v
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if authDaemonGenerateRandomPasswordEnv == "" {
|
|
||||||
flag.BoolVar(&authDaemonGenerateRandomPassword, "ad-generate-random-password", false, "Generate a random password for authenticated users")
|
|
||||||
} else {
|
|
||||||
if v, err := strconv.ParseBool(authDaemonGenerateRandomPasswordEnv); err == nil {
|
|
||||||
authDaemonGenerateRandomPassword = v
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// do a --version check
|
// do a --version check
|
||||||
version := flag.Bool("version", false, "Print the version")
|
version := flag.Bool("version", false, "Print the version")
|
||||||
@@ -432,15 +431,6 @@ func runNewtMain(ctx context.Context) {
|
|||||||
tlsClientCAs = append(tlsClientCAs, tlsClientCAsFlag...)
|
tlsClientCAs = append(tlsClientCAs, tlsClientCAsFlag...)
|
||||||
}
|
}
|
||||||
|
|
||||||
if portStr != "" {
|
|
||||||
portInt, err := strconv.Atoi(portStr)
|
|
||||||
if err != nil {
|
|
||||||
logger.Warn("Failed to parse PORT, choosing a random port")
|
|
||||||
} else {
|
|
||||||
port = uint16(portInt)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if *version {
|
if *version {
|
||||||
fmt.Println("Newt version " + newtVersion)
|
fmt.Println("Newt version " + newtVersion)
|
||||||
os.Exit(0)
|
os.Exit(0)
|
||||||
@@ -565,7 +555,8 @@ func runNewtMain(ctx context.Context) {
|
|||||||
id, // CLI arg takes precedence
|
id, // CLI arg takes precedence
|
||||||
secret, // CLI arg takes precedence
|
secret, // CLI arg takes precedence
|
||||||
endpoint,
|
endpoint,
|
||||||
30*time.Second,
|
pingInterval,
|
||||||
|
pingTimeout,
|
||||||
opt,
|
opt,
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -956,7 +947,7 @@ persistent_keepalive_interval=5`, util.FixKey(privateKey.String()), util.FixKey(
|
|||||||
"publicKey": publicKey.String(),
|
"publicKey": publicKey.String(),
|
||||||
"pingResults": pingResults,
|
"pingResults": pingResults,
|
||||||
"newtVersion": newtVersion,
|
"newtVersion": newtVersion,
|
||||||
}, 2*time.Second)
|
}, 1*time.Second)
|
||||||
|
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -1059,7 +1050,7 @@ persistent_keepalive_interval=5`, util.FixKey(privateKey.String()), util.FixKey(
|
|||||||
"publicKey": publicKey.String(),
|
"publicKey": publicKey.String(),
|
||||||
"pingResults": pingResults,
|
"pingResults": pingResults,
|
||||||
"newtVersion": newtVersion,
|
"newtVersion": newtVersion,
|
||||||
}, 2*time.Second)
|
}, 1*time.Second)
|
||||||
|
|
||||||
logger.Debug("Sent exit node ping results to cloud for selection: pingResults=%+v", pingResults)
|
logger.Debug("Sent exit node ping results to cloud for selection: pingResults=%+v", pingResults)
|
||||||
})
|
})
|
||||||
@@ -1164,153 +1155,6 @@ persistent_keepalive_interval=5`, util.FixKey(privateKey.String()), util.FixKey(
|
|||||||
}
|
}
|
||||||
})
|
})
|
||||||
|
|
||||||
// Register handler for syncing targets (TCP, UDP, and health checks)
|
|
||||||
client.RegisterHandler("newt/sync", func(msg websocket.WSMessage) {
|
|
||||||
logger.Info("Received sync message")
|
|
||||||
|
|
||||||
// if there is no wgData or pm, we can't sync targets
|
|
||||||
if wgData.TunnelIP == "" || pm == nil {
|
|
||||||
logger.Info(msgNoTunnelOrProxy)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Define the sync data structure
|
|
||||||
type SyncData struct {
|
|
||||||
Targets TargetsByType `json:"targets"`
|
|
||||||
HealthCheckTargets []healthcheck.Config `json:"healthCheckTargets"`
|
|
||||||
}
|
|
||||||
|
|
||||||
var syncData SyncData
|
|
||||||
jsonData, err := json.Marshal(msg.Data)
|
|
||||||
if err != nil {
|
|
||||||
logger.Error("Error marshaling sync data: %v", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := json.Unmarshal(jsonData, &syncData); err != nil {
|
|
||||||
logger.Error("Error unmarshaling sync data: %v", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
logger.Debug("Sync data received: TCP targets=%d, UDP targets=%d, health check targets=%d",
|
|
||||||
len(syncData.Targets.TCP), len(syncData.Targets.UDP), len(syncData.HealthCheckTargets))
|
|
||||||
|
|
||||||
//TODO: TEST AND IMPLEMENT THIS
|
|
||||||
|
|
||||||
// // Build sets of desired targets (port -> target string)
|
|
||||||
// desiredTCP := make(map[int]string)
|
|
||||||
// for _, t := range syncData.Targets.TCP {
|
|
||||||
// parts := strings.Split(t, ":")
|
|
||||||
// if len(parts) != 3 {
|
|
||||||
// logger.Warn("Invalid TCP target format: %s", t)
|
|
||||||
// continue
|
|
||||||
// }
|
|
||||||
// port := 0
|
|
||||||
// if _, err := fmt.Sscanf(parts[0], "%d", &port); err != nil {
|
|
||||||
// logger.Warn("Invalid port in TCP target: %s", parts[0])
|
|
||||||
// continue
|
|
||||||
// }
|
|
||||||
// desiredTCP[port] = parts[1] + ":" + parts[2]
|
|
||||||
// }
|
|
||||||
|
|
||||||
// desiredUDP := make(map[int]string)
|
|
||||||
// for _, t := range syncData.Targets.UDP {
|
|
||||||
// parts := strings.Split(t, ":")
|
|
||||||
// if len(parts) != 3 {
|
|
||||||
// logger.Warn("Invalid UDP target format: %s", t)
|
|
||||||
// continue
|
|
||||||
// }
|
|
||||||
// port := 0
|
|
||||||
// if _, err := fmt.Sscanf(parts[0], "%d", &port); err != nil {
|
|
||||||
// logger.Warn("Invalid port in UDP target: %s", parts[0])
|
|
||||||
// continue
|
|
||||||
// }
|
|
||||||
// desiredUDP[port] = parts[1] + ":" + parts[2]
|
|
||||||
// }
|
|
||||||
|
|
||||||
// // Get current targets from proxy manager
|
|
||||||
// currentTCP, currentUDP := pm.GetTargets()
|
|
||||||
|
|
||||||
// // Sync TCP targets
|
|
||||||
// // Remove TCP targets not in desired set
|
|
||||||
// if tcpForIP, ok := currentTCP[wgData.TunnelIP]; ok {
|
|
||||||
// for port := range tcpForIP {
|
|
||||||
// if _, exists := desiredTCP[port]; !exists {
|
|
||||||
// logger.Info("Sync: removing TCP target on port %d", port)
|
|
||||||
// targetStr := fmt.Sprintf("%d:%s", port, tcpForIP[port])
|
|
||||||
// updateTargets(pm, "remove", wgData.TunnelIP, "tcp", TargetData{Targets: []string{targetStr}})
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
|
|
||||||
// // Add TCP targets that are missing
|
|
||||||
// for port, target := range desiredTCP {
|
|
||||||
// needsAdd := true
|
|
||||||
// if tcpForIP, ok := currentTCP[wgData.TunnelIP]; ok {
|
|
||||||
// if currentTarget, exists := tcpForIP[port]; exists {
|
|
||||||
// // Check if target address changed
|
|
||||||
// if currentTarget == target {
|
|
||||||
// needsAdd = false
|
|
||||||
// } else {
|
|
||||||
// // Target changed, remove old one first
|
|
||||||
// logger.Info("Sync: updating TCP target on port %d", port)
|
|
||||||
// targetStr := fmt.Sprintf("%d:%s", port, currentTarget)
|
|
||||||
// updateTargets(pm, "remove", wgData.TunnelIP, "tcp", TargetData{Targets: []string{targetStr}})
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
// if needsAdd {
|
|
||||||
// logger.Info("Sync: adding TCP target on port %d -> %s", port, target)
|
|
||||||
// targetStr := fmt.Sprintf("%d:%s", port, target)
|
|
||||||
// updateTargets(pm, "add", wgData.TunnelIP, "tcp", TargetData{Targets: []string{targetStr}})
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
|
|
||||||
// // Sync UDP targets
|
|
||||||
// // Remove UDP targets not in desired set
|
|
||||||
// if udpForIP, ok := currentUDP[wgData.TunnelIP]; ok {
|
|
||||||
// for port := range udpForIP {
|
|
||||||
// if _, exists := desiredUDP[port]; !exists {
|
|
||||||
// logger.Info("Sync: removing UDP target on port %d", port)
|
|
||||||
// targetStr := fmt.Sprintf("%d:%s", port, udpForIP[port])
|
|
||||||
// updateTargets(pm, "remove", wgData.TunnelIP, "udp", TargetData{Targets: []string{targetStr}})
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
|
|
||||||
// // Add UDP targets that are missing
|
|
||||||
// for port, target := range desiredUDP {
|
|
||||||
// needsAdd := true
|
|
||||||
// if udpForIP, ok := currentUDP[wgData.TunnelIP]; ok {
|
|
||||||
// if currentTarget, exists := udpForIP[port]; exists {
|
|
||||||
// // Check if target address changed
|
|
||||||
// if currentTarget == target {
|
|
||||||
// needsAdd = false
|
|
||||||
// } else {
|
|
||||||
// // Target changed, remove old one first
|
|
||||||
// logger.Info("Sync: updating UDP target on port %d", port)
|
|
||||||
// targetStr := fmt.Sprintf("%d:%s", port, currentTarget)
|
|
||||||
// updateTargets(pm, "remove", wgData.TunnelIP, "udp", TargetData{Targets: []string{targetStr}})
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
// if needsAdd {
|
|
||||||
// logger.Info("Sync: adding UDP target on port %d -> %s", port, target)
|
|
||||||
// targetStr := fmt.Sprintf("%d:%s", port, target)
|
|
||||||
// updateTargets(pm, "add", wgData.TunnelIP, "udp", TargetData{Targets: []string{targetStr}})
|
|
||||||
// }
|
|
||||||
// }
|
|
||||||
|
|
||||||
// // Sync health check targets
|
|
||||||
// if err := healthMonitor.SyncTargets(syncData.HealthCheckTargets); err != nil {
|
|
||||||
// logger.Error("Failed to sync health check targets: %v", err)
|
|
||||||
// } else {
|
|
||||||
// logger.Info("Successfully synced health check targets")
|
|
||||||
// }
|
|
||||||
|
|
||||||
logger.Info("Sync complete")
|
|
||||||
})
|
|
||||||
|
|
||||||
// Register handler for Docker socket check
|
// Register handler for Docker socket check
|
||||||
client.RegisterHandler("newt/socket/check", func(msg websocket.WSMessage) {
|
client.RegisterHandler("newt/socket/check", func(msg websocket.WSMessage) {
|
||||||
logger.Debug("Received Docker socket check request")
|
logger.Debug("Received Docker socket check request")
|
||||||
@@ -1534,18 +1378,15 @@ persistent_keepalive_interval=5`, util.FixKey(privateKey.String()), util.FixKey(
|
|||||||
|
|
||||||
// Define the structure of the incoming message
|
// Define the structure of the incoming message
|
||||||
type SSHCertData struct {
|
type SSHCertData struct {
|
||||||
MessageId int `json:"messageId"`
|
MessageId int `json:"messageId"`
|
||||||
AgentPort int `json:"agentPort"`
|
AgentPort int `json:"agentPort"`
|
||||||
AgentHost string `json:"agentHost"`
|
AgentHost string `json:"agentHost"`
|
||||||
ExternalAuthDaemon bool `json:"externalAuthDaemon"`
|
CACert string `json:"caCert"`
|
||||||
CACert string `json:"caCert"`
|
Username string `json:"username"`
|
||||||
Username string `json:"username"`
|
NiceID string `json:"niceId"`
|
||||||
NiceID string `json:"niceId"`
|
Metadata struct {
|
||||||
Metadata struct {
|
Sudo bool `json:"sudo"`
|
||||||
SudoMode string `json:"sudoMode"`
|
Homedir bool `json:"homedir"`
|
||||||
SudoCommands []string `json:"sudoCommands"`
|
|
||||||
Homedir bool `json:"homedir"`
|
|
||||||
Groups []string `json:"groups"`
|
|
||||||
} `json:"metadata"`
|
} `json:"metadata"`
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1565,7 +1406,7 @@ persistent_keepalive_interval=5`, util.FixKey(privateKey.String()), util.FixKey(
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Check if we're running the auth daemon internally
|
// Check if we're running the auth daemon internally
|
||||||
if authDaemonServer != nil && !certData.ExternalAuthDaemon { // if the auth daemon is running internally and the external auth daemon is not enabled
|
if authDaemonServer != nil {
|
||||||
// Call ProcessConnection directly when running internally
|
// Call ProcessConnection directly when running internally
|
||||||
logger.Debug("Calling internal auth daemon ProcessConnection for user %s", certData.Username)
|
logger.Debug("Calling internal auth daemon ProcessConnection for user %s", certData.Username)
|
||||||
|
|
||||||
@@ -1574,10 +1415,8 @@ persistent_keepalive_interval=5`, util.FixKey(privateKey.String()), util.FixKey(
|
|||||||
NiceId: certData.NiceID,
|
NiceId: certData.NiceID,
|
||||||
Username: certData.Username,
|
Username: certData.Username,
|
||||||
Metadata: authdaemon.ConnectionMetadata{
|
Metadata: authdaemon.ConnectionMetadata{
|
||||||
SudoMode: certData.Metadata.SudoMode,
|
Sudo: certData.Metadata.Sudo,
|
||||||
SudoCommands: certData.Metadata.SudoCommands,
|
Homedir: certData.Metadata.Homedir,
|
||||||
Homedir: certData.Metadata.Homedir,
|
|
||||||
Groups: certData.Metadata.Groups,
|
|
||||||
},
|
},
|
||||||
})
|
})
|
||||||
|
|
||||||
@@ -1611,10 +1450,8 @@ persistent_keepalive_interval=5`, util.FixKey(privateKey.String()), util.FixKey(
|
|||||||
"niceId": certData.NiceID,
|
"niceId": certData.NiceID,
|
||||||
"username": certData.Username,
|
"username": certData.Username,
|
||||||
"metadata": map[string]interface{}{
|
"metadata": map[string]interface{}{
|
||||||
"sudoMode": certData.Metadata.SudoMode,
|
"sudo": certData.Metadata.Sudo,
|
||||||
"sudoCommands": certData.Metadata.SudoCommands,
|
"homedir": certData.Metadata.Homedir,
|
||||||
"homedir": certData.Metadata.Homedir,
|
|
||||||
"groups": certData.Metadata.Groups,
|
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1793,8 +1630,6 @@ persistent_keepalive_interval=5`, util.FixKey(privateKey.String()), util.FixKey(
|
|||||||
pm.Stop()
|
pm.Stop()
|
||||||
}
|
}
|
||||||
|
|
||||||
client.SendMessage("newt/disconnecting", map[string]any{})
|
|
||||||
|
|
||||||
if client != nil {
|
if client != nil {
|
||||||
client.Close()
|
client.Close()
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -48,21 +48,113 @@ type SubnetRule struct {
|
|||||||
PortRanges []PortRange // empty slice means all ports allowed
|
PortRanges []PortRange // empty slice means all ports allowed
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetAllRules returns a copy of all subnet rules
|
// ruleKey is used as a map key for fast O(1) lookups
|
||||||
func (sl *SubnetLookup) GetAllRules() []SubnetRule {
|
type ruleKey struct {
|
||||||
|
sourcePrefix string
|
||||||
|
destPrefix string
|
||||||
|
}
|
||||||
|
|
||||||
|
// SubnetLookup provides fast IP subnet and port matching with O(1) lookup performance
|
||||||
|
type SubnetLookup struct {
|
||||||
|
mu sync.RWMutex
|
||||||
|
rules map[ruleKey]*SubnetRule // Map for O(1) lookups by prefix combination
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSubnetLookup creates a new subnet lookup table
|
||||||
|
func NewSubnetLookup() *SubnetLookup {
|
||||||
|
return &SubnetLookup{
|
||||||
|
rules: make(map[ruleKey]*SubnetRule),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddSubnet adds a subnet rule with source and destination prefixes and optional port restrictions
|
||||||
|
// If portRanges is nil or empty, all ports are allowed for this subnet
|
||||||
|
// rewriteTo can be either an IP/CIDR (e.g., "192.168.1.1/32") or a domain name (e.g., "example.com")
|
||||||
|
func (sl *SubnetLookup) AddSubnet(sourcePrefix, destPrefix netip.Prefix, rewriteTo string, portRanges []PortRange, disableIcmp bool) {
|
||||||
|
sl.mu.Lock()
|
||||||
|
defer sl.mu.Unlock()
|
||||||
|
|
||||||
|
key := ruleKey{
|
||||||
|
sourcePrefix: sourcePrefix.String(),
|
||||||
|
destPrefix: destPrefix.String(),
|
||||||
|
}
|
||||||
|
|
||||||
|
sl.rules[key] = &SubnetRule{
|
||||||
|
SourcePrefix: sourcePrefix,
|
||||||
|
DestPrefix: destPrefix,
|
||||||
|
DisableIcmp: disableIcmp,
|
||||||
|
RewriteTo: rewriteTo,
|
||||||
|
PortRanges: portRanges,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveSubnet removes a subnet rule from the lookup table
|
||||||
|
func (sl *SubnetLookup) RemoveSubnet(sourcePrefix, destPrefix netip.Prefix) {
|
||||||
|
sl.mu.Lock()
|
||||||
|
defer sl.mu.Unlock()
|
||||||
|
|
||||||
|
key := ruleKey{
|
||||||
|
sourcePrefix: sourcePrefix.String(),
|
||||||
|
destPrefix: destPrefix.String(),
|
||||||
|
}
|
||||||
|
|
||||||
|
delete(sl.rules, key)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Match checks if a source IP, destination IP, port, and protocol match any subnet rule
|
||||||
|
// Returns the matched rule if ALL of these conditions are met:
|
||||||
|
// - The source IP is in the rule's source prefix
|
||||||
|
// - The destination IP is in the rule's destination prefix
|
||||||
|
// - The port is in an allowed range (or no port restrictions exist)
|
||||||
|
// - The protocol matches (or the port range allows both protocols)
|
||||||
|
//
|
||||||
|
// proto should be header.TCPProtocolNumber or header.UDPProtocolNumber
|
||||||
|
// Returns nil if no rule matches
|
||||||
|
func (sl *SubnetLookup) Match(srcIP, dstIP netip.Addr, port uint16, proto tcpip.TransportProtocolNumber) *SubnetRule {
|
||||||
sl.mu.RLock()
|
sl.mu.RLock()
|
||||||
defer sl.mu.RUnlock()
|
defer sl.mu.RUnlock()
|
||||||
|
|
||||||
var rules []SubnetRule
|
// Iterate through all rules to find matching source and destination prefixes
|
||||||
for _, destTriePtr := range sl.sourceTrie.All() {
|
// This is O(n) but necessary since we need to check prefix containment, not exact match
|
||||||
if destTriePtr == nil {
|
for _, rule := range sl.rules {
|
||||||
|
// Check if source and destination IPs match their respective prefixes
|
||||||
|
if !rule.SourcePrefix.Contains(srcIP) {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
for _, rule := range destTriePtr.rules {
|
if !rule.DestPrefix.Contains(dstIP) {
|
||||||
rules = append(rules, *rule)
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if rule.DisableIcmp && (proto == header.ICMPv4ProtocolNumber || proto == header.ICMPv6ProtocolNumber) {
|
||||||
|
// ICMP is disabled for this subnet
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Both IPs match - now check port restrictions
|
||||||
|
// If no port ranges specified, all ports are allowed
|
||||||
|
if len(rule.PortRanges) == 0 {
|
||||||
|
return rule
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if port and protocol are in any of the allowed ranges
|
||||||
|
for _, pr := range rule.PortRanges {
|
||||||
|
if port >= pr.Min && port <= pr.Max {
|
||||||
|
// Check protocol compatibility
|
||||||
|
if pr.Protocol == "" {
|
||||||
|
// Empty protocol means allow both TCP and UDP
|
||||||
|
return rule
|
||||||
|
}
|
||||||
|
// Check if the packet protocol matches the port range protocol
|
||||||
|
if (pr.Protocol == "tcp" && proto == header.TCPProtocolNumber) ||
|
||||||
|
(pr.Protocol == "udp" && proto == header.UDPProtocolNumber) {
|
||||||
|
return rule
|
||||||
|
}
|
||||||
|
// Port matches but protocol doesn't - continue checking other ranges
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return rules
|
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// connKey uniquely identifies a connection for NAT tracking
|
// connKey uniquely identifies a connection for NAT tracking
|
||||||
@@ -74,17 +166,6 @@ type connKey struct {
|
|||||||
proto uint8
|
proto uint8
|
||||||
}
|
}
|
||||||
|
|
||||||
// reverseConnKey uniquely identifies a connection for reverse NAT lookup (reply direction)
|
|
||||||
// Key structure: (rewrittenTo, originalSrcIP, originalSrcPort, originalDstPort, proto)
|
|
||||||
// This allows O(1) lookup of NAT entries for reply packets
|
|
||||||
type reverseConnKey struct {
|
|
||||||
rewrittenTo string // The address we rewrote to (becomes src in replies)
|
|
||||||
originalSrcIP string // Original source IP (becomes dst in replies)
|
|
||||||
originalSrcPort uint16 // Original source port (becomes dst port in replies)
|
|
||||||
originalDstPort uint16 // Original destination port (becomes src port in replies)
|
|
||||||
proto uint8
|
|
||||||
}
|
|
||||||
|
|
||||||
// destKey identifies a destination for handler lookups (without source port since it may change)
|
// destKey identifies a destination for handler lookups (without source port since it may change)
|
||||||
type destKey struct {
|
type destKey struct {
|
||||||
srcIP string
|
srcIP string
|
||||||
@@ -109,8 +190,7 @@ type ProxyHandler struct {
|
|||||||
icmpHandler *ICMPHandler
|
icmpHandler *ICMPHandler
|
||||||
subnetLookup *SubnetLookup
|
subnetLookup *SubnetLookup
|
||||||
natTable map[connKey]*natState
|
natTable map[connKey]*natState
|
||||||
reverseNatTable map[reverseConnKey]*natState // Reverse lookup map for O(1) reply packet NAT
|
destRewriteTable map[destKey]netip.Addr // Maps original dest to rewritten dest for handler lookups
|
||||||
destRewriteTable map[destKey]netip.Addr // Maps original dest to rewritten dest for handler lookups
|
|
||||||
natMu sync.RWMutex
|
natMu sync.RWMutex
|
||||||
enabled bool
|
enabled bool
|
||||||
icmpReplies chan []byte // Channel for ICMP reply packets to be sent back through the tunnel
|
icmpReplies chan []byte // Channel for ICMP reply packets to be sent back through the tunnel
|
||||||
@@ -135,7 +215,6 @@ func NewProxyHandler(options ProxyHandlerOptions) (*ProxyHandler, error) {
|
|||||||
enabled: true,
|
enabled: true,
|
||||||
subnetLookup: NewSubnetLookup(),
|
subnetLookup: NewSubnetLookup(),
|
||||||
natTable: make(map[connKey]*natState),
|
natTable: make(map[connKey]*natState),
|
||||||
reverseNatTable: make(map[reverseConnKey]*natState),
|
|
||||||
destRewriteTable: make(map[destKey]netip.Addr),
|
destRewriteTable: make(map[destKey]netip.Addr),
|
||||||
icmpReplies: make(chan []byte, 256), // Buffer for ICMP reply packets
|
icmpReplies: make(chan []byte, 256), // Buffer for ICMP reply packets
|
||||||
proxyEp: channel.New(1024, uint32(options.MTU), ""),
|
proxyEp: channel.New(1024, uint32(options.MTU), ""),
|
||||||
@@ -217,14 +296,6 @@ func (p *ProxyHandler) RemoveSubnetRule(sourcePrefix, destPrefix netip.Prefix) {
|
|||||||
p.subnetLookup.RemoveSubnet(sourcePrefix, destPrefix)
|
p.subnetLookup.RemoveSubnet(sourcePrefix, destPrefix)
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetAllRules returns all subnet rules from the proxy handler
|
|
||||||
func (p *ProxyHandler) GetAllRules() []SubnetRule {
|
|
||||||
if p == nil || !p.enabled {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
return p.subnetLookup.GetAllRules()
|
|
||||||
}
|
|
||||||
|
|
||||||
// LookupDestinationRewrite looks up the rewritten destination for a connection
|
// LookupDestinationRewrite looks up the rewritten destination for a connection
|
||||||
// This is used by TCP/UDP handlers to find the actual target address
|
// This is used by TCP/UDP handlers to find the actual target address
|
||||||
func (p *ProxyHandler) LookupDestinationRewrite(srcIP, dstIP string, dstPort uint16, proto uint8) (netip.Addr, bool) {
|
func (p *ProxyHandler) LookupDestinationRewrite(srcIP, dstIP string, dstPort uint16, proto uint8) (netip.Addr, bool) {
|
||||||
@@ -446,23 +517,10 @@ func (p *ProxyHandler) HandleIncomingPacket(packet []byte) bool {
|
|||||||
|
|
||||||
// Store NAT state for this connection
|
// Store NAT state for this connection
|
||||||
p.natMu.Lock()
|
p.natMu.Lock()
|
||||||
natEntry := &natState{
|
p.natTable[key] = &natState{
|
||||||
originalDst: dstAddr,
|
originalDst: dstAddr,
|
||||||
rewrittenTo: newDst,
|
rewrittenTo: newDst,
|
||||||
}
|
}
|
||||||
p.natTable[key] = natEntry
|
|
||||||
|
|
||||||
// Create reverse lookup key for O(1) reply packet lookups
|
|
||||||
// Key: (rewrittenTo, originalSrcIP, originalSrcPort, originalDstPort, proto)
|
|
||||||
reverseKey := reverseConnKey{
|
|
||||||
rewrittenTo: newDst.String(),
|
|
||||||
originalSrcIP: srcAddr.String(),
|
|
||||||
originalSrcPort: srcPort,
|
|
||||||
originalDstPort: dstPort,
|
|
||||||
proto: uint8(protocol),
|
|
||||||
}
|
|
||||||
p.reverseNatTable[reverseKey] = natEntry
|
|
||||||
|
|
||||||
// Store destination rewrite for handler lookups
|
// Store destination rewrite for handler lookups
|
||||||
p.destRewriteTable[dKey] = newDst
|
p.destRewriteTable[dKey] = newDst
|
||||||
p.natMu.Unlock()
|
p.natMu.Unlock()
|
||||||
@@ -661,22 +719,20 @@ func (p *ProxyHandler) ReadOutgoingPacket() *buffer.View {
|
|||||||
return view
|
return view
|
||||||
}
|
}
|
||||||
|
|
||||||
// Look up NAT state for reverse translation using O(1) reverse lookup map
|
// Look up NAT state for reverse translation
|
||||||
// Key: (rewrittenTo, originalSrcIP, originalSrcPort, originalDstPort, proto)
|
// The key uses the original dst (before rewrite), so for replies we need to
|
||||||
// For reply packets:
|
// find the entry where the rewritten address matches the current source
|
||||||
// - reply's srcIP = rewrittenTo (the address we rewrote to)
|
|
||||||
// - reply's dstIP = originalSrcIP (original source IP)
|
|
||||||
// - reply's srcPort = originalDstPort (original destination port)
|
|
||||||
// - reply's dstPort = originalSrcPort (original source port)
|
|
||||||
p.natMu.RLock()
|
p.natMu.RLock()
|
||||||
reverseKey := reverseConnKey{
|
var natEntry *natState
|
||||||
rewrittenTo: srcIP.String(), // Reply's source is the rewritten address
|
for k, entry := range p.natTable {
|
||||||
originalSrcIP: dstIP.String(), // Reply's destination is the original source
|
// Match: reply's dst should be original src, reply's src should be rewritten dst
|
||||||
originalSrcPort: dstPort, // Reply's destination port is the original source port
|
if k.srcIP == dstIP.String() && k.srcPort == dstPort &&
|
||||||
originalDstPort: srcPort, // Reply's source port is the original destination port
|
entry.rewrittenTo.String() == srcIP.String() && k.dstPort == srcPort &&
|
||||||
proto: uint8(protocol),
|
k.proto == uint8(protocol) {
|
||||||
|
natEntry = entry
|
||||||
|
break
|
||||||
|
}
|
||||||
}
|
}
|
||||||
natEntry := p.reverseNatTable[reverseKey]
|
|
||||||
p.natMu.RUnlock()
|
p.natMu.RUnlock()
|
||||||
|
|
||||||
if natEntry != nil {
|
if natEntry != nil {
|
||||||
|
|||||||
@@ -1,206 +0,0 @@
|
|||||||
package netstack2
|
|
||||||
|
|
||||||
import (
|
|
||||||
"net/netip"
|
|
||||||
"sync"
|
|
||||||
|
|
||||||
"github.com/gaissmai/bart"
|
|
||||||
"gvisor.dev/gvisor/pkg/tcpip"
|
|
||||||
"gvisor.dev/gvisor/pkg/tcpip/header"
|
|
||||||
)
|
|
||||||
|
|
||||||
// SubnetLookup provides fast IP subnet and port matching using BART (Binary Aggregated Range Tree)
|
|
||||||
// This uses BART Table for O(log n) prefix matching with Supernets() for efficient lookups
|
|
||||||
//
|
|
||||||
// Architecture:
|
|
||||||
// - Two-level BART structure for matching both source AND destination prefixes
|
|
||||||
// - Level 1: Source prefix -> Level 2 (destination prefix -> rules)
|
|
||||||
// - This reduces search space: only check destination prefixes for matching source prefixes
|
|
||||||
type SubnetLookup struct {
|
|
||||||
mu sync.RWMutex
|
|
||||||
// Two-level BART structure:
|
|
||||||
// Level 1: Source prefix -> Level 2 (destination prefix -> rules)
|
|
||||||
// This allows us to first match source prefix, then only check destination prefixes
|
|
||||||
// for matching source prefixes, reducing the search space significantly
|
|
||||||
sourceTrie *bart.Table[*destTrie]
|
|
||||||
}
|
|
||||||
|
|
||||||
// destTrie is a BART for destination prefixes, containing the actual rules
|
|
||||||
type destTrie struct {
|
|
||||||
trie *bart.Table[[]*SubnetRule]
|
|
||||||
rules []*SubnetRule // All rules for this source prefix (for iteration if needed)
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewSubnetLookup creates a new subnet lookup table using BART
|
|
||||||
func NewSubnetLookup() *SubnetLookup {
|
|
||||||
return &SubnetLookup{
|
|
||||||
sourceTrie: &bart.Table[*destTrie]{},
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// prefixEqual compares two prefixes after masking to handle host bits correctly.
|
|
||||||
// For example, 10.0.0.5/24 and 10.0.0.0/24 are treated as equal.
|
|
||||||
func prefixEqual(a, b netip.Prefix) bool {
|
|
||||||
return a.Masked() == b.Masked()
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddSubnet adds a subnet rule with source and destination prefixes and optional port restrictions
|
|
||||||
// If portRanges is nil or empty, all ports are allowed for this subnet
|
|
||||||
// rewriteTo can be either an IP/CIDR (e.g., "192.168.1.1/32") or a domain name (e.g., "example.com")
|
|
||||||
func (sl *SubnetLookup) AddSubnet(sourcePrefix, destPrefix netip.Prefix, rewriteTo string, portRanges []PortRange, disableIcmp bool) {
|
|
||||||
sl.mu.Lock()
|
|
||||||
defer sl.mu.Unlock()
|
|
||||||
|
|
||||||
rule := &SubnetRule{
|
|
||||||
SourcePrefix: sourcePrefix,
|
|
||||||
DestPrefix: destPrefix,
|
|
||||||
DisableIcmp: disableIcmp,
|
|
||||||
RewriteTo: rewriteTo,
|
|
||||||
PortRanges: portRanges,
|
|
||||||
}
|
|
||||||
|
|
||||||
// Canonicalize source prefix to handle host bits correctly
|
|
||||||
canonicalSourcePrefix := sourcePrefix.Masked()
|
|
||||||
|
|
||||||
// Get or create destination trie for this source prefix
|
|
||||||
destTriePtr, exists := sl.sourceTrie.Get(canonicalSourcePrefix)
|
|
||||||
if !exists {
|
|
||||||
// Create new destination trie for this source prefix
|
|
||||||
destTriePtr = &destTrie{
|
|
||||||
trie: &bart.Table[[]*SubnetRule]{},
|
|
||||||
rules: make([]*SubnetRule, 0),
|
|
||||||
}
|
|
||||||
sl.sourceTrie.Insert(canonicalSourcePrefix, destTriePtr)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Canonicalize destination prefix to handle host bits correctly
|
|
||||||
// BART masks prefixes internally, so we need to match that behavior in our bookkeeping
|
|
||||||
canonicalDestPrefix := destPrefix.Masked()
|
|
||||||
|
|
||||||
// Add rule to destination trie
|
|
||||||
// Original behavior: overwrite if same (sourcePrefix, destPrefix) exists
|
|
||||||
// Store as single-element slice to match original overwrite behavior
|
|
||||||
destTriePtr.trie.Insert(canonicalDestPrefix, []*SubnetRule{rule})
|
|
||||||
|
|
||||||
// Update destTriePtr.rules - remove old rule with same canonical prefix if exists, then add new one
|
|
||||||
// Use canonical comparison to handle cases like 10.0.0.5/24 vs 10.0.0.0/24
|
|
||||||
newRules := make([]*SubnetRule, 0, len(destTriePtr.rules)+1)
|
|
||||||
for _, r := range destTriePtr.rules {
|
|
||||||
if !prefixEqual(r.DestPrefix, canonicalDestPrefix) || !prefixEqual(r.SourcePrefix, canonicalSourcePrefix) {
|
|
||||||
newRules = append(newRules, r)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
newRules = append(newRules, rule)
|
|
||||||
destTriePtr.rules = newRules
|
|
||||||
}
|
|
||||||
|
|
||||||
// RemoveSubnet removes a subnet rule from the lookup table
|
|
||||||
func (sl *SubnetLookup) RemoveSubnet(sourcePrefix, destPrefix netip.Prefix) {
|
|
||||||
sl.mu.Lock()
|
|
||||||
defer sl.mu.Unlock()
|
|
||||||
|
|
||||||
// Canonicalize prefixes to handle host bits correctly
|
|
||||||
canonicalSourcePrefix := sourcePrefix.Masked()
|
|
||||||
canonicalDestPrefix := destPrefix.Masked()
|
|
||||||
|
|
||||||
destTriePtr, exists := sl.sourceTrie.Get(canonicalSourcePrefix)
|
|
||||||
if !exists {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove the rule - original behavior: delete exact (sourcePrefix, destPrefix) combination
|
|
||||||
// BART masks prefixes internally, so Delete works with canonical form
|
|
||||||
destTriePtr.trie.Delete(canonicalDestPrefix)
|
|
||||||
|
|
||||||
// Also remove from destTriePtr.rules using canonical comparison
|
|
||||||
// This ensures we remove rules even if they were added with host bits set
|
|
||||||
newDestRules := make([]*SubnetRule, 0, len(destTriePtr.rules))
|
|
||||||
for _, r := range destTriePtr.rules {
|
|
||||||
if !prefixEqual(r.DestPrefix, canonicalDestPrefix) || !prefixEqual(r.SourcePrefix, canonicalSourcePrefix) {
|
|
||||||
newDestRules = append(newDestRules, r)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
destTriePtr.rules = newDestRules
|
|
||||||
|
|
||||||
// Check if the trie is actually empty using BART's Size() method
|
|
||||||
// This is more efficient than iterating and ensures we clean up empty tries
|
|
||||||
// even if there were stale entries in the rules slice (which shouldn't happen
|
|
||||||
// with proper canonicalization, but this provides a definitive check)
|
|
||||||
if destTriePtr.trie.Size() == 0 {
|
|
||||||
sl.sourceTrie.Delete(canonicalSourcePrefix)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Match checks if a source IP, destination IP, port, and protocol match any subnet rule
|
|
||||||
// Returns the matched rule if ALL of these conditions are met:
|
|
||||||
// - The source IP is in the rule's source prefix
|
|
||||||
// - The destination IP is in the rule's destination prefix
|
|
||||||
// - The port is in an allowed range (or no port restrictions exist)
|
|
||||||
// - The protocol matches (or the port range allows both protocols)
|
|
||||||
//
|
|
||||||
// proto should be header.TCPProtocolNumber, header.UDPProtocolNumber, or header.ICMPv4ProtocolNumber
|
|
||||||
// Returns nil if no rule matches
|
|
||||||
// This uses BART's Supernets() for O(log n) prefix matching instead of O(n) iteration
|
|
||||||
func (sl *SubnetLookup) Match(srcIP, dstIP netip.Addr, port uint16, proto tcpip.TransportProtocolNumber) *SubnetRule {
|
|
||||||
sl.mu.RLock()
|
|
||||||
defer sl.mu.RUnlock()
|
|
||||||
|
|
||||||
// Convert IP addresses to /32 (IPv4) or /128 (IPv6) prefixes
|
|
||||||
// Supernets() finds all prefixes that contain this IP (i.e., are supernets of /32 or /128)
|
|
||||||
srcPrefix := netip.PrefixFrom(srcIP, srcIP.BitLen())
|
|
||||||
dstPrefix := netip.PrefixFrom(dstIP, dstIP.BitLen())
|
|
||||||
|
|
||||||
// Step 1: Find all source prefixes that contain srcIP using BART's Supernets
|
|
||||||
// This is O(log n) instead of O(n) iteration
|
|
||||||
// Supernets returns all prefixes that are supernets (contain) the given prefix
|
|
||||||
for _, destTriePtr := range sl.sourceTrie.Supernets(srcPrefix) {
|
|
||||||
if destTriePtr == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Step 2: Find all destination prefixes that contain dstIP
|
|
||||||
// This is also O(log n) for each matching source prefix
|
|
||||||
for _, rules := range destTriePtr.trie.Supernets(dstPrefix) {
|
|
||||||
if rules == nil {
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// Step 3: Check each rule for ICMP and port restrictions
|
|
||||||
for _, rule := range rules {
|
|
||||||
// Handle ICMP before port range check — ICMP has no ports
|
|
||||||
if proto == header.ICMPv4ProtocolNumber || proto == header.ICMPv6ProtocolNumber {
|
|
||||||
if rule.DisableIcmp {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
// ICMP is allowed; port ranges don't apply to ICMP
|
|
||||||
return rule
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check port restrictions
|
|
||||||
if len(rule.PortRanges) == 0 {
|
|
||||||
// No port restrictions, match!
|
|
||||||
return rule
|
|
||||||
}
|
|
||||||
|
|
||||||
// Check if port and protocol are in any of the allowed ranges
|
|
||||||
for _, pr := range rule.PortRanges {
|
|
||||||
if port >= pr.Min && port <= pr.Max {
|
|
||||||
// Check protocol compatibility
|
|
||||||
if pr.Protocol == "" {
|
|
||||||
// Empty protocol means allow both TCP and UDP
|
|
||||||
return rule
|
|
||||||
}
|
|
||||||
// Check if the packet protocol matches the port range protocol
|
|
||||||
if (pr.Protocol == "tcp" && proto == header.TCPProtocolNumber) ||
|
|
||||||
(pr.Protocol == "udp" && proto == header.UDPProtocolNumber) {
|
|
||||||
return rule
|
|
||||||
}
|
|
||||||
// Port matches but protocol doesn't - continue checking other ranges
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
@@ -369,15 +369,6 @@ func (net *Net) RemoveProxySubnetRule(sourcePrefix, destPrefix netip.Prefix) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetProxySubnetRules returns all subnet rules from the proxy handler
|
|
||||||
func (net *Net) GetProxySubnetRules() []SubnetRule {
|
|
||||||
tun := (*netTun)(net)
|
|
||||||
if tun.proxyHandler != nil {
|
|
||||||
return tun.proxyHandler.GetAllRules()
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// GetProxyHandler returns the proxy handler (for advanced use cases)
|
// GetProxyHandler returns the proxy handler (for advanced use cases)
|
||||||
// Returns nil if proxy is not enabled
|
// Returns nil if proxy is not enabled
|
||||||
func (net *Net) GetProxyHandler() *ProxyHandler {
|
func (net *Net) GetProxyHandler() *ProxyHandler {
|
||||||
|
|||||||
2
newt.iss
2
newt.iss
@@ -32,7 +32,7 @@ DefaultGroupName={#MyAppName}
|
|||||||
DisableProgramGroupPage=yes
|
DisableProgramGroupPage=yes
|
||||||
; Uncomment the following line to run in non administrative install mode (install for current user only).
|
; Uncomment the following line to run in non administrative install mode (install for current user only).
|
||||||
;PrivilegesRequired=lowest
|
;PrivilegesRequired=lowest
|
||||||
OutputBaseFilename=newt_windows_installer
|
OutputBaseFilename=mysetup
|
||||||
SolidCompression=yes
|
SolidCompression=yes
|
||||||
WizardStyle=modern
|
WizardStyle=modern
|
||||||
; Add this to ensure PATH changes are applied and the system is prompted for a restart if needed
|
; Add this to ensure PATH changes are applied and the system is prompted for a restart if needed
|
||||||
|
|||||||
@@ -736,28 +736,3 @@ func (pm *ProxyManager) PrintTargets() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetTargets returns a copy of the current TCP and UDP targets
|
|
||||||
// Returns map[listenIP]map[port]targetAddress for both TCP and UDP
|
|
||||||
func (pm *ProxyManager) GetTargets() (tcpTargets map[string]map[int]string, udpTargets map[string]map[int]string) {
|
|
||||||
pm.mutex.RLock()
|
|
||||||
defer pm.mutex.RUnlock()
|
|
||||||
|
|
||||||
tcpTargets = make(map[string]map[int]string)
|
|
||||||
for listenIP, targets := range pm.tcpTargets {
|
|
||||||
tcpTargets[listenIP] = make(map[int]string)
|
|
||||||
for port, targetAddr := range targets {
|
|
||||||
tcpTargets[listenIP][port] = targetAddr
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
udpTargets = make(map[string]map[int]string)
|
|
||||||
for listenIP, targets := range pm.udpTargets {
|
|
||||||
udpTargets[listenIP] = make(map[int]string)
|
|
||||||
for port, targetAddr := range targets {
|
|
||||||
udpTargets[listenIP][port] = targetAddr
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return tcpTargets, udpTargets
|
|
||||||
}
|
|
||||||
|
|||||||
22
scripts/append-release-notes.sh
Normal file
22
scripts/append-release-notes.sh
Normal file
@@ -0,0 +1,22 @@
|
|||||||
|
#!/usr/bin/env bash
# Append container image references to an existing draft GitHub release.
#
# Required environment:
#   TAG      - release tag to edit
#   GHCR_REF - GHCR image reference to document
#   DIGEST   - image digest
# Optional:
#   DH_REF   - Docker Hub image reference (defaults to "N/A")
set -euo pipefail

: "${TAG:?}"
: "${GHCR_REF:?}"
: "${DIGEST:?}"

NOTES_FILE="$(mktemp)"
# Clean up the temp file even when a gh command fails: under `set -e` the
# script would otherwise exit before the final rm and leak the file.
trap 'rm -f "${NOTES_FILE}"' EXIT

existing_body="$(gh release view "${TAG}" --json body --jq '.body')"
cat > "${NOTES_FILE}" <<EOF
${existing_body}

## Container Images
- GHCR: \`${GHCR_REF}\`
- Docker Hub: \`${DH_REF:-N/A}\`
**Digest:** \`${DIGEST}\`
EOF

gh release edit "${TAG}" --draft --notes-file "${NOTES_FILE}"
|
||||||
11
scripts/nfpm.yaml.tmpl
Normal file
11
scripts/nfpm.yaml.tmpl
Normal file
@@ -0,0 +1,11 @@
|
|||||||
|
name: __PKG_NAME__
|
||||||
|
arch: __ARCH__
|
||||||
|
platform: linux
|
||||||
|
version: __VERSION__
|
||||||
|
section: net
|
||||||
|
priority: optional
|
||||||
|
maintainer: fosrl
|
||||||
|
description: Newt - userspace tunnel client and TCP/UDP proxy
|
||||||
|
contents:
|
||||||
|
- src: build/newt
|
||||||
|
dst: /usr/bin/newt
|
||||||
149
scripts/publish-apt.sh
Normal file
149
scripts/publish-apt.sh
Normal file
@@ -0,0 +1,149 @@
|
|||||||
|
#!/usr/bin/env bash
# Build and publish the Newt APT repository: download .deb release assets,
# regenerate the repo metadata, sign it, and sync to S3 behind CloudFront.
set -euo pipefail

# ---- required env ----
: "${GH_REPO:?}"
: "${S3_BUCKET:?}"
: "${AWS_REGION:?}"
: "${CLOUDFRONT_DISTRIBUTION_ID:?}"
: "${PKG_NAME:?}"
: "${SUITE:?}"
: "${COMPONENT:?}"
: "${APT_GPG_PRIVATE_KEY:?}"

# Normalize the optional S3 prefix to always end in "/" when non-empty.
S3_PREFIX="${S3_PREFIX:-}"
if [[ -n "${S3_PREFIX}" && "${S3_PREFIX}" != */ ]]; then
  S3_PREFIX="${S3_PREFIX}/"
fi

WORKDIR="$(pwd)"
mkdir -p repo/apt assets build

# download_asset TAG PATTERN — fetch a release asset, retrying because assets
# may still be uploading when this job starts.
download_asset() {
  local tag="$1"
  local pattern="$2"
  local attempts=12

  for attempt in $(seq 1 "${attempts}"); do
    if gh release download "${tag}" -R "${GH_REPO}" -p "${pattern}" -D assets; then
      return 0
    fi
    echo "Asset ${pattern} not available yet (attempt ${attempt}/${attempts}); retrying..."
    sleep 5
  done

  echo "ERROR: Failed to download asset ${pattern} for ${tag} after ${attempts} attempts"
  return 1
}

# Import the signing key; tolerate "already imported" but verify below that a
# secret key is actually present.
echo "${APT_GPG_PRIVATE_KEY}" | gpg --batch --import >/dev/null 2>&1 || true

KEYID="$(gpg --list-secret-keys --with-colons | awk -F: '$1=="sec"{print $5; exit}')"
if [[ -z "${KEYID}" ]]; then
  echo "ERROR: No GPG secret key available after import."
  exit 1
fi

# Determine which tags to process
TAGS=""
if [[ "${BACKFILL_ALL:-false}" == "true" ]]; then
  echo "Backfill mode: collecting all release tags..."
  TAGS="$(gh release list -R "${GH_REPO}" --limit 200 --json tagName --jq '.[].tagName')"
else
  if [[ -n "${INPUT_TAG:-}" ]]; then
    TAGS="${INPUT_TAG}"
  elif [[ -n "${EVENT_TAG:-}" ]]; then
    TAGS="${EVENT_TAG}"
  elif [[ -n "${PUSH_TAG:-}" ]]; then
    TAGS="${PUSH_TAG}"
  else
    echo "No tag provided; using latest release tag..."
    TAGS="$(gh release view -R "${GH_REPO}" --json tagName --jq '.tagName')"
  fi
fi

echo "Tags to process:"
printf '%s\n' "${TAGS}"

# Pull existing repo from S3 so we keep older versions
echo "Sync existing repo from S3..."
aws s3 sync "s3://${S3_BUCKET}/${S3_PREFIX}apt/" repo/apt/ >/dev/null 2>&1 || true

# Download packages for each tag and place them in the pool
while IFS= read -r TAG; do
  [[ -z "${TAG}" ]] && continue
  echo "=== Processing tag: ${TAG} ==="

  rm -rf assets build
  mkdir -p assets build

  deb_amd64="${PKG_NAME}_${TAG}_amd64.deb"
  deb_arm64="${PKG_NAME}_${TAG}_arm64.deb"

  download_asset "${TAG}" "${deb_amd64}"
  download_asset "${TAG}" "${deb_arm64}"

  if [[ ! -f "assets/${deb_amd64}" ]]; then
    echo "ERROR: Missing release asset: ${deb_amd64}"
    exit 1
  fi
  if [[ ! -f "assets/${deb_arm64}" ]]; then
    echo "ERROR: Missing release asset: ${deb_arm64}"
    exit 1
  fi

  mkdir -p "repo/apt/pool/${COMPONENT}/${PKG_NAME:0:1}/${PKG_NAME}/"
  cp -v assets/*.deb "repo/apt/pool/${COMPONENT}/${PKG_NAME:0:1}/${PKG_NAME}/"

done <<< "${TAGS}"

# Regenerate metadata
cd repo/apt

for arch in amd64 arm64; do
  mkdir -p "dists/${SUITE}/${COMPONENT}/binary-${arch}"
  dpkg-scanpackages -a "${arch}" pool > "dists/${SUITE}/${COMPONENT}/binary-${arch}/Packages"
  gzip -fk "dists/${SUITE}/${COMPONENT}/binary-${arch}/Packages"
done

# Release file with hashes
cat > apt-ftparchive.conf <<EOF
APT::FTPArchive::Release::Origin "fosrl";
APT::FTPArchive::Release::Label "newt";
APT::FTPArchive::Release::Suite "${SUITE}";
APT::FTPArchive::Release::Codename "${SUITE}";
APT::FTPArchive::Release::Architectures "amd64 arm64";
APT::FTPArchive::Release::Components "${COMPONENT}";
APT::FTPArchive::Release::Description "Newt APT repository";
EOF

apt-ftparchive -c apt-ftparchive.conf release "dists/${SUITE}" > "dists/${SUITE}/Release"

# Sign Release (InRelease = inline clearsigned, Release.gpg = detached)
cd "dists/${SUITE}"

gpg --batch --yes --pinentry-mode loopback \
  ${APT_GPG_PASSPHRASE:+--passphrase "${APT_GPG_PASSPHRASE}"} \
  --local-user "${KEYID}" \
  --clearsign -o InRelease Release

gpg --batch --yes --pinentry-mode loopback \
  ${APT_GPG_PASSPHRASE:+--passphrase "${APT_GPG_PASSPHRASE}"} \
  --local-user "${KEYID}" \
  -abs -o Release.gpg Release

# Export public key into apt repo root
cd ../../..
gpg --batch --yes --armor --export "${KEYID}" > "${WORKDIR}/repo/apt/public.key"

# Upload to S3
echo "Uploading to S3..."
aws s3 sync "${WORKDIR}/repo/apt" "s3://${S3_BUCKET}/${S3_PREFIX}apt/" --delete

# Invalidate metadata
echo "CloudFront invalidation..."
aws cloudfront create-invalidation \
  --distribution-id "${CLOUDFRONT_DISTRIBUTION_ID}" \
  --paths "/${S3_PREFIX}apt/dists/*" "/${S3_PREFIX}apt/public.key"

# BUG FIX: REPO_BASE_URL is optional (not in the required-env list) but was
# referenced unguarded; under `set -u` that aborted the script with an
# "unbound variable" error after all the work had already completed.
echo "Done. Repo base: ${REPO_BASE_URL:-s3://${S3_BUCKET}/${S3_PREFIX}apt/}"
|
||||||
94
util/util.go
94
util/util.go
@@ -1,7 +1,6 @@
|
|||||||
package util
|
package util
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
@@ -15,99 +14,6 @@ import (
|
|||||||
"golang.zx2c4.com/wireguard/device"
|
"golang.zx2c4.com/wireguard/device"
|
||||||
)
|
)
|
||||||
|
|
||||||
// ResolveDomainUpstream resolves a domain name to an IP address, preferring
// the supplied upstream DNS servers (tried in order) and falling back to the
// system resolver when none are given. Protocol prefixes, trailing slashes,
// and an optional port are handled; a port, if present, is re-attached to the
// resolved address. Inputs that are already IP addresses are returned
// unchanged (normalized).
func ResolveDomainUpstream(domain string, publicDNS []string) (string, error) {
	domain = strings.TrimSpace(domain)

	// Strip any scheme first (before host/port splitting), then a trailing slash.
	domain = strings.TrimPrefix(domain, "http://")
	domain = strings.TrimPrefix(domain, "https://")
	domain = strings.TrimSuffix(domain, "/")

	host, port, err := net.SplitHostPort(domain)
	if err != nil {
		// No port present; treat the whole string as the host.
		host, port = domain, ""
	}

	// Already an IP address? SplitHostPort strips IPv6 brackets when a port
	// exists, but a bracketed address without a port must be unwrapped here.
	bare := strings.TrimPrefix(strings.TrimSuffix(host, "]"), "[")
	if parsed := net.ParseIP(bare); parsed != nil {
		if port == "" {
			return parsed.String(), nil
		}
		return net.JoinHostPort(parsed.String(), port), nil
	}

	var ips []net.IP
	if len(publicDNS) == 0 {
		// No upstream servers configured: use the system resolver.
		ips, err = net.LookupIP(host)
		if err != nil {
			return "", fmt.Errorf("DNS lookup failed: %v", err)
		}
	} else {
		var lastErr error
		for _, server := range publicDNS {
			// Default to port 53 when the upstream address has none.
			dnsAddr := server
			if _, _, splitErr := net.SplitHostPort(dnsAddr); splitErr != nil {
				dnsAddr = net.JoinHostPort(server, "53")
			}

			resolver := &net.Resolver{
				PreferGo: true,
				Dial: func(ctx context.Context, network, address string) (net.Conn, error) {
					var d net.Dialer
					return d.DialContext(ctx, "udp", dnsAddr)
				},
			}
			if ips, lastErr = resolver.LookupIP(context.Background(), "ip", host); lastErr == nil {
				break
			}
		}
		if lastErr != nil {
			return "", fmt.Errorf("DNS lookup failed using all upstream servers: %v", lastErr)
		}
	}

	if len(ips) == 0 {
		return "", fmt.Errorf("no IP addresses found for domain %s", host)
	}

	// Prefer the first IPv4 result; otherwise fall back to the first address
	// (which may be IPv6).
	ipAddr := ""
	for _, candidate := range ips {
		if v4 := candidate.To4(); v4 != nil {
			ipAddr = v4.String()
			break
		}
	}
	if ipAddr == "" {
		ipAddr = ips[0].String()
	}

	// Re-attach the original port, if any.
	if port != "" {
		ipAddr = net.JoinHostPort(ipAddr, port)
	}
	return ipAddr, nil
}
|
|
||||||
|
|
||||||
|
|
||||||
func ResolveDomain(domain string) (string, error) {
|
func ResolveDomain(domain string) (string, error) {
|
||||||
// trim whitespace
|
// trim whitespace
|
||||||
domain = strings.TrimSpace(domain)
|
domain = strings.TrimSpace(domain)
|
||||||
|
|||||||
@@ -2,7 +2,6 @@ package websocket
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"compress/gzip"
|
|
||||||
"crypto/tls"
|
"crypto/tls"
|
||||||
"crypto/x509"
|
"crypto/x509"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
@@ -38,6 +37,7 @@ type Client struct {
|
|||||||
isConnected bool
|
isConnected bool
|
||||||
reconnectMux sync.RWMutex
|
reconnectMux sync.RWMutex
|
||||||
pingInterval time.Duration
|
pingInterval time.Duration
|
||||||
|
pingTimeout time.Duration
|
||||||
onConnect func() error
|
onConnect func() error
|
||||||
onTokenUpdate func(token string)
|
onTokenUpdate func(token string)
|
||||||
writeMux sync.Mutex
|
writeMux sync.Mutex
|
||||||
@@ -47,11 +47,6 @@ type Client struct {
|
|||||||
metricsCtx context.Context
|
metricsCtx context.Context
|
||||||
configNeedsSave bool // Flag to track if config needs to be saved
|
configNeedsSave bool // Flag to track if config needs to be saved
|
||||||
serverVersion string
|
serverVersion string
|
||||||
configVersion int64 // Latest config version received from server
|
|
||||||
configVersionMux sync.RWMutex
|
|
||||||
processingMessage bool // Flag to track if a message is currently being processed
|
|
||||||
processingMux sync.RWMutex // Protects processingMessage
|
|
||||||
processingWg sync.WaitGroup // WaitGroup to wait for message processing to complete
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type ClientOption func(*Client)
|
type ClientOption func(*Client)
|
||||||
@@ -116,7 +111,7 @@ func (c *Client) MetricsContext() context.Context {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// NewClient creates a new websocket client
|
// NewClient creates a new websocket client
|
||||||
func NewClient(clientType string, ID, secret string, endpoint string, pingInterval time.Duration, opts ...ClientOption) (*Client, error) {
|
func NewClient(clientType string, ID, secret string, endpoint string, pingInterval time.Duration, pingTimeout time.Duration, opts ...ClientOption) (*Client, error) {
|
||||||
config := &Config{
|
config := &Config{
|
||||||
ID: ID,
|
ID: ID,
|
||||||
Secret: secret,
|
Secret: secret,
|
||||||
@@ -131,6 +126,7 @@ func NewClient(clientType string, ID, secret string, endpoint string, pingInterv
|
|||||||
reconnectInterval: 3 * time.Second,
|
reconnectInterval: 3 * time.Second,
|
||||||
isConnected: false,
|
isConnected: false,
|
||||||
pingInterval: pingInterval,
|
pingInterval: pingInterval,
|
||||||
|
pingTimeout: pingTimeout,
|
||||||
clientType: clientType,
|
clientType: clientType,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -158,20 +154,6 @@ func (c *Client) GetServerVersion() string {
|
|||||||
return c.serverVersion
|
return c.serverVersion
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetConfigVersion returns the latest config version received from server
|
|
||||||
func (c *Client) GetConfigVersion() int64 {
|
|
||||||
c.configVersionMux.RLock()
|
|
||||||
defer c.configVersionMux.RUnlock()
|
|
||||||
return c.configVersion
|
|
||||||
}
|
|
||||||
|
|
||||||
// setConfigVersion updates the config version
|
|
||||||
func (c *Client) setConfigVersion(version int64) {
|
|
||||||
c.configVersionMux.Lock()
|
|
||||||
defer c.configVersionMux.Unlock()
|
|
||||||
c.configVersion = version
|
|
||||||
}
|
|
||||||
|
|
||||||
// Connect establishes the WebSocket connection
|
// Connect establishes the WebSocket connection
|
||||||
func (c *Client) Connect() error {
|
func (c *Client) Connect() error {
|
||||||
go c.connectWithRetry()
|
go c.connectWithRetry()
|
||||||
@@ -659,57 +641,7 @@ func (c *Client) setupPKCS12TLS() (*tls.Config, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// pingMonitor sends pings at a short interval and triggers reconnect on failure
|
// pingMonitor sends pings at a short interval and triggers reconnect on failure
|
||||||
func (c *Client) sendPing() {
|
|
||||||
if c.conn == nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Skip ping if a message is currently being processed
|
|
||||||
c.processingMux.RLock()
|
|
||||||
isProcessing := c.processingMessage
|
|
||||||
c.processingMux.RUnlock()
|
|
||||||
if isProcessing {
|
|
||||||
logger.Debug("Skipping ping, message is being processed")
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
c.configVersionMux.RLock()
|
|
||||||
configVersion := c.configVersion
|
|
||||||
c.configVersionMux.RUnlock()
|
|
||||||
|
|
||||||
pingMsg := WSMessage{
|
|
||||||
Type: "newt/ping",
|
|
||||||
Data: map[string]interface{}{},
|
|
||||||
ConfigVersion: configVersion,
|
|
||||||
}
|
|
||||||
|
|
||||||
c.writeMux.Lock()
|
|
||||||
err := c.conn.WriteJSON(pingMsg)
|
|
||||||
if err == nil {
|
|
||||||
telemetry.IncWSMessage(c.metricsContext(), "out", "ping")
|
|
||||||
}
|
|
||||||
c.writeMux.Unlock()
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
// Check if we're shutting down before logging error and reconnecting
|
|
||||||
select {
|
|
||||||
case <-c.done:
|
|
||||||
// Expected during shutdown
|
|
||||||
return
|
|
||||||
default:
|
|
||||||
logger.Error("Ping failed: %v", err)
|
|
||||||
telemetry.IncWSKeepaliveFailure(c.metricsContext(), "ping_write")
|
|
||||||
telemetry.IncWSReconnect(c.metricsContext(), "ping_write")
|
|
||||||
c.reconnect()
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (c *Client) pingMonitor() {
|
func (c *Client) pingMonitor() {
|
||||||
// Send an immediate ping as soon as we connect
|
|
||||||
c.sendPing()
|
|
||||||
|
|
||||||
ticker := time.NewTicker(c.pingInterval)
|
ticker := time.NewTicker(c.pingInterval)
|
||||||
defer ticker.Stop()
|
defer ticker.Stop()
|
||||||
|
|
||||||
@@ -718,7 +650,29 @@ func (c *Client) pingMonitor() {
|
|||||||
case <-c.done:
|
case <-c.done:
|
||||||
return
|
return
|
||||||
case <-ticker.C:
|
case <-ticker.C:
|
||||||
c.sendPing()
|
if c.conn == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
c.writeMux.Lock()
|
||||||
|
err := c.conn.WriteControl(websocket.PingMessage, []byte{}, time.Now().Add(c.pingTimeout))
|
||||||
|
if err == nil {
|
||||||
|
telemetry.IncWSMessage(c.metricsContext(), "out", "ping")
|
||||||
|
}
|
||||||
|
c.writeMux.Unlock()
|
||||||
|
if err != nil {
|
||||||
|
// Check if we're shutting down before logging error and reconnecting
|
||||||
|
select {
|
||||||
|
case <-c.done:
|
||||||
|
// Expected during shutdown
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
logger.Error("Ping failed: %v", err)
|
||||||
|
telemetry.IncWSKeepaliveFailure(c.metricsContext(), "ping_write")
|
||||||
|
telemetry.IncWSReconnect(c.metricsContext(), "ping_write")
|
||||||
|
c.reconnect()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -755,13 +709,10 @@ func (c *Client) readPumpWithDisconnectDetection(started time.Time) {
|
|||||||
disconnectResult = "success"
|
disconnectResult = "success"
|
||||||
return
|
return
|
||||||
default:
|
default:
|
||||||
msgType, p, err := c.conn.ReadMessage()
|
var msg WSMessage
|
||||||
|
err := c.conn.ReadJSON(&msg)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
if msgType == websocket.BinaryMessage {
|
telemetry.IncWSMessage(c.metricsContext(), "in", "text")
|
||||||
telemetry.IncWSMessage(c.metricsContext(), "in", "binary")
|
|
||||||
} else {
|
|
||||||
telemetry.IncWSMessage(c.metricsContext(), "in", "text")
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Check if we're shutting down before logging error
|
// Check if we're shutting down before logging error
|
||||||
@@ -786,47 +737,9 @@ func (c *Client) readPumpWithDisconnectDetection(started time.Time) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Update config version from incoming message
|
|
||||||
var data []byte
|
|
||||||
if msgType == websocket.BinaryMessage {
|
|
||||||
gr, err := gzip.NewReader(bytes.NewReader(p))
|
|
||||||
if err != nil {
|
|
||||||
logger.Error("WebSocket failed to create gzip reader: %v", err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
data, err = io.ReadAll(gr)
|
|
||||||
gr.Close()
|
|
||||||
if err != nil {
|
|
||||||
logger.Error("WebSocket failed to decompress message: %v", err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
data = p
|
|
||||||
}
|
|
||||||
|
|
||||||
var msg WSMessage
|
|
||||||
if err = json.Unmarshal(data, &msg); err != nil {
|
|
||||||
logger.Error("WebSocket failed to parse message: %v", err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
c.setConfigVersion(msg.ConfigVersion)
|
|
||||||
|
|
||||||
c.handlersMux.RLock()
|
c.handlersMux.RLock()
|
||||||
if handler, ok := c.handlers[msg.Type]; ok {
|
if handler, ok := c.handlers[msg.Type]; ok {
|
||||||
// Mark that we're processing a message
|
|
||||||
c.processingMux.Lock()
|
|
||||||
c.processingMessage = true
|
|
||||||
c.processingMux.Unlock()
|
|
||||||
c.processingWg.Add(1)
|
|
||||||
|
|
||||||
handler(msg)
|
handler(msg)
|
||||||
|
|
||||||
// Mark that we're done processing
|
|
||||||
c.processingWg.Done()
|
|
||||||
c.processingMux.Lock()
|
|
||||||
c.processingMessage = false
|
|
||||||
c.processingMux.Unlock()
|
|
||||||
}
|
}
|
||||||
c.handlersMux.RUnlock()
|
c.handlersMux.RUnlock()
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -17,7 +17,6 @@ type TokenResponse struct {
|
|||||||
}
|
}
|
||||||
|
|
||||||
type WSMessage struct {
|
type WSMessage struct {
|
||||||
Type string `json:"type"`
|
Type string `json:"type"`
|
||||||
Data interface{} `json:"data"`
|
Data interface{} `json:"data"`
|
||||||
ConfigVersion int64 `json:"configVersion,omitempty"`
|
|
||||||
}
|
}
|
||||||
|
|||||||
Reference in New Issue
Block a user