Compare commits

..

76 Commits

Author SHA1 Message Date
dependabot[bot]
e7ef3a9b37 Bump actions/setup-go from 6.1.0 to 6.2.0
Bumps [actions/setup-go](https://github.com/actions/setup-go) from 6.1.0 to 6.2.0.
- [Release notes](https://github.com/actions/setup-go/releases)
- [Commits](4dc6199c7b...7a3fe6cf4c)

---
updated-dependencies:
- dependency-name: actions/setup-go
  dependency-version: 6.2.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Former-commit-id: 3d3f32d95f
2026-01-21 20:56:09 +00:00
Owen
e3f5497176 Add stale bot
Former-commit-id: 313dee9ba8
2026-01-19 17:12:15 -08:00
dependabot[bot]
6a5dcc01a6 Bump actions/checkout from 5.0.0 to 6.0.1
Bumps [actions/checkout](https://github.com/actions/checkout) from 5.0.0 to 6.0.1.
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](08c6903cd8...8e8c483db8)

---
updated-dependencies:
- dependency-name: actions/checkout
  dependency-version: 6.0.1
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
Former-commit-id: e19b33e2fa
2026-01-19 17:08:10 -08:00
dependabot[bot]
18b6d3bb0f Bump the patch-updates group across 1 directory with 3 updates
Bumps the patch-updates group with 3 updates in the / directory: [github.com/fosrl/newt](https://github.com/fosrl/newt), [github.com/godbus/dbus/v5](https://github.com/godbus/dbus) and [github.com/miekg/dns](https://github.com/miekg/dns).


Updates `github.com/fosrl/newt` from 1.8.0 to 1.8.1
- [Release notes](https://github.com/fosrl/newt/releases)
- [Commits](https://github.com/fosrl/newt/compare/1.8.0...1.8.1)

Updates `github.com/godbus/dbus/v5` from 5.2.0 to 5.2.2
- [Release notes](https://github.com/godbus/dbus/releases)
- [Commits](https://github.com/godbus/dbus/compare/v5.2.0...v5.2.2)

Updates `github.com/miekg/dns` from 1.1.68 to 1.1.70
- [Commits](https://github.com/miekg/dns/compare/v1.1.68...v1.1.70)

---
updated-dependencies:
- dependency-name: github.com/fosrl/newt
  dependency-version: 1.8.1
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: patch-updates
- dependency-name: github.com/godbus/dbus/v5
  dependency-version: 5.2.2
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: patch-updates
- dependency-name: github.com/miekg/dns
  dependency-version: 1.1.70
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: patch-updates
...

Signed-off-by: dependabot[bot] <support@github.com>
Former-commit-id: 69f25032cb
2026-01-19 17:08:00 -08:00
dependabot[bot]
ccbfdc5265 Bump docker/metadata-action from 5.9.0 to 5.10.0
Bumps [docker/metadata-action](https://github.com/docker/metadata-action) from 5.9.0 to 5.10.0.
- [Release notes](https://github.com/docker/metadata-action/releases)
- [Commits](318604b99e...c299e40c65)

---
updated-dependencies:
- dependency-name: docker/metadata-action
  dependency-version: 5.10.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Former-commit-id: 225779c665
2026-01-19 17:06:44 -08:00
dependabot[bot]
ab04537278 Bump softprops/action-gh-release from 2.4.2 to 2.5.0
Bumps [softprops/action-gh-release](https://github.com/softprops/action-gh-release) from 2.4.2 to 2.5.0.
- [Release notes](https://github.com/softprops/action-gh-release/releases)
- [Changelog](https://github.com/softprops/action-gh-release/blob/master/CHANGELOG.md)
- [Commits](5be0e66d93...a06a81a03e)

---
updated-dependencies:
- dependency-name: softprops/action-gh-release
  dependency-version: 2.5.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Former-commit-id: a7f029e232
2026-01-19 17:06:23 -08:00
dependabot[bot]
29c36c9837 Bump docker/setup-buildx-action from 3.11.1 to 3.12.0
Bumps [docker/setup-buildx-action](https://github.com/docker/setup-buildx-action) from 3.11.1 to 3.12.0.
- [Release notes](https://github.com/docker/setup-buildx-action/releases)
- [Commits](e468171a9d...8d2750c68a)

---
updated-dependencies:
- dependency-name: docker/setup-buildx-action
  dependency-version: 3.12.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
...

Signed-off-by: dependabot[bot] <support@github.com>
Former-commit-id: af4e74de81
2026-01-19 17:05:50 -08:00
dependabot[bot]
c47e9bf547 Bump actions/cache from 4.3.0 to 5.0.2
Bumps [actions/cache](https://github.com/actions/cache) from 4.3.0 to 5.0.2.
- [Release notes](https://github.com/actions/cache/releases)
- [Changelog](https://github.com/actions/cache/blob/main/RELEASES.md)
- [Commits](0057852bfa...8b402f58fb)

---
updated-dependencies:
- dependency-name: actions/cache
  dependency-version: 5.0.2
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
Former-commit-id: f87d043d59
2026-01-19 17:05:42 -08:00
dependabot[bot]
abb682c935 Bump the minor-updates group across 1 directory with 2 updates
Bumps the minor-updates group with 2 updates in the / directory: [golang.org/x/sys](https://github.com/golang/sys) and software.sslmate.com/src/go-pkcs12.


Updates `golang.org/x/sys` from 0.38.0 to 0.40.0
- [Commits](https://github.com/golang/sys/compare/v0.38.0...v0.40.0)

Updates `software.sslmate.com/src/go-pkcs12` from 0.6.0 to 0.7.0

---
updated-dependencies:
- dependency-name: golang.org/x/sys
  dependency-version: 0.40.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: minor-updates
- dependency-name: software.sslmate.com/src/go-pkcs12
  dependency-version: 0.7.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: minor-updates
...

Signed-off-by: dependabot[bot] <support@github.com>
Former-commit-id: ae1436c5d1
2026-01-19 17:05:19 -08:00
Lokowitz
a83cc2a3a3 clean up dependabot
Former-commit-id: a37f0514c4
2026-01-17 15:43:06 -08:00
Lokowitz
d56537d0fd add docker build dev
Former-commit-id: b983216808
2026-01-17 15:43:06 -08:00
Lokowitz
31bb483e40 add qemu
Former-commit-id: 172eb97aa1
2026-01-17 15:43:06 -08:00
Lokowitz
cd91ae6e3a update test
Former-commit-id: b034f81ed9
2026-01-17 15:43:06 -08:00
Lokowitz
a9ec1e61d3 fix test
Former-commit-id: 076d01b48c
2026-01-17 15:43:06 -08:00
Owen
7bb004cf50 Update docs
Former-commit-id: 543ca05eb9
2025-12-29 22:15:01 -05:00
Owen
88cc57bcef Update mod
Former-commit-id: 1b474ebc1c
2025-12-23 18:00:15 -05:00
Owen
385c64c364 Dont run on v tags
Former-commit-id: 69a00b6231
2025-12-23 17:54:04 -05:00
Owen
4e3e824276 Fix latest
Former-commit-id: 6fcd8ac6cb
2025-12-22 21:32:59 -05:00
Owen
effc1a31ac Update readme
Former-commit-id: 44282226b4
2025-12-22 17:24:51 -05:00
Owen
03051a37fe Update mod
Former-commit-id: ca5105b6b2
2025-12-22 16:19:45 -05:00
Owen
8cf2a28b6f Merge branch 'main' into dev
Former-commit-id: 075daed0a0
2025-12-22 14:05:04 -05:00
Owen
9f3422de1b Parallel the go build
Former-commit-id: aee6f24001
2025-12-22 14:02:18 -05:00
Owen
e6d0e9bb13 Update test
Former-commit-id: 91c9c48507
2025-12-21 21:07:26 -05:00
Owen
da0ad21fd4 Update test
Former-commit-id: 449e631aae
2025-12-21 21:07:14 -05:00
Owen
2940f16f19 Build binaries and do release
Former-commit-id: 2813de80ff
2025-12-21 21:04:54 -05:00
Owen
44c8d871c2 Build binaries and do release
Former-commit-id: 8aaefde72a
2025-12-21 21:04:24 -05:00
Owen
96a88057f9 Update mod
Former-commit-id: b026bea86e
2025-12-21 21:03:48 -05:00
Owen
d96fe6391e Remove replace
Former-commit-id: 5551eff130
2025-12-21 21:03:48 -05:00
Owen
fe7fd31955 Sending DNS over the tunnel works
Former-commit-id: ca763fff2d
2025-12-21 21:03:48 -05:00
Owen
86b19f243e Remove exit nodes from HPing if peers are removed
Former-commit-id: 0c96d3c25c
2025-12-21 21:03:48 -05:00
Owen
d0940d03c4 Cleanup unclean shutdown cross platform
Former-commit-id: c1a2efd9d2
2025-12-21 21:03:48 -05:00
Owen
5a51753dbf Update mod
Former-commit-id: cebefa9800
2025-12-21 21:02:45 -05:00
Owen
70be82d68a Remove replace
Former-commit-id: 014eccaf62
2025-12-21 20:58:33 -05:00
Owen
8b68f00f59 Sending DNS over the tunnel works
Former-commit-id: 304174ca2f
2025-12-19 10:55:37 -05:00
Owen
fe197f0a0b Remove exit nodes from HPing if peers are removed
Former-commit-id: a4365988eb
2025-12-18 15:04:20 -05:00
Owen
675c934ce1 Cleanup unclean shutdown cross platform
Former-commit-id: de18c0cc6d
2025-12-18 11:33:59 -05:00
Owen Schwartz
708c761fa6 Merge pull request #63 from fosrl/wildcard-resources
Wildcard resources

Former-commit-id: ee8b93b13a
2025-12-16 21:50:22 -05:00
Owen
78dc6508a4 Support wildcard alias records
Former-commit-id: cec79bf014
2025-12-16 21:33:41 -05:00
Owen
7f6c824122 Pull 21820 from config
Former-commit-id: 56f4614899
2025-12-16 18:36:04 -05:00
Owen
9ba3569573 Remove acciential file
Former-commit-id: c3a12bd2a9
2025-12-11 23:26:21 -05:00
Owen
fd38f4cc59 Fix test
Former-commit-id: e0efe8f950
2025-12-11 23:25:56 -05:00
Owen
c5d5fcedd9 Make sure to process version first
Former-commit-id: f0309857b9
2025-12-11 19:37:51 -05:00
Owen
13c0a082b5 Update cicd
Former-commit-id: 5a6fcadf91
2025-12-11 16:40:55 -05:00
Owen
48962d4b65 Make cicd create draft
Former-commit-id: 6e31d3dcd5
2025-12-11 16:09:34 -05:00
Owen
c469707986 Update cicd to use right username
Former-commit-id: 41e6324c79
2025-12-11 16:04:25 -05:00
Owen
13c40f6b2c Update cicd
Former-commit-id: 5757e8dca8
2025-12-11 16:00:20 -05:00
Owen
6071be0d08 Merge branch 'dev'
Former-commit-id: 382515a85c
2025-12-11 15:40:51 -05:00
Owen
4b269782ea Update iss
Former-commit-id: 5da2198b35
2025-12-11 12:29:43 -05:00
Owen
518bf0e36a Update link
Former-commit-id: 14f7682be5
2025-12-10 21:03:17 -05:00
Owen
c80bb9740a Update readme
Former-commit-id: 0d27206b28
2025-12-10 16:23:58 -05:00
Owen
3ceef1ef74 Small adjustments
Former-commit-id: df1c2c18e0
2025-12-10 14:06:36 -05:00
Owen
acb0b4a9a5 Fix ipv6 connectivity
Former-commit-id: 61065def17
2025-12-10 10:34:30 -05:00
Owen
29aa68ecf7 Fix docker ignore
Former-commit-id: f24add4f72
2025-12-08 14:05:48 -05:00
Owen
50a97b19d1 Use explicit newt version not local
Former-commit-id: 630b55008b
2025-12-08 14:00:59 -05:00
Owen
229ce7504f Merge branch 'dev'
Former-commit-id: ead73ca1c5
2025-12-08 12:12:47 -05:00
Owen
b4f3619aff Update test
Former-commit-id: 25644db2f3
2025-12-08 12:12:35 -05:00
Owen Schwartz
e77a4fbd66 Merge pull request #57 from fosrl/dev
Add robust client connectivity support

Former-commit-id: 1f15ecc4b2
2025-12-08 12:05:52 -05:00
Owen
f8f368a981 Update readme
Former-commit-id: 1687099c52
2025-12-07 21:25:24 -05:00
Owen
153b986100 Adapt args to work on windows
Former-commit-id: 7546fc82ac
2025-12-07 17:44:10 -05:00
Owen
1c47c0981c Fix small bugs =
Former-commit-id: 02c838eb86
2025-12-07 12:05:27 -05:00
Owen
defd85e118 Add site name
Former-commit-id: 2a60de4f1f
2025-12-07 10:52:22 -05:00
Owen
ec1085f5f7 Merge branch 'main' into dev
Former-commit-id: b2814dc157
2025-12-06 21:09:44 -05:00
Owen Schwartz
a39e6d4f2b Merge pull request #54 from fosrl/dependabot/go_modules/prod-minor-updates-dd7da38a6b
Bump golang.org/x/crypto from 0.44.0 to 0.45.0 in the prod-minor-updates group

Former-commit-id: a9efe7c91a
2025-12-06 12:01:08 -05:00
Owen Schwartz
4875835024 Merge pull request #55 from fosrl/dependabot/github_actions/actions/checkout-6
Bump actions/checkout from 5 to 6

Former-commit-id: 50b9dae88c
2025-12-06 12:00:51 -05:00
Owen Schwartz
f5a74c36f8 Merge pull request #56 from fosrl/dependabot/docker/minor-updates-60be0b6e22
Bump alpine from 3.22 to 3.23 in the minor-updates group

Former-commit-id: 84257a094e
2025-12-06 12:00:38 -05:00
dependabot[bot]
3e24a77625 Bump alpine from 3.22 to 3.23 in the minor-updates group
Bumps the minor-updates group with 1 update: alpine.


Updates `alpine` from 3.22 to 3.23

---
updated-dependencies:
- dependency-name: alpine
  dependency-version: '3.23'
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: minor-updates
...

Signed-off-by: dependabot[bot] <support@github.com>
Former-commit-id: a9423b01e6
2025-12-03 20:24:29 +00:00
dependabot[bot]
534631fb27 Bump actions/checkout from 5 to 6
Bumps [actions/checkout](https://github.com/actions/checkout) from 5 to 6.
- [Release notes](https://github.com/actions/checkout/releases)
- [Changelog](https://github.com/actions/checkout/blob/main/CHANGELOG.md)
- [Commits](https://github.com/actions/checkout/compare/v5...v6)

---
updated-dependencies:
- dependency-name: actions/checkout
  dependency-version: '6'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
Former-commit-id: bd9e8857bf
2025-11-24 20:46:59 +00:00
dependabot[bot]
f93f73f541 Bump golang.org/x/crypto in the prod-minor-updates group
Bumps the prod-minor-updates group with 1 update: [golang.org/x/crypto](https://github.com/golang/crypto).


Updates `golang.org/x/crypto` from 0.44.0 to 0.45.0
- [Commits](https://github.com/golang/crypto/compare/v0.44.0...v0.45.0)

---
updated-dependencies:
- dependency-name: golang.org/x/crypto
  dependency-version: 0.45.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: prod-minor-updates
...

Signed-off-by: dependabot[bot] <support@github.com>
Former-commit-id: 0fc7f22f1a
2025-11-19 20:29:21 +00:00
Owen Schwartz
b87f90c211 Merge pull request #48 from fosrl/dependabot/github_actions/actions/upload-artifact-5
Bump actions/upload-artifact from 4 to 5

Former-commit-id: f7f072b919
2025-11-14 10:28:22 -05:00
Owen Schwartz
3e0cefa3dc Merge pull request #53 from fosrl/dependabot/go_modules/prod-minor-updates-aca723b595
Bump the prod-minor-updates group across 1 directory with 2 updates

Former-commit-id: 616ab62bc1
2025-11-14 10:27:44 -05:00
Owen Schwartz
2fe3359ae8 Merge pull request #52 from fosrl/copilot/fix-system-path-removal
Fix Windows uninstaller deleting entire System PATH instead of OLM entry

Former-commit-id: 0398e68788
2025-11-14 10:27:30 -05:00
dependabot[bot]
0aa8f07be3 Bump the prod-minor-updates group across 1 directory with 2 updates
Bumps the prod-minor-updates group with 1 update in the / directory: [golang.org/x/crypto](https://github.com/golang/crypto).


Updates `golang.org/x/crypto` from 0.43.0 to 0.44.0
- [Commits](https://github.com/golang/crypto/compare/v0.43.0...v0.44.0)

Updates `golang.org/x/sys` from 0.37.0 to 0.38.0
- [Commits](https://github.com/golang/sys/compare/v0.37.0...v0.38.0)

---
updated-dependencies:
- dependency-name: golang.org/x/crypto
  dependency-version: 0.44.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: prod-minor-updates
- dependency-name: golang.org/x/sys
  dependency-version: 0.38.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: prod-minor-updates
...

Signed-off-by: dependabot[bot] <support@github.com>
Former-commit-id: e061955e90
2025-11-11 20:19:38 +00:00
copilot-swe-agent[bot]
36d47a7331 Refactor PATH removal to use TStringList for more robust parsing
Co-authored-by: oschwartz10612 <4999704+oschwartz10612@users.noreply.github.com>

Former-commit-id: 91f0230d21
2025-11-11 01:46:51 +00:00
copilot-swe-agent[bot]
10fa5acb0b Fix Windows PATH removal issue by implementing custom uninstall procedure
Co-authored-by: oschwartz10612 <4999704+oschwartz10612@users.noreply.github.com>

Former-commit-id: 1168f5541c
2025-11-11 01:44:40 +00:00
copilot-swe-agent[bot]
e3a679609f Initial plan
Former-commit-id: d910034ea1
2025-11-11 01:41:18 +00:00
dependabot[bot]
b7a04dc511 Bump actions/upload-artifact from 4 to 5
Bumps [actions/upload-artifact](https://github.com/actions/upload-artifact) from 4 to 5.
- [Release notes](https://github.com/actions/upload-artifact/releases)
- [Commits](https://github.com/actions/upload-artifact/compare/v4...v5)

---
updated-dependencies:
- dependency-name: actions/upload-artifact
  dependency-version: '5'
  dependency-type: direct:production
  update-type: version-update:semver-major
...

Signed-off-by: dependabot[bot] <support@github.com>
Former-commit-id: 33e28ead68
2025-10-27 21:16:53 +00:00
33 changed files with 2464 additions and 582 deletions

View File

@@ -1,9 +1,9 @@
.gitignore
.dockerignore
olm
*.json
README.md
Makefile
public/
LICENSE
CONTRIBUTING.md
CONTRIBUTING.md
bin/

View File

@@ -5,20 +5,10 @@ updates:
schedule:
interval: "daily"
groups:
dev-patch-updates:
dependency-type: "development"
patch-updates:
update-types:
- "patch"
dev-minor-updates:
dependency-type: "development"
update-types:
- "minor"
prod-patch-updates:
dependency-type: "production"
update-types:
- "patch"
prod-minor-updates:
dependency-type: "production"
minor-updates:
update-types:
- "minor"

View File

@@ -1,60 +1,615 @@
name: CI/CD Pipeline
permissions:
contents: write # gh-release
packages: write # GHCR push
id-token: write # Keyless-Signatures & Attestations
attestations: write # actions/attest-build-provenance
security-events: write # upload-sarif
actions: read
on:
push:
tags:
- "*"
push:
tags:
- "[0-9]+.[0-9]+.[0-9]+"
- "[0-9]+.[0-9]+.[0-9]+-rc.[0-9]+"
workflow_dispatch:
inputs:
version:
description: "SemVer version to release (e.g., 1.2.3, no leading 'v')"
required: true
type: string
publish_latest:
description: "Also publish the 'latest' image tag"
required: true
type: boolean
default: false
publish_minor:
description: "Also publish the 'major.minor' image tag (e.g., 1.2)"
required: true
type: boolean
default: false
target_branch:
description: "Branch to tag"
required: false
default: "main"
concurrency:
group: ${{ github.workflow }}-${{ github.event_name == 'workflow_dispatch' && github.event.inputs.version || github.ref_name }}
cancel-in-progress: true
jobs:
release:
name: Build and Release
runs-on: amd64-runner
prepare:
if: github.event_name == 'workflow_dispatch'
name: Prepare release (create tag)
runs-on: ubuntu-24.04
permissions:
contents: write
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
fetch-depth: 0
steps:
- name: Checkout code
uses: actions/checkout@v5
- name: Validate version input
shell: bash
env:
INPUT_VERSION: ${{ inputs.version }}
run: |
set -euo pipefail
if ! [[ "$INPUT_VERSION" =~ ^[0-9]+\.[0-9]+\.[0-9]+(-rc\.[0-9]+)?$ ]]; then
echo "Invalid version: $INPUT_VERSION (expected X.Y.Z or X.Y.Z-rc.N)" >&2
exit 1
fi
- name: Create and push tag
shell: bash
env:
TARGET_BRANCH: ${{ inputs.target_branch }}
VERSION: ${{ inputs.version }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
set -euo pipefail
git config user.name "github-actions[bot]"
git config user.email "41898282+github-actions[bot]@users.noreply.github.com"
git fetch --prune origin
git checkout "$TARGET_BRANCH"
git pull --ff-only origin "$TARGET_BRANCH"
if git rev-parse -q --verify "refs/tags/$VERSION" >/dev/null; then
echo "Tag $VERSION already exists" >&2
exit 1
fi
git tag -a "$VERSION" -m "Release $VERSION"
git push origin "refs/tags/$VERSION"
release:
if: ${{ github.event_name == 'workflow_dispatch' || (github.event_name == 'push' && github.actor != 'github-actions[bot]') }}
name: Build and Release
runs-on: ubuntu-24.04
timeout-minutes: 120
env:
DOCKERHUB_IMAGE: docker.io/fosrl/${{ github.event.repository.name }}
GHCR_IMAGE: ghcr.io/${{ github.repository_owner }}/${{ github.event.repository.name }}
- name: Set up QEMU
uses: docker/setup-qemu-action@v3
steps:
- name: Checkout code
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
with:
fetch-depth: 0
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Capture created timestamp
run: echo "IMAGE_CREATED=$(date -u +%Y-%m-%dT%H:%M:%SZ)" >> $GITHUB_ENV
shell: bash
- name: Log in to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKER_HUB_USERNAME }}
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
- name: Set up QEMU
uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0
- name: Extract tag name
id: get-tag
run: echo "TAG=${GITHUB_REF#refs/tags/}" >> $GITHUB_ENV
- name: Set up 1.2.0 Buildx
uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0
- name: Install Go
uses: actions/setup-go@v6
with:
go-version: 1.25
- name: Log in to Docker Hub
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
with:
registry: docker.io
username: ${{ secrets.DOCKER_HUB_USERNAME }}
password: ${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}
- name: Update version in main.go
run: |
TAG=${{ env.TAG }}
if [ -f main.go ]; then
sed -i 's/version_replaceme/'"$TAG"'/' main.go
echo "Updated main.go with version $TAG"
else
echo "main.go not found"
fi
- name: Build and push Docker images
run: |
TAG=${{ env.TAG }}
make docker-build-release tag=$TAG
- name: Log in to GHCR
uses: docker/login-action@5e57cd118135c172c3672efd75eb46360885c0ef # v3.6.0
with:
registry: ghcr.io
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}
- name: Build binaries
run: |
make go-build-release
- name: Normalize image names to lowercase
run: |
set -euo pipefail
echo "GHCR_IMAGE=${GHCR_IMAGE,,}" >> "$GITHUB_ENV"
echo "DOCKERHUB_IMAGE=${DOCKERHUB_IMAGE,,}" >> "$GITHUB_ENV"
shell: bash
- name: Upload artifacts from /bin
uses: actions/upload-artifact@v4
with:
name: binaries
path: bin/
- name: Extract tag name
env:
EVENT_NAME: ${{ github.event_name }}
INPUT_VERSION: ${{ inputs.version }}
run: |
if [ "$EVENT_NAME" = "workflow_dispatch" ]; then
echo "TAG=${INPUT_VERSION}" >> $GITHUB_ENV
else
echo "TAG=${{ github.ref_name }}" >> $GITHUB_ENV
fi
shell: bash
- name: Validate pushed tag format (no leading 'v')
if: ${{ github.event_name == 'push' }}
shell: bash
env:
TAG_GOT: ${{ env.TAG }}
run: |
set -euo pipefail
if [[ "$TAG_GOT" =~ ^[0-9]+\.[0-9]+\.[0-9]+(-rc\.[0-9]+)?$ ]]; then
echo "Tag OK: $TAG_GOT"
exit 0
fi
echo "ERROR: Tag '$TAG_GOT' is not allowed. Use 'X.Y.Z' or 'X.Y.Z-rc.N' (no leading 'v')." >&2
exit 1
- name: Wait for tag to be visible (dispatch only)
if: ${{ github.event_name == 'workflow_dispatch' }}
run: |
set -euo pipefail
for i in {1..90}; do
if git ls-remote --tags origin "refs/tags/${TAG}" | grep -qE "refs/tags/${TAG}$"; then
echo "Tag ${TAG} is visible on origin"; exit 0
fi
echo "Tag not yet visible, retrying... ($i/90)"
sleep 2
done
echo "Tag ${TAG} not visible after waiting"; exit 1
shell: bash
- name: Update version in main.go
run: |
TAG=${{ env.TAG }}
if [ -f main.go ]; then
sed -i 's/version_replaceme/'"$TAG"'/' main.go
echo "Updated main.go with version $TAG"
else
echo "main.go not found"
fi
- name: Ensure repository is at the tagged commit (dispatch only)
if: ${{ github.event_name == 'workflow_dispatch' }}
run: |
set -euo pipefail
git fetch --tags --force
git checkout "refs/tags/${TAG}"
echo "Checked out $(git rev-parse --short HEAD) for tag ${TAG}"
shell: bash
- name: Detect release candidate (rc)
run: |
set -euo pipefail
if [[ "${TAG}" =~ ^[0-9]+\.[0-9]+\.[0-9]+-rc\.[0-9]+$ ]]; then
echo "IS_RC=true" >> $GITHUB_ENV
else
echo "IS_RC=false" >> $GITHUB_ENV
fi
shell: bash
- name: Install Go
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
with:
go-version-file: go.mod
- name: Resolve publish-latest flag
env:
EVENT_NAME: ${{ github.event_name }}
PL_INPUT: ${{ inputs.publish_latest }}
PL_VAR: ${{ vars.PUBLISH_LATEST }}
run: |
set -euo pipefail
val="false"
if [ "$EVENT_NAME" = "workflow_dispatch" ]; then
if [ "${PL_INPUT}" = "true" ]; then val="true"; fi
else
if [ "${PL_VAR}" = "true" ]; then val="true"; fi
fi
echo "PUBLISH_LATEST=$val" >> $GITHUB_ENV
shell: bash
- name: Resolve publish-minor flag
env:
EVENT_NAME: ${{ github.event_name }}
PM_INPUT: ${{ inputs.publish_minor }}
PM_VAR: ${{ vars.PUBLISH_MINOR }}
run: |
set -euo pipefail
val="false"
if [ "$EVENT_NAME" = "workflow_dispatch" ]; then
if [ "${PM_INPUT}" = "true" ]; then val="true"; fi
else
if [ "${PM_VAR}" = "true" ]; then val="true"; fi
fi
echo "PUBLISH_MINOR=$val" >> $GITHUB_ENV
shell: bash
- name: Cache Go modules
if: ${{ hashFiles('**/go.sum') != '' }}
uses: actions/cache@8b402f58fbc84540c8b491a91e594a4576fec3d7 # v5.0.2
with:
path: |
~/.cache/go-build
~/go/pkg/mod
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Go vet & test
if: ${{ hashFiles('**/go.mod') != '' }}
run: |
go version
go vet ./...
go test ./... -race -covermode=atomic
shell: bash
- name: Resolve license fallback
run: echo "IMAGE_LICENSE=${{ github.event.repository.license.spdx_id || 'NOASSERTION' }}" >> $GITHUB_ENV
shell: bash
- name: Resolve registries list (GHCR always, Docker Hub only if creds)
shell: bash
run: |
set -euo pipefail
images="${GHCR_IMAGE}"
if [ -n "${{ secrets.DOCKER_HUB_ACCESS_TOKEN }}" ] && [ -n "${{ secrets.DOCKER_HUB_USERNAME }}" ]; then
images="${images}\n${DOCKERHUB_IMAGE}"
fi
{
echo 'IMAGE_LIST<<EOF'
echo -e "$images"
echo 'EOF'
} >> "$GITHUB_ENV"
- name: Docker meta
id: meta
uses: docker/metadata-action@c299e40c65443455700f0fdfc63efafe5b349051 # v5.10.0
with:
images: ${{ env.IMAGE_LIST }}
tags: |
type=semver,pattern={{version}},value=${{ env.TAG }}
type=semver,pattern={{major}}.{{minor}},value=${{ env.TAG }},enable=${{ env.PUBLISH_MINOR == 'true' && env.IS_RC != 'true' }}
type=raw,value=latest,enable=${{ env.IS_RC != 'true' }}
flavor: |
latest=false
labels: |
org.opencontainers.image.title=${{ github.event.repository.name }}
org.opencontainers.image.version=${{ env.TAG }}
org.opencontainers.image.revision=${{ github.sha }}
org.opencontainers.image.source=${{ github.event.repository.html_url }}
org.opencontainers.image.url=${{ github.event.repository.html_url }}
org.opencontainers.image.documentation=${{ github.event.repository.html_url }}
org.opencontainers.image.description=${{ github.event.repository.description }}
org.opencontainers.image.licenses=${{ env.IMAGE_LICENSE }}
org.opencontainers.image.created=${{ env.IMAGE_CREATED }}
org.opencontainers.image.ref.name=${{ env.TAG }}
org.opencontainers.image.authors=${{ github.repository_owner }}
- name: Echo build config (non-secret)
shell: bash
env:
IMAGE_TITLE: ${{ github.event.repository.name }}
IMAGE_VERSION: ${{ env.TAG }}
IMAGE_REVISION: ${{ github.sha }}
IMAGE_SOURCE_URL: ${{ github.event.repository.html_url }}
IMAGE_URL: ${{ github.event.repository.html_url }}
IMAGE_DESCRIPTION: ${{ github.event.repository.description }}
IMAGE_LICENSE: ${{ env.IMAGE_LICENSE }}
DOCKERHUB_IMAGE: ${{ env.DOCKERHUB_IMAGE }}
GHCR_IMAGE: ${{ env.GHCR_IMAGE }}
DOCKER_HUB_USER: ${{ secrets.DOCKER_HUB_USERNAME }}
REPO: ${{ github.repository }}
OWNER: ${{ github.repository_owner }}
WORKFLOW_REF: ${{ github.workflow_ref }}
REF: ${{ github.ref }}
REF_NAME: ${{ github.ref_name }}
RUN_URL: https://github.com/${{ github.repository }}/actions/runs/${{ github.run_id }}
run: |
set -euo pipefail
echo "=== OCI Label Values ==="
echo "org.opencontainers.image.title=${IMAGE_TITLE}"
echo "org.opencontainers.image.version=${IMAGE_VERSION}"
echo "org.opencontainers.image.revision=${IMAGE_REVISION}"
echo "org.opencontainers.image.source=${IMAGE_SOURCE_URL}"
echo "org.opencontainers.image.url=${IMAGE_URL}"
echo "org.opencontainers.image.description=${IMAGE_DESCRIPTION}"
echo "org.opencontainers.image.licenses=${IMAGE_LICENSE}"
echo
echo "=== Images ==="
echo "DOCKERHUB_IMAGE=${DOCKERHUB_IMAGE}"
echo "GHCR_IMAGE=${GHCR_IMAGE}"
echo "DOCKER_HUB_USERNAME=${DOCKER_HUB_USER}"
echo
echo "=== GitHub Kontext ==="
echo "repository=${REPO}"
echo "owner=${OWNER}"
echo "workflow_ref=${WORKFLOW_REF}"
echo "ref=${REF}"
echo "ref_name=${REF_NAME}"
echo "run_url=${RUN_URL}"
echo
echo "=== docker/metadata-action outputs (Tags/Labels), raw ==="
echo "::group::tags"
echo "${{ steps.meta.outputs.tags }}"
echo "::endgroup::"
echo "::group::labels"
echo "${{ steps.meta.outputs.labels }}"
echo "::endgroup::"
- name: Build and push (Docker Hub + GHCR)
id: build
uses: docker/build-push-action@263435318d21b8e681c14492fe198d362a7d2c83 # v6.18.0
with:
context: .
push: true
platforms: linux/amd64,linux/arm64,linux/arm/v7
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
cache-from: type=gha,scope=${{ github.repository }}
cache-to: type=gha,mode=max,scope=${{ github.repository }}
provenance: mode=max
sbom: true
- name: Compute image digest refs
run: |
echo "DIGEST=${{ steps.build.outputs.digest }}" >> $GITHUB_ENV
echo "GHCR_REF=$GHCR_IMAGE@${{ steps.build.outputs.digest }}" >> $GITHUB_ENV
echo "DH_REF=$DOCKERHUB_IMAGE@${{ steps.build.outputs.digest }}" >> $GITHUB_ENV
echo "Built digest: ${{ steps.build.outputs.digest }}"
shell: bash
- name: Attest build provenance (GHCR)
id: attest-ghcr
uses: actions/attest-build-provenance@977bb373ede98d70efdf65b84cb5f73e068dcc2a # v3.0.0
with:
subject-name: ${{ env.GHCR_IMAGE }}
subject-digest: ${{ steps.build.outputs.digest }}
push-to-registry: true
show-summary: true
- name: Attest build provenance (Docker Hub)
continue-on-error: true
id: attest-dh
uses: actions/attest-build-provenance@977bb373ede98d70efdf65b84cb5f73e068dcc2a # v3.0.0
with:
subject-name: index.docker.io/fosrl/${{ github.event.repository.name }}
subject-digest: ${{ steps.build.outputs.digest }}
push-to-registry: true
show-summary: true
- name: Install cosign
uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0
with:
cosign-release: 'v3.0.2'
- name: Sanity check cosign private key
env:
COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
run: |
set -euo pipefail
cosign public-key --key env://COSIGN_PRIVATE_KEY >/dev/null
shell: bash
- name: Sign GHCR image (digest) with key (recursive)
env:
COSIGN_YES: "true"
COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
run: |
set -euo pipefail
echo "Signing ${GHCR_REF} (digest) recursively with provided key"
cosign sign --key env://COSIGN_PRIVATE_KEY --recursive "${GHCR_REF}"
echo "Waiting 30 seconds for signatures to propagate..."
shell: bash
- name: Generate SBOM (SPDX JSON)
uses: aquasecurity/trivy-action@b6643a29fecd7f34b3597bc6acb0a98b03d33ff8 # v0.33.1
with:
image-ref: ${{ env.GHCR_IMAGE }}@${{ steps.build.outputs.digest }}
format: spdx-json
output: sbom.spdx.json
- name: Validate SBOM JSON
run: jq -e . sbom.spdx.json >/dev/null
shell: bash
- name: Minify SBOM JSON (optional hardening)
run: jq -c . sbom.spdx.json > sbom.min.json && mv sbom.min.json sbom.spdx.json
shell: bash
- name: Create SBOM attestation (GHCR, private key)
env:
COSIGN_YES: "true"
COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
run: |
set -euo pipefail
cosign attest \
--key env://COSIGN_PRIVATE_KEY \
--type spdxjson \
--predicate sbom.spdx.json \
"${GHCR_REF}"
shell: bash
- name: Create SBOM attestation (Docker Hub, private key)
continue-on-error: true
env:
COSIGN_YES: "true"
COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
COSIGN_DOCKER_MEDIA_TYPES: "1"
run: |
set -euo pipefail
cosign attest \
--key env://COSIGN_PRIVATE_KEY \
--type spdxjson \
--predicate sbom.spdx.json \
"${DH_REF}"
shell: bash
- name: Keyless sign & verify GHCR digest (OIDC)
env:
COSIGN_YES: "true"
WORKFLOW_REF: ${{ github.workflow_ref }} # owner/repo/.github/workflows/<file>@refs/tags/<tag>
ISSUER: https://token.actions.githubusercontent.com
run: |
set -euo pipefail
echo "Keyless signing ${GHCR_REF}"
cosign sign --rekor-url https://rekor.sigstore.dev --recursive "${GHCR_REF}"
echo "Verify keyless (OIDC) signature policy on ${GHCR_REF}"
cosign verify \
--certificate-oidc-issuer "${ISSUER}" \
--certificate-identity "https://github.com/${WORKFLOW_REF}" \
"${GHCR_REF}" -o text
shell: bash
- name: Sign Docker Hub image (digest) with key (recursive)
continue-on-error: true
env:
COSIGN_YES: "true"
COSIGN_PRIVATE_KEY: ${{ secrets.COSIGN_PRIVATE_KEY }}
COSIGN_PASSWORD: ${{ secrets.COSIGN_PASSWORD }}
COSIGN_DOCKER_MEDIA_TYPES: "1"
run: |
set -euo pipefail
echo "Signing ${DH_REF} (digest) recursively with provided key (Docker media types fallback)"
cosign sign --key env://COSIGN_PRIVATE_KEY --recursive "${DH_REF}"
shell: bash
- name: Keyless sign & verify Docker Hub digest (OIDC)
continue-on-error: true
env:
COSIGN_YES: "true"
ISSUER: https://token.actions.githubusercontent.com
COSIGN_DOCKER_MEDIA_TYPES: "1"
run: |
set -euo pipefail
echo "Keyless signing ${DH_REF} (force public-good Rekor)"
cosign sign --rekor-url https://rekor.sigstore.dev --recursive "${DH_REF}"
echo "Keyless verify via Rekor (strict identity)"
if ! cosign verify \
--rekor-url https://rekor.sigstore.dev \
--certificate-oidc-issuer "${ISSUER}" \
--certificate-identity "https://github.com/${{ github.workflow_ref }}" \
"${DH_REF}" -o text; then
echo "Rekor verify failed — retry offline bundle verify (no Rekor)"
if ! cosign verify \
--offline \
--certificate-oidc-issuer "${ISSUER}" \
--certificate-identity "https://github.com/${{ github.workflow_ref }}" \
"${DH_REF}" -o text; then
echo "Offline bundle verify failed — ignore tlog (TEMP for debugging)"
cosign verify \
--insecure-ignore-tlog=true \
--certificate-oidc-issuer "${ISSUER}" \
--certificate-identity "https://github.com/${{ github.workflow_ref }}" \
"${DH_REF}" -o text || true
fi
fi
- name: Verify signature (public key) GHCR digest + tag
env:
COSIGN_PUBLIC_KEY: ${{ secrets.COSIGN_PUBLIC_KEY }}
run: |
set -euo pipefail
TAG_VAR="${TAG}"
echo "Verifying (digest) ${GHCR_REF}"
cosign verify --key env://COSIGN_PUBLIC_KEY "$GHCR_REF" -o text
echo "Verifying (tag) $GHCR_IMAGE:$TAG_VAR"
cosign verify --key env://COSIGN_PUBLIC_KEY "$GHCR_IMAGE:$TAG_VAR" -o text
shell: bash
- name: Verify SBOM attestation (GHCR)
env:
COSIGN_PUBLIC_KEY: ${{ secrets.COSIGN_PUBLIC_KEY }}
run: cosign verify-attestation --key env://COSIGN_PUBLIC_KEY --type spdxjson "$GHCR_REF" -o text
shell: bash
- name: Verify SLSA provenance (GHCR)
env:
ISSUER: https://token.actions.githubusercontent.com
WFREF: ${{ github.workflow_ref }}
run: |
set -euo pipefail
# (optional) show which predicate types are present to aid debugging
cosign download attestation "$GHCR_REF" \
| jq -r '.payload | @base64d | fromjson | .predicateType' | sort -u || true
# Verify the SLSA v1 provenance attestation (predicate URL)
cosign verify-attestation \
--type 'https://slsa.dev/provenance/v1' \
--certificate-oidc-issuer "$ISSUER" \
--certificate-identity "https://github.com/${WFREF}" \
--rekor-url https://rekor.sigstore.dev \
"$GHCR_REF" -o text
shell: bash
- name: Verify signature (public key) Docker Hub digest
continue-on-error: true
env:
COSIGN_PUBLIC_KEY: ${{ secrets.COSIGN_PUBLIC_KEY }}
COSIGN_DOCKER_MEDIA_TYPES: "1"
run: |
set -euo pipefail
echo "Verifying (digest) ${DH_REF} with Docker media types"
cosign verify --key env://COSIGN_PUBLIC_KEY "${DH_REF}" -o text
shell: bash
- name: Verify signature (public key) Docker Hub tag
continue-on-error: true
env:
COSIGN_PUBLIC_KEY: ${{ secrets.COSIGN_PUBLIC_KEY }}
COSIGN_DOCKER_MEDIA_TYPES: "1"
run: |
set -euo pipefail
echo "Verifying (tag) $DOCKERHUB_IMAGE:$TAG with Docker media types"
cosign verify --key env://COSIGN_PUBLIC_KEY "$DOCKERHUB_IMAGE:$TAG" -o text
shell: bash
# - name: Trivy scan (GHCR image)
# id: trivy
# uses: aquasecurity/trivy-action@b6643a29fecd7f34b3597bc6acb0a98b03d33ff8 # v0.33.1
# with:
# image-ref: ${{ env.GHCR_IMAGE }}@${{ steps.build.outputs.digest }}
# format: sarif
# output: trivy-ghcr.sarif
# ignore-unfixed: true
# vuln-type: os,library
# severity: CRITICAL,HIGH
# exit-code: ${{ (vars.TRIVY_FAIL || '0') }}
# - name: Upload SARIF
# if: ${{ always() && hashFiles('trivy-ghcr.sarif') != '' }}
# uses: github/codeql-action/upload-sarif@fdbfb4d2750291e159f0156def62b853c2798ca2 # v4.31.5
# with:
# sarif_file: trivy-ghcr.sarif
# category: Image Vulnerability Scan
- name: Build binaries
env:
CGO_ENABLED: "0"
GOFLAGS: "-trimpath"
run: |
set -euo pipefail
TAG_VAR="${TAG}"
make go-build-release tag=$TAG_VAR
shell: bash
- name: Create GitHub Release
uses: softprops/action-gh-release@a06a81a03ee405af7f2048a818ed3f03bbf83c7b # v2.5.0
with:
tag_name: ${{ env.TAG }}
generate_release_notes: true
prerelease: ${{ env.IS_RC == 'true' }}
files: |
bin/*
fail_on_unmatched_files: true
draft: true
body: |
## Container Images
- GHCR: `${{ env.GHCR_REF }}`
- Docker Hub: `${{ env.DH_REF || 'N/A' }}`
**Digest:** `${{ steps.build.outputs.digest }}`

37  .github/workflows/stale-bot.yml vendored Normal file
View File

@@ -0,0 +1,37 @@
name: Mark and Close Stale Issues
on:
schedule:
- cron: '0 0 * * *'
workflow_dispatch: # Allow manual trigger
permissions:
contents: write # only for delete-branch option
issues: write
pull-requests: write
jobs:
stale:
runs-on: ubuntu-latest
steps:
- uses: actions/stale@997185467fa4f803885201cee163a9f38240193d # v10.1.1
with:
days-before-stale: 14
days-before-close: 14
stale-issue-message: 'This issue has been automatically marked as stale due to 14 days of inactivity. It will be closed in 14 days if no further activity occurs.'
close-issue-message: 'This issue has been automatically closed due to inactivity. If you believe this is still relevant, please open a new issue with up-to-date information.'
stale-issue-label: 'stale'
exempt-issue-labels: 'needs investigating, networking, new feature, reverse proxy, bug, api, authentication, documentation, enhancement, help wanted, good first issue, question'
exempt-all-issue-assignees: true
only-labels: ''
exempt-pr-labels: ''
days-before-pr-stale: -1
days-before-pr-close: -1
operations-per-run: 100
remove-stale-when-updated: true
delete-branch: false
enable-statistics: true

View File

@@ -1,5 +1,8 @@
name: Run Tests
permissions:
contents: read
on:
pull_request:
branches:
@@ -7,22 +10,33 @@ on:
- dev
jobs:
test:
build-go:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Set up Go
uses: actions/setup-go@v6
uses: actions/setup-go@7a3fe6cf4cb3a834922a1244abfce67bcef6a0c5 # v6.2.0
with:
go-version: 1.25
- name: Build go
run: go build
- name: Build Docker image
run: make build
- name: Build binaries
run: make go-build-release
build-docker:
runs-on: ubuntu-latest
steps:
- name: Checkout repository
uses: actions/checkout@8e8c483db84b4bee98b60c0593521ed34d9990e8 # v6.0.1
- name: Set up QEMU
uses: docker/setup-qemu-action@c7c53464625b32c7a7e944ae62b3e17d2b600130 # v3.7.0
- name: Set up 1.2.0 Buildx
uses: docker/setup-buildx-action@8d2750c68a42422c14e847fe6c8ac0403b4cbd6f # v3.12.0
- name: Build Docker image
run: make docker-build-dev

306  API.md Normal file
View File

@@ -0,0 +1,306 @@
## API
Olm can be controlled with an embedded API server when using `--enable-api`. This allows you to start it as a daemon and trigger it with the following endpoints. The API can listen on either a TCP address or a Unix socket/Windows named pipe.
### Socket vs TCP
When `--enable-api` is used, Olm listens on a TCP address configured via `--http-addr` (for example `:9452`). Alternatively, it can listen on a Unix socket (Linux/macOS) or a Windows named pipe via `--socket-path` (for example `/var/run/olm.sock`), which keeps communication local-only and is more secure.
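For concreteness, here is a minimal sketch of starting Olm in each mode, using the `--enable-api`, `--http-addr`, and `--socket-path` flags described above; exact flag combinations may vary.
```bash
# TCP mode: API reachable on localhost:9452
olm --enable-api --http-addr :9452

# Unix socket mode (Linux/macOS): local-only access via the socket file
olm --enable-api --socket-path /var/run/olm/olm.sock
```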
**Unix Socket (Linux/macOS):**
- Socket path example: `/var/run/olm/olm.sock`
- The directory is created automatically if it doesn't exist
- Socket permissions are set to `0666` to allow access
- Existing socket files are automatically removed on startup
- Socket file is cleaned up when Olm stops
**Windows Named Pipe:**
- Pipe path example: `\\.\pipe\olm`
- If the path doesn't start with `\`, it's automatically prefixed with `\\.\pipe\`
- Security descriptor grants full access to Everyone and the current owner
- Named pipes are automatically cleaned up by Windows
**Connecting to the Socket:**
```bash
# Linux/macOS - using curl with Unix socket
curl --unix-socket /var/run/olm/olm.sock http://localhost/status
```
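For comparison, the same call over TCP, assuming the default `:9452` address used in the usage examples below:
```bash
# TCP - using a regular HTTP address
curl http://localhost:9452/status
```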
---
### POST /connect
Initiates a new connection request to a Pangolin server.
**Request Body:**
```json
{
"id": "string",
"secret": "string",
"endpoint": "string",
"userToken": "string",
"mtu": 1280,
"dns": "8.8.8.8",
"dnsProxyIP": "string",
"upstreamDNS": ["8.8.8.8:53", "1.1.1.1:53"],
"interfaceName": "olm",
"holepunch": false,
"tlsClientCert": "string",
"pingInterval": "3s",
"pingTimeout": "5s",
"orgId": "string"
}
```
**Required Fields:**
- `id`: Olm ID generated by Pangolin
- `secret`: Authentication secret for the Olm ID
- `endpoint`: Target Pangolin endpoint URL
**Optional Fields:**
- `userToken`: User authentication token
- `mtu`: MTU for the internal WireGuard interface (default: 1280)
- `dns`: DNS server to use for resolving the endpoint
- `dnsProxyIP`: DNS proxy IP address
- `upstreamDNS`: Array of upstream DNS servers
- `interfaceName`: Name of the WireGuard interface (default: olm)
- `holepunch`: Enable NAT hole punching (default: false)
- `tlsClientCert`: TLS client certificate
- `pingInterval`: Interval for pinging the server (default: 3s)
- `pingTimeout`: Timeout for each ping (default: 5s)
- `orgId`: Organization ID to connect to
**Response:**
- **Status Code:** `202 Accepted`
- **Content-Type:** `application/json`
```json
{
"status": "connection request accepted"
}
```
**Error Responses:**
- `405 Method Not Allowed` - Non-POST requests
- `400 Bad Request` - Invalid JSON or missing required fields
- `409 Conflict` - Already connected to a server (disconnect first)
---
### GET /status
Returns the current connection status, registration state, and peer information.
**Response:**
- **Status Code:** `200 OK`
- **Content-Type:** `application/json`
```json
{
"connected": true,
"registered": true,
"terminated": false,
"version": "1.0.0",
"agent": "olm",
"orgId": "org_123",
"peers": {
"10": {
"siteId": 10,
"name": "Site A",
"connected": true,
"rtt": 145338339,
"lastSeen": "2025-08-13T14:39:17.208334428-07:00",
"endpoint": "p.fosrl.io:21820",
"isRelay": true,
"peerAddress": "100.89.128.5",
"holepunchConnected": false
},
"8": {
"siteId": 8,
"name": "Site B",
"connected": false,
"rtt": 0,
"lastSeen": "2025-08-13T14:39:19.663823645-07:00",
"endpoint": "p.fosrl.io:21820",
"isRelay": true,
"peerAddress": "100.89.128.10",
"holepunchConnected": false
}
},
"networkSettings": {
"tunnelIP": "100.89.128.3/20"
}
}
```
**Fields:**
- `connected`: Boolean indicating if connected to Pangolin
- `registered`: Boolean indicating if registered with the server
- `terminated`: Boolean indicating if the connection was terminated
- `version`: Olm version string
- `agent`: Agent identifier
- `orgId`: Current organization ID
- `peers`: Map of peer statuses by site ID
- `siteId`: Peer site identifier
- `name`: Site name
- `connected`: Boolean peer connection state
- `rtt`: Peer round-trip time (integer, nanoseconds)
- `lastSeen`: Last time peer was seen (RFC3339 timestamp)
- `endpoint`: Peer endpoint address
- `isRelay`: Whether the peer is relayed (true) or direct (false)
- `peerAddress`: Peer's IP address in the tunnel
- `holepunchConnected`: Whether holepunch connection is established
- `networkSettings`: Current network configuration including tunnel IP
**Error Responses:**
- `405 Method Not Allowed` - Non-GET requests
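As a quick way to inspect the per-peer fields listed above, here is a hedged one-liner assuming `jq` is installed and the API is listening on the default `:9452` address:
```bash
# Summarize peer connectivity from the /status response
curl -s http://localhost:9452/status | jq '.peers | to_entries[] | {site: .key, name: .value.name, connected: .value.connected, isRelay: .value.isRelay}'
```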
---
### POST /disconnect
Disconnects from the current Pangolin server and tears down the WireGuard tunnel.
**Request Body:** None required
**Response:**
- **Status Code:** `200 OK`
- **Content-Type:** `application/json`
```json
{
"status": "disconnect initiated"
}
```
**Error Responses:**
- `405 Method Not Allowed` - Non-POST requests
- `409 Conflict` - Not currently connected to a server
---
### POST /switch-org
Switches to a different organization while maintaining the connection.
**Request Body:**
```json
{
"orgId": "string"
}
```
**Required Fields:**
- `orgId`: The organization ID to switch to
**Response:**
- **Status Code:** `200 OK`
- **Content-Type:** `application/json`
```json
{
"status": "org switch request accepted"
}
```
**Error Responses:**
- `405 Method Not Allowed` - Non-POST requests
- `400 Bad Request` - Invalid JSON or missing orgId field
- `500 Internal Server Error` - Org switch failed
---
### POST /exit
Initiates a graceful shutdown of the Olm process.
**Request Body:** None required
**Response:**
- **Status Code:** `200 OK`
- **Content-Type:** `application/json`
```json
{
"status": "shutdown initiated"
}
```
**Note:** The response is sent before shutdown begins. There is a 100ms delay before the actual shutdown to ensure the response is delivered.
**Error Responses:**
- `405 Method Not Allowed` - Non-POST requests
---
### GET /health
Simple health check endpoint to verify the API server is running.
**Response:**
- **Status Code:** `200 OK`
- **Content-Type:** `application/json`
```json
{
"status": "ok"
}
```
**Error Responses:**
- `405 Method Not Allowed` - Non-GET requests
---
## Usage Examples
### Connect to a peer
```bash
curl -X POST http://localhost:9452/connect \
-H "Content-Type: application/json" \
-d '{
"id": "31frd0uzbjvp721",
"secret": "h51mmlknrvrwv8s4r1i210azhumt6isgbpyavxodibx1k2d6",
"endpoint": "https://example.com"
}'
```
### Connect with additional options
```bash
curl -X POST http://localhost:9452/connect \
-H "Content-Type: application/json" \
-d '{
"id": "31frd0uzbjvp721",
"secret": "h51mmlknrvrwv8s4r1i210azhumt6isgbpyavxodibx1k2d6",
"endpoint": "https://example.com",
"mtu": 1400,
"holepunch": true,
"pingInterval": "5s"
}'
```
### Check connection status
```bash
curl http://localhost:9452/status
```
### Switch organization
```bash
curl -X POST http://localhost:9452/switch-org \
-H "Content-Type: application/json" \
-d '{"orgId": "org_456"}'
```
### Disconnect from server
```bash
curl -X POST http://localhost:9452/disconnect
```
### Health check
```bash
curl http://localhost:9452/health
```
### Shutdown Olm
```bash
curl -X POST http://localhost:9452/exit
```
### Using Unix socket (Linux/macOS)
```bash
curl --unix-socket /var/run/olm/olm.sock http://localhost/status
curl --unix-socket /var/run/olm/olm.sock -X POST http://localhost/disconnect
```

View File

@@ -16,7 +16,7 @@ COPY . .
RUN CGO_ENABLED=0 GOOS=linux go build -o /olm
# Start a new stage from scratch
FROM alpine:3.22 AS runner
FROM alpine:3.23 AS runner
RUN apk --no-cache add ca-certificates

View File

@@ -1,26 +1,64 @@
.PHONY: all local docker-build-release
all: go-build-release
all: local
local:
CGO_ENABLED=0 go build -o ./bin/olm
docker-build-release:
@if [ -z "$(tag)" ]; then \
echo "Error: tag is required. Usage: make docker-build-release tag=<tag>"; \
exit 1; \
fi
docker buildx build --platform linux/arm/v7,linux/arm64,linux/amd64 -t fosrl/olm:latest -f Dockerfile --push .
docker buildx build --platform linux/arm/v7,linux/arm64,linux/amd64 -t fosrl/olm:$(tag) -f Dockerfile --push .
docker buildx build . \
--platform linux/arm/v7,linux/arm64,linux/amd64 \
-t fosrl/olm:latest \
-t fosrl/olm:$(tag) \
-f Dockerfile \
--push
local:
CGO_ENABLED=0 go build -o bin/olm
docker-build-dev:
docker buildx build . \
--platform linux/arm/v7,linux/arm64,linux/amd64 \
-t fosrl/olm:latest \
-f Dockerfile
build:
docker build -t fosrl/olm:latest .
.PHONY: go-build-release \
go-build-release-linux-arm64 go-build-release-linux-arm32-v7 \
go-build-release-linux-arm32-v6 go-build-release-linux-amd64 \
go-build-release-linux-riscv64 go-build-release-darwin-arm64 \
go-build-release-darwin-amd64 go-build-release-windows-amd64
go-build-release:
go-build-release: \
go-build-release-linux-arm64 \
go-build-release-linux-arm32-v7 \
go-build-release-linux-arm32-v6 \
go-build-release-linux-amd64 \
go-build-release-linux-riscv64 \
go-build-release-darwin-arm64 \
go-build-release-darwin-amd64 \
go-build-release-windows-amd64 \
go-build-release-linux-arm64:
CGO_ENABLED=0 GOOS=linux GOARCH=arm64 go build -o bin/olm_linux_arm64
go-build-release-linux-arm32-v7:
CGO_ENABLED=0 GOOS=linux GOARCH=arm GOARM=7 go build -o bin/olm_linux_arm32
go-build-release-linux-arm32-v6:
CGO_ENABLED=0 GOOS=linux GOARCH=arm GOARM=6 go build -o bin/olm_linux_arm32v6
go-build-release-linux-amd64:
CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -o bin/olm_linux_amd64
go-build-release-linux-riscv64:
CGO_ENABLED=0 GOOS=linux GOARCH=riscv64 go build -o bin/olm_linux_riscv64
go-build-release-darwin-arm64:
CGO_ENABLED=0 GOOS=darwin GOARCH=arm64 go build -o bin/olm_darwin_arm64
go-build-release-darwin-amd64:
CGO_ENABLED=0 GOOS=darwin GOARCH=amd64 go build -o bin/olm_darwin_amd64
go-build-release-windows-amd64:
CGO_ENABLED=0 GOOS=windows GOARCH=amd64 go build -o bin/olm_windows_amd64.exe
clean:
rm olm

271  README.md
View File

@@ -6,7 +6,7 @@ Olm is a [WireGuard](https://www.wireguard.com/) tunnel client designed to secur
Olm is used with Pangolin and Newt as part of the larger system. See documentation below:
- [Full Documentation](https://docs.pangolin.net)
- [Full Documentation](https://docs.pangolin.net/manage/clients/understanding-clients)
## Key Functions
@@ -18,281 +18,18 @@ Using the Olm ID and a secret, the olm will make HTTP requests to Pangolin to re
When Olm receives WireGuard control messages, it will use the information encoded (endpoint, public key) to bring up a WireGuard tunnel on your computer to a remote Newt. It will ping over the tunnel to ensure the peer is brought up.
## CLI Args
- `endpoint`: The endpoint where both Gerbil and Pangolin reside in order to connect to the websocket.
- `id`: Olm ID generated by Pangolin to identify the olm.
- `secret`: A unique secret (not shared and kept private) used to authenticate the olm ID with the websocket in order to receive commands.
- `mtu` (optional): MTU for the internal WG interface. Default: 1280
- `dns` (optional): DNS server to use to resolve the endpoint. Default: 8.8.8.8
- `log-level` (optional): The log level to use (DEBUG, INFO, WARN, ERROR, FATAL). Default: INFO
- `ping-interval` (optional): Interval for pinging the server. Default: 3s
- `ping-timeout` (optional): Timeout for each ping. Default: 5s
- `interface` (optional): Name of the WireGuard interface. Default: olm
- `enable-http` (optional): Enable HTTP server for receiving connection requests. Default: false
- `http-addr` (optional): HTTP server address (e.g., ':9452'). Default: :9452
- `holepunch` (optional): Enable hole punching. Default: false
## Environment Variables
All CLI arguments can also be set via environment variables:
- `PANGOLIN_ENDPOINT`: Equivalent to `--endpoint`
- `OLM_ID`: Equivalent to `--id`
- `OLM_SECRET`: Equivalent to `--secret`
- `MTU`: Equivalent to `--mtu`
- `DNS`: Equivalent to `--dns`
- `LOG_LEVEL`: Equivalent to `--log-level`
- `INTERFACE`: Equivalent to `--interface`
- `HTTP_ADDR`: Equivalent to `--http-addr`
- `PING_INTERVAL`: Equivalent to `--ping-interval`
- `PING_TIMEOUT`: Equivalent to `--ping-timeout`
- `HOLEPUNCH`: Set to "true" to enable hole punching (equivalent to `--holepunch`)
- `CONFIG_FILE`: Set to the location of a JSON file to load secret values
Examples:
```bash
olm \
--id 31frd0uzbjvp721 \
--secret h51mmlknrvrwv8s4r1i210azhumt6isgbpyavxodibx1k2d6 \
--endpoint https://example.com
```
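The same connection can be configured entirely through the environment variables listed above; a sketch using the placeholder credentials from the CLI example:
```bash
export PANGOLIN_ENDPOINT=https://example.com
export OLM_ID=31frd0uzbjvp721
export OLM_SECRET=h51mmlknrvrwv8s4r1i210azhumt6isgbpyavxodibx1k2d6
olm
```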
You can also run it with Docker compose. For example, a service in your `docker-compose.yml` might look like this using environment vars (recommended):
```yaml
services:
olm:
image: fosrl/olm
container_name: olm
restart: unless-stopped
network_mode: host
devices:
- /dev/net/tun:/dev/net/tun
environment:
- PANGOLIN_ENDPOINT=https://example.com
- OLM_ID=31frd0uzbjvp721
- OLM_SECRET=h51mmlknrvrwv8s4r1i210azhumt6isgbpyavxodibx1k2d6
```
You can also pass the CLI args to the container:
```yaml
services:
olm:
image: fosrl/olm
container_name: olm
restart: unless-stopped
network_mode: host
devices:
- /dev/net/tun:/dev/net/tun
command:
- --id 31frd0uzbjvp721
- --secret h51mmlknrvrwv8s4r1i210azhumt6isgbpyavxodibx1k2d6
- --endpoint https://example.com
```
**Docker Configuration Notes:**
- `network_mode: host` brings the olm network interface to the host system, allowing the WireGuard tunnel to function properly
- `devices: - /dev/net/tun:/dev/net/tun` is required to give the container access to the TUN device for creating WireGuard interfaces
## Loading secrets from files
You can use `CONFIG_FILE` to define a location of a config file to store the credentials between runs.
```
$ cat ~/.config/olm-client/config.json
{
"id": "spmzu8rbpzj1qq6",
"secret": "f6v61mjutwme2kkydbw3fjo227zl60a2tsf5psw9r25hgae3",
"endpoint": "https://app.pangolin.net",
"tlsClientCert": ""
}
```
This file is also written when Olm first starts up, so you do not need to pass `--id` and `--secret` on every run once you have run it with them once.
Default locations:
- **macOS**: `~/Library/Application Support/olm-client/config.json`
- **Windows**: `%PROGRAMDATA%\olm\olm-client\config.json`
- **Linux/Others**: `~/.config/olm-client/config.json`
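A sketch of pointing Olm at a non-default config location via the `CONFIG_FILE` environment variable described above (the path here is just an example):
```bash
# Load credentials from a custom path instead of the platform default
CONFIG_FILE=/etc/olm/config.json olm
```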
## Hole Punching
In the default mode, olm "relays" traffic through Gerbil in the cloud to reach Newt, which is a little more reliable. NAT hole punching is also supported but is EXPERIMENTAL right now; it is enabled with the `--holepunch` flag and attempts to orchestrate a NAT hole punch between the two sites so that traffic flows directly, which saves data costs and improves speed. If it fails, it should fall back to relaying.
Right now, basic NAT hole punching is supported. We plan to add:
- [ ] Birthday paradox
- [ ] UPnP
- [ ] LAN detection
## Windows Service
On Windows, olm has to be installed and run as a Windows service. When you run it with the CLI args shown above, it will attempt to install and start the service so that it behaves like a CLI tool. You can also run the following:
### Service Management Commands
```
# Install the service
olm.exe install
# Start the service
olm.exe start
# Stop the service
olm.exe stop
# Check service status
olm.exe status
# Remove the service
olm.exe remove
# Run in debug mode (console output) with or without id & secret
olm.exe debug
# Show help
olm.exe help
```
Note running the service requires credentials in `%PROGRAMDATA%\olm\olm-client\config.json`.
### Service Configuration
When running as a service, Olm will read configuration from environment variables or you can modify the service to include command-line arguments:
1. Install the service: `olm.exe install`
2. Set the credentials in `%PROGRAMDATA%\olm\olm-client\config.json`. Hint: if you run olm once with --id and --secret this file will be populated!
3. Start the service: `olm.exe start`
### Service Logs
When running as a service, logs are written to:
- Windows Event Log (Application log, source: "OlmWireguardService")
- Log files in: `%PROGRAMDATA%\olm\logs\olm.log`
You can view the Windows Event Log using Event Viewer or PowerShell:
```powershell
Get-EventLog -LogName Application -Source "OlmWireguardService" -Newest 10
```
## HTTP Endpoints
Olm can be controlled with an embedded http server when using `--enable-http`. This allows you to start it as a daemon and trigger it with the following endpoints:
### POST /connect
Initiates a new connection request.
**Request Body:**
```json
{
"id": "string",
"secret": "string",
"endpoint": "string"
}
```
**Required Fields:**
- `id`: Connection identifier
- `secret`: Authentication secret
- `endpoint`: Target endpoint URL
**Response:**
- **Status Code:** `202 Accepted`
- **Content-Type:** `application/json`
```json
{
"status": "connection request accepted"
}
```
**Error Responses:**
- `405 Method Not Allowed` - Non-POST requests
- `400 Bad Request` - Invalid JSON or missing required fields
### GET /status
Returns the current connection status and peer information.
**Response:**
- **Status Code:** `200 OK`
- **Content-Type:** `application/json`
```json
{
"status": "connected",
"connected": true,
"tunnelIP": "100.89.128.3/20",
"version": "version_replaceme",
"peers": {
"10": {
"siteId": 10,
"connected": true,
"rtt": 145338339,
"lastSeen": "2025-08-13T14:39:17.208334428-07:00",
"endpoint": "p.fosrl.io:21820",
"isRelay": true
},
"8": {
"siteId": 8,
"connected": false,
"rtt": 0,
"lastSeen": "2025-08-13T14:39:19.663823645-07:00",
"endpoint": "p.fosrl.io:21820",
"isRelay": true
}
}
}
```
**Fields:**
- `status`: Overall connection status ("connected" or "disconnected")
- `connected`: Boolean connection state
- `tunnelIP`: IP address and subnet of the tunnel (when connected)
- `version`: Olm version string
- `peers`: Map of peer statuses by site ID
- `siteId`: Peer site identifier
- `connected`: Boolean peer connection state
- `rtt`: Peer round-trip time (integer, nanoseconds)
- `lastSeen`: Last time peer was seen (RFC3339 timestamp)
- `endpoint`: Peer endpoint address
- `isRelay`: Whether the peer is relayed (true) or direct (false)
**Error Responses:**
- `405 Method Not Allowed` - Non-GET requests
## Usage Examples
### Connect to a peer
```bash
curl -X POST http://localhost:8080/connect \
-H "Content-Type: application/json" \
-d '{
"id": "31frd0uzbjvp721",
"secret": "h51mmlknrvrwv8s4r1i210azhumt6isgbpyavxodibx1k2d6",
"endpoint": "https://example.com"
}'
```
### Check connection status
```bash
curl http://localhost:8080/status
```
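If `jq` is installed, the status payload can be summarized on the command line. This sketch uses only the fields documented above; since `rtt` is reported in nanoseconds, dividing by 1e6 converts it to milliseconds.
```bash
# Overall state plus a per-peer summary, with RTT converted to milliseconds
curl -s http://localhost:8080/status | jq '{
  connected,
  peers: [.peers[] | {siteId, connected, isRelay, rtt_ms: (.rtt / 1e6)}]
}'
```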
In the default mode, olm uses both relaying through Gerbil and NAT hole punching to connect to Newt. Hole punching attempts to orchestrate NAT traversal between the two sites so that traffic flows directly, which can save data costs and improve speed. If hole punching fails, traffic falls back to relaying through Gerbil.
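Either path can also be switched off individually. The flag names below come from the configuration changes in this compare (`disable-holepunch`, `disable-relay`); the credential flags and values are the same illustrative ones used in the examples above.
```bash
# Relay-only: never attempt a NAT hole punch
olm --id 31frd0uzbjvp721 --secret h51mmlknrvrwv8s4r1i210azhumt6isgbpyavxodibx1k2d6 --disable-holepunch

# Direct-only: disable relaying through Gerbil
olm --id 31frd0uzbjvp721 --secret h51mmlknrvrwv8s4r1i210azhumt6isgbpyavxodibx1k2d6 --disable-relay
```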
## Build
### Binary
Make sure to have Go 1.23.1 installed.
Make sure to have Go 1.25 installed.
```bash
make local
make
```
## Licensing

View File

@@ -38,6 +38,7 @@ type SwitchOrgRequest struct {
// PeerStatus represents the status of a peer connection
type PeerStatus struct {
SiteID int `json:"siteId"`
Name string `json:"name"`
Connected bool `json:"connected"`
RTT time.Duration `json:"rtt"`
LastSeen time.Time `json:"lastSeen"`
@@ -170,6 +171,26 @@ func (s *API) Stop() error {
return nil
}
func (s *API) AddPeerStatus(siteID int, siteName string, connected bool, rtt time.Duration, endpoint string, isRelay bool) {
s.statusMu.Lock()
defer s.statusMu.Unlock()
status, exists := s.peerStatuses[siteID]
if !exists {
status = &PeerStatus{
SiteID: siteID,
}
s.peerStatuses[siteID] = status
}
status.Name = siteName
status.Connected = connected
status.RTT = rtt
status.LastSeen = time.Now()
status.Endpoint = endpoint
status.IsRelay = isRelay
}
// UpdatePeerStatus updates the status of a peer including endpoint and relay info
func (s *API) UpdatePeerStatus(siteID int, connected bool, rtt time.Duration, endpoint string, isRelay bool) {
s.statusMu.Lock()

View File

@@ -40,10 +40,11 @@ type OlmConfig struct {
PingTimeout string `json:"pingTimeout"`
// Advanced
Holepunch bool `json:"holepunch"`
TlsClientCert string `json:"tlsClientCert"`
OverrideDNS bool `json:"overrideDNS"`
DisableRelay bool `json:"disableRelay"`
DisableHolepunch bool `json:"disableHolepunch"`
TlsClientCert string `json:"tlsClientCert"`
OverrideDNS bool `json:"overrideDNS"`
TunnelDNS bool `json:"tunnelDNS"`
DisableRelay bool `json:"disableRelay"`
// DoNotCreateNewClient bool `json:"doNotCreateNewClient"`
// Parsed values (not in JSON)
@@ -78,16 +79,17 @@ func DefaultConfig() *OlmConfig {
}
config := &OlmConfig{
MTU: 1280,
DNS: "8.8.8.8",
UpstreamDNS: []string{"8.8.8.8:53"},
LogLevel: "INFO",
InterfaceName: "olm",
EnableAPI: false,
SocketPath: socketPath,
PingInterval: "3s",
PingTimeout: "5s",
Holepunch: false,
MTU: 1280,
DNS: "8.8.8.8",
UpstreamDNS: []string{"8.8.8.8:53"},
LogLevel: "INFO",
InterfaceName: "olm",
EnableAPI: false,
SocketPath: socketPath,
PingInterval: "3s",
PingTimeout: "5s",
DisableHolepunch: false,
TunnelDNS: false,
// DoNotCreateNewClient: false,
sources: make(map[string]string),
}
@@ -103,8 +105,9 @@ func DefaultConfig() *OlmConfig {
config.sources["socketPath"] = string(SourceDefault)
config.sources["pingInterval"] = string(SourceDefault)
config.sources["pingTimeout"] = string(SourceDefault)
config.sources["holepunch"] = string(SourceDefault)
config.sources["disableHolepunch"] = string(SourceDefault)
config.sources["overrideDNS"] = string(SourceDefault)
config.sources["tunnelDNS"] = string(SourceDefault)
config.sources["disableRelay"] = string(SourceDefault)
// config.sources["doNotCreateNewClient"] = string(SourceDefault)
@@ -253,9 +256,9 @@ func loadConfigFromEnv(config *OlmConfig) {
config.SocketPath = val
config.sources["socketPath"] = string(SourceEnv)
}
if val := os.Getenv("HOLEPUNCH"); val == "true" {
config.Holepunch = true
config.sources["holepunch"] = string(SourceEnv)
if val := os.Getenv("DISABLE_HOLEPUNCH"); val == "true" {
config.DisableHolepunch = true
config.sources["disableHolepunch"] = string(SourceEnv)
}
if val := os.Getenv("OVERRIDE_DNS"); val == "true" {
config.OverrideDNS = true
@@ -265,6 +268,10 @@ func loadConfigFromEnv(config *OlmConfig) {
config.DisableRelay = true
config.sources["disableRelay"] = string(SourceEnv)
}
if val := os.Getenv("TUNNEL_DNS"); val == "true" {
config.TunnelDNS = true
config.sources["tunnelDNS"] = string(SourceEnv)
}
// if val := os.Getenv("DO_NOT_CREATE_NEW_CLIENT"); val == "true" {
// config.DoNotCreateNewClient = true
// config.sources["doNotCreateNewClient"] = string(SourceEnv)
@@ -277,24 +284,25 @@ func loadConfigFromCLI(config *OlmConfig, args []string) (bool, bool, error) {
// Store original values to detect changes
origValues := map[string]interface{}{
"endpoint": config.Endpoint,
"id": config.ID,
"secret": config.Secret,
"org": config.OrgID,
"userToken": config.UserToken,
"mtu": config.MTU,
"dns": config.DNS,
"upstreamDNS": fmt.Sprintf("%v", config.UpstreamDNS),
"logLevel": config.LogLevel,
"interface": config.InterfaceName,
"httpAddr": config.HTTPAddr,
"socketPath": config.SocketPath,
"pingInterval": config.PingInterval,
"pingTimeout": config.PingTimeout,
"enableApi": config.EnableAPI,
"holepunch": config.Holepunch,
"overrideDNS": config.OverrideDNS,
"disableRelay": config.DisableRelay,
"endpoint": config.Endpoint,
"id": config.ID,
"secret": config.Secret,
"org": config.OrgID,
"userToken": config.UserToken,
"mtu": config.MTU,
"dns": config.DNS,
"upstreamDNS": fmt.Sprintf("%v", config.UpstreamDNS),
"logLevel": config.LogLevel,
"interface": config.InterfaceName,
"httpAddr": config.HTTPAddr,
"socketPath": config.SocketPath,
"pingInterval": config.PingInterval,
"pingTimeout": config.PingTimeout,
"enableApi": config.EnableAPI,
"disableHolepunch": config.DisableHolepunch,
"overrideDNS": config.OverrideDNS,
"disableRelay": config.DisableRelay,
"tunnelDNS": config.TunnelDNS,
// "doNotCreateNewClient": config.DoNotCreateNewClient,
}
@@ -315,9 +323,10 @@ func loadConfigFromCLI(config *OlmConfig, args []string) (bool, bool, error) {
serviceFlags.StringVar(&config.PingInterval, "ping-interval", config.PingInterval, "Interval for pinging the server")
serviceFlags.StringVar(&config.PingTimeout, "ping-timeout", config.PingTimeout, "Timeout for each ping")
serviceFlags.BoolVar(&config.EnableAPI, "enable-api", config.EnableAPI, "Enable API server for receiving connection requests")
serviceFlags.BoolVar(&config.Holepunch, "holepunch", config.Holepunch, "Enable hole punching")
serviceFlags.BoolVar(&config.DisableHolepunch, "disable-holepunch", config.DisableHolepunch, "Disable hole punching")
serviceFlags.BoolVar(&config.OverrideDNS, "override-dns", config.OverrideDNS, "Override system DNS settings")
serviceFlags.BoolVar(&config.DisableRelay, "disable-relay", config.DisableRelay, "Disable relay connections")
serviceFlags.BoolVar(&config.TunnelDNS, "tunnel-dns", config.TunnelDNS, "Use tunnel for DNS traffic")
// serviceFlags.BoolVar(&config.DoNotCreateNewClient, "do-not-create-new-client", config.DoNotCreateNewClient, "Do not create new client")
version := serviceFlags.Bool("version", false, "Print the version")
@@ -384,8 +393,8 @@ func loadConfigFromCLI(config *OlmConfig, args []string) (bool, bool, error) {
if config.EnableAPI != origValues["enableApi"].(bool) {
config.sources["enableApi"] = string(SourceCLI)
}
if config.Holepunch != origValues["holepunch"].(bool) {
config.sources["holepunch"] = string(SourceCLI)
if config.DisableHolepunch != origValues["disableHolepunch"].(bool) {
config.sources["disableHolepunch"] = string(SourceCLI)
}
if config.OverrideDNS != origValues["overrideDNS"].(bool) {
config.sources["overrideDNS"] = string(SourceCLI)
@@ -393,6 +402,9 @@ func loadConfigFromCLI(config *OlmConfig, args []string) (bool, bool, error) {
if config.DisableRelay != origValues["disableRelay"].(bool) {
config.sources["disableRelay"] = string(SourceCLI)
}
if config.TunnelDNS != origValues["tunnelDNS"].(bool) {
config.sources["tunnelDNS"] = string(SourceCLI)
}
// if config.DoNotCreateNewClient != origValues["doNotCreateNewClient"].(bool) {
// config.sources["doNotCreateNewClient"] = string(SourceCLI)
// }
@@ -505,9 +517,9 @@ func mergeConfigs(dest, src *OlmConfig) {
dest.EnableAPI = src.EnableAPI
dest.sources["enableApi"] = string(SourceFile)
}
if src.Holepunch {
dest.Holepunch = src.Holepunch
dest.sources["holepunch"] = string(SourceFile)
if src.DisableHolepunch {
dest.DisableHolepunch = src.DisableHolepunch
dest.sources["disableHolepunch"] = string(SourceFile)
}
if src.OverrideDNS {
dest.OverrideDNS = src.OverrideDNS
@@ -604,8 +616,9 @@ func (c *OlmConfig) ShowConfig() {
// Advanced
fmt.Println("\nAdvanced:")
fmt.Printf(" holepunch = %v [%s]\n", c.Holepunch, getSource("holepunch"))
fmt.Printf(" disable-holepunch = %v [%s]\n", c.DisableHolepunch, getSource("disableHolepunch"))
fmt.Printf(" override-dns = %v [%s]\n", c.OverrideDNS, getSource("overrideDNS"))
fmt.Printf(" tunnel-dns = %v [%s]\n", c.TunnelDNS, getSource("tunnelDNS"))
fmt.Printf(" disable-relay = %v [%s]\n", c.DisableRelay, getSource("disableRelay"))
// fmt.Printf(" do-not-create-new-client = %v [%s]\n", c.DoNotCreateNewClient, getSource("doNotCreateNewClient"))
if c.TlsClientCert != "" {

View File

@@ -34,18 +34,26 @@ type DNSProxy struct {
ep *channel.Endpoint
proxyIP netip.Addr
upstreamDNS []string
tunnelDNS bool // Whether to tunnel DNS queries over WireGuard or to spit them out locally
mtu int
tunDevice tun.Device // Direct reference to underlying TUN device for responses
middleDevice *device.MiddleDevice // Reference to MiddleDevice for packet filtering
recordStore *DNSRecordStore // Local DNS records
// Tunnel DNS fields - for sending queries over WireGuard
tunnelIP netip.Addr // WireGuard interface IP (source for tunneled queries)
tunnelStack *stack.Stack // Separate netstack for outbound tunnel queries
tunnelEp *channel.Endpoint
tunnelActivePorts map[uint16]bool
tunnelPortsLock sync.Mutex
ctx context.Context
cancel context.CancelFunc
wg sync.WaitGroup
}
// NewDNSProxy creates a new DNS proxy
func NewDNSProxy(tunDevice tun.Device, middleDevice *device.MiddleDevice, mtu int, utilitySubnet string, upstreamDns []string) (*DNSProxy, error) {
func NewDNSProxy(tunDevice tun.Device, middleDevice *device.MiddleDevice, mtu int, utilitySubnet string, upstreamDns []string, tunnelDns bool, tunnelIP string) (*DNSProxy, error) {
proxyIP, err := PickIPFromSubnet(utilitySubnet)
if err != nil {
return nil, fmt.Errorf("failed to pick DNS proxy IP from subnet: %v", err)
@@ -58,17 +66,28 @@ func NewDNSProxy(tunDevice tun.Device, middleDevice *device.MiddleDevice, mtu in
ctx, cancel := context.WithCancel(context.Background())
proxy := &DNSProxy{
proxyIP: proxyIP,
mtu: mtu,
tunDevice: tunDevice,
middleDevice: middleDevice,
upstreamDNS: upstreamDns,
recordStore: NewDNSRecordStore(),
ctx: ctx,
cancel: cancel,
proxyIP: proxyIP,
mtu: mtu,
tunDevice: tunDevice,
middleDevice: middleDevice,
upstreamDNS: upstreamDns,
tunnelDNS: tunnelDns,
recordStore: NewDNSRecordStore(),
tunnelActivePorts: make(map[uint16]bool),
ctx: ctx,
cancel: cancel,
}
// Create gvisor netstack
// Parse tunnel IP if provided (needed for tunneled DNS)
if tunnelIP != "" {
addr, err := netip.ParseAddr(tunnelIP)
if err != nil {
return nil, fmt.Errorf("failed to parse tunnel IP: %v", err)
}
proxy.tunnelIP = addr
}
// Create gvisor netstack for receiving DNS queries
stackOpts := stack.Options{
NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol, ipv6.NewProtocol},
TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol},
@@ -101,9 +120,104 @@ func NewDNSProxy(tunDevice tun.Device, middleDevice *device.MiddleDevice, mtu in
NIC: 1,
})
// Initialize tunnel netstack if tunnel DNS is enabled
if tunnelDns {
if !proxy.tunnelIP.IsValid() {
return nil, fmt.Errorf("tunnel IP is required when tunnelDNS is enabled")
}
// TODO: DO WE NEED TO ESTABLISH ANOTHER NETSTACK HERE OR CAN WE COMBINE WITH WGTESTER?
if err := proxy.initTunnelNetstack(); err != nil {
return nil, fmt.Errorf("failed to initialize tunnel netstack: %v", err)
}
}
return proxy, nil
}
// initTunnelNetstack creates a separate netstack for outbound DNS queries through the tunnel
func (p *DNSProxy) initTunnelNetstack() error {
// Create gvisor netstack for outbound tunnel queries
stackOpts := stack.Options{
NetworkProtocols: []stack.NetworkProtocolFactory{ipv4.NewProtocol, ipv6.NewProtocol},
TransportProtocols: []stack.TransportProtocolFactory{udp.NewProtocol},
HandleLocal: true,
}
p.tunnelEp = channel.New(256, uint32(p.mtu), "")
p.tunnelStack = stack.New(stackOpts)
// Create NIC
if err := p.tunnelStack.CreateNIC(1, p.tunnelEp); err != nil {
return fmt.Errorf("failed to create tunnel NIC: %v", err)
}
// Add tunnel IP address (WireGuard interface IP)
ipBytes := p.tunnelIP.As4()
protoAddr := tcpip.ProtocolAddress{
Protocol: ipv4.ProtocolNumber,
AddressWithPrefix: tcpip.AddrFrom4(ipBytes).WithPrefix(),
}
if err := p.tunnelStack.AddProtocolAddress(1, protoAddr, stack.AddressProperties{}); err != nil {
return fmt.Errorf("failed to add tunnel protocol address: %v", err)
}
// Add default route
p.tunnelStack.AddRoute(tcpip.Route{
Destination: header.IPv4EmptySubnet,
NIC: 1,
})
// Register filter rule on MiddleDevice to intercept responses
p.middleDevice.AddRule(p.tunnelIP, p.handleTunnelResponse)
return nil
}
// handleTunnelResponse handles packets coming back from the tunnel destined for the tunnel IP
func (p *DNSProxy) handleTunnelResponse(packet []byte) bool {
// Check if it's UDP
proto, ok := util.GetProtocol(packet)
if !ok || proto != 17 { // UDP
return false
}
// Check destination port - should be one of our active outbound ports
port, ok := util.GetDestPort(packet)
if !ok {
return false
}
// Check if we are expecting a response on this port
p.tunnelPortsLock.Lock()
active := p.tunnelActivePorts[uint16(port)]
p.tunnelPortsLock.Unlock()
if !active {
return false
}
// Inject into tunnel netstack
version := packet[0] >> 4
pkb := stack.NewPacketBuffer(stack.PacketBufferOptions{
Payload: buffer.MakeWithData(packet),
})
switch version {
case 4:
p.tunnelEp.InjectInbound(ipv4.ProtocolNumber, pkb)
case 6:
p.tunnelEp.InjectInbound(ipv6.ProtocolNumber, pkb)
default:
pkb.DecRef()
return false
}
pkb.DecRef()
return true // Handled
}
// Start starts the DNS proxy and registers with the filter
func (p *DNSProxy) Start() error {
// Install packet filter rule
@@ -114,7 +228,13 @@ func (p *DNSProxy) Start() error {
go p.runDNSListener()
go p.runPacketSender()
logger.Info("DNS proxy started on %s:%d", p.proxyIP.String(), DNSPort)
// Start tunnel packet sender if tunnel DNS is enabled
if p.tunnelDNS {
p.wg.Add(1)
go p.runTunnelPacketSender()
}
logger.Info("DNS proxy started on %s:%d (tunnelDNS=%v)", p.proxyIP.String(), DNSPort, p.tunnelDNS)
return nil
}
@@ -122,6 +242,9 @@ func (p *DNSProxy) Start() error {
func (p *DNSProxy) Stop() {
if p.middleDevice != nil {
p.middleDevice.RemoveRule(p.proxyIP)
if p.tunnelDNS && p.tunnelIP.IsValid() {
p.middleDevice.RemoveRule(p.tunnelIP)
}
}
p.cancel()
@@ -130,12 +253,21 @@ func (p *DNSProxy) Stop() {
p.ep.Close()
}
// Close tunnel endpoint if it exists
if p.tunnelEp != nil {
p.tunnelEp.Close()
}
p.wg.Wait()
if p.stack != nil {
p.stack.Close()
}
if p.tunnelStack != nil {
p.tunnelStack.Close()
}
logger.Info("DNS proxy stopped")
}
@@ -348,8 +480,16 @@ func (p *DNSProxy) forwardToUpstream(query *dns.Msg) *dns.Msg {
return response
}
// queryUpstream sends a DNS query to upstream server using miekg/dns
// queryUpstream sends a DNS query to upstream server
func (p *DNSProxy) queryUpstream(server string, query *dns.Msg, timeout time.Duration) (*dns.Msg, error) {
if p.tunnelDNS {
return p.queryUpstreamTunnel(server, query, timeout)
}
return p.queryUpstreamDirect(server, query, timeout)
}
// queryUpstreamDirect sends a DNS query to upstream server using miekg/dns directly (host networking)
func (p *DNSProxy) queryUpstreamDirect(server string, query *dns.Msg, timeout time.Duration) (*dns.Msg, error) {
client := &dns.Client{
Timeout: timeout,
}
@@ -362,6 +502,155 @@ func (p *DNSProxy) queryUpstream(server string, query *dns.Msg, timeout time.Dur
return response, nil
}
// queryUpstreamTunnel sends a DNS query through the WireGuard tunnel
func (p *DNSProxy) queryUpstreamTunnel(server string, query *dns.Msg, timeout time.Duration) (*dns.Msg, error) {
// Dial through the tunnel netstack
conn, port, err := p.dialTunnel("udp", server)
if err != nil {
return nil, fmt.Errorf("failed to dial tunnel: %v", err)
}
defer func() {
conn.Close()
p.removeTunnelPort(port)
}()
// Pack the query
queryData, err := query.Pack()
if err != nil {
return nil, fmt.Errorf("failed to pack query: %v", err)
}
// Set deadline
conn.SetDeadline(time.Now().Add(timeout))
// Send the query
_, err = conn.Write(queryData)
if err != nil {
return nil, fmt.Errorf("failed to send query: %v", err)
}
// Read the response
buf := make([]byte, 4096)
n, err := conn.Read(buf)
if err != nil {
return nil, fmt.Errorf("failed to read response: %v", err)
}
// Parse the response
response := new(dns.Msg)
if err := response.Unpack(buf[:n]); err != nil {
return nil, fmt.Errorf("failed to unpack response: %v", err)
}
return response, nil
}
// dialTunnel creates a UDP connection through the tunnel netstack
func (p *DNSProxy) dialTunnel(network, addr string) (net.Conn, uint16, error) {
if p.tunnelStack == nil {
return nil, 0, fmt.Errorf("tunnel netstack not initialized")
}
// Parse remote address
raddr, err := net.ResolveUDPAddr("udp", addr)
if err != nil {
return nil, 0, err
}
// Use tunnel IP as source
ipBytes := p.tunnelIP.As4()
// Create UDP connection with ephemeral port
laddr := &tcpip.FullAddress{
NIC: 1,
Addr: tcpip.AddrFrom4(ipBytes),
Port: 0,
}
raddrTcpip := &tcpip.FullAddress{
NIC: 1,
Addr: tcpip.AddrFrom4([4]byte(raddr.IP.To4())),
Port: uint16(raddr.Port),
}
conn, err := gonet.DialUDP(p.tunnelStack, laddr, raddrTcpip, ipv4.ProtocolNumber)
if err != nil {
return nil, 0, err
}
// Get local port
localAddr := conn.LocalAddr().(*net.UDPAddr)
port := uint16(localAddr.Port)
// Register port so we can receive responses
p.tunnelPortsLock.Lock()
p.tunnelActivePorts[port] = true
p.tunnelPortsLock.Unlock()
return conn, port, nil
}
// removeTunnelPort removes a port from the active ports map
func (p *DNSProxy) removeTunnelPort(port uint16) {
p.tunnelPortsLock.Lock()
delete(p.tunnelActivePorts, port)
p.tunnelPortsLock.Unlock()
}
// runTunnelPacketSender reads packets from tunnel netstack and injects them into WireGuard
func (p *DNSProxy) runTunnelPacketSender() {
defer p.wg.Done()
logger.Debug("DNS tunnel packet sender goroutine started")
ticker := time.NewTicker(1 * time.Millisecond)
defer ticker.Stop()
for {
select {
case <-p.ctx.Done():
logger.Debug("DNS tunnel packet sender exiting")
// Drain any remaining packets
for {
pkt := p.tunnelEp.Read()
if pkt == nil {
break
}
pkt.DecRef()
}
return
case <-ticker.C:
// Try to read packets
for i := 0; i < 10; i++ {
pkt := p.tunnelEp.Read()
if pkt == nil {
break
}
// Extract packet data
slices := pkt.AsSlices()
if len(slices) > 0 {
var totalSize int
for _, slice := range slices {
totalSize += len(slice)
}
buf := make([]byte, totalSize)
pos := 0
for _, slice := range slices {
copy(buf[pos:], slice)
pos += len(slice)
}
// Inject into MiddleDevice (outbound to WG)
p.middleDevice.InjectOutbound(buf)
}
pkt.DecRef()
}
}
}
}
// runPacketSender sends packets from netstack back to TUN
func (p *DNSProxy) runPacketSender() {
defer p.wg.Done()

View File

@@ -2,6 +2,7 @@ package dns
import (
"net"
"strings"
"sync"
"github.com/miekg/dns"
@@ -17,21 +18,26 @@ const (
// DNSRecordStore manages local DNS records for A and AAAA queries
type DNSRecordStore struct {
mu sync.RWMutex
aRecords map[string][]net.IP // domain -> list of IPv4 addresses
aaaaRecords map[string][]net.IP // domain -> list of IPv6 addresses
mu sync.RWMutex
aRecords map[string][]net.IP // domain -> list of IPv4 addresses
aaaaRecords map[string][]net.IP // domain -> list of IPv6 addresses
aWildcards map[string][]net.IP // wildcard pattern -> list of IPv4 addresses
aaaaWildcards map[string][]net.IP // wildcard pattern -> list of IPv6 addresses
}
// NewDNSRecordStore creates a new DNS record store
func NewDNSRecordStore() *DNSRecordStore {
return &DNSRecordStore{
aRecords: make(map[string][]net.IP),
aaaaRecords: make(map[string][]net.IP),
aRecords: make(map[string][]net.IP),
aaaaRecords: make(map[string][]net.IP),
aWildcards: make(map[string][]net.IP),
aaaaWildcards: make(map[string][]net.IP),
}
}
// AddRecord adds a DNS record mapping (A or AAAA)
// domain should be in FQDN format (e.g., "example.com.")
// domain can contain wildcards: * (0+ chars) and ? (exactly 1 char)
// ip should be a valid IPv4 or IPv6 address
func (s *DNSRecordStore) AddRecord(domain string, ip net.IP) error {
s.mu.Lock()
@@ -45,12 +51,23 @@ func (s *DNSRecordStore) AddRecord(domain string, ip net.IP) error {
// Normalize domain to lowercase
domain = dns.Fqdn(domain)
// Check if domain contains wildcards
isWildcard := strings.ContainsAny(domain, "*?")
if ip.To4() != nil {
// IPv4 address
s.aRecords[domain] = append(s.aRecords[domain], ip)
if isWildcard {
s.aWildcards[domain] = append(s.aWildcards[domain], ip)
} else {
s.aRecords[domain] = append(s.aRecords[domain], ip)
}
} else if ip.To16() != nil {
// IPv6 address
s.aaaaRecords[domain] = append(s.aaaaRecords[domain], ip)
if isWildcard {
s.aaaaWildcards[domain] = append(s.aaaaWildcards[domain], ip)
} else {
s.aaaaRecords[domain] = append(s.aaaaRecords[domain], ip)
}
} else {
return &net.ParseError{Type: "IP address", Text: ip.String()}
}
@@ -59,7 +76,7 @@ func (s *DNSRecordStore) AddRecord(domain string, ip net.IP) error {
}
// RemoveRecord removes a specific DNS record mapping
// If ip is nil, removes all records for the domain
// If ip is nil, removes all records for the domain (including wildcards)
func (s *DNSRecordStore) RemoveRecord(domain string, ip net.IP) {
s.mu.Lock()
defer s.mu.Unlock()
@@ -72,33 +89,60 @@ func (s *DNSRecordStore) RemoveRecord(domain string, ip net.IP) {
// Normalize domain to lowercase
domain = dns.Fqdn(domain)
// Check if domain contains wildcards
isWildcard := strings.ContainsAny(domain, "*?")
if ip == nil {
// Remove all records for this domain
delete(s.aRecords, domain)
delete(s.aaaaRecords, domain)
if isWildcard {
delete(s.aWildcards, domain)
delete(s.aaaaWildcards, domain)
} else {
delete(s.aRecords, domain)
delete(s.aaaaRecords, domain)
}
return
}
if ip.To4() != nil {
// Remove specific IPv4 address
if ips, ok := s.aRecords[domain]; ok {
s.aRecords[domain] = removeIP(ips, ip)
if len(s.aRecords[domain]) == 0 {
delete(s.aRecords, domain)
if isWildcard {
if ips, ok := s.aWildcards[domain]; ok {
s.aWildcards[domain] = removeIP(ips, ip)
if len(s.aWildcards[domain]) == 0 {
delete(s.aWildcards, domain)
}
}
} else {
if ips, ok := s.aRecords[domain]; ok {
s.aRecords[domain] = removeIP(ips, ip)
if len(s.aRecords[domain]) == 0 {
delete(s.aRecords, domain)
}
}
}
} else if ip.To16() != nil {
// Remove specific IPv6 address
if ips, ok := s.aaaaRecords[domain]; ok {
s.aaaaRecords[domain] = removeIP(ips, ip)
if len(s.aaaaRecords[domain]) == 0 {
delete(s.aaaaRecords, domain)
if isWildcard {
if ips, ok := s.aaaaWildcards[domain]; ok {
s.aaaaWildcards[domain] = removeIP(ips, ip)
if len(s.aaaaWildcards[domain]) == 0 {
delete(s.aaaaWildcards, domain)
}
}
} else {
if ips, ok := s.aaaaRecords[domain]; ok {
s.aaaaRecords[domain] = removeIP(ips, ip)
if len(s.aaaaRecords[domain]) == 0 {
delete(s.aaaaRecords, domain)
}
}
}
}
}
// GetRecords returns all IP addresses for a domain and record type
// First checks for exact matches, then checks wildcard patterns
func (s *DNSRecordStore) GetRecords(domain string, recordType RecordType) []net.IP {
s.mu.RLock()
defer s.mu.RUnlock()
@@ -109,16 +153,45 @@ func (s *DNSRecordStore) GetRecords(domain string, recordType RecordType) []net.
var records []net.IP
switch recordType {
case RecordTypeA:
// Check exact match first
if ips, ok := s.aRecords[domain]; ok {
// Return a copy to prevent external modifications
records = make([]net.IP, len(ips))
copy(records, ips)
return records
}
// Check wildcard patterns
for pattern, ips := range s.aWildcards {
if matchWildcard(pattern, domain) {
records = append(records, ips...)
}
}
if len(records) > 0 {
// Return a copy
result := make([]net.IP, len(records))
copy(result, records)
return result
}
case RecordTypeAAAA:
// Check exact match first
if ips, ok := s.aaaaRecords[domain]; ok {
// Return a copy to prevent external modifications
records = make([]net.IP, len(ips))
copy(records, ips)
return records
}
// Check wildcard patterns
for pattern, ips := range s.aaaaWildcards {
if matchWildcard(pattern, domain) {
records = append(records, ips...)
}
}
if len(records) > 0 {
// Return a copy
result := make([]net.IP, len(records))
copy(result, records)
return result
}
}
@@ -126,6 +199,7 @@ func (s *DNSRecordStore) GetRecords(domain string, recordType RecordType) []net.
}
// HasRecord checks if a domain has any records of the specified type
// Checks both exact matches and wildcard patterns
func (s *DNSRecordStore) HasRecord(domain string, recordType RecordType) bool {
s.mu.RLock()
defer s.mu.RUnlock()
@@ -135,11 +209,27 @@ func (s *DNSRecordStore) HasRecord(domain string, recordType RecordType) bool {
switch recordType {
case RecordTypeA:
_, ok := s.aRecords[domain]
return ok
// Check exact match
if _, ok := s.aRecords[domain]; ok {
return true
}
// Check wildcard patterns
for pattern := range s.aWildcards {
if matchWildcard(pattern, domain) {
return true
}
}
case RecordTypeAAAA:
_, ok := s.aaaaRecords[domain]
return ok
// Check exact match
if _, ok := s.aaaaRecords[domain]; ok {
return true
}
// Check wildcard patterns
for pattern := range s.aaaaWildcards {
if matchWildcard(pattern, domain) {
return true
}
}
}
return false
@@ -152,6 +242,8 @@ func (s *DNSRecordStore) Clear() {
s.aRecords = make(map[string][]net.IP)
s.aaaaRecords = make(map[string][]net.IP)
s.aWildcards = make(map[string][]net.IP)
s.aaaaWildcards = make(map[string][]net.IP)
}
// removeIP is a helper function to remove a specific IP from a slice
@@ -164,3 +256,70 @@ func removeIP(ips []net.IP, toRemove net.IP) []net.IP {
}
return result
}
// matchWildcard checks if a domain matches a wildcard pattern
// Pattern supports * (0+ chars) and ? (exactly 1 char)
// Special case: *.domain.com does not match domain.com itself
func matchWildcard(pattern, domain string) bool {
return matchWildcardInternal(pattern, domain, 0, 0)
}
// matchWildcardInternal performs the actual wildcard matching recursively
func matchWildcardInternal(pattern, domain string, pi, di int) bool {
plen := len(pattern)
dlen := len(domain)
// Base cases
if pi == plen && di == dlen {
return true
}
if pi == plen {
return false
}
// Handle wildcard characters
if pattern[pi] == '*' {
// Special case: if pattern starts with "*." and we're at the beginning,
// ensure we don't match the domain without a prefix
// e.g., *.autoco.internal should not match autoco.internal
if pi == 0 && pi+1 < plen && pattern[pi+1] == '.' {
// The * must match at least one character
if di == dlen {
return false
}
// Try matching 1 or more characters before the dot
for i := di + 1; i <= dlen; i++ {
if matchWildcardInternal(pattern, domain, pi+1, i) {
return true
}
}
return false
}
// Normal * matching (0 or more characters)
// Try matching 0 characters (skip the *)
if matchWildcardInternal(pattern, domain, pi+1, di) {
return true
}
// Try matching 1+ characters
if di < dlen {
return matchWildcardInternal(pattern, domain, pi, di+1)
}
return false
}
if pattern[pi] == '?' {
// ? matches exactly one character
if di >= dlen {
return false
}
return matchWildcardInternal(pattern, domain, pi+1, di+1)
}
// Regular character - must match exactly
if di >= dlen || pattern[pi] != domain[di] {
return false
}
return matchWildcardInternal(pattern, domain, pi+1, di+1)
}

350
dns/dns_records_test.go Normal file
View File

@@ -0,0 +1,350 @@
package dns
import (
"net"
"testing"
)
func TestWildcardMatching(t *testing.T) {
tests := []struct {
name string
pattern string
domain string
expected bool
}{
// Basic wildcard tests
{
name: "*.autoco.internal matches host.autoco.internal",
pattern: "*.autoco.internal.",
domain: "host.autoco.internal.",
expected: true,
},
{
name: "*.autoco.internal matches longerhost.autoco.internal",
pattern: "*.autoco.internal.",
domain: "longerhost.autoco.internal.",
expected: true,
},
{
name: "*.autoco.internal matches sub.host.autoco.internal",
pattern: "*.autoco.internal.",
domain: "sub.host.autoco.internal.",
expected: true,
},
{
name: "*.autoco.internal does NOT match autoco.internal",
pattern: "*.autoco.internal.",
domain: "autoco.internal.",
expected: false,
},
// Question mark wildcard tests
{
name: "host-0?.autoco.internal matches host-01.autoco.internal",
pattern: "host-0?.autoco.internal.",
domain: "host-01.autoco.internal.",
expected: true,
},
{
name: "host-0?.autoco.internal matches host-0a.autoco.internal",
pattern: "host-0?.autoco.internal.",
domain: "host-0a.autoco.internal.",
expected: true,
},
{
name: "host-0?.autoco.internal does NOT match host-0.autoco.internal",
pattern: "host-0?.autoco.internal.",
domain: "host-0.autoco.internal.",
expected: false,
},
{
name: "host-0?.autoco.internal does NOT match host-012.autoco.internal",
pattern: "host-0?.autoco.internal.",
domain: "host-012.autoco.internal.",
expected: false,
},
// Combined wildcard tests
{
name: "*.host-0?.autoco.internal matches sub.host-01.autoco.internal",
pattern: "*.host-0?.autoco.internal.",
domain: "sub.host-01.autoco.internal.",
expected: true,
},
{
name: "*.host-0?.autoco.internal matches prefix.host-0a.autoco.internal",
pattern: "*.host-0?.autoco.internal.",
domain: "prefix.host-0a.autoco.internal.",
expected: true,
},
{
name: "*.host-0?.autoco.internal does NOT match host-01.autoco.internal",
pattern: "*.host-0?.autoco.internal.",
domain: "host-01.autoco.internal.",
expected: false,
},
// Multiple asterisks
{
name: "*.*. autoco.internal matches any.thing.autoco.internal",
pattern: "*.*.autoco.internal.",
domain: "any.thing.autoco.internal.",
expected: true,
},
{
name: "*.*.autoco.internal does NOT match single.autoco.internal",
pattern: "*.*.autoco.internal.",
domain: "single.autoco.internal.",
expected: false,
},
// Asterisk in middle
{
name: "host-*.autoco.internal matches host-anything.autoco.internal",
pattern: "host-*.autoco.internal.",
domain: "host-anything.autoco.internal.",
expected: true,
},
{
name: "host-*.autoco.internal matches host-.autoco.internal (empty match)",
pattern: "host-*.autoco.internal.",
domain: "host-.autoco.internal.",
expected: true,
},
// Multiple question marks
{
name: "host-??.autoco.internal matches host-01.autoco.internal",
pattern: "host-??.autoco.internal.",
domain: "host-01.autoco.internal.",
expected: true,
},
{
name: "host-??.autoco.internal does NOT match host-1.autoco.internal",
pattern: "host-??.autoco.internal.",
domain: "host-1.autoco.internal.",
expected: false,
},
// Exact match (no wildcards)
{
name: "exact.autoco.internal matches exact.autoco.internal",
pattern: "exact.autoco.internal.",
domain: "exact.autoco.internal.",
expected: true,
},
{
name: "exact.autoco.internal does NOT match other.autoco.internal",
pattern: "exact.autoco.internal.",
domain: "other.autoco.internal.",
expected: false,
},
// Edge cases
{
name: "* matches anything",
pattern: "*",
domain: "anything.at.all.",
expected: true,
},
{
name: "*.* matches multi.level.",
pattern: "*.*",
domain: "multi.level.",
expected: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := matchWildcard(tt.pattern, tt.domain)
if result != tt.expected {
t.Errorf("matchWildcard(%q, %q) = %v, want %v", tt.pattern, tt.domain, result, tt.expected)
}
})
}
}
func TestDNSRecordStoreWildcard(t *testing.T) {
store := NewDNSRecordStore()
// Add wildcard records
wildcardIP := net.ParseIP("10.0.0.1")
err := store.AddRecord("*.autoco.internal", wildcardIP)
if err != nil {
t.Fatalf("Failed to add wildcard record: %v", err)
}
// Add exact record
exactIP := net.ParseIP("10.0.0.2")
err = store.AddRecord("exact.autoco.internal", exactIP)
if err != nil {
t.Fatalf("Failed to add exact record: %v", err)
}
// Test exact match takes precedence
ips := store.GetRecords("exact.autoco.internal.", RecordTypeA)
if len(ips) != 1 {
t.Errorf("Expected 1 IP for exact match, got %d", len(ips))
}
if !ips[0].Equal(exactIP) {
t.Errorf("Expected exact IP %v, got %v", exactIP, ips[0])
}
// Test wildcard match
ips = store.GetRecords("host.autoco.internal.", RecordTypeA)
if len(ips) != 1 {
t.Errorf("Expected 1 IP for wildcard match, got %d", len(ips))
}
if !ips[0].Equal(wildcardIP) {
t.Errorf("Expected wildcard IP %v, got %v", wildcardIP, ips[0])
}
// Test non-match (base domain)
ips = store.GetRecords("autoco.internal.", RecordTypeA)
if len(ips) != 0 {
t.Errorf("Expected 0 IPs for base domain, got %d", len(ips))
}
}
func TestDNSRecordStoreComplexWildcard(t *testing.T) {
store := NewDNSRecordStore()
// Add complex wildcard pattern
ip1 := net.ParseIP("10.0.0.1")
err := store.AddRecord("*.host-0?.autoco.internal", ip1)
if err != nil {
t.Fatalf("Failed to add wildcard record: %v", err)
}
// Test matching domain
ips := store.GetRecords("sub.host-01.autoco.internal.", RecordTypeA)
if len(ips) != 1 {
t.Errorf("Expected 1 IP for complex wildcard match, got %d", len(ips))
}
if len(ips) > 0 && !ips[0].Equal(ip1) {
t.Errorf("Expected IP %v, got %v", ip1, ips[0])
}
// Test non-matching domain (missing prefix)
ips = store.GetRecords("host-01.autoco.internal.", RecordTypeA)
if len(ips) != 0 {
t.Errorf("Expected 0 IPs for domain without prefix, got %d", len(ips))
}
// Test non-matching domain (wrong ? position)
ips = store.GetRecords("sub.host-012.autoco.internal.", RecordTypeA)
if len(ips) != 0 {
t.Errorf("Expected 0 IPs for domain with wrong ? match, got %d", len(ips))
}
}
func TestDNSRecordStoreRemoveWildcard(t *testing.T) {
store := NewDNSRecordStore()
// Add wildcard record
ip := net.ParseIP("10.0.0.1")
err := store.AddRecord("*.autoco.internal", ip)
if err != nil {
t.Fatalf("Failed to add wildcard record: %v", err)
}
// Verify it exists
ips := store.GetRecords("host.autoco.internal.", RecordTypeA)
if len(ips) != 1 {
t.Errorf("Expected 1 IP before removal, got %d", len(ips))
}
// Remove wildcard record
store.RemoveRecord("*.autoco.internal", nil)
// Verify it's gone
ips = store.GetRecords("host.autoco.internal.", RecordTypeA)
if len(ips) != 0 {
t.Errorf("Expected 0 IPs after removal, got %d", len(ips))
}
}
func TestDNSRecordStoreMultipleWildcards(t *testing.T) {
store := NewDNSRecordStore()
// Add multiple wildcard patterns that don't overlap
ip1 := net.ParseIP("10.0.0.1")
ip2 := net.ParseIP("10.0.0.2")
ip3 := net.ParseIP("10.0.0.3")
err := store.AddRecord("*.prod.autoco.internal", ip1)
if err != nil {
t.Fatalf("Failed to add first wildcard: %v", err)
}
err = store.AddRecord("*.dev.autoco.internal", ip2)
if err != nil {
t.Fatalf("Failed to add second wildcard: %v", err)
}
// Add a broader wildcard that matches both
err = store.AddRecord("*.autoco.internal", ip3)
if err != nil {
t.Fatalf("Failed to add third wildcard: %v", err)
}
// Test domain matching only the prod pattern and the broad pattern
ips := store.GetRecords("host.prod.autoco.internal.", RecordTypeA)
if len(ips) != 2 {
t.Errorf("Expected 2 IPs (prod + broad), got %d", len(ips))
}
// Test domain matching only the dev pattern and the broad pattern
ips = store.GetRecords("service.dev.autoco.internal.", RecordTypeA)
if len(ips) != 2 {
t.Errorf("Expected 2 IPs (dev + broad), got %d", len(ips))
}
// Test domain matching only the broad pattern
ips = store.GetRecords("host.test.autoco.internal.", RecordTypeA)
if len(ips) != 1 {
t.Errorf("Expected 1 IP (broad only), got %d", len(ips))
}
}
func TestDNSRecordStoreIPv6Wildcard(t *testing.T) {
store := NewDNSRecordStore()
// Add IPv6 wildcard record
ip := net.ParseIP("2001:db8::1")
err := store.AddRecord("*.autoco.internal", ip)
if err != nil {
t.Fatalf("Failed to add IPv6 wildcard record: %v", err)
}
// Test wildcard match for IPv6
ips := store.GetRecords("host.autoco.internal.", RecordTypeAAAA)
if len(ips) != 1 {
t.Errorf("Expected 1 IPv6 for wildcard match, got %d", len(ips))
}
if len(ips) > 0 && !ips[0].Equal(ip) {
t.Errorf("Expected IPv6 %v, got %v", ip, ips[0])
}
}
func TestHasRecordWildcard(t *testing.T) {
store := NewDNSRecordStore()
// Add wildcard record
ip := net.ParseIP("10.0.0.1")
err := store.AddRecord("*.autoco.internal", ip)
if err != nil {
t.Fatalf("Failed to add wildcard record: %v", err)
}
// Test HasRecord with wildcard match
if !store.HasRecord("host.autoco.internal.", RecordTypeA) {
t.Error("Expected HasRecord to return true for wildcard match")
}
// Test HasRecord with non-match
if store.HasRecord("autoco.internal.", RecordTypeA) {
t.Error("Expected HasRecord to return false for base domain")
}
}

View File

@@ -5,9 +5,13 @@ package dns
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"net/netip"
"os"
"os/exec"
"path/filepath"
"runtime"
"strconv"
"strings"
@@ -28,19 +32,38 @@ const (
keyServerPort = "ServerPort"
arraySymbol = "* "
digitSymbol = "# "
// State file name for crash recovery
dnsStateFileName = "dns_state.json"
)
// DNSPersistentState represents the state saved to disk for crash recovery
type DNSPersistentState struct {
CreatedKeys []string `json:"created_keys"`
}
// DarwinDNSConfigurator manages DNS settings on macOS using scutil
type DarwinDNSConfigurator struct {
createdKeys map[string]struct{}
originalState *DNSState
stateFilePath string
}
// NewDarwinDNSConfigurator creates a new macOS DNS configurator
func NewDarwinDNSConfigurator() (*DarwinDNSConfigurator, error) {
return &DarwinDNSConfigurator{
createdKeys: make(map[string]struct{}),
}, nil
stateFilePath := getDNSStateFilePath()
configurator := &DarwinDNSConfigurator{
createdKeys: make(map[string]struct{}),
stateFilePath: stateFilePath,
}
// Clean up any leftover state from a previous crash
if err := configurator.CleanupUncleanShutdown(); err != nil {
logger.Warn("Failed to cleanup previous DNS state: %v", err)
}
return configurator, nil
}
// Name returns the configurator name
@@ -67,6 +90,11 @@ func (d *DarwinDNSConfigurator) SetDNS(servers []netip.Addr) ([]netip.Addr, erro
return nil, fmt.Errorf("apply DNS servers: %w", err)
}
// Persist state to disk for crash recovery
if err := d.saveState(); err != nil {
logger.Warn("Failed to save DNS state for crash recovery: %v", err)
}
// Flush DNS cache
if err := d.flushDNSCache(); err != nil {
// Non-fatal, just log
@@ -85,6 +113,11 @@ func (d *DarwinDNSConfigurator) RestoreDNS() error {
}
}
// Clear state file after successful restoration
if err := d.clearState(); err != nil {
logger.Warn("Failed to clear DNS state file: %v", err)
}
// Flush DNS cache
if err := d.flushDNSCache(); err != nil {
fmt.Printf("warning: failed to flush DNS cache: %v\n", err)
@@ -112,6 +145,47 @@ func (d *DarwinDNSConfigurator) GetCurrentDNS() ([]netip.Addr, error) {
return servers, nil
}
// CleanupUncleanShutdown removes any DNS keys left over from a previous crash
func (d *DarwinDNSConfigurator) CleanupUncleanShutdown() error {
state, err := d.loadState()
if err != nil {
if os.IsNotExist(err) {
// No state file, nothing to clean up
return nil
}
return fmt.Errorf("load state: %w", err)
}
if len(state.CreatedKeys) == 0 {
// No keys to clean up
return nil
}
logger.Info("Found DNS state from previous session, cleaning up %d keys", len(state.CreatedKeys))
// Remove all keys from previous session
var lastErr error
for _, key := range state.CreatedKeys {
logger.Debug("Removing leftover DNS key: %s", key)
if err := d.removeKeyDirect(key); err != nil {
logger.Warn("Failed to remove DNS key %s: %v", key, err)
lastErr = err
}
}
// Clear state file
if err := d.clearState(); err != nil {
logger.Warn("Failed to clear DNS state file: %v", err)
}
// Flush DNS cache after cleanup
if err := d.flushDNSCache(); err != nil {
logger.Warn("Failed to flush DNS cache after cleanup: %v", err)
}
return lastErr
}
// applyDNSServers applies the DNS server configuration
func (d *DarwinDNSConfigurator) applyDNSServers(servers []netip.Addr) error {
if len(servers) == 0 {
@@ -156,15 +230,25 @@ func (d *DarwinDNSConfigurator) addDNSState(state, domains string, dnsServer net
return nil
}
// removeKey removes a DNS configuration key
// removeKey removes a DNS configuration key and updates internal state
func (d *DarwinDNSConfigurator) removeKey(key string) error {
if err := d.removeKeyDirect(key); err != nil {
return err
}
delete(d.createdKeys, key)
return nil
}
// removeKeyDirect removes a DNS configuration key without updating internal state
// Used for cleanup operations
func (d *DarwinDNSConfigurator) removeKeyDirect(key string) error {
cmd := fmt.Sprintf("remove %s\n", key)
if _, err := d.runScutil(cmd); err != nil {
return fmt.Errorf("remove key: %w", err)
}
delete(d.createdKeys, key)
return nil
}
@@ -266,3 +350,70 @@ func (d *DarwinDNSConfigurator) runScutil(commands string) ([]byte, error) {
return output, nil
}
// getDNSStateFilePath returns the path to the DNS state file
func getDNSStateFilePath() string {
var stateDir string
switch runtime.GOOS {
case "darwin":
stateDir = filepath.Join(os.Getenv("HOME"), "Library", "Application Support", "olm-client")
default:
stateDir = filepath.Join(os.Getenv("HOME"), ".config", "olm-client")
}
if err := os.MkdirAll(stateDir, 0755); err != nil {
logger.Warn("Failed to create state directory: %v", err)
}
return filepath.Join(stateDir, dnsStateFileName)
}
// saveState persists the current DNS state to disk
func (d *DarwinDNSConfigurator) saveState() error {
keys := make([]string, 0, len(d.createdKeys))
for key := range d.createdKeys {
keys = append(keys, key)
}
state := DNSPersistentState{
CreatedKeys: keys,
}
data, err := json.MarshalIndent(state, "", " ")
if err != nil {
return fmt.Errorf("marshal state: %w", err)
}
if err := os.WriteFile(d.stateFilePath, data, 0644); err != nil {
return fmt.Errorf("write state file: %w", err)
}
logger.Debug("Saved DNS state to %s", d.stateFilePath)
return nil
}
// loadState loads the DNS state from disk
func (d *DarwinDNSConfigurator) loadState() (*DNSPersistentState, error) {
data, err := os.ReadFile(d.stateFilePath)
if err != nil {
return nil, err
}
var state DNSPersistentState
if err := json.Unmarshal(data, &state); err != nil {
return nil, fmt.Errorf("unmarshal state: %w", err)
}
return &state, nil
}
// clearState removes the DNS state file
func (d *DarwinDNSConfigurator) clearState() error {
err := os.Remove(d.stateFilePath)
if err != nil && !os.IsNotExist(err) {
return fmt.Errorf("remove state file: %w", err)
}
logger.Debug("Cleared DNS state file")
return nil
}

View File

@@ -22,7 +22,11 @@ type FileDNSConfigurator struct {
// NewFileDNSConfigurator creates a new file-based DNS configurator
func NewFileDNSConfigurator() (*FileDNSConfigurator, error) {
return &FileDNSConfigurator{}, nil
f := &FileDNSConfigurator{}
if err := f.CleanupUncleanShutdown(); err != nil {
return nil, fmt.Errorf("cleanup unclean shutdown: %w", err)
}
return f, nil
}
// Name returns the configurator name
@@ -78,6 +82,30 @@ func (f *FileDNSConfigurator) RestoreDNS() error {
return nil
}
// CleanupUncleanShutdown removes any DNS configuration left over from a previous crash
// For the file-based configurator, we check if a backup file exists (indicating a crash
// happened while DNS was configured) and restore from it if so.
func (f *FileDNSConfigurator) CleanupUncleanShutdown() error {
// Check if backup file exists from a previous session
if !f.isBackupExists() {
// No backup file, nothing to clean up
return nil
}
// A backup exists, which means we crashed while DNS was configured
// Restore the original resolv.conf
if err := copyFile(resolvConfBackupPath, resolvConfPath); err != nil {
return fmt.Errorf("restore from backup during cleanup: %w", err)
}
// Remove backup file
if err := os.Remove(resolvConfBackupPath); err != nil {
return fmt.Errorf("remove backup file during cleanup: %w", err)
}
return nil
}
// GetCurrentDNS returns the currently configured DNS servers
func (f *FileDNSConfigurator) GetCurrentDNS() ([]netip.Addr, error) {
content, err := os.ReadFile(resolvConfPath)

View File

@@ -50,11 +50,18 @@ func NewNetworkManagerDNSConfigurator(ifaceName string) (*NetworkManagerDNSConfi
return nil, fmt.Errorf("NetworkManager conf.d directory not found: %s", networkManagerConfDir)
}
return &NetworkManagerDNSConfigurator{
configurator := &NetworkManagerDNSConfigurator{
ifaceName: ifaceName,
confPath: networkManagerConfDir + "/" + networkManagerDNSConfFile,
dispatchPath: networkManagerDispatcherDir + "/" + networkManagerDispatcherFile,
}, nil
}
// Clean up any stale configuration from a previous unclean shutdown
if err := configurator.CleanupUncleanShutdown(); err != nil {
return nil, fmt.Errorf("cleanup unclean shutdown: %w", err)
}
return configurator, nil
}
// Name returns the configurator name
@@ -100,6 +107,30 @@ func (n *NetworkManagerDNSConfigurator) RestoreDNS() error {
return nil
}
// CleanupUncleanShutdown removes any DNS configuration left over from a previous crash
// For NetworkManager, we check if our config file exists and remove it if so.
// This ensures that if the process crashed while DNS was configured, the stale
// configuration is removed on the next startup.
func (n *NetworkManagerDNSConfigurator) CleanupUncleanShutdown() error {
// Check if our config file exists from a previous session
if _, err := os.Stat(n.confPath); os.IsNotExist(err) {
// No config file, nothing to clean up
return nil
}
// Remove the stale configuration file
if err := os.Remove(n.confPath); err != nil && !os.IsNotExist(err) {
return fmt.Errorf("remove stale DNS config file: %w", err)
}
// Reload NetworkManager to apply the change
if err := n.reloadNetworkManager(); err != nil {
return fmt.Errorf("reload NetworkManager after cleanup: %w", err)
}
return nil
}
// GetCurrentDNS returns the currently configured DNS servers by reading /etc/resolv.conf
func (n *NetworkManagerDNSConfigurator) GetCurrentDNS() ([]netip.Addr, error) {
content, err := os.ReadFile("/etc/resolv.conf")

View File

@@ -31,10 +31,17 @@ func NewResolvconfDNSConfigurator(ifaceName string) (*ResolvconfDNSConfigurator,
return nil, fmt.Errorf("detect resolvconf type: %w", err)
}
return &ResolvconfDNSConfigurator{
configurator := &ResolvconfDNSConfigurator{
ifaceName: ifaceName,
implType: implType,
}, nil
}
// Call cleanup function to remove any stale DNS config for this interface
if err := configurator.CleanupUncleanShutdown(); err != nil {
return nil, fmt.Errorf("cleanup unclean shutdown: %w", err)
}
return configurator, nil
}
// Name returns the configurator name
@@ -84,6 +91,28 @@ func (r *ResolvconfDNSConfigurator) RestoreDNS() error {
return nil
}
// CleanupUncleanShutdown removes any DNS configuration left over from a previous crash
// For resolvconf, we attempt to delete any entry for the interface name.
// This ensures that if the process crashed while DNS was configured, the stale
// entry is removed on the next startup.
func (r *ResolvconfDNSConfigurator) CleanupUncleanShutdown() error {
// Try to delete any existing entry for this interface
// This is idempotent - if no entry exists, resolvconf will just return success
var cmd *exec.Cmd
switch r.implType {
case "openresolv":
cmd = exec.Command(resolvconfCommand, "-f", "-d", r.ifaceName)
default:
cmd = exec.Command(resolvconfCommand, "-d", r.ifaceName)
}
// Ignore errors - the entry may not exist, which is fine
_ = cmd.Run()
return nil
}
// GetCurrentDNS returns the currently configured DNS servers
func (r *ResolvconfDNSConfigurator) GetCurrentDNS() ([]netip.Addr, error) {
// resolvconf doesn't provide a direct way to query per-interface DNS

View File

@@ -73,10 +73,17 @@ func NewSystemdResolvedDNSConfigurator(ifaceName string) (*SystemdResolvedDNSCon
return nil, fmt.Errorf("get link: %w", err)
}
return &SystemdResolvedDNSConfigurator{
config := &SystemdResolvedDNSConfigurator{
ifaceName: ifaceName,
dbusLinkObject: dbus.ObjectPath(linkPath),
}, nil
}
// Call cleanup function here
if err := config.CleanupUncleanShutdown(); err != nil {
fmt.Printf("warning: cleanup unclean shutdown failed: %v\n", err)
}
return config, nil
}
// Name returns the configurator name
@@ -133,6 +140,17 @@ func (s *SystemdResolvedDNSConfigurator) RestoreDNS() error {
return nil
}
// CleanupUncleanShutdown removes any DNS configuration left over from a previous crash
// For systemd-resolved, the DNS configuration is tied to the network interface.
// When the interface is destroyed and recreated, systemd-resolved automatically
// clears the per-link DNS settings, so there's nothing to clean up.
func (s *SystemdResolvedDNSConfigurator) CleanupUncleanShutdown() error {
// systemd-resolved DNS configuration is per-link and automatically cleared
// when the link (interface) is destroyed. Since the WireGuard interface is
// recreated on restart, there's no leftover state to clean up.
return nil
}
// GetCurrentDNS returns the currently configured DNS servers
// Note: systemd-resolved doesn't easily expose current per-link DNS servers via D-Bus
// This is a placeholder that returns an empty list

View File

@@ -17,6 +17,10 @@ type DNSConfigurator interface {
// Name returns the name of this configurator implementation
Name() string
// CleanupUncleanShutdown removes any DNS configuration left over from
// a previous crash or unclean shutdown. This should be called on startup.
CleanupUncleanShutdown() error
}
// DNSConfig contains the configuration for DNS override

View File

@@ -113,6 +113,18 @@ func (w *WindowsDNSConfigurator) RestoreDNS() error {
return nil
}
// CleanupUncleanShutdown removes any DNS configuration left over from a previous crash
// On Windows, we rely on the registry-based approach which doesn't leave orphaned state
// in the same way as macOS scutil. The DNS settings are tied to the interface which
// gets recreated on restart.
func (w *WindowsDNSConfigurator) CleanupUncleanShutdown() error {
// Windows DNS configuration via registry is interface-specific.
// When the WireGuard interface is recreated, it gets a new GUID,
// so there's no leftover state to clean up from previous sessions.
// The old interface's registry keys are effectively orphaned but harmless.
return nil
}
// GetCurrentDNS returns the currently configured DNS servers
func (w *WindowsDNSConfigurator) GetCurrentDNS() ([]netip.Addr, error) {
regKey, err := w.getInterfaceRegistryKey(registry.QUERY_VALUE)

27
go.mod
View File

@@ -4,30 +4,29 @@ go 1.25
require (
github.com/Microsoft/go-winio v0.6.2
github.com/fosrl/newt v0.0.0
github.com/godbus/dbus/v5 v5.2.0
github.com/fosrl/newt v1.8.1
github.com/godbus/dbus/v5 v5.2.2
github.com/gorilla/websocket v1.5.3
github.com/miekg/dns v1.1.68
github.com/vishvananda/netlink v1.3.1
golang.org/x/sys v0.38.0
github.com/miekg/dns v1.1.70
golang.org/x/sys v0.40.0
golang.zx2c4.com/wireguard v0.0.0-20250521234502-f333402bd9cb
golang.zx2c4.com/wireguard/wgctrl v0.0.0-20241231184526-a9ab2273dd10
golang.zx2c4.com/wireguard/windows v0.5.3
gvisor.dev/gvisor v0.0.0-20250503011706-39ed1f5ac29c
software.sslmate.com/src/go-pkcs12 v0.6.0
software.sslmate.com/src/go-pkcs12 v0.7.0
)
require (
github.com/google/btree v1.1.3 // indirect
github.com/google/go-cmp v0.7.0 // indirect
github.com/vishvananda/netlink v1.3.1 // indirect
github.com/vishvananda/netns v0.0.5 // indirect
golang.org/x/crypto v0.44.0 // indirect
golang.org/x/crypto v0.46.0 // indirect
golang.org/x/exp v0.0.0-20251113190631-e25ba8c21ef6 // indirect
golang.org/x/mod v0.30.0 // indirect
golang.org/x/net v0.47.0 // indirect
golang.org/x/sync v0.18.0 // indirect
golang.org/x/mod v0.31.0 // indirect
golang.org/x/net v0.48.0 // indirect
golang.org/x/sync v0.19.0 // indirect
golang.org/x/time v0.12.0 // indirect
golang.org/x/tools v0.39.0 // indirect
golang.org/x/tools v0.40.0 // indirect
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 // indirect
golang.zx2c4.com/wireguard/windows v0.5.3 // indirect
)
replace github.com/fosrl/newt => ../newt

38
go.sum
View File

@@ -1,37 +1,39 @@
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/godbus/dbus/v5 v5.2.0 h1:3WexO+U+yg9T70v9FdHr9kCxYlazaAXUhx2VMkbfax8=
github.com/godbus/dbus/v5 v5.2.0/go.mod h1:3AAv2+hPq5rdnr5txxxRwiGjPXamgoIHgz9FPBfOp3c=
github.com/fosrl/newt v1.8.1 h1:oP3xBEISoO/TENsHccqqs6LXpoOWCt6aiP75CfIWpvk=
github.com/fosrl/newt v1.8.1/go.mod h1:pol958CEs0nQmo/35Ltv0CGksheIKCS2hoNvdTVLEcI=
github.com/godbus/dbus/v5 v5.2.2 h1:TUR3TgtSVDmjiXOgAAyaZbYmIeP3DPkld3jgKGV8mXQ=
github.com/godbus/dbus/v5 v5.2.2/go.mod h1:3AAv2+hPq5rdnr5txxxRwiGjPXamgoIHgz9FPBfOp3c=
github.com/google/btree v1.1.3 h1:CVpQJjYgC4VbzxeGVHfvZrv1ctoYCAI8vbl07Fcxlyg=
github.com/google/btree v1.1.3/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/miekg/dns v1.1.68 h1:jsSRkNozw7G/mnmXULynzMNIsgY2dHC8LO6U6Ij2JEA=
github.com/miekg/dns v1.1.68/go.mod h1:fujopn7TB3Pu3JM69XaawiU0wqjpL9/8xGop5UrTPps=
github.com/miekg/dns v1.1.70 h1:DZ4u2AV35VJxdD9Fo9fIWm119BsQL5cZU1cQ9s0LkqA=
github.com/miekg/dns v1.1.70/go.mod h1:+EuEPhdHOsfk6Wk5TT2CzssZdqkmFhf8r+aVyDEToIs=
github.com/vishvananda/netlink v1.3.1 h1:3AEMt62VKqz90r0tmNhog0r/PpWKmrEShJU0wJW6bV0=
github.com/vishvananda/netlink v1.3.1/go.mod h1:ARtKouGSTGchR8aMwmkzC0qiNPrrWO5JS/XMVl45+b4=
github.com/vishvananda/netns v0.0.5 h1:DfiHV+j8bA32MFM7bfEunvT8IAqQ/NzSJHtcmW5zdEY=
github.com/vishvananda/netns v0.0.5/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM=
golang.org/x/crypto v0.44.0 h1:A97SsFvM3AIwEEmTBiaxPPTYpDC47w720rdiiUvgoAU=
golang.org/x/crypto v0.44.0/go.mod h1:013i+Nw79BMiQiMsOPcVCB5ZIJbYkerPrGnOa00tvmc=
golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU=
golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0=
golang.org/x/exp v0.0.0-20251113190631-e25ba8c21ef6 h1:zfMcR1Cs4KNuomFFgGefv5N0czO2XZpUbxGUy8i8ug0=
golang.org/x/exp v0.0.0-20251113190631-e25ba8c21ef6/go.mod h1:46edojNIoXTNOhySWIWdix628clX9ODXwPsQuG6hsK0=
golang.org/x/mod v0.30.0 h1:fDEXFVZ/fmCKProc/yAXXUijritrDzahmwwefnjoPFk=
golang.org/x/mod v0.30.0/go.mod h1:lAsf5O2EvJeSFMiBxXDki7sCgAxEUcZHXoXMKT4GJKc=
golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I=
golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI=
golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg=
golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU=
golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc=
golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.39.0 h1:ik4ho21kwuQln40uelmciQPp9SipgNDdrafrYA4TmQQ=
golang.org/x/tools v0.39.0/go.mod h1:JnefbkDPyD8UU2kI5fuf8ZX4/yUeh9W877ZeBONxUqQ=
golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA=
golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc=
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2 h1:B82qJJgjvYKsXS9jeunTOisW56dUokqW/FOteYJJ/yg=
golang.zx2c4.com/wintun v0.0.0-20230126152724-0fa3db229ce2/go.mod h1:deeaetjYA+DHMHg+sMSMI58GrEteJUUzzw7en6TJQcI=
golang.zx2c4.com/wireguard v0.0.0-20250521234502-f333402bd9cb h1:whnFRlWMcXI9d+ZbWg+4sHnLp52d5yiIPUxMBSt4X9A=
@@ -42,5 +44,5 @@ golang.zx2c4.com/wireguard/windows v0.5.3 h1:On6j2Rpn3OEMXqBq00QEDC7bWSZrPIHKIus
golang.zx2c4.com/wireguard/windows v0.5.3/go.mod h1:9TEe8TJmtwyQebdFwAkEWOPr3prrtqm+REGFifP60hI=
gvisor.dev/gvisor v0.0.0-20250503011706-39ed1f5ac29c h1:m/r7OM+Y2Ty1sgBQ7Qb27VgIMBW8ZZhT4gLnUyDIhzI=
gvisor.dev/gvisor v0.0.0-20250503011706-39ed1f5ac29c/go.mod h1:3r5CMtNQMKIvBlrmM9xWUNamjKBYPOWyXOjmg5Kts3g=
software.sslmate.com/src/go-pkcs12 v0.6.0 h1:f3sQittAeF+pao32Vb+mkli+ZyT+VwKaD014qFGq6oU=
software.sslmate.com/src/go-pkcs12 v0.6.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI=
software.sslmate.com/src/go-pkcs12 v0.7.0 h1:Db8W44cB54TWD7stUFFSWxdfpdn6fZVcDl0w3R4RVM0=
software.sslmate.com/src/go-pkcs12 v0.7.0/go.mod h1:Qiz0EyvDRJjjxGyUQa2cCNZn/wMyzrRJ/qcDXOQazLI=


@@ -177,7 +177,8 @@ func runOlmMainWithArgs(ctx context.Context, cancel context.CancelFunc, signalCt
// Load configuration from file, env vars, and CLI args
// Priority: CLI args > Env vars > Config file > Defaults
config, showVersion, showConfig, err := LoadConfig(os.Args[1:])
// Use the passed args parameter instead of os.Args[1:] to support Windows service mode
config, showVersion, showConfig, err := LoadConfig(args)
if err != nil {
fmt.Printf("Failed to load configuration: %v\n", err)
return
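The precedence noted in the comments above (CLI args over env vars over the config file over defaults) can be sketched in a few lines; the flag name, env var, and default below are invented for illustration and are not the project's actual options.

package main

import (
	"flag"
	"fmt"
	"os"
)

// resolveEndpoint is a sketch of layered config resolution:
// defaults, then config file, then environment, then CLI flags.
func resolveEndpoint(args []string, fromFile string) (string, error) {
	endpoint := "https://example.invalid" // 1. default (hypothetical)
	if fromFile != "" {                   // 2. value decoded from a config file
		endpoint = fromFile
	}
	if v := os.Getenv("OLM_ENDPOINT"); v != "" { // 3. env var (name is an assumption)
		endpoint = v
	}
	fs := flag.NewFlagSet("olm", flag.ContinueOnError)
	flagVal := fs.String("endpoint", endpoint, "server endpoint") // 4. CLI wins
	if err := fs.Parse(args); err != nil {
		return "", err
	}
	return *flagVal, nil
}

func main() {
	// Note: the real code now passes the caller-supplied args slice, not os.Args[1:],
	// so Windows service mode can provide its own arguments.
	ep, err := resolveEndpoint(os.Args[1:], "")
	if err != nil {
		fmt.Println("parse error:", err)
		return
	}
	fmt.Println("endpoint:", ep)
}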
@@ -235,7 +236,7 @@ func runOlmMainWithArgs(ctx context.Context, cancel context.CancelFunc, signalCt
DNS: config.DNS,
UpstreamDNS: config.UpstreamDNS,
InterfaceName: config.InterfaceName,
Holepunch: config.Holepunch,
Holepunch: !config.DisableHolepunch,
TlsClientCert: config.TlsClientCert,
PingIntervalDuration: config.PingIntervalDuration,
PingTimeoutDuration: config.PingTimeoutDuration,
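Replacing the positive Holepunch option with a negated DisableHolepunch keeps hole punching on by default while still letting users opt out; a minimal sketch of that inversion (the flag name here is assumed, not the real CLI option) looks like this.

package main

import (
	"flag"
	"fmt"
)

func main() {
	// Hole punching defaults to enabled; users opt out with a disable flag.
	disable := flag.Bool("disable-holepunch", false, "turn off hole punching") // flag name is an assumption
	flag.Parse()

	holepunch := !*disable // the internal setting keeps its positive meaning
	fmt.Println("holepunch enabled:", holepunch)
}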

olm.iss

@@ -44,8 +44,8 @@ Name: "english"; MessagesFile: "compiler:Default.isl"
[Files]
; The 'DestName' flag ensures that 'olm_windows_amd64.exe' is installed as 'olm.exe'
Source: "C:\Users\Administrator\Downloads\olm_windows_amd64.exe"; DestDir: "{app}"; DestName: "{#MyAppExeName}"; Flags: ignoreversion
Source: "C:\Users\Administrator\Downloads\wintun.dll"; DestDir: "{app}"; Flags: ignoreversion
Source: "Z:\olm_windows_amd64.exe"; DestDir: "{app}"; DestName: "{#MyAppExeName}"; Flags: ignoreversion
Source: "Z:\wintun.dll"; DestDir: "{app}"; Flags: ignoreversion
; NOTE: Don't use "Flags: ignoreversion" on any shared system files
[Icons]
@@ -57,13 +57,13 @@ Name: "{group}\{#MyAppName}"; Filename: "{app}\{#MyAppExeName}"
; The 'Path' variable is located under 'SYSTEM\CurrentControlSet\Control\Session Manager\Environment'.
; ValueType: expandsz allows for environment variables (like %ProgramFiles%) in the path.
; ValueData: "{olddata};{app}" appends the current application directory to the existing PATH.
; Flags: uninsdeletevalue ensures the entry is removed upon uninstallation.
; Check: IsWin64 ensures this is applied on 64-bit systems, which matches ArchitecturesAllowed.
; Note: Removal during uninstallation is handled by CurUninstallStepChanged procedure in [Code] section.
; Check: NeedsAddPath ensures this is applied only if the path is not already present.
[Registry]
; Add the application's installation directory to the system PATH.
Root: HKLM; Subkey: "SYSTEM\CurrentControlSet\Control\Session Manager\Environment"; \
ValueType: expandsz; ValueName: "Path"; ValueData: "{olddata};{app}"; \
Flags: uninsdeletevalue; Check: NeedsAddPath(ExpandConstant('{app}'))
Check: NeedsAddPath(ExpandConstant('{app}'))
[Code]
function NeedsAddPath(Path: string): boolean;
@@ -78,11 +78,75 @@ begin
Result := True;
exit;
end;
// Perform a case-insensitive check to see if the path is already present.
// We add semicolons to prevent partial matches (e.g., matching C:\App in C:\App2).
if Pos(';' + UpperCase(Path) + ';', ';' + UpperCase(OrigPath) + ';') > 0 then
Result := False
else
Result := True;
end;
end;
procedure RemovePathEntry(PathToRemove: string);
var
OrigPath: string;
NewPath: string;
PathList: TStringList;
I: Integer;
begin
if not RegQueryStringValue(HKEY_LOCAL_MACHINE,
'SYSTEM\CurrentControlSet\Control\Session Manager\Environment',
'Path', OrigPath)
then begin
// Path variable doesn't exist, nothing to remove
exit;
end;
// Create a string list to parse the PATH entries
PathList := TStringList.Create;
try
// Split the PATH by semicolons
PathList.Delimiter := ';';
PathList.StrictDelimiter := True;
PathList.DelimitedText := OrigPath;
// Find and remove the matching entry (case-insensitive)
for I := PathList.Count - 1 downto 0 do
begin
if CompareText(Trim(PathList[I]), Trim(PathToRemove)) = 0 then
begin
Log('Found and removing PATH entry: ' + PathList[I]);
PathList.Delete(I);
end;
end;
// Reconstruct the PATH
NewPath := PathList.DelimitedText;
// Write the new PATH back to the registry
if RegWriteExpandStringValue(HKEY_LOCAL_MACHINE,
'SYSTEM\CurrentControlSet\Control\Session Manager\Environment',
'Path', NewPath)
then
Log('Successfully removed path entry: ' + PathToRemove)
else
Log('Failed to write modified PATH to registry');
finally
PathList.Free;
end;
end;
procedure CurUninstallStepChanged(CurUninstallStep: TUninstallStep);
var
AppPath: string;
begin
if CurUninstallStep = usUninstall then
begin
// Get the application installation path
AppPath := ExpandConstant('{app}');
Log('Removing PATH entry for: ' + AppPath);
// Remove only our path entry from the system PATH
RemovePathEntry(AppPath);
end;
end;
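For readers not fluent in Inno Setup's Pascal scripting, the NeedsAddPath / RemovePathEntry logic above (semicolon-delimited entries, case-insensitive matching so C:\App never matches inside C:\App2) is sketched below in Go, purely as an illustration of the same idea.

package main

import (
	"fmt"
	"strings"
)

// containsPath reports whether dir is already one of the ;-separated PATH entries.
// The comparison is case-insensitive and semicolon-padded, mirroring NeedsAddPath.
func containsPath(pathValue, dir string) bool {
	needle := ";" + strings.ToUpper(dir) + ";"
	haystack := ";" + strings.ToUpper(pathValue) + ";"
	return strings.Contains(haystack, needle)
}

// removePath drops every entry equal to dir (after trimming), mirroring RemovePathEntry.
func removePath(pathValue, dir string) string {
	var kept []string
	for _, entry := range strings.Split(pathValue, ";") {
		if strings.EqualFold(strings.TrimSpace(entry), strings.TrimSpace(dir)) {
			continue
		}
		kept = append(kept, entry)
	}
	return strings.Join(kept, ";")
}

func main() {
	path := `C:\Windows;C:\Program Files\Olm;C:\App2`
	fmt.Println(containsPath(path, `c:\program files\olm`)) // true
	fmt.Println(removePath(path, `C:\Program Files\Olm`))   // C:\Windows;C:\App2
}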


@@ -374,8 +374,14 @@ func StartTunnel(config TunnelConfig) {
logger.Error("Failed to bring up WireGuard device: %v", err)
}
// Extract interface IP (strip CIDR notation if present)
interfaceIP := wgData.TunnelIP
if strings.Contains(interfaceIP, "/") {
interfaceIP = strings.Split(interfaceIP, "/")[0]
}
// Create and start DNS proxy
dnsProxy, err = dns.NewDNSProxy(tdev, middleDev, config.MTU, wgData.UtilitySubnet, config.UpstreamDNS)
dnsProxy, err = dns.NewDNSProxy(tdev, middleDev, config.MTU, wgData.UtilitySubnet, config.UpstreamDNS, config.TunnelDNS, interfaceIP)
if err != nil {
logger.Error("Failed to create DNS proxy: %v", err)
}
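The CIDR-stripping step above could equally lean on the standard library's netip package; this is only an alternative sketch of the same idea, not what the code actually does.

package main

import (
	"fmt"
	"net/netip"
	"strings"
)

// stripCIDR returns just the address portion of values like "10.0.0.1/24".
// It falls back to a plain string split if the value is not a valid prefix.
func stripCIDR(tunnelIP string) string {
	if prefix, err := netip.ParsePrefix(tunnelIP); err == nil {
		return prefix.Addr().String()
	}
	if i := strings.Index(tunnelIP, "/"); i >= 0 {
		return tunnelIP[:i]
	}
	return tunnelIP
}

func main() {
	fmt.Println(stripCIDR("10.0.0.1/24")) // 10.0.0.1
	fmt.Println(stripCIDR("fd00::2/64"))  // fd00::2
	fmt.Println(stripCIDR("192.168.1.5")) // 192.168.1.5
}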
@@ -388,12 +394,6 @@ func StartTunnel(config TunnelConfig) {
logger.Error("Failed to add route for utility subnet: %v", err)
}
// TODO: separate adding the callback to this so we can init it above with the interface
interfaceIP := wgData.TunnelIP
if strings.Contains(interfaceIP, "/") {
interfaceIP = strings.Split(interfaceIP, "/")[0]
}
// Create peer manager with integrated peer monitoring
peerManager = peers.NewPeerManager(peers.PeerManagerConfig{
Device: dev,
@@ -417,7 +417,7 @@ func StartTunnel(config TunnelConfig) {
siteEndpoint = site.Endpoint
}
apiServer.UpdatePeerStatus(site.SiteId, false, 0, siteEndpoint, false)
apiServer.AddPeerStatus(site.SiteId, site.Name, false, 0, siteEndpoint, false)
if err := peerManager.AddPeer(site); err != nil {
logger.Error("Failed to add peer: %v", err)
@@ -471,7 +471,7 @@ func StartTunnel(config TunnelConfig) {
// Get existing peer from PeerManager
existingPeer, exists := peerManager.GetPeer(updateData.SiteId)
if !exists {
logger.Error("Peer with site ID %d not found", updateData.SiteId)
logger.Warn("Peer with site ID %d not found", updateData.SiteId)
return
}
@@ -502,6 +502,13 @@ func StartTunnel(config TunnelConfig) {
return
}
// If the endpoint changed, trigger holepunch to refresh NAT mappings
if updateData.Endpoint != "" && updateData.Endpoint != existingPeer.Endpoint {
logger.Info("Endpoint changed for site %d, triggering holepunch to refresh NAT mappings", updateData.SiteId)
holePunchManager.TriggerHolePunch()
holePunchManager.ResetInterval()
}
// Update successful
logger.Info("Successfully updated peer for site %d", updateData.SiteId)
})
@@ -559,6 +566,14 @@ func StartTunnel(config TunnelConfig) {
return
}
// Remove any exit nodes associated with this peer from hole punching
if holePunchManager != nil {
removed := holePunchManager.RemoveExitNodesByPeer(removeData.SiteId)
if removed > 0 {
logger.Info("Removed %d exit nodes associated with peer %d from hole punch rotation", removed, removeData.SiteId)
}
}
// Remove successful
logger.Info("Successfully removed peer for site %d", removeData.SiteId)
})
@@ -720,7 +735,7 @@ func StartTunnel(config TunnelConfig) {
// Update HTTP server to mark this peer as using relay
apiServer.UpdatePeerRelayStatus(relayData.SiteId, relayData.RelayEndpoint, true)
peerManager.RelayPeer(relayData.SiteId, primaryRelay)
peerManager.RelayPeer(relayData.SiteId, primaryRelay, relayData.RelayPort)
})
olm.RegisterHandler("olm/wg/peer/unrelay", func(msg websocket.WSMessage) {
@@ -770,6 +785,7 @@ func StartTunnel(config TunnelConfig) {
ExitNode struct {
PublicKey string `json:"publicKey"`
Endpoint string `json:"endpoint"`
RelayPort uint16 `json:"relayPort"`
} `json:"exitNode"`
}
@@ -778,23 +794,36 @@ func StartTunnel(config TunnelConfig) {
return
}
// Add exit node to holepunch rotation if we have a holepunch manager
if holePunchManager != nil {
exitNode := holepunch.ExitNode{
Endpoint: handshakeData.ExitNode.Endpoint,
PublicKey: handshakeData.ExitNode.PublicKey,
}
added := holePunchManager.AddExitNode(exitNode)
if added {
logger.Info("Added exit node %s to holepunch rotation for handshake", exitNode.Endpoint)
} else {
logger.Debug("Exit node %s already in holepunch rotation", exitNode.Endpoint)
}
holePunchManager.ResetInterval() // start sending immediately again so we fill in the endpoint on the cloud
// Get existing peer from PeerManager
_, exists := peerManager.GetPeer(handshakeData.SiteId)
if exists {
logger.Warn("Peer with site ID %d already added", handshakeData.SiteId)
return
}
relayPort := handshakeData.ExitNode.RelayPort
if relayPort == 0 {
relayPort = 21820 // default relay port
}
siteId := handshakeData.SiteId
exitNode := holepunch.ExitNode{
Endpoint: handshakeData.ExitNode.Endpoint,
RelayPort: relayPort,
PublicKey: handshakeData.ExitNode.PublicKey,
SiteIds: []int{siteId},
}
added := holePunchManager.AddExitNode(exitNode)
if added {
logger.Info("Added exit node %s to holepunch rotation for handshake", exitNode.Endpoint)
} else {
logger.Debug("Exit node %s already in holepunch rotation", exitNode.Endpoint)
}
holePunchManager.TriggerHolePunch() // Trigger immediate hole punch attempt
holePunchManager.ResetInterval() // start sending immediately again so we fill in the endpoint on the cloud
// Send handshake acknowledgment back to server with retry
stopPeerSend, _ = olm.SendMessageInterval("olm/wg/server/peer/add", map[string]interface{}{
"siteId": handshakeData.SiteId,
@@ -859,28 +888,33 @@ func StartTunnel(config TunnelConfig) {
})
olm.OnTokenUpdate(func(token string, exitNodes []websocket.ExitNode) {
if holePunchManager != nil {
holePunchManager.SetToken(token)
holePunchManager.SetToken(token)
logger.Debug("Got exit nodes for hole punching: %v", exitNodes)
logger.Debug("Got exit nodes for hole punching: %v", exitNodes)
// Convert websocket.ExitNode to holepunch.ExitNode
hpExitNodes := make([]holepunch.ExitNode, len(exitNodes))
for i, node := range exitNodes {
hpExitNodes[i] = holepunch.ExitNode{
Endpoint: node.Endpoint,
PublicKey: node.PublicKey,
}
// Convert websocket.ExitNode to holepunch.ExitNode
hpExitNodes := make([]holepunch.ExitNode, len(exitNodes))
for i, node := range exitNodes {
relayPort := node.RelayPort
if relayPort == 0 {
relayPort = 21820 // default relay port
}
logger.Debug("Updated hole punch exit nodes: %v", hpExitNodes)
// Start hole punching using the manager
logger.Info("Starting hole punch for %d exit nodes", len(exitNodes))
if err := holePunchManager.StartMultipleExitNodes(hpExitNodes); err != nil {
logger.Warn("Failed to start hole punch: %v", err)
hpExitNodes[i] = holepunch.ExitNode{
Endpoint: node.Endpoint,
RelayPort: relayPort,
PublicKey: node.PublicKey,
SiteIds: node.SiteIds,
}
}
logger.Debug("Updated hole punch exit nodes: %v", hpExitNodes)
// Start hole punching using the manager
logger.Info("Starting hole punch for %d exit nodes", len(exitNodes))
if err := holePunchManager.StartMultipleExitNodes(hpExitNodes); err != nil {
logger.Warn("Failed to start hole punch: %v", err)
}
})
olm.OnAuthError(func(statusCode int, message string) {


@@ -61,6 +61,7 @@ type TunnelConfig struct {
EnableUAPI bool
OverrideDNS bool
TunnelDNS bool
DisableRelay bool
}


@@ -1,9 +1,6 @@
package olm
import (
"fmt"
"net"
"strings"
"time"
"github.com/fosrl/newt/logger"
@@ -11,33 +8,6 @@ import (
"github.com/fosrl/olm/websocket"
)
// Helper function to format endpoints correctly
func formatEndpoint(endpoint string) string {
if endpoint == "" {
return ""
}
// Check if it's already a valid host:port that SplitHostPort can parse (e.g., [::1]:8080 or 1.2.3.4:8080)
_, _, err := net.SplitHostPort(endpoint)
if err == nil {
return endpoint // Already valid, no change needed
}
// If it failed, it might be our malformed "ipv6:port" string. Let's check and fix it.
lastColon := strings.LastIndex(endpoint, ":")
if lastColon > 0 { // Ensure there is a colon and it's not the first character
hostPart := endpoint[:lastColon]
// Check if the host part is a literal IPv6 address
if ip := net.ParseIP(hostPart); ip != nil && ip.To4() == nil {
// It is! Reformat it with brackets.
portPart := endpoint[lastColon+1:]
return fmt.Sprintf("[%s]:%s", hostPart, portPart)
}
}
// If it's not the specific malformed case, return it as is.
return endpoint
}
func sendPing(olm *websocket.Client) error {
err := olm.SendMessage("olm/ping", map[string]interface{}{
"timestamp": time.Now().Unix(),
@@ -83,16 +53,3 @@ func GetNetworkSettingsJSON() (string, error) {
func GetNetworkSettingsIncrementor() int {
return network.GetIncrementor()
}
// stringSlicesEqual compares two string slices for equality
func stringSlicesEqual(a, b []string) bool {
if len(a) != len(b) {
return false
}
for i := range a {
if a[i] != b[i] {
return false
}
}
return true
}


@@ -150,6 +150,8 @@ func (pm *PeerManager) AddPeer(siteConfig SiteConfig) error {
pm.peers[siteConfig.SiteId] = siteConfig
pm.APIServer.AddPeerStatus(siteConfig.SiteId, siteConfig.Name, false, 0, siteConfig.Endpoint, false)
// Perform rapid initial holepunch test (outside of lock to avoid blocking)
// This quickly determines if holepunch is viable and triggers relay if not
go pm.performRapidInitialTest(siteConfig.SiteId, siteConfig.Endpoint)
@@ -741,7 +743,7 @@ func (pm *PeerManager) RemoveAlias(siteId int, aliasName string) error {
}
// RelayPeer handles failover to the relay server when a peer is disconnected
func (pm *PeerManager) RelayPeer(siteId int, relayEndpoint string) {
func (pm *PeerManager) RelayPeer(siteId int, relayEndpoint string, relayPort uint16) {
pm.mu.Lock()
peer, exists := pm.peers[siteId]
if exists {
@@ -762,10 +764,14 @@ func (pm *PeerManager) RelayPeer(siteId int, relayEndpoint string) {
formattedEndpoint = fmt.Sprintf("[%s]", relayEndpoint)
}
if relayPort == 0 {
relayPort = 21820 // fall back to 21820 for backward compatibility
}
// Update only the endpoint for this peer (update_only preserves other settings)
wgConfig := fmt.Sprintf(`public_key=%s
update_only=true
endpoint=%s:21820`, util.FixKey(peer.PublicKey), formattedEndpoint)
endpoint=%s:%d`, util.FixKey(peer.PublicKey), formattedEndpoint, relayPort)
err := pm.device.IpcSet(wgConfig)
if err != nil {


@@ -191,12 +191,12 @@ func (pm *PeerMonitor) AddPeer(siteID int, endpoint string, holepunchEndpoint st
// update holepunch endpoint for a peer
func (pm *PeerMonitor) UpdateHolepunchEndpoint(siteID int, endpoint string) {
go func() {
time.Sleep(3 * time.Second)
pm.mutex.Lock()
defer pm.mutex.Unlock()
pm.holepunchEndpoints[siteID] = endpoint
}()
// Short delay to allow WireGuard peer reconfiguration to complete
// The NAT mapping refresh is handled separately by TriggerHolePunch in olm.go
pm.mutex.Lock()
defer pm.mutex.Unlock()
pm.holepunchEndpoints[siteID] = endpoint
logger.Debug("Updated holepunch endpoint for site %d to %s", siteID, endpoint)
}
// RapidTestPeer performs a rapid connectivity test for a newly added peer.
@@ -294,6 +294,12 @@ func (pm *PeerMonitor) RemovePeer(siteID int) {
pm.removePeerUnlocked(siteID)
}
func (pm *PeerMonitor) RemoveHolepunchEndpoint(siteID int) {
pm.mutex.Lock()
defer pm.mutex.Unlock()
delete(pm.holepunchEndpoints, siteID)
}
// Start begins monitoring all peers
func (pm *PeerMonitor) Start() {
pm.mutex.Lock()
@@ -499,7 +505,7 @@ func (pm *PeerMonitor) checkHolepunchEndpoints() {
pm.mutex.Unlock()
for siteID, endpoint := range endpoints {
logger.Debug("Testing holepunch endpoint for site %d: %s", siteID, endpoint)
// logger.Debug("Testing holepunch endpoint for site %d: %s", siteID, endpoint)
result := pm.holepunchTester.TestEndpoint(endpoint, timeout)
pm.mutex.Lock()


@@ -9,6 +9,7 @@ type PeerAction struct {
// UpdatePeerData represents the data needed to update a peer
type SiteConfig struct {
SiteId int `json:"siteId"`
Name string `json:"name,omitempty"`
Endpoint string `json:"endpoint,omitempty"`
RelayEndpoint string `json:"relayEndpoint,omitempty"`
PublicKey string `json:"publicKey,omitempty"`
@@ -32,6 +33,7 @@ type PeerRemove struct {
type RelayPeerData struct {
SiteId int `json:"siteId"`
RelayEndpoint string `json:"relayEndpoint"`
RelayPort uint16 `json:"relayPort"`
}
type UnRelayPeerData struct {


@@ -99,15 +99,32 @@ func (s *olmService) Execute(args []string, r <-chan svc.ChangeRequest, changes
// Continue with empty args if loading fails
savedArgs = []string{}
}
s.elog.Info(1, fmt.Sprintf("Loaded saved service args: %v", savedArgs))
// Combine service start args with saved args, giving priority to service start args
// Note: When the service is started via SCM, args[0] is the service name
// When started via s.Start(args...), the args passed are exactly what we provide
finalArgs := []string{}
// Check if we have args passed directly to Execute (from s.Start())
if len(args) > 0 {
// Skip the first arg which is typically the service name
if len(args) > 1 {
// The first arg from SCM is the service name, but when we call s.Start(args...),
// the args we pass become args[1:] in Execute. However, if started by SCM without
// args, args[0] will be the service name.
// We need to check if args[0] looks like the service name or a flag
if len(args) == 1 && args[0] == serviceName {
// Only service name, no actual args
s.elog.Info(1, "Only service name in args, checking saved args")
} else if len(args) > 1 && args[0] == serviceName {
// Service name followed by actual args
finalArgs = append(finalArgs, args[1:]...)
s.elog.Info(1, fmt.Sprintf("Using service start parameters (after service name): %v", finalArgs))
} else {
// Args don't start with service name, use them all
// This happens when args are passed via s.Start(args...)
finalArgs = append(finalArgs, args...)
s.elog.Info(1, fmt.Sprintf("Using service start parameters (direct): %v", finalArgs))
}
s.elog.Info(1, fmt.Sprintf("Using service start parameters: %v", finalArgs))
}
// If no service start parameters, use saved args
@@ -116,6 +133,7 @@ func (s *olmService) Execute(args []string, r <-chan svc.ChangeRequest, changes
s.elog.Info(1, fmt.Sprintf("Using saved service args: %v", finalArgs))
}
s.elog.Info(1, fmt.Sprintf("Final args to use: %v", finalArgs))
s.args = finalArgs
// Start the main olm functionality
@@ -325,12 +343,15 @@ func removeService() error {
}
func startService(args []string) error {
// Save the service arguments as backup
if len(args) > 0 {
err := saveServiceArgs(args)
if err != nil {
return fmt.Errorf("failed to save service args: %v", err)
}
fmt.Printf("Starting service with args: %v\n", args)
// Always save the service arguments so they can be loaded on service restart
err := saveServiceArgs(args)
if err != nil {
fmt.Printf("Warning: failed to save service args: %v\n", err)
// Continue anyway, args will still be passed directly
} else {
fmt.Printf("Saved service args to: %s\n", getServiceArgsPath())
}
m, err := mgr.Connect()
@@ -346,6 +367,7 @@ func startService(args []string) error {
defer s.Close()
// Pass arguments directly to the service start call
// Note: These args will appear in Execute() after the service name
err = s.Start(args...)
if err != nil {
return fmt.Errorf("failed to start service: %v", err)


@@ -38,8 +38,9 @@ func IsAuthError(err error) bool {
type TokenResponse struct {
Data struct {
Token string `json:"token"`
ExitNodes []ExitNode `json:"exitNodes"`
Token string `json:"token"`
ExitNodes []ExitNode `json:"exitNodes"`
ServerVersion string `json:"serverVersion"`
} `json:"data"`
Success bool `json:"success"`
Message string `json:"message"`
@@ -47,7 +48,9 @@ type TokenResponse struct {
type ExitNode struct {
Endpoint string `json:"endpoint"`
RelayPort uint16 `json:"relayPort"`
PublicKey string `json:"publicKey"`
SiteIds []int `json:"siteIds"`
}
type WSMessage struct {
@@ -348,6 +351,9 @@ func (c *Client) getToken() (string, []ExitNode, error) {
req.Header.Set("Content-Type", "application/json")
req.Header.Set("X-CSRF-Token", "x-csrf-protection")
// print out the request for debugging
logger.Debug("Requesting token from %s with body: %s", req.URL.String(), string(jsonData))
// Make the request
client := &http.Client{}
if tlsConfig != nil {
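Given the struct tags in TokenResponse and ExitNode above, the payload getToken decodes would look roughly like the JSON in this sketch; every value is invented for illustration.

package main

import (
	"encoding/json"
	"fmt"
)

type exitNode struct {
	Endpoint  string `json:"endpoint"`
	RelayPort uint16 `json:"relayPort"`
	PublicKey string `json:"publicKey"`
	SiteIds   []int  `json:"siteIds"`
}

type tokenResponse struct {
	Data struct {
		Token         string     `json:"token"`
		ExitNodes     []exitNode `json:"exitNodes"`
		ServerVersion string     `json:"serverVersion"`
	} `json:"data"`
	Success bool   `json:"success"`
	Message string `json:"message"`
}

func main() {
	// Example payload shaped after the struct tags; all values are made up.
	payload := `{
	  "data": {
	    "token": "example-token",
	    "serverVersion": "1.2.3",
	    "exitNodes": [
	      {"endpoint": "203.0.113.10:51820", "relayPort": 21820,
	       "publicKey": "base64-key", "siteIds": [42]}
	    ]
	  },
	  "success": true,
	  "message": ""
	}`

	var resp tokenResponse
	if err := json.Unmarshal([]byte(payload), &resp); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println(resp.Data.ExitNodes[0].Endpoint, resp.Data.ExitNodes[0].RelayPort)
}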