mirror of
https://github.com/netbirdio/netbird.git
synced 2026-05-03 15:46:38 +00:00
Compare commits
18 Commits
v0.2.2-beta
...
v0.3.3
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
bafa71fc2e | ||
|
|
319632ffe8 | ||
|
|
828410b34c | ||
|
|
4d2b194570 | ||
|
|
a67b9a16af | ||
|
|
6ae27c9a9b | ||
|
|
ff6e369a21 | ||
|
|
5c3b5e7f40 | ||
|
|
8c75ef8bef | ||
|
|
fdc11fff47 | ||
|
|
3dca2d6953 | ||
|
|
6b7d4cf644 | ||
|
|
edd4125742 | ||
|
|
7bf9793f85 | ||
|
|
fcbf980588 | ||
|
|
d08e5efbce | ||
|
|
95ef8547f3 | ||
|
|
ed1e4dfc51 |
44
.github/workflows/golang-test-build.yml
vendored
Normal file
44
.github/workflows/golang-test-build.yml
vendored
Normal file
@@ -0,0 +1,44 @@
|
|||||||
|
on:
|
||||||
|
push:
|
||||||
|
branches:
|
||||||
|
- main
|
||||||
|
pull_request:
|
||||||
|
name: Test Build On Platforms
|
||||||
|
jobs:
|
||||||
|
test_build:
|
||||||
|
strategy:
|
||||||
|
matrix:
|
||||||
|
os: [ windows, linux, darwin ]
|
||||||
|
go-version: [1.17.x]
|
||||||
|
runs-on: ubuntu-latest
|
||||||
|
steps:
|
||||||
|
- name: Checkout code
|
||||||
|
uses: actions/checkout@v2
|
||||||
|
|
||||||
|
- name: Install Go
|
||||||
|
uses: actions/setup-go@v2
|
||||||
|
with:
|
||||||
|
go-version: ${{ matrix.go-version }}
|
||||||
|
|
||||||
|
- name: Cache Go modules
|
||||||
|
uses: actions/cache@v1
|
||||||
|
with:
|
||||||
|
path: ~/go/pkg/mod
|
||||||
|
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
||||||
|
restore-keys: |
|
||||||
|
${{ runner.os }}-go-
|
||||||
|
|
||||||
|
- name: Install modules
|
||||||
|
run: go mod tidy
|
||||||
|
|
||||||
|
- name: run build client
|
||||||
|
run: GOOS=${{ matrix.os }} go build .
|
||||||
|
working-directory: client
|
||||||
|
|
||||||
|
- name: run build management
|
||||||
|
run: GOOS=${{ matrix.os }} go build .
|
||||||
|
working-directory: management
|
||||||
|
|
||||||
|
- name: run build signal
|
||||||
|
run: GOOS=${{ matrix.os }} go build .
|
||||||
|
working-directory: signal
|
||||||
54
.github/workflows/golang-test.yml
vendored
54
.github/workflows/golang-test.yml
vendored
@@ -3,57 +3,31 @@ on:
|
|||||||
branches:
|
branches:
|
||||||
- main
|
- main
|
||||||
pull_request:
|
pull_request:
|
||||||
name: Test
|
name: Test Code
|
||||||
jobs:
|
jobs:
|
||||||
test:
|
test:
|
||||||
strategy:
|
strategy:
|
||||||
matrix:
|
matrix:
|
||||||
go-version: [1.16.x]
|
go-version: [1.17.x]
|
||||||
runs-on: ubuntu-latest
|
runs-on: ubuntu-latest
|
||||||
steps:
|
steps:
|
||||||
- name: Install Go
|
- name: Install Go
|
||||||
uses: actions/setup-go@v2
|
uses: actions/setup-go@v2
|
||||||
with:
|
with:
|
||||||
go-version: ${{ matrix.go-version }}
|
go-version: ${{ matrix.go-version }}
|
||||||
|
- name: update limits.d
|
||||||
|
run: |
|
||||||
|
cat <<'EOF' | sudo tee -a /etc/security/limits.d/wt.conf
|
||||||
|
root soft nproc 65535
|
||||||
|
root hard nproc 65535
|
||||||
|
root soft nofile 65535
|
||||||
|
root hard nofile 65535
|
||||||
|
$(whoami) soft nproc 65535
|
||||||
|
$(whoami) hard nproc 65535
|
||||||
|
$(whoami) soft nofile 65535
|
||||||
|
$(whoami) hard nofile 65535
|
||||||
|
EOF
|
||||||
- name: Checkout code
|
- name: Checkout code
|
||||||
uses: actions/checkout@v2
|
uses: actions/checkout@v2
|
||||||
- name: Test
|
- name: Test
|
||||||
run: GOBIN=$(which go) && sudo --preserve-env=GOROOT $GOBIN test -p 1 ./...
|
run: GOBIN=$(which go) && sudo --preserve-env=GOROOT $GOBIN test -p 1 ./...
|
||||||
|
|
||||||
test_build:
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
os: [ windows, linux, darwin ]
|
|
||||||
go-version: [1.16.x]
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- name: Checkout code
|
|
||||||
uses: actions/checkout@v2
|
|
||||||
|
|
||||||
- name: Install Go
|
|
||||||
uses: actions/setup-go@v2
|
|
||||||
with:
|
|
||||||
go-version: ${{ matrix.go-version }}
|
|
||||||
|
|
||||||
- name: Cache Go modules
|
|
||||||
uses: actions/cache@v1
|
|
||||||
with:
|
|
||||||
path: ~/go/pkg/mod
|
|
||||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
|
||||||
restore-keys: |
|
|
||||||
${{ runner.os }}-go-
|
|
||||||
|
|
||||||
- name: Install modules
|
|
||||||
run: go mod tidy
|
|
||||||
|
|
||||||
- name: run build client
|
|
||||||
run: GOOS=${{ matrix.os }} go build .
|
|
||||||
working-directory: client
|
|
||||||
|
|
||||||
- name: run build management
|
|
||||||
run: GOOS=${{ matrix.os }} go build .
|
|
||||||
working-directory: management
|
|
||||||
|
|
||||||
- name: run build signal
|
|
||||||
run: GOOS=${{ matrix.os }} go build .
|
|
||||||
working-directory: signal
|
|
||||||
2
.github/workflows/release.yml
vendored
2
.github/workflows/release.yml
vendored
@@ -18,7 +18,7 @@ jobs:
|
|||||||
name: Set up Go
|
name: Set up Go
|
||||||
uses: actions/setup-go@v2
|
uses: actions/setup-go@v2
|
||||||
with:
|
with:
|
||||||
go-version: 1.16
|
go-version: 1.17
|
||||||
-
|
-
|
||||||
name: Cache Go modules
|
name: Cache Go modules
|
||||||
uses: actions/cache@v1
|
uses: actions/cache@v1
|
||||||
|
|||||||
113
.goreleaser.yaml
113
.goreleaser.yaml
@@ -37,6 +37,7 @@ builds:
|
|||||||
goarch:
|
goarch:
|
||||||
- amd64
|
- amd64
|
||||||
- arm64
|
- arm64
|
||||||
|
- arm
|
||||||
ldflags:
|
ldflags:
|
||||||
- -s -w -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}} -X main.builtBy=goreleaser
|
- -s -w -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}} -X main.builtBy=goreleaser
|
||||||
mod_timestamp: '{{ .CommitTimestamp }}'
|
mod_timestamp: '{{ .CommitTimestamp }}'
|
||||||
@@ -50,6 +51,7 @@ builds:
|
|||||||
goarch:
|
goarch:
|
||||||
- amd64
|
- amd64
|
||||||
- arm64
|
- arm64
|
||||||
|
- arm
|
||||||
ldflags:
|
ldflags:
|
||||||
- -s -w -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}} -X main.builtBy=goreleaser
|
- -s -w -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.CommitDate}} -X main.builtBy=goreleaser
|
||||||
mod_timestamp: '{{ .CommitTimestamp }}'
|
mod_timestamp: '{{ .CommitTimestamp }}'
|
||||||
@@ -83,6 +85,52 @@ nfpms:
|
|||||||
postinstall: "release_files/post_install.sh"
|
postinstall: "release_files/post_install.sh"
|
||||||
preremove: "release_files/pre_remove.sh"
|
preremove: "release_files/pre_remove.sh"
|
||||||
dockers:
|
dockers:
|
||||||
|
- image_templates:
|
||||||
|
- wiretrustee/wiretrustee:{{ .Version }}-amd64
|
||||||
|
ids:
|
||||||
|
- wiretrustee
|
||||||
|
goarch: amd64
|
||||||
|
use: buildx
|
||||||
|
dockerfile: client/Dockerfile
|
||||||
|
build_flag_templates:
|
||||||
|
- "--platform=linux/amd64"
|
||||||
|
- "--label=org.opencontainers.image.created={{.Date}}"
|
||||||
|
- "--label=org.opencontainers.image.title={{.ProjectName}}"
|
||||||
|
- "--label=org.opencontainers.image.version={{.Version}}"
|
||||||
|
- "--label=org.opencontainers.image.revision={{.FullCommit}}"
|
||||||
|
- "--label=org.opencontainers.image.version={{.Version}}"
|
||||||
|
- "--label=maintainer=wiretrustee@wiretrustee.com"
|
||||||
|
- image_templates:
|
||||||
|
- wiretrustee/wiretrustee:{{ .Version }}-arm64v8
|
||||||
|
ids:
|
||||||
|
- wiretrustee
|
||||||
|
goarch: arm64
|
||||||
|
use: buildx
|
||||||
|
dockerfile: client/Dockerfile
|
||||||
|
build_flag_templates:
|
||||||
|
- "--platform=linux/arm64"
|
||||||
|
- "--label=org.opencontainers.image.created={{.Date}}"
|
||||||
|
- "--label=org.opencontainers.image.title={{.ProjectName}}"
|
||||||
|
- "--label=org.opencontainers.image.version={{.Version}}"
|
||||||
|
- "--label=org.opencontainers.image.revision={{.FullCommit}}"
|
||||||
|
- "--label=org.opencontainers.image.version={{.Version}}"
|
||||||
|
- "--label=maintainer=wiretrustee@wiretrustee.com"
|
||||||
|
- image_templates:
|
||||||
|
- wiretrustee/wiretrustee:{{ .Version }}-arm
|
||||||
|
ids:
|
||||||
|
- wiretrustee
|
||||||
|
goarch: arm
|
||||||
|
goarm: 6
|
||||||
|
use: buildx
|
||||||
|
dockerfile: client/Dockerfile
|
||||||
|
build_flag_templates:
|
||||||
|
- "--platform=linux/arm"
|
||||||
|
- "--label=org.opencontainers.image.created={{.Date}}"
|
||||||
|
- "--label=org.opencontainers.image.title={{.ProjectName}}"
|
||||||
|
- "--label=org.opencontainers.image.version={{.Version}}"
|
||||||
|
- "--label=org.opencontainers.image.revision={{.FullCommit}}"
|
||||||
|
- "--label=org.opencontainers.image.version={{.Version}}"
|
||||||
|
- "--label=maintainer=wiretrustee@wiretrustee.com"
|
||||||
- image_templates:
|
- image_templates:
|
||||||
- wiretrustee/signal:{{ .Version }}-amd64
|
- wiretrustee/signal:{{ .Version }}-amd64
|
||||||
ids:
|
ids:
|
||||||
@@ -113,6 +161,22 @@ dockers:
|
|||||||
- "--label=org.opencontainers.image.revision={{.FullCommit}}"
|
- "--label=org.opencontainers.image.revision={{.FullCommit}}"
|
||||||
- "--label=org.opencontainers.image.version={{.Version}}"
|
- "--label=org.opencontainers.image.version={{.Version}}"
|
||||||
- "--label=maintainer=wiretrustee@wiretrustee.com"
|
- "--label=maintainer=wiretrustee@wiretrustee.com"
|
||||||
|
- image_templates:
|
||||||
|
- wiretrustee/signal:{{ .Version }}-arm
|
||||||
|
ids:
|
||||||
|
- wiretrustee-signal
|
||||||
|
goarch: arm
|
||||||
|
goarm: 6
|
||||||
|
use: buildx
|
||||||
|
dockerfile: signal/Dockerfile
|
||||||
|
build_flag_templates:
|
||||||
|
- "--platform=linux/arm"
|
||||||
|
- "--label=org.opencontainers.image.created={{.Date}}"
|
||||||
|
- "--label=org.opencontainers.image.title={{.ProjectName}}"
|
||||||
|
- "--label=org.opencontainers.image.version={{.Version}}"
|
||||||
|
- "--label=org.opencontainers.image.revision={{.FullCommit}}"
|
||||||
|
- "--label=org.opencontainers.image.version={{.Version}}"
|
||||||
|
- "--label=maintainer=wiretrustee@wiretrustee.com"
|
||||||
- image_templates:
|
- image_templates:
|
||||||
- wiretrustee/management:{{ .Version }}-amd64
|
- wiretrustee/management:{{ .Version }}-amd64
|
||||||
ids:
|
ids:
|
||||||
@@ -143,6 +207,22 @@ dockers:
|
|||||||
- "--label=org.opencontainers.image.revision={{.FullCommit}}"
|
- "--label=org.opencontainers.image.revision={{.FullCommit}}"
|
||||||
- "--label=org.opencontainers.image.version={{.Version}}"
|
- "--label=org.opencontainers.image.version={{.Version}}"
|
||||||
- "--label=maintainer=wiretrustee@wiretrustee.com"
|
- "--label=maintainer=wiretrustee@wiretrustee.com"
|
||||||
|
- image_templates:
|
||||||
|
- wiretrustee/management:{{ .Version }}-arm
|
||||||
|
ids:
|
||||||
|
- wiretrustee-mgmt
|
||||||
|
goarch: arm
|
||||||
|
goarm: 6
|
||||||
|
use: buildx
|
||||||
|
dockerfile: management/Dockerfile
|
||||||
|
build_flag_templates:
|
||||||
|
- "--platform=linux/arm"
|
||||||
|
- "--label=org.opencontainers.image.created={{.Date}}"
|
||||||
|
- "--label=org.opencontainers.image.title={{.ProjectName}}"
|
||||||
|
- "--label=org.opencontainers.image.version={{.Version}}"
|
||||||
|
- "--label=org.opencontainers.image.revision={{.FullCommit}}"
|
||||||
|
- "--label=org.opencontainers.image.version={{.Version}}"
|
||||||
|
- "--label=maintainer=wiretrustee@wiretrustee.com"
|
||||||
- image_templates:
|
- image_templates:
|
||||||
- wiretrustee/management:{{ .Version }}-debug-amd64
|
- wiretrustee/management:{{ .Version }}-debug-amd64
|
||||||
ids:
|
ids:
|
||||||
@@ -174,30 +254,63 @@ dockers:
|
|||||||
- "--label=org.opencontainers.image.version={{.Version}}"
|
- "--label=org.opencontainers.image.version={{.Version}}"
|
||||||
- "--label=maintainer=wiretrustee@wiretrustee.com"
|
- "--label=maintainer=wiretrustee@wiretrustee.com"
|
||||||
|
|
||||||
|
- image_templates:
|
||||||
|
- wiretrustee/management:{{ .Version }}-debug-arm
|
||||||
|
ids:
|
||||||
|
- wiretrustee-mgmt
|
||||||
|
goarch: arm
|
||||||
|
goarm: 6
|
||||||
|
use: buildx
|
||||||
|
dockerfile: management/Dockerfile.debug
|
||||||
|
build_flag_templates:
|
||||||
|
- "--platform=linux/arm"
|
||||||
|
- "--label=org.opencontainers.image.created={{.Date}}"
|
||||||
|
- "--label=org.opencontainers.image.title={{.ProjectName}}"
|
||||||
|
- "--label=org.opencontainers.image.version={{.Version}}"
|
||||||
|
- "--label=org.opencontainers.image.revision={{.FullCommit}}"
|
||||||
|
- "--label=org.opencontainers.image.version={{.Version}}"
|
||||||
|
- "--label=maintainer=wiretrustee@wiretrustee.com"
|
||||||
docker_manifests:
|
docker_manifests:
|
||||||
|
- name_template: wiretrustee/wiretrustee:{{ .Version }}
|
||||||
|
image_templates:
|
||||||
|
- wiretrustee/wiretrustee:{{ .Version }}-arm64v8
|
||||||
|
- wiretrustee/wiretrustee:{{ .Version }}-arm
|
||||||
|
- wiretrustee/wiretrustee:{{ .Version }}-amd64
|
||||||
|
|
||||||
|
- name_template: wiretrustee/wiretrustee:latest
|
||||||
|
image_templates:
|
||||||
|
- wiretrustee/wiretrustee:{{ .Version }}-arm64v8
|
||||||
|
- wiretrustee/wiretrustee:{{ .Version }}-arm
|
||||||
|
- wiretrustee/wiretrustee:{{ .Version }}-amd64
|
||||||
|
|
||||||
- name_template: wiretrustee/signal:{{ .Version }}
|
- name_template: wiretrustee/signal:{{ .Version }}
|
||||||
image_templates:
|
image_templates:
|
||||||
- wiretrustee/signal:{{ .Version }}-arm64v8
|
- wiretrustee/signal:{{ .Version }}-arm64v8
|
||||||
|
- wiretrustee/signal:{{ .Version }}-arm
|
||||||
- wiretrustee/signal:{{ .Version }}-amd64
|
- wiretrustee/signal:{{ .Version }}-amd64
|
||||||
|
|
||||||
- name_template: wiretrustee/signal:latest
|
- name_template: wiretrustee/signal:latest
|
||||||
image_templates:
|
image_templates:
|
||||||
- wiretrustee/signal:{{ .Version }}-arm64v8
|
- wiretrustee/signal:{{ .Version }}-arm64v8
|
||||||
|
- wiretrustee/signal:{{ .Version }}-arm
|
||||||
- wiretrustee/signal:{{ .Version }}-amd64
|
- wiretrustee/signal:{{ .Version }}-amd64
|
||||||
|
|
||||||
- name_template: wiretrustee/management:{{ .Version }}
|
- name_template: wiretrustee/management:{{ .Version }}
|
||||||
image_templates:
|
image_templates:
|
||||||
- wiretrustee/management:{{ .Version }}-arm64v8
|
- wiretrustee/management:{{ .Version }}-arm64v8
|
||||||
|
- wiretrustee/management:{{ .Version }}-arm
|
||||||
- wiretrustee/management:{{ .Version }}-amd64
|
- wiretrustee/management:{{ .Version }}-amd64
|
||||||
|
|
||||||
- name_template: wiretrustee/management:latest
|
- name_template: wiretrustee/management:latest
|
||||||
image_templates:
|
image_templates:
|
||||||
- wiretrustee/management:{{ .Version }}-arm64v8
|
- wiretrustee/management:{{ .Version }}-arm64v8
|
||||||
|
- wiretrustee/management:{{ .Version }}-arm
|
||||||
- wiretrustee/management:{{ .Version }}-amd64
|
- wiretrustee/management:{{ .Version }}-amd64
|
||||||
|
|
||||||
- name_template: wiretrustee/management:debug-latest
|
- name_template: wiretrustee/management:debug-latest
|
||||||
image_templates:
|
image_templates:
|
||||||
- wiretrustee/management:{{ .Version }}-debug-arm64v8
|
- wiretrustee/management:{{ .Version }}-debug-arm64v8
|
||||||
|
- wiretrustee/management:{{ .Version }}-debug-arm
|
||||||
- wiretrustee/management:{{ .Version }}-debug-amd64
|
- wiretrustee/management:{{ .Version }}-debug-amd64
|
||||||
|
|
||||||
brews:
|
brews:
|
||||||
|
|||||||
26
README.md
26
README.md
@@ -15,7 +15,7 @@
|
|||||||
<strong>
|
<strong>
|
||||||
Start using Wiretrustee at <a href="https://app.wiretrustee.com/">app.wiretrustee.com</a>
|
Start using Wiretrustee at <a href="https://app.wiretrustee.com/">app.wiretrustee.com</a>
|
||||||
<br/>
|
<br/>
|
||||||
See <a href="docs/README.md">Documentation</a>
|
See <a href="https://docs.wiretrustee.com">Documentation</a>
|
||||||
<br/>
|
<br/>
|
||||||
Join our <a href="https://join.slack.com/t/wiretrustee/shared_invite/zt-vrahf41g-ik1v7fV8du6t0RwxSrJ96A">Slack channel</a>
|
Join our <a href="https://join.slack.com/t/wiretrustee/shared_invite/zt-vrahf41g-ik1v7fV8du6t0RwxSrJ96A">Slack channel</a>
|
||||||
<br/>
|
<br/>
|
||||||
@@ -27,12 +27,11 @@
|
|||||||
|
|
||||||
**Wiretrustee is an open-source VPN platform built on top of WireGuard® making it easy to create secure private networks for your organization or home.**
|
**Wiretrustee is an open-source VPN platform built on top of WireGuard® making it easy to create secure private networks for your organization or home.**
|
||||||
|
|
||||||
It requires zero configuration effort leaving behind the hassle of opening ports, complex firewall rules, vpn gateways, and so forth.
|
It requires zero configuration effort leaving behind the hassle of opening ports, complex firewall rules, VPN gateways, and so forth.
|
||||||
|
|
||||||
There is no centralized VPN server with Wiretrustee - your computers, devices, machines, and servers connect to each other directly over a fast encrypted tunnel.
|
|
||||||
|
|
||||||
**Wiretrustee automates Wireguard-based networks, offering a management layer with:**
|
**Wiretrustee automates Wireguard-based networks, offering a management layer with:**
|
||||||
* Centralized Peer IP management with a neat UI dashboard.
|
* Centralized Peer IP management with a UI dashboard.
|
||||||
|
* Encrypted peer-to-peer connections without a centralized VPN gateway.
|
||||||
* Automatic Peer discovery and configuration.
|
* Automatic Peer discovery and configuration.
|
||||||
* UDP hole punching to establish peer-to-peer connections behind NAT, firewall, and without a public static IP.
|
* UDP hole punching to establish peer-to-peer connections behind NAT, firewall, and without a public static IP.
|
||||||
* Connection relay fallback in case a peer-to-peer connection is not possible.
|
* Connection relay fallback in case a peer-to-peer connection is not possible.
|
||||||
@@ -40,6 +39,7 @@ There is no centralized VPN server with Wiretrustee - your computers, devices, m
|
|||||||
* Client application SSO with MFA (coming soon).
|
* Client application SSO with MFA (coming soon).
|
||||||
* Access Controls (coming soon).
|
* Access Controls (coming soon).
|
||||||
* Activity Monitoring (coming soon).
|
* Activity Monitoring (coming soon).
|
||||||
|
* Private DNS (coming soon)
|
||||||
|
|
||||||
### Secure peer-to-peer VPN in minutes
|
### Secure peer-to-peer VPN in minutes
|
||||||
<p float="left" align="middle">
|
<p float="left" align="middle">
|
||||||
@@ -56,14 +56,17 @@ Hosted demo version:
|
|||||||
|
|
||||||
|
|
||||||
### A bit on Wiretrustee internals
|
### A bit on Wiretrustee internals
|
||||||
* Wiretrustee features a Management Service that offers peer IP management and network updates distribution (e.g. when new peer joins the network).
|
* Wiretrustee features a Management Service that offers peer IP management and network updates distribution (e.g. when a new peer joins the network).
|
||||||
* Wiretrustee uses WebRTC ICE implemented in [pion/ice library](https://github.com/pion/ice) to discover connection candidates when establishing a peer-to-peer connection between devices.
|
* Wiretrustee uses WebRTC ICE implemented in [pion/ice library](https://github.com/pion/ice) to discover connection candidates when establishing a peer-to-peer connection between devices.
|
||||||
* Peers negotiate connection through [Signal Service](signal/).
|
* Peers negotiate connection through [Signal Service](signal/).
|
||||||
* Signal Service uses public Wireguard keys to route messages between peers.
|
* Signal Service uses public Wireguard keys to route messages between peers.
|
||||||
Contents of the messages sent between peers through the signaling server are encrypted with Wireguard keys, making it impossible to inspect them.
|
Contents of the messages sent between peers through the signaling server are encrypted with Wireguard keys, making it impossible to inspect them.
|
||||||
* Occasionally, the NAT-traversal is unsuccessful due to strict NATs (e.g. mobile carrier-grade NAT).
|
* Occasionally, the NAT traversal is unsuccessful due to strict NATs (e.g. mobile carrier-grade NAT). When this occurs the system falls back to the relay server (TURN), and a secure Wireguard tunnel is established via the TURN server. [Coturn](https://github.com/coturn/coturn) is the one that has been successfully used for STUN and TURN in Wiretrustee setups.
|
||||||
When this occurs the system falls back to relay server (TURN), and a secure Wireguard tunnel is established via TURN server.
|
|
||||||
[Coturn](https://github.com/coturn/coturn) is the one that has been successfully used for STUN and TURN in Wiretrustee setups.
|
<p float="left" align="middle">
|
||||||
|
<img src="https://docs.wiretrustee.com/img/architecture/high-level-dia.png" width="700"/>
|
||||||
|
</p>
|
||||||
|
|
||||||
|
|
||||||
### Product Roadmap
|
### Product Roadmap
|
||||||
- [Public Roadmap](https://github.com/wiretrustee/wiretrustee/projects/2)
|
- [Public Roadmap](https://github.com/wiretrustee/wiretrustee/projects/2)
|
||||||
@@ -145,6 +148,11 @@ For **Windows** systems, start powershell as administrator and:
|
|||||||
```shell
|
```shell
|
||||||
wiretrustee up --setup-key <SETUP KEY>
|
wiretrustee up --setup-key <SETUP KEY>
|
||||||
```
|
```
|
||||||
|
For **Docker**, you can run with the following command:
|
||||||
|
```shell
|
||||||
|
docker run --network host --privileged --rm -d -e WT_SETUP_KEY=<SETUP KEY> -v wiretrustee-client:/etc/wiretrustee wiretrustee/wiretrustee:<TAG>
|
||||||
|
```
|
||||||
|
> TAG > 0.3.0 version
|
||||||
|
|
||||||
Alternatively, if you are hosting your own Management Service provide `--management-url` property pointing to your Management Service:
|
Alternatively, if you are hosting your own Management Service provide `--management-url` property pointing to your Management Service:
|
||||||
```shell
|
```shell
|
||||||
|
|||||||
4
client/Dockerfile
Normal file
4
client/Dockerfile
Normal file
@@ -0,0 +1,4 @@
|
|||||||
|
FROM gcr.io/distroless/base:debug
|
||||||
|
ENV WT_LOG_FILE=console
|
||||||
|
ENTRYPOINT [ "/go/bin/wiretrustee","up"]
|
||||||
|
COPY wiretrustee /go/bin/wiretrustee
|
||||||
@@ -18,22 +18,21 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
setupKey string
|
|
||||||
|
|
||||||
loginCmd = &cobra.Command{
|
loginCmd = &cobra.Command{
|
||||||
Use: "login",
|
Use: "login",
|
||||||
Short: "login to the Wiretrustee Management Service (first run)",
|
Short: "login to the Wiretrustee Management Service (first run)",
|
||||||
RunE: func(cmd *cobra.Command, args []string) error {
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
|
SetFlagsFromEnvVars()
|
||||||
|
|
||||||
err := util.InitLog(logLevel, logFile)
|
err := util.InitLog(logLevel, logFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("failed initializing log %v", err)
|
log.Errorf("failed initializing log %v", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
config, err := internal.GetConfig(managementURL, configPath)
|
config, err := internal.GetConfig(managementURL, configPath, preSharedKey)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("failed getting config %s %v", configPath, err)
|
log.Errorf("failed getting config %s %v", configPath, err)
|
||||||
//os.Exit(ExitSetupFailed)
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -41,7 +40,6 @@ var (
|
|||||||
myPrivateKey, err := wgtypes.ParseKey(config.PrivateKey)
|
myPrivateKey, err := wgtypes.ParseKey(config.PrivateKey)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("failed parsing Wireguard key %s: [%s]", config.PrivateKey, err.Error())
|
log.Errorf("failed parsing Wireguard key %s: [%s]", config.PrivateKey, err.Error())
|
||||||
//os.Exit(ExitSetupFailed)
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -56,7 +54,6 @@ var (
|
|||||||
mgmClient, err := mgm.NewClient(ctx, config.ManagementURL.Host, myPrivateKey, mgmTlsEnabled)
|
mgmClient, err := mgm.NewClient(ctx, config.ManagementURL.Host, myPrivateKey, mgmTlsEnabled)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("failed connecting to Management Service %s %v", config.ManagementURL.String(), err)
|
log.Errorf("failed connecting to Management Service %s %v", config.ManagementURL.String(), err)
|
||||||
//os.Exit(ExitSetupFailed)
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
log.Debugf("connected to anagement Service %s", config.ManagementURL.String())
|
log.Debugf("connected to anagement Service %s", config.ManagementURL.String())
|
||||||
@@ -64,21 +61,18 @@ var (
|
|||||||
serverKey, err := mgmClient.GetServerPublicKey()
|
serverKey, err := mgmClient.GetServerPublicKey()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("failed while getting Management Service public key: %v", err)
|
log.Errorf("failed while getting Management Service public key: %v", err)
|
||||||
//os.Exit(ExitSetupFailed)
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err = loginPeer(*serverKey, mgmClient, setupKey)
|
_, err = loginPeer(*serverKey, mgmClient, setupKey)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("failed logging-in peer on Management Service : %v", err)
|
log.Errorf("failed logging-in peer on Management Service : %v", err)
|
||||||
//os.Exit(ExitSetupFailed)
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
err = mgmClient.Close()
|
err = mgmClient.Close()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("failed closing Management Service client: %v", err)
|
log.Errorf("failed closing Management Service client: %v", err)
|
||||||
//os.Exit(ExitSetupFailed)
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -151,6 +145,3 @@ func promptPeerSetupKey() (string, error) {
|
|||||||
|
|
||||||
return "", s.Err()
|
return "", s.Err()
|
||||||
}
|
}
|
||||||
|
|
||||||
//func init() {
|
|
||||||
//}
|
|
||||||
|
|||||||
@@ -4,19 +4,15 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
|
"github.com/spf13/pflag"
|
||||||
"github.com/wiretrustee/wiretrustee/client/internal"
|
"github.com/wiretrustee/wiretrustee/client/internal"
|
||||||
"os"
|
"os"
|
||||||
"os/signal"
|
"os/signal"
|
||||||
"runtime"
|
"runtime"
|
||||||
|
"strings"
|
||||||
"syscall"
|
"syscall"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
|
||||||
// ExitSetupFailed defines exit code
|
|
||||||
ExitSetupFailed = 1
|
|
||||||
DefaultConfigPath = ""
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
var (
|
||||||
configPath string
|
configPath string
|
||||||
defaultConfigPath string
|
defaultConfigPath string
|
||||||
@@ -24,6 +20,8 @@ var (
|
|||||||
defaultLogFile string
|
defaultLogFile string
|
||||||
logFile string
|
logFile string
|
||||||
managementURL string
|
managementURL string
|
||||||
|
setupKey string
|
||||||
|
preSharedKey string
|
||||||
rootCmd = &cobra.Command{
|
rootCmd = &cobra.Command{
|
||||||
Use: "wiretrustee",
|
Use: "wiretrustee",
|
||||||
Short: "",
|
Short: "",
|
||||||
@@ -56,6 +54,7 @@ func init() {
|
|||||||
rootCmd.PersistentFlags().StringVar(&logLevel, "log-level", "info", "sets Wiretrustee log level")
|
rootCmd.PersistentFlags().StringVar(&logLevel, "log-level", "info", "sets Wiretrustee log level")
|
||||||
rootCmd.PersistentFlags().StringVar(&logFile, "log-file", defaultLogFile, "sets Wiretrustee log path. If console is specified the the log will be output to stdout")
|
rootCmd.PersistentFlags().StringVar(&logFile, "log-file", defaultLogFile, "sets Wiretrustee log path. If console is specified the the log will be output to stdout")
|
||||||
rootCmd.PersistentFlags().StringVar(&setupKey, "setup-key", "", "Setup key obtained from the Management Service Dashboard (used to register peer)")
|
rootCmd.PersistentFlags().StringVar(&setupKey, "setup-key", "", "Setup key obtained from the Management Service Dashboard (used to register peer)")
|
||||||
|
rootCmd.PersistentFlags().StringVar(&preSharedKey, "preshared-key", "", "Sets Wireguard PreSharedKey property. If set, then only peers that have the same key can communicate.")
|
||||||
rootCmd.AddCommand(serviceCmd)
|
rootCmd.AddCommand(serviceCmd)
|
||||||
rootCmd.AddCommand(upCmd)
|
rootCmd.AddCommand(upCmd)
|
||||||
rootCmd.AddCommand(loginCmd)
|
rootCmd.AddCommand(loginCmd)
|
||||||
@@ -75,3 +74,28 @@ func SetupCloseHandler() {
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// SetFlagsFromEnvVars reads and updates flag values from environment variables with prefix WT_
|
||||||
|
func SetFlagsFromEnvVars() {
|
||||||
|
flags := rootCmd.PersistentFlags()
|
||||||
|
flags.VisitAll(func(f *pflag.Flag) {
|
||||||
|
|
||||||
|
envVar := FlagNameToEnvVar(f.Name)
|
||||||
|
|
||||||
|
if value, present := os.LookupEnv(envVar); present {
|
||||||
|
err := flags.Set(f.Name, value)
|
||||||
|
if err != nil {
|
||||||
|
log.Infof("unable to configure flag %s using variable %s, err: %v", f.Name, envVar, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// FlagNameToEnvVar converts flag name to environment var name adding a prefix,
|
||||||
|
// replacing dashes and making all uppercase (e.g. setup-keys is converted to WT_SETUP_KEYS)
|
||||||
|
func FlagNameToEnvVar(f string) string {
|
||||||
|
prefix := "WT_"
|
||||||
|
parsed := strings.ReplaceAll(f, "-", "_")
|
||||||
|
upper := strings.ToUpper(parsed)
|
||||||
|
return prefix + upper
|
||||||
|
}
|
||||||
|
|||||||
@@ -34,6 +34,3 @@ var (
|
|||||||
Short: "manages wiretrustee service",
|
Short: "manages wiretrustee service",
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -8,7 +8,7 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (p *program) Start(s service.Service) error {
|
func (p *program) Start(service.Service) error {
|
||||||
|
|
||||||
// Start should not block. Do the actual work async.
|
// Start should not block. Do the actual work async.
|
||||||
log.Info("starting service") //nolint
|
log.Info("starting service") //nolint
|
||||||
@@ -22,7 +22,7 @@ func (p *program) Start(s service.Service) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *program) Stop(s service.Service) error {
|
func (p *program) Stop(service.Service) error {
|
||||||
go func() {
|
go func() {
|
||||||
stopCh <- 1
|
stopCh <- 1
|
||||||
}()
|
}()
|
||||||
@@ -41,6 +41,7 @@ var (
|
|||||||
Use: "run",
|
Use: "run",
|
||||||
Short: "runs wiretrustee as service",
|
Short: "runs wiretrustee as service",
|
||||||
Run: func(cmd *cobra.Command, args []string) {
|
Run: func(cmd *cobra.Command, args []string) {
|
||||||
|
SetFlagsFromEnvVars()
|
||||||
|
|
||||||
err := util.InitLog(logLevel, logFile)
|
err := util.InitLog(logLevel, logFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -75,6 +76,8 @@ var (
|
|||||||
Use: "start",
|
Use: "start",
|
||||||
Short: "starts wiretrustee service",
|
Short: "starts wiretrustee service",
|
||||||
RunE: func(cmd *cobra.Command, args []string) error {
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
|
SetFlagsFromEnvVars()
|
||||||
|
|
||||||
err := util.InitLog(logLevel, logFile)
|
err := util.InitLog(logLevel, logFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("failed initializing log %v", err)
|
log.Errorf("failed initializing log %v", err)
|
||||||
@@ -101,6 +104,8 @@ var (
|
|||||||
Use: "stop",
|
Use: "stop",
|
||||||
Short: "stops wiretrustee service",
|
Short: "stops wiretrustee service",
|
||||||
Run: func(cmd *cobra.Command, args []string) {
|
Run: func(cmd *cobra.Command, args []string) {
|
||||||
|
SetFlagsFromEnvVars()
|
||||||
|
|
||||||
err := util.InitLog(logLevel, logFile)
|
err := util.InitLog(logLevel, logFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("failed initializing log %v", err)
|
log.Errorf("failed initializing log %v", err)
|
||||||
@@ -125,6 +130,8 @@ var (
|
|||||||
Use: "restart",
|
Use: "restart",
|
||||||
Short: "restarts wiretrustee service",
|
Short: "restarts wiretrustee service",
|
||||||
Run: func(cmd *cobra.Command, args []string) {
|
Run: func(cmd *cobra.Command, args []string) {
|
||||||
|
SetFlagsFromEnvVars()
|
||||||
|
|
||||||
err := util.InitLog(logLevel, logFile)
|
err := util.InitLog(logLevel, logFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("failed initializing log %v", err)
|
log.Errorf("failed initializing log %v", err)
|
||||||
@@ -143,6 +150,3 @@ var (
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -10,6 +10,7 @@ var (
|
|||||||
Use: "install",
|
Use: "install",
|
||||||
Short: "installs wiretrustee service",
|
Short: "installs wiretrustee service",
|
||||||
RunE: func(cmd *cobra.Command, args []string) error {
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
|
SetFlagsFromEnvVars()
|
||||||
|
|
||||||
svcConfig := newSVCConfig()
|
svcConfig := newSVCConfig()
|
||||||
|
|
||||||
@@ -49,6 +50,7 @@ var (
|
|||||||
Use: "uninstall",
|
Use: "uninstall",
|
||||||
Short: "uninstalls wiretrustee service from system",
|
Short: "uninstalls wiretrustee service from system",
|
||||||
Run: func(cmd *cobra.Command, args []string) {
|
Run: func(cmd *cobra.Command, args []string) {
|
||||||
|
SetFlagsFromEnvVars()
|
||||||
|
|
||||||
s, err := newSVC(&program{}, newSVCConfig())
|
s, err := newSVC(&program{}, newSVCConfig())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -65,6 +67,3 @@ var (
|
|||||||
},
|
},
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -21,7 +21,7 @@ var (
|
|||||||
Use: "up",
|
Use: "up",
|
||||||
Short: "install, login and start wiretrustee client",
|
Short: "install, login and start wiretrustee client",
|
||||||
RunE: func(cmd *cobra.Command, args []string) error {
|
RunE: func(cmd *cobra.Command, args []string) error {
|
||||||
|
SetFlagsFromEnvVars()
|
||||||
err := loginCmd.RunE(cmd, args)
|
err := loginCmd.RunE(cmd, args)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -63,12 +63,23 @@ func createEngineConfig(key wgtypes.Key, config *internal.Config, peerConfig *mg
|
|||||||
iFaceBlackList[config.IFaceBlackList[i]] = struct{}{}
|
iFaceBlackList[config.IFaceBlackList[i]] = struct{}{}
|
||||||
}
|
}
|
||||||
|
|
||||||
return &internal.EngineConfig{
|
engineConf := &internal.EngineConfig{
|
||||||
WgIface: config.WgIface,
|
WgIface: config.WgIface,
|
||||||
WgAddr: peerConfig.Address,
|
WgAddr: peerConfig.Address,
|
||||||
IFaceBlackList: iFaceBlackList,
|
IFaceBlackList: iFaceBlackList,
|
||||||
WgPrivateKey: key,
|
WgPrivateKey: key,
|
||||||
}, nil
|
WgPort: internal.WgPort,
|
||||||
|
}
|
||||||
|
|
||||||
|
if config.PreSharedKey != "" {
|
||||||
|
preSharedKey, err := wgtypes.ParseKey(config.PreSharedKey)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
engineConf.PreSharedKey = &preSharedKey
|
||||||
|
}
|
||||||
|
|
||||||
|
return engineConf, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// connectToSignal creates Signal Service client and established a connection
|
// connectToSignal creates Signal Service client and established a connection
|
||||||
|
|||||||
@@ -36,7 +36,7 @@ func TestUp_Start(t *testing.T) {
|
|||||||
|
|
||||||
func TestUp(t *testing.T) {
|
func TestUp(t *testing.T) {
|
||||||
|
|
||||||
defer iface.Close()
|
defer iface.Close("wt0")
|
||||||
|
|
||||||
tempDir := t.TempDir()
|
tempDir := t.TempDir()
|
||||||
confPath := tempDir + "/config.json"
|
confPath := tempDir + "/config.json"
|
||||||
|
|||||||
@@ -106,6 +106,7 @@ SectionEnd
|
|||||||
Section Uninstall
|
Section Uninstall
|
||||||
${INSTALL_TYPE}
|
${INSTALL_TYPE}
|
||||||
|
|
||||||
|
Exec '"$INSTDIR\${MAIN_APP_EXE}" service stop'
|
||||||
Exec '"$INSTDIR\${MAIN_APP_EXE}" service uninstall'
|
Exec '"$INSTDIR\${MAIN_APP_EXE}" service uninstall'
|
||||||
# wait the service uninstall take unblock the executable
|
# wait the service uninstall take unblock the executable
|
||||||
Sleep 3000
|
Sleep 3000
|
||||||
|
|||||||
@@ -1,32 +0,0 @@
|
|||||||
package internal
|
|
||||||
|
|
||||||
import "sync"
|
|
||||||
|
|
||||||
// A Cond is a condition variable like sync.Cond, but using a channel so we can use select.
|
|
||||||
type Cond struct {
|
|
||||||
once sync.Once
|
|
||||||
C chan struct{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewCond creates a new condition variable.
|
|
||||||
func NewCond() *Cond {
|
|
||||||
return &Cond{C: make(chan struct{})}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Do runs f if the condition hasn't been signaled yet. Afterwards it will be signaled.
|
|
||||||
func (c *Cond) Do(f func()) {
|
|
||||||
c.once.Do(func() {
|
|
||||||
f()
|
|
||||||
close(c.C)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Signal closes the condition variable channel.
|
|
||||||
func (c *Cond) Signal() {
|
|
||||||
c.Do(func() {})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Wait waits for the condition variable channel to close.
|
|
||||||
func (c *Cond) Wait() {
|
|
||||||
<-c.C
|
|
||||||
}
|
|
||||||
@@ -28,13 +28,14 @@ func init() {
|
|||||||
type Config struct {
|
type Config struct {
|
||||||
// Wireguard private key of local peer
|
// Wireguard private key of local peer
|
||||||
PrivateKey string
|
PrivateKey string
|
||||||
|
PreSharedKey string
|
||||||
ManagementURL *url.URL
|
ManagementURL *url.URL
|
||||||
WgIface string
|
WgIface string
|
||||||
IFaceBlackList []string
|
IFaceBlackList []string
|
||||||
}
|
}
|
||||||
|
|
||||||
//createNewConfig creates a new config generating a new Wireguard key and saving to file
|
//createNewConfig creates a new config generating a new Wireguard key and saving to file
|
||||||
func createNewConfig(managementURL string, configPath string) (*Config, error) {
|
func createNewConfig(managementURL string, configPath string, preSharedKey string) (*Config, error) {
|
||||||
wgKey := generateKey()
|
wgKey := generateKey()
|
||||||
config := &Config{PrivateKey: wgKey, WgIface: iface.WgInterfaceDefault, IFaceBlackList: []string{}}
|
config := &Config{PrivateKey: wgKey, WgIface: iface.WgInterfaceDefault, IFaceBlackList: []string{}}
|
||||||
if managementURL != "" {
|
if managementURL != "" {
|
||||||
@@ -47,6 +48,10 @@ func createNewConfig(managementURL string, configPath string) (*Config, error) {
|
|||||||
config.ManagementURL = managementURLDefault
|
config.ManagementURL = managementURLDefault
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if preSharedKey != "" {
|
||||||
|
config.PreSharedKey = preSharedKey
|
||||||
|
}
|
||||||
|
|
||||||
config.IFaceBlackList = []string{iface.WgInterfaceDefault, "tun0"}
|
config.IFaceBlackList = []string{iface.WgInterfaceDefault, "tun0"}
|
||||||
|
|
||||||
err := util.WriteJson(configPath, config)
|
err := util.WriteJson(configPath, config)
|
||||||
@@ -93,11 +98,11 @@ func ReadConfig(managementURL string, configPath string) (*Config, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// GetConfig reads existing config or generates a new one
|
// GetConfig reads existing config or generates a new one
|
||||||
func GetConfig(managementURL string, configPath string) (*Config, error) {
|
func GetConfig(managementURL string, configPath string, preSharedKey string) (*Config, error) {
|
||||||
|
|
||||||
if _, err := os.Stat(configPath); os.IsNotExist(err) {
|
if _, err := os.Stat(configPath); os.IsNotExist(err) {
|
||||||
log.Infof("generating new config %s", configPath)
|
log.Infof("generating new config %s", configPath)
|
||||||
return createNewConfig(managementURL, configPath)
|
return createNewConfig(managementURL, configPath, preSharedKey)
|
||||||
} else {
|
} else {
|
||||||
return ReadConfig(managementURL, configPath)
|
return ReadConfig(managementURL, configPath)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -1,417 +0,0 @@
|
|||||||
package internal
|
|
||||||
|
|
||||||
import (
|
|
||||||
"context"
|
|
||||||
"fmt"
|
|
||||||
ice "github.com/pion/ice/v2"
|
|
||||||
log "github.com/sirupsen/logrus"
|
|
||||||
"github.com/wiretrustee/wiretrustee/iface"
|
|
||||||
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
|
|
||||||
"net"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
// DefaultWgKeepAlive default Wireguard keep alive constant
|
|
||||||
DefaultWgKeepAlive = 20 * time.Second
|
|
||||||
privateIPBlocks []*net.IPNet
|
|
||||||
)
|
|
||||||
|
|
||||||
type Status string
|
|
||||||
|
|
||||||
const (
|
|
||||||
StatusConnected Status = "Connected"
|
|
||||||
StatusConnecting Status = "Connecting"
|
|
||||||
StatusDisconnected Status = "Disconnected"
|
|
||||||
)
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
for _, cidr := range []string{
|
|
||||||
"127.0.0.0/8", // IPv4 loopback
|
|
||||||
"10.0.0.0/8", // RFC1918
|
|
||||||
"172.16.0.0/12", // RFC1918
|
|
||||||
"192.168.0.0/16", // RFC1918
|
|
||||||
"169.254.0.0/16", // RFC3927 link-local
|
|
||||||
"::1/128", // IPv6 loopback
|
|
||||||
"fe80::/10", // IPv6 link-local
|
|
||||||
"fc00::/7", // IPv6 unique local addr
|
|
||||||
} {
|
|
||||||
_, block, err := net.ParseCIDR(cidr)
|
|
||||||
if err != nil {
|
|
||||||
panic(fmt.Errorf("parse error on %q: %v", cidr, err))
|
|
||||||
}
|
|
||||||
privateIPBlocks = append(privateIPBlocks, block)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// ConnConfig Connection configuration struct
|
|
||||||
type ConnConfig struct {
|
|
||||||
// Local Wireguard listening address e.g. 127.0.0.1:51820
|
|
||||||
WgListenAddr string
|
|
||||||
// A Local Wireguard Peer IP address in CIDR notation e.g. 10.30.30.1/24
|
|
||||||
WgPeerIP string
|
|
||||||
// Local Wireguard Interface name (e.g. wg0)
|
|
||||||
WgIface string
|
|
||||||
// Wireguard allowed IPs (e.g. 10.30.30.2/32)
|
|
||||||
WgAllowedIPs string
|
|
||||||
// Local Wireguard private key
|
|
||||||
WgKey wgtypes.Key
|
|
||||||
// Remote Wireguard public key
|
|
||||||
RemoteWgKey wgtypes.Key
|
|
||||||
|
|
||||||
StunTurnURLS []*ice.URL
|
|
||||||
|
|
||||||
iFaceBlackList map[string]struct{}
|
|
||||||
}
|
|
||||||
|
|
||||||
// IceCredentials ICE protocol credentials struct
|
|
||||||
type IceCredentials struct {
|
|
||||||
uFrag string
|
|
||||||
pwd string
|
|
||||||
}
|
|
||||||
|
|
||||||
// Connection Holds information about a connection and handles signal protocol
|
|
||||||
type Connection struct {
|
|
||||||
Config ConnConfig
|
|
||||||
// signalCandidate is a handler function to signal remote peer about local connection candidate
|
|
||||||
signalCandidate func(candidate ice.Candidate) error
|
|
||||||
|
|
||||||
// signalOffer is a handler function to signal remote peer our connection offer (credentials)
|
|
||||||
signalOffer func(uFrag string, pwd string) error
|
|
||||||
|
|
||||||
// signalOffer is a handler function to signal remote peer our connection answer (credentials)
|
|
||||||
signalAnswer func(uFrag string, pwd string) error
|
|
||||||
|
|
||||||
// remoteAuthChannel is a channel used to wait for remote credentials to proceed with the connection
|
|
||||||
remoteAuthChannel chan IceCredentials
|
|
||||||
|
|
||||||
// agent is an actual ice.Agent that is used to negotiate and maintain a connection to a remote peer
|
|
||||||
agent *ice.Agent
|
|
||||||
|
|
||||||
wgProxy *WgProxy
|
|
||||||
|
|
||||||
connected *Cond
|
|
||||||
closeCond *Cond
|
|
||||||
|
|
||||||
remoteAuthCond sync.Once
|
|
||||||
|
|
||||||
Status Status
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewConnection Creates a new connection and sets handling functions for signal protocol
|
|
||||||
func NewConnection(config ConnConfig,
|
|
||||||
signalCandidate func(candidate ice.Candidate) error,
|
|
||||||
signalOffer func(uFrag string, pwd string) error,
|
|
||||||
signalAnswer func(uFrag string, pwd string) error,
|
|
||||||
) *Connection {
|
|
||||||
|
|
||||||
return &Connection{
|
|
||||||
Config: config,
|
|
||||||
signalCandidate: signalCandidate,
|
|
||||||
signalOffer: signalOffer,
|
|
||||||
signalAnswer: signalAnswer,
|
|
||||||
remoteAuthChannel: make(chan IceCredentials, 1),
|
|
||||||
closeCond: NewCond(),
|
|
||||||
connected: NewCond(),
|
|
||||||
agent: nil,
|
|
||||||
wgProxy: NewWgProxy(config.WgIface, config.RemoteWgKey.String(), config.WgAllowedIPs, config.WgListenAddr),
|
|
||||||
Status: StatusDisconnected,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Open opens connection to a remote peer.
|
|
||||||
// Will block until the connection has successfully established
|
|
||||||
func (conn *Connection) Open(timeout time.Duration) error {
|
|
||||||
|
|
||||||
// create an ice.Agent that will be responsible for negotiating and establishing actual peer-to-peer connection
|
|
||||||
a, err := ice.NewAgent(&ice.AgentConfig{
|
|
||||||
// MulticastDNSMode: ice.MulticastDNSModeQueryAndGather,
|
|
||||||
NetworkTypes: []ice.NetworkType{ice.NetworkTypeUDP4},
|
|
||||||
Urls: conn.Config.StunTurnURLS,
|
|
||||||
CandidateTypes: []ice.CandidateType{ice.CandidateTypeHost, ice.CandidateTypeServerReflexive, ice.CandidateTypeRelay},
|
|
||||||
InterfaceFilter: func(s string) bool {
|
|
||||||
if conn.Config.iFaceBlackList == nil {
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
_, ok := conn.Config.iFaceBlackList[s]
|
|
||||||
return !ok
|
|
||||||
},
|
|
||||||
})
|
|
||||||
conn.agent = a
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
err = conn.listenOnLocalCandidates()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
err = conn.listenOnConnectionStateChanges()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
err = conn.signalCredentials()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
conn.Status = StatusConnecting
|
|
||||||
log.Debugf("trying to connect to peer %s", conn.Config.RemoteWgKey.String())
|
|
||||||
|
|
||||||
// wait until credentials have been sent from the remote peer (will arrive via a signal server)
|
|
||||||
select {
|
|
||||||
case remoteAuth := <-conn.remoteAuthChannel:
|
|
||||||
|
|
||||||
log.Debugf("got a connection confirmation from peer %s", conn.Config.RemoteWgKey.String())
|
|
||||||
|
|
||||||
err = conn.agent.GatherCandidates()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
isControlling := conn.Config.WgKey.PublicKey().String() > conn.Config.RemoteWgKey.String()
|
|
||||||
var remoteConn *ice.Conn
|
|
||||||
remoteConn, err = conn.openConnectionToRemote(isControlling, remoteAuth)
|
|
||||||
if err != nil {
|
|
||||||
log.Errorf("failed establishing connection with the remote peer %s %s", conn.Config.RemoteWgKey.String(), err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
var pair *ice.CandidatePair
|
|
||||||
pair, err = conn.agent.GetSelectedCandidatePair()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
useProxy := useProxy(pair)
|
|
||||||
|
|
||||||
// in case the remote peer is in the local network or one of the peers has public static IP -> no need for a Wireguard proxy, direct communication is possible.
|
|
||||||
if !useProxy {
|
|
||||||
log.Debugf("it is possible to establish a direct connection (without proxy) to peer %s - my addr: %s, remote addr: %s", conn.Config.RemoteWgKey.String(), pair.Local, pair.Remote)
|
|
||||||
err = conn.wgProxy.StartLocal(fmt.Sprintf("%s:%d", pair.Remote.Address(), iface.WgPort))
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
} else {
|
|
||||||
log.Debugf("establishing secure tunnel to peer %s via selected candidate pair %s", conn.Config.RemoteWgKey.String(), pair)
|
|
||||||
err = conn.wgProxy.Start(remoteConn)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
relayed := pair.Remote.Type() == ice.CandidateTypeRelay || pair.Local.Type() == ice.CandidateTypeRelay
|
|
||||||
|
|
||||||
conn.Status = StatusConnected
|
|
||||||
log.Infof("opened connection to peer %s [localProxy=%v, relayed=%v]", conn.Config.RemoteWgKey.String(), useProxy, relayed)
|
|
||||||
case <-conn.closeCond.C:
|
|
||||||
conn.Status = StatusDisconnected
|
|
||||||
return fmt.Errorf("connection to peer %s has been closed", conn.Config.RemoteWgKey.String())
|
|
||||||
case <-time.After(timeout):
|
|
||||||
err = conn.Close()
|
|
||||||
if err != nil {
|
|
||||||
log.Warnf("error while closing connection to peer %s -> %s", conn.Config.RemoteWgKey.String(), err.Error())
|
|
||||||
}
|
|
||||||
conn.Status = StatusDisconnected
|
|
||||||
return fmt.Errorf("timeout of %vs exceeded while waiting for the remote peer %s", timeout.Seconds(), conn.Config.RemoteWgKey.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
// wait until connection has been closed
|
|
||||||
<-conn.closeCond.C
|
|
||||||
conn.Status = StatusDisconnected
|
|
||||||
return fmt.Errorf("connection to peer %s has been closed", conn.Config.RemoteWgKey.String())
|
|
||||||
}
|
|
||||||
|
|
||||||
func isPublicIP(ip net.IP) bool {
|
|
||||||
if ip.IsLoopback() || ip.IsLinkLocalUnicast() || ip.IsLinkLocalMulticast() {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
for _, block := range privateIPBlocks {
|
|
||||||
if block.Contains(ip) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
//useProxy determines whether a direct connection (without a go proxy) is possible
|
|
||||||
//There are 3 cases: one of the peers has a public IP or both peers are in the same private network
|
|
||||||
//Please note, that this check happens when peers were already able to ping each other with ICE layer.
|
|
||||||
func useProxy(pair *ice.CandidatePair) bool {
|
|
||||||
remoteIP := net.ParseIP(pair.Remote.Address())
|
|
||||||
myIp := net.ParseIP(pair.Local.Address())
|
|
||||||
remoteIsPublic := isPublicIP(remoteIP)
|
|
||||||
myIsPublic := isPublicIP(myIp)
|
|
||||||
|
|
||||||
//one of the hosts has a public IP
|
|
||||||
if remoteIsPublic && pair.Remote.Type() == ice.CandidateTypeHost {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
if myIsPublic && pair.Local.Type() == ice.CandidateTypeHost {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
if pair.Local.Type() == ice.CandidateTypeHost && pair.Remote.Type() == ice.CandidateTypeHost {
|
|
||||||
if !remoteIsPublic && !myIsPublic {
|
|
||||||
//both hosts are in the same private network
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close Closes a peer connection
|
|
||||||
func (conn *Connection) Close() error {
|
|
||||||
var err error
|
|
||||||
conn.closeCond.Do(func() {
|
|
||||||
|
|
||||||
log.Debugf("closing connection to peer %s", conn.Config.RemoteWgKey.String())
|
|
||||||
|
|
||||||
if a := conn.agent; a != nil {
|
|
||||||
e := a.Close()
|
|
||||||
if e != nil {
|
|
||||||
log.Warnf("error while closing ICE agent of peer connection %s", conn.Config.RemoteWgKey.String())
|
|
||||||
err = e
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if c := conn.wgProxy; c != nil {
|
|
||||||
e := c.Close()
|
|
||||||
if e != nil {
|
|
||||||
log.Warnf("error while closingWireguard proxy connection of peer connection %s", conn.Config.RemoteWgKey.String())
|
|
||||||
err = e
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// OnAnswer Handles the answer from the other peer
|
|
||||||
func (conn *Connection) OnAnswer(remoteAuth IceCredentials) error {
|
|
||||||
|
|
||||||
conn.remoteAuthCond.Do(func() {
|
|
||||||
log.Debugf("OnAnswer from peer %s", conn.Config.RemoteWgKey.String())
|
|
||||||
conn.remoteAuthChannel <- remoteAuth
|
|
||||||
})
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// OnOffer Handles the offer from the other peer
|
|
||||||
func (conn *Connection) OnOffer(remoteAuth IceCredentials) error {
|
|
||||||
|
|
||||||
conn.remoteAuthCond.Do(func() {
|
|
||||||
log.Debugf("OnOffer from peer %s", conn.Config.RemoteWgKey.String())
|
|
||||||
conn.remoteAuthChannel <- remoteAuth
|
|
||||||
uFrag, pwd, err := conn.agent.GetLocalUserCredentials()
|
|
||||||
if err != nil { //nolint
|
|
||||||
}
|
|
||||||
|
|
||||||
err = conn.signalAnswer(uFrag, pwd)
|
|
||||||
if err != nil { //nolint
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// OnRemoteCandidate Handles remote candidate provided by the peer.
|
|
||||||
func (conn *Connection) OnRemoteCandidate(candidate ice.Candidate) error {
|
|
||||||
|
|
||||||
log.Debugf("onRemoteCandidate from peer %s -> %s", conn.Config.RemoteWgKey.String(), candidate.String())
|
|
||||||
|
|
||||||
err := conn.agent.AddRemoteCandidate(candidate)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// openConnectionToRemote opens an ice.Conn to the remote peer. This is a real peer-to-peer connection
|
|
||||||
// blocks until connection has been established
|
|
||||||
func (conn *Connection) openConnectionToRemote(isControlling bool, credentials IceCredentials) (*ice.Conn, error) {
|
|
||||||
var realConn *ice.Conn
|
|
||||||
var err error
|
|
||||||
|
|
||||||
if isControlling {
|
|
||||||
realConn, err = conn.agent.Dial(context.TODO(), credentials.uFrag, credentials.pwd)
|
|
||||||
} else {
|
|
||||||
realConn, err = conn.agent.Accept(context.TODO(), credentials.uFrag, credentials.pwd)
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
return realConn, err
|
|
||||||
}
|
|
||||||
|
|
||||||
// signalCredentials prepares local user credentials and signals them to the remote peer
|
|
||||||
func (conn *Connection) signalCredentials() error {
|
|
||||||
localUFrag, localPwd, err := conn.agent.GetLocalUserCredentials()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
err = conn.signalOffer(localUFrag, localPwd)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// listenOnLocalCandidates registers callback of an ICE Agent to receive new local connection candidates and then
|
|
||||||
// signals them to the remote peer
|
|
||||||
func (conn *Connection) listenOnLocalCandidates() error {
|
|
||||||
err := conn.agent.OnCandidate(func(candidate ice.Candidate) {
|
|
||||||
if candidate != nil {
|
|
||||||
log.Debugf("discovered local candidate %s", candidate.String())
|
|
||||||
err := conn.signalCandidate(candidate)
|
|
||||||
if err != nil {
|
|
||||||
log.Errorf("failed signaling candidate to the remote peer %s %s", conn.Config.RemoteWgKey.String(), err)
|
|
||||||
//todo ??
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// listenOnConnectionStateChanges registers callback of an ICE Agent to track connection state
|
|
||||||
func (conn *Connection) listenOnConnectionStateChanges() error {
|
|
||||||
err := conn.agent.OnConnectionStateChange(func(state ice.ConnectionState) {
|
|
||||||
log.Debugf("ICE Connection State has changed for peer %s -> %s", conn.Config.RemoteWgKey.String(), state.String())
|
|
||||||
if state == ice.ConnectionStateConnected {
|
|
||||||
// closed the connection has been established we can check the selected candidate pair
|
|
||||||
pair, err := conn.agent.GetSelectedCandidatePair()
|
|
||||||
if err != nil {
|
|
||||||
log.Errorf("failed selecting active ICE candidate pair %s", err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
log.Debugf("ICE connected to peer %s via a selected connnection candidate pair %s", conn.Config.RemoteWgKey.String(), pair)
|
|
||||||
} else if state == ice.ConnectionStateDisconnected || state == ice.ConnectionStateFailed {
|
|
||||||
err := conn.Close()
|
|
||||||
if err != nil {
|
|
||||||
log.Warnf("error while closing connection to peer %s -> %s", conn.Config.RemoteWgKey.String(), err.Error())
|
|
||||||
}
|
|
||||||
}
|
|
||||||
})
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
@@ -3,26 +3,32 @@ package internal
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/cenkalti/backoff/v4"
|
"github.com/pion/ice/v2"
|
||||||
ice "github.com/pion/ice/v2"
|
|
||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
|
"github.com/wiretrustee/wiretrustee/client/internal/peer"
|
||||||
|
"github.com/wiretrustee/wiretrustee/client/internal/proxy"
|
||||||
"github.com/wiretrustee/wiretrustee/iface"
|
"github.com/wiretrustee/wiretrustee/iface"
|
||||||
mgm "github.com/wiretrustee/wiretrustee/management/client"
|
mgm "github.com/wiretrustee/wiretrustee/management/client"
|
||||||
mgmProto "github.com/wiretrustee/wiretrustee/management/proto"
|
mgmProto "github.com/wiretrustee/wiretrustee/management/proto"
|
||||||
signal "github.com/wiretrustee/wiretrustee/signal/client"
|
signal "github.com/wiretrustee/wiretrustee/signal/client"
|
||||||
sProto "github.com/wiretrustee/wiretrustee/signal/proto"
|
sProto "github.com/wiretrustee/wiretrustee/signal/proto"
|
||||||
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
|
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
|
||||||
|
"math/rand"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
// PeerConnectionTimeout is a timeout of an initial connection attempt to a remote peer.
|
// PeerConnectionTimeoutMax is a timeout of an initial connection attempt to a remote peer.
|
||||||
// E.g. this peer will wait PeerConnectionTimeout for the remote peer to respond, if not successful then it will retry the connection attempt.
|
// E.g. this peer will wait PeerConnectionTimeoutMax for the remote peer to respond, if not successful then it will retry the connection attempt.
|
||||||
const PeerConnectionTimeout = 40 * time.Second
|
const PeerConnectionTimeoutMax = 45000 //ms
|
||||||
|
const PeerConnectionTimeoutMin = 30000 //ms
|
||||||
|
|
||||||
|
const WgPort = 51820
|
||||||
|
|
||||||
// EngineConfig is a config for the Engine
|
// EngineConfig is a config for the Engine
|
||||||
type EngineConfig struct {
|
type EngineConfig struct {
|
||||||
|
WgPort int
|
||||||
WgIface string
|
WgIface string
|
||||||
// WgAddr is a Wireguard local address (Wiretrustee Network IP)
|
// WgAddr is a Wireguard local address (Wiretrustee Network IP)
|
||||||
WgAddr string
|
WgAddr string
|
||||||
@@ -30,6 +36,8 @@ type EngineConfig struct {
|
|||||||
WgPrivateKey wgtypes.Key
|
WgPrivateKey wgtypes.Key
|
||||||
// IFaceBlackList is a list of network interfaces to ignore when discovering connection candidates (ICE related)
|
// IFaceBlackList is a list of network interfaces to ignore when discovering connection candidates (ICE related)
|
||||||
IFaceBlackList map[string]struct{}
|
IFaceBlackList map[string]struct{}
|
||||||
|
|
||||||
|
PreSharedKey *wgtypes.Key
|
||||||
}
|
}
|
||||||
|
|
||||||
// Engine is a mechanism responsible for reacting on Signal and Management stream events and managing connections to the remote peers.
|
// Engine is a mechanism responsible for reacting on Signal and Management stream events and managing connections to the remote peers.
|
||||||
@@ -38,19 +46,13 @@ type Engine struct {
|
|||||||
signal *signal.Client
|
signal *signal.Client
|
||||||
// mgmClient is a Management Service client
|
// mgmClient is a Management Service client
|
||||||
mgmClient *mgm.Client
|
mgmClient *mgm.Client
|
||||||
// conns is a collection of remote peer connections indexed by local public key of the remote peers
|
// peerConns is a map that holds all the peers that are known to this peer
|
||||||
conns map[string]*Connection
|
peerConns map[string]*peer.Conn
|
||||||
|
|
||||||
// peerMux is used to sync peer operations (e.g. open connection, peer removal)
|
|
||||||
peerMux *sync.Mutex
|
|
||||||
// syncMsgMux is used to guarantee sequential Management Service message processing
|
// syncMsgMux is used to guarantee sequential Management Service message processing
|
||||||
syncMsgMux *sync.Mutex
|
syncMsgMux *sync.Mutex
|
||||||
|
|
||||||
config *EngineConfig
|
config *EngineConfig
|
||||||
|
|
||||||
// wgPort is a Wireguard local listen port
|
|
||||||
wgPort int
|
|
||||||
|
|
||||||
// STUNs is a list of STUN servers used by ICE
|
// STUNs is a list of STUN servers used by ICE
|
||||||
STUNs []*ice.URL
|
STUNs []*ice.URL
|
||||||
// TURNs is a list of STUN servers used by ICE
|
// TURNs is a list of STUN servers used by ICE
|
||||||
@@ -72,8 +74,7 @@ func NewEngine(signalClient *signal.Client, mgmClient *mgm.Client, config *Engin
|
|||||||
return &Engine{
|
return &Engine{
|
||||||
signal: signalClient,
|
signal: signalClient,
|
||||||
mgmClient: mgmClient,
|
mgmClient: mgmClient,
|
||||||
conns: map[string]*Connection{},
|
peerConns: map[string]*peer.Conn{},
|
||||||
peerMux: &sync.Mutex{},
|
|
||||||
syncMsgMux: &sync.Mutex{},
|
syncMsgMux: &sync.Mutex{},
|
||||||
config: config,
|
config: config,
|
||||||
STUNs: []*ice.URL{},
|
STUNs: []*ice.URL{},
|
||||||
@@ -84,13 +85,16 @@ func NewEngine(signalClient *signal.Client, mgmClient *mgm.Client, config *Engin
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (e *Engine) Stop() error {
|
func (e *Engine) Stop() error {
|
||||||
|
e.syncMsgMux.Lock()
|
||||||
|
defer e.syncMsgMux.Unlock()
|
||||||
|
|
||||||
err := e.removeAllPeerConnections()
|
err := e.removeAllPeerConnections()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Debugf("removing Wiretrustee interface %s", e.config.WgIface)
|
log.Debugf("removing Wiretrustee interface %s", e.config.WgIface)
|
||||||
err = iface.Close()
|
err = iface.Close(e.config.WgIface)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("failed closing Wiretrustee interface %s %v", e.config.WgIface, err)
|
log.Errorf("failed closing Wiretrustee interface %s %v", e.config.WgIface, err)
|
||||||
return err
|
return err
|
||||||
@@ -105,6 +109,8 @@ func (e *Engine) Stop() error {
|
|||||||
// Connections to remote peers are not established here.
|
// Connections to remote peers are not established here.
|
||||||
// However, they will be established once an event with a list of peers to connect to will be received from Management Service
|
// However, they will be established once an event with a list of peers to connect to will be received from Management Service
|
||||||
func (e *Engine) Start() error {
|
func (e *Engine) Start() error {
|
||||||
|
e.syncMsgMux.Lock()
|
||||||
|
defer e.syncMsgMux.Unlock()
|
||||||
|
|
||||||
wgIface := e.config.WgIface
|
wgIface := e.config.WgIface
|
||||||
wgAddr := e.config.WgAddr
|
wgAddr := e.config.WgAddr
|
||||||
@@ -116,79 +122,33 @@ func (e *Engine) Start() error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
err = iface.Configure(wgIface, myPrivateKey.String())
|
err = iface.Configure(wgIface, myPrivateKey.String(), e.config.WgPort)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("failed configuring Wireguard interface [%s]: %s", wgIface, err.Error())
|
log.Errorf("failed configuring Wireguard interface [%s]: %s", wgIface, err.Error())
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
port, err := iface.GetListenPort(wgIface)
|
|
||||||
if err != nil {
|
|
||||||
log.Errorf("failed getting Wireguard listen port [%s]: %s", wgIface, err.Error())
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
e.wgPort = *port
|
|
||||||
|
|
||||||
e.receiveSignalEvents()
|
e.receiveSignalEvents()
|
||||||
e.receiveManagementEvents()
|
e.receiveManagementEvents()
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// initializePeer peer agent attempt to open connection
|
func (e *Engine) removePeers(peers []string) error {
|
||||||
func (e *Engine) initializePeer(peer Peer) {
|
for _, p := range peers {
|
||||||
var backOff = backoff.WithContext(&backoff.ExponentialBackOff{
|
err := e.removePeer(p)
|
||||||
InitialInterval: backoff.DefaultInitialInterval,
|
|
||||||
RandomizationFactor: backoff.DefaultRandomizationFactor,
|
|
||||||
Multiplier: backoff.DefaultMultiplier,
|
|
||||||
MaxInterval: 5 * time.Second,
|
|
||||||
MaxElapsedTime: time.Duration(0), //never stop
|
|
||||||
Stop: backoff.Stop,
|
|
||||||
Clock: backoff.SystemClock,
|
|
||||||
}, e.ctx)
|
|
||||||
|
|
||||||
operation := func() error {
|
|
||||||
_, err := e.openPeerConnection(e.wgPort, e.config.WgPrivateKey, peer)
|
|
||||||
e.peerMux.Lock()
|
|
||||||
defer e.peerMux.Unlock()
|
|
||||||
if _, ok := e.conns[peer.WgPubKey]; !ok {
|
|
||||||
log.Debugf("removed connection attempt to peer: %v, not retrying", peer.WgPubKey)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
log.Warnln(err)
|
|
||||||
log.Debugf("retrying connection because of error: %s", err.Error())
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
err := backoff.Retry(operation, backOff)
|
|
||||||
if err != nil {
|
|
||||||
// should actually never happen
|
|
||||||
panic(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Engine) removePeerConnections(peers []string) error {
|
|
||||||
e.peerMux.Lock()
|
|
||||||
defer e.peerMux.Unlock()
|
|
||||||
for _, peer := range peers {
|
|
||||||
err := e.removePeerConnection(peer)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
log.Infof("removed peer %s", p)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *Engine) removeAllPeerConnections() error {
|
func (e *Engine) removeAllPeerConnections() error {
|
||||||
log.Debugf("removing all peer connections")
|
log.Debugf("removing all peer connections")
|
||||||
e.peerMux.Lock()
|
for p := range e.peerConns {
|
||||||
defer e.peerMux.Unlock()
|
err := e.removePeer(p)
|
||||||
for peer := range e.conns {
|
|
||||||
err := e.removePeerConnection(peer)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -196,66 +156,41 @@ func (e *Engine) removeAllPeerConnections() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// removePeerConnection closes existing peer connection and removes peer
|
// removePeer closes an existing peer connection and removes a peer
|
||||||
func (e *Engine) removePeerConnection(peerKey string) error {
|
func (e *Engine) removePeer(peerKey string) error {
|
||||||
conn, exists := e.conns[peerKey]
|
log.Debugf("removing peer from engine %s", peerKey)
|
||||||
if exists && conn != nil {
|
conn, exists := e.peerConns[peerKey]
|
||||||
delete(e.conns, peerKey)
|
if exists {
|
||||||
|
delete(e.peerConns, peerKey)
|
||||||
return conn.Close()
|
return conn.Close()
|
||||||
}
|
}
|
||||||
log.Infof("removed connection to peer %s", peerKey)
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetPeerConnectionStatus returns a connection Status or nil if peer connection wasn't found
|
// GetPeerConnectionStatus returns a connection Status or nil if peer connection wasn't found
|
||||||
func (e *Engine) GetPeerConnectionStatus(peerKey string) *Status {
|
func (e *Engine) GetPeerConnectionStatus(peerKey string) peer.ConnStatus {
|
||||||
e.peerMux.Lock()
|
|
||||||
defer e.peerMux.Unlock()
|
|
||||||
|
|
||||||
conn, exists := e.conns[peerKey]
|
conn, exists := e.peerConns[peerKey]
|
||||||
if exists && conn != nil {
|
if exists && conn != nil {
|
||||||
return &conn.Status
|
return conn.Status()
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return -1
|
||||||
}
|
}
|
||||||
|
|
||||||
// openPeerConnection opens a new remote peer connection
|
// GetConnectedPeers returns a connection Status or nil if peer connection wasn't found
|
||||||
func (e *Engine) openPeerConnection(wgPort int, myKey wgtypes.Key, peer Peer) (*Connection, error) {
|
func (e *Engine) GetConnectedPeers() []string {
|
||||||
e.peerMux.Lock()
|
e.syncMsgMux.Lock()
|
||||||
|
defer e.syncMsgMux.Unlock()
|
||||||
|
|
||||||
remoteKey, _ := wgtypes.ParseKey(peer.WgPubKey)
|
peers := []string{}
|
||||||
connConfig := &ConnConfig{
|
for s, conn := range e.peerConns {
|
||||||
WgListenAddr: fmt.Sprintf("127.0.0.1:%d", wgPort),
|
if conn.Status() == peer.StatusConnected {
|
||||||
WgPeerIP: e.config.WgAddr,
|
peers = append(peers, s)
|
||||||
WgIface: e.config.WgIface,
|
}
|
||||||
WgAllowedIPs: peer.WgAllowedIps,
|
|
||||||
WgKey: myKey,
|
|
||||||
RemoteWgKey: remoteKey,
|
|
||||||
StunTurnURLS: append(e.STUNs, e.TURNs...),
|
|
||||||
iFaceBlackList: e.config.IFaceBlackList,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
signalOffer := func(uFrag string, pwd string) error {
|
return peers
|
||||||
return signalAuth(uFrag, pwd, myKey, remoteKey, e.signal, false)
|
|
||||||
}
|
|
||||||
|
|
||||||
signalAnswer := func(uFrag string, pwd string) error {
|
|
||||||
return signalAuth(uFrag, pwd, myKey, remoteKey, e.signal, true)
|
|
||||||
}
|
|
||||||
signalCandidate := func(candidate ice.Candidate) error {
|
|
||||||
return signalCandidate(candidate, myKey, remoteKey, e.signal)
|
|
||||||
}
|
|
||||||
conn := NewConnection(*connConfig, signalCandidate, signalOffer, signalAnswer)
|
|
||||||
e.conns[remoteKey.String()] = conn
|
|
||||||
e.peerMux.Unlock()
|
|
||||||
|
|
||||||
// blocks until the connection is open (or timeout)
|
|
||||||
err := conn.Open(PeerConnectionTimeout)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
return conn, nil
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func signalCandidate(candidate ice.Candidate, myKey wgtypes.Key, remoteKey wgtypes.Key, s *signal.Client) error {
|
func signalCandidate(candidate ice.Candidate, myKey wgtypes.Key, remoteKey wgtypes.Key, s *signal.Client) error {
|
||||||
@@ -332,6 +267,8 @@ func (e *Engine) receiveManagementEvents() {
|
|||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
// happens if management is unavailable for a long time.
|
||||||
|
// We want to cancel the operation of the whole client
|
||||||
e.cancel()
|
e.cancel()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -379,103 +316,186 @@ func (e *Engine) updateTURNs(turns []*mgmProto.ProtectedHostConfig) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (e *Engine) updatePeers(remotePeers []*mgmProto.RemotePeerConfig) error {
|
func (e *Engine) updatePeers(remotePeers []*mgmProto.RemotePeerConfig) error {
|
||||||
log.Debugf("got peers update from Management Service, updating")
|
log.Debugf("got peers update from Management Service, total peers to connect to = %d", len(remotePeers))
|
||||||
remotePeerMap := make(map[string]struct{})
|
remotePeerMap := make(map[string]struct{})
|
||||||
for _, peer := range remotePeers {
|
for _, p := range remotePeers {
|
||||||
remotePeerMap[peer.GetWgPubKey()] = struct{}{}
|
remotePeerMap[p.GetWgPubKey()] = struct{}{}
|
||||||
}
|
}
|
||||||
|
|
||||||
//remove peers that are no longer available for us
|
//remove peers that are no longer available for us
|
||||||
toRemove := []string{}
|
toRemove := []string{}
|
||||||
for p := range e.conns {
|
for p := range e.peerConns {
|
||||||
if _, ok := remotePeerMap[p]; !ok {
|
if _, ok := remotePeerMap[p]; !ok {
|
||||||
toRemove = append(toRemove, p)
|
toRemove = append(toRemove, p)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
err := e.removePeerConnections(toRemove)
|
err := e.removePeers(toRemove)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// add new peers
|
// add new peers
|
||||||
for _, peer := range remotePeers {
|
for _, p := range remotePeers {
|
||||||
peerKey := peer.GetWgPubKey()
|
peerKey := p.GetWgPubKey()
|
||||||
peerIPs := peer.GetAllowedIps()
|
peerIPs := p.GetAllowedIps()
|
||||||
if _, ok := e.conns[peerKey]; !ok {
|
if _, ok := e.peerConns[peerKey]; !ok {
|
||||||
go e.initializePeer(Peer{
|
conn, err := e.createPeerConn(peerKey, strings.Join(peerIPs, ","))
|
||||||
WgPubKey: peerKey,
|
if err != nil {
|
||||||
WgAllowedIps: strings.Join(peerIPs, ","),
|
return err
|
||||||
})
|
}
|
||||||
|
e.peerConns[peerKey] = conn
|
||||||
|
|
||||||
|
go e.connWorker(conn, peerKey)
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (e Engine) connWorker(conn *peer.Conn, peerKey string) {
|
||||||
|
for {
|
||||||
|
|
||||||
|
// randomize starting time a bit
|
||||||
|
min := 500
|
||||||
|
max := 2000
|
||||||
|
time.Sleep(time.Duration(rand.Intn(max-min)+min) * time.Millisecond)
|
||||||
|
|
||||||
|
// if peer has been removed -> give up
|
||||||
|
if !e.peerExists(peerKey) {
|
||||||
|
log.Infof("peer %s doesn't exist anymore, won't retry connection", peerKey)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if !e.signal.Ready() {
|
||||||
|
log.Infof("signal client isn't ready, skipping connection attempt %s", peerKey)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
err := conn.Open()
|
||||||
|
if err != nil {
|
||||||
|
log.Debugf("connection to peer %s failed: %v", peerKey, err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e Engine) peerExists(peerKey string) bool {
|
||||||
|
e.syncMsgMux.Lock()
|
||||||
|
defer e.syncMsgMux.Unlock()
|
||||||
|
_, ok := e.peerConns[peerKey]
|
||||||
|
return ok
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e Engine) createPeerConn(pubKey string, allowedIPs string) (*peer.Conn, error) {
|
||||||
|
|
||||||
|
var stunTurn []*ice.URL
|
||||||
|
stunTurn = append(stunTurn, e.STUNs...)
|
||||||
|
stunTurn = append(stunTurn, e.TURNs...)
|
||||||
|
|
||||||
|
interfaceBlacklist := make([]string, 0, len(e.config.IFaceBlackList))
|
||||||
|
for k := range e.config.IFaceBlackList {
|
||||||
|
interfaceBlacklist = append(interfaceBlacklist, k)
|
||||||
|
}
|
||||||
|
|
||||||
|
proxyConfig := proxy.Config{
|
||||||
|
RemoteKey: pubKey,
|
||||||
|
WgListenAddr: fmt.Sprintf("127.0.0.1:%d", e.config.WgPort),
|
||||||
|
WgInterface: e.config.WgIface,
|
||||||
|
AllowedIps: allowedIPs,
|
||||||
|
PreSharedKey: e.config.PreSharedKey,
|
||||||
|
}
|
||||||
|
|
||||||
|
// randomize connection timeout
|
||||||
|
timeout := time.Duration(rand.Intn(PeerConnectionTimeoutMax-PeerConnectionTimeoutMin)+PeerConnectionTimeoutMin) * time.Millisecond
|
||||||
|
config := peer.ConnConfig{
|
||||||
|
Key: pubKey,
|
||||||
|
LocalKey: e.config.WgPrivateKey.PublicKey().String(),
|
||||||
|
StunTurn: stunTurn,
|
||||||
|
InterfaceBlackList: interfaceBlacklist,
|
||||||
|
Timeout: timeout,
|
||||||
|
ProxyConfig: proxyConfig,
|
||||||
|
}
|
||||||
|
|
||||||
|
peerConn, err := peer.NewConn(config)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
wgPubKey, err := wgtypes.ParseKey(pubKey)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
signalOffer := func(uFrag string, pwd string) error {
|
||||||
|
return signalAuth(uFrag, pwd, e.config.WgPrivateKey, wgPubKey, e.signal, false)
|
||||||
|
}
|
||||||
|
|
||||||
|
signalCandidate := func(candidate ice.Candidate) error {
|
||||||
|
return signalCandidate(candidate, e.config.WgPrivateKey, wgPubKey, e.signal)
|
||||||
|
}
|
||||||
|
|
||||||
|
signalAnswer := func(uFrag string, pwd string) error {
|
||||||
|
return signalAuth(uFrag, pwd, e.config.WgPrivateKey, wgPubKey, e.signal, true)
|
||||||
|
}
|
||||||
|
|
||||||
|
peerConn.SetSignalCandidate(signalCandidate)
|
||||||
|
peerConn.SetSignalOffer(signalOffer)
|
||||||
|
peerConn.SetSignalAnswer(signalAnswer)
|
||||||
|
|
||||||
|
return peerConn, nil
|
||||||
|
}
|
||||||
|
|
||||||
// receiveSignalEvents connects to the Signal Service event stream to negotiate connection with remote peers
|
// receiveSignalEvents connects to the Signal Service event stream to negotiate connection with remote peers
|
||||||
func (e *Engine) receiveSignalEvents() {
|
func (e *Engine) receiveSignalEvents() {
|
||||||
|
|
||||||
|
go func() {
|
||||||
// connect to a stream of messages coming from the signal server
|
// connect to a stream of messages coming from the signal server
|
||||||
e.signal.Receive(func(msg *sProto.Message) error {
|
err := e.signal.Receive(func(msg *sProto.Message) error {
|
||||||
|
|
||||||
e.syncMsgMux.Lock()
|
e.syncMsgMux.Lock()
|
||||||
defer e.syncMsgMux.Unlock()
|
defer e.syncMsgMux.Unlock()
|
||||||
|
|
||||||
conn := e.conns[msg.Key]
|
conn := e.peerConns[msg.Key]
|
||||||
if conn == nil {
|
if conn == nil {
|
||||||
return fmt.Errorf("wrongly addressed message %s", msg.Key)
|
return fmt.Errorf("wrongly addressed message %s", msg.Key)
|
||||||
}
|
}
|
||||||
|
|
||||||
if conn.Config.RemoteWgKey.String() != msg.Key {
|
|
||||||
return fmt.Errorf("unknown peer %s", msg.Key)
|
|
||||||
}
|
|
||||||
|
|
||||||
switch msg.GetBody().Type {
|
switch msg.GetBody().Type {
|
||||||
case sProto.Body_OFFER:
|
case sProto.Body_OFFER:
|
||||||
remoteCred, err := signal.UnMarshalCredential(msg)
|
remoteCred, err := signal.UnMarshalCredential(msg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
err = conn.OnOffer(IceCredentials{
|
conn.OnRemoteOffer(peer.IceCredentials{
|
||||||
uFrag: remoteCred.UFrag,
|
UFrag: remoteCred.UFrag,
|
||||||
pwd: remoteCred.Pwd,
|
Pwd: remoteCred.Pwd,
|
||||||
})
|
})
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
case sProto.Body_ANSWER:
|
case sProto.Body_ANSWER:
|
||||||
remoteCred, err := signal.UnMarshalCredential(msg)
|
remoteCred, err := signal.UnMarshalCredential(msg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
err = conn.OnAnswer(IceCredentials{
|
conn.OnRemoteAnswer(peer.IceCredentials{
|
||||||
uFrag: remoteCred.UFrag,
|
UFrag: remoteCred.UFrag,
|
||||||
pwd: remoteCred.Pwd,
|
Pwd: remoteCred.Pwd,
|
||||||
})
|
})
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
case sProto.Body_CANDIDATE:
|
case sProto.Body_CANDIDATE:
|
||||||
|
|
||||||
candidate, err := ice.UnmarshalCandidate(msg.GetBody().Payload)
|
candidate, err := ice.UnmarshalCandidate(msg.GetBody().Payload)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("failed on parsing remote candidate %s -> %s", candidate, err)
|
log.Errorf("failed on parsing remote candidate %s -> %s", candidate, err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
conn.OnRemoteCandidate(candidate)
|
||||||
err = conn.OnRemoteCandidate(candidate)
|
|
||||||
if err != nil {
|
|
||||||
log.Errorf("error handling CANDIATE from %s", msg.Key)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
|
if err != nil {
|
||||||
e.signal.WaitConnected()
|
// happens if signal is unavailable for a long time.
|
||||||
|
// We want to cancel the operation of the whole client
|
||||||
|
e.cancel()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
e.signal.WaitStreamConnected()
|
||||||
}
|
}
|
||||||
|
|||||||
208
client/internal/engine_test.go
Normal file
208
client/internal/engine_test.go
Normal file
@@ -0,0 +1,208 @@
|
|||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
log "github.com/sirupsen/logrus"
|
||||||
|
mgmt "github.com/wiretrustee/wiretrustee/management/client"
|
||||||
|
mgmtProto "github.com/wiretrustee/wiretrustee/management/proto"
|
||||||
|
"github.com/wiretrustee/wiretrustee/management/server"
|
||||||
|
signal "github.com/wiretrustee/wiretrustee/signal/client"
|
||||||
|
"github.com/wiretrustee/wiretrustee/signal/proto"
|
||||||
|
signalServer "github.com/wiretrustee/wiretrustee/signal/server"
|
||||||
|
"github.com/wiretrustee/wiretrustee/util"
|
||||||
|
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
|
||||||
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/keepalive"
|
||||||
|
"net"
|
||||||
|
"os"
|
||||||
|
"path/filepath"
|
||||||
|
"runtime"
|
||||||
|
"sync"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
kaep = keepalive.EnforcementPolicy{
|
||||||
|
MinTime: 15 * time.Second,
|
||||||
|
PermitWithoutStream: true,
|
||||||
|
}
|
||||||
|
|
||||||
|
kasp = keepalive.ServerParameters{
|
||||||
|
MaxConnectionIdle: 15 * time.Second,
|
||||||
|
MaxConnectionAgeGrace: 5 * time.Second,
|
||||||
|
Time: 5 * time.Second,
|
||||||
|
Timeout: 2 * time.Second,
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestEngine_MultiplePeers(t *testing.T) {
|
||||||
|
|
||||||
|
//log.SetLevel(log.DebugLevel)
|
||||||
|
|
||||||
|
dir := t.TempDir()
|
||||||
|
|
||||||
|
err := util.CopyFileContents("../testdata/store.json", filepath.Join(dir, "store.json"))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
os.Remove(filepath.Join(dir, "store.json")) //nolint
|
||||||
|
}()
|
||||||
|
|
||||||
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
|
defer cancel()
|
||||||
|
|
||||||
|
signalServer, err := startSignal(10000)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer signalServer.Stop()
|
||||||
|
|
||||||
|
mgmtServer, err := startManagement(33071, &server.Config{
|
||||||
|
Stuns: []*server.Host{},
|
||||||
|
TURNConfig: &server.TURNConfig{},
|
||||||
|
Signal: &server.Host{
|
||||||
|
Proto: "http",
|
||||||
|
URI: "localhost:10000",
|
||||||
|
},
|
||||||
|
Datadir: dir,
|
||||||
|
HttpConfig: nil,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer mgmtServer.Stop()
|
||||||
|
|
||||||
|
setupKey := "A2C8E62B-38F5-4553-B31E-DD66C696CEBB"
|
||||||
|
|
||||||
|
mu := sync.Mutex{}
|
||||||
|
engines := []*Engine{}
|
||||||
|
numPeers := 10
|
||||||
|
wg := sync.WaitGroup{}
|
||||||
|
wg.Add(numPeers)
|
||||||
|
// create and start peers
|
||||||
|
for i := 0; i < numPeers; i++ {
|
||||||
|
j := i
|
||||||
|
go func() {
|
||||||
|
engine, err := createEngine(ctx, cancel, setupKey, j)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
mu.Lock()
|
||||||
|
defer mu.Unlock()
|
||||||
|
engine.Start() //nolint
|
||||||
|
engines = append(engines, engine)
|
||||||
|
wg.Done()
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
// wait until all have been created and started
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
// check whether all the peer have expected peers connected
|
||||||
|
expectedConnected := numPeers * (numPeers - 1)
|
||||||
|
for {
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
totalConnected := 0
|
||||||
|
for _, engine := range engines {
|
||||||
|
totalConnected = totalConnected + len(engine.GetConnectedPeers())
|
||||||
|
}
|
||||||
|
if totalConnected == expectedConnected {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
log.Infof("total connected=%d", totalConnected)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func createEngine(ctx context.Context, cancel context.CancelFunc, setupKey string, i int) (*Engine, error) {
|
||||||
|
|
||||||
|
key, err := wgtypes.GenerateKey()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
mgmtClient, err := mgmt.NewClient(ctx, "localhost:33071", key, false)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
signalClient, err := signal.NewClient(ctx, "localhost:10000", key, false)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
publicKey, err := mgmtClient.GetServerPublicKey()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := mgmtClient.Register(*publicKey, setupKey)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
var ifaceName string
|
||||||
|
if runtime.GOOS == "darwin" {
|
||||||
|
ifaceName = fmt.Sprintf("utun1%d", i)
|
||||||
|
} else {
|
||||||
|
ifaceName = fmt.Sprintf("wt%d", i)
|
||||||
|
}
|
||||||
|
|
||||||
|
conf := &EngineConfig{
|
||||||
|
WgIface: ifaceName,
|
||||||
|
WgAddr: resp.PeerConfig.Address,
|
||||||
|
WgPrivateKey: key,
|
||||||
|
WgPort: 33100 + i,
|
||||||
|
}
|
||||||
|
|
||||||
|
return NewEngine(signalClient, mgmtClient, conf, cancel, ctx), nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func startSignal(port int) (*grpc.Server, error) {
|
||||||
|
s := grpc.NewServer(grpc.KeepaliveEnforcementPolicy(kaep), grpc.KeepaliveParams(kasp))
|
||||||
|
|
||||||
|
lis, err := net.Listen("tcp", fmt.Sprintf(":%d", port))
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("failed to listen: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
proto.RegisterSignalExchangeServer(s, signalServer.NewServer())
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
if err = s.Serve(lis); err != nil {
|
||||||
|
log.Fatalf("failed to serve: %v", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
return s, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func startManagement(port int, config *server.Config) (*grpc.Server, error) {
|
||||||
|
|
||||||
|
lis, err := net.Listen("tcp", fmt.Sprintf("localhost:%d", port))
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
s := grpc.NewServer(grpc.KeepaliveEnforcementPolicy(kaep), grpc.KeepaliveParams(kasp))
|
||||||
|
store, err := server.NewStore(config.Datadir)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatalf("failed creating a store: %s: %v", config.Datadir, err)
|
||||||
|
}
|
||||||
|
peersUpdateManager := server.NewPeersUpdateManager()
|
||||||
|
accountManager := server.NewManager(store, peersUpdateManager)
|
||||||
|
turnManager := server.NewTimeBasedAuthSecretsManager(peersUpdateManager, config.TURNConfig)
|
||||||
|
mgmtServer, err := server.NewServer(config, accountManager, peersUpdateManager, turnManager)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
mgmtProto.RegisterManagementServiceServer(s, mgmtServer)
|
||||||
|
go func() {
|
||||||
|
if err = s.Serve(lis); err != nil {
|
||||||
|
log.Fatalf("failed to serve: %v", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
return s, nil
|
||||||
|
}
|
||||||
421
client/internal/peer/conn.go
Normal file
421
client/internal/peer/conn.go
Normal file
@@ -0,0 +1,421 @@
|
|||||||
|
package peer
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"github.com/pion/ice/v2"
|
||||||
|
log "github.com/sirupsen/logrus"
|
||||||
|
"github.com/wiretrustee/wiretrustee/client/internal/proxy"
|
||||||
|
"net"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ConnConfig is a peer Connection configuration
|
||||||
|
type ConnConfig struct {
|
||||||
|
|
||||||
|
// Key is a public key of a remote peer
|
||||||
|
Key string
|
||||||
|
// LocalKey is a public key of a local peer
|
||||||
|
LocalKey string
|
||||||
|
|
||||||
|
// StunTurn is a list of STUN and TURN URLs
|
||||||
|
StunTurn []*ice.URL
|
||||||
|
|
||||||
|
// InterfaceBlackList is a list of machine interfaces that should be filtered out by ICE Candidate gathering
|
||||||
|
// (e.g. if eth0 is in the list, host candidate of this interface won't be used)
|
||||||
|
InterfaceBlackList []string
|
||||||
|
|
||||||
|
Timeout time.Duration
|
||||||
|
|
||||||
|
ProxyConfig proxy.Config
|
||||||
|
}
|
||||||
|
|
||||||
|
// IceCredentials ICE protocol credentials struct
|
||||||
|
type IceCredentials struct {
|
||||||
|
UFrag string
|
||||||
|
Pwd string
|
||||||
|
}
|
||||||
|
|
||||||
|
type Conn struct {
|
||||||
|
config ConnConfig
|
||||||
|
mu sync.Mutex
|
||||||
|
|
||||||
|
// signalCandidate is a handler function to signal remote peer about local connection candidate
|
||||||
|
signalCandidate func(candidate ice.Candidate) error
|
||||||
|
// signalOffer is a handler function to signal remote peer our connection offer (credentials)
|
||||||
|
signalOffer func(uFrag string, pwd string) error
|
||||||
|
signalAnswer func(uFrag string, pwd string) error
|
||||||
|
|
||||||
|
// remoteOffersCh is a channel used to wait for remote credentials to proceed with the connection
|
||||||
|
remoteOffersCh chan IceCredentials
|
||||||
|
// remoteAnswerCh is a channel used to wait for remote credentials answer (confirmation of our offer) to proceed with the connection
|
||||||
|
remoteAnswerCh chan IceCredentials
|
||||||
|
closeCh chan struct{}
|
||||||
|
ctx context.Context
|
||||||
|
notifyDisconnected context.CancelFunc
|
||||||
|
|
||||||
|
agent *ice.Agent
|
||||||
|
status ConnStatus
|
||||||
|
|
||||||
|
proxy proxy.Proxy
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewConn creates a new not opened Conn to the remote peer.
|
||||||
|
// To establish a connection run Conn.Open
|
||||||
|
func NewConn(config ConnConfig) (*Conn, error) {
|
||||||
|
return &Conn{
|
||||||
|
config: config,
|
||||||
|
mu: sync.Mutex{},
|
||||||
|
status: StatusDisconnected,
|
||||||
|
closeCh: make(chan struct{}),
|
||||||
|
remoteOffersCh: make(chan IceCredentials),
|
||||||
|
remoteAnswerCh: make(chan IceCredentials),
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// interfaceFilter is a function passed to ICE Agent to filter out blacklisted interfaces
|
||||||
|
func interfaceFilter(blackList []string) func(string) bool {
|
||||||
|
var blackListMap map[string]struct{}
|
||||||
|
if blackList != nil {
|
||||||
|
blackListMap = make(map[string]struct{})
|
||||||
|
for _, s := range blackList {
|
||||||
|
blackListMap[s] = struct{}{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return func(iFace string) bool {
|
||||||
|
if len(blackListMap) == 0 {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
_, ok := blackListMap[iFace]
|
||||||
|
return !ok
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (conn *Conn) reCreateAgent() error {
|
||||||
|
conn.mu.Lock()
|
||||||
|
defer conn.mu.Unlock()
|
||||||
|
|
||||||
|
failedTimeout := 6 * time.Second
|
||||||
|
var err error
|
||||||
|
conn.agent, err = ice.NewAgent(&ice.AgentConfig{
|
||||||
|
MulticastDNSMode: ice.MulticastDNSModeDisabled,
|
||||||
|
NetworkTypes: []ice.NetworkType{ice.NetworkTypeUDP4},
|
||||||
|
Urls: conn.config.StunTurn,
|
||||||
|
CandidateTypes: []ice.CandidateType{ice.CandidateTypeHost, ice.CandidateTypeServerReflexive, ice.CandidateTypeRelay},
|
||||||
|
FailedTimeout: &failedTimeout,
|
||||||
|
InterfaceFilter: interfaceFilter(conn.config.InterfaceBlackList),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = conn.agent.OnCandidate(conn.onICECandidate)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = conn.agent.OnConnectionStateChange(conn.onICEConnectionStateChange)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = conn.agent.OnSelectedCandidatePairChange(conn.onICESelectedCandidatePair)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Open opens connection to the remote peer starting ICE candidate gathering process.
|
||||||
|
// Blocks until connection has been closed or connection timeout.
|
||||||
|
// ConnStatus will be set accordingly
|
||||||
|
func (conn *Conn) Open() error {
|
||||||
|
log.Debugf("trying to connect to peer %s", conn.config.Key)
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
err := conn.cleanup()
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("error while cleaning up peer connection %s: %v", conn.config.Key, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
err := conn.reCreateAgent()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = conn.sendOffer()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debugf("connection offer sent to peer %s, waiting for the confirmation", conn.config.Key)
|
||||||
|
|
||||||
|
// Only continue once we got a connection confirmation from the remote peer.
|
||||||
|
// The connection timeout could have happened before a confirmation received from the remote.
|
||||||
|
// The connection could have also been closed externally (e.g. when we received an update from the management that peer shouldn't be connected)
|
||||||
|
var remoteCredentials IceCredentials
|
||||||
|
select {
|
||||||
|
case remoteCredentials = <-conn.remoteOffersCh:
|
||||||
|
// received confirmation from the remote peer -> ready to proceed
|
||||||
|
err = conn.sendAnswer()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
case remoteCredentials = <-conn.remoteAnswerCh:
|
||||||
|
case <-time.After(conn.config.Timeout):
|
||||||
|
return NewConnectionTimeoutError(conn.config.Key, conn.config.Timeout)
|
||||||
|
case <-conn.closeCh:
|
||||||
|
// closed externally
|
||||||
|
return NewConnectionClosedError(conn.config.Key)
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debugf("received connection confirmation from peer %s", conn.config.Key)
|
||||||
|
|
||||||
|
//at this point we received offer/answer and we are ready to gather candidates
|
||||||
|
conn.mu.Lock()
|
||||||
|
conn.status = StatusConnecting
|
||||||
|
conn.ctx, conn.notifyDisconnected = context.WithCancel(context.Background())
|
||||||
|
defer conn.notifyDisconnected()
|
||||||
|
conn.mu.Unlock()
|
||||||
|
|
||||||
|
err = conn.agent.GatherCandidates()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// will block until connection succeeded
|
||||||
|
// but it won't release if ICE Agent went into Disconnected or Failed state,
|
||||||
|
// so we have to cancel it with the provided context once agent detected a broken connection
|
||||||
|
isControlling := conn.config.LocalKey > conn.config.Key
|
||||||
|
var remoteConn *ice.Conn
|
||||||
|
if isControlling {
|
||||||
|
remoteConn, err = conn.agent.Dial(conn.ctx, remoteCredentials.UFrag, remoteCredentials.Pwd)
|
||||||
|
} else {
|
||||||
|
remoteConn, err = conn.agent.Accept(conn.ctx, remoteCredentials.UFrag, remoteCredentials.Pwd)
|
||||||
|
}
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
// the connection has been established successfully so we are ready to start the proxy
|
||||||
|
err = conn.startProxy(remoteConn)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Infof("connected to peer %s [laddr <-> raddr] [%s <-> %s]", conn.config.Key, remoteConn.LocalAddr().String(), remoteConn.RemoteAddr().String())
|
||||||
|
|
||||||
|
// wait until connection disconnected or has been closed externally (upper layer, e.g. engine)
|
||||||
|
select {
|
||||||
|
case <-conn.closeCh:
|
||||||
|
// closed externally
|
||||||
|
return NewConnectionClosedError(conn.config.Key)
|
||||||
|
case <-conn.ctx.Done():
|
||||||
|
// disconnected from the remote peer
|
||||||
|
return NewConnectionDisconnectedError(conn.config.Key)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// startProxy starts proxying traffic from/to local Wireguard and sets connection status to StatusConnected
|
||||||
|
func (conn *Conn) startProxy(remoteConn net.Conn) error {
|
||||||
|
conn.mu.Lock()
|
||||||
|
defer conn.mu.Unlock()
|
||||||
|
|
||||||
|
conn.proxy = proxy.NewWireguardProxy(conn.config.ProxyConfig)
|
||||||
|
err := conn.proxy.Start(remoteConn)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
conn.status = StatusConnected
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// cleanup closes all open resources and sets status to StatusDisconnected
|
||||||
|
func (conn *Conn) cleanup() error {
|
||||||
|
log.Debugf("trying to cleanup %s", conn.config.Key)
|
||||||
|
conn.mu.Lock()
|
||||||
|
defer conn.mu.Unlock()
|
||||||
|
|
||||||
|
if conn.agent != nil {
|
||||||
|
err := conn.agent.Close()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
conn.agent = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if conn.proxy != nil {
|
||||||
|
err := conn.proxy.Close()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
conn.proxy = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if conn.notifyDisconnected != nil {
|
||||||
|
conn.notifyDisconnected()
|
||||||
|
conn.notifyDisconnected = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
conn.status = StatusDisconnected
|
||||||
|
|
||||||
|
log.Debugf("cleaned up connection to peer %s", conn.config.Key)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSignalOffer sets a handler function to be triggered by Conn when a new connection offer has to be signalled to the remote peer
|
||||||
|
func (conn *Conn) SetSignalOffer(handler func(uFrag string, pwd string) error) {
|
||||||
|
conn.signalOffer = handler
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSignalAnswer sets a handler function to be triggered by Conn when a new connection answer has to be signalled to the remote peer
|
||||||
|
func (conn *Conn) SetSignalAnswer(handler func(uFrag string, pwd string) error) {
|
||||||
|
conn.signalAnswer = handler
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetSignalCandidate sets a handler function to be triggered by Conn when a new ICE local connection candidate has to be signalled to the remote peer
|
||||||
|
func (conn *Conn) SetSignalCandidate(handler func(candidate ice.Candidate) error) {
|
||||||
|
conn.signalCandidate = handler
|
||||||
|
}
|
||||||
|
|
||||||
|
// onICECandidate is a callback attached to an ICE Agent to receive new local connection candidates
|
||||||
|
// and then signals them to the remote peer
|
||||||
|
func (conn *Conn) onICECandidate(candidate ice.Candidate) {
|
||||||
|
if candidate != nil {
|
||||||
|
//log.Debugf("discovered local candidate %s", candidate.String())
|
||||||
|
go func() {
|
||||||
|
err := conn.signalCandidate(candidate)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("failed signaling candidate to the remote peer %s %s", conn.config.Key, err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (conn *Conn) onICESelectedCandidatePair(c1 ice.Candidate, c2 ice.Candidate) {
|
||||||
|
log.Debugf("selected candidate pair [local <-> remote] -> [%s <-> %s], peer %s", conn.config.Key,
|
||||||
|
c1.String(), c2.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
// onICEConnectionStateChange registers callback of an ICE Agent to track connection state
|
||||||
|
func (conn *Conn) onICEConnectionStateChange(state ice.ConnectionState) {
|
||||||
|
log.Debugf("peer %s ICE ConnectionState has changed to %s", conn.config.Key, state.String())
|
||||||
|
if state == ice.ConnectionStateFailed || state == ice.ConnectionStateDisconnected {
|
||||||
|
conn.notifyDisconnected()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (conn *Conn) sendAnswer() error {
|
||||||
|
conn.mu.Lock()
|
||||||
|
defer conn.mu.Unlock()
|
||||||
|
|
||||||
|
localUFrag, localPwd, err := conn.agent.GetLocalUserCredentials()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Debugf("sending asnwer to %s", conn.config.Key)
|
||||||
|
err = conn.signalAnswer(localUFrag, localPwd)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// sendOffer prepares local user credentials and signals them to the remote peer
|
||||||
|
func (conn *Conn) sendOffer() error {
|
||||||
|
conn.mu.Lock()
|
||||||
|
defer conn.mu.Unlock()
|
||||||
|
|
||||||
|
localUFrag, localPwd, err := conn.agent.GetLocalUserCredentials()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
err = conn.signalOffer(localUFrag, localPwd)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Close closes this peer Conn issuing a close event to the Conn closeCh
|
||||||
|
func (conn *Conn) Close() error {
|
||||||
|
conn.mu.Lock()
|
||||||
|
defer conn.mu.Unlock()
|
||||||
|
select {
|
||||||
|
case conn.closeCh <- struct{}{}:
|
||||||
|
default:
|
||||||
|
// probably could happen when peer has been added and removed right after not even starting to connect
|
||||||
|
// todo further investigate
|
||||||
|
// this really happens due to unordered messages coming from management
|
||||||
|
// more importantly it causes inconsistency -> 2 Conn objects for the same peer
|
||||||
|
// e.g. this flow:
|
||||||
|
// update from management has peers: [1,2,3,4]
|
||||||
|
// engine creates a Conn for peers: [1,2,3,4] and schedules Open in ~1sec
|
||||||
|
// before conn.Open() another update from management arrives with peers: [1,2,3]
|
||||||
|
// engine removes peer 4 and calls conn.Close() which does nothing (this default clause)
|
||||||
|
// before conn.Open() another update from management arrives with peers: [1,2,3,4,5]
|
||||||
|
// engine adds a new Conn for 4 and 5
|
||||||
|
// therefore peer 4 has 2 Conn objects
|
||||||
|
log.Warnf("closing not started coonection %s", conn.config.Key)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Status returns current status of the Conn
|
||||||
|
func (conn *Conn) Status() ConnStatus {
|
||||||
|
conn.mu.Lock()
|
||||||
|
defer conn.mu.Unlock()
|
||||||
|
return conn.status
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnRemoteOffer handles an offer from the remote peer
|
||||||
|
// can block until Conn restarts
|
||||||
|
func (conn *Conn) OnRemoteOffer(remoteAuth IceCredentials) {
|
||||||
|
log.Debugf("OnRemoteOffer from peer %s on status %s", conn.config.Key, conn.status.String())
|
||||||
|
|
||||||
|
select {
|
||||||
|
case conn.remoteOffersCh <- remoteAuth:
|
||||||
|
default:
|
||||||
|
log.Debugf("OnRemoteOffer skipping message from peer %s on status %s because is not ready", conn.config.Key, conn.status.String())
|
||||||
|
//connection might not be ready yet to receive so we ignore the message
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnRemoteAnswer handles an offer from the remote peer
|
||||||
|
// can block until Conn restarts
|
||||||
|
func (conn *Conn) OnRemoteAnswer(remoteAuth IceCredentials) {
|
||||||
|
log.Debugf("OnRemoteAnswer from peer %s on status %s", conn.config.Key, conn.status.String())
|
||||||
|
|
||||||
|
select {
|
||||||
|
case conn.remoteAnswerCh <- remoteAuth:
|
||||||
|
default:
|
||||||
|
//connection might not be ready yet to receive so we ignore the message
|
||||||
|
log.Debugf("OnRemoteAnswer skipping message from peer %s on status %s because is not ready", conn.config.Key, conn.status.String())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnRemoteCandidate Handles ICE connection Candidate provided by the remote peer.
|
||||||
|
func (conn *Conn) OnRemoteCandidate(candidate ice.Candidate) {
|
||||||
|
log.Debugf("OnRemoteCandidate from peer %s -> %s", conn.config.Key, candidate.String())
|
||||||
|
go func() {
|
||||||
|
conn.mu.Lock()
|
||||||
|
defer conn.mu.Unlock()
|
||||||
|
|
||||||
|
if conn.agent == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
err := conn.agent.AddRemoteCandidate(candidate)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("error while handling remote candidate from peer %s", conn.config.Key)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
56
client/internal/peer/error.go
Normal file
56
client/internal/peer/error.go
Normal file
@@ -0,0 +1,56 @@
|
|||||||
|
package peer
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ConnectionTimeoutError is an error indicating that a peer Conn has been timed out
|
||||||
|
type ConnectionTimeoutError struct {
|
||||||
|
peer string
|
||||||
|
timeout time.Duration
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *ConnectionTimeoutError) Error() string {
|
||||||
|
return fmt.Sprintf("connection to peer %s timed out after %s", e.peer, e.timeout.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewConnectionTimeoutError creates a new ConnectionTimeoutError error
|
||||||
|
func NewConnectionTimeoutError(peer string, timeout time.Duration) error {
|
||||||
|
return &ConnectionTimeoutError{
|
||||||
|
peer: peer,
|
||||||
|
timeout: timeout,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConnectionClosedError is an error indicating that a peer Conn has been forcefully closed
|
||||||
|
type ConnectionClosedError struct {
|
||||||
|
peer string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *ConnectionClosedError) Error() string {
|
||||||
|
return fmt.Sprintf("connection to peer %s has been closed", e.peer)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewConnectionClosedError creates a new ConnectionClosedError error
|
||||||
|
func NewConnectionClosedError(peer string) error {
|
||||||
|
return &ConnectionClosedError{
|
||||||
|
peer: peer,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ConnectionDisconnectedError is an error indicating that a peer Conn has ctx from the remote
|
||||||
|
type ConnectionDisconnectedError struct {
|
||||||
|
peer string
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *ConnectionDisconnectedError) Error() string {
|
||||||
|
return fmt.Sprintf("disconnected from peer %s", e.peer)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewConnectionDisconnectedError creates a new ConnectionDisconnectedError error
|
||||||
|
func NewConnectionDisconnectedError(peer string) error {
|
||||||
|
return &ConnectionDisconnectedError{
|
||||||
|
peer: peer,
|
||||||
|
}
|
||||||
|
}
|
||||||
25
client/internal/peer/status.go
Normal file
25
client/internal/peer/status.go
Normal file
@@ -0,0 +1,25 @@
|
|||||||
|
package peer
|
||||||
|
|
||||||
|
import log "github.com/sirupsen/logrus"
|
||||||
|
|
||||||
|
type ConnStatus int
|
||||||
|
|
||||||
|
func (s ConnStatus) String() string {
|
||||||
|
switch s {
|
||||||
|
case StatusConnecting:
|
||||||
|
return "StatusConnecting"
|
||||||
|
case StatusConnected:
|
||||||
|
return "StatusConnected"
|
||||||
|
case StatusDisconnected:
|
||||||
|
return "StatusDisconnected"
|
||||||
|
default:
|
||||||
|
log.Errorf("unknown status: %d", s)
|
||||||
|
return "INVALID_PEER_CONNECTION_STATUS"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
const (
|
||||||
|
StatusConnected = iota
|
||||||
|
StatusConnecting
|
||||||
|
StatusDisconnected
|
||||||
|
)
|
||||||
68
client/internal/proxy/dummy.go
Normal file
68
client/internal/proxy/dummy.go
Normal file
@@ -0,0 +1,68 @@
|
|||||||
|
package proxy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
log "github.com/sirupsen/logrus"
|
||||||
|
"net"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
// DummyProxy just sends pings to the RemoteKey peer and reads responses
|
||||||
|
type DummyProxy struct {
|
||||||
|
conn net.Conn
|
||||||
|
remote string
|
||||||
|
ctx context.Context
|
||||||
|
cancel context.CancelFunc
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewDummyProxy(remote string) *DummyProxy {
|
||||||
|
p := &DummyProxy{remote: remote}
|
||||||
|
p.ctx, p.cancel = context.WithCancel(context.Background())
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *DummyProxy) Close() error {
|
||||||
|
p.cancel()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *DummyProxy) Start(remoteConn net.Conn) error {
|
||||||
|
p.conn = remoteConn
|
||||||
|
go func() {
|
||||||
|
buf := make([]byte, 1500)
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-p.ctx.Done():
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
_, err := p.conn.Read(buf)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("error while reading RemoteKey %s proxy %v", p.remote, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
//log.Debugf("received %s from %s", string(buf[:n]), p.remote)
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-p.ctx.Done():
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
_, err := p.conn.Write([]byte("hello"))
|
||||||
|
//log.Debugf("sent ping to %s", p.remote)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("error while writing to RemoteKey %s proxy %v", p.remote, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
time.Sleep(5 * time.Second)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}()
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
24
client/internal/proxy/proxy.go
Normal file
24
client/internal/proxy/proxy.go
Normal file
@@ -0,0 +1,24 @@
|
|||||||
|
package proxy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
|
||||||
|
"io"
|
||||||
|
"net"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
const DefaultWgKeepAlive = 25 * time.Second
|
||||||
|
|
||||||
|
type Config struct {
|
||||||
|
WgListenAddr string
|
||||||
|
RemoteKey string
|
||||||
|
WgInterface string
|
||||||
|
AllowedIps string
|
||||||
|
PreSharedKey *wgtypes.Key
|
||||||
|
}
|
||||||
|
|
||||||
|
type Proxy interface {
|
||||||
|
io.Closer
|
||||||
|
// Start creates a local remoteConn and starts proxying data from/to remoteConn
|
||||||
|
Start(remoteConn net.Conn) error
|
||||||
|
}
|
||||||
121
client/internal/proxy/wireguard.go
Normal file
121
client/internal/proxy/wireguard.go
Normal file
@@ -0,0 +1,121 @@
|
|||||||
|
package proxy
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
log "github.com/sirupsen/logrus"
|
||||||
|
"github.com/wiretrustee/wiretrustee/iface"
|
||||||
|
"net"
|
||||||
|
)
|
||||||
|
|
||||||
|
// WireguardProxy proxies
|
||||||
|
type WireguardProxy struct {
|
||||||
|
ctx context.Context
|
||||||
|
cancel context.CancelFunc
|
||||||
|
|
||||||
|
config Config
|
||||||
|
|
||||||
|
remoteConn net.Conn
|
||||||
|
localConn net.Conn
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewWireguardProxy(config Config) *WireguardProxy {
|
||||||
|
p := &WireguardProxy{config: config}
|
||||||
|
p.ctx, p.cancel = context.WithCancel(context.Background())
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *WireguardProxy) updateEndpoint() error {
|
||||||
|
// add local proxy connection as a Wireguard peer
|
||||||
|
err := iface.UpdatePeer(p.config.WgInterface, p.config.RemoteKey, p.config.AllowedIps, DefaultWgKeepAlive,
|
||||||
|
p.localConn.LocalAddr().String(), p.config.PreSharedKey)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *WireguardProxy) Start(remoteConn net.Conn) error {
|
||||||
|
p.remoteConn = remoteConn
|
||||||
|
|
||||||
|
var err error
|
||||||
|
p.localConn, err = net.Dial("udp", p.config.WgListenAddr)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("failed dialing to local Wireguard port %s", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = p.updateEndpoint()
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("error while updating Wireguard peer endpoint [%s] %v", p.config.RemoteKey, err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
go p.proxyToRemote()
|
||||||
|
go p.proxyToLocal()
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (p *WireguardProxy) Close() error {
|
||||||
|
p.cancel()
|
||||||
|
if c := p.localConn; c != nil {
|
||||||
|
err := p.localConn.Close()
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
err := iface.RemovePeer(p.config.WgInterface, p.config.RemoteKey)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// proxyToRemote proxies everything from Wireguard to the RemoteKey peer
|
||||||
|
// blocks
|
||||||
|
func (p *WireguardProxy) proxyToRemote() {
|
||||||
|
|
||||||
|
buf := make([]byte, 1500)
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-p.ctx.Done():
|
||||||
|
log.Debugf("stopped proxying to remote peer %s due to closed connection", p.config.RemoteKey)
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
n, err := p.localConn.Read(buf)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = p.remoteConn.Write(buf[:n])
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// proxyToLocal proxies everything from the RemoteKey peer to local Wireguard
|
||||||
|
// blocks
|
||||||
|
func (p *WireguardProxy) proxyToLocal() {
|
||||||
|
|
||||||
|
buf := make([]byte, 1500)
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-p.ctx.Done():
|
||||||
|
log.Debugf("stopped proxying from remote peer %s due to closed connection", p.config.RemoteKey)
|
||||||
|
return
|
||||||
|
default:
|
||||||
|
n, err := p.remoteConn.Read(buf)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = p.localConn.Write(buf[:n])
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -1,132 +0,0 @@
|
|||||||
package internal
|
|
||||||
|
|
||||||
import (
|
|
||||||
ice "github.com/pion/ice/v2"
|
|
||||||
log "github.com/sirupsen/logrus"
|
|
||||||
"github.com/wiretrustee/wiretrustee/iface"
|
|
||||||
"net"
|
|
||||||
)
|
|
||||||
|
|
||||||
// WgProxy an instance of an instance of the Connection Wireguard Proxy
|
|
||||||
type WgProxy struct {
|
|
||||||
iface string
|
|
||||||
remoteKey string
|
|
||||||
allowedIps string
|
|
||||||
wgAddr string
|
|
||||||
close chan struct{}
|
|
||||||
wgConn net.Conn
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewWgProxy creates a new Connection Wireguard Proxy
|
|
||||||
func NewWgProxy(iface string, remoteKey string, allowedIps string, wgAddr string) *WgProxy {
|
|
||||||
return &WgProxy{
|
|
||||||
iface: iface,
|
|
||||||
remoteKey: remoteKey,
|
|
||||||
allowedIps: allowedIps,
|
|
||||||
wgAddr: wgAddr,
|
|
||||||
close: make(chan struct{}),
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close closes the proxy
|
|
||||||
func (p *WgProxy) Close() error {
|
|
||||||
|
|
||||||
close(p.close)
|
|
||||||
if c := p.wgConn; c != nil {
|
|
||||||
err := p.wgConn.Close()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
err := iface.RemovePeer(p.iface, p.remoteKey)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// StartLocal configure the interface with a peer using a direct IP:Port endpoint to the remote host
|
|
||||||
func (p *WgProxy) StartLocal(host string) error {
|
|
||||||
err := iface.UpdatePeer(p.iface, p.remoteKey, p.allowedIps, DefaultWgKeepAlive, host)
|
|
||||||
if err != nil {
|
|
||||||
log.Errorf("error while configuring Wireguard peer [%s] %s", p.remoteKey, err.Error())
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Start starts a new proxy using the ICE connection
|
|
||||||
func (p *WgProxy) Start(remoteConn *ice.Conn) error {
|
|
||||||
|
|
||||||
wgConn, err := net.Dial("udp", p.wgAddr)
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("failed dialing to local Wireguard port %s", err)
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
p.wgConn = wgConn
|
|
||||||
// add local proxy connection as a Wireguard peer
|
|
||||||
err = iface.UpdatePeer(p.iface, p.remoteKey, p.allowedIps, DefaultWgKeepAlive,
|
|
||||||
wgConn.LocalAddr().String())
|
|
||||||
if err != nil {
|
|
||||||
log.Errorf("error while configuring Wireguard peer [%s] %s", p.remoteKey, err.Error())
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
go func() { p.proxyToRemotePeer(remoteConn) }()
|
|
||||||
go func() { p.proxyToLocalWireguard(remoteConn) }()
|
|
||||||
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// proxyToRemotePeer proxies everything from Wireguard to the remote peer
|
|
||||||
// blocks
|
|
||||||
func (p *WgProxy) proxyToRemotePeer(remoteConn *ice.Conn) {
|
|
||||||
|
|
||||||
buf := make([]byte, 1500)
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-p.close:
|
|
||||||
log.Debugf("stopped proxying from remote peer %s due to closed connection", p.remoteKey)
|
|
||||||
return
|
|
||||||
default:
|
|
||||||
n, err := p.wgConn.Read(buf)
|
|
||||||
if err != nil {
|
|
||||||
//log.Warnln("failed reading from peer: ", err.Error())
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = remoteConn.Write(buf[:n])
|
|
||||||
if err != nil {
|
|
||||||
//log.Warnln("failed writing to remote peer: ", err.Error())
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// proxyToLocalWireguard proxies everything from the remote peer to local Wireguard
|
|
||||||
// blocks
|
|
||||||
func (p *WgProxy) proxyToLocalWireguard(remoteConn *ice.Conn) {
|
|
||||||
|
|
||||||
buf := make([]byte, 1500)
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-p.close:
|
|
||||||
log.Debugf("stopped proxying from remote peer %s due to closed connection", p.remoteKey)
|
|
||||||
return
|
|
||||||
default:
|
|
||||||
n, err := remoteConn.Read(buf)
|
|
||||||
if err != nil {
|
|
||||||
//log.Errorf("failed reading from remote connection %s", err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
_, err = p.wgConn.Write(buf[:n])
|
|
||||||
if err != nil {
|
|
||||||
//log.Errorf("failed writing to local Wireguard instance %s", err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
13
client/testdata/store.json
vendored
13
client/testdata/store.json
vendored
@@ -11,6 +11,7 @@
|
|||||||
"ExpiresAt": "2321-09-18T20:46:20.005936822+02:00",
|
"ExpiresAt": "2321-09-18T20:46:20.005936822+02:00",
|
||||||
"Revoked": false,
|
"Revoked": false,
|
||||||
"UsedTimes": 0
|
"UsedTimes": 0
|
||||||
|
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
"Network": {
|
"Network": {
|
||||||
@@ -21,7 +22,17 @@
|
|||||||
},
|
},
|
||||||
"Dns": null
|
"Dns": null
|
||||||
},
|
},
|
||||||
"Peers": {}
|
"Peers": {},
|
||||||
|
"Users": {
|
||||||
|
"edafee4e-63fb-11ec-90d6-0242ac120003": {
|
||||||
|
"Id": "edafee4e-63fb-11ec-90d6-0242ac120003",
|
||||||
|
"Role": "admin"
|
||||||
|
},
|
||||||
|
"f4f6d672-63fb-11ec-90d6-0242ac120003": {
|
||||||
|
"Id": "f4f6d672-63fb-11ec-90d6-0242ac120003",
|
||||||
|
"Role": "user"
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
59
go.mod
59
go.mod
@@ -1,27 +1,60 @@
|
|||||||
module github.com/wiretrustee/wiretrustee
|
module github.com/wiretrustee/wiretrustee
|
||||||
|
|
||||||
go 1.16
|
go 1.17
|
||||||
|
|
||||||
require (
|
require (
|
||||||
github.com/cenkalti/backoff/v4 v4.1.0
|
github.com/cenkalti/backoff/v4 v4.1.2
|
||||||
github.com/golang-jwt/jwt v3.2.2+incompatible
|
github.com/golang-jwt/jwt v3.2.2+incompatible
|
||||||
github.com/golang/protobuf v1.5.2
|
github.com/golang/protobuf v1.5.2
|
||||||
github.com/google/uuid v1.2.0
|
github.com/google/uuid v1.3.0
|
||||||
github.com/gorilla/mux v1.8.0
|
github.com/gorilla/mux v1.8.0
|
||||||
github.com/kardianos/service v1.2.1-0.20210728001519-a323c3813bc7
|
github.com/kardianos/service v1.2.1-0.20210728001519-a323c3813bc7 //keep this version otherwise wiretrustee up command breaks
|
||||||
github.com/onsi/ginkgo v1.16.4
|
github.com/onsi/ginkgo v1.16.5
|
||||||
github.com/onsi/gomega v1.13.0
|
github.com/onsi/gomega v1.17.0
|
||||||
github.com/pion/ice/v2 v2.1.7
|
github.com/pion/ice/v2 v2.1.17
|
||||||
github.com/rs/cors v1.8.0
|
github.com/rs/cors v1.8.0
|
||||||
github.com/sirupsen/logrus v1.7.0
|
github.com/sirupsen/logrus v1.8.1
|
||||||
github.com/spf13/cobra v1.1.3
|
github.com/spf13/cobra v1.3.0
|
||||||
|
github.com/spf13/pflag v1.0.5
|
||||||
github.com/vishvananda/netlink v1.1.0
|
github.com/vishvananda/netlink v1.1.0
|
||||||
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97
|
golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3
|
||||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c
|
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e
|
||||||
golang.zx2c4.com/wireguard v0.0.0-20210805125648-3957e9b9dd19
|
golang.zx2c4.com/wireguard v0.0.0-20210805125648-3957e9b9dd19
|
||||||
golang.zx2c4.com/wireguard/wgctrl v0.0.0-20210803171230-4253848d036c
|
golang.zx2c4.com/wireguard/wgctrl v0.0.0-20210803171230-4253848d036c
|
||||||
golang.zx2c4.com/wireguard/windows v0.4.5
|
golang.zx2c4.com/wireguard/windows v0.4.5
|
||||||
google.golang.org/grpc v1.32.0
|
google.golang.org/grpc v1.43.0
|
||||||
google.golang.org/protobuf v1.26.0
|
google.golang.org/protobuf v1.27.1
|
||||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0
|
gopkg.in/natefinch/lumberjack.v2 v2.0.0
|
||||||
)
|
)
|
||||||
|
|
||||||
|
require github.com/rs/xid v1.3.0
|
||||||
|
|
||||||
|
require (
|
||||||
|
github.com/BurntSushi/toml v0.4.1 // indirect
|
||||||
|
github.com/fsnotify/fsnotify v1.5.1 // indirect
|
||||||
|
github.com/google/go-cmp v0.5.6 // indirect
|
||||||
|
github.com/inconshreveable/mousetrap v1.0.0 // indirect
|
||||||
|
github.com/josharian/native v0.0.0-20200817173448-b6b71def0850 // indirect
|
||||||
|
github.com/mdlayher/genetlink v1.1.0 // indirect
|
||||||
|
github.com/mdlayher/netlink v1.4.2 // indirect
|
||||||
|
github.com/mdlayher/socket v0.0.0-20211102153432-57e3fa563ecb // indirect
|
||||||
|
github.com/nxadm/tail v1.4.8 // indirect
|
||||||
|
github.com/pion/dtls/v2 v2.0.12 // indirect
|
||||||
|
github.com/pion/logging v0.2.2 // indirect
|
||||||
|
github.com/pion/mdns v0.0.5 // indirect
|
||||||
|
github.com/pion/randutil v0.1.0 // indirect
|
||||||
|
github.com/pion/stun v0.3.5 // indirect
|
||||||
|
github.com/pion/transport v0.12.3 // indirect
|
||||||
|
github.com/pion/turn/v2 v2.0.5 // indirect
|
||||||
|
github.com/pion/udp v0.1.1 // indirect
|
||||||
|
github.com/vishvananda/netns v0.0.0-20191106174202-0a2b9b5464df // indirect
|
||||||
|
golang.org/x/mod v0.5.1 // indirect
|
||||||
|
golang.org/x/net v0.0.0-20211208012354-db4efeb81f4b // indirect
|
||||||
|
golang.org/x/text v0.3.8-0.20211105212822-18b340fc7af2 // indirect
|
||||||
|
golang.org/x/tools v0.1.8 // indirect
|
||||||
|
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
|
||||||
|
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa // indirect
|
||||||
|
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
|
||||||
|
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||||
|
honnef.co/go/tools v0.2.2 // indirect
|
||||||
|
)
|
||||||
|
|||||||
@@ -17,8 +17,6 @@ const (
|
|||||||
|
|
||||||
var (
|
var (
|
||||||
tunIface tun.Device
|
tunIface tun.Device
|
||||||
// todo check after move the WgPort constant to the client
|
|
||||||
WgPort = 51820
|
|
||||||
)
|
)
|
||||||
|
|
||||||
// CreateWithUserspace Creates a new Wireguard interface, using wireguard-go userspace implementation
|
// CreateWithUserspace Creates a new Wireguard interface, using wireguard-go userspace implementation
|
||||||
@@ -103,7 +101,7 @@ func Exists(iface string) (*bool, error) {
|
|||||||
|
|
||||||
// Configure configures a Wireguard interface
|
// Configure configures a Wireguard interface
|
||||||
// The interface must exist before calling this method (e.g. call interface.Create() before)
|
// The interface must exist before calling this method (e.g. call interface.Create() before)
|
||||||
func Configure(iface string, privateKey string) error {
|
func Configure(iface string, privateKey string, port int) error {
|
||||||
|
|
||||||
log.Debugf("configuring Wireguard interface %s", iface)
|
log.Debugf("configuring Wireguard interface %s", iface)
|
||||||
|
|
||||||
@@ -113,12 +111,11 @@ func Configure(iface string, privateKey string) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
fwmark := 0
|
fwmark := 0
|
||||||
p := WgPort
|
|
||||||
config := wgtypes.Config{
|
config := wgtypes.Config{
|
||||||
PrivateKey: &key,
|
PrivateKey: &key,
|
||||||
ReplacePeers: false,
|
ReplacePeers: false,
|
||||||
FirewallMark: &fwmark,
|
FirewallMark: &fwmark,
|
||||||
ListenPort: &p,
|
ListenPort: &port,
|
||||||
}
|
}
|
||||||
|
|
||||||
return configureDevice(iface, config)
|
return configureDevice(iface, config)
|
||||||
@@ -146,7 +143,7 @@ func GetListenPort(iface string) (*int, error) {
|
|||||||
|
|
||||||
// UpdatePeer updates existing Wireguard Peer or creates a new one if doesn't exist
|
// UpdatePeer updates existing Wireguard Peer or creates a new one if doesn't exist
|
||||||
// Endpoint is optional
|
// Endpoint is optional
|
||||||
func UpdatePeer(iface string, peerKey string, allowedIps string, keepAlive time.Duration, endpoint string) error {
|
func UpdatePeer(iface string, peerKey string, allowedIps string, keepAlive time.Duration, endpoint string, preSharedKey *wgtypes.Key) error {
|
||||||
|
|
||||||
log.Debugf("updating interface %s peer %s: endpoint %s ", iface, peerKey, endpoint)
|
log.Debugf("updating interface %s peer %s: endpoint %s ", iface, peerKey, endpoint)
|
||||||
|
|
||||||
@@ -165,6 +162,7 @@ func UpdatePeer(iface string, peerKey string, allowedIps string, keepAlive time.
|
|||||||
ReplaceAllowedIPs: true,
|
ReplaceAllowedIPs: true,
|
||||||
AllowedIPs: []net.IPNet{*ipNet},
|
AllowedIPs: []net.IPNet{*ipNet},
|
||||||
PersistentKeepaliveInterval: &keepAlive,
|
PersistentKeepaliveInterval: &keepAlive,
|
||||||
|
PresharedKey: preSharedKey,
|
||||||
}
|
}
|
||||||
|
|
||||||
config := wgtypes.Config{
|
config := wgtypes.Config{
|
||||||
@@ -234,7 +232,7 @@ func RemovePeer(iface string, peerKey string) error {
|
|||||||
return configureDevice(iface, config)
|
return configureDevice(iface, config)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Closes the User Space tunnel interface
|
// CloseWithUserspace closes the User Space tunnel interface
|
||||||
func CloseWithUserspace() error {
|
func CloseWithUserspace() error {
|
||||||
return tunIface.Close()
|
return tunIface.Close()
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -40,7 +40,7 @@ func addRoute(iface string, ipNet *net.IPNet) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Closes the tunnel interface
|
// Closes the tunnel interface
|
||||||
func Close() error {
|
func Close(iFace string) error {
|
||||||
name, err := tunIface.Name()
|
name, err := tunIface.Name()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
|
|||||||
@@ -1,10 +1,8 @@
|
|||||||
package iface
|
package iface
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"fmt"
|
|
||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
"github.com/vishvananda/netlink"
|
"github.com/vishvananda/netlink"
|
||||||
"golang.zx2c4.com/wireguard/wgctrl"
|
|
||||||
"os"
|
"os"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -131,34 +129,14 @@ func (w *wgLink) Type() string {
|
|||||||
return "wireguard"
|
return "wireguard"
|
||||||
}
|
}
|
||||||
|
|
||||||
// Closes the tunnel interface
|
// Close closes the tunnel interface
|
||||||
func Close() error {
|
func Close(iFace string) error {
|
||||||
|
|
||||||
if tunIface != nil {
|
if tunIface != nil {
|
||||||
return CloseWithUserspace()
|
return CloseWithUserspace()
|
||||||
} else {
|
} else {
|
||||||
var iface = ""
|
|
||||||
wg, err := wgctrl.New()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
defer wg.Close()
|
|
||||||
devList, err := wg.Devices()
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
for _, wgDev := range devList {
|
|
||||||
// todo check after move the WgPort constant to the client
|
|
||||||
if wgDev.ListenPort == WgPort {
|
|
||||||
iface = wgDev.Name
|
|
||||||
break
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if iface == "" {
|
|
||||||
return fmt.Errorf("Wireguard Interface not found")
|
|
||||||
}
|
|
||||||
attrs := netlink.NewLinkAttrs()
|
attrs := netlink.NewLinkAttrs()
|
||||||
attrs.Name = iface
|
attrs.Name = iFace
|
||||||
|
|
||||||
link := wgLink{
|
link := wgLink{
|
||||||
attrs: &attrs,
|
attrs: &attrs,
|
||||||
|
|||||||
@@ -14,6 +14,7 @@ import (
|
|||||||
const (
|
const (
|
||||||
key = "0PMI6OkB5JmB+Jj/iWWHekuQRx+bipZirWCWKFXexHc="
|
key = "0PMI6OkB5JmB+Jj/iWWHekuQRx+bipZirWCWKFXexHc="
|
||||||
peerPubKey = "Ok0mC0qlJyXEPKh2UFIpsI2jG0L7LRpC3sLAusSJ5CQ="
|
peerPubKey = "Ok0mC0qlJyXEPKh2UFIpsI2jG0L7LRpC3sLAusSJ5CQ="
|
||||||
|
WgPort = 51820
|
||||||
)
|
)
|
||||||
|
|
||||||
func init() {
|
func init() {
|
||||||
@@ -29,7 +30,7 @@ func Test_CreateInterface(t *testing.T) {
|
|||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
defer func() {
|
defer func() {
|
||||||
err = Close()
|
err = Close(ifaceName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Error(err)
|
t.Error(err)
|
||||||
}
|
}
|
||||||
@@ -44,13 +45,6 @@ func Test_CreateInterface(t *testing.T) {
|
|||||||
t.Error(err)
|
t.Error(err)
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
d, err := wg.Device(ifaceName)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
// todo move the WgPort constant to the client
|
|
||||||
WgPort = d.ListenPort
|
|
||||||
}
|
}
|
||||||
func Test_ConfigureInterface(t *testing.T) {
|
func Test_ConfigureInterface(t *testing.T) {
|
||||||
ifaceName := "utun1000"
|
ifaceName := "utun1000"
|
||||||
@@ -60,13 +54,13 @@ func Test_ConfigureInterface(t *testing.T) {
|
|||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
defer func() {
|
defer func() {
|
||||||
err = Close()
|
err = Close(ifaceName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Error(err)
|
t.Error(err)
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
err = Configure(ifaceName, key)
|
err = Configure(ifaceName, key, WgPort)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -99,19 +93,19 @@ func Test_UpdatePeer(t *testing.T) {
|
|||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
defer func() {
|
defer func() {
|
||||||
err = Close()
|
err = Close(ifaceName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Error(err)
|
t.Error(err)
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
err = Configure(ifaceName, key)
|
err = Configure(ifaceName, key, WgPort)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
keepAlive := 15 * time.Second
|
keepAlive := 15 * time.Second
|
||||||
allowedIP := "10.99.99.2/32"
|
allowedIP := "10.99.99.2/32"
|
||||||
endpoint := "127.0.0.1:9900"
|
endpoint := "127.0.0.1:9900"
|
||||||
err = UpdatePeer(ifaceName, peerPubKey, allowedIP, keepAlive, endpoint)
|
err = UpdatePeer(ifaceName, peerPubKey, allowedIP, keepAlive, endpoint, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -151,19 +145,19 @@ func Test_UpdatePeerEndpoint(t *testing.T) {
|
|||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
defer func() {
|
defer func() {
|
||||||
err = Close()
|
err = Close(ifaceName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Error(err)
|
t.Error(err)
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
err = Configure(ifaceName, key)
|
err = Configure(ifaceName, key, WgPort)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
keepAlive := 15 * time.Second
|
keepAlive := 15 * time.Second
|
||||||
allowedIP := "10.99.99.2/32"
|
allowedIP := "10.99.99.2/32"
|
||||||
endpoint := "127.0.0.1:9900"
|
endpoint := "127.0.0.1:9900"
|
||||||
err = UpdatePeer(ifaceName, peerPubKey, allowedIP, keepAlive, endpoint)
|
err = UpdatePeer(ifaceName, peerPubKey, allowedIP, keepAlive, endpoint, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -192,19 +186,19 @@ func Test_RemovePeer(t *testing.T) {
|
|||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
defer func() {
|
defer func() {
|
||||||
err = Close()
|
err = Close(ifaceName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Error(err)
|
t.Error(err)
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
err = Configure(ifaceName, key)
|
err = Configure(ifaceName, key, WgPort)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
keepAlive := 15 * time.Second
|
keepAlive := 15 * time.Second
|
||||||
allowedIP := "10.99.99.2/32"
|
allowedIP := "10.99.99.2/32"
|
||||||
endpoint := "127.0.0.1:9900"
|
endpoint := "127.0.0.1:9900"
|
||||||
err = UpdatePeer(ifaceName, peerPubKey, allowedIP, keepAlive, endpoint)
|
err = UpdatePeer(ifaceName, peerPubKey, allowedIP, keepAlive, endpoint, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -235,13 +229,7 @@ func Test_Close(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
d, err := wg.Device(ifaceName)
|
err = Close(ifaceName)
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
// todo move the WgPort constant to the client
|
|
||||||
WgPort = d.ListenPort
|
|
||||||
err = Close()
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -41,6 +41,6 @@ func getUAPI(iface string) (net.Listener, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Closes the tunnel interface
|
// Closes the tunnel interface
|
||||||
func Close() error {
|
func Close(iFace string) error {
|
||||||
return CloseWithUserspace()
|
return CloseWithUserspace()
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -3,6 +3,7 @@ package client
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"crypto/tls"
|
"crypto/tls"
|
||||||
|
"fmt"
|
||||||
"github.com/cenkalti/backoff/v4"
|
"github.com/cenkalti/backoff/v4"
|
||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
"github.com/wiretrustee/wiretrustee/client/system"
|
"github.com/wiretrustee/wiretrustee/client/system"
|
||||||
@@ -10,7 +11,9 @@ import (
|
|||||||
"github.com/wiretrustee/wiretrustee/management/proto"
|
"github.com/wiretrustee/wiretrustee/management/proto"
|
||||||
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
|
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
|
||||||
"google.golang.org/grpc"
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/connectivity"
|
||||||
"google.golang.org/grpc/credentials"
|
"google.golang.org/grpc/credentials"
|
||||||
|
"google.golang.org/grpc/credentials/insecure"
|
||||||
"google.golang.org/grpc/keepalive"
|
"google.golang.org/grpc/keepalive"
|
||||||
"io"
|
"io"
|
||||||
"time"
|
"time"
|
||||||
@@ -26,7 +29,7 @@ type Client struct {
|
|||||||
// NewClient creates a new client to Management service
|
// NewClient creates a new client to Management service
|
||||||
func NewClient(ctx context.Context, addr string, ourPrivateKey wgtypes.Key, tlsEnabled bool) (*Client, error) {
|
func NewClient(ctx context.Context, addr string, ourPrivateKey wgtypes.Key, tlsEnabled bool) (*Client, error) {
|
||||||
|
|
||||||
transportOption := grpc.WithInsecure()
|
transportOption := grpc.WithTransportCredentials(insecure.NewCredentials())
|
||||||
|
|
||||||
if tlsEnabled {
|
if tlsEnabled {
|
||||||
transportOption = grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{}))
|
transportOption = grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{}))
|
||||||
@@ -71,12 +74,18 @@ func defaultBackoff(ctx context.Context) backoff.BackOff {
|
|||||||
RandomizationFactor: backoff.DefaultRandomizationFactor,
|
RandomizationFactor: backoff.DefaultRandomizationFactor,
|
||||||
Multiplier: backoff.DefaultMultiplier,
|
Multiplier: backoff.DefaultMultiplier,
|
||||||
MaxInterval: 10 * time.Second,
|
MaxInterval: 10 * time.Second,
|
||||||
MaxElapsedTime: 30 * time.Minute, //stop after an 30 min of trying, the error will be propagated to the general retry of the client
|
MaxElapsedTime: 12 * time.Hour, //stop after 12 hours of trying, the error will be propagated to the general retry of the client
|
||||||
Stop: backoff.Stop,
|
Stop: backoff.Stop,
|
||||||
Clock: backoff.SystemClock,
|
Clock: backoff.SystemClock,
|
||||||
}, ctx)
|
}, ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ready indicates whether the client is okay and ready to be used
|
||||||
|
// for now it just checks whether gRPC connection to the service is ready
|
||||||
|
func (c *Client) ready() bool {
|
||||||
|
return c.conn.GetState() == connectivity.Ready || c.conn.GetState() == connectivity.Idle
|
||||||
|
}
|
||||||
|
|
||||||
// Sync wraps the real client's Sync endpoint call and takes care of retries and encryption/decryption of messages
|
// Sync wraps the real client's Sync endpoint call and takes care of retries and encryption/decryption of messages
|
||||||
// Blocking request. The result will be sent via msgHandler callback function
|
// Blocking request. The result will be sent via msgHandler callback function
|
||||||
func (c *Client) Sync(msgHandler func(msg *proto.SyncResponse) error) error {
|
func (c *Client) Sync(msgHandler func(msg *proto.SyncResponse) error) error {
|
||||||
@@ -85,6 +94,12 @@ func (c *Client) Sync(msgHandler func(msg *proto.SyncResponse) error) error {
|
|||||||
|
|
||||||
operation := func() error {
|
operation := func() error {
|
||||||
|
|
||||||
|
log.Debugf("management connection state %v", c.conn.GetState())
|
||||||
|
|
||||||
|
if !c.ready() {
|
||||||
|
return fmt.Errorf("no connection to management")
|
||||||
|
}
|
||||||
|
|
||||||
// todo we already have it since we did the Login, maybe cache it locally?
|
// todo we already have it since we did the Login, maybe cache it locally?
|
||||||
serverPubKey, err := c.GetServerPublicKey()
|
serverPubKey, err := c.GetServerPublicKey()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -98,7 +113,7 @@ func (c *Client) Sync(msgHandler func(msg *proto.SyncResponse) error) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Infof("connected to the Management Service Stream")
|
log.Infof("connected to the Management Service stream")
|
||||||
|
|
||||||
// blocking until error
|
// blocking until error
|
||||||
err = c.receiveEvents(stream, *serverPubKey, msgHandler)
|
err = c.receiveEvents(stream, *serverPubKey, msgHandler)
|
||||||
@@ -139,7 +154,7 @@ func (c *Client) receiveEvents(stream proto.ManagementService_SyncClient, server
|
|||||||
for {
|
for {
|
||||||
update, err := stream.Recv()
|
update, err := stream.Recv()
|
||||||
if err == io.EOF {
|
if err == io.EOF {
|
||||||
log.Errorf("managment stream was closed: %s", err)
|
log.Errorf("Management stream has been closed by server: %s", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -165,6 +180,10 @@ func (c *Client) receiveEvents(stream proto.ManagementService_SyncClient, server
|
|||||||
|
|
||||||
// GetServerPublicKey returns server Wireguard public key (used later for encrypting messages sent to the server)
|
// GetServerPublicKey returns server Wireguard public key (used later for encrypting messages sent to the server)
|
||||||
func (c *Client) GetServerPublicKey() (*wgtypes.Key, error) {
|
func (c *Client) GetServerPublicKey() (*wgtypes.Key, error) {
|
||||||
|
if !c.ready() {
|
||||||
|
return nil, fmt.Errorf("no connection to management")
|
||||||
|
}
|
||||||
|
|
||||||
mgmCtx, cancel := context.WithTimeout(c.ctx, 5*time.Second) //todo make a general setting
|
mgmCtx, cancel := context.WithTimeout(c.ctx, 5*time.Second) //todo make a general setting
|
||||||
defer cancel()
|
defer cancel()
|
||||||
resp, err := c.realClient.GetServerKey(mgmCtx, &proto.Empty{})
|
resp, err := c.realClient.GetServerKey(mgmCtx, &proto.Empty{})
|
||||||
@@ -181,6 +200,9 @@ func (c *Client) GetServerPublicKey() (*wgtypes.Key, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (c *Client) login(serverKey wgtypes.Key, req *proto.LoginRequest) (*proto.LoginResponse, error) {
|
func (c *Client) login(serverKey wgtypes.Key, req *proto.LoginRequest) (*proto.LoginResponse, error) {
|
||||||
|
if !c.ready() {
|
||||||
|
return nil, fmt.Errorf("no connection to management")
|
||||||
|
}
|
||||||
loginReq, err := encryption.EncryptMessage(serverKey, c.key, req)
|
loginReq, err := encryption.EncryptMessage(serverKey, c.key, req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("failed to encrypt message: %s", err)
|
log.Errorf("failed to encrypt message: %s", err)
|
||||||
|
|||||||
@@ -2,6 +2,7 @@ package server
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/google/uuid"
|
"github.com/google/uuid"
|
||||||
|
"github.com/rs/xid"
|
||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
"github.com/wiretrustee/wiretrustee/util"
|
"github.com/wiretrustee/wiretrustee/util"
|
||||||
"google.golang.org/grpc/codes"
|
"google.golang.org/grpc/codes"
|
||||||
@@ -20,9 +21,38 @@ type AccountManager struct {
|
|||||||
// Account represents a unique account of the system
|
// Account represents a unique account of the system
|
||||||
type Account struct {
|
type Account struct {
|
||||||
Id string
|
Id string
|
||||||
|
// User.Id it was created by
|
||||||
|
CreatedBy string
|
||||||
SetupKeys map[string]*SetupKey
|
SetupKeys map[string]*SetupKey
|
||||||
Network *Network
|
Network *Network
|
||||||
Peers map[string]*Peer
|
Peers map[string]*Peer
|
||||||
|
Users map[string]*User
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Account) Copy() *Account {
|
||||||
|
peers := map[string]*Peer{}
|
||||||
|
for id, peer := range a.Peers {
|
||||||
|
peers[id] = peer.Copy()
|
||||||
|
}
|
||||||
|
|
||||||
|
users := map[string]*User{}
|
||||||
|
for id, user := range a.Users {
|
||||||
|
users[id] = user.Copy()
|
||||||
|
}
|
||||||
|
|
||||||
|
setupKeys := map[string]*SetupKey{}
|
||||||
|
for id, key := range a.SetupKeys {
|
||||||
|
setupKeys[id] = key.Copy()
|
||||||
|
}
|
||||||
|
|
||||||
|
return &Account{
|
||||||
|
Id: a.Id,
|
||||||
|
CreatedBy: a.CreatedBy,
|
||||||
|
SetupKeys: setupKeys,
|
||||||
|
Network: a.Network.Copy(),
|
||||||
|
Peers: peers,
|
||||||
|
Users: users,
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewManager creates a new AccountManager with a provided Store
|
// NewManager creates a new AccountManager with a provided Store
|
||||||
@@ -125,29 +155,6 @@ func (am *AccountManager) GetAccount(accountId string) (*Account, error) {
|
|||||||
return account, nil
|
return account, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetOrCreateAccount returns an existing account or creates a new one if doesn't exist
|
|
||||||
func (am *AccountManager) GetOrCreateAccount(accountId string) (*Account, error) {
|
|
||||||
am.mux.Lock()
|
|
||||||
defer am.mux.Unlock()
|
|
||||||
|
|
||||||
_, err := am.Store.GetAccount(accountId)
|
|
||||||
if err != nil {
|
|
||||||
if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {
|
|
||||||
return am.createAccount(accountId)
|
|
||||||
} else {
|
|
||||||
// other error
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
account, err := am.Store.GetAccount(accountId)
|
|
||||||
if err != nil {
|
|
||||||
return nil, status.Errorf(codes.Internal, "failed retrieving account")
|
|
||||||
}
|
|
||||||
|
|
||||||
return account, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
//AccountExists checks whether account exists (returns true) or not (returns false)
|
//AccountExists checks whether account exists (returns true) or not (returns false)
|
||||||
func (am *AccountManager) AccountExists(accountId string) (*bool, error) {
|
func (am *AccountManager) AccountExists(accountId string) (*bool, error) {
|
||||||
am.mux.Lock()
|
am.mux.Lock()
|
||||||
@@ -168,18 +175,18 @@ func (am *AccountManager) AccountExists(accountId string) (*bool, error) {
|
|||||||
return &res, nil
|
return &res, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// AddAccount generates a new Account with a provided accountId and saves to the Store
|
// AddAccount generates a new Account with a provided accountId and userId, saves to the Store
|
||||||
func (am *AccountManager) AddAccount(accountId string) (*Account, error) {
|
func (am *AccountManager) AddAccount(accountId string, userId string) (*Account, error) {
|
||||||
|
|
||||||
am.mux.Lock()
|
am.mux.Lock()
|
||||||
defer am.mux.Unlock()
|
defer am.mux.Unlock()
|
||||||
|
|
||||||
return am.createAccount(accountId)
|
return am.createAccount(accountId, userId)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (am *AccountManager) createAccount(accountId string) (*Account, error) {
|
func (am *AccountManager) createAccount(accountId string, userId string) (*Account, error) {
|
||||||
account, _ := newAccountWithId(accountId)
|
account, _ := newAccountWithId(accountId, userId)
|
||||||
|
|
||||||
err := am.Store.SaveAccount(account)
|
err := am.Store.SaveAccount(account)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -190,7 +197,7 @@ func (am *AccountManager) createAccount(accountId string) (*Account, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// newAccountWithId creates a new Account with a default SetupKey (doesn't store in a Store) and provided id
|
// newAccountWithId creates a new Account with a default SetupKey (doesn't store in a Store) and provided id
|
||||||
func newAccountWithId(accountId string) (*Account, *SetupKey) {
|
func newAccountWithId(accountId string, userId string) (*Account, *SetupKey) {
|
||||||
|
|
||||||
log.Debugf("creating new account")
|
log.Debugf("creating new account")
|
||||||
|
|
||||||
@@ -204,16 +211,17 @@ func newAccountWithId(accountId string) (*Account, *SetupKey) {
|
|||||||
Net: net.IPNet{IP: net.ParseIP("100.64.0.0"), Mask: net.IPMask{255, 192, 0, 0}},
|
Net: net.IPNet{IP: net.ParseIP("100.64.0.0"), Mask: net.IPMask{255, 192, 0, 0}},
|
||||||
Dns: ""}
|
Dns: ""}
|
||||||
peers := make(map[string]*Peer)
|
peers := make(map[string]*Peer)
|
||||||
|
users := make(map[string]*User)
|
||||||
|
|
||||||
log.Debugf("created new account %s with setup key %s", accountId, defaultKey.Key)
|
log.Debugf("created new account %s with setup key %s", accountId, defaultKey.Key)
|
||||||
|
|
||||||
return &Account{Id: accountId, SetupKeys: setupKeys, Network: network, Peers: peers}, defaultKey
|
return &Account{Id: accountId, SetupKeys: setupKeys, Network: network, Peers: peers, Users: users, CreatedBy: userId}, defaultKey
|
||||||
}
|
}
|
||||||
|
|
||||||
// newAccount creates a new Account with a default SetupKey (doesn't store in a Store)
|
// newAccount creates a new Account with a default SetupKey and a provided User.Id of a user who issued account creation (doesn't store in a Store)
|
||||||
func newAccount() (*Account, *SetupKey) {
|
func newAccount(userId string) (*Account, *SetupKey) {
|
||||||
accountId := uuid.New().String()
|
accountId := xid.New().String()
|
||||||
return newAccountWithId(accountId)
|
return newAccountWithId(accountId, userId)
|
||||||
}
|
}
|
||||||
|
|
||||||
func getAccountSetupKeyById(acc *Account, keyId string) *SetupKey {
|
func getAccountSetupKeyById(acc *Account, keyId string) *SetupKey {
|
||||||
|
|||||||
@@ -2,12 +2,36 @@ package server
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
|
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
|
||||||
"google.golang.org/grpc/codes"
|
|
||||||
"google.golang.org/grpc/status"
|
|
||||||
"net"
|
"net"
|
||||||
"testing"
|
"testing"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
func TestAccountManager_GetOrCreateAccountByUser(t *testing.T) {
|
||||||
|
manager, err := createManager(t)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
userId := "test_user"
|
||||||
|
account, err := manager.GetOrCreateAccountByUser(userId)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if account == nil {
|
||||||
|
t.Fatalf("expected to create an account for a user %s", userId)
|
||||||
|
}
|
||||||
|
|
||||||
|
account, err = manager.GetAccountByUser(userId)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("expected to get existing account after creation, no account was found for a user %s", userId)
|
||||||
|
}
|
||||||
|
|
||||||
|
if account != nil && account.Users[userId] == nil {
|
||||||
|
t.Fatalf("expected to create an account for a user %s but no user was found after creation udner the account %s", userId, account.Id)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func TestAccountManager_AddAccount(t *testing.T) {
|
func TestAccountManager_AddAccount(t *testing.T) {
|
||||||
manager, err := createManager(t)
|
manager, err := createManager(t)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -16,6 +40,7 @@ func TestAccountManager_AddAccount(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
expectedId := "test_account"
|
expectedId := "test_account"
|
||||||
|
userId := "account_creator"
|
||||||
expectedPeersSize := 0
|
expectedPeersSize := 0
|
||||||
expectedSetupKeysSize := 2
|
expectedSetupKeysSize := 2
|
||||||
expectedNetwork := net.IPNet{
|
expectedNetwork := net.IPNet{
|
||||||
@@ -23,7 +48,7 @@ func TestAccountManager_AddAccount(t *testing.T) {
|
|||||||
Mask: net.IPMask{255, 192, 0, 0},
|
Mask: net.IPMask{255, 192, 0, 0},
|
||||||
}
|
}
|
||||||
|
|
||||||
account, err := manager.AddAccount(expectedId)
|
account, err := manager.AddAccount(expectedId, userId)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -45,46 +70,6 @@ func TestAccountManager_AddAccount(t *testing.T) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestAccountManager_GetOrCreateAccount(t *testing.T) {
|
|
||||||
manager, err := createManager(t)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
expectedId := "test_account"
|
|
||||||
|
|
||||||
//make sure account doesn't exist
|
|
||||||
account, err := manager.GetAccount(expectedId)
|
|
||||||
if err != nil {
|
|
||||||
errStatus, ok := status.FromError(err)
|
|
||||||
if !(ok && errStatus.Code() == codes.NotFound) {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if account != nil {
|
|
||||||
t.Fatal("expecting empty account")
|
|
||||||
}
|
|
||||||
|
|
||||||
account, err = manager.GetOrCreateAccount(expectedId)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if account.Id != expectedId {
|
|
||||||
t.Fatalf("expected to create an account, got wrong account")
|
|
||||||
}
|
|
||||||
|
|
||||||
account, err = manager.GetOrCreateAccount(expectedId)
|
|
||||||
if err != nil {
|
|
||||||
t.Errorf("expected to get existing account after creation, failed")
|
|
||||||
}
|
|
||||||
|
|
||||||
if account.Id != expectedId {
|
|
||||||
t.Fatalf("expected to create an account, got wrong account")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestAccountManager_AccountExists(t *testing.T) {
|
func TestAccountManager_AccountExists(t *testing.T) {
|
||||||
manager, err := createManager(t)
|
manager, err := createManager(t)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -93,7 +78,8 @@ func TestAccountManager_AccountExists(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
expectedId := "test_account"
|
expectedId := "test_account"
|
||||||
_, err = manager.AddAccount(expectedId)
|
userId := "account_creator"
|
||||||
|
_, err = manager.AddAccount(expectedId, userId)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -117,7 +103,8 @@ func TestAccountManager_GetAccount(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
expectedId := "test_account"
|
expectedId := "test_account"
|
||||||
account, err := manager.AddAccount(expectedId)
|
userId := "account_creator"
|
||||||
|
account, err := manager.AddAccount(expectedId, userId)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -154,7 +141,7 @@ func TestAccountManager_AddPeer(t *testing.T) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
account, err := manager.AddAccount("test_account")
|
account, err := manager.AddAccount("test_account", "account_creator")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -20,6 +20,7 @@ type FileStore struct {
|
|||||||
Accounts map[string]*Account
|
Accounts map[string]*Account
|
||||||
SetupKeyId2AccountId map[string]string `json:"-"`
|
SetupKeyId2AccountId map[string]string `json:"-"`
|
||||||
PeerKeyId2AccountId map[string]string `json:"-"`
|
PeerKeyId2AccountId map[string]string `json:"-"`
|
||||||
|
UserId2AccountId map[string]string `json:"-"`
|
||||||
|
|
||||||
// mutex to synchronise Store read/write operations
|
// mutex to synchronise Store read/write operations
|
||||||
mux sync.Mutex `json:"-"`
|
mux sync.Mutex `json:"-"`
|
||||||
@@ -45,6 +46,7 @@ func restore(file string) (*FileStore, error) {
|
|||||||
mux: sync.Mutex{},
|
mux: sync.Mutex{},
|
||||||
SetupKeyId2AccountId: make(map[string]string),
|
SetupKeyId2AccountId: make(map[string]string),
|
||||||
PeerKeyId2AccountId: make(map[string]string),
|
PeerKeyId2AccountId: make(map[string]string),
|
||||||
|
UserId2AccountId: make(map[string]string),
|
||||||
storeFile: file,
|
storeFile: file,
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -65,6 +67,7 @@ func restore(file string) (*FileStore, error) {
|
|||||||
store.storeFile = file
|
store.storeFile = file
|
||||||
store.SetupKeyId2AccountId = make(map[string]string)
|
store.SetupKeyId2AccountId = make(map[string]string)
|
||||||
store.PeerKeyId2AccountId = make(map[string]string)
|
store.PeerKeyId2AccountId = make(map[string]string)
|
||||||
|
store.UserId2AccountId = make(map[string]string)
|
||||||
for accountId, account := range store.Accounts {
|
for accountId, account := range store.Accounts {
|
||||||
for setupKeyId := range account.SetupKeys {
|
for setupKeyId := range account.SetupKeys {
|
||||||
store.SetupKeyId2AccountId[strings.ToUpper(setupKeyId)] = accountId
|
store.SetupKeyId2AccountId[strings.ToUpper(setupKeyId)] = accountId
|
||||||
@@ -72,6 +75,9 @@ func restore(file string) (*FileStore, error) {
|
|||||||
for _, peer := range account.Peers {
|
for _, peer := range account.Peers {
|
||||||
store.PeerKeyId2AccountId[peer.Key] = accountId
|
store.PeerKeyId2AccountId[peer.Key] = accountId
|
||||||
}
|
}
|
||||||
|
for _, user := range account.Users {
|
||||||
|
store.UserId2AccountId[user.Id] = accountId
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return store, nil
|
return store, nil
|
||||||
@@ -168,6 +174,10 @@ func (s *FileStore) SaveAccount(account *Account) error {
|
|||||||
s.PeerKeyId2AccountId[peer.Key] = account.Id
|
s.PeerKeyId2AccountId[peer.Key] = account.Id
|
||||||
}
|
}
|
||||||
|
|
||||||
|
for _, user := range account.Users {
|
||||||
|
s.UserId2AccountId[user.Id] = account.Id
|
||||||
|
}
|
||||||
|
|
||||||
err := s.persist(s.storeFile)
|
err := s.persist(s.storeFile)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -217,6 +227,18 @@ func (s *FileStore) GetAccount(accountId string) (*Account, error) {
|
|||||||
return account, nil
|
return account, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (s *FileStore) GetUserAccount(userId string) (*Account, error) {
|
||||||
|
s.mux.Lock()
|
||||||
|
defer s.mux.Unlock()
|
||||||
|
|
||||||
|
accountId, accountIdFound := s.UserId2AccountId[userId]
|
||||||
|
if !accountIdFound {
|
||||||
|
return nil, status.Errorf(codes.NotFound, "account not found")
|
||||||
|
}
|
||||||
|
|
||||||
|
return s.GetAccount(accountId)
|
||||||
|
}
|
||||||
|
|
||||||
func (s *FileStore) GetPeerAccount(peerKey string) (*Account, error) {
|
func (s *FileStore) GetPeerAccount(peerKey string) (*Account, error) {
|
||||||
s.mux.Lock()
|
s.mux.Lock()
|
||||||
defer s.mux.Unlock()
|
defer s.mux.Unlock()
|
||||||
|
|||||||
171
management/server/file_store_test.go
Normal file
171
management/server/file_store_test.go
Normal file
@@ -0,0 +1,171 @@
|
|||||||
|
package server
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/wiretrustee/wiretrustee/util"
|
||||||
|
"net"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestNewStore(t *testing.T) {
|
||||||
|
store := newStore(t)
|
||||||
|
|
||||||
|
if store.Accounts == nil || len(store.Accounts) != 0 {
|
||||||
|
t.Errorf("expected to create a new empty Accounts map when creating a new FileStore")
|
||||||
|
}
|
||||||
|
|
||||||
|
if store.SetupKeyId2AccountId == nil || len(store.SetupKeyId2AccountId) != 0 {
|
||||||
|
t.Errorf("expected to create a new empty SetupKeyId2AccountId map when creating a new FileStore")
|
||||||
|
}
|
||||||
|
|
||||||
|
if store.PeerKeyId2AccountId == nil || len(store.PeerKeyId2AccountId) != 0 {
|
||||||
|
t.Errorf("expected to create a new empty PeerKeyId2AccountId map when creating a new FileStore")
|
||||||
|
}
|
||||||
|
|
||||||
|
if store.UserId2AccountId == nil || len(store.UserId2AccountId) != 0 {
|
||||||
|
t.Errorf("expected to create a new empty UserId2AccountId map when creating a new FileStore")
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestSaveAccount(t *testing.T) {
|
||||||
|
store := newStore(t)
|
||||||
|
|
||||||
|
account, _ := newAccount("testuser")
|
||||||
|
account.Users["testuser"] = NewAdminUser("testuser")
|
||||||
|
setupKey := GenerateDefaultSetupKey()
|
||||||
|
account.SetupKeys[setupKey.Key] = setupKey
|
||||||
|
account.Peers["testpeer"] = &Peer{
|
||||||
|
Key: "peerkey",
|
||||||
|
SetupKey: "peerkeysetupkey",
|
||||||
|
IP: net.IP{127, 0, 0, 1},
|
||||||
|
Meta: PeerSystemMeta{},
|
||||||
|
Name: "peer name",
|
||||||
|
Status: &PeerStatus{Connected: true, LastSeen: time.Now()},
|
||||||
|
}
|
||||||
|
|
||||||
|
// SaveAccount should trigger persist
|
||||||
|
err := store.SaveAccount(account)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if store.Accounts[account.Id] == nil {
|
||||||
|
t.Errorf("expecting Account to be stored after SaveAccount()")
|
||||||
|
}
|
||||||
|
|
||||||
|
if store.PeerKeyId2AccountId["peerkey"] == "" {
|
||||||
|
t.Errorf("expecting PeerKeyId2AccountId index updated after SaveAccount()")
|
||||||
|
}
|
||||||
|
|
||||||
|
if store.UserId2AccountId["testuser"] == "" {
|
||||||
|
t.Errorf("expecting UserId2AccountId index updated after SaveAccount()")
|
||||||
|
}
|
||||||
|
|
||||||
|
if store.SetupKeyId2AccountId[setupKey.Key] == "" {
|
||||||
|
t.Errorf("expecting SetupKeyId2AccountId index updated after SaveAccount()")
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStore(t *testing.T) {
|
||||||
|
store := newStore(t)
|
||||||
|
|
||||||
|
account, _ := newAccount("testuser")
|
||||||
|
account.Users["testuser"] = NewAdminUser("testuser")
|
||||||
|
account.Peers["testpeer"] = &Peer{
|
||||||
|
Key: "peerkey",
|
||||||
|
SetupKey: "peerkeysetupkey",
|
||||||
|
IP: net.IP{127, 0, 0, 1},
|
||||||
|
Meta: PeerSystemMeta{},
|
||||||
|
Name: "peer name",
|
||||||
|
Status: &PeerStatus{Connected: true, LastSeen: time.Now()},
|
||||||
|
}
|
||||||
|
|
||||||
|
// SaveAccount should trigger persist
|
||||||
|
err := store.SaveAccount(account)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
restored, err := NewStore(store.storeFile)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
restoredAccount := restored.Accounts[account.Id]
|
||||||
|
if restoredAccount == nil {
|
||||||
|
t.Errorf("failed to restore a FileStore file - missing Account %s", account.Id)
|
||||||
|
}
|
||||||
|
|
||||||
|
if restoredAccount != nil && restoredAccount.Peers["testpeer"] == nil {
|
||||||
|
t.Errorf("failed to restore a FileStore file - missing Peer testpeer")
|
||||||
|
}
|
||||||
|
|
||||||
|
if restoredAccount != nil && restoredAccount.CreatedBy != "testuser" {
|
||||||
|
t.Errorf("failed to restore a FileStore file - missing Account CreatedBy")
|
||||||
|
}
|
||||||
|
|
||||||
|
if restoredAccount != nil && restoredAccount.Users["testuser"] == nil {
|
||||||
|
t.Errorf("failed to restore a FileStore file - missing User testuser")
|
||||||
|
}
|
||||||
|
|
||||||
|
if restoredAccount != nil && restoredAccount.Network == nil {
|
||||||
|
t.Errorf("failed to restore a FileStore file - missing Network")
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestRestore(t *testing.T) {
|
||||||
|
storeDir := t.TempDir()
|
||||||
|
|
||||||
|
err := util.CopyFileContents("testdata/store.json", filepath.Join(storeDir, "store.json"))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
store, err := NewStore(storeDir)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
account := store.Accounts["bf1c8084-ba50-4ce7-9439-34653001fc3b"]
|
||||||
|
if account == nil {
|
||||||
|
t.Errorf("failed to restore a FileStore file - missing account bf1c8084-ba50-4ce7-9439-34653001fc3b")
|
||||||
|
}
|
||||||
|
|
||||||
|
if account != nil && account.Users["edafee4e-63fb-11ec-90d6-0242ac120003"] == nil {
|
||||||
|
t.Errorf("failed to restore a FileStore file - missing Account User edafee4e-63fb-11ec-90d6-0242ac120003")
|
||||||
|
}
|
||||||
|
|
||||||
|
if account != nil && account.Users["f4f6d672-63fb-11ec-90d6-0242ac120003"] == nil {
|
||||||
|
t.Errorf("failed to restore a FileStore file - missing Account User f4f6d672-63fb-11ec-90d6-0242ac120003")
|
||||||
|
}
|
||||||
|
|
||||||
|
if account != nil && account.Network == nil {
|
||||||
|
t.Errorf("failed to restore a FileStore file - missing Account Network")
|
||||||
|
}
|
||||||
|
|
||||||
|
if account != nil && account.SetupKeys["A2C8E62B-38F5-4553-B31E-DD66C696CEBB"] == nil {
|
||||||
|
t.Errorf("failed to restore a FileStore file - missing Account SetupKey A2C8E62B-38F5-4553-B31E-DD66C696CEBB")
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(store.UserId2AccountId) != 2 {
|
||||||
|
t.Errorf("failed to restore a FileStore wrong UserId2AccountId mapping")
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(store.SetupKeyId2AccountId) != 1 {
|
||||||
|
t.Errorf("failed to restore a FileStore wrong SetupKeyId2AccountId mapping")
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
func newStore(t *testing.T) *FileStore {
|
||||||
|
store, err := NewStore(t.TempDir())
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("failed creating a new store")
|
||||||
|
}
|
||||||
|
|
||||||
|
return store
|
||||||
|
}
|
||||||
@@ -62,7 +62,13 @@ func (h *Peers) deletePeer(accountId string, peer *server.Peer, w http.ResponseW
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (h *Peers) HandlePeer(w http.ResponseWriter, r *http.Request) {
|
func (h *Peers) HandlePeer(w http.ResponseWriter, r *http.Request) {
|
||||||
accountId := extractAccountIdFromRequestContext(r)
|
userId := extractUserIdFromRequestContext(r)
|
||||||
|
account, err := h.accountManager.GetOrCreateAccountByUser(userId)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("failed getting account of a user %s: %v", userId, err)
|
||||||
|
http.Redirect(w, r, "/", http.StatusInternalServerError)
|
||||||
|
return
|
||||||
|
}
|
||||||
vars := mux.Vars(r)
|
vars := mux.Vars(r)
|
||||||
peerId := vars["id"] //effectively peer IP address
|
peerId := vars["id"] //effectively peer IP address
|
||||||
if len(peerId) == 0 {
|
if len(peerId) == 0 {
|
||||||
@@ -70,7 +76,7 @@ func (h *Peers) HandlePeer(w http.ResponseWriter, r *http.Request) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
peer, err := h.accountManager.GetPeerByIP(accountId, peerId)
|
peer, err := h.accountManager.GetPeerByIP(account.Id, peerId)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
http.Error(w, "peer not found", http.StatusNotFound)
|
http.Error(w, "peer not found", http.StatusNotFound)
|
||||||
return
|
return
|
||||||
@@ -78,10 +84,10 @@ func (h *Peers) HandlePeer(w http.ResponseWriter, r *http.Request) {
|
|||||||
|
|
||||||
switch r.Method {
|
switch r.Method {
|
||||||
case http.MethodDelete:
|
case http.MethodDelete:
|
||||||
h.deletePeer(accountId, peer, w, r)
|
h.deletePeer(account.Id, peer, w, r)
|
||||||
return
|
return
|
||||||
case http.MethodPut:
|
case http.MethodPut:
|
||||||
h.updatePeer(accountId, peer, w, r)
|
h.updatePeer(account.Id, peer, w, r)
|
||||||
return
|
return
|
||||||
case http.MethodGet:
|
case http.MethodGet:
|
||||||
writeJSONObject(w, toPeerResponse(peer))
|
writeJSONObject(w, toPeerResponse(peer))
|
||||||
@@ -96,11 +102,11 @@ func (h *Peers) HandlePeer(w http.ResponseWriter, r *http.Request) {
|
|||||||
func (h *Peers) GetPeers(w http.ResponseWriter, r *http.Request) {
|
func (h *Peers) GetPeers(w http.ResponseWriter, r *http.Request) {
|
||||||
switch r.Method {
|
switch r.Method {
|
||||||
case http.MethodGet:
|
case http.MethodGet:
|
||||||
accountId := extractAccountIdFromRequestContext(r)
|
userId := extractUserIdFromRequestContext(r)
|
||||||
//new user -> create a new account
|
//new user -> create a new account
|
||||||
account, err := h.accountManager.GetOrCreateAccount(accountId)
|
account, err := h.accountManager.GetOrCreateAccountByUser(userId)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("failed getting user account %s: %v", accountId, err)
|
log.Errorf("failed getting account of a user %s: %v", userId, err)
|
||||||
http.Redirect(w, r, "/", http.StatusInternalServerError)
|
http.Redirect(w, r, "/", http.StatusInternalServerError)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -118,7 +118,14 @@ func (h *SetupKeys) createKey(accountId string, w http.ResponseWriter, r *http.R
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (h *SetupKeys) HandleKey(w http.ResponseWriter, r *http.Request) {
|
func (h *SetupKeys) HandleKey(w http.ResponseWriter, r *http.Request) {
|
||||||
accountId := extractAccountIdFromRequestContext(r)
|
userId := extractUserIdFromRequestContext(r)
|
||||||
|
account, err := h.accountManager.GetOrCreateAccountByUser(userId)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("failed getting account of a user %s: %v", userId, err)
|
||||||
|
http.Redirect(w, r, "/", http.StatusInternalServerError)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
vars := mux.Vars(r)
|
vars := mux.Vars(r)
|
||||||
keyId := vars["id"]
|
keyId := vars["id"]
|
||||||
if len(keyId) == 0 {
|
if len(keyId) == 0 {
|
||||||
@@ -128,10 +135,10 @@ func (h *SetupKeys) HandleKey(w http.ResponseWriter, r *http.Request) {
|
|||||||
|
|
||||||
switch r.Method {
|
switch r.Method {
|
||||||
case http.MethodPut:
|
case http.MethodPut:
|
||||||
h.updateKey(accountId, keyId, w, r)
|
h.updateKey(account.Id, keyId, w, r)
|
||||||
return
|
return
|
||||||
case http.MethodGet:
|
case http.MethodGet:
|
||||||
h.getKey(accountId, keyId, w, r)
|
h.getKey(account.Id, keyId, w, r)
|
||||||
return
|
return
|
||||||
default:
|
default:
|
||||||
http.Error(w, "", http.StatusNotFound)
|
http.Error(w, "", http.StatusNotFound)
|
||||||
@@ -140,21 +147,20 @@ func (h *SetupKeys) HandleKey(w http.ResponseWriter, r *http.Request) {
|
|||||||
|
|
||||||
func (h *SetupKeys) GetKeys(w http.ResponseWriter, r *http.Request) {
|
func (h *SetupKeys) GetKeys(w http.ResponseWriter, r *http.Request) {
|
||||||
|
|
||||||
accountId := extractAccountIdFromRequestContext(r)
|
userId := extractUserIdFromRequestContext(r)
|
||||||
|
|
||||||
switch r.Method {
|
|
||||||
case http.MethodPost:
|
|
||||||
h.createKey(accountId, w, r)
|
|
||||||
return
|
|
||||||
case http.MethodGet:
|
|
||||||
|
|
||||||
//new user -> create a new account
|
//new user -> create a new account
|
||||||
account, err := h.accountManager.GetOrCreateAccount(accountId)
|
account, err := h.accountManager.GetOrCreateAccountByUser(userId)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("failed getting user account %s: %v", accountId, err)
|
log.Errorf("failed getting account of a user %s: %v", userId, err)
|
||||||
http.Redirect(w, r, "/", http.StatusInternalServerError)
|
http.Redirect(w, r, "/", http.StatusInternalServerError)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
switch r.Method {
|
||||||
|
case http.MethodPost:
|
||||||
|
h.createKey(account.Id, w, r)
|
||||||
|
return
|
||||||
|
case http.MethodGet:
|
||||||
w.WriteHeader(200)
|
w.WriteHeader(200)
|
||||||
w.Header().Set("Content-Type", "application/json")
|
w.Header().Set("Content-Type", "application/json")
|
||||||
|
|
||||||
@@ -165,7 +171,7 @@ func (h *SetupKeys) GetKeys(w http.ResponseWriter, r *http.Request) {
|
|||||||
|
|
||||||
err = json.NewEncoder(w).Encode(respBody)
|
err = json.NewEncoder(w).Encode(respBody)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("failed encoding account peers %s: %v", accountId, err)
|
log.Errorf("failed encoding account peers %s: %v", account.Id, err)
|
||||||
http.Redirect(w, r, "/", http.StatusInternalServerError)
|
http.Redirect(w, r, "/", http.StatusInternalServerError)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -8,8 +8,8 @@ import (
|
|||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
// extractAccountIdFromRequestContext extracts accountId from the request context previously filled by the JWT token (after auth)
|
// extractUserIdFromRequestContext extracts accountId from the request context previously filled by the JWT token (after auth)
|
||||||
func extractAccountIdFromRequestContext(r *http.Request) string {
|
func extractUserIdFromRequestContext(r *http.Request) string {
|
||||||
token := r.Context().Value("user").(*jwt.Token)
|
token := r.Context().Value("user").(*jwt.Token)
|
||||||
claims := token.Claims.(jwt.MapClaims)
|
claims := token.Claims.(jwt.MapClaims)
|
||||||
|
|
||||||
|
|||||||
@@ -3,6 +3,7 @@ package server_test
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
server "github.com/wiretrustee/wiretrustee/management/server"
|
server "github.com/wiretrustee/wiretrustee/management/server"
|
||||||
|
"google.golang.org/grpc/credentials/insecure"
|
||||||
"io/ioutil"
|
"io/ioutil"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
"net"
|
"net"
|
||||||
@@ -472,7 +473,9 @@ func loginPeerWithValidSetupKey(serverPubKey wgtypes.Key, key wgtypes.Key, clien
|
|||||||
func createRawClient(addr string) (mgmtProto.ManagementServiceClient, *grpc.ClientConn) {
|
func createRawClient(addr string) (mgmtProto.ManagementServiceClient, *grpc.ClientConn) {
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
conn, err := grpc.DialContext(ctx, addr, grpc.WithInsecure(),
|
|
||||||
|
conn, err := grpc.DialContext(ctx, addr,
|
||||||
|
grpc.WithTransportCredentials(insecure.NewCredentials()),
|
||||||
grpc.WithBlock(),
|
grpc.WithBlock(),
|
||||||
grpc.WithKeepaliveParams(keepalive.ClientParameters{
|
grpc.WithKeepaliveParams(keepalive.ClientParameters{
|
||||||
Time: 10 * time.Second,
|
Time: 10 * time.Second,
|
||||||
|
|||||||
13
management/server/migration/README.md
Normal file
13
management/server/migration/README.md
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
## Migration from Store v2 to Store v2
|
||||||
|
|
||||||
|
Previously Account.Id was an Auth0 user id.
|
||||||
|
Conversion moves user id to Account.CreatedBy and generates a new Account.Id using xid.
|
||||||
|
It also adds a User with id = old Account.Id with a role Admin.
|
||||||
|
|
||||||
|
To start a conversion simply run the command below providing your current Wiretrustee Management datadir (where store.json file is located)
|
||||||
|
and a new data directory location (where a converted store.js will be stored):
|
||||||
|
```shell
|
||||||
|
./migration --oldDir /var/wiretrustee/datadir --newDir /var/wiretrustee/newdatadir/
|
||||||
|
```
|
||||||
|
|
||||||
|
Afterwards you can run the Management service providing ```/var/wiretrustee/newdatadir/ ``` as a datadir.
|
||||||
56
management/server/migration/convert_accounts.go
Normal file
56
management/server/migration/convert_accounts.go
Normal file
@@ -0,0 +1,56 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"flag"
|
||||||
|
"fmt"
|
||||||
|
"github.com/rs/xid"
|
||||||
|
"github.com/wiretrustee/wiretrustee/management/server"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
|
||||||
|
oldDir := flag.String("oldDir", "old store directory", "/var/wiretrustee/datadir")
|
||||||
|
newDir := flag.String("newDir", "new store directory", "/var/wiretrustee/newdatadir")
|
||||||
|
|
||||||
|
flag.Parse()
|
||||||
|
|
||||||
|
oldStore, err := server.NewStore(*oldDir)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
newStore, err := server.NewStore(*newDir)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = Convert(oldStore, newStore)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
fmt.Println("successfully converted")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Convert converts old store ato a new store
|
||||||
|
// Previously Account.Id was an Auth0 user id
|
||||||
|
// Conversion moved user id to Account.CreatedBy and generated a new Account.Id using xid
|
||||||
|
// It also adds a User with id = old Account.Id with a role Admin
|
||||||
|
func Convert(oldStore *server.FileStore, newStore *server.FileStore) error {
|
||||||
|
for _, account := range oldStore.Accounts {
|
||||||
|
accountCopy := account.Copy()
|
||||||
|
accountCopy.Id = xid.New().String()
|
||||||
|
accountCopy.CreatedBy = account.Id
|
||||||
|
accountCopy.Users[account.Id] = &server.User{
|
||||||
|
Id: account.Id,
|
||||||
|
Role: server.UserRoleAdmin,
|
||||||
|
}
|
||||||
|
|
||||||
|
err := newStore.SaveAccount(accountCopy)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
76
management/server/migration/convert_accounts_test.go
Normal file
76
management/server/migration/convert_accounts_test.go
Normal file
@@ -0,0 +1,76 @@
|
|||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/wiretrustee/wiretrustee/management/server"
|
||||||
|
"github.com/wiretrustee/wiretrustee/util"
|
||||||
|
"path/filepath"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestConvertAccounts(t *testing.T) {
|
||||||
|
|
||||||
|
storeDir := t.TempDir()
|
||||||
|
|
||||||
|
err := util.CopyFileContents("../testdata/storev1.json", filepath.Join(storeDir, "store.json"))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
store, err := server.NewStore(storeDir)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
convertedStore, err := server.NewStore(filepath.Join(storeDir, "converted"))
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
err = Convert(store, convertedStore)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(store.Accounts) != len(convertedStore.Accounts) {
|
||||||
|
t.Errorf("expecting the same number of accounts after conversion")
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, account := range store.Accounts {
|
||||||
|
convertedAccount, err := convertedStore.GetUserAccount(account.Id)
|
||||||
|
if err != nil || convertedAccount == nil {
|
||||||
|
t.Errorf("expecting Account %s to be converted", account.Id)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if convertedAccount.CreatedBy != account.Id {
|
||||||
|
t.Errorf("expecting converted Account.CreatedBy field to be equal to the old Account.Id")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if convertedAccount.Id == account.Id {
|
||||||
|
t.Errorf("expecting converted Account.Id to be different from Account.Id")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if len(convertedAccount.Users) != 1 {
|
||||||
|
t.Errorf("expecting converted Account.Users to be of size 1")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
user := convertedAccount.Users[account.Id]
|
||||||
|
if user == nil {
|
||||||
|
t.Errorf("expecting to find a user in converted Account.Users")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if user.Role != server.UserRoleAdmin {
|
||||||
|
t.Errorf("expecting to find a user in converted Account.Users with a role Admin")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
for peerId := range account.Peers {
|
||||||
|
convertedPeer := convertedAccount.Peers[peerId]
|
||||||
|
if convertedPeer == nil {
|
||||||
|
t.Errorf("expecting Account Peer of StoreV1 to be found in StoreV2")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
@@ -17,6 +17,14 @@ type Network struct {
|
|||||||
Dns string
|
Dns string
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (n *Network) Copy() *Network {
|
||||||
|
return &Network{
|
||||||
|
Id: n.Id,
|
||||||
|
Net: n.Net,
|
||||||
|
Dns: n.Dns,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// AllocatePeerIP pics an available IP from an net.IPNet.
|
// AllocatePeerIP pics an available IP from an net.IPNet.
|
||||||
// This method considers already taken IPs and reuses IPs if there are gaps in takenIps
|
// This method considers already taken IPs and reuses IPs if there are gaps in takenIps
|
||||||
// E.g. if ipNet=100.30.0.0/16 and takenIps=[100.30.0.1, 100.30.0.5] then the result would be 100.30.0.2
|
// E.g. if ipNet=100.30.0.0/16 and takenIps=[100.30.0.1, 100.30.0.5] then the result would be 100.30.0.2
|
||||||
|
|||||||
@@ -206,7 +206,6 @@ func (am *AccountManager) GetPeersForAPeer(peerKey string) ([]*Peer, error) {
|
|||||||
// Each Account has a list of pre-authorised SetupKey and if no Account has a given key err wit ha code codes.Unauthenticated
|
// Each Account has a list of pre-authorised SetupKey and if no Account has a given key err wit ha code codes.Unauthenticated
|
||||||
// will be returned, meaning the key is invalid
|
// will be returned, meaning the key is invalid
|
||||||
// Each new Peer will be assigned a new next net.IP from the Account.Network and Account.Network.LastIP will be updated (IP's are not reused).
|
// Each new Peer will be assigned a new next net.IP from the Account.Network and Account.Network.LastIP will be updated (IP's are not reused).
|
||||||
// If the specified setupKey is empty then a new Account will be created //todo remove this part
|
|
||||||
// The peer property is just a placeholder for the Peer properties to pass further
|
// The peer property is just a placeholder for the Peer properties to pass further
|
||||||
func (am *AccountManager) AddPeer(setupKey string, peer Peer) (*Peer, error) {
|
func (am *AccountManager) AddPeer(setupKey string, peer Peer) (*Peer, error) {
|
||||||
am.mux.Lock()
|
am.mux.Lock()
|
||||||
@@ -218,8 +217,8 @@ func (am *AccountManager) AddPeer(setupKey string, peer Peer) (*Peer, error) {
|
|||||||
var err error
|
var err error
|
||||||
var sk *SetupKey
|
var sk *SetupKey
|
||||||
if len(upperKey) == 0 {
|
if len(upperKey) == 0 {
|
||||||
// Empty setup key, create a new account for it.
|
// Empty setup key, fail
|
||||||
account, sk = newAccount()
|
return nil, status.Errorf(codes.InvalidArgument, "empty setupKey %s", setupKey)
|
||||||
} else {
|
} else {
|
||||||
account, err = am.Store.GetAccountBySetupKey(upperKey)
|
account, err = am.Store.GetAccountBySetupKey(upperKey)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|||||||
@@ -5,6 +5,7 @@ type Store interface {
|
|||||||
DeletePeer(accountId string, peerKey string) (*Peer, error)
|
DeletePeer(accountId string, peerKey string) (*Peer, error)
|
||||||
SavePeer(accountId string, peer *Peer) error
|
SavePeer(accountId string, peer *Peer) error
|
||||||
GetAccount(accountId string) (*Account, error)
|
GetAccount(accountId string) (*Account, error)
|
||||||
|
GetUserAccount(userId string) (*Account, error)
|
||||||
GetAccountPeers(accountId string) ([]*Peer, error)
|
GetAccountPeers(accountId string) ([]*Peer, error)
|
||||||
GetPeerAccount(peerKey string) (*Account, error)
|
GetPeerAccount(peerKey string) (*Account, error)
|
||||||
GetAccountBySetupKey(setupKey string) (*Account, error)
|
GetAccountBySetupKey(setupKey string) (*Account, error)
|
||||||
|
|||||||
12
management/server/testdata/store.json
vendored
12
management/server/testdata/store.json
vendored
@@ -22,7 +22,17 @@
|
|||||||
},
|
},
|
||||||
"Dns": null
|
"Dns": null
|
||||||
},
|
},
|
||||||
"Peers": {}
|
"Peers": {},
|
||||||
|
"Users": {
|
||||||
|
"edafee4e-63fb-11ec-90d6-0242ac120003": {
|
||||||
|
"Id": "edafee4e-63fb-11ec-90d6-0242ac120003",
|
||||||
|
"Role": "admin"
|
||||||
|
},
|
||||||
|
"f4f6d672-63fb-11ec-90d6-0242ac120003": {
|
||||||
|
"Id": "f4f6d672-63fb-11ec-90d6-0242ac120003",
|
||||||
|
"Role": "user"
|
||||||
|
}
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
154
management/server/testdata/storev1.json
vendored
Normal file
154
management/server/testdata/storev1.json
vendored
Normal file
@@ -0,0 +1,154 @@
|
|||||||
|
{
|
||||||
|
"Accounts": {
|
||||||
|
"auth0|61bf82ddeab084006aa1bccd": {
|
||||||
|
"Id": "auth0|61bf82ddeab084006aa1bccd",
|
||||||
|
"SetupKeys": {
|
||||||
|
"1B2B50B0-B3E8-4B0C-A426-525EDB8481BD": {
|
||||||
|
"Id": "831727121",
|
||||||
|
"Key": "1B2B50B0-B3E8-4B0C-A426-525EDB8481BD",
|
||||||
|
"Name": "One-off key",
|
||||||
|
"Type": "one-off",
|
||||||
|
"CreatedAt": "2021-12-24T16:09:45.926075752+01:00",
|
||||||
|
"ExpiresAt": "2022-01-23T16:09:45.926075752+01:00",
|
||||||
|
"Revoked": false,
|
||||||
|
"UsedTimes": 1,
|
||||||
|
"LastUsed": "2021-12-24T16:12:45.763424077+01:00"
|
||||||
|
},
|
||||||
|
"EB51E9EB-A11F-4F6E-8E49-C982891B405A": {
|
||||||
|
"Id": "1769568301",
|
||||||
|
"Key": "EB51E9EB-A11F-4F6E-8E49-C982891B405A",
|
||||||
|
"Name": "Default key",
|
||||||
|
"Type": "reusable",
|
||||||
|
"CreatedAt": "2021-12-24T16:09:45.926073628+01:00",
|
||||||
|
"ExpiresAt": "2022-01-23T16:09:45.926073628+01:00",
|
||||||
|
"Revoked": false,
|
||||||
|
"UsedTimes": 1,
|
||||||
|
"LastUsed": "2021-12-24T16:13:06.236748538+01:00"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"Network": {
|
||||||
|
"Id": "a443c07a-5765-4a78-97fc-390d9c1d0e49",
|
||||||
|
"Net": {
|
||||||
|
"IP": "100.64.0.0",
|
||||||
|
"Mask": "/8AAAA=="
|
||||||
|
},
|
||||||
|
"Dns": ""
|
||||||
|
},
|
||||||
|
"Peers": {
|
||||||
|
"oMNaI8qWi0CyclSuwGR++SurxJyM3pQEiPEHwX8IREo=": {
|
||||||
|
"Key": "oMNaI8qWi0CyclSuwGR++SurxJyM3pQEiPEHwX8IREo=",
|
||||||
|
"SetupKey": "EB51E9EB-A11F-4F6E-8E49-C982891B405A",
|
||||||
|
"IP": "100.64.0.2",
|
||||||
|
"Meta": {
|
||||||
|
"Hostname": "braginini",
|
||||||
|
"GoOS": "linux",
|
||||||
|
"Kernel": "Linux",
|
||||||
|
"Core": "21.04",
|
||||||
|
"Platform": "x86_64",
|
||||||
|
"OS": "Ubuntu",
|
||||||
|
"WtVersion": ""
|
||||||
|
},
|
||||||
|
"Name": "braginini",
|
||||||
|
"Status": {
|
||||||
|
"LastSeen": "2021-12-24T16:13:11.244342541+01:00",
|
||||||
|
"Connected": false
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"xlx9/9D8+ibnRiIIB8nHGMxGOzxV17r8ShPHgi4aYSM=": {
|
||||||
|
"Key": "xlx9/9D8+ibnRiIIB8nHGMxGOzxV17r8ShPHgi4aYSM=",
|
||||||
|
"SetupKey": "1B2B50B0-B3E8-4B0C-A426-525EDB8481BD",
|
||||||
|
"IP": "100.64.0.1",
|
||||||
|
"Meta": {
|
||||||
|
"Hostname": "braginini",
|
||||||
|
"GoOS": "linux",
|
||||||
|
"Kernel": "Linux",
|
||||||
|
"Core": "21.04",
|
||||||
|
"Platform": "x86_64",
|
||||||
|
"OS": "Ubuntu",
|
||||||
|
"WtVersion": ""
|
||||||
|
},
|
||||||
|
"Name": "braginini",
|
||||||
|
"Status": {
|
||||||
|
"LastSeen": "2021-12-24T16:12:49.089339333+01:00",
|
||||||
|
"Connected": false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"google-oauth2|103201118415301331038": {
|
||||||
|
"Id": "google-oauth2|103201118415301331038",
|
||||||
|
"SetupKeys": {
|
||||||
|
"5AFB60DB-61F2-4251-8E11-494847EE88E9": {
|
||||||
|
"Id": "2485964613",
|
||||||
|
"Key": "5AFB60DB-61F2-4251-8E11-494847EE88E9",
|
||||||
|
"Name": "Default key",
|
||||||
|
"Type": "reusable",
|
||||||
|
"CreatedAt": "2021-12-24T16:10:02.238476+01:00",
|
||||||
|
"ExpiresAt": "2022-01-23T16:10:02.238476+01:00",
|
||||||
|
"Revoked": false,
|
||||||
|
"UsedTimes": 1,
|
||||||
|
"LastUsed": "2021-12-24T16:12:05.994307717+01:00"
|
||||||
|
},
|
||||||
|
"A72E4DC2-00DE-4542-8A24-62945438104E": {
|
||||||
|
"Id": "3504804807",
|
||||||
|
"Key": "A72E4DC2-00DE-4542-8A24-62945438104E",
|
||||||
|
"Name": "One-off key",
|
||||||
|
"Type": "one-off",
|
||||||
|
"CreatedAt": "2021-12-24T16:10:02.238478209+01:00",
|
||||||
|
"ExpiresAt": "2022-01-23T16:10:02.238478209+01:00",
|
||||||
|
"Revoked": false,
|
||||||
|
"UsedTimes": 1,
|
||||||
|
"LastUsed": "2021-12-24T16:11:27.015741738+01:00"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"Network": {
|
||||||
|
"Id": "b6d0b152-364e-40c1-a8a1-fa7bcac2267f",
|
||||||
|
"Net": {
|
||||||
|
"IP": "100.64.0.0",
|
||||||
|
"Mask": "/8AAAA=="
|
||||||
|
},
|
||||||
|
"Dns": ""
|
||||||
|
},
|
||||||
|
"Peers": {
|
||||||
|
"6kjbmVq1hmucVzvBXo5OucY5OYv+jSsB1jUTLq291Dw=": {
|
||||||
|
"Key": "6kjbmVq1hmucVzvBXo5OucY5OYv+jSsB1jUTLq291Dw=",
|
||||||
|
"SetupKey": "5AFB60DB-61F2-4251-8E11-494847EE88E9",
|
||||||
|
"IP": "100.64.0.2",
|
||||||
|
"Meta": {
|
||||||
|
"Hostname": "braginini",
|
||||||
|
"GoOS": "linux",
|
||||||
|
"Kernel": "Linux",
|
||||||
|
"Core": "21.04",
|
||||||
|
"Platform": "x86_64",
|
||||||
|
"OS": "Ubuntu",
|
||||||
|
"WtVersion": ""
|
||||||
|
},
|
||||||
|
"Name": "braginini",
|
||||||
|
"Status": {
|
||||||
|
"LastSeen": "2021-12-24T16:12:05.994305438+01:00",
|
||||||
|
"Connected": false
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"Ok+5QMdt/UjoktNOvicGYj+IX2g98p+0N2PJ3vJ45RI=": {
|
||||||
|
"Key": "Ok+5QMdt/UjoktNOvicGYj+IX2g98p+0N2PJ3vJ45RI=",
|
||||||
|
"SetupKey": "A72E4DC2-00DE-4542-8A24-62945438104E",
|
||||||
|
"IP": "100.64.0.1",
|
||||||
|
"Meta": {
|
||||||
|
"Hostname": "braginini",
|
||||||
|
"GoOS": "linux",
|
||||||
|
"Kernel": "Linux",
|
||||||
|
"Core": "21.04",
|
||||||
|
"Platform": "x86_64",
|
||||||
|
"OS": "Ubuntu",
|
||||||
|
"WtVersion": ""
|
||||||
|
},
|
||||||
|
"Name": "braginini",
|
||||||
|
"Status": {
|
||||||
|
"LastSeen": "2021-12-24T16:11:27.015739803+01:00",
|
||||||
|
"Connected": false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
71
management/server/user.go
Normal file
71
management/server/user.go
Normal file
@@ -0,0 +1,71 @@
|
|||||||
|
package server
|
||||||
|
|
||||||
|
import (
|
||||||
|
"google.golang.org/grpc/codes"
|
||||||
|
"google.golang.org/grpc/status"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
UserRoleAdmin UserRole = "admin"
|
||||||
|
UserRoleUser UserRole = "user"
|
||||||
|
)
|
||||||
|
|
||||||
|
// UserRole is the role of the User
|
||||||
|
type UserRole string
|
||||||
|
|
||||||
|
// User represents a user of the system
|
||||||
|
type User struct {
|
||||||
|
Id string
|
||||||
|
Role UserRole
|
||||||
|
}
|
||||||
|
|
||||||
|
func (u *User) Copy() *User {
|
||||||
|
return &User{
|
||||||
|
Id: u.Id,
|
||||||
|
Role: u.Role,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewUser creates a new user
|
||||||
|
func NewUser(id string, role UserRole) *User {
|
||||||
|
return &User{
|
||||||
|
Id: id,
|
||||||
|
Role: role,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewAdminUser creates a new user with role UserRoleAdmin
|
||||||
|
func NewAdminUser(id string) *User {
|
||||||
|
return NewUser(id, UserRoleAdmin)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetOrCreateAccountByUser returns an existing account for a given user id or creates a new one if doesn't exist
|
||||||
|
func (am *AccountManager) GetOrCreateAccountByUser(userId string) (*Account, error) {
|
||||||
|
am.mux.Lock()
|
||||||
|
defer am.mux.Unlock()
|
||||||
|
|
||||||
|
account, err := am.Store.GetUserAccount(userId)
|
||||||
|
if err != nil {
|
||||||
|
if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {
|
||||||
|
account, _ = newAccount(userId)
|
||||||
|
account.Users[userId] = NewAdminUser(userId)
|
||||||
|
err = am.Store.SaveAccount(account)
|
||||||
|
if err != nil {
|
||||||
|
return nil, status.Errorf(codes.Internal, "failed creating account")
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// other error
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return account, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetAccountByUser returns an existing account for a given user id, NotFound if account couldn't be found
|
||||||
|
func (am *AccountManager) GetAccountByUser(userId string) (*Account, error) {
|
||||||
|
am.mux.Lock()
|
||||||
|
defer am.mux.Unlock()
|
||||||
|
|
||||||
|
return am.Store.GetUserAccount(userId)
|
||||||
|
}
|
||||||
@@ -11,7 +11,9 @@ import (
|
|||||||
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
|
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
|
||||||
"google.golang.org/grpc"
|
"google.golang.org/grpc"
|
||||||
"google.golang.org/grpc/codes"
|
"google.golang.org/grpc/codes"
|
||||||
|
"google.golang.org/grpc/connectivity"
|
||||||
"google.golang.org/grpc/credentials"
|
"google.golang.org/grpc/credentials"
|
||||||
|
"google.golang.org/grpc/credentials/insecure"
|
||||||
"google.golang.org/grpc/keepalive"
|
"google.golang.org/grpc/keepalive"
|
||||||
"google.golang.org/grpc/metadata"
|
"google.golang.org/grpc/metadata"
|
||||||
"google.golang.org/grpc/status"
|
"google.golang.org/grpc/status"
|
||||||
@@ -23,6 +25,12 @@ import (
|
|||||||
|
|
||||||
// A set of tools to exchange connection details (Wireguard endpoints) with the remote peer.
|
// A set of tools to exchange connection details (Wireguard endpoints) with the remote peer.
|
||||||
|
|
||||||
|
// Status is the status of the client
|
||||||
|
type Status string
|
||||||
|
|
||||||
|
const StreamConnected Status = "Connected"
|
||||||
|
const StreamDisconnected Status = "Disconnected"
|
||||||
|
|
||||||
// Client Wraps the Signal Exchange Service gRpc client
|
// Client Wraps the Signal Exchange Service gRpc client
|
||||||
type Client struct {
|
type Client struct {
|
||||||
key wgtypes.Key
|
key wgtypes.Key
|
||||||
@@ -30,8 +38,19 @@ type Client struct {
|
|||||||
signalConn *grpc.ClientConn
|
signalConn *grpc.ClientConn
|
||||||
ctx context.Context
|
ctx context.Context
|
||||||
stream proto.SignalExchange_ConnectStreamClient
|
stream proto.SignalExchange_ConnectStreamClient
|
||||||
//waiting group to notify once stream is connected
|
// connectedCh used to notify goroutines waiting for the connection to the Signal stream
|
||||||
connWg *sync.WaitGroup //todo use a channel instead??
|
connectedCh chan struct{}
|
||||||
|
mux sync.Mutex
|
||||||
|
// StreamConnected indicates whether this client is StreamConnected to the Signal stream
|
||||||
|
status Status
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) StreamConnected() bool {
|
||||||
|
return c.status == StreamConnected
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) GetStatus() Status {
|
||||||
|
return c.status
|
||||||
}
|
}
|
||||||
|
|
||||||
// Close Closes underlying connections to the Signal Exchange
|
// Close Closes underlying connections to the Signal Exchange
|
||||||
@@ -42,7 +61,7 @@ func (c *Client) Close() error {
|
|||||||
// NewClient creates a new Signal client
|
// NewClient creates a new Signal client
|
||||||
func NewClient(ctx context.Context, addr string, key wgtypes.Key, tlsEnabled bool) (*Client, error) {
|
func NewClient(ctx context.Context, addr string, key wgtypes.Key, tlsEnabled bool) (*Client, error) {
|
||||||
|
|
||||||
transportOption := grpc.WithInsecure()
|
transportOption := grpc.WithTransportCredentials(insecure.NewCredentials())
|
||||||
|
|
||||||
if tlsEnabled {
|
if tlsEnabled {
|
||||||
transportOption = grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{}))
|
transportOption = grpc.WithTransportCredentials(credentials.NewTLS(&tls.Config{}))
|
||||||
@@ -65,13 +84,13 @@ func NewClient(ctx context.Context, addr string, key wgtypes.Key, tlsEnabled boo
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
var wg sync.WaitGroup
|
|
||||||
return &Client{
|
return &Client{
|
||||||
realClient: proto.NewSignalExchangeClient(conn),
|
realClient: proto.NewSignalExchangeClient(conn),
|
||||||
ctx: ctx,
|
ctx: ctx,
|
||||||
signalConn: conn,
|
signalConn: conn,
|
||||||
key: key,
|
key: key,
|
||||||
connWg: &wg,
|
mux: sync.Mutex{},
|
||||||
|
status: StreamDisconnected,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -82,7 +101,7 @@ func defaultBackoff(ctx context.Context) backoff.BackOff {
|
|||||||
RandomizationFactor: backoff.DefaultRandomizationFactor,
|
RandomizationFactor: backoff.DefaultRandomizationFactor,
|
||||||
Multiplier: backoff.DefaultMultiplier,
|
Multiplier: backoff.DefaultMultiplier,
|
||||||
MaxInterval: 10 * time.Second,
|
MaxInterval: 10 * time.Second,
|
||||||
MaxElapsedTime: 30 * time.Minute, //stop after an 30 min of trying, the error will be propagated to the general retry of the client
|
MaxElapsedTime: 12 * time.Hour, //stop after 12 hours of trying, the error will be propagated to the general retry of the client
|
||||||
Stop: backoff.Stop,
|
Stop: backoff.Stop,
|
||||||
Clock: backoff.SystemClock,
|
Clock: backoff.SystemClock,
|
||||||
}, ctx)
|
}, ctx)
|
||||||
@@ -91,25 +110,37 @@ func defaultBackoff(ctx context.Context) backoff.BackOff {
|
|||||||
|
|
||||||
// Receive Connects to the Signal Exchange message stream and starts receiving messages.
|
// Receive Connects to the Signal Exchange message stream and starts receiving messages.
|
||||||
// The messages will be handled by msgHandler function provided.
|
// The messages will be handled by msgHandler function provided.
|
||||||
// This function runs a goroutine underneath and reconnects to the Signal Exchange if errors occur (e.g. Exchange restart)
|
// This function is blocking and reconnects to the Signal Exchange if errors occur (e.g. Exchange restart)
|
||||||
// The key is the identifier of our Peer (could be Wireguard public key)
|
// The connection retry logic will try to reconnect for 30 min and if wasn't successful will propagate the error to the function caller.
|
||||||
func (c *Client) Receive(msgHandler func(msg *proto.Message) error) {
|
func (c *Client) Receive(msgHandler func(msg *proto.Message) error) error {
|
||||||
c.connWg.Add(1)
|
|
||||||
go func() {
|
|
||||||
|
|
||||||
var backOff = defaultBackoff(c.ctx)
|
var backOff = defaultBackoff(c.ctx)
|
||||||
|
|
||||||
operation := func() error {
|
operation := func() error {
|
||||||
|
|
||||||
|
c.notifyStreamDisconnected()
|
||||||
|
|
||||||
|
log.Debugf("signal connection state %v", c.signalConn.GetState())
|
||||||
|
if !c.Ready() {
|
||||||
|
return fmt.Errorf("no connection to signal")
|
||||||
|
}
|
||||||
|
|
||||||
|
// connect to Signal stream identifying ourselves with a public Wireguard key
|
||||||
|
// todo once the key rotation logic has been implemented, consider changing to some other identifier (received from management)
|
||||||
stream, err := c.connect(c.key.PublicKey().String())
|
stream, err := c.connect(c.key.PublicKey().String())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warnf("disconnected from the Signal Exchange due to an error: %v", err)
|
log.Warnf("disconnected from the Signal Exchange due to an error: %v", err)
|
||||||
c.connWg.Add(1)
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
c.notifyStreamConnected()
|
||||||
|
|
||||||
|
log.Infof("connected to the Signal Service stream")
|
||||||
|
|
||||||
|
// start receiving messages from the Signal stream (from other peers through signal)
|
||||||
err = c.receive(stream, msgHandler)
|
err = c.receive(stream, msgHandler)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
log.Warnf("disconnected from the Signal Exchange due to an error: %v", err)
|
||||||
backOff.Reset()
|
backOff.Reset()
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -120,9 +151,35 @@ func (c *Client) Receive(msgHandler func(msg *proto.Message) error) {
|
|||||||
err := backoff.Retry(operation, backOff)
|
err := backoff.Retry(operation, backOff)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("exiting Signal Service connection retry loop due to unrecoverable error: %s", err)
|
log.Errorf("exiting Signal Service connection retry loop due to unrecoverable error: %s", err)
|
||||||
return
|
return err
|
||||||
}
|
}
|
||||||
}()
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
func (c *Client) notifyStreamDisconnected() {
|
||||||
|
c.mux.Lock()
|
||||||
|
defer c.mux.Unlock()
|
||||||
|
c.status = StreamDisconnected
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) notifyStreamConnected() {
|
||||||
|
c.mux.Lock()
|
||||||
|
defer c.mux.Unlock()
|
||||||
|
c.status = StreamConnected
|
||||||
|
if c.connectedCh != nil {
|
||||||
|
// there are goroutines waiting on this channel -> release them
|
||||||
|
close(c.connectedCh)
|
||||||
|
c.connectedCh = nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Client) getStreamStatusChan() <-chan struct{} {
|
||||||
|
c.mux.Lock()
|
||||||
|
defer c.mux.Unlock()
|
||||||
|
if c.connectedCh == nil {
|
||||||
|
c.connectedCh = make(chan struct{})
|
||||||
|
}
|
||||||
|
return c.connectedCh
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *Client) connect(key string) (proto.SignalExchange_ConnectStreamClient, error) {
|
func (c *Client) connect(key string) (proto.SignalExchange_ConnectStreamClient, error) {
|
||||||
@@ -147,24 +204,37 @@ func (c *Client) connect(key string) (proto.SignalExchange_ConnectStreamClient,
|
|||||||
if len(registered) == 0 {
|
if len(registered) == 0 {
|
||||||
return nil, fmt.Errorf("didn't receive a registration header from the Signal server whille connecting to the streams")
|
return nil, fmt.Errorf("didn't receive a registration header from the Signal server whille connecting to the streams")
|
||||||
}
|
}
|
||||||
//connection established we are good to use the stream
|
|
||||||
c.connWg.Done()
|
|
||||||
|
|
||||||
log.Infof("connected to the Signal Exchange Stream")
|
|
||||||
|
|
||||||
return stream, nil
|
return stream, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// WaitConnected waits until the client is connected to the message stream
|
// Ready indicates whether the client is okay and Ready to be used
|
||||||
func (c *Client) WaitConnected() {
|
// for now it just checks whether gRPC connection to the service is in state Ready
|
||||||
c.connWg.Wait()
|
func (c *Client) Ready() bool {
|
||||||
|
return c.signalConn.GetState() == connectivity.Ready || c.signalConn.GetState() == connectivity.Idle
|
||||||
|
}
|
||||||
|
|
||||||
|
// WaitStreamConnected waits until the client is connected to the Signal stream
|
||||||
|
func (c *Client) WaitStreamConnected() {
|
||||||
|
|
||||||
|
if c.status == StreamConnected {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
ch := c.getStreamStatusChan()
|
||||||
|
select {
|
||||||
|
case <-c.ctx.Done():
|
||||||
|
case <-ch:
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// SendToStream sends a message to the remote Peer through the Signal Exchange using established stream connection to the Signal Server
|
// SendToStream sends a message to the remote Peer through the Signal Exchange using established stream connection to the Signal Server
|
||||||
// The Client.Receive method must be called before sending messages to establish initial connection to the Signal Exchange
|
// The Client.Receive method must be called before sending messages to establish initial connection to the Signal Exchange
|
||||||
// Client.connWg can be used to wait
|
// Client.connWg can be used to wait
|
||||||
func (c *Client) SendToStream(msg *proto.EncryptedMessage) error {
|
func (c *Client) SendToStream(msg *proto.EncryptedMessage) error {
|
||||||
|
if !c.Ready() {
|
||||||
|
return fmt.Errorf("no connection to signal")
|
||||||
|
}
|
||||||
if c.stream == nil {
|
if c.stream == nil {
|
||||||
return fmt.Errorf("connection to the Signal Exchnage has not been established yet. Please call Client.Receive before sending messages")
|
return fmt.Errorf("connection to the Signal Exchnage has not been established yet. Please call Client.Receive before sending messages")
|
||||||
}
|
}
|
||||||
@@ -221,13 +291,19 @@ func (c *Client) encryptMessage(msg *proto.Message) (*proto.EncryptedMessage, er
|
|||||||
// Send sends a message to the remote Peer through the Signal Exchange.
|
// Send sends a message to the remote Peer through the Signal Exchange.
|
||||||
func (c *Client) Send(msg *proto.Message) error {
|
func (c *Client) Send(msg *proto.Message) error {
|
||||||
|
|
||||||
|
if !c.Ready() {
|
||||||
|
return fmt.Errorf("no connection to signal")
|
||||||
|
}
|
||||||
|
|
||||||
encryptedMessage, err := c.encryptMessage(msg)
|
encryptedMessage, err := c.encryptMessage(msg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
_, err = c.realClient.Send(context.TODO(), encryptedMessage)
|
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||||
|
defer cancel()
|
||||||
|
_, err = c.realClient.Send(ctx, encryptedMessage)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("error while sending message to peer [%s] [error: %v]", msg.RemoteKey, err)
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -244,10 +320,10 @@ func (c *Client) receive(stream proto.SignalExchange_ConnectStreamClient,
|
|||||||
log.Warnf("stream canceled (usually indicates shutdown)")
|
log.Warnf("stream canceled (usually indicates shutdown)")
|
||||||
return err
|
return err
|
||||||
} else if s.Code() == codes.Unavailable {
|
} else if s.Code() == codes.Unavailable {
|
||||||
log.Warnf("server has been stopped")
|
log.Warnf("Signal Service is unavailable")
|
||||||
return err
|
return err
|
||||||
} else if err == io.EOF {
|
} else if err == io.EOF {
|
||||||
log.Warnf("stream closed by server")
|
log.Warnf("Signal Service stream closed by server")
|
||||||
return err
|
return err
|
||||||
} else if err != nil {
|
} else if err != nil {
|
||||||
return err
|
return err
|
||||||
|
|||||||
@@ -9,6 +9,7 @@ import (
|
|||||||
"github.com/wiretrustee/wiretrustee/signal/server"
|
"github.com/wiretrustee/wiretrustee/signal/server"
|
||||||
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
|
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
|
||||||
"google.golang.org/grpc"
|
"google.golang.org/grpc"
|
||||||
|
"google.golang.org/grpc/credentials/insecure"
|
||||||
"google.golang.org/grpc/keepalive"
|
"google.golang.org/grpc/keepalive"
|
||||||
"google.golang.org/grpc/metadata"
|
"google.golang.org/grpc/metadata"
|
||||||
"net"
|
"net"
|
||||||
@@ -48,17 +49,24 @@ var _ = Describe("Client", func() {
|
|||||||
// connect PeerA to Signal
|
// connect PeerA to Signal
|
||||||
keyA, _ := wgtypes.GenerateKey()
|
keyA, _ := wgtypes.GenerateKey()
|
||||||
clientA := createSignalClient(addr, keyA)
|
clientA := createSignalClient(addr, keyA)
|
||||||
clientA.Receive(func(msg *sigProto.Message) error {
|
go func() {
|
||||||
|
err := clientA.Receive(func(msg *sigProto.Message) error {
|
||||||
receivedOnA = msg.GetBody().GetPayload()
|
receivedOnA = msg.GetBody().GetPayload()
|
||||||
msgReceived.Done()
|
msgReceived.Done()
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
clientA.WaitConnected()
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
clientA.WaitStreamConnected()
|
||||||
|
|
||||||
// connect PeerB to Signal
|
// connect PeerB to Signal
|
||||||
keyB, _ := wgtypes.GenerateKey()
|
keyB, _ := wgtypes.GenerateKey()
|
||||||
clientB := createSignalClient(addr, keyB)
|
clientB := createSignalClient(addr, keyB)
|
||||||
clientB.Receive(func(msg *sigProto.Message) error {
|
|
||||||
|
go func() {
|
||||||
|
err := clientB.Receive(func(msg *sigProto.Message) error {
|
||||||
receivedOnB = msg.GetBody().GetPayload()
|
receivedOnB = msg.GetBody().GetPayload()
|
||||||
err := clientB.Send(&sigProto.Message{
|
err := clientB.Send(&sigProto.Message{
|
||||||
Key: keyB.PublicKey().String(),
|
Key: keyB.PublicKey().String(),
|
||||||
@@ -71,7 +79,12 @@ var _ = Describe("Client", func() {
|
|||||||
msgReceived.Done()
|
msgReceived.Done()
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
clientB.WaitConnected()
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
clientB.WaitStreamConnected()
|
||||||
|
|
||||||
// PeerA initiates ping-pong
|
// PeerA initiates ping-pong
|
||||||
err := clientA.Send(&sigProto.Message{
|
err := clientA.Send(&sigProto.Message{
|
||||||
@@ -100,11 +113,15 @@ var _ = Describe("Client", func() {
|
|||||||
|
|
||||||
key, _ := wgtypes.GenerateKey()
|
key, _ := wgtypes.GenerateKey()
|
||||||
client := createSignalClient(addr, key)
|
client := createSignalClient(addr, key)
|
||||||
client.Receive(func(msg *sigProto.Message) error {
|
go func() {
|
||||||
|
err := client.Receive(func(msg *sigProto.Message) error {
|
||||||
return nil
|
return nil
|
||||||
})
|
})
|
||||||
client.WaitConnected()
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
client.WaitStreamConnected()
|
||||||
Expect(client).NotTo(BeNil())
|
Expect(client).NotTo(BeNil())
|
||||||
})
|
})
|
||||||
})
|
})
|
||||||
@@ -154,7 +171,8 @@ func createSignalClient(addr string, key wgtypes.Key) *Client {
|
|||||||
|
|
||||||
func createRawSignalClient(addr string) sigProto.SignalExchangeClient {
|
func createRawSignalClient(addr string) sigProto.SignalExchangeClient {
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
conn, err := grpc.DialContext(ctx, addr, grpc.WithInsecure(),
|
conn, err := grpc.DialContext(ctx, addr,
|
||||||
|
grpc.WithTransportCredentials(insecure.NewCredentials()),
|
||||||
grpc.WithBlock(),
|
grpc.WithBlock(),
|
||||||
grpc.WithKeepaliveParams(keepalive.ClientParameters{
|
grpc.WithKeepaliveParams(keepalive.ClientParameters{
|
||||||
Time: 3 * time.Second,
|
Time: 3 * time.Second,
|
||||||
|
|||||||
@@ -74,10 +74,6 @@ var (
|
|||||||
log.Fatalf("failed to listen: %v", err)
|
log.Fatalf("failed to listen: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err != nil {
|
|
||||||
log.Fatalf("failed to listen: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
proto.RegisterSignalExchangeServer(grpcServer, server.NewServer())
|
proto.RegisterSignalExchangeServer(grpcServer, server.NewServer())
|
||||||
log.Printf("started server: localhost:%v", signalPort)
|
log.Printf("started server: localhost:%v", signalPort)
|
||||||
if err := grpcServer.Serve(lis); err != nil {
|
if err := grpcServer.Serve(lis); err != nil {
|
||||||
|
|||||||
@@ -55,7 +55,7 @@ func (registry *Registry) Register(peer *Peer) {
|
|||||||
// can be that peer already exists but it is fine (e.g. reconnect)
|
// can be that peer already exists but it is fine (e.g. reconnect)
|
||||||
// todo investigate what happens to the old peer (especially Peer.Stream) when we override it
|
// todo investigate what happens to the old peer (especially Peer.Stream) when we override it
|
||||||
registry.Peers.Store(peer.Id, peer)
|
registry.Peers.Store(peer.Id, peer)
|
||||||
log.Printf("registered peer [%s]", peer.Id)
|
log.Debugf("peer registered [%s]", peer.Id)
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -63,7 +63,7 @@ func (registry *Registry) Register(peer *Peer) {
|
|||||||
func (registry *Registry) Deregister(peer *Peer) {
|
func (registry *Registry) Deregister(peer *Peer) {
|
||||||
_, loaded := registry.Peers.LoadAndDelete(peer.Id)
|
_, loaded := registry.Peers.LoadAndDelete(peer.Id)
|
||||||
if loaded {
|
if loaded {
|
||||||
log.Printf("deregistered peer [%s]", peer.Id)
|
log.Debugf("peer deregistered [%s]", peer.Id)
|
||||||
} else {
|
} else {
|
||||||
log.Warnf("attempted to remove non-existent peer [%s]", peer.Id)
|
log.Warnf("attempted to remove non-existent peer [%s]", peer.Id)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -29,18 +29,18 @@ func NewServer() *Server {
|
|||||||
func (s *Server) Send(ctx context.Context, msg *proto.EncryptedMessage) (*proto.EncryptedMessage, error) {
|
func (s *Server) Send(ctx context.Context, msg *proto.EncryptedMessage) (*proto.EncryptedMessage, error) {
|
||||||
|
|
||||||
if !s.registry.IsPeerRegistered(msg.Key) {
|
if !s.registry.IsPeerRegistered(msg.Key) {
|
||||||
return nil, fmt.Errorf("unknown peer %s", msg.Key)
|
return nil, fmt.Errorf("peer %s is not registered", msg.Key)
|
||||||
}
|
}
|
||||||
|
|
||||||
if dstPeer, found := s.registry.Get(msg.RemoteKey); found {
|
if dstPeer, found := s.registry.Get(msg.RemoteKey); found {
|
||||||
//forward the message to the target peer
|
//forward the message to the target peer
|
||||||
err := dstPeer.Stream.Send(msg)
|
err := dstPeer.Stream.Send(msg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("error while forwarding message from peer [%s] to peer [%s]", msg.Key, msg.RemoteKey)
|
log.Errorf("error while forwarding message from peer [%s] to peer [%s] %v", msg.Key, msg.RemoteKey, err)
|
||||||
//todo respond to the sender?
|
//todo respond to the sender?
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
log.Warnf("message from peer [%s] can't be forwarded to peer [%s] because destination peer is not connected", msg.Key, msg.RemoteKey)
|
log.Debugf("message from peer [%s] can't be forwarded to peer [%s] because destination peer is not connected", msg.Key, msg.RemoteKey)
|
||||||
//todo respond to the sender?
|
//todo respond to the sender?
|
||||||
}
|
}
|
||||||
return &proto.EncryptedMessage{}, nil
|
return &proto.EncryptedMessage{}, nil
|
||||||
@@ -48,11 +48,17 @@ func (s *Server) Send(ctx context.Context, msg *proto.EncryptedMessage) (*proto.
|
|||||||
|
|
||||||
// ConnectStream connects to the exchange stream
|
// ConnectStream connects to the exchange stream
|
||||||
func (s *Server) ConnectStream(stream proto.SignalExchange_ConnectStreamServer) error {
|
func (s *Server) ConnectStream(stream proto.SignalExchange_ConnectStreamServer) error {
|
||||||
|
|
||||||
p, err := s.connectPeer(stream)
|
p, err := s.connectPeer(stream)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
log.Infof("peer disconnected [%s] ", p.Id)
|
||||||
|
s.registry.Deregister(p)
|
||||||
|
}()
|
||||||
|
|
||||||
//needed to confirm that the peer has been registered so that the client can proceed
|
//needed to confirm that the peer has been registered so that the client can proceed
|
||||||
header := metadata.Pairs(proto.HeaderRegistered, "1")
|
header := metadata.Pairs(proto.HeaderRegistered, "1")
|
||||||
err = stream.SendHeader(header)
|
err = stream.SendHeader(header)
|
||||||
@@ -60,8 +66,10 @@ func (s *Server) ConnectStream(stream proto.SignalExchange_ConnectStreamServer)
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Infof("peer [%s] has successfully connected", p.Id)
|
log.Infof("peer connected [%s]", p.Id)
|
||||||
|
|
||||||
for {
|
for {
|
||||||
|
//read incoming messages
|
||||||
msg, err := stream.Recv()
|
msg, err := stream.Recv()
|
||||||
if err == io.EOF {
|
if err == io.EOF {
|
||||||
break
|
break
|
||||||
@@ -74,14 +82,13 @@ func (s *Server) ConnectStream(stream proto.SignalExchange_ConnectStreamServer)
|
|||||||
//forward the message to the target peer
|
//forward the message to the target peer
|
||||||
err := dstPeer.Stream.Send(msg)
|
err := dstPeer.Stream.Send(msg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("error while forwarding message from peer [%s] to peer [%s]", p.Id, msg.RemoteKey)
|
log.Errorf("error while forwarding message from peer [%s] to peer [%s] %v", p.Id, msg.RemoteKey, err)
|
||||||
//todo respond to the sender?
|
//todo respond to the sender?
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
log.Warnf("message from peer [%s] can't be forwarded to peer [%s] because destination peer is not connected", p.Id, msg.RemoteKey)
|
log.Debugf("message from peer [%s] can't be forwarded to peer [%s] because destination peer is not connected", p.Id, msg.RemoteKey)
|
||||||
//todo respond to the sender?
|
//todo respond to the sender?
|
||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
<-stream.Context().Done()
|
<-stream.Context().Done()
|
||||||
return stream.Context().Err()
|
return stream.Context().Err()
|
||||||
|
|||||||
Reference in New Issue
Block a user