mirror of
https://github.com/netbirdio/netbird.git
synced 2026-04-19 08:46:38 +00:00
Compare commits
449 Commits
send-ssh-r
...
relay/debu
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
32fbb7da51 | ||
|
|
694c468e76 | ||
|
|
887c789171 | ||
|
|
8740473753 | ||
|
|
a023bcedb1 | ||
|
|
73255051dd | ||
|
|
d36880fc3f | ||
|
|
f2e865f493 | ||
|
|
22f5ca8490 | ||
|
|
a8e1e5139b | ||
|
|
5a653732a3 | ||
|
|
2df6eefac3 | ||
|
|
86f745eb9d | ||
|
|
049b981746 | ||
|
|
a918a99396 | ||
|
|
30f22eca73 | ||
|
|
f4f09ec2e2 | ||
|
|
f3cbf96a13 | ||
|
|
ce383eb259 | ||
|
|
b31f25ec70 | ||
|
|
2f848cc51b | ||
|
|
b380f17064 | ||
|
|
f08bf93b19 | ||
|
|
ad75c9f31a | ||
|
|
0f20e2ce70 | ||
|
|
7ef9a3de7e | ||
|
|
7d44be55be | ||
|
|
5400754954 | ||
|
|
9700b105b3 | ||
|
|
1802e51213 | ||
|
|
7da74e707a | ||
|
|
d56dfae9b8 | ||
|
|
6b930271fd | ||
|
|
059fc7c3a2 | ||
|
|
0371f529ca | ||
|
|
501fd93e47 | ||
|
|
727a4f0753 | ||
|
|
e6f7222034 | ||
|
|
bfc33a3f6f | ||
|
|
5ad4ae769a | ||
|
|
cbe90b5dd9 | ||
|
|
f84b606506 | ||
|
|
216d9f2ee8 | ||
|
|
57624203c9 | ||
|
|
24e031ab74 | ||
|
|
df8b8db068 | ||
|
|
3506ac4234 | ||
|
|
0c8f8a62c7 | ||
|
|
cbf9f2058e | ||
|
|
02f3105e48 | ||
|
|
5ee9c77e90 | ||
|
|
c832cef44c | ||
|
|
165988429c | ||
|
|
9d2047a08a | ||
|
|
e5f8ecdeb7 | ||
|
|
aa1a482669 | ||
|
|
7942b0ebae | ||
|
|
5be33d668b | ||
|
|
12f472c58c | ||
|
|
100e31276f | ||
|
|
da39c8bbca | ||
|
|
7321046cd6 | ||
|
|
ea3205643a | ||
|
|
1a15b0f900 | ||
|
|
4b37311e54 | ||
|
|
d2c9a44953 | ||
|
|
b946088a90 | ||
|
|
a8b58a182e | ||
|
|
61c06c7dd2 | ||
|
|
a31d43a14c | ||
|
|
9ee062b4d1 | ||
|
|
1f48fdf6ca | ||
|
|
d70df99f7b | ||
|
|
45fd1e9c21 | ||
|
|
b62ad97e59 | ||
|
|
efa0bbdf3d | ||
|
|
8861e89ab0 | ||
|
|
e1ee73500a | ||
|
|
163a80d53e | ||
|
|
7279d58110 | ||
|
|
7b677f8db2 | ||
|
|
856c97b9df | ||
|
|
6f36ec7a32 | ||
|
|
3e82fcbdd0 | ||
|
|
ff167e2907 | ||
|
|
08022dca10 | ||
|
|
5dbe5d0d49 | ||
|
|
3d2ef17364 | ||
|
|
334926ce90 | ||
|
|
4339b6528f | ||
|
|
8568fbffdd | ||
|
|
fdf9756808 | ||
|
|
1d833113ce | ||
|
|
c42f7628d7 | ||
|
|
a4ba8bd3b8 | ||
|
|
7bfc505962 | ||
|
|
42ea9611d5 | ||
|
|
7ae9cffccc | ||
|
|
57f8c620c0 | ||
|
|
ecb6f0831e | ||
|
|
4802b83ef9 | ||
|
|
63aeeb834d | ||
|
|
20eb1f50e3 | ||
|
|
e9e3b8ba10 | ||
|
|
268e801ec5 | ||
|
|
2576221315 | ||
|
|
788f130941 | ||
|
|
926e11b086 | ||
|
|
1097285d80 | ||
|
|
0329c12173 | ||
|
|
03df0878dc | ||
|
|
8c7215a9f5 | ||
|
|
0a8c78deb1 | ||
|
|
e10bc658f5 | ||
|
|
c815ad86fd | ||
|
|
ef1a39cb01 | ||
|
|
787c900342 | ||
|
|
f247a7be46 | ||
|
|
894d68adf2 | ||
|
|
f3282bea80 | ||
|
|
c900fa81bb | ||
|
|
233a7b9802 | ||
|
|
9a6de52dd0 | ||
|
|
19147f518e | ||
|
|
e78ec2e985 | ||
|
|
e75fbd34a7 | ||
|
|
7162e0a2ac | ||
|
|
95d725f2c1 | ||
|
|
4fad0e521f | ||
|
|
a711e116a3 | ||
|
|
668d229b67 | ||
|
|
7c595e8493 | ||
|
|
03e8acccde | ||
|
|
f9c59a7131 | ||
|
|
4ea55bfe3c | ||
|
|
1d6f5482dd | ||
|
|
12ff93ba72 | ||
|
|
88d1c5a0fd | ||
|
|
1537b0f5e7 | ||
|
|
2577100096 | ||
|
|
bc09348f5a | ||
|
|
d5ba2ef6ec | ||
|
|
47752e1573 | ||
|
|
add4e9f4e4 | ||
|
|
58fbc1249c | ||
|
|
1cc341a268 | ||
|
|
89df6e7242 | ||
|
|
f74646a3ac | ||
|
|
807b830663 | ||
|
|
b5c9af9e9c | ||
|
|
30331e8f62 | ||
|
|
ea93a5edd3 | ||
|
|
cb77ff4661 | ||
|
|
83b83ccfd2 | ||
|
|
4e75e15ea1 | ||
|
|
06afe64aff | ||
|
|
7acaef1152 | ||
|
|
469be3442d | ||
|
|
e8c2fafccd | ||
|
|
d1b6387803 | ||
|
|
820e2feec9 | ||
|
|
e0d086a8a8 | ||
|
|
1f95467b02 | ||
|
|
6553d8ce03 | ||
|
|
f0c829afac | ||
|
|
86f14523e4 | ||
|
|
6cefcbfe5d | ||
|
|
19103031ee | ||
|
|
7369f4bc38 | ||
|
|
d9d275a7ce | ||
|
|
57b85f4f8d | ||
|
|
7ef191903e | ||
|
|
3bd15dd1c4 | ||
|
|
1065e0a6c5 | ||
|
|
d4ff55e6fe | ||
|
|
5625d83c3f | ||
|
|
63f2f51614 | ||
|
|
defdcb631e | ||
|
|
7bf0d04bed | ||
|
|
e4ec1fd757 | ||
|
|
dab50f35d7 | ||
|
|
2d7e797e08 | ||
|
|
c3e8187a47 | ||
|
|
cfac8c4762 | ||
|
|
d9dfae625b | ||
|
|
a9e6742d9a | ||
|
|
931f165c9a | ||
|
|
2803e1669b | ||
|
|
f28a657a1d | ||
|
|
1f949f8cee | ||
|
|
75f5b75bc4 | ||
|
|
48a2f6e69d | ||
|
|
b3715b5fad | ||
|
|
836072098b | ||
|
|
85e991ff78 | ||
|
|
f9845e53a0 | ||
|
|
8845e8fbc7 | ||
|
|
1a5ee744a8 | ||
|
|
765aba2c1c | ||
|
|
7cb81f1d70 | ||
|
|
cea19de667 | ||
|
|
29e5eceb6b | ||
|
|
0f63737330 | ||
|
|
bf518c5fba | ||
|
|
eab6183a8e | ||
|
|
15a7b7629b | ||
|
|
4517da8b3a | ||
|
|
9c0d923124 | ||
|
|
6857734c48 | ||
|
|
d3785dc1fa | ||
|
|
ed82ef7fe4 | ||
|
|
aa55fba5ee | ||
|
|
faeae52329 | ||
|
|
9ae03046e7 | ||
|
|
98aa830831 | ||
|
|
c94c949173 | ||
|
|
183f746158 | ||
|
|
dd0d15c9d4 | ||
|
|
3b019800f8 | ||
|
|
4cd4f88666 | ||
|
|
4d0e16f2d0 | ||
|
|
3fcdb51376 | ||
|
|
c0efce6556 | ||
|
|
f0eb004582 | ||
|
|
0a59f12012 | ||
|
|
745e4f76b1 | ||
|
|
085d072b17 | ||
|
|
d2157bda66 | ||
|
|
0a67f5be1a | ||
|
|
43a8ba97e3 | ||
|
|
17874771cc | ||
|
|
f6ccf6b97a | ||
|
|
6aae797baf | ||
|
|
aca054e51e | ||
|
|
f72e852ccb | ||
|
|
10cee8f46e | ||
|
|
54dc78aab8 | ||
|
|
628673db20 | ||
|
|
eaa31c2dc6 | ||
|
|
25723e9b07 | ||
|
|
3cf4d5758f | ||
|
|
69d8d5aa86 | ||
|
|
7581bbd925 | ||
|
|
4d67d72785 | ||
|
|
4a08f1a1e9 | ||
|
|
bfe60c01ba | ||
|
|
06ceac65de | ||
|
|
fc15ee6351 | ||
|
|
6801dcb3f6 | ||
|
|
4a3e78fb0f | ||
|
|
f9462eea27 | ||
|
|
b075009ef7 | ||
|
|
c7db2c0524 | ||
|
|
4f890ff712 | ||
|
|
f7e6aa9b8f | ||
|
|
81f2330d49 | ||
|
|
0261e15aad | ||
|
|
c347a4c2ca | ||
|
|
11de2ec42e | ||
|
|
61bc092458 | ||
|
|
4d2a25b728 | ||
|
|
2f32e0d8cf | ||
|
|
b679404618 | ||
|
|
48310ef99c | ||
|
|
24f71bc68a | ||
|
|
e26e2c3a75 | ||
|
|
215fb257f7 | ||
|
|
381447b8d6 | ||
|
|
919c1cb3d4 | ||
|
|
85d17cbc89 | ||
|
|
a5e664d83d | ||
|
|
d8ab3c1632 | ||
|
|
63b4041e9c | ||
|
|
f7d8d03e55 | ||
|
|
5b86a7f3f2 | ||
|
|
deb8203f06 | ||
|
|
e407fe02c5 | ||
|
|
c9f3854dde | ||
|
|
a7760bf0a7 | ||
|
|
64f949abbb | ||
|
|
245b086646 | ||
|
|
1609b21b5b | ||
|
|
1f926d15b8 | ||
|
|
a432e8e23a | ||
|
|
4fec709bb1 | ||
|
|
95299be52d | ||
|
|
f51cae7103 | ||
|
|
f68d5e965f | ||
|
|
85b8f36ec1 | ||
|
|
38f2a59d1b | ||
|
|
94e505480b | ||
|
|
9504012920 | ||
|
|
5e93d117cf | ||
|
|
8c70b7d7ff | ||
|
|
ed8def4d9b | ||
|
|
10d8617be6 | ||
|
|
1e115e3893 | ||
|
|
deffe037aa | ||
|
|
fed9e587af | ||
|
|
983d7bafbe | ||
|
|
a40d4d2f32 | ||
|
|
4da29451d0 | ||
|
|
15818b72c6 | ||
|
|
0556dc1860 | ||
|
|
2b369cd28f | ||
|
|
9d44a476c6 | ||
|
|
9b3449753e | ||
|
|
456629811b | ||
|
|
57ddb5f262 | ||
|
|
4ced07dd8d | ||
|
|
3430b81622 | ||
|
|
fd4ad15c83 | ||
|
|
c311d0d19e | ||
|
|
521f7dd39f | ||
|
|
f9ec0a9a2e | ||
|
|
012235ff12 | ||
|
|
4ff069a102 | ||
|
|
7cc3964a4d | ||
|
|
6d627f1923 | ||
|
|
076ce69a24 | ||
|
|
f176807ebe | ||
|
|
d4c47eaf8a | ||
|
|
645a1f31a7 | ||
|
|
b4aa7e50f9 | ||
|
|
d35a79d3b5 | ||
|
|
6a2929011d | ||
|
|
173ca25dac | ||
|
|
e877c9d6c1 | ||
|
|
7a1c96ebf4 | ||
|
|
41fe9f84ec | ||
|
|
d13fb0e379 | ||
|
|
f3214527ea | ||
|
|
69048bfd34 | ||
|
|
29a2d93873 | ||
|
|
6b01b0020e | ||
|
|
9d3db68805 | ||
|
|
2e315311e0 | ||
|
|
36b2cd16cc | ||
|
|
67e2185964 | ||
|
|
89149dc6f4 | ||
|
|
5a1f8f13a2 | ||
|
|
e71059d245 | ||
|
|
91fa2e20a0 | ||
|
|
61034aaf4d | ||
|
|
0a05f8b4d4 | ||
|
|
e82c0a55a3 | ||
|
|
13eb457132 | ||
|
|
b8717b8956 | ||
|
|
1c9c9ae47e | ||
|
|
9ac5a1ed3f | ||
|
|
d4eaec5cbd | ||
|
|
6ae7a790f2 | ||
|
|
49dfbc82d9 | ||
|
|
57a89cf0cc | ||
|
|
50201d63c2 | ||
|
|
d11b39282b | ||
|
|
bd58eea8ea | ||
|
|
a5811a2d7d | ||
|
|
a680f80ed9 | ||
|
|
10fbdc2c4a | ||
|
|
1444fbe104 | ||
|
|
650bca7ca8 | ||
|
|
570e28d227 | ||
|
|
272ade07a8 | ||
|
|
263abe4862 | ||
|
|
ceee421a05 | ||
|
|
0a75da6fb7 | ||
|
|
920877964f | ||
|
|
2e0047daea | ||
|
|
ce0718fcb5 | ||
|
|
c590518e0c | ||
|
|
f309b120cd | ||
|
|
7357a9954c | ||
|
|
13b63eebc1 | ||
|
|
735ed7ab34 | ||
|
|
961d9198ef | ||
|
|
df4ca01848 | ||
|
|
4e7c17756c | ||
|
|
6a4935139d | ||
|
|
35dd991776 | ||
|
|
3598418206 | ||
|
|
e435e39158 | ||
|
|
fd26e989e3 | ||
|
|
4424162bce | ||
|
|
54b045d9ca | ||
|
|
71c6437bab | ||
|
|
7b254cb966 | ||
|
|
8f3a0f2c38 | ||
|
|
1f33e2e003 | ||
|
|
1e6addaa65 | ||
|
|
f51dc13f8c | ||
|
|
3477108ce7 | ||
|
|
012e624296 | ||
|
|
4c5e987e02 | ||
|
|
a80c8b0176 | ||
|
|
9e01155d2e | ||
|
|
3c3111ad01 | ||
|
|
b74078fd95 | ||
|
|
77488ad11a | ||
|
|
e3b76448f3 | ||
|
|
e0de86d6c9 | ||
|
|
5204d07811 | ||
|
|
5ea24ba56e | ||
|
|
d30cf8706a | ||
|
|
15a2feb723 | ||
|
|
91b2f9fc51 | ||
|
|
76702c8a09 | ||
|
|
061f673a4f | ||
|
|
9505805313 | ||
|
|
704c67dec8 | ||
|
|
3ed2f08f3c | ||
|
|
4c83408f27 | ||
|
|
90bd39c740 | ||
|
|
dd0cf41147 | ||
|
|
22b2caffc6 | ||
|
|
c1f66d1354 | ||
|
|
ac0fe6025b | ||
|
|
c28657710a | ||
|
|
3875c29f6b | ||
|
|
9f32ccd453 | ||
|
|
1d1d057e7d | ||
|
|
3461b1bb90 | ||
|
|
3d2a2377c6 | ||
|
|
25f5f26527 | ||
|
|
bb0d5c5baf | ||
|
|
7938295190 | ||
|
|
9af532fe71 | ||
|
|
23a1473797 | ||
|
|
9c2dc05df1 | ||
|
|
40d56e5d29 | ||
|
|
fd23d0c28f | ||
|
|
4fff93a1f2 | ||
|
|
22beac1b1b | ||
|
|
bd7a65d798 | ||
|
|
2d76b058fc | ||
|
|
ea2d060f93 | ||
|
|
68b377a28c | ||
|
|
af50eb350f | ||
|
|
2475473227 | ||
|
|
846871913d | ||
|
|
6cba9c0818 | ||
|
|
f0672b87bc | ||
|
|
9b0fe2c8e5 | ||
|
|
abd57d1191 | ||
|
|
416f04c27a | ||
|
|
fc7c1e397f | ||
|
|
52a3ac6b06 | ||
|
|
0b3b50c705 | ||
|
|
042141db06 |
8
.editorconfig
Normal file
8
.editorconfig
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
root = true
|
||||||
|
|
||||||
|
[*]
|
||||||
|
end_of_line = lf
|
||||||
|
insert_final_newline = true
|
||||||
|
|
||||||
|
[*.go]
|
||||||
|
indent_style = tab
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
name: Test Code Darwin
|
name: Test Code Linux
|
||||||
|
|
||||||
on:
|
on:
|
||||||
push:
|
push:
|
||||||
@@ -11,26 +11,27 @@ concurrency:
|
|||||||
cancel-in-progress: true
|
cancel-in-progress: true
|
||||||
|
|
||||||
jobs:
|
jobs:
|
||||||
test:
|
test_client_on_docker:
|
||||||
strategy:
|
runs-on: ubuntu-20.04
|
||||||
matrix:
|
|
||||||
store: ['jsonfile', 'sqlite']
|
|
||||||
runs-on: macos-latest
|
|
||||||
steps:
|
steps:
|
||||||
- name: Install Go
|
- name: Install Go
|
||||||
uses: actions/setup-go@v4
|
uses: actions/setup-go@v4
|
||||||
with:
|
with:
|
||||||
go-version: "1.21.x"
|
go-version: "1.21.x"
|
||||||
- name: Checkout code
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
|
|
||||||
- name: Cache Go modules
|
- name: Cache Go modules
|
||||||
uses: actions/cache@v3
|
uses: actions/cache@v3
|
||||||
with:
|
with:
|
||||||
path: ~/go/pkg/mod
|
path: ~/go/pkg/mod
|
||||||
key: macos-go-${{ hashFiles('**/go.sum') }}
|
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
||||||
restore-keys: |
|
restore-keys: |
|
||||||
macos-go-
|
${{ runner.os }}-go-
|
||||||
|
|
||||||
|
- name: Checkout code
|
||||||
|
uses: actions/checkout@v3
|
||||||
|
|
||||||
|
- name: Install dependencies
|
||||||
|
run: sudo apt update && sudo apt install -y -q libgtk-3-dev libayatana-appindicator3-dev libgl1-mesa-dev xorg-dev gcc-multilib libpcap-dev
|
||||||
|
|
||||||
- name: Install modules
|
- name: Install modules
|
||||||
run: go mod tidy
|
run: go mod tidy
|
||||||
@@ -38,5 +39,5 @@ jobs:
|
|||||||
- name: check git status
|
- name: check git status
|
||||||
run: git --no-pager diff --exit-code
|
run: git --no-pager diff --exit-code
|
||||||
|
|
||||||
- name: Test
|
- name: Run test
|
||||||
run: NETBIRD_STORE_ENGINE=${{ matrix.store }} go test -exec 'sudo --preserve-env=CI,NETBIRD_STORE_ENGINE' -timeout 5m -p 1 ./...
|
run: go test ./client/internal/routemanager -run TestGetBestrouteFromStatuses
|
||||||
117
.github/workflows/golang-test-linux.yml
vendored
117
.github/workflows/golang-test-linux.yml
vendored
@@ -1,117 +0,0 @@
|
|||||||
name: Test Code Linux
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- main
|
|
||||||
pull_request:
|
|
||||||
|
|
||||||
concurrency:
|
|
||||||
group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref || github.actor_id }}
|
|
||||||
cancel-in-progress: true
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
test:
|
|
||||||
strategy:
|
|
||||||
matrix:
|
|
||||||
arch: ['386','amd64']
|
|
||||||
store: ['jsonfile', 'sqlite']
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- name: Install Go
|
|
||||||
uses: actions/setup-go@v4
|
|
||||||
with:
|
|
||||||
go-version: "1.21.x"
|
|
||||||
|
|
||||||
|
|
||||||
- name: Cache Go modules
|
|
||||||
uses: actions/cache@v3
|
|
||||||
with:
|
|
||||||
path: ~/go/pkg/mod
|
|
||||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
|
||||||
restore-keys: |
|
|
||||||
${{ runner.os }}-go-
|
|
||||||
|
|
||||||
- name: Checkout code
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
|
|
||||||
- name: Install dependencies
|
|
||||||
run: sudo apt update && sudo apt install -y -q libgtk-3-dev libayatana-appindicator3-dev libgl1-mesa-dev xorg-dev gcc-multilib
|
|
||||||
|
|
||||||
- name: Install modules
|
|
||||||
run: go mod tidy
|
|
||||||
|
|
||||||
- name: check git status
|
|
||||||
run: git --no-pager diff --exit-code
|
|
||||||
|
|
||||||
- name: Test
|
|
||||||
run: CGO_ENABLED=1 GOARCH=${{ matrix.arch }} NETBIRD_STORE_ENGINE=${{ matrix.store }} go test -exec 'sudo --preserve-env=CI,NETBIRD_STORE_ENGINE' -timeout 5m -p 1 ./...
|
|
||||||
|
|
||||||
test_client_on_docker:
|
|
||||||
runs-on: ubuntu-20.04
|
|
||||||
steps:
|
|
||||||
- name: Install Go
|
|
||||||
uses: actions/setup-go@v4
|
|
||||||
with:
|
|
||||||
go-version: "1.21.x"
|
|
||||||
|
|
||||||
- name: Cache Go modules
|
|
||||||
uses: actions/cache@v3
|
|
||||||
with:
|
|
||||||
path: ~/go/pkg/mod
|
|
||||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
|
||||||
restore-keys: |
|
|
||||||
${{ runner.os }}-go-
|
|
||||||
|
|
||||||
- name: Checkout code
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
|
|
||||||
- name: Install dependencies
|
|
||||||
run: sudo apt update && sudo apt install -y -q libgtk-3-dev libayatana-appindicator3-dev libgl1-mesa-dev xorg-dev gcc-multilib
|
|
||||||
|
|
||||||
- name: Install modules
|
|
||||||
run: go mod tidy
|
|
||||||
|
|
||||||
- name: check git status
|
|
||||||
run: git --no-pager diff --exit-code
|
|
||||||
|
|
||||||
- name: Generate Iface Test bin
|
|
||||||
run: CGO_ENABLED=0 go test -c -o iface-testing.bin ./iface/
|
|
||||||
|
|
||||||
- name: Generate Shared Sock Test bin
|
|
||||||
run: CGO_ENABLED=0 go test -c -o sharedsock-testing.bin ./sharedsock
|
|
||||||
|
|
||||||
- name: Generate RouteManager Test bin
|
|
||||||
run: CGO_ENABLED=0 go test -c -o routemanager-testing.bin ./client/internal/routemanager/...
|
|
||||||
|
|
||||||
- name: Generate nftables Manager Test bin
|
|
||||||
run: CGO_ENABLED=0 go test -c -o nftablesmanager-testing.bin ./client/firewall/nftables/...
|
|
||||||
|
|
||||||
- name: Generate Engine Test bin
|
|
||||||
run: CGO_ENABLED=1 go test -c -o engine-testing.bin ./client/internal
|
|
||||||
|
|
||||||
- name: Generate Peer Test bin
|
|
||||||
run: CGO_ENABLED=0 go test -c -o peer-testing.bin ./client/internal/peer/...
|
|
||||||
|
|
||||||
- run: chmod +x *testing.bin
|
|
||||||
|
|
||||||
- name: Run Shared Sock tests in docker
|
|
||||||
run: docker run -t --cap-add=NET_ADMIN --privileged --rm -v $PWD:/ci -w /ci/sharedsock --entrypoint /busybox/sh gcr.io/distroless/base:debug -c /ci/sharedsock-testing.bin -test.timeout 5m -test.parallel 1
|
|
||||||
|
|
||||||
- name: Run Iface tests in docker
|
|
||||||
run: docker run -t --cap-add=NET_ADMIN --privileged --rm -v $PWD:/ci -w /ci/iface --entrypoint /busybox/sh gcr.io/distroless/base:debug -c /ci/iface-testing.bin -test.timeout 5m -test.parallel 1
|
|
||||||
|
|
||||||
- name: Run RouteManager tests in docker
|
|
||||||
run: docker run -t --cap-add=NET_ADMIN --privileged --rm -v $PWD:/ci -w /ci/client/internal/routemanager --entrypoint /busybox/sh gcr.io/distroless/base:debug -c /ci/routemanager-testing.bin -test.timeout 5m -test.parallel 1
|
|
||||||
|
|
||||||
- name: Run nftables Manager tests in docker
|
|
||||||
run: docker run -t --cap-add=NET_ADMIN --privileged --rm -v $PWD:/ci -w /ci/client/firewall --entrypoint /busybox/sh gcr.io/distroless/base:debug -c /ci/nftablesmanager-testing.bin -test.timeout 5m -test.parallel 1
|
|
||||||
|
|
||||||
- name: Run Engine tests in docker with file store
|
|
||||||
run: docker run -t --cap-add=NET_ADMIN --privileged --rm -v $PWD:/ci -w /ci/client/internal -e NETBIRD_STORE_ENGINE="jsonfile" --entrypoint /busybox/sh gcr.io/distroless/base:debug -c /ci/engine-testing.bin -test.timeout 5m -test.parallel 1
|
|
||||||
|
|
||||||
- name: Run Engine tests in docker with sqlite store
|
|
||||||
run: docker run -t --cap-add=NET_ADMIN --privileged --rm -v $PWD:/ci -w /ci/client/internal -e NETBIRD_STORE_ENGINE="sqlite" --entrypoint /busybox/sh gcr.io/distroless/base:debug -c /ci/engine-testing.bin -test.timeout 5m -test.parallel 1
|
|
||||||
|
|
||||||
- name: Run Peer tests in docker
|
|
||||||
run: docker run -t --cap-add=NET_ADMIN --privileged --rm -v $PWD:/ci -w /ci/client/internal/peer --entrypoint /busybox/sh gcr.io/distroless/base:debug -c /ci/peer-testing.bin -test.timeout 5m -test.parallel 1
|
|
||||||
53
.github/workflows/golang-test-windows.yml
vendored
53
.github/workflows/golang-test-windows.yml
vendored
@@ -1,53 +0,0 @@
|
|||||||
name: Test Code Windows
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- main
|
|
||||||
pull_request:
|
|
||||||
|
|
||||||
env:
|
|
||||||
downloadPath: '${{ github.workspace }}\temp'
|
|
||||||
concurrency:
|
|
||||||
group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref || github.actor_id }}
|
|
||||||
cancel-in-progress: true
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
test:
|
|
||||||
runs-on: windows-latest
|
|
||||||
steps:
|
|
||||||
- name: Checkout code
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
|
|
||||||
- name: Install Go
|
|
||||||
uses: actions/setup-go@v4
|
|
||||||
id: go
|
|
||||||
with:
|
|
||||||
go-version: "1.21.x"
|
|
||||||
|
|
||||||
- name: Download wintun
|
|
||||||
uses: carlosperate/download-file-action@v2
|
|
||||||
id: download-wintun
|
|
||||||
with:
|
|
||||||
file-url: https://pkgs.netbird.io/wintun/wintun-0.14.1.zip
|
|
||||||
file-name: wintun.zip
|
|
||||||
location: ${{ env.downloadPath }}
|
|
||||||
sha256: '07c256185d6ee3652e09fa55c0b673e2624b565e02c4b9091c79ca7d2f24ef51'
|
|
||||||
|
|
||||||
- name: Decompressing wintun files
|
|
||||||
run: tar -zvxf "${{ steps.download-wintun.outputs.file-path }}" -C ${{ env.downloadPath }}
|
|
||||||
|
|
||||||
- run: mv ${{ env.downloadPath }}/wintun/bin/amd64/wintun.dll 'C:\Windows\System32\'
|
|
||||||
|
|
||||||
- run: choco install -y sysinternals --ignore-checksums
|
|
||||||
- run: choco install -y mingw
|
|
||||||
|
|
||||||
- run: PsExec64 -s -w ${{ github.workspace }} C:\hostedtoolcache\windows\go\${{ steps.go.outputs.go-version }}\x64\bin\go.exe env -w GOMODCACHE=C:\Users\runneradmin\go\pkg\mod
|
|
||||||
- run: PsExec64 -s -w ${{ github.workspace }} C:\hostedtoolcache\windows\go\${{ steps.go.outputs.go-version }}\x64\bin\go.exe env -w GOCACHE=C:\Users\runneradmin\AppData\Local\go-build
|
|
||||||
- run: "[Environment]::SetEnvironmentVariable('NETBIRD_STORE_ENGINE', 'jsonfile', 'Machine')"
|
|
||||||
|
|
||||||
- name: test
|
|
||||||
run: PsExec64 -s -w ${{ github.workspace }} cmd.exe /c "C:\hostedtoolcache\windows\go\${{ steps.go.outputs.go-version }}\x64\bin\go.exe test -timeout 5m -p 1 ./... > test-out.txt 2>&1"
|
|
||||||
- name: test output
|
|
||||||
if: ${{ always() }}
|
|
||||||
run: Get-Content test-out.txt
|
|
||||||
48
.github/workflows/golangci-lint.yml
vendored
48
.github/workflows/golangci-lint.yml
vendored
@@ -1,48 +0,0 @@
|
|||||||
name: golangci-lint
|
|
||||||
on: [pull_request]
|
|
||||||
|
|
||||||
permissions:
|
|
||||||
contents: read
|
|
||||||
pull-requests: read
|
|
||||||
|
|
||||||
concurrency:
|
|
||||||
group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref || github.actor_id }}
|
|
||||||
cancel-in-progress: true
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
codespell:
|
|
||||||
name: codespell
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- name: Checkout code
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
- name: codespell
|
|
||||||
uses: codespell-project/actions-codespell@v2
|
|
||||||
with:
|
|
||||||
ignore_words_list: erro,clienta
|
|
||||||
skip: go.mod,go.sum
|
|
||||||
only_warn: 1
|
|
||||||
golangci:
|
|
||||||
strategy:
|
|
||||||
fail-fast: false
|
|
||||||
matrix:
|
|
||||||
os: [macos-latest, windows-latest, ubuntu-latest]
|
|
||||||
name: lint
|
|
||||||
runs-on: ${{ matrix.os }}
|
|
||||||
timeout-minutes: 15
|
|
||||||
steps:
|
|
||||||
- name: Checkout code
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
- name: Install Go
|
|
||||||
uses: actions/setup-go@v4
|
|
||||||
with:
|
|
||||||
go-version: "1.21.x"
|
|
||||||
cache: false
|
|
||||||
- name: Install dependencies
|
|
||||||
if: matrix.os == 'ubuntu-latest'
|
|
||||||
run: sudo apt update && sudo apt install -y -q libgtk-3-dev libayatana-appindicator3-dev libgl1-mesa-dev xorg-dev
|
|
||||||
- name: golangci-lint
|
|
||||||
uses: golangci/golangci-lint-action@v3
|
|
||||||
with:
|
|
||||||
version: latest
|
|
||||||
args: --timeout=12m
|
|
||||||
36
.github/workflows/install-script-test.yml
vendored
36
.github/workflows/install-script-test.yml
vendored
@@ -1,36 +0,0 @@
|
|||||||
name: Test installation
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- main
|
|
||||||
pull_request:
|
|
||||||
paths:
|
|
||||||
- "release_files/install.sh"
|
|
||||||
concurrency:
|
|
||||||
group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref || github.actor_id }}
|
|
||||||
cancel-in-progress: true
|
|
||||||
jobs:
|
|
||||||
test-install-script:
|
|
||||||
strategy:
|
|
||||||
max-parallel: 2
|
|
||||||
matrix:
|
|
||||||
os: [ubuntu-latest, macos-latest]
|
|
||||||
skip_ui_mode: [true, false]
|
|
||||||
install_binary: [true, false]
|
|
||||||
runs-on: ${{ matrix.os }}
|
|
||||||
steps:
|
|
||||||
- name: Checkout code
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
|
|
||||||
- name: run install script
|
|
||||||
env:
|
|
||||||
SKIP_UI_APP: ${{ matrix.skip_ui_mode }}
|
|
||||||
USE_BIN_INSTALL: ${{ matrix.install_binary }}
|
|
||||||
GITHUB_TOKEN: ${{ secrets.RO_API_CALLER_TOKEN }}
|
|
||||||
run: |
|
|
||||||
[ "$SKIP_UI_APP" == "false" ] && export XDG_CURRENT_DESKTOP="none"
|
|
||||||
cat release_files/install.sh | sh -x
|
|
||||||
|
|
||||||
- name: check cli binary
|
|
||||||
run: command -v netbird
|
|
||||||
65
.github/workflows/mobile-build-validation.yml
vendored
65
.github/workflows/mobile-build-validation.yml
vendored
@@ -1,65 +0,0 @@
|
|||||||
name: Mobile build validation
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- main
|
|
||||||
pull_request:
|
|
||||||
|
|
||||||
concurrency:
|
|
||||||
group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref || github.actor_id }}
|
|
||||||
cancel-in-progress: true
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
android_build:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- name: Checkout repository
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
- name: Install Go
|
|
||||||
uses: actions/setup-go@v4
|
|
||||||
with:
|
|
||||||
go-version: "1.21.x"
|
|
||||||
- name: Setup Android SDK
|
|
||||||
uses: android-actions/setup-android@v3
|
|
||||||
with:
|
|
||||||
cmdline-tools-version: 8512546
|
|
||||||
- name: Setup Java
|
|
||||||
uses: actions/setup-java@v3
|
|
||||||
with:
|
|
||||||
java-version: "11"
|
|
||||||
distribution: "adopt"
|
|
||||||
- name: NDK Cache
|
|
||||||
id: ndk-cache
|
|
||||||
uses: actions/cache@v3
|
|
||||||
with:
|
|
||||||
path: /usr/local/lib/android/sdk/ndk
|
|
||||||
key: ndk-cache-23.1.7779620
|
|
||||||
- name: Setup NDK
|
|
||||||
run: /usr/local/lib/android/sdk/cmdline-tools/7.0/bin/sdkmanager --install "ndk;23.1.7779620"
|
|
||||||
- name: install gomobile
|
|
||||||
run: go install golang.org/x/mobile/cmd/gomobile@v0.0.0-20230531173138-3c911d8e3eda
|
|
||||||
- name: gomobile init
|
|
||||||
run: gomobile init
|
|
||||||
- name: build android netbird lib
|
|
||||||
run: PATH=$PATH:$(go env GOPATH) gomobile bind -o $GITHUB_WORKSPACE/netbird.aar -javapkg=io.netbird.gomobile -ldflags="-X golang.zx2c4.com/wireguard/ipc.socketDirectory=/data/data/io.netbird.client/cache/wireguard -X github.com/netbirdio/netbird/version.version=buildtest" $GITHUB_WORKSPACE/client/android
|
|
||||||
env:
|
|
||||||
CGO_ENABLED: 0
|
|
||||||
ANDROID_NDK_HOME: /usr/local/lib/android/sdk/ndk/23.1.7779620
|
|
||||||
ios_build:
|
|
||||||
runs-on: macos-latest
|
|
||||||
steps:
|
|
||||||
- name: Checkout repository
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
- name: Install Go
|
|
||||||
uses: actions/setup-go@v4
|
|
||||||
with:
|
|
||||||
go-version: "1.21.x"
|
|
||||||
- name: install gomobile
|
|
||||||
run: go install golang.org/x/mobile/cmd/gomobile@v0.0.0-20230531173138-3c911d8e3eda
|
|
||||||
- name: gomobile init
|
|
||||||
run: gomobile init
|
|
||||||
- name: build iOS netbird lib
|
|
||||||
run: PATH=$PATH:$(go env GOPATH) gomobile bind -target=ios -bundleid=io.netbird.framework -ldflags="-X github.com/netbirdio/netbird/version.version=buildtest" -o $GITHUB_WORKSPACE/NetBirdSDK.xcframework $GITHUB_WORKSPACE/client/ios/NetBirdSDK
|
|
||||||
env:
|
|
||||||
CGO_ENABLED: 0
|
|
||||||
239
.github/workflows/release.yml
vendored
239
.github/workflows/release.yml
vendored
@@ -1,239 +0,0 @@
|
|||||||
name: Release
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
tags:
|
|
||||||
- 'v*'
|
|
||||||
branches:
|
|
||||||
- main
|
|
||||||
pull_request:
|
|
||||||
paths:
|
|
||||||
- 'go.mod'
|
|
||||||
- 'go.sum'
|
|
||||||
- '.goreleaser.yml'
|
|
||||||
- '.goreleaser_ui.yaml'
|
|
||||||
- '.goreleaser_ui_darwin.yaml'
|
|
||||||
- '.github/workflows/release.yml'
|
|
||||||
- 'release_files/**'
|
|
||||||
- '**/Dockerfile'
|
|
||||||
- '**/Dockerfile.*'
|
|
||||||
- 'client/ui/**'
|
|
||||||
|
|
||||||
env:
|
|
||||||
SIGN_PIPE_VER: "v0.0.11"
|
|
||||||
GORELEASER_VER: "v1.14.1"
|
|
||||||
|
|
||||||
concurrency:
|
|
||||||
group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref || github.actor_id }}
|
|
||||||
cancel-in-progress: true
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
release:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
env:
|
|
||||||
flags: ""
|
|
||||||
steps:
|
|
||||||
- if: ${{ !startsWith(github.ref, 'refs/tags/v') }}
|
|
||||||
run: echo "flags=--snapshot" >> $GITHUB_ENV
|
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
fetch-depth: 0 # It is required for GoReleaser to work properly
|
|
||||||
-
|
|
||||||
name: Set up Go
|
|
||||||
uses: actions/setup-go@v4
|
|
||||||
with:
|
|
||||||
go-version: "1.21"
|
|
||||||
cache: false
|
|
||||||
-
|
|
||||||
name: Cache Go modules
|
|
||||||
uses: actions/cache@v3
|
|
||||||
with:
|
|
||||||
path: |
|
|
||||||
~/go/pkg/mod
|
|
||||||
~/.cache/go-build
|
|
||||||
key: ${{ runner.os }}-go-releaser-${{ hashFiles('**/go.sum') }}
|
|
||||||
restore-keys: |
|
|
||||||
${{ runner.os }}-go-releaser-
|
|
||||||
-
|
|
||||||
name: Install modules
|
|
||||||
run: go mod tidy
|
|
||||||
-
|
|
||||||
name: check git status
|
|
||||||
run: git --no-pager diff --exit-code
|
|
||||||
-
|
|
||||||
name: Set up QEMU
|
|
||||||
uses: docker/setup-qemu-action@v2
|
|
||||||
-
|
|
||||||
name: Set up Docker Buildx
|
|
||||||
uses: docker/setup-buildx-action@v2
|
|
||||||
-
|
|
||||||
name: Login to Docker hub
|
|
||||||
if: github.event_name != 'pull_request'
|
|
||||||
uses: docker/login-action@v1
|
|
||||||
with:
|
|
||||||
username: netbirdio
|
|
||||||
password: ${{ secrets.DOCKER_TOKEN }}
|
|
||||||
- name: Install OS build dependencies
|
|
||||||
run: sudo apt update && sudo apt install -y -q gcc-arm-linux-gnueabihf gcc-aarch64-linux-gnu
|
|
||||||
|
|
||||||
- name: Install rsrc
|
|
||||||
run: go install github.com/akavel/rsrc@v0.10.2
|
|
||||||
- name: Generate windows rsrc amd64
|
|
||||||
run: rsrc -arch amd64 -ico client/ui/netbird.ico -manifest client/manifest.xml -o client/resources_windows_amd64.syso
|
|
||||||
- name: Generate windows rsrc arm64
|
|
||||||
run: rsrc -arch arm64 -ico client/ui/netbird.ico -manifest client/manifest.xml -o client/resources_windows_arm64.syso
|
|
||||||
- name: Generate windows rsrc arm
|
|
||||||
run: rsrc -arch arm -ico client/ui/netbird.ico -manifest client/manifest.xml -o client/resources_windows_arm.syso
|
|
||||||
- name: Generate windows rsrc 386
|
|
||||||
run: rsrc -arch 386 -ico client/ui/netbird.ico -manifest client/manifest.xml -o client/resources_windows_386.syso
|
|
||||||
-
|
|
||||||
name: Run GoReleaser
|
|
||||||
uses: goreleaser/goreleaser-action@v4
|
|
||||||
with:
|
|
||||||
version: ${{ env.GORELEASER_VER }}
|
|
||||||
args: release --rm-dist ${{ env.flags }}
|
|
||||||
env:
|
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
HOMEBREW_TAP_GITHUB_TOKEN: ${{ secrets.HOMEBREW_TAP_GITHUB_TOKEN }}
|
|
||||||
UPLOAD_DEBIAN_SECRET: ${{ secrets.PKG_UPLOAD_SECRET }}
|
|
||||||
UPLOAD_YUM_SECRET: ${{ secrets.PKG_UPLOAD_SECRET }}
|
|
||||||
-
|
|
||||||
name: upload non tags for debug purposes
|
|
||||||
uses: actions/upload-artifact@v3
|
|
||||||
with:
|
|
||||||
name: release
|
|
||||||
path: dist/
|
|
||||||
retention-days: 3
|
|
||||||
|
|
||||||
release_ui:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- if: ${{ !startsWith(github.ref, 'refs/tags/v') }}
|
|
||||||
run: echo "flags=--snapshot" >> $GITHUB_ENV
|
|
||||||
- name: Checkout
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
fetch-depth: 0 # It is required for GoReleaser to work properly
|
|
||||||
|
|
||||||
- name: Set up Go
|
|
||||||
uses: actions/setup-go@v4
|
|
||||||
with:
|
|
||||||
go-version: "1.21"
|
|
||||||
cache: false
|
|
||||||
- name: Cache Go modules
|
|
||||||
uses: actions/cache@v3
|
|
||||||
with:
|
|
||||||
path: |
|
|
||||||
~/go/pkg/mod
|
|
||||||
~/.cache/go-build
|
|
||||||
key: ${{ runner.os }}-ui-go-releaser-${{ hashFiles('**/go.sum') }}
|
|
||||||
restore-keys: |
|
|
||||||
${{ runner.os }}-ui-go-releaser-
|
|
||||||
|
|
||||||
- name: Install modules
|
|
||||||
run: go mod tidy
|
|
||||||
|
|
||||||
- name: check git status
|
|
||||||
run: git --no-pager diff --exit-code
|
|
||||||
|
|
||||||
- name: Install dependencies
|
|
||||||
run: sudo apt update && sudo apt install -y -q libappindicator3-dev gir1.2-appindicator3-0.1 libxxf86vm-dev gcc-mingw-w64-x86-64
|
|
||||||
- name: Install rsrc
|
|
||||||
run: go install github.com/akavel/rsrc@v0.10.2
|
|
||||||
- name: Generate windows rsrc
|
|
||||||
run: rsrc -arch amd64 -ico client/ui/netbird.ico -manifest client/ui/manifest.xml -o client/ui/resources_windows_amd64.syso
|
|
||||||
- name: Run GoReleaser
|
|
||||||
uses: goreleaser/goreleaser-action@v4
|
|
||||||
with:
|
|
||||||
version: ${{ env.GORELEASER_VER }}
|
|
||||||
args: release --config .goreleaser_ui.yaml --rm-dist ${{ env.flags }}
|
|
||||||
env:
|
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
HOMEBREW_TAP_GITHUB_TOKEN: ${{ secrets.HOMEBREW_TAP_GITHUB_TOKEN }}
|
|
||||||
UPLOAD_DEBIAN_SECRET: ${{ secrets.PKG_UPLOAD_SECRET }}
|
|
||||||
UPLOAD_YUM_SECRET: ${{ secrets.PKG_UPLOAD_SECRET }}
|
|
||||||
- name: upload non tags for debug purposes
|
|
||||||
uses: actions/upload-artifact@v3
|
|
||||||
with:
|
|
||||||
name: release-ui
|
|
||||||
path: dist/
|
|
||||||
retention-days: 3
|
|
||||||
|
|
||||||
release_ui_darwin:
|
|
||||||
runs-on: macos-11
|
|
||||||
steps:
|
|
||||||
- if: ${{ !startsWith(github.ref, 'refs/tags/v') }}
|
|
||||||
run: echo "flags=--snapshot" >> $GITHUB_ENV
|
|
||||||
-
|
|
||||||
name: Checkout
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
with:
|
|
||||||
fetch-depth: 0 # It is required for GoReleaser to work properly
|
|
||||||
-
|
|
||||||
name: Set up Go
|
|
||||||
uses: actions/setup-go@v4
|
|
||||||
with:
|
|
||||||
go-version: "1.21"
|
|
||||||
cache: false
|
|
||||||
-
|
|
||||||
name: Cache Go modules
|
|
||||||
uses: actions/cache@v3
|
|
||||||
with:
|
|
||||||
path: |
|
|
||||||
~/go/pkg/mod
|
|
||||||
~/.cache/go-build
|
|
||||||
key: ${{ runner.os }}-ui-go-releaser-darwin-${{ hashFiles('**/go.sum') }}
|
|
||||||
restore-keys: |
|
|
||||||
${{ runner.os }}-ui-go-releaser-darwin-
|
|
||||||
-
|
|
||||||
name: Install modules
|
|
||||||
run: go mod tidy
|
|
||||||
-
|
|
||||||
name: check git status
|
|
||||||
run: git --no-pager diff --exit-code
|
|
||||||
-
|
|
||||||
name: Run GoReleaser
|
|
||||||
id: goreleaser
|
|
||||||
uses: goreleaser/goreleaser-action@v4
|
|
||||||
with:
|
|
||||||
version: ${{ env.GORELEASER_VER }}
|
|
||||||
args: release --config .goreleaser_ui_darwin.yaml --rm-dist ${{ env.flags }}
|
|
||||||
env:
|
|
||||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
|
||||||
-
|
|
||||||
name: upload non tags for debug purposes
|
|
||||||
uses: actions/upload-artifact@v3
|
|
||||||
with:
|
|
||||||
name: release-ui-darwin
|
|
||||||
path: dist/
|
|
||||||
retention-days: 3
|
|
||||||
|
|
||||||
trigger_windows_signer:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
needs: [release,release_ui]
|
|
||||||
if: startsWith(github.ref, 'refs/tags/')
|
|
||||||
steps:
|
|
||||||
- name: Trigger Windows binaries sign pipeline
|
|
||||||
uses: benc-uk/workflow-dispatch@v1
|
|
||||||
with:
|
|
||||||
workflow: Sign windows bin and installer
|
|
||||||
repo: netbirdio/sign-pipelines
|
|
||||||
ref: ${{ env.SIGN_PIPE_VER }}
|
|
||||||
token: ${{ secrets.SIGN_GITHUB_TOKEN }}
|
|
||||||
inputs: '{ "tag": "${{ github.ref }}" }'
|
|
||||||
|
|
||||||
trigger_darwin_signer:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
needs: [release,release_ui_darwin]
|
|
||||||
if: startsWith(github.ref, 'refs/tags/')
|
|
||||||
steps:
|
|
||||||
- name: Trigger Darwin App binaries sign pipeline
|
|
||||||
uses: benc-uk/workflow-dispatch@v1
|
|
||||||
with:
|
|
||||||
workflow: Sign darwin ui app with dispatch
|
|
||||||
repo: netbirdio/sign-pipelines
|
|
||||||
ref: ${{ env.SIGN_PIPE_VER }}
|
|
||||||
token: ${{ secrets.SIGN_GITHUB_TOKEN }}
|
|
||||||
inputs: '{ "tag": "${{ github.ref }}" }'
|
|
||||||
22
.github/workflows/sync-main.yml
vendored
22
.github/workflows/sync-main.yml
vendored
@@ -1,22 +0,0 @@
|
|||||||
name: sync main
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- main
|
|
||||||
|
|
||||||
concurrency:
|
|
||||||
group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref || github.actor_id }}
|
|
||||||
cancel-in-progress: true
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
trigger_sync_main:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- name: Trigger main branch sync
|
|
||||||
uses: benc-uk/workflow-dispatch@v1
|
|
||||||
with:
|
|
||||||
workflow: sync-main.yml
|
|
||||||
repo: ${{ secrets.UPSTREAM_REPO }}
|
|
||||||
token: ${{ secrets.NC_GITHUB_TOKEN }}
|
|
||||||
inputs: '{ "sha": "${{ github.sha }}" }'
|
|
||||||
23
.github/workflows/sync-tag.yml
vendored
23
.github/workflows/sync-tag.yml
vendored
@@ -1,23 +0,0 @@
|
|||||||
name: sync tag
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
tags:
|
|
||||||
- 'v*'
|
|
||||||
|
|
||||||
concurrency:
|
|
||||||
group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref || github.actor_id }}
|
|
||||||
cancel-in-progress: true
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
trigger_sync_tag:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- name: Trigger release tag sync
|
|
||||||
uses: benc-uk/workflow-dispatch@v1
|
|
||||||
with:
|
|
||||||
workflow: sync-tag.yml
|
|
||||||
ref: main
|
|
||||||
repo: ${{ secrets.UPSTREAM_REPO }}
|
|
||||||
token: ${{ secrets.NC_GITHUB_TOKEN }}
|
|
||||||
inputs: '{ "tag": "${{ github.ref_name }}" }'
|
|
||||||
211
.github/workflows/test-infrastructure-files.yml
vendored
211
.github/workflows/test-infrastructure-files.yml
vendored
@@ -1,211 +0,0 @@
|
|||||||
name: Test Infrastructure files
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
branches:
|
|
||||||
- main
|
|
||||||
pull_request:
|
|
||||||
paths:
|
|
||||||
- 'infrastructure_files/**'
|
|
||||||
- '.github/workflows/test-infrastructure-files.yml'
|
|
||||||
- 'management/cmd/**'
|
|
||||||
- 'signal/cmd/**'
|
|
||||||
|
|
||||||
concurrency:
|
|
||||||
group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref || github.actor_id }}
|
|
||||||
cancel-in-progress: true
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
test-docker-compose:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- name: Install jq
|
|
||||||
run: sudo apt-get install -y jq
|
|
||||||
|
|
||||||
- name: Install curl
|
|
||||||
run: sudo apt-get install -y curl
|
|
||||||
|
|
||||||
- name: Install Go
|
|
||||||
uses: actions/setup-go@v4
|
|
||||||
with:
|
|
||||||
go-version: "1.21.x"
|
|
||||||
|
|
||||||
- name: Cache Go modules
|
|
||||||
uses: actions/cache@v3
|
|
||||||
with:
|
|
||||||
path: ~/go/pkg/mod
|
|
||||||
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
|
|
||||||
restore-keys: |
|
|
||||||
${{ runner.os }}-go-
|
|
||||||
|
|
||||||
- name: Checkout code
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
|
|
||||||
- name: cp setup.env
|
|
||||||
run: cp infrastructure_files/tests/setup.env infrastructure_files/
|
|
||||||
|
|
||||||
- name: run configure
|
|
||||||
working-directory: infrastructure_files
|
|
||||||
run: bash -x configure.sh
|
|
||||||
env:
|
|
||||||
CI_NETBIRD_DOMAIN: localhost
|
|
||||||
CI_NETBIRD_AUTH_CLIENT_ID: testing.client.id
|
|
||||||
CI_NETBIRD_AUTH_CLIENT_SECRET: testing.client.secret
|
|
||||||
CI_NETBIRD_AUTH_AUDIENCE: testing.ci
|
|
||||||
CI_NETBIRD_AUTH_OIDC_CONFIGURATION_ENDPOINT: https://example.eu.auth0.com/.well-known/openid-configuration
|
|
||||||
CI_NETBIRD_USE_AUTH0: true
|
|
||||||
CI_NETBIRD_MGMT_IDP: "none"
|
|
||||||
CI_NETBIRD_IDP_MGMT_CLIENT_ID: testing.client.id
|
|
||||||
CI_NETBIRD_IDP_MGMT_CLIENT_SECRET: testing.client.secret
|
|
||||||
CI_NETBIRD_AUTH_SUPPORTED_SCOPES: "openid profile email offline_access api email_verified"
|
|
||||||
CI_NETBIRD_STORE_CONFIG_ENGINE: "sqlite"
|
|
||||||
CI_NETBIRD_MGMT_IDP_SIGNKEY_REFRESH: false
|
|
||||||
|
|
||||||
- name: check values
|
|
||||||
working-directory: infrastructure_files/artifacts
|
|
||||||
env:
|
|
||||||
CI_NETBIRD_DOMAIN: localhost
|
|
||||||
CI_NETBIRD_AUTH_CLIENT_ID: testing.client.id
|
|
||||||
CI_NETBIRD_AUTH_CLIENT_SECRET: testing.client.secret
|
|
||||||
CI_NETBIRD_AUTH_AUDIENCE: testing.ci
|
|
||||||
CI_NETBIRD_AUTH_OIDC_CONFIGURATION_ENDPOINT: https://example.eu.auth0.com/.well-known/openid-configuration
|
|
||||||
CI_NETBIRD_USE_AUTH0: true
|
|
||||||
CI_NETBIRD_AUTH_SUPPORTED_SCOPES: "openid profile email offline_access api email_verified"
|
|
||||||
CI_NETBIRD_AUTH_AUTHORITY: https://example.eu.auth0.com/
|
|
||||||
CI_NETBIRD_AUTH_JWT_CERTS: https://example.eu.auth0.com/.well-known/jwks.json
|
|
||||||
CI_NETBIRD_AUTH_TOKEN_ENDPOINT: https://example.eu.auth0.com/oauth/token
|
|
||||||
CI_NETBIRD_AUTH_DEVICE_AUTH_ENDPOINT: https://example.eu.auth0.com/oauth/device/code
|
|
||||||
CI_NETBIRD_AUTH_PKCE_AUTHORIZATION_ENDPOINT: https://example.eu.auth0.com/authorize
|
|
||||||
CI_NETBIRD_AUTH_REDIRECT_URI: "/peers"
|
|
||||||
CI_NETBIRD_TOKEN_SOURCE: "idToken"
|
|
||||||
CI_NETBIRD_AUTH_USER_ID_CLAIM: "email"
|
|
||||||
CI_NETBIRD_AUTH_DEVICE_AUTH_AUDIENCE: "super"
|
|
||||||
CI_NETBIRD_AUTH_DEVICE_AUTH_SCOPE: "openid email"
|
|
||||||
CI_NETBIRD_MGMT_IDP: "none"
|
|
||||||
CI_NETBIRD_IDP_MGMT_CLIENT_ID: testing.client.id
|
|
||||||
CI_NETBIRD_IDP_MGMT_CLIENT_SECRET: testing.client.secret
|
|
||||||
CI_NETBIRD_SIGNAL_PORT: 12345
|
|
||||||
CI_NETBIRD_STORE_CONFIG_ENGINE: "sqlite"
|
|
||||||
CI_NETBIRD_MGMT_IDP_SIGNKEY_REFRESH: false
|
|
||||||
CI_NETBIRD_TURN_EXTERNAL_IP: "1.2.3.4"
|
|
||||||
|
|
||||||
run: |
|
|
||||||
set -x
|
|
||||||
grep AUTH_CLIENT_ID docker-compose.yml | grep $CI_NETBIRD_AUTH_CLIENT_ID
|
|
||||||
grep AUTH_CLIENT_SECRET docker-compose.yml | grep $CI_NETBIRD_AUTH_CLIENT_SECRET
|
|
||||||
grep AUTH_AUTHORITY docker-compose.yml | grep $CI_NETBIRD_AUTH_AUTHORITY
|
|
||||||
grep AUTH_AUDIENCE docker-compose.yml | grep $CI_NETBIRD_AUTH_AUDIENCE
|
|
||||||
grep AUTH_SUPPORTED_SCOPES docker-compose.yml | grep "$CI_NETBIRD_AUTH_SUPPORTED_SCOPES"
|
|
||||||
grep USE_AUTH0 docker-compose.yml | grep $CI_NETBIRD_USE_AUTH0
|
|
||||||
grep NETBIRD_MGMT_API_ENDPOINT docker-compose.yml | grep "$CI_NETBIRD_DOMAIN:33073"
|
|
||||||
grep AUTH_REDIRECT_URI docker-compose.yml | grep $CI_NETBIRD_AUTH_REDIRECT_URI
|
|
||||||
grep AUTH_SILENT_REDIRECT_URI docker-compose.yml | egrep 'AUTH_SILENT_REDIRECT_URI=$'
|
|
||||||
grep $CI_NETBIRD_SIGNAL_PORT docker-compose.yml | grep ':80'
|
|
||||||
grep LETSENCRYPT_DOMAIN docker-compose.yml | egrep 'LETSENCRYPT_DOMAIN=$'
|
|
||||||
grep NETBIRD_TOKEN_SOURCE docker-compose.yml | grep $CI_NETBIRD_TOKEN_SOURCE
|
|
||||||
grep AuthUserIDClaim management.json | grep $CI_NETBIRD_AUTH_USER_ID_CLAIM
|
|
||||||
grep -A 3 DeviceAuthorizationFlow management.json | grep -A 1 ProviderConfig | grep Audience | grep $CI_NETBIRD_AUTH_DEVICE_AUTH_AUDIENCE
|
|
||||||
grep -A 3 DeviceAuthorizationFlow management.json | grep -A 1 ProviderConfig | grep Audience | grep $CI_NETBIRD_AUTH_DEVICE_AUTH_AUDIENCE
|
|
||||||
grep Engine management.json | grep "$CI_NETBIRD_STORE_CONFIG_ENGINE"
|
|
||||||
grep IdpSignKeyRefreshEnabled management.json | grep "$CI_NETBIRD_MGMT_IDP_SIGNKEY_REFRESH"
|
|
||||||
grep UseIDToken management.json | grep false
|
|
||||||
grep -A 1 IdpManagerConfig management.json | grep ManagerType | grep $CI_NETBIRD_MGMT_IDP
|
|
||||||
grep -A 3 IdpManagerConfig management.json | grep -A 1 ClientConfig | grep Issuer | grep $CI_NETBIRD_AUTH_AUTHORITY
|
|
||||||
grep -A 4 IdpManagerConfig management.json | grep -A 2 ClientConfig | grep TokenEndpoint | grep $CI_NETBIRD_AUTH_TOKEN_ENDPOINT
|
|
||||||
grep -A 5 IdpManagerConfig management.json | grep -A 3 ClientConfig | grep ClientID | grep $CI_NETBIRD_IDP_MGMT_CLIENT_ID
|
|
||||||
grep -A 6 IdpManagerConfig management.json | grep -A 4 ClientConfig | grep ClientSecret | grep $CI_NETBIRD_IDP_MGMT_CLIENT_SECRET
|
|
||||||
grep -A 7 IdpManagerConfig management.json | grep -A 5 ClientConfig | grep GrantType | grep client_credentials
|
|
||||||
grep -A 10 PKCEAuthorizationFlow management.json | grep -A 10 ProviderConfig | grep Audience | grep $CI_NETBIRD_AUTH_AUDIENCE
|
|
||||||
grep -A 10 PKCEAuthorizationFlow management.json | grep -A 10 ProviderConfig | grep ClientID | grep $CI_NETBIRD_AUTH_CLIENT_ID
|
|
||||||
grep -A 10 PKCEAuthorizationFlow management.json | grep -A 10 ProviderConfig | grep ClientSecret | grep $CI_NETBIRD_AUTH_CLIENT_SECRET
|
|
||||||
grep -A 10 PKCEAuthorizationFlow management.json | grep -A 10 ProviderConfig | grep AuthorizationEndpoint | grep $CI_NETBIRD_AUTH_PKCE_AUTHORIZATION_ENDPOINT
|
|
||||||
grep -A 10 PKCEAuthorizationFlow management.json | grep -A 10 ProviderConfig | grep TokenEndpoint | grep $CI_NETBIRD_AUTH_TOKEN_ENDPOINT
|
|
||||||
grep -A 10 PKCEAuthorizationFlow management.json | grep -A 10 ProviderConfig | grep Scope | grep "$CI_NETBIRD_AUTH_SUPPORTED_SCOPES"
|
|
||||||
grep -A 10 PKCEAuthorizationFlow management.json | grep -A 10 ProviderConfig | grep -A 3 RedirectURLs | grep "http://localhost:53000"
|
|
||||||
grep "external-ip" turnserver.conf | grep $CI_NETBIRD_TURN_EXTERNAL_IP
|
|
||||||
|
|
||||||
- name: Install modules
|
|
||||||
run: go mod tidy
|
|
||||||
|
|
||||||
- name: check git status
|
|
||||||
run: git --no-pager diff --exit-code
|
|
||||||
|
|
||||||
- name: Build management binary
|
|
||||||
working-directory: management
|
|
||||||
run: CGO_ENABLED=1 go build -o netbird-mgmt main.go
|
|
||||||
|
|
||||||
- name: Build management docker image
|
|
||||||
working-directory: management
|
|
||||||
run: |
|
|
||||||
docker build -t netbirdio/management:latest .
|
|
||||||
|
|
||||||
- name: Build signal binary
|
|
||||||
working-directory: signal
|
|
||||||
run: CGO_ENABLED=0 go build -o netbird-signal main.go
|
|
||||||
|
|
||||||
- name: Build signal docker image
|
|
||||||
working-directory: signal
|
|
||||||
run: |
|
|
||||||
docker build -t netbirdio/signal:latest .
|
|
||||||
|
|
||||||
- name: run docker compose up
|
|
||||||
working-directory: infrastructure_files/artifacts
|
|
||||||
run: |
|
|
||||||
docker-compose up -d
|
|
||||||
sleep 5
|
|
||||||
docker-compose ps
|
|
||||||
docker-compose logs --tail=20
|
|
||||||
|
|
||||||
- name: test running containers
|
|
||||||
run: |
|
|
||||||
count=$(docker compose ps --format json | jq '. | select(.Name | contains("artifacts")) | .State' | grep -c running)
|
|
||||||
test $count -eq 4
|
|
||||||
working-directory: infrastructure_files/artifacts
|
|
||||||
|
|
||||||
- name: test geolocation databases
|
|
||||||
working-directory: infrastructure_files/artifacts
|
|
||||||
run: |
|
|
||||||
sleep 30
|
|
||||||
docker compose exec management ls -l /var/lib/netbird/ | grep -i GeoLite2-City.mmdb
|
|
||||||
docker compose exec management ls -l /var/lib/netbird/ | grep -i geonames.db
|
|
||||||
|
|
||||||
test-getting-started-script:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- name: Install jq
|
|
||||||
run: sudo apt-get install -y jq
|
|
||||||
|
|
||||||
- name: Checkout code
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
|
|
||||||
- name: run script
|
|
||||||
run: NETBIRD_DOMAIN=use-ip bash -x infrastructure_files/getting-started-with-zitadel.sh
|
|
||||||
|
|
||||||
- name: test Caddy file gen
|
|
||||||
run: test -f Caddyfile
|
|
||||||
- name: test docker-compose file gen
|
|
||||||
run: test -f docker-compose.yml
|
|
||||||
- name: test management.json file gen
|
|
||||||
run: test -f management.json
|
|
||||||
- name: test turnserver.conf file gen
|
|
||||||
run: |
|
|
||||||
set -x
|
|
||||||
test -f turnserver.conf
|
|
||||||
grep external-ip turnserver.conf
|
|
||||||
- name: test zitadel.env file gen
|
|
||||||
run: test -f zitadel.env
|
|
||||||
- name: test dashboard.env file gen
|
|
||||||
run: test -f dashboard.env
|
|
||||||
test-download-geolite2-script:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
steps:
|
|
||||||
- name: Install jq
|
|
||||||
run: sudo apt-get update && sudo apt-get install -y unzip sqlite3
|
|
||||||
- name: Checkout code
|
|
||||||
uses: actions/checkout@v3
|
|
||||||
- name: test script
|
|
||||||
run: bash -x infrastructure_files/download-geolite2.sh
|
|
||||||
- name: test mmdb file exists
|
|
||||||
run: test -f GeoLite2-City.mmdb
|
|
||||||
- name: test geonames file exists
|
|
||||||
run: test -f geonames.db
|
|
||||||
22
.github/workflows/update-docs.yml
vendored
22
.github/workflows/update-docs.yml
vendored
@@ -1,22 +0,0 @@
|
|||||||
name: update docs
|
|
||||||
|
|
||||||
on:
|
|
||||||
push:
|
|
||||||
tags:
|
|
||||||
- 'v*'
|
|
||||||
paths:
|
|
||||||
- 'management/server/http/api/openapi.yml'
|
|
||||||
|
|
||||||
jobs:
|
|
||||||
trigger_docs_api_update:
|
|
||||||
runs-on: ubuntu-latest
|
|
||||||
if: startsWith(github.ref, 'refs/tags/')
|
|
||||||
steps:
|
|
||||||
- name: Trigger API pages generation
|
|
||||||
uses: benc-uk/workflow-dispatch@v1
|
|
||||||
with:
|
|
||||||
workflow: generate api pages
|
|
||||||
repo: netbirdio/docs
|
|
||||||
ref: "refs/heads/main"
|
|
||||||
token: ${{ secrets.SIGN_GITHUB_TOKEN }}
|
|
||||||
inputs: '{ "tag": "${{ github.ref }}" }'
|
|
||||||
@@ -130,3 +130,10 @@ issues:
|
|||||||
- path: mock\.go
|
- path: mock\.go
|
||||||
linters:
|
linters:
|
||||||
- nilnil
|
- nilnil
|
||||||
|
# Exclude specific deprecation warnings for grpc methods
|
||||||
|
- linters:
|
||||||
|
- staticcheck
|
||||||
|
text: "grpc.DialContext is deprecated"
|
||||||
|
- linters:
|
||||||
|
- staticcheck
|
||||||
|
text: "grpc.WithBlock is deprecated"
|
||||||
|
|||||||
@@ -3,8 +3,10 @@ builds:
|
|||||||
- id: netbird-ui-darwin
|
- id: netbird-ui-darwin
|
||||||
dir: client/ui
|
dir: client/ui
|
||||||
binary: netbird-ui
|
binary: netbird-ui
|
||||||
env: [CGO_ENABLED=1]
|
env:
|
||||||
|
- CGO_ENABLED=1
|
||||||
|
- MACOSX_DEPLOYMENT_TARGET=11.0
|
||||||
|
- MACOS_DEPLOYMENT_TARGET=11.0
|
||||||
goos:
|
goos:
|
||||||
- darwin
|
- darwin
|
||||||
goarch:
|
goarch:
|
||||||
|
|||||||
@@ -5,7 +5,7 @@
|
|||||||
We as members, contributors, and leaders pledge to make participation in our
|
We as members, contributors, and leaders pledge to make participation in our
|
||||||
community a harassment-free experience for everyone, regardless of age, body
|
community a harassment-free experience for everyone, regardless of age, body
|
||||||
size, visible or invisible disability, ethnicity, sex characteristics, gender
|
size, visible or invisible disability, ethnicity, sex characteristics, gender
|
||||||
identity and expression, level of experience, education, socio-economic status,
|
identity and expression, level of experience, education, socioeconomic status,
|
||||||
nationality, personal appearance, race, caste, color, religion, or sexual
|
nationality, personal appearance, race, caste, color, religion, or sexual
|
||||||
identity and orientation.
|
identity and orientation.
|
||||||
|
|
||||||
|
|||||||
18
README.md
18
README.md
@@ -10,10 +10,12 @@
|
|||||||
<img width="234" src="docs/media/logo-full.png"/>
|
<img width="234" src="docs/media/logo-full.png"/>
|
||||||
</p>
|
</p>
|
||||||
<p>
|
<p>
|
||||||
|
<a href="https://img.shields.io/badge/license-BSD--3-blue)">
|
||||||
|
<img src="https://sonarcloud.io/api/project_badges/measure?project=netbirdio_netbird&metric=alert_status" />
|
||||||
|
</a>
|
||||||
<a href="https://github.com/netbirdio/netbird/blob/main/LICENSE">
|
<a href="https://github.com/netbirdio/netbird/blob/main/LICENSE">
|
||||||
<img src="https://img.shields.io/badge/license-BSD--3-blue" />
|
<img src="https://img.shields.io/badge/license-BSD--3-blue" />
|
||||||
</a>
|
</a>
|
||||||
<a href="https://www.codacy.com/gh/netbirdio/netbird/dashboard?utm_source=github.com&utm_medium=referral&utm_content=netbirdio/netbird&utm_campaign=Badge_Grade"><img src="https://app.codacy.com/project/badge/Grade/e3013d046aec44cdb7462c8673b00976"/></a>
|
|
||||||
<br>
|
<br>
|
||||||
<a href="https://join.slack.com/t/netbirdio/shared_invite/zt-vrahf41g-ik1v7fV8du6t0RwxSrJ96A">
|
<a href="https://join.slack.com/t/netbirdio/shared_invite/zt-vrahf41g-ik1v7fV8du6t0RwxSrJ96A">
|
||||||
<img src="https://img.shields.io/badge/slack-@netbird-red.svg?logo=slack"/>
|
<img src="https://img.shields.io/badge/slack-@netbird-red.svg?logo=slack"/>
|
||||||
@@ -40,11 +42,13 @@
|
|||||||
|
|
||||||
**Connect.** NetBird creates a WireGuard-based overlay network that automatically connects your machines over an encrypted tunnel, leaving behind the hassle of opening ports, complex firewall rules, VPN gateways, and so forth.
|
**Connect.** NetBird creates a WireGuard-based overlay network that automatically connects your machines over an encrypted tunnel, leaving behind the hassle of opening ports, complex firewall rules, VPN gateways, and so forth.
|
||||||
|
|
||||||
**Secure.** NetBird enables secure remote access by applying granular access policies, while allowing you to manage them intuitively from a single place. Works universally on any infrastructure.
|
**Secure.** NetBird enables secure remote access by applying granular access policies while allowing you to manage them intuitively from a single place. Works universally on any infrastructure.
|
||||||
|
|
||||||
### Open-Source Network Security in a Single Platform
|
### Open-Source Network Security in a Single Platform
|
||||||
|
|
||||||

|
|
||||||
|

|
||||||
|
|
||||||
|
|
||||||
### Key features
|
### Key features
|
||||||
|
|
||||||
@@ -76,7 +80,7 @@ Follow the [Advanced guide with a custom identity provider](https://docs.netbird
|
|||||||
- **Public domain** name pointing to the VM.
|
- **Public domain** name pointing to the VM.
|
||||||
|
|
||||||
**Software requirements:**
|
**Software requirements:**
|
||||||
- Docker installed on the VM with the docker compose plugin ([Docker installation guide](https://docs.docker.com/engine/install/)) or docker with docker-compose in version 2 or higher.
|
- Docker installed on the VM with the docker-compose plugin ([Docker installation guide](https://docs.docker.com/engine/install/)) or docker with docker-compose in version 2 or higher.
|
||||||
- [jq](https://jqlang.github.io/jq/) installed. In most distributions
|
- [jq](https://jqlang.github.io/jq/) installed. In most distributions
|
||||||
Usually available in the official repositories and can be installed with `sudo apt install jq` or `sudo yum install jq`
|
Usually available in the official repositories and can be installed with `sudo apt install jq` or `sudo yum install jq`
|
||||||
- [curl](https://curl.se/) installed.
|
- [curl](https://curl.se/) installed.
|
||||||
@@ -93,9 +97,9 @@ export NETBIRD_DOMAIN=netbird.example.com; curl -fsSL https://github.com/netbird
|
|||||||
- Every machine in the network runs [NetBird Agent (or Client)](client/) that manages WireGuard.
|
- Every machine in the network runs [NetBird Agent (or Client)](client/) that manages WireGuard.
|
||||||
- Every agent connects to [Management Service](management/) that holds network state, manages peer IPs, and distributes network updates to agents (peers).
|
- Every agent connects to [Management Service](management/) that holds network state, manages peer IPs, and distributes network updates to agents (peers).
|
||||||
- NetBird agent uses WebRTC ICE implemented in [pion/ice library](https://github.com/pion/ice) to discover connection candidates when establishing a peer-to-peer connection between machines.
|
- NetBird agent uses WebRTC ICE implemented in [pion/ice library](https://github.com/pion/ice) to discover connection candidates when establishing a peer-to-peer connection between machines.
|
||||||
- Connection candidates are discovered with a help of [STUN](https://en.wikipedia.org/wiki/STUN) servers.
|
- Connection candidates are discovered with the help of [STUN](https://en.wikipedia.org/wiki/STUN) servers.
|
||||||
- Agents negotiate a connection through [Signal Service](signal/) passing p2p encrypted messages with candidates.
|
- Agents negotiate a connection through [Signal Service](signal/) passing p2p encrypted messages with candidates.
|
||||||
- Sometimes the NAT traversal is unsuccessful due to strict NATs (e.g. mobile carrier-grade NAT) and p2p connection isn't possible. When this occurs the system falls back to a relay server called [TURN](https://en.wikipedia.org/wiki/Traversal_Using_Relays_around_NAT), and a secure WireGuard tunnel is established via the TURN server.
|
- Sometimes the NAT traversal is unsuccessful due to strict NATs (e.g. mobile carrier-grade NAT) and a p2p connection isn't possible. When this occurs the system falls back to a relay server called [TURN](https://en.wikipedia.org/wiki/Traversal_Using_Relays_around_NAT), and a secure WireGuard tunnel is established via the TURN server.
|
||||||
|
|
||||||
[Coturn](https://github.com/coturn/coturn) is the one that has been successfully used for STUN and TURN in NetBird setups.
|
[Coturn](https://github.com/coturn/coturn) is the one that has been successfully used for STUN and TURN in NetBird setups.
|
||||||
|
|
||||||
@@ -119,7 +123,7 @@ In November 2022, NetBird joined the [StartUpSecure program](https://www.forschu
|
|||||||

|

|
||||||
|
|
||||||
### Testimonials
|
### Testimonials
|
||||||
We use open-source technologies like [WireGuard®](https://www.wireguard.com/), [Pion ICE (WebRTC)](https://github.com/pion/ice), [Coturn](https://github.com/coturn/coturn), and [Rosenpass](https://rosenpass.eu). We very much appreciate the work these guys are doing and we'd greatly appreciate if you could support them in any way (e.g. giving a star or a contribution).
|
We use open-source technologies like [WireGuard®](https://www.wireguard.com/), [Pion ICE (WebRTC)](https://github.com/pion/ice), [Coturn](https://github.com/coturn/coturn), and [Rosenpass](https://rosenpass.eu). We very much appreciate the work these guys are doing and we'd greatly appreciate if you could support them in any way (e.g., by giving a star or a contribution).
|
||||||
|
|
||||||
### Legal
|
### Legal
|
||||||
_WireGuard_ and the _WireGuard_ logo are [registered trademarks](https://www.wireguard.com/trademark-policy/) of Jason A. Donenfeld.
|
_WireGuard_ and the _WireGuard_ logo are [registered trademarks](https://www.wireguard.com/trademark-policy/) of Jason A. Donenfeld.
|
||||||
|
|||||||
@@ -1,5 +1,5 @@
|
|||||||
FROM alpine:3.18.5
|
FROM alpine:3.19
|
||||||
RUN apk add --no-cache ca-certificates iptables ip6tables
|
RUN apk add --no-cache ca-certificates iptables ip6tables
|
||||||
ENV NB_FOREGROUND_MODE=true
|
ENV NB_FOREGROUND_MODE=true
|
||||||
ENTRYPOINT [ "/go/bin/netbird","up"]
|
ENTRYPOINT [ "/usr/local/bin/netbird","up"]
|
||||||
COPY netbird /go/bin/netbird
|
COPY netbird /usr/local/bin/netbird
|
||||||
@@ -1,3 +1,5 @@
|
|||||||
|
//go:build android
|
||||||
|
|
||||||
package android
|
package android
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@@ -14,6 +16,7 @@ import (
|
|||||||
"github.com/netbirdio/netbird/client/system"
|
"github.com/netbirdio/netbird/client/system"
|
||||||
"github.com/netbirdio/netbird/formatter"
|
"github.com/netbirdio/netbird/formatter"
|
||||||
"github.com/netbirdio/netbird/iface"
|
"github.com/netbirdio/netbird/iface"
|
||||||
|
"github.com/netbirdio/netbird/util/net"
|
||||||
)
|
)
|
||||||
|
|
||||||
// ConnectionListener export internal Listener for mobile
|
// ConnectionListener export internal Listener for mobile
|
||||||
@@ -54,14 +57,17 @@ type Client struct {
|
|||||||
ctxCancel context.CancelFunc
|
ctxCancel context.CancelFunc
|
||||||
ctxCancelLock *sync.Mutex
|
ctxCancelLock *sync.Mutex
|
||||||
deviceName string
|
deviceName string
|
||||||
|
uiVersion string
|
||||||
networkChangeListener listener.NetworkChangeListener
|
networkChangeListener listener.NetworkChangeListener
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewClient instantiate a new Client
|
// NewClient instantiate a new Client
|
||||||
func NewClient(cfgFile, deviceName string, tunAdapter TunAdapter, iFaceDiscover IFaceDiscover, networkChangeListener NetworkChangeListener) *Client {
|
func NewClient(cfgFile, deviceName string, uiVersion string, tunAdapter TunAdapter, iFaceDiscover IFaceDiscover, networkChangeListener NetworkChangeListener) *Client {
|
||||||
|
net.SetAndroidProtectSocketFn(tunAdapter.ProtectSocket)
|
||||||
return &Client{
|
return &Client{
|
||||||
cfgFile: cfgFile,
|
cfgFile: cfgFile,
|
||||||
deviceName: deviceName,
|
deviceName: deviceName,
|
||||||
|
uiVersion: uiVersion,
|
||||||
tunAdapter: tunAdapter,
|
tunAdapter: tunAdapter,
|
||||||
iFaceDiscover: iFaceDiscover,
|
iFaceDiscover: iFaceDiscover,
|
||||||
recorder: peer.NewRecorder(""),
|
recorder: peer.NewRecorder(""),
|
||||||
@@ -84,6 +90,9 @@ func (c *Client) Run(urlOpener URLOpener, dns *DNSList, dnsReadyListener DnsRead
|
|||||||
var ctx context.Context
|
var ctx context.Context
|
||||||
//nolint
|
//nolint
|
||||||
ctxWithValues := context.WithValue(context.Background(), system.DeviceNameCtxKey, c.deviceName)
|
ctxWithValues := context.WithValue(context.Background(), system.DeviceNameCtxKey, c.deviceName)
|
||||||
|
//nolint
|
||||||
|
ctxWithValues = context.WithValue(ctxWithValues, system.UiVersionCtxKey, c.uiVersion)
|
||||||
|
|
||||||
c.ctxCancelLock.Lock()
|
c.ctxCancelLock.Lock()
|
||||||
ctx, c.ctxCancel = context.WithCancel(ctxWithValues)
|
ctx, c.ctxCancel = context.WithCancel(ctxWithValues)
|
||||||
defer c.ctxCancel()
|
defer c.ctxCancel()
|
||||||
@@ -97,7 +106,8 @@ func (c *Client) Run(urlOpener URLOpener, dns *DNSList, dnsReadyListener DnsRead
|
|||||||
|
|
||||||
// todo do not throw error in case of cancelled context
|
// todo do not throw error in case of cancelled context
|
||||||
ctx = internal.CtxInitState(ctx)
|
ctx = internal.CtxInitState(ctx)
|
||||||
return internal.RunClientMobile(ctx, cfg, c.recorder, c.tunAdapter, c.iFaceDiscover, c.networkChangeListener, dns.items, dnsReadyListener)
|
connectClient := internal.NewConnectClient(ctx, cfg, c.recorder)
|
||||||
|
return connectClient.RunOnAndroid(c.tunAdapter, c.iFaceDiscover, c.networkChangeListener, dns.items, dnsReadyListener)
|
||||||
}
|
}
|
||||||
|
|
||||||
// RunWithoutLogin we apply this type of run function when the backed has been started without UI (i.e. after reboot).
|
// RunWithoutLogin we apply this type of run function when the backed has been started without UI (i.e. after reboot).
|
||||||
@@ -122,7 +132,8 @@ func (c *Client) RunWithoutLogin(dns *DNSList, dnsReadyListener DnsReadyListener
|
|||||||
|
|
||||||
// todo do not throw error in case of cancelled context
|
// todo do not throw error in case of cancelled context
|
||||||
ctx = internal.CtxInitState(ctx)
|
ctx = internal.CtxInitState(ctx)
|
||||||
return internal.RunClientMobile(ctx, cfg, c.recorder, c.tunAdapter, c.iFaceDiscover, c.networkChangeListener, dns.items, dnsReadyListener)
|
connectClient := internal.NewConnectClient(ctx, cfg, c.recorder)
|
||||||
|
return connectClient.RunOnAndroid(c.tunAdapter, c.iFaceDiscover, c.networkChangeListener, dns.items, dnsReadyListener)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Stop the internal client and free the resources
|
// Stop the internal client and free the resources
|
||||||
|
|||||||
227
client/anonymize/anonymize.go
Normal file
227
client/anonymize/anonymize.go
Normal file
@@ -0,0 +1,227 @@
|
|||||||
|
package anonymize
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/rand"
|
||||||
|
"fmt"
|
||||||
|
"math/big"
|
||||||
|
"net"
|
||||||
|
"net/netip"
|
||||||
|
"net/url"
|
||||||
|
"regexp"
|
||||||
|
"slices"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Anonymizer struct {
|
||||||
|
ipAnonymizer map[netip.Addr]netip.Addr
|
||||||
|
domainAnonymizer map[string]string
|
||||||
|
currentAnonIPv4 netip.Addr
|
||||||
|
currentAnonIPv6 netip.Addr
|
||||||
|
startAnonIPv4 netip.Addr
|
||||||
|
startAnonIPv6 netip.Addr
|
||||||
|
}
|
||||||
|
|
||||||
|
func DefaultAddresses() (netip.Addr, netip.Addr) {
|
||||||
|
// 192.51.100.0, 100::
|
||||||
|
return netip.AddrFrom4([4]byte{198, 51, 100, 0}), netip.AddrFrom16([16]byte{0x01})
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewAnonymizer(startIPv4, startIPv6 netip.Addr) *Anonymizer {
|
||||||
|
return &Anonymizer{
|
||||||
|
ipAnonymizer: map[netip.Addr]netip.Addr{},
|
||||||
|
domainAnonymizer: map[string]string{},
|
||||||
|
currentAnonIPv4: startIPv4,
|
||||||
|
currentAnonIPv6: startIPv6,
|
||||||
|
startAnonIPv4: startIPv4,
|
||||||
|
startAnonIPv6: startIPv6,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Anonymizer) AnonymizeIP(ip netip.Addr) netip.Addr {
|
||||||
|
if ip.IsLoopback() ||
|
||||||
|
ip.IsLinkLocalUnicast() ||
|
||||||
|
ip.IsLinkLocalMulticast() ||
|
||||||
|
ip.IsInterfaceLocalMulticast() ||
|
||||||
|
ip.IsPrivate() ||
|
||||||
|
ip.IsUnspecified() ||
|
||||||
|
ip.IsMulticast() ||
|
||||||
|
isWellKnown(ip) ||
|
||||||
|
a.isInAnonymizedRange(ip) {
|
||||||
|
|
||||||
|
return ip
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, ok := a.ipAnonymizer[ip]; !ok {
|
||||||
|
if ip.Is4() {
|
||||||
|
a.ipAnonymizer[ip] = a.currentAnonIPv4
|
||||||
|
a.currentAnonIPv4 = a.currentAnonIPv4.Next()
|
||||||
|
} else {
|
||||||
|
a.ipAnonymizer[ip] = a.currentAnonIPv6
|
||||||
|
a.currentAnonIPv6 = a.currentAnonIPv6.Next()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return a.ipAnonymizer[ip]
|
||||||
|
}
|
||||||
|
|
||||||
|
// isInAnonymizedRange checks if an IP is within the range of already assigned anonymized IPs
|
||||||
|
func (a *Anonymizer) isInAnonymizedRange(ip netip.Addr) bool {
|
||||||
|
if ip.Is4() && ip.Compare(a.startAnonIPv4) >= 0 && ip.Compare(a.currentAnonIPv4) <= 0 {
|
||||||
|
return true
|
||||||
|
} else if !ip.Is4() && ip.Compare(a.startAnonIPv6) >= 0 && ip.Compare(a.currentAnonIPv6) <= 0 {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Anonymizer) AnonymizeIPString(ip string) string {
|
||||||
|
addr, err := netip.ParseAddr(ip)
|
||||||
|
if err != nil {
|
||||||
|
return ip
|
||||||
|
}
|
||||||
|
|
||||||
|
return a.AnonymizeIP(addr).String()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Anonymizer) AnonymizeDomain(domain string) string {
|
||||||
|
if strings.HasSuffix(domain, "netbird.io") ||
|
||||||
|
strings.HasSuffix(domain, "netbird.selfhosted") ||
|
||||||
|
strings.HasSuffix(domain, "netbird.cloud") ||
|
||||||
|
strings.HasSuffix(domain, "netbird.stage") ||
|
||||||
|
strings.HasSuffix(domain, ".domain") {
|
||||||
|
return domain
|
||||||
|
}
|
||||||
|
|
||||||
|
parts := strings.Split(domain, ".")
|
||||||
|
if len(parts) < 2 {
|
||||||
|
return domain
|
||||||
|
}
|
||||||
|
|
||||||
|
baseDomain := parts[len(parts)-2] + "." + parts[len(parts)-1]
|
||||||
|
|
||||||
|
anonymized, ok := a.domainAnonymizer[baseDomain]
|
||||||
|
if !ok {
|
||||||
|
anonymizedBase := "anon-" + generateRandomString(5) + ".domain"
|
||||||
|
a.domainAnonymizer[baseDomain] = anonymizedBase
|
||||||
|
anonymized = anonymizedBase
|
||||||
|
}
|
||||||
|
|
||||||
|
return strings.Replace(domain, baseDomain, anonymized, 1)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Anonymizer) AnonymizeURI(uri string) string {
|
||||||
|
u, err := url.Parse(uri)
|
||||||
|
if err != nil {
|
||||||
|
return uri
|
||||||
|
}
|
||||||
|
|
||||||
|
var anonymizedHost string
|
||||||
|
if u.Opaque != "" {
|
||||||
|
host, port, err := net.SplitHostPort(u.Opaque)
|
||||||
|
if err == nil {
|
||||||
|
anonymizedHost = fmt.Sprintf("%s:%s", a.AnonymizeDomain(host), port)
|
||||||
|
} else {
|
||||||
|
anonymizedHost = a.AnonymizeDomain(u.Opaque)
|
||||||
|
}
|
||||||
|
u.Opaque = anonymizedHost
|
||||||
|
} else if u.Host != "" {
|
||||||
|
host, port, err := net.SplitHostPort(u.Host)
|
||||||
|
if err == nil {
|
||||||
|
anonymizedHost = fmt.Sprintf("%s:%s", a.AnonymizeDomain(host), port)
|
||||||
|
} else {
|
||||||
|
anonymizedHost = a.AnonymizeDomain(u.Host)
|
||||||
|
}
|
||||||
|
u.Host = anonymizedHost
|
||||||
|
}
|
||||||
|
return u.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (a *Anonymizer) AnonymizeString(str string) string {
|
||||||
|
ipv4Regex := regexp.MustCompile(`\b(?:[0-9]{1,3}\.){3}[0-9]{1,3}\b`)
|
||||||
|
ipv6Regex := regexp.MustCompile(`\b([0-9a-fA-F:]+:+[0-9a-fA-F]{0,4})(?:%[0-9a-zA-Z]+)?(?:\/[0-9]{1,3})?(?::[0-9]{1,5})?\b`)
|
||||||
|
|
||||||
|
str = ipv4Regex.ReplaceAllStringFunc(str, a.AnonymizeIPString)
|
||||||
|
str = ipv6Regex.ReplaceAllStringFunc(str, a.AnonymizeIPString)
|
||||||
|
|
||||||
|
for domain, anonDomain := range a.domainAnonymizer {
|
||||||
|
str = strings.ReplaceAll(str, domain, anonDomain)
|
||||||
|
}
|
||||||
|
|
||||||
|
str = a.AnonymizeSchemeURI(str)
|
||||||
|
str = a.AnonymizeDNSLogLine(str)
|
||||||
|
|
||||||
|
return str
|
||||||
|
}
|
||||||
|
|
||||||
|
// AnonymizeSchemeURI finds and anonymizes URIs with stun, stuns, turn, and turns schemes.
|
||||||
|
func (a *Anonymizer) AnonymizeSchemeURI(text string) string {
|
||||||
|
re := regexp.MustCompile(`(?i)\b(stuns?:|turns?:|https?://)\S+\b`)
|
||||||
|
|
||||||
|
return re.ReplaceAllStringFunc(text, a.AnonymizeURI)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AnonymizeDNSLogLine anonymizes domain names in DNS log entries by replacing them with a random string.
|
||||||
|
func (a *Anonymizer) AnonymizeDNSLogLine(logEntry string) string {
|
||||||
|
domainPattern := `dns\.Question{Name:"([^"]+)",`
|
||||||
|
domainRegex := regexp.MustCompile(domainPattern)
|
||||||
|
|
||||||
|
return domainRegex.ReplaceAllStringFunc(logEntry, func(match string) string {
|
||||||
|
parts := strings.Split(match, `"`)
|
||||||
|
if len(parts) >= 2 {
|
||||||
|
domain := parts[1]
|
||||||
|
if strings.HasSuffix(domain, ".domain") {
|
||||||
|
return match
|
||||||
|
}
|
||||||
|
randomDomain := generateRandomString(10) + ".domain"
|
||||||
|
return strings.Replace(match, domain, randomDomain, 1)
|
||||||
|
}
|
||||||
|
return match
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
// AnonymizeRoute anonymizes a route string by replacing IP addresses with anonymized versions and
|
||||||
|
// domain names with random strings.
|
||||||
|
func (a *Anonymizer) AnonymizeRoute(route string) string {
|
||||||
|
prefix, err := netip.ParsePrefix(route)
|
||||||
|
if err == nil {
|
||||||
|
ip := a.AnonymizeIPString(prefix.Addr().String())
|
||||||
|
return fmt.Sprintf("%s/%d", ip, prefix.Bits())
|
||||||
|
}
|
||||||
|
domains := strings.Split(route, ", ")
|
||||||
|
for i, domain := range domains {
|
||||||
|
domains[i] = a.AnonymizeDomain(domain)
|
||||||
|
}
|
||||||
|
return strings.Join(domains, ", ")
|
||||||
|
}
|
||||||
|
|
||||||
|
func isWellKnown(addr netip.Addr) bool {
|
||||||
|
wellKnown := []string{
|
||||||
|
"8.8.8.8", "8.8.4.4", // Google DNS IPv4
|
||||||
|
"2001:4860:4860::8888", "2001:4860:4860::8844", // Google DNS IPv6
|
||||||
|
"1.1.1.1", "1.0.0.1", // Cloudflare DNS IPv4
|
||||||
|
"2606:4700:4700::1111", "2606:4700:4700::1001", // Cloudflare DNS IPv6
|
||||||
|
"9.9.9.9", "149.112.112.112", // Quad9 DNS IPv4
|
||||||
|
"2620:fe::fe", "2620:fe::9", // Quad9 DNS IPv6
|
||||||
|
}
|
||||||
|
|
||||||
|
if slices.Contains(wellKnown, addr.String()) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
cgnatRangeStart := netip.AddrFrom4([4]byte{100, 64, 0, 0})
|
||||||
|
cgnatRange := netip.PrefixFrom(cgnatRangeStart, 10)
|
||||||
|
|
||||||
|
return cgnatRange.Contains(addr)
|
||||||
|
}
|
||||||
|
|
||||||
|
// generateRandomString returns a cryptographically random alphanumeric string
// of the given length.
func generateRandomString(length int) string {
	const letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
	result := make([]byte, length)
	for i := range result {
		num, err := rand.Int(rand.Reader, big.NewInt(int64(len(letters))))
		if err != nil {
			// Bug fix: the original `continue`d on error, leaving a NUL byte
			// (0x00) at this position. Fall back to a fixed letter instead so
			// the output is always valid alphanumeric text.
			result[i] = letters[0]
			continue
		}
		result[i] = letters[num.Int64()]
	}
	return string(result)
}
|
||||||
223
client/anonymize/anonymize_test.go
Normal file
223
client/anonymize/anonymize_test.go
Normal file
@@ -0,0 +1,223 @@
|
|||||||
|
package anonymize_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/netip"
|
||||||
|
"regexp"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/assert"
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
|
||||||
|
"github.com/netbirdio/netbird/client/anonymize"
|
||||||
|
)
|
||||||
|
|
||||||
|
// TestAnonymizeIP verifies the sequential IP anonymization scheme: public
// addresses are mapped, in first-seen order, to consecutive addresses starting
// at the configured IPv4/IPv6 base addresses; repeats reuse the same mapping;
// well-known, private, and already-in-range addresses pass through unchanged.
func TestAnonymizeIP(t *testing.T) {
	startIPv4 := netip.MustParseAddr("198.51.100.0")
	startIPv6 := netip.MustParseAddr("100::")
	anonymizer := anonymize.NewAnonymizer(startIPv4, startIPv6)

	tests := []struct {
		name   string
		ip     string
		expect string
	}{
		{"Well known", "8.8.8.8", "8.8.8.8"},
		{"First Public IPv4", "1.2.3.4", "198.51.100.0"},
		{"Second Public IPv4", "4.3.2.1", "198.51.100.1"},
		{"Repeated IPv4", "1.2.3.4", "198.51.100.0"},
		{"Private IPv4", "192.168.1.1", "192.168.1.1"},
		{"First Public IPv6", "2607:f8b0:4005:805::200e", "100::"},
		{"Second Public IPv6", "a::b", "100::1"},
		{"Repeated IPv6", "2607:f8b0:4005:805::200e", "100::"},
		{"Private IPv6", "fe80::1", "fe80::1"},
		{"In Range IPv4", "198.51.100.2", "198.51.100.2"},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			ip := netip.MustParseAddr(tc.ip)
			anonymizedIP := anonymizer.AnonymizeIP(ip)
			if anonymizedIP.String() != tc.expect {
				t.Errorf("%s: expected %s, got %s", tc.name, tc.expect, anonymizedIP)
			}
		})
	}
}
|
||||||
|
|
||||||
|
// TestAnonymizeDNSLogLine checks that a domain embedded in a dns.Question log
// entry is replaced with a random placeholder and no longer appears verbatim.
func TestAnonymizeDNSLogLine(t *testing.T) {
	anonymizer := anonymize.NewAnonymizer(netip.Addr{}, netip.Addr{})
	testLog := `2024-04-23T20:01:11+02:00 TRAC client/internal/dns/local.go:25: received question: dns.Question{Name:"example.com", Qtype:0x1c, Qclass:0x1}`

	result := anonymizer.AnonymizeDNSLogLine(testLog)
	require.NotEqual(t, testLog, result)
	assert.NotContains(t, result, "example.com")
}
|
||||||
|
|
||||||
|
// TestAnonymizeDomain verifies that general domains are rewritten to the
// "anon-<rand>.domain" form, subdomain labels are preserved, and protected
// NetBird domains are returned untouched.
func TestAnonymizeDomain(t *testing.T) {
	anonymizer := anonymize.NewAnonymizer(netip.Addr{}, netip.Addr{})
	tests := []struct {
		name            string
		domain          string
		expectPattern   string
		shouldAnonymize bool
	}{
		{
			"General Domain",
			"example.com",
			`^anon-[a-zA-Z0-9]+\.domain$`,
			true,
		},
		{
			"Subdomain",
			"sub.example.com",
			`^sub\.anon-[a-zA-Z0-9]+\.domain$`,
			true,
		},
		{
			"Protected Domain",
			"netbird.io",
			`^netbird\.io$`,
			false,
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			result := anonymizer.AnonymizeDomain(tc.domain)
			if tc.shouldAnonymize {
				assert.Regexp(t, tc.expectPattern, result, "The anonymized domain should match the expected pattern")
				assert.NotContains(t, result, tc.domain, "The original domain should not be present in the result")
			} else {
				assert.Equal(t, tc.domain, result, "Protected domains should not be anonymized")
			}
		})
	}
}
|
||||||
|
|
||||||
|
// TestAnonymizeURI covers both hierarchical (http://) and opaque (stun:) URIs,
// with and without an explicit port, asserting the host is anonymized while
// scheme, port, path, and query survive intact.
func TestAnonymizeURI(t *testing.T) {
	anonymizer := anonymize.NewAnonymizer(netip.Addr{}, netip.Addr{})
	tests := []struct {
		name  string
		uri   string
		regex string
	}{
		{
			"HTTP URI with Port",
			"http://example.com:80/path",
			`^http://anon-[a-zA-Z0-9]+\.domain:80/path$`,
		},
		{
			"HTTP URI without Port",
			"http://example.com/path",
			`^http://anon-[a-zA-Z0-9]+\.domain/path$`,
		},
		{
			"Opaque URI with Port",
			"stun:example.com:80?transport=udp",
			`^stun:anon-[a-zA-Z0-9]+\.domain:80\?transport=udp$`,
		},
		{
			"Opaque URI without Port",
			"stun:example.com?transport=udp",
			`^stun:anon-[a-zA-Z0-9]+\.domain\?transport=udp$`,
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			result := anonymizer.AnonymizeURI(tc.uri)
			assert.Regexp(t, regexp.MustCompile(tc.regex), result, "URI should match expected pattern")
			require.NotContains(t, result, "example.com", "Original domain should not be present")
		})
	}
}
|
||||||
|
|
||||||
|
// TestAnonymizeSchemeURI checks that stun/turn/https URIs embedded in free
// text are found and anonymized while the surrounding text is preserved.
func TestAnonymizeSchemeURI(t *testing.T) {
	anonymizer := anonymize.NewAnonymizer(netip.Addr{}, netip.Addr{})
	tests := []struct {
		name   string
		input  string
		expect string
	}{
		{"STUN URI in text", "Connection made via stun:example.com", `Connection made via stun:anon-[a-zA-Z0-9]+\.domain`},
		{"TURN URI in log", "Failed attempt turn:some.example.com:3478?transport=tcp: retrying", `Failed attempt turn:some.anon-[a-zA-Z0-9]+\.domain:3478\?transport=tcp: retrying`},
		{"HTTPS URI in message", "Visit https://example.com for more", `Visit https://anon-[a-zA-Z0-9]+\.domain for more`},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			result := anonymizer.AnonymizeSchemeURI(tc.input)
			assert.Regexp(t, tc.expect, result, "The anonymized output should match expected pattern")
			require.NotContains(t, result, "example.com", "Original domain should not be present")
		})
	}
}
|
||||||
|
|
||||||
|
// TestAnonymizString_MemorizedDomain verifies that AnonymizeString reuses a
// domain mapping created earlier by AnonymizeDomain, and that the output is
// stable under a second anonymization pass (idempotence).
// NOTE(review): the function name is missing an "e" ("AnonymizString");
// consider renaming to TestAnonymizeString_MemorizedDomain for consistency.
func TestAnonymizString_MemorizedDomain(t *testing.T) {
	anonymizer := anonymize.NewAnonymizer(netip.Addr{}, netip.Addr{})
	domain := "example.com"
	anonymizedDomain := anonymizer.AnonymizeDomain(domain)

	sampleString := "This is a test string including the domain example.com which should be anonymized."

	firstPassResult := anonymizer.AnonymizeString(sampleString)
	secondPassResult := anonymizer.AnonymizeString(firstPassResult)

	assert.Contains(t, firstPassResult, anonymizedDomain, "The domain should be anonymized in the first pass")
	assert.NotContains(t, firstPassResult, domain, "The original domain should not appear in the first pass output")

	assert.Equal(t, firstPassResult, secondPassResult, "The second pass should not further anonymize the string")
}
|
||||||
|
|
||||||
|
// TestAnonymizeString_DoubleURI verifies that a URI inside a longer string is
// anonymized using the memorized domain mapping and that re-running
// AnonymizeString on the result does not change it further (idempotence).
func TestAnonymizeString_DoubleURI(t *testing.T) {
	anonymizer := anonymize.NewAnonymizer(netip.Addr{}, netip.Addr{})
	domain := "example.com"
	anonymizedDomain := anonymizer.AnonymizeDomain(domain)

	sampleString := "Check out our site at https://example.com for more info."

	firstPassResult := anonymizer.AnonymizeString(sampleString)
	secondPassResult := anonymizer.AnonymizeString(firstPassResult)

	assert.Contains(t, firstPassResult, "https://"+anonymizedDomain, "The URI should be anonymized in the first pass")
	assert.NotContains(t, firstPassResult, "https://example.com", "The original URI should not appear in the first pass output")

	assert.Equal(t, firstPassResult, secondPassResult, "The second pass should not further anonymize the URI")
}
|
||||||
|
|
||||||
|
// TestAnonymizeString_IPAddresses checks that IPv4 and IPv6 addresses embedded
// in arbitrary text — including bracketed IPv6 with a port — are replaced with
// sequential anonymized addresses derived from the default base addresses.
func TestAnonymizeString_IPAddresses(t *testing.T) {
	anonymizer := anonymize.NewAnonymizer(anonymize.DefaultAddresses())
	tests := []struct {
		name   string
		input  string
		expect string
	}{
		{
			name:   "IPv4 Address",
			input:  "Error occurred at IP 122.138.1.1",
			expect: "Error occurred at IP 198.51.100.0",
		},
		{
			name:   "IPv6 Address",
			input:  "Access attempted from 2001:db8::ff00:42",
			expect: "Access attempted from 100::",
		},
		{
			name:   "IPv6 Address with Port",
			input:  "Access attempted from [2001:db8::ff00:42]:8080",
			expect: "Access attempted from [100::]:8080",
		},
		{
			name:   "Both IPv4 and IPv6",
			input:  "IPv4: 142.108.0.1 and IPv6: 2001:db8::ff00:43",
			expect: "IPv4: 198.51.100.1 and IPv6: 100::1",
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			result := anonymizer.AnonymizeString(tc.input)
			assert.Equal(t, tc.expect, result, "IP addresses should be anonymized correctly")
		})
	}
}
|
||||||
273
client/cmd/debug.go
Normal file
273
client/cmd/debug.go
Normal file
@@ -0,0 +1,273 @@
|
|||||||
|
package cmd
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
log "github.com/sirupsen/logrus"
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
"google.golang.org/grpc/status"
|
||||||
|
|
||||||
|
"github.com/netbirdio/netbird/client/internal"
|
||||||
|
"github.com/netbirdio/netbird/client/proto"
|
||||||
|
"github.com/netbirdio/netbird/client/server"
|
||||||
|
)
|
||||||
|
|
||||||
|
// errCloseConnection is the log format used when closing the daemon gRPC
// connection fails.
const errCloseConnection = "Failed to close connection: %v"

// debugCmd is the parent command grouping all debugging and logging-control
// subcommands.
var debugCmd = &cobra.Command{
	Use:   "debug",
	Short: "Debugging commands",
	Long:  "Provides commands for debugging and logging control within the Netbird daemon.",
}

// debugBundleCmd asks the daemon to build a compressed archive of its logs
// and status output.
var debugBundleCmd = &cobra.Command{
	Use:     "bundle",
	Example: " netbird debug bundle",
	Short:   "Create a debug bundle",
	Long:    "Generates a compressed archive of the daemon's logs and status for debugging purposes.",
	RunE:    debugBundle,
}

// logCmd groups the logging-management subcommands.
var logCmd = &cobra.Command{
	Use:   "log",
	Short: "Manage logging for the Netbird daemon",
	Long:  `Commands to manage logging settings for the Netbird daemon, including ICE, gRPC, and general log levels.`,
}

// logLevelCmd sets the daemon's log level for the current session only; the
// level reverts to the configured default on daemon restart.
var logLevelCmd = &cobra.Command{
	Use:   "level <level>",
	Short: "Set the logging level for this session",
	Long: `Sets the logging level for the current session. This setting is temporary and will revert to the default on daemon restart.
Available log levels are:
  panic: for panic level, highest level of severity
  fatal: for fatal level errors that cause the program to exit
  error: for error conditions
  warn: for warning conditions
  info: for informational messages
  debug: for debug-level messages
  trace: for trace-level messages, which include more fine-grained information than debug`,
	Args: cobra.ExactArgs(1),
	RunE: setLogLevel,
}

// forCmd raises the log level to trace for a given duration and then produces
// a debug bundle covering that window.
var forCmd = &cobra.Command{
	Use:     "for <time>",
	Short:   "Run debug logs for a specified duration and create a debug bundle",
	Long:    `Sets the logging level to trace, runs for the specified duration, and then generates a debug bundle.`,
	Example: " netbird debug for 5m",
	Args:    cobra.ExactArgs(1),
	RunE:    runForDuration,
}
|
||||||
|
|
||||||
|
func debugBundle(cmd *cobra.Command, _ []string) error {
|
||||||
|
conn, err := getClient(cmd)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
if err := conn.Close(); err != nil {
|
||||||
|
log.Errorf(errCloseConnection, err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
client := proto.NewDaemonServiceClient(conn)
|
||||||
|
resp, err := client.DebugBundle(cmd.Context(), &proto.DebugBundleRequest{
|
||||||
|
Anonymize: anonymizeFlag,
|
||||||
|
Status: getStatusOutput(cmd),
|
||||||
|
SystemInfo: debugSystemInfoFlag,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to bundle debug: %v", status.Convert(err).Message())
|
||||||
|
}
|
||||||
|
|
||||||
|
cmd.Println(resp.GetPath())
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func setLogLevel(cmd *cobra.Command, args []string) error {
|
||||||
|
conn, err := getClient(cmd)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
if err := conn.Close(); err != nil {
|
||||||
|
log.Errorf(errCloseConnection, err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
client := proto.NewDaemonServiceClient(conn)
|
||||||
|
level := server.ParseLogLevel(args[0])
|
||||||
|
if level == proto.LogLevel_UNKNOWN {
|
||||||
|
return fmt.Errorf("unknown log level: %s. Available levels are: panic, fatal, error, warn, info, debug, trace\n", args[0])
|
||||||
|
}
|
||||||
|
|
||||||
|
_, err = client.SetLogLevel(cmd.Context(), &proto.SetLogLevelRequest{
|
||||||
|
Level: level,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to set log level: %v", status.Convert(err).Message())
|
||||||
|
}
|
||||||
|
|
||||||
|
cmd.Println("Log level set successfully to", args[0])
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// runForDuration implements `netbird debug for <time>`. It records the
// daemon's current state and log level, ensures the client is up, raises the
// log level to trace, restarts the connection so the whole session is captured
// at trace level, waits for the requested duration, then generates a debug
// bundle and restores the original state and log level. The sleeps and the
// down/up cycle are deliberate sequencing; do not reorder.
func runForDuration(cmd *cobra.Command, args []string) error {
	duration, err := time.ParseDuration(args[0])
	if err != nil {
		return fmt.Errorf("invalid duration format: %v", err)
	}

	conn, err := getClient(cmd)
	if err != nil {
		return err
	}
	defer func() {
		if err := conn.Close(); err != nil {
			log.Errorf(errCloseConnection, err)
		}
	}()

	client := proto.NewDaemonServiceClient(conn)

	stat, err := client.Status(cmd.Context(), &proto.StatusRequest{})
	if err != nil {
		return fmt.Errorf("failed to get status: %v", status.Convert(err).Message())
	}

	// Remember whether the client was down so we can restore that state at the end.
	stateWasDown := stat.Status != string(internal.StatusConnected) && stat.Status != string(internal.StatusConnecting)

	initialLogLevel, err := client.GetLogLevel(cmd.Context(), &proto.GetLogLevelRequest{})
	if err != nil {
		return fmt.Errorf("failed to get log level: %v", status.Convert(err).Message())
	}

	if stateWasDown {
		if _, err := client.Up(cmd.Context(), &proto.UpRequest{}); err != nil {
			return fmt.Errorf("failed to up: %v", status.Convert(err).Message())
		}
		cmd.Println("Netbird up")
		// Give the engine time to establish connections before restarting it.
		time.Sleep(time.Second * 10)
	}

	// Only touch the log level if it is not already at trace (or finer).
	initialLevelTrace := initialLogLevel.GetLevel() >= proto.LogLevel_TRACE
	if !initialLevelTrace {
		_, err = client.SetLogLevel(cmd.Context(), &proto.SetLogLevelRequest{
			Level: proto.LogLevel_TRACE,
		})
		if err != nil {
			return fmt.Errorf("failed to set log level to TRACE: %v", status.Convert(err).Message())
		}
		cmd.Println("Log level set to trace.")
	}

	// Restart the connection so the full connect sequence is logged at trace level.
	if _, err := client.Down(cmd.Context(), &proto.DownRequest{}); err != nil {
		return fmt.Errorf("failed to down: %v", status.Convert(err).Message())
	}
	cmd.Println("Netbird down")

	time.Sleep(1 * time.Second)

	if _, err := client.Up(cmd.Context(), &proto.UpRequest{}); err != nil {
		return fmt.Errorf("failed to up: %v", status.Convert(err).Message())
	}
	cmd.Println("Netbird up")

	time.Sleep(3 * time.Second)

	// Capture a status snapshot right after coming up; a second snapshot is
	// appended just before tearing down, and both go into the bundle.
	headerPostUp := fmt.Sprintf("----- Netbird post-up - Timestamp: %s", time.Now().Format(time.RFC3339))
	statusOutput := fmt.Sprintf("%s\n%s", headerPostUp, getStatusOutput(cmd))

	if waitErr := waitForDurationOrCancel(cmd.Context(), duration, cmd); waitErr != nil {
		return waitErr
	}
	cmd.Println("\nDuration completed")

	cmd.Println("Creating debug bundle...")

	headerPreDown := fmt.Sprintf("----- Netbird pre-down - Timestamp: %s - Duration: %s", time.Now().Format(time.RFC3339), duration)
	statusOutput = fmt.Sprintf("%s\n%s\n%s", statusOutput, headerPreDown, getStatusOutput(cmd))

	resp, err := client.DebugBundle(cmd.Context(), &proto.DebugBundleRequest{
		Anonymize:  anonymizeFlag,
		Status:     statusOutput,
		SystemInfo: debugSystemInfoFlag,
	})
	if err != nil {
		return fmt.Errorf("failed to bundle debug: %v", status.Convert(err).Message())
	}

	// Restore the client state and log level we found on entry.
	if stateWasDown {
		if _, err := client.Down(cmd.Context(), &proto.DownRequest{}); err != nil {
			return fmt.Errorf("failed to down: %v", status.Convert(err).Message())
		}
		cmd.Println("Netbird down")
	}

	if !initialLevelTrace {
		if _, err := client.SetLogLevel(cmd.Context(), &proto.SetLogLevelRequest{Level: initialLogLevel.GetLevel()}); err != nil {
			return fmt.Errorf("failed to restore log level: %v", status.Convert(err).Message())
		}
		cmd.Println("Log level restored to", initialLogLevel.GetLevel())
	}

	cmd.Println(resp.GetPath())

	return nil
}
|
||||||
|
|
||||||
|
func getStatusOutput(cmd *cobra.Command) string {
|
||||||
|
var statusOutputString string
|
||||||
|
statusResp, err := getStatus(cmd.Context())
|
||||||
|
if err != nil {
|
||||||
|
cmd.PrintErrf("Failed to get status: %v\n", err)
|
||||||
|
} else {
|
||||||
|
statusOutputString = parseToFullDetailSummary(convertToStatusOutputOverview(statusResp))
|
||||||
|
}
|
||||||
|
return statusOutputString
|
||||||
|
}
|
||||||
|
|
||||||
|
func waitForDurationOrCancel(ctx context.Context, duration time.Duration, cmd *cobra.Command) error {
|
||||||
|
ticker := time.NewTicker(1 * time.Second)
|
||||||
|
defer ticker.Stop()
|
||||||
|
|
||||||
|
startTime := time.Now()
|
||||||
|
|
||||||
|
done := make(chan struct{})
|
||||||
|
go func() {
|
||||||
|
defer close(done)
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return
|
||||||
|
case <-ticker.C:
|
||||||
|
elapsed := time.Since(startTime)
|
||||||
|
if elapsed >= duration {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
remaining := duration - elapsed
|
||||||
|
cmd.Printf("\rRemaining time: %s", formatDuration(remaining))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return ctx.Err()
|
||||||
|
case <-done:
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// formatDuration renders d as zero-padded "HH:MM:SS", rounding to the nearest
// whole second first.
func formatDuration(d time.Duration) string {
	totalSeconds := int64(d.Round(time.Second) / time.Second)
	hours := totalSeconds / 3600
	minutes := (totalSeconds % 3600) / 60
	seconds := totalSeconds % 60
	return fmt.Sprintf("%02d:%02d:%02d", hours, minutes, seconds)
}
|
||||||
@@ -2,9 +2,10 @@ package cmd
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"github.com/netbirdio/netbird/util"
|
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/netbirdio/netbird/util"
|
||||||
|
|
||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
|
|
||||||
@@ -25,7 +26,7 @@ var downCmd = &cobra.Command{
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx, cancel := context.WithTimeout(context.Background(), time.Second*3)
|
ctx, cancel := context.WithTimeout(context.Background(), time.Second*7)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
conn, err := DialClientGRPCServer(ctx, daemonAddr)
|
conn, err := DialClientGRPCServer(ctx, daemonAddr)
|
||||||
|
|||||||
@@ -32,8 +32,12 @@ const (
|
|||||||
preSharedKeyFlag = "preshared-key"
|
preSharedKeyFlag = "preshared-key"
|
||||||
interfaceNameFlag = "interface-name"
|
interfaceNameFlag = "interface-name"
|
||||||
wireguardPortFlag = "wireguard-port"
|
wireguardPortFlag = "wireguard-port"
|
||||||
|
networkMonitorFlag = "network-monitor"
|
||||||
disableAutoConnectFlag = "disable-auto-connect"
|
disableAutoConnectFlag = "disable-auto-connect"
|
||||||
serverSSHAllowedFlag = "allow-server-ssh"
|
serverSSHAllowedFlag = "allow-server-ssh"
|
||||||
|
extraIFaceBlackListFlag = "extra-iface-blacklist"
|
||||||
|
dnsRouteIntervalFlag = "dns-router-interval"
|
||||||
|
systemInfoFlag = "system-info"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@@ -61,9 +65,15 @@ var (
|
|||||||
serverSSHAllowed bool
|
serverSSHAllowed bool
|
||||||
interfaceName string
|
interfaceName string
|
||||||
wireguardPort uint16
|
wireguardPort uint16
|
||||||
|
networkMonitor bool
|
||||||
serviceName string
|
serviceName string
|
||||||
autoConnectDisabled bool
|
autoConnectDisabled bool
|
||||||
rootCmd = &cobra.Command{
|
extraIFaceBlackList []string
|
||||||
|
anonymizeFlag bool
|
||||||
|
debugSystemInfoFlag bool
|
||||||
|
dnsRouteInterval time.Duration
|
||||||
|
|
||||||
|
rootCmd = &cobra.Command{
|
||||||
Use: "netbird",
|
Use: "netbird",
|
||||||
Short: "",
|
Short: "",
|
||||||
Long: "",
|
Long: "",
|
||||||
@@ -83,12 +93,15 @@ func init() {
|
|||||||
oldDefaultConfigPathDir = "/etc/wiretrustee/"
|
oldDefaultConfigPathDir = "/etc/wiretrustee/"
|
||||||
oldDefaultLogFileDir = "/var/log/wiretrustee/"
|
oldDefaultLogFileDir = "/var/log/wiretrustee/"
|
||||||
|
|
||||||
if runtime.GOOS == "windows" {
|
switch runtime.GOOS {
|
||||||
|
case "windows":
|
||||||
defaultConfigPathDir = os.Getenv("PROGRAMDATA") + "\\Netbird\\"
|
defaultConfigPathDir = os.Getenv("PROGRAMDATA") + "\\Netbird\\"
|
||||||
defaultLogFileDir = os.Getenv("PROGRAMDATA") + "\\Netbird\\"
|
defaultLogFileDir = os.Getenv("PROGRAMDATA") + "\\Netbird\\"
|
||||||
|
|
||||||
oldDefaultConfigPathDir = os.Getenv("PROGRAMDATA") + "\\Wiretrustee\\"
|
oldDefaultConfigPathDir = os.Getenv("PROGRAMDATA") + "\\Wiretrustee\\"
|
||||||
oldDefaultLogFileDir = os.Getenv("PROGRAMDATA") + "\\Wiretrustee\\"
|
oldDefaultLogFileDir = os.Getenv("PROGRAMDATA") + "\\Wiretrustee\\"
|
||||||
|
case "freebsd":
|
||||||
|
defaultConfigPathDir = "/var/db/netbird/"
|
||||||
}
|
}
|
||||||
|
|
||||||
defaultConfigPath = defaultConfigPathDir + "config.json"
|
defaultConfigPath = defaultConfigPathDir + "config.json"
|
||||||
@@ -113,10 +126,12 @@ func init() {
|
|||||||
rootCmd.PersistentFlags().StringVarP(&serviceName, "service", "s", defaultServiceName, "Netbird system service name")
|
rootCmd.PersistentFlags().StringVarP(&serviceName, "service", "s", defaultServiceName, "Netbird system service name")
|
||||||
rootCmd.PersistentFlags().StringVarP(&configPath, "config", "c", defaultConfigPath, "Netbird config file location")
|
rootCmd.PersistentFlags().StringVarP(&configPath, "config", "c", defaultConfigPath, "Netbird config file location")
|
||||||
rootCmd.PersistentFlags().StringVarP(&logLevel, "log-level", "l", "info", "sets Netbird log level")
|
rootCmd.PersistentFlags().StringVarP(&logLevel, "log-level", "l", "info", "sets Netbird log level")
|
||||||
rootCmd.PersistentFlags().StringVar(&logFile, "log-file", defaultLogFile, "sets Netbird log path. If console is specified the log will be output to stdout")
|
rootCmd.PersistentFlags().StringVar(&logFile, "log-file", defaultLogFile, "sets Netbird log path. If console is specified the log will be output to stdout. If syslog is specified the log will be sent to syslog daemon.")
|
||||||
rootCmd.PersistentFlags().StringVarP(&setupKey, "setup-key", "k", "", "Setup key obtained from the Management Service Dashboard (used to register peer)")
|
rootCmd.PersistentFlags().StringVarP(&setupKey, "setup-key", "k", "", "Setup key obtained from the Management Service Dashboard (used to register peer)")
|
||||||
rootCmd.PersistentFlags().StringVar(&preSharedKey, preSharedKeyFlag, "", "Sets Wireguard PreSharedKey property. If set, then only peers that have the same key can communicate.")
|
rootCmd.PersistentFlags().StringVar(&preSharedKey, preSharedKeyFlag, "", "Sets Wireguard PreSharedKey property. If set, then only peers that have the same key can communicate.")
|
||||||
rootCmd.PersistentFlags().StringVarP(&hostName, "hostname", "n", "", "Sets a custom hostname for the device")
|
rootCmd.PersistentFlags().StringVarP(&hostName, "hostname", "n", "", "Sets a custom hostname for the device")
|
||||||
|
rootCmd.PersistentFlags().BoolVarP(&anonymizeFlag, "anonymize", "A", false, "anonymize IP addresses and non-netbird.io domains in logs and status output")
|
||||||
|
|
||||||
rootCmd.AddCommand(serviceCmd)
|
rootCmd.AddCommand(serviceCmd)
|
||||||
rootCmd.AddCommand(upCmd)
|
rootCmd.AddCommand(upCmd)
|
||||||
rootCmd.AddCommand(downCmd)
|
rootCmd.AddCommand(downCmd)
|
||||||
@@ -124,8 +139,20 @@ func init() {
|
|||||||
rootCmd.AddCommand(loginCmd)
|
rootCmd.AddCommand(loginCmd)
|
||||||
rootCmd.AddCommand(versionCmd)
|
rootCmd.AddCommand(versionCmd)
|
||||||
rootCmd.AddCommand(sshCmd)
|
rootCmd.AddCommand(sshCmd)
|
||||||
|
rootCmd.AddCommand(routesCmd)
|
||||||
|
rootCmd.AddCommand(debugCmd)
|
||||||
|
|
||||||
serviceCmd.AddCommand(runCmd, startCmd, stopCmd, restartCmd) // service control commands are subcommands of service
|
serviceCmd.AddCommand(runCmd, startCmd, stopCmd, restartCmd) // service control commands are subcommands of service
|
||||||
serviceCmd.AddCommand(installCmd, uninstallCmd) // service installer commands are subcommands of service
|
serviceCmd.AddCommand(installCmd, uninstallCmd) // service installer commands are subcommands of service
|
||||||
|
|
||||||
|
routesCmd.AddCommand(routesListCmd)
|
||||||
|
routesCmd.AddCommand(routesSelectCmd, routesDeselectCmd)
|
||||||
|
|
||||||
|
debugCmd.AddCommand(debugBundleCmd)
|
||||||
|
debugCmd.AddCommand(logCmd)
|
||||||
|
logCmd.AddCommand(logLevelCmd)
|
||||||
|
debugCmd.AddCommand(forCmd)
|
||||||
|
|
||||||
upCmd.PersistentFlags().StringSliceVar(&natExternalIPs, externalIPMapFlag, nil,
|
upCmd.PersistentFlags().StringSliceVar(&natExternalIPs, externalIPMapFlag, nil,
|
||||||
`Sets external IPs maps between local addresses and interfaces.`+
|
`Sets external IPs maps between local addresses and interfaces.`+
|
||||||
`You can specify a comma-separated list with a single IP and IP/IP or IP/Interface Name. `+
|
`You can specify a comma-separated list with a single IP and IP/IP or IP/Interface Name. `+
|
||||||
@@ -143,6 +170,8 @@ func init() {
|
|||||||
upCmd.PersistentFlags().BoolVar(&rosenpassPermissive, rosenpassPermissiveFlag, false, "[Experimental] Enable Rosenpass in permissive mode to allow this peer to accept WireGuard connections without requiring Rosenpass functionality from peers that do not have Rosenpass enabled.")
|
upCmd.PersistentFlags().BoolVar(&rosenpassPermissive, rosenpassPermissiveFlag, false, "[Experimental] Enable Rosenpass in permissive mode to allow this peer to accept WireGuard connections without requiring Rosenpass functionality from peers that do not have Rosenpass enabled.")
|
||||||
upCmd.PersistentFlags().BoolVar(&serverSSHAllowed, serverSSHAllowedFlag, false, "Allow SSH server on peer. If enabled, the SSH server will be permitted")
|
upCmd.PersistentFlags().BoolVar(&serverSSHAllowed, serverSSHAllowedFlag, false, "Allow SSH server on peer. If enabled, the SSH server will be permitted")
|
||||||
upCmd.PersistentFlags().BoolVar(&autoConnectDisabled, disableAutoConnectFlag, false, "Disables auto-connect feature. If enabled, then the client won't connect automatically when the service starts.")
|
upCmd.PersistentFlags().BoolVar(&autoConnectDisabled, disableAutoConnectFlag, false, "Disables auto-connect feature. If enabled, then the client won't connect automatically when the service starts.")
|
||||||
|
|
||||||
|
debugCmd.PersistentFlags().BoolVarP(&debugSystemInfoFlag, systemInfoFlag, "S", false, "Adds system information to the debug bundle")
|
||||||
}
|
}
|
||||||
|
|
||||||
// SetupCloseHandler handles SIGTERM signal and exits with success
|
// SetupCloseHandler handles SIGTERM signal and exits with success
|
||||||
@@ -333,3 +362,17 @@ func migrateToNetbird(oldPath, newPath string) bool {
|
|||||||
|
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func getClient(cmd *cobra.Command) (*grpc.ClientConn, error) {
|
||||||
|
SetFlagsFromEnvVars(rootCmd)
|
||||||
|
cmd.SetOut(cmd.OutOrStdout())
|
||||||
|
|
||||||
|
conn, err := DialClientGRPCServer(cmd.Context(), daemonAddr)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed to connect to daemon error: %v\n"+
|
||||||
|
"If the daemon is not running please run: "+
|
||||||
|
"\nnetbird service install \nnetbird service start\n", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return conn, nil
|
||||||
|
}
|
||||||
|
|||||||
174
client/cmd/route.go
Normal file
174
client/cmd/route.go
Normal file
@@ -0,0 +1,174 @@
|
|||||||
|
package cmd
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/spf13/cobra"
|
||||||
|
"google.golang.org/grpc/status"
|
||||||
|
|
||||||
|
"github.com/netbirdio/netbird/client/proto"
|
||||||
|
)
|
||||||
|
|
||||||
|
var appendFlag bool
|
||||||
|
|
||||||
|
var routesCmd = &cobra.Command{
|
||||||
|
Use: "routes",
|
||||||
|
Short: "Manage network routes",
|
||||||
|
Long: `Commands to list, select, or deselect network routes.`,
|
||||||
|
}
|
||||||
|
|
||||||
|
var routesListCmd = &cobra.Command{
|
||||||
|
Use: "list",
|
||||||
|
Aliases: []string{"ls"},
|
||||||
|
Short: "List routes",
|
||||||
|
Example: " netbird routes list",
|
||||||
|
Long: "List all available network routes.",
|
||||||
|
RunE: routesList,
|
||||||
|
}
|
||||||
|
|
||||||
|
var routesSelectCmd = &cobra.Command{
|
||||||
|
Use: "select route...|all",
|
||||||
|
Short: "Select routes",
|
||||||
|
Long: "Select a list of routes by identifiers or 'all' to clear all selections and to accept all (including new) routes.\nDefault mode is replace, use -a to append to already selected routes.",
|
||||||
|
Example: " netbird routes select all\n netbird routes select route1 route2\n netbird routes select -a route3",
|
||||||
|
Args: cobra.MinimumNArgs(1),
|
||||||
|
RunE: routesSelect,
|
||||||
|
}
|
||||||
|
|
||||||
|
var routesDeselectCmd = &cobra.Command{
|
||||||
|
Use: "deselect route...|all",
|
||||||
|
Short: "Deselect routes",
|
||||||
|
Long: "Deselect previously selected routes by identifiers or 'all' to disable accepting any routes.",
|
||||||
|
Example: " netbird routes deselect all\n netbird routes deselect route1 route2",
|
||||||
|
Args: cobra.MinimumNArgs(1),
|
||||||
|
RunE: routesDeselect,
|
||||||
|
}
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
routesSelectCmd.PersistentFlags().BoolVarP(&appendFlag, "append", "a", false, "Append to current route selection instead of replacing")
|
||||||
|
}
|
||||||
|
|
||||||
|
func routesList(cmd *cobra.Command, _ []string) error {
|
||||||
|
conn, err := getClient(cmd)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer conn.Close()
|
||||||
|
|
||||||
|
client := proto.NewDaemonServiceClient(conn)
|
||||||
|
resp, err := client.ListRoutes(cmd.Context(), &proto.ListRoutesRequest{})
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to list routes: %v", status.Convert(err).Message())
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(resp.Routes) == 0 {
|
||||||
|
cmd.Println("No routes available.")
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
printRoutes(cmd, resp)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func printRoutes(cmd *cobra.Command, resp *proto.ListRoutesResponse) {
|
||||||
|
cmd.Println("Available Routes:")
|
||||||
|
for _, route := range resp.Routes {
|
||||||
|
printRoute(cmd, route)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func printRoute(cmd *cobra.Command, route *proto.Route) {
|
||||||
|
selectedStatus := getSelectedStatus(route)
|
||||||
|
domains := route.GetDomains()
|
||||||
|
|
||||||
|
if len(domains) > 0 {
|
||||||
|
printDomainRoute(cmd, route, domains, selectedStatus)
|
||||||
|
} else {
|
||||||
|
printNetworkRoute(cmd, route, selectedStatus)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func getSelectedStatus(route *proto.Route) string {
|
||||||
|
if route.GetSelected() {
|
||||||
|
return "Selected"
|
||||||
|
}
|
||||||
|
return "Not Selected"
|
||||||
|
}
|
||||||
|
|
||||||
|
func printDomainRoute(cmd *cobra.Command, route *proto.Route, domains []string, selectedStatus string) {
|
||||||
|
cmd.Printf("\n - ID: %s\n Domains: %s\n Status: %s\n", route.GetID(), strings.Join(domains, ", "), selectedStatus)
|
||||||
|
resolvedIPs := route.GetResolvedIPs()
|
||||||
|
|
||||||
|
if len(resolvedIPs) > 0 {
|
||||||
|
printResolvedIPs(cmd, domains, resolvedIPs)
|
||||||
|
} else {
|
||||||
|
cmd.Printf(" Resolved IPs: -\n")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func printNetworkRoute(cmd *cobra.Command, route *proto.Route, selectedStatus string) {
|
||||||
|
cmd.Printf("\n - ID: %s\n Network: %s\n Status: %s\n", route.GetID(), route.GetNetwork(), selectedStatus)
|
||||||
|
}
|
||||||
|
|
||||||
|
func printResolvedIPs(cmd *cobra.Command, domains []string, resolvedIPs map[string]*proto.IPList) {
|
||||||
|
cmd.Printf(" Resolved IPs:\n")
|
||||||
|
for _, domain := range domains {
|
||||||
|
if ipList, exists := resolvedIPs[domain]; exists {
|
||||||
|
cmd.Printf(" [%s]: %s\n", domain, strings.Join(ipList.GetIps(), ", "))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func routesSelect(cmd *cobra.Command, args []string) error {
|
||||||
|
conn, err := getClient(cmd)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer conn.Close()
|
||||||
|
|
||||||
|
client := proto.NewDaemonServiceClient(conn)
|
||||||
|
req := &proto.SelectRoutesRequest{
|
||||||
|
RouteIDs: args,
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(args) == 1 && args[0] == "all" {
|
||||||
|
req.All = true
|
||||||
|
} else if appendFlag {
|
||||||
|
req.Append = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := client.SelectRoutes(cmd.Context(), req); err != nil {
|
||||||
|
return fmt.Errorf("failed to select routes: %v", status.Convert(err).Message())
|
||||||
|
}
|
||||||
|
|
||||||
|
cmd.Println("Routes selected successfully.")
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func routesDeselect(cmd *cobra.Command, args []string) error {
|
||||||
|
conn, err := getClient(cmd)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
defer conn.Close()
|
||||||
|
|
||||||
|
client := proto.NewDaemonServiceClient(conn)
|
||||||
|
req := &proto.SelectRoutesRequest{
|
||||||
|
RouteIDs: args,
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(args) == 1 && args[0] == "all" {
|
||||||
|
req.All = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if _, err := client.DeselectRoutes(cmd.Context(), req); err != nil {
|
||||||
|
return fmt.Errorf("failed to deselect routes: %v", status.Convert(err).Message())
|
||||||
|
}
|
||||||
|
|
||||||
|
cmd.Println("Routes deselected successfully.")
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
@@ -31,6 +31,8 @@ var installCmd = &cobra.Command{
|
|||||||
configPath,
|
configPath,
|
||||||
"--log-level",
|
"--log-level",
|
||||||
logLevel,
|
logLevel,
|
||||||
|
"--daemon-addr",
|
||||||
|
daemonAddr,
|
||||||
}
|
}
|
||||||
|
|
||||||
if managementURL != "" {
|
if managementURL != "" {
|
||||||
@@ -64,6 +66,10 @@ var installCmd = &cobra.Command{
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if runtime.GOOS == "windows" {
|
||||||
|
svcConfig.Option["OnFailure"] = "restart"
|
||||||
|
}
|
||||||
|
|
||||||
ctx, cancel := context.WithCancel(cmd.Context())
|
ctx, cancel := context.WithCancel(cmd.Context())
|
||||||
|
|
||||||
s, err := newSVC(newProgram(ctx, cancel), svcConfig)
|
s, err := newSVC(newProgram(ctx, cancel), svcConfig)
|
||||||
|
|||||||
@@ -24,7 +24,7 @@ var (
|
|||||||
)
|
)
|
||||||
|
|
||||||
var sshCmd = &cobra.Command{
|
var sshCmd = &cobra.Command{
|
||||||
Use: "ssh",
|
Use: "ssh [user@]host",
|
||||||
Args: func(cmd *cobra.Command, args []string) error {
|
Args: func(cmd *cobra.Command, args []string) error {
|
||||||
if len(args) < 1 {
|
if len(args) < 1 {
|
||||||
return errors.New("requires a host argument")
|
return errors.New("requires a host argument")
|
||||||
@@ -94,7 +94,7 @@ func runSSH(ctx context.Context, addr string, pemKey []byte, cmd *cobra.Command)
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
cmd.Printf("Error: %v\n", err)
|
cmd.Printf("Error: %v\n", err)
|
||||||
cmd.Printf("Couldn't connect. Please check the connection status or if the ssh server is enabled on the other peer" +
|
cmd.Printf("Couldn't connect. Please check the connection status or if the ssh server is enabled on the other peer" +
|
||||||
"You can verify the connection by running:\n\n" +
|
"\nYou can verify the connection by running:\n\n" +
|
||||||
" netbird status\n\n")
|
" netbird status\n\n")
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -6,6 +6,8 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"net"
|
"net"
|
||||||
"net/netip"
|
"net/netip"
|
||||||
|
"os"
|
||||||
|
"runtime"
|
||||||
"sort"
|
"sort"
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
@@ -14,6 +16,7 @@ import (
|
|||||||
"google.golang.org/grpc/status"
|
"google.golang.org/grpc/status"
|
||||||
"gopkg.in/yaml.v3"
|
"gopkg.in/yaml.v3"
|
||||||
|
|
||||||
|
"github.com/netbirdio/netbird/client/anonymize"
|
||||||
"github.com/netbirdio/netbird/client/internal"
|
"github.com/netbirdio/netbird/client/internal"
|
||||||
"github.com/netbirdio/netbird/client/internal/peer"
|
"github.com/netbirdio/netbird/client/internal/peer"
|
||||||
"github.com/netbirdio/netbird/client/proto"
|
"github.com/netbirdio/netbird/client/proto"
|
||||||
@@ -28,12 +31,13 @@ type peerStateDetailOutput struct {
|
|||||||
Status string `json:"status" yaml:"status"`
|
Status string `json:"status" yaml:"status"`
|
||||||
LastStatusUpdate time.Time `json:"lastStatusUpdate" yaml:"lastStatusUpdate"`
|
LastStatusUpdate time.Time `json:"lastStatusUpdate" yaml:"lastStatusUpdate"`
|
||||||
ConnType string `json:"connectionType" yaml:"connectionType"`
|
ConnType string `json:"connectionType" yaml:"connectionType"`
|
||||||
Direct bool `json:"direct" yaml:"direct"`
|
|
||||||
IceCandidateType iceCandidateType `json:"iceCandidateType" yaml:"iceCandidateType"`
|
IceCandidateType iceCandidateType `json:"iceCandidateType" yaml:"iceCandidateType"`
|
||||||
IceCandidateEndpoint iceCandidateType `json:"iceCandidateEndpoint" yaml:"iceCandidateEndpoint"`
|
IceCandidateEndpoint iceCandidateType `json:"iceCandidateEndpoint" yaml:"iceCandidateEndpoint"`
|
||||||
|
RelayAddress string `json:"relayAddress" yaml:"relayAddress"`
|
||||||
LastWireguardHandshake time.Time `json:"lastWireguardHandshake" yaml:"lastWireguardHandshake"`
|
LastWireguardHandshake time.Time `json:"lastWireguardHandshake" yaml:"lastWireguardHandshake"`
|
||||||
TransferReceived int64 `json:"transferReceived" yaml:"transferReceived"`
|
TransferReceived int64 `json:"transferReceived" yaml:"transferReceived"`
|
||||||
TransferSent int64 `json:"transferSent" yaml:"transferSent"`
|
TransferSent int64 `json:"transferSent" yaml:"transferSent"`
|
||||||
|
Latency time.Duration `json:"latency" yaml:"latency"`
|
||||||
RosenpassEnabled bool `json:"quantumResistance" yaml:"quantumResistance"`
|
RosenpassEnabled bool `json:"quantumResistance" yaml:"quantumResistance"`
|
||||||
Routes []string `json:"routes" yaml:"routes"`
|
Routes []string `json:"routes" yaml:"routes"`
|
||||||
}
|
}
|
||||||
@@ -143,9 +147,9 @@ func statusFunc(cmd *cobra.Command, args []string) error {
|
|||||||
return fmt.Errorf("failed initializing log %v", err)
|
return fmt.Errorf("failed initializing log %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
ctx := internal.CtxInitState(context.Background())
|
ctx := internal.CtxInitState(cmd.Context())
|
||||||
|
|
||||||
resp, err := getStatus(ctx, cmd)
|
resp, err := getStatus(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -190,7 +194,7 @@ func statusFunc(cmd *cobra.Command, args []string) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func getStatus(ctx context.Context, cmd *cobra.Command) (*proto.StatusResponse, error) {
|
func getStatus(ctx context.Context) (*proto.StatusResponse, error) {
|
||||||
conn, err := DialClientGRPCServer(ctx, daemonAddr)
|
conn, err := DialClientGRPCServer(ctx, daemonAddr)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("failed to connect to daemon error: %v\n"+
|
return nil, fmt.Errorf("failed to connect to daemon error: %v\n"+
|
||||||
@@ -199,7 +203,7 @@ func getStatus(ctx context.Context, cmd *cobra.Command) (*proto.StatusResponse,
|
|||||||
}
|
}
|
||||||
defer conn.Close()
|
defer conn.Close()
|
||||||
|
|
||||||
resp, err := proto.NewDaemonServiceClient(conn).Status(cmd.Context(), &proto.StatusRequest{GetFullPeerStatus: true})
|
resp, err := proto.NewDaemonServiceClient(conn).Status(ctx, &proto.StatusRequest{GetFullPeerStatus: true})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("status failed: %v", status.Convert(err).Message())
|
return nil, fmt.Errorf("status failed: %v", status.Convert(err).Message())
|
||||||
}
|
}
|
||||||
@@ -282,6 +286,11 @@ func convertToStatusOutputOverview(resp *proto.StatusResponse) statusOutputOverv
|
|||||||
NSServerGroups: mapNSGroups(pbFullStatus.GetDnsServers()),
|
NSServerGroups: mapNSGroups(pbFullStatus.GetDnsServers()),
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if anonymizeFlag {
|
||||||
|
anonymizer := anonymize.NewAnonymizer(anonymize.DefaultAddresses())
|
||||||
|
anonymizeOverview(anonymizer, &overview)
|
||||||
|
}
|
||||||
|
|
||||||
return overview
|
return overview
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -326,16 +335,18 @@ func mapNSGroups(servers []*proto.NSGroupState) []nsServerGroupStateOutput {
|
|||||||
|
|
||||||
func mapPeers(peers []*proto.PeerState) peersStateOutput {
|
func mapPeers(peers []*proto.PeerState) peersStateOutput {
|
||||||
var peersStateDetail []peerStateDetailOutput
|
var peersStateDetail []peerStateDetailOutput
|
||||||
localICE := ""
|
|
||||||
remoteICE := ""
|
|
||||||
localICEEndpoint := ""
|
|
||||||
remoteICEEndpoint := ""
|
|
||||||
connType := ""
|
|
||||||
peersConnected := 0
|
peersConnected := 0
|
||||||
lastHandshake := time.Time{}
|
|
||||||
transferReceived := int64(0)
|
|
||||||
transferSent := int64(0)
|
|
||||||
for _, pbPeerState := range peers {
|
for _, pbPeerState := range peers {
|
||||||
|
localICE := ""
|
||||||
|
remoteICE := ""
|
||||||
|
localICEEndpoint := ""
|
||||||
|
remoteICEEndpoint := ""
|
||||||
|
relayServerAddress := ""
|
||||||
|
connType := ""
|
||||||
|
lastHandshake := time.Time{}
|
||||||
|
transferReceived := int64(0)
|
||||||
|
transferSent := int64(0)
|
||||||
|
|
||||||
isPeerConnected := pbPeerState.ConnStatus == peer.StatusConnected.String()
|
isPeerConnected := pbPeerState.ConnStatus == peer.StatusConnected.String()
|
||||||
if skipDetailByFilters(pbPeerState, isPeerConnected) {
|
if skipDetailByFilters(pbPeerState, isPeerConnected) {
|
||||||
continue
|
continue
|
||||||
@@ -351,6 +362,7 @@ func mapPeers(peers []*proto.PeerState) peersStateOutput {
|
|||||||
if pbPeerState.Relayed {
|
if pbPeerState.Relayed {
|
||||||
connType = "Relayed"
|
connType = "Relayed"
|
||||||
}
|
}
|
||||||
|
relayServerAddress = pbPeerState.GetRelayAddress()
|
||||||
lastHandshake = pbPeerState.GetLastWireguardHandshake().AsTime().Local()
|
lastHandshake = pbPeerState.GetLastWireguardHandshake().AsTime().Local()
|
||||||
transferReceived = pbPeerState.GetBytesRx()
|
transferReceived = pbPeerState.GetBytesRx()
|
||||||
transferSent = pbPeerState.GetBytesTx()
|
transferSent = pbPeerState.GetBytesTx()
|
||||||
@@ -363,7 +375,6 @@ func mapPeers(peers []*proto.PeerState) peersStateOutput {
|
|||||||
Status: pbPeerState.GetConnStatus(),
|
Status: pbPeerState.GetConnStatus(),
|
||||||
LastStatusUpdate: timeLocal,
|
LastStatusUpdate: timeLocal,
|
||||||
ConnType: connType,
|
ConnType: connType,
|
||||||
Direct: pbPeerState.GetDirect(),
|
|
||||||
IceCandidateType: iceCandidateType{
|
IceCandidateType: iceCandidateType{
|
||||||
Local: localICE,
|
Local: localICE,
|
||||||
Remote: remoteICE,
|
Remote: remoteICE,
|
||||||
@@ -372,10 +383,12 @@ func mapPeers(peers []*proto.PeerState) peersStateOutput {
|
|||||||
Local: localICEEndpoint,
|
Local: localICEEndpoint,
|
||||||
Remote: remoteICEEndpoint,
|
Remote: remoteICEEndpoint,
|
||||||
},
|
},
|
||||||
|
RelayAddress: relayServerAddress,
|
||||||
FQDN: pbPeerState.GetFqdn(),
|
FQDN: pbPeerState.GetFqdn(),
|
||||||
LastWireguardHandshake: lastHandshake,
|
LastWireguardHandshake: lastHandshake,
|
||||||
TransferReceived: transferReceived,
|
TransferReceived: transferReceived,
|
||||||
TransferSent: transferSent,
|
TransferSent: transferSent,
|
||||||
|
Latency: pbPeerState.GetLatency().AsDuration(),
|
||||||
RosenpassEnabled: pbPeerState.GetRosenpassEnabled(),
|
RosenpassEnabled: pbPeerState.GetRosenpassEnabled(),
|
||||||
Routes: pbPeerState.GetRoutes(),
|
Routes: pbPeerState.GetRoutes(),
|
||||||
}
|
}
|
||||||
@@ -523,8 +536,16 @@ func parseGeneralSummary(overview statusOutputOverview, showURL bool, showRelays
|
|||||||
|
|
||||||
peersCountString := fmt.Sprintf("%d/%d Connected", overview.Peers.Connected, overview.Peers.Total)
|
peersCountString := fmt.Sprintf("%d/%d Connected", overview.Peers.Connected, overview.Peers.Total)
|
||||||
|
|
||||||
|
goos := runtime.GOOS
|
||||||
|
goarch := runtime.GOARCH
|
||||||
|
goarm := ""
|
||||||
|
if goarch == "arm" {
|
||||||
|
goarm = fmt.Sprintf(" (ARMv%s)", os.Getenv("GOARM"))
|
||||||
|
}
|
||||||
|
|
||||||
summary := fmt.Sprintf(
|
summary := fmt.Sprintf(
|
||||||
"Daemon version: %s\n"+
|
"OS: %s\n"+
|
||||||
|
"Daemon version: %s\n"+
|
||||||
"CLI version: %s\n"+
|
"CLI version: %s\n"+
|
||||||
"Management: %s\n"+
|
"Management: %s\n"+
|
||||||
"Signal: %s\n"+
|
"Signal: %s\n"+
|
||||||
@@ -536,6 +557,7 @@ func parseGeneralSummary(overview statusOutputOverview, showURL bool, showRelays
|
|||||||
"Quantum resistance: %s\n"+
|
"Quantum resistance: %s\n"+
|
||||||
"Routes: %s\n"+
|
"Routes: %s\n"+
|
||||||
"Peers count: %s\n",
|
"Peers count: %s\n",
|
||||||
|
fmt.Sprintf("%s/%s%s", goos, goarch, goarm),
|
||||||
overview.DaemonVersion,
|
overview.DaemonVersion,
|
||||||
version.NetbirdVersion(),
|
version.NetbirdVersion(),
|
||||||
managementConnString,
|
managementConnString,
|
||||||
@@ -591,15 +613,6 @@ func parsePeers(peers peersStateOutput, rosenpassEnabled, rosenpassPermissive bo
|
|||||||
if peerState.IceCandidateEndpoint.Remote != "" {
|
if peerState.IceCandidateEndpoint.Remote != "" {
|
||||||
remoteICEEndpoint = peerState.IceCandidateEndpoint.Remote
|
remoteICEEndpoint = peerState.IceCandidateEndpoint.Remote
|
||||||
}
|
}
|
||||||
lastStatusUpdate := "-"
|
|
||||||
if !peerState.LastStatusUpdate.IsZero() {
|
|
||||||
lastStatusUpdate = peerState.LastStatusUpdate.Format("2006-01-02 15:04:05")
|
|
||||||
}
|
|
||||||
|
|
||||||
lastWireGuardHandshake := "-"
|
|
||||||
if !peerState.LastWireguardHandshake.IsZero() && peerState.LastWireguardHandshake != time.Unix(0, 0) {
|
|
||||||
lastWireGuardHandshake = peerState.LastWireguardHandshake.Format("2006-01-02 15:04:05")
|
|
||||||
}
|
|
||||||
|
|
||||||
rosenpassEnabledStatus := "false"
|
rosenpassEnabledStatus := "false"
|
||||||
if rosenpassEnabled {
|
if rosenpassEnabled {
|
||||||
@@ -631,30 +644,32 @@ func parsePeers(peers peersStateOutput, rosenpassEnabled, rosenpassPermissive bo
|
|||||||
" Status: %s\n"+
|
" Status: %s\n"+
|
||||||
" -- detail --\n"+
|
" -- detail --\n"+
|
||||||
" Connection type: %s\n"+
|
" Connection type: %s\n"+
|
||||||
" Direct: %t\n"+
|
|
||||||
" ICE candidate (Local/Remote): %s/%s\n"+
|
" ICE candidate (Local/Remote): %s/%s\n"+
|
||||||
" ICE candidate endpoints (Local/Remote): %s/%s\n"+
|
" ICE candidate endpoints (Local/Remote): %s/%s\n"+
|
||||||
|
" Relay server address: %s\n"+
|
||||||
" Last connection update: %s\n"+
|
" Last connection update: %s\n"+
|
||||||
" Last WireGuard handshake: %s\n"+
|
" Last WireGuard handshake: %s\n"+
|
||||||
" Transfer status (received/sent) %s/%s\n"+
|
" Transfer status (received/sent) %s/%s\n"+
|
||||||
" Quantum resistance: %s\n"+
|
" Quantum resistance: %s\n"+
|
||||||
" Routes: %s\n",
|
" Routes: %s\n"+
|
||||||
|
" Latency: %s\n",
|
||||||
peerState.FQDN,
|
peerState.FQDN,
|
||||||
peerState.IP,
|
peerState.IP,
|
||||||
peerState.PubKey,
|
peerState.PubKey,
|
||||||
peerState.Status,
|
peerState.Status,
|
||||||
peerState.ConnType,
|
peerState.ConnType,
|
||||||
peerState.Direct,
|
|
||||||
localICE,
|
localICE,
|
||||||
remoteICE,
|
remoteICE,
|
||||||
localICEEndpoint,
|
localICEEndpoint,
|
||||||
remoteICEEndpoint,
|
remoteICEEndpoint,
|
||||||
lastStatusUpdate,
|
peerState.RelayAddress,
|
||||||
lastWireGuardHandshake,
|
timeAgo(peerState.LastStatusUpdate),
|
||||||
|
timeAgo(peerState.LastWireguardHandshake),
|
||||||
toIEC(peerState.TransferReceived),
|
toIEC(peerState.TransferReceived),
|
||||||
toIEC(peerState.TransferSent),
|
toIEC(peerState.TransferSent),
|
||||||
rosenpassEnabledStatus,
|
rosenpassEnabledStatus,
|
||||||
routes,
|
routes,
|
||||||
|
peerState.Latency.String(),
|
||||||
)
|
)
|
||||||
|
|
||||||
peersString += peerString
|
peersString += peerString
|
||||||
@@ -718,3 +733,121 @@ func countEnabled(dnsServers []nsServerGroupStateOutput) int {
|
|||||||
}
|
}
|
||||||
return count
|
return count
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// timeAgo returns a string representing the duration since the provided time in a human-readable format.
|
||||||
|
func timeAgo(t time.Time) string {
|
||||||
|
if t.IsZero() || t.Equal(time.Unix(0, 0)) {
|
||||||
|
return "-"
|
||||||
|
}
|
||||||
|
duration := time.Since(t)
|
||||||
|
switch {
|
||||||
|
case duration < time.Second:
|
||||||
|
return "Now"
|
||||||
|
case duration < time.Minute:
|
||||||
|
seconds := int(duration.Seconds())
|
||||||
|
if seconds == 1 {
|
||||||
|
return "1 second ago"
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%d seconds ago", seconds)
|
||||||
|
case duration < time.Hour:
|
||||||
|
minutes := int(duration.Minutes())
|
||||||
|
seconds := int(duration.Seconds()) % 60
|
||||||
|
if minutes == 1 {
|
||||||
|
if seconds == 1 {
|
||||||
|
return "1 minute, 1 second ago"
|
||||||
|
} else if seconds > 0 {
|
||||||
|
return fmt.Sprintf("1 minute, %d seconds ago", seconds)
|
||||||
|
}
|
||||||
|
return "1 minute ago"
|
||||||
|
}
|
||||||
|
if seconds > 0 {
|
||||||
|
return fmt.Sprintf("%d minutes, %d seconds ago", minutes, seconds)
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%d minutes ago", minutes)
|
||||||
|
case duration < 24*time.Hour:
|
||||||
|
hours := int(duration.Hours())
|
||||||
|
minutes := int(duration.Minutes()) % 60
|
||||||
|
if hours == 1 {
|
||||||
|
if minutes == 1 {
|
||||||
|
return "1 hour, 1 minute ago"
|
||||||
|
} else if minutes > 0 {
|
||||||
|
return fmt.Sprintf("1 hour, %d minutes ago", minutes)
|
||||||
|
}
|
||||||
|
return "1 hour ago"
|
||||||
|
}
|
||||||
|
if minutes > 0 {
|
||||||
|
return fmt.Sprintf("%d hours, %d minutes ago", hours, minutes)
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%d hours ago", hours)
|
||||||
|
}
|
||||||
|
|
||||||
|
days := int(duration.Hours()) / 24
|
||||||
|
hours := int(duration.Hours()) % 24
|
||||||
|
if days == 1 {
|
||||||
|
if hours == 1 {
|
||||||
|
return "1 day, 1 hour ago"
|
||||||
|
} else if hours > 0 {
|
||||||
|
return fmt.Sprintf("1 day, %d hours ago", hours)
|
||||||
|
}
|
||||||
|
return "1 day ago"
|
||||||
|
}
|
||||||
|
if hours > 0 {
|
||||||
|
return fmt.Sprintf("%d days, %d hours ago", days, hours)
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("%d days ago", days)
|
||||||
|
}
|
||||||
|
|
||||||
|
func anonymizePeerDetail(a *anonymize.Anonymizer, peer *peerStateDetailOutput) {
|
||||||
|
peer.FQDN = a.AnonymizeDomain(peer.FQDN)
|
||||||
|
if localIP, port, err := net.SplitHostPort(peer.IceCandidateEndpoint.Local); err == nil {
|
||||||
|
peer.IceCandidateEndpoint.Local = fmt.Sprintf("%s:%s", a.AnonymizeIPString(localIP), port)
|
||||||
|
}
|
||||||
|
if remoteIP, port, err := net.SplitHostPort(peer.IceCandidateEndpoint.Remote); err == nil {
|
||||||
|
peer.IceCandidateEndpoint.Remote = fmt.Sprintf("%s:%s", a.AnonymizeIPString(remoteIP), port)
|
||||||
|
}
|
||||||
|
for i, route := range peer.Routes {
|
||||||
|
peer.Routes[i] = a.AnonymizeIPString(route)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, route := range peer.Routes {
|
||||||
|
peer.Routes[i] = a.AnonymizeRoute(route)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func anonymizeOverview(a *anonymize.Anonymizer, overview *statusOutputOverview) {
|
||||||
|
for i, peer := range overview.Peers.Details {
|
||||||
|
peer := peer
|
||||||
|
anonymizePeerDetail(a, &peer)
|
||||||
|
overview.Peers.Details[i] = peer
|
||||||
|
}
|
||||||
|
|
||||||
|
overview.ManagementState.URL = a.AnonymizeURI(overview.ManagementState.URL)
|
||||||
|
overview.ManagementState.Error = a.AnonymizeString(overview.ManagementState.Error)
|
||||||
|
overview.SignalState.URL = a.AnonymizeURI(overview.SignalState.URL)
|
||||||
|
overview.SignalState.Error = a.AnonymizeString(overview.SignalState.Error)
|
||||||
|
|
||||||
|
overview.IP = a.AnonymizeIPString(overview.IP)
|
||||||
|
for i, detail := range overview.Relays.Details {
|
||||||
|
detail.URI = a.AnonymizeURI(detail.URI)
|
||||||
|
detail.Error = a.AnonymizeString(detail.Error)
|
||||||
|
overview.Relays.Details[i] = detail
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, nsGroup := range overview.NSServerGroups {
|
||||||
|
for j, domain := range nsGroup.Domains {
|
||||||
|
overview.NSServerGroups[i].Domains[j] = a.AnonymizeDomain(domain)
|
||||||
|
}
|
||||||
|
for j, ns := range nsGroup.Servers {
|
||||||
|
host, port, err := net.SplitHostPort(ns)
|
||||||
|
if err == nil {
|
||||||
|
overview.NSServerGroups[i].Servers[j] = fmt.Sprintf("%s:%s", a.AnonymizeIPString(host), port)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, route := range overview.Routes {
|
||||||
|
overview.Routes[i] = a.AnonymizeRoute(route)
|
||||||
|
}
|
||||||
|
|
||||||
|
overview.FQDN = a.AnonymizeDomain(overview.FQDN)
|
||||||
|
}
|
||||||
|
|||||||
@@ -3,11 +3,14 @@ package cmd
|
|||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
|
"fmt"
|
||||||
|
"runtime"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
"google.golang.org/protobuf/types/known/durationpb"
|
||||||
"google.golang.org/protobuf/types/known/timestamppb"
|
"google.golang.org/protobuf/types/known/timestamppb"
|
||||||
|
|
||||||
"github.com/netbirdio/netbird/client/proto"
|
"github.com/netbirdio/netbird/client/proto"
|
||||||
@@ -34,7 +37,6 @@ var resp = &proto.StatusResponse{
|
|||||||
ConnStatus: "Connected",
|
ConnStatus: "Connected",
|
||||||
ConnStatusUpdate: timestamppb.New(time.Date(2001, time.Month(1), 1, 1, 1, 1, 0, time.UTC)),
|
ConnStatusUpdate: timestamppb.New(time.Date(2001, time.Month(1), 1, 1, 1, 1, 0, time.UTC)),
|
||||||
Relayed: false,
|
Relayed: false,
|
||||||
Direct: true,
|
|
||||||
LocalIceCandidateType: "",
|
LocalIceCandidateType: "",
|
||||||
RemoteIceCandidateType: "",
|
RemoteIceCandidateType: "",
|
||||||
LocalIceCandidateEndpoint: "",
|
LocalIceCandidateEndpoint: "",
|
||||||
@@ -45,6 +47,7 @@ var resp = &proto.StatusResponse{
|
|||||||
Routes: []string{
|
Routes: []string{
|
||||||
"10.1.0.0/24",
|
"10.1.0.0/24",
|
||||||
},
|
},
|
||||||
|
Latency: durationpb.New(time.Duration(10000000)),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
IP: "192.168.178.102",
|
IP: "192.168.178.102",
|
||||||
@@ -53,7 +56,6 @@ var resp = &proto.StatusResponse{
|
|||||||
ConnStatus: "Connected",
|
ConnStatus: "Connected",
|
||||||
ConnStatusUpdate: timestamppb.New(time.Date(2002, time.Month(2), 2, 2, 2, 2, 0, time.UTC)),
|
ConnStatusUpdate: timestamppb.New(time.Date(2002, time.Month(2), 2, 2, 2, 2, 0, time.UTC)),
|
||||||
Relayed: true,
|
Relayed: true,
|
||||||
Direct: false,
|
|
||||||
LocalIceCandidateType: "relay",
|
LocalIceCandidateType: "relay",
|
||||||
RemoteIceCandidateType: "prflx",
|
RemoteIceCandidateType: "prflx",
|
||||||
LocalIceCandidateEndpoint: "10.0.0.1:10001",
|
LocalIceCandidateEndpoint: "10.0.0.1:10001",
|
||||||
@@ -61,6 +63,7 @@ var resp = &proto.StatusResponse{
|
|||||||
LastWireguardHandshake: timestamppb.New(time.Date(2002, time.Month(2), 2, 2, 2, 3, 0, time.UTC)),
|
LastWireguardHandshake: timestamppb.New(time.Date(2002, time.Month(2), 2, 2, 2, 3, 0, time.UTC)),
|
||||||
BytesRx: 2000,
|
BytesRx: 2000,
|
||||||
BytesTx: 1000,
|
BytesTx: 1000,
|
||||||
|
Latency: durationpb.New(time.Duration(10000000)),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
ManagementState: &proto.ManagementState{
|
ManagementState: &proto.ManagementState{
|
||||||
@@ -132,7 +135,6 @@ var overview = statusOutputOverview{
|
|||||||
Status: "Connected",
|
Status: "Connected",
|
||||||
LastStatusUpdate: time.Date(2001, 1, 1, 1, 1, 1, 0, time.UTC),
|
LastStatusUpdate: time.Date(2001, 1, 1, 1, 1, 1, 0, time.UTC),
|
||||||
ConnType: "P2P",
|
ConnType: "P2P",
|
||||||
Direct: true,
|
|
||||||
IceCandidateType: iceCandidateType{
|
IceCandidateType: iceCandidateType{
|
||||||
Local: "",
|
Local: "",
|
||||||
Remote: "",
|
Remote: "",
|
||||||
@@ -147,6 +149,7 @@ var overview = statusOutputOverview{
|
|||||||
Routes: []string{
|
Routes: []string{
|
||||||
"10.1.0.0/24",
|
"10.1.0.0/24",
|
||||||
},
|
},
|
||||||
|
Latency: time.Duration(10000000),
|
||||||
},
|
},
|
||||||
{
|
{
|
||||||
IP: "192.168.178.102",
|
IP: "192.168.178.102",
|
||||||
@@ -155,7 +158,6 @@ var overview = statusOutputOverview{
|
|||||||
Status: "Connected",
|
Status: "Connected",
|
||||||
LastStatusUpdate: time.Date(2002, 2, 2, 2, 2, 2, 0, time.UTC),
|
LastStatusUpdate: time.Date(2002, 2, 2, 2, 2, 2, 0, time.UTC),
|
||||||
ConnType: "Relayed",
|
ConnType: "Relayed",
|
||||||
Direct: false,
|
|
||||||
IceCandidateType: iceCandidateType{
|
IceCandidateType: iceCandidateType{
|
||||||
Local: "relay",
|
Local: "relay",
|
||||||
Remote: "prflx",
|
Remote: "prflx",
|
||||||
@@ -167,6 +169,7 @@ var overview = statusOutputOverview{
|
|||||||
LastWireguardHandshake: time.Date(2002, 2, 2, 2, 2, 3, 0, time.UTC),
|
LastWireguardHandshake: time.Date(2002, 2, 2, 2, 2, 3, 0, time.UTC),
|
||||||
TransferReceived: 2000,
|
TransferReceived: 2000,
|
||||||
TransferSent: 1000,
|
TransferSent: 1000,
|
||||||
|
Latency: time.Duration(10000000),
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
@@ -276,7 +279,6 @@ func TestParsingToJSON(t *testing.T) {
|
|||||||
"status": "Connected",
|
"status": "Connected",
|
||||||
"lastStatusUpdate": "2001-01-01T01:01:01Z",
|
"lastStatusUpdate": "2001-01-01T01:01:01Z",
|
||||||
"connectionType": "P2P",
|
"connectionType": "P2P",
|
||||||
"direct": true,
|
|
||||||
"iceCandidateType": {
|
"iceCandidateType": {
|
||||||
"local": "",
|
"local": "",
|
||||||
"remote": ""
|
"remote": ""
|
||||||
@@ -285,9 +287,11 @@ func TestParsingToJSON(t *testing.T) {
|
|||||||
"local": "",
|
"local": "",
|
||||||
"remote": ""
|
"remote": ""
|
||||||
},
|
},
|
||||||
|
"relayAddress": "",
|
||||||
"lastWireguardHandshake": "2001-01-01T01:01:02Z",
|
"lastWireguardHandshake": "2001-01-01T01:01:02Z",
|
||||||
"transferReceived": 200,
|
"transferReceived": 200,
|
||||||
"transferSent": 100,
|
"transferSent": 100,
|
||||||
|
"latency": 10000000,
|
||||||
"quantumResistance": false,
|
"quantumResistance": false,
|
||||||
"routes": [
|
"routes": [
|
||||||
"10.1.0.0/24"
|
"10.1.0.0/24"
|
||||||
@@ -300,7 +304,6 @@ func TestParsingToJSON(t *testing.T) {
|
|||||||
"status": "Connected",
|
"status": "Connected",
|
||||||
"lastStatusUpdate": "2002-02-02T02:02:02Z",
|
"lastStatusUpdate": "2002-02-02T02:02:02Z",
|
||||||
"connectionType": "Relayed",
|
"connectionType": "Relayed",
|
||||||
"direct": false,
|
|
||||||
"iceCandidateType": {
|
"iceCandidateType": {
|
||||||
"local": "relay",
|
"local": "relay",
|
||||||
"remote": "prflx"
|
"remote": "prflx"
|
||||||
@@ -309,9 +312,11 @@ func TestParsingToJSON(t *testing.T) {
|
|||||||
"local": "10.0.0.1:10001",
|
"local": "10.0.0.1:10001",
|
||||||
"remote": "10.0.10.1:10002"
|
"remote": "10.0.10.1:10002"
|
||||||
},
|
},
|
||||||
|
"relayAddress": "",
|
||||||
"lastWireguardHandshake": "2002-02-02T02:02:03Z",
|
"lastWireguardHandshake": "2002-02-02T02:02:03Z",
|
||||||
"transferReceived": 2000,
|
"transferReceived": 2000,
|
||||||
"transferSent": 1000,
|
"transferSent": 1000,
|
||||||
|
"latency": 10000000,
|
||||||
"quantumResistance": false,
|
"quantumResistance": false,
|
||||||
"routes": null
|
"routes": null
|
||||||
}
|
}
|
||||||
@@ -399,16 +404,17 @@ func TestParsingToYAML(t *testing.T) {
|
|||||||
status: Connected
|
status: Connected
|
||||||
lastStatusUpdate: 2001-01-01T01:01:01Z
|
lastStatusUpdate: 2001-01-01T01:01:01Z
|
||||||
connectionType: P2P
|
connectionType: P2P
|
||||||
direct: true
|
|
||||||
iceCandidateType:
|
iceCandidateType:
|
||||||
local: ""
|
local: ""
|
||||||
remote: ""
|
remote: ""
|
||||||
iceCandidateEndpoint:
|
iceCandidateEndpoint:
|
||||||
local: ""
|
local: ""
|
||||||
remote: ""
|
remote: ""
|
||||||
|
relayAddress: ""
|
||||||
lastWireguardHandshake: 2001-01-01T01:01:02Z
|
lastWireguardHandshake: 2001-01-01T01:01:02Z
|
||||||
transferReceived: 200
|
transferReceived: 200
|
||||||
transferSent: 100
|
transferSent: 100
|
||||||
|
latency: 10ms
|
||||||
quantumResistance: false
|
quantumResistance: false
|
||||||
routes:
|
routes:
|
||||||
- 10.1.0.0/24
|
- 10.1.0.0/24
|
||||||
@@ -418,16 +424,17 @@ func TestParsingToYAML(t *testing.T) {
|
|||||||
status: Connected
|
status: Connected
|
||||||
lastStatusUpdate: 2002-02-02T02:02:02Z
|
lastStatusUpdate: 2002-02-02T02:02:02Z
|
||||||
connectionType: Relayed
|
connectionType: Relayed
|
||||||
direct: false
|
|
||||||
iceCandidateType:
|
iceCandidateType:
|
||||||
local: relay
|
local: relay
|
||||||
remote: prflx
|
remote: prflx
|
||||||
iceCandidateEndpoint:
|
iceCandidateEndpoint:
|
||||||
local: 10.0.0.1:10001
|
local: 10.0.0.1:10001
|
||||||
remote: 10.0.10.1:10002
|
remote: 10.0.10.1:10002
|
||||||
|
relayAddress: ""
|
||||||
lastWireguardHandshake: 2002-02-02T02:02:03Z
|
lastWireguardHandshake: 2002-02-02T02:02:03Z
|
||||||
transferReceived: 2000
|
transferReceived: 2000
|
||||||
transferSent: 1000
|
transferSent: 1000
|
||||||
|
latency: 10ms
|
||||||
quantumResistance: false
|
quantumResistance: false
|
||||||
routes: []
|
routes: []
|
||||||
cliVersion: development
|
cliVersion: development
|
||||||
@@ -478,9 +485,15 @@ dnsServers:
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestParsingToDetail(t *testing.T) {
|
func TestParsingToDetail(t *testing.T) {
|
||||||
|
// Calculate time ago based on the fixture dates
|
||||||
|
lastConnectionUpdate1 := timeAgo(overview.Peers.Details[0].LastStatusUpdate)
|
||||||
|
lastHandshake1 := timeAgo(overview.Peers.Details[0].LastWireguardHandshake)
|
||||||
|
lastConnectionUpdate2 := timeAgo(overview.Peers.Details[1].LastStatusUpdate)
|
||||||
|
lastHandshake2 := timeAgo(overview.Peers.Details[1].LastWireguardHandshake)
|
||||||
|
|
||||||
detail := parseToFullDetailSummary(overview)
|
detail := parseToFullDetailSummary(overview)
|
||||||
|
|
||||||
expectedDetail :=
|
expectedDetail := fmt.Sprintf(
|
||||||
`Peers detail:
|
`Peers detail:
|
||||||
peer-1.awesome-domain.com:
|
peer-1.awesome-domain.com:
|
||||||
NetBird IP: 192.168.178.101
|
NetBird IP: 192.168.178.101
|
||||||
@@ -488,14 +501,15 @@ func TestParsingToDetail(t *testing.T) {
|
|||||||
Status: Connected
|
Status: Connected
|
||||||
-- detail --
|
-- detail --
|
||||||
Connection type: P2P
|
Connection type: P2P
|
||||||
Direct: true
|
|
||||||
ICE candidate (Local/Remote): -/-
|
ICE candidate (Local/Remote): -/-
|
||||||
ICE candidate endpoints (Local/Remote): -/-
|
ICE candidate endpoints (Local/Remote): -/-
|
||||||
Last connection update: 2001-01-01 01:01:01
|
Relay server address:
|
||||||
Last WireGuard handshake: 2001-01-01 01:01:02
|
Last connection update: %s
|
||||||
|
Last WireGuard handshake: %s
|
||||||
Transfer status (received/sent) 200 B/100 B
|
Transfer status (received/sent) 200 B/100 B
|
||||||
Quantum resistance: false
|
Quantum resistance: false
|
||||||
Routes: 10.1.0.0/24
|
Routes: 10.1.0.0/24
|
||||||
|
Latency: 10ms
|
||||||
|
|
||||||
peer-2.awesome-domain.com:
|
peer-2.awesome-domain.com:
|
||||||
NetBird IP: 192.168.178.102
|
NetBird IP: 192.168.178.102
|
||||||
@@ -503,17 +517,19 @@ func TestParsingToDetail(t *testing.T) {
|
|||||||
Status: Connected
|
Status: Connected
|
||||||
-- detail --
|
-- detail --
|
||||||
Connection type: Relayed
|
Connection type: Relayed
|
||||||
Direct: false
|
|
||||||
ICE candidate (Local/Remote): relay/prflx
|
ICE candidate (Local/Remote): relay/prflx
|
||||||
ICE candidate endpoints (Local/Remote): 10.0.0.1:10001/10.0.10.1:10002
|
ICE candidate endpoints (Local/Remote): 10.0.0.1:10001/10.0.10.1:10002
|
||||||
Last connection update: 2002-02-02 02:02:02
|
Relay server address:
|
||||||
Last WireGuard handshake: 2002-02-02 02:02:03
|
Last connection update: %s
|
||||||
|
Last WireGuard handshake: %s
|
||||||
Transfer status (received/sent) 2.0 KiB/1000 B
|
Transfer status (received/sent) 2.0 KiB/1000 B
|
||||||
Quantum resistance: false
|
Quantum resistance: false
|
||||||
Routes: -
|
Routes: -
|
||||||
|
Latency: 10ms
|
||||||
|
|
||||||
|
OS: %s/%s
|
||||||
Daemon version: 0.14.1
|
Daemon version: 0.14.1
|
||||||
CLI version: development
|
CLI version: %s
|
||||||
Management: Connected to my-awesome-management.com:443
|
Management: Connected to my-awesome-management.com:443
|
||||||
Signal: Connected to my-awesome-signal.com:443
|
Signal: Connected to my-awesome-signal.com:443
|
||||||
Relays:
|
Relays:
|
||||||
@@ -528,7 +544,7 @@ Interface type: Kernel
|
|||||||
Quantum resistance: false
|
Quantum resistance: false
|
||||||
Routes: 10.10.0.0/24
|
Routes: 10.10.0.0/24
|
||||||
Peers count: 2/2 Connected
|
Peers count: 2/2 Connected
|
||||||
`
|
`, lastConnectionUpdate1, lastHandshake1, lastConnectionUpdate2, lastHandshake2, runtime.GOOS, runtime.GOARCH, overview.CliVersion)
|
||||||
|
|
||||||
assert.Equal(t, expectedDetail, detail)
|
assert.Equal(t, expectedDetail, detail)
|
||||||
}
|
}
|
||||||
@@ -536,8 +552,8 @@ Peers count: 2/2 Connected
|
|||||||
func TestParsingToShortVersion(t *testing.T) {
|
func TestParsingToShortVersion(t *testing.T) {
|
||||||
shortVersion := parseGeneralSummary(overview, false, false, false)
|
shortVersion := parseGeneralSummary(overview, false, false, false)
|
||||||
|
|
||||||
expectedString :=
|
expectedString := fmt.Sprintf("OS: %s/%s", runtime.GOOS, runtime.GOARCH) + `
|
||||||
`Daemon version: 0.14.1
|
Daemon version: 0.14.1
|
||||||
CLI version: development
|
CLI version: development
|
||||||
Management: Connected
|
Management: Connected
|
||||||
Signal: Connected
|
Signal: Connected
|
||||||
@@ -561,3 +577,31 @@ func TestParsingOfIP(t *testing.T) {
|
|||||||
|
|
||||||
assert.Equal(t, "192.168.178.123\n", parsedIP)
|
assert.Equal(t, "192.168.178.123\n", parsedIP)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestTimeAgo(t *testing.T) {
|
||||||
|
now := time.Now()
|
||||||
|
|
||||||
|
cases := []struct {
|
||||||
|
name string
|
||||||
|
input time.Time
|
||||||
|
expected string
|
||||||
|
}{
|
||||||
|
{"Now", now, "Now"},
|
||||||
|
{"Seconds ago", now.Add(-10 * time.Second), "10 seconds ago"},
|
||||||
|
{"One minute ago", now.Add(-1 * time.Minute), "1 minute ago"},
|
||||||
|
{"Minutes and seconds ago", now.Add(-(1*time.Minute + 30*time.Second)), "1 minute, 30 seconds ago"},
|
||||||
|
{"One hour ago", now.Add(-1 * time.Hour), "1 hour ago"},
|
||||||
|
{"Hours and minutes ago", now.Add(-(2*time.Hour + 15*time.Minute)), "2 hours, 15 minutes ago"},
|
||||||
|
{"One day ago", now.Add(-24 * time.Hour), "1 day ago"},
|
||||||
|
{"Multiple days ago", now.Add(-(72*time.Hour + 20*time.Minute)), "3 days ago"},
|
||||||
|
{"Zero time", time.Time{}, "-"},
|
||||||
|
{"Unix zero time", time.Unix(0, 0), "-"},
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, tc := range cases {
|
||||||
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
|
result := timeAgo(tc.input)
|
||||||
|
assert.Equal(t, tc.expected, result, "Failed %s", tc.name)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|||||||
@@ -7,12 +7,17 @@ import (
|
|||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/stretchr/testify/require"
|
||||||
|
"go.opentelemetry.io/otel"
|
||||||
|
|
||||||
"github.com/netbirdio/netbird/management/server/activity"
|
"github.com/netbirdio/netbird/management/server/activity"
|
||||||
|
|
||||||
"github.com/netbirdio/netbird/util"
|
"github.com/netbirdio/netbird/util"
|
||||||
|
|
||||||
"google.golang.org/grpc"
|
"google.golang.org/grpc"
|
||||||
|
|
||||||
|
"github.com/netbirdio/management-integrations/integrations"
|
||||||
|
|
||||||
clientProto "github.com/netbirdio/netbird/client/proto"
|
clientProto "github.com/netbirdio/netbird/client/proto"
|
||||||
client "github.com/netbirdio/netbird/client/server"
|
client "github.com/netbirdio/netbird/client/server"
|
||||||
mgmtProto "github.com/netbirdio/netbird/management/proto"
|
mgmtProto "github.com/netbirdio/netbird/management/proto"
|
||||||
@@ -51,7 +56,10 @@ func startSignal(t *testing.T) (*grpc.Server, net.Listener) {
|
|||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
s := grpc.NewServer()
|
s := grpc.NewServer()
|
||||||
sigProto.RegisterSignalExchangeServer(s, sig.NewServer())
|
srv, err := sig.NewServer(otel.Meter(""))
|
||||||
|
require.NoError(t, err)
|
||||||
|
|
||||||
|
sigProto.RegisterSignalExchangeServer(s, srv)
|
||||||
go func() {
|
go func() {
|
||||||
if err := s.Serve(lis); err != nil {
|
if err := s.Serve(lis); err != nil {
|
||||||
panic(err)
|
panic(err)
|
||||||
@@ -68,22 +76,28 @@ func startManagement(t *testing.T, config *mgmt.Config) (*grpc.Server, net.Liste
|
|||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
s := grpc.NewServer()
|
s := grpc.NewServer()
|
||||||
store, err := mgmt.NewStoreFromJson(config.Datadir, nil)
|
store, cleanUp, err := mgmt.NewTestStoreFromJson(context.Background(), config.Datadir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
t.Cleanup(cleanUp)
|
||||||
|
|
||||||
peersUpdateManager := mgmt.NewPeersUpdateManager(nil)
|
peersUpdateManager := mgmt.NewPeersUpdateManager(nil)
|
||||||
eventStore := &activity.InMemoryEventStore{}
|
eventStore := &activity.InMemoryEventStore{}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil
|
return nil, nil
|
||||||
}
|
}
|
||||||
accountManager, err := mgmt.BuildManager(store, peersUpdateManager, nil, "", "", eventStore, nil, false)
|
iv, _ := integrations.NewIntegratedValidator(context.Background(), eventStore)
|
||||||
|
accountManager, err := mgmt.BuildManager(context.Background(), store, peersUpdateManager, nil, "", "netbird.selfhosted", eventStore, nil, false, iv)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
turnManager := mgmt.NewTimeBasedAuthSecretsManager(peersUpdateManager, config.TURNConfig)
|
|
||||||
mgmtServer, err := mgmt.NewServer(config, accountManager, peersUpdateManager, turnManager, nil, nil)
|
rc := &mgmt.RelayConfig{
|
||||||
|
Address: "localhost:0",
|
||||||
|
}
|
||||||
|
turnManager := mgmt.NewTimeBasedAuthSecretsManager(peersUpdateManager, config.TURNConfig, rc)
|
||||||
|
mgmtServer, err := mgmt.NewServer(context.Background(), config, accountManager, peersUpdateManager, turnManager, nil, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -98,7 +112,7 @@ func startManagement(t *testing.T, config *mgmt.Config) (*grpc.Server, net.Liste
|
|||||||
}
|
}
|
||||||
|
|
||||||
func startClientDaemon(
|
func startClientDaemon(
|
||||||
t *testing.T, ctx context.Context, managementURL, configPath string,
|
t *testing.T, ctx context.Context, _, configPath string,
|
||||||
) (*grpc.Server, net.Listener) {
|
) (*grpc.Server, net.Listener) {
|
||||||
t.Helper()
|
t.Helper()
|
||||||
lis, err := net.Listen("tcp", "127.0.0.1:0")
|
lis, err := net.Listen("tcp", "127.0.0.1:0")
|
||||||
@@ -7,11 +7,13 @@ import (
|
|||||||
"net/netip"
|
"net/netip"
|
||||||
"runtime"
|
"runtime"
|
||||||
"strings"
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
"github.com/spf13/cobra"
|
"github.com/spf13/cobra"
|
||||||
"google.golang.org/grpc/codes"
|
"google.golang.org/grpc/codes"
|
||||||
gstatus "google.golang.org/grpc/status"
|
gstatus "google.golang.org/grpc/status"
|
||||||
|
"google.golang.org/protobuf/types/known/durationpb"
|
||||||
|
|
||||||
"github.com/netbirdio/netbird/client/internal"
|
"github.com/netbirdio/netbird/client/internal"
|
||||||
"github.com/netbirdio/netbird/client/internal/peer"
|
"github.com/netbirdio/netbird/client/internal/peer"
|
||||||
@@ -40,6 +42,12 @@ func init() {
|
|||||||
upCmd.PersistentFlags().BoolVarP(&foregroundMode, "foreground-mode", "F", false, "start service in foreground")
|
upCmd.PersistentFlags().BoolVarP(&foregroundMode, "foreground-mode", "F", false, "start service in foreground")
|
||||||
upCmd.PersistentFlags().StringVar(&interfaceName, interfaceNameFlag, iface.WgInterfaceDefault, "Wireguard interface name")
|
upCmd.PersistentFlags().StringVar(&interfaceName, interfaceNameFlag, iface.WgInterfaceDefault, "Wireguard interface name")
|
||||||
upCmd.PersistentFlags().Uint16Var(&wireguardPort, wireguardPortFlag, iface.DefaultWgPort, "Wireguard interface listening port")
|
upCmd.PersistentFlags().Uint16Var(&wireguardPort, wireguardPortFlag, iface.DefaultWgPort, "Wireguard interface listening port")
|
||||||
|
upCmd.PersistentFlags().BoolVarP(&networkMonitor, networkMonitorFlag, "N", networkMonitor,
|
||||||
|
`Manage network monitoring. Defaults to true on Windows and macOS, false on Linux. `+
|
||||||
|
`E.g. --network-monitor=false to disable or --network-monitor=true to enable.`,
|
||||||
|
)
|
||||||
|
upCmd.PersistentFlags().StringSliceVar(&extraIFaceBlackList, extraIFaceBlackListFlag, nil, "Extra list of default interfaces to ignore for listening")
|
||||||
|
upCmd.PersistentFlags().DurationVar(&dnsRouteInterval, dnsRouteIntervalFlag, time.Minute, "DNS route update interval")
|
||||||
}
|
}
|
||||||
|
|
||||||
func upFunc(cmd *cobra.Command, args []string) error {
|
func upFunc(cmd *cobra.Command, args []string) error {
|
||||||
@@ -83,11 +91,12 @@ func runInForegroundMode(ctx context.Context, cmd *cobra.Command) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
ic := internal.ConfigInput{
|
ic := internal.ConfigInput{
|
||||||
ManagementURL: managementURL,
|
ManagementURL: managementURL,
|
||||||
AdminURL: adminURL,
|
AdminURL: adminURL,
|
||||||
ConfigPath: configPath,
|
ConfigPath: configPath,
|
||||||
NATExternalIPs: natExternalIPs,
|
NATExternalIPs: natExternalIPs,
|
||||||
CustomDNSAddress: customDNSAddressConverted,
|
CustomDNSAddress: customDNSAddressConverted,
|
||||||
|
ExtraIFaceBlackList: extraIFaceBlackList,
|
||||||
}
|
}
|
||||||
|
|
||||||
if cmd.Flag(enableRosenpassFlag).Changed {
|
if cmd.Flag(enableRosenpassFlag).Changed {
|
||||||
@@ -114,6 +123,10 @@ func runInForegroundMode(ctx context.Context, cmd *cobra.Command) error {
|
|||||||
ic.WireguardPort = &p
|
ic.WireguardPort = &p
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if cmd.Flag(networkMonitorFlag).Changed {
|
||||||
|
ic.NetworkMonitor = &networkMonitor
|
||||||
|
}
|
||||||
|
|
||||||
if rootCmd.PersistentFlags().Changed(preSharedKeyFlag) {
|
if rootCmd.PersistentFlags().Changed(preSharedKeyFlag) {
|
||||||
ic.PreSharedKey = &preSharedKey
|
ic.PreSharedKey = &preSharedKey
|
||||||
}
|
}
|
||||||
@@ -130,6 +143,10 @@ func runInForegroundMode(ctx context.Context, cmd *cobra.Command) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if cmd.Flag(dnsRouteIntervalFlag).Changed {
|
||||||
|
ic.DNSRouteInterval = &dnsRouteInterval
|
||||||
|
}
|
||||||
|
|
||||||
config, err := internal.UpdateOrCreateConfig(ic)
|
config, err := internal.UpdateOrCreateConfig(ic)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("get config file: %v", err)
|
return fmt.Errorf("get config file: %v", err)
|
||||||
@@ -145,11 +162,15 @@ func runInForegroundMode(ctx context.Context, cmd *cobra.Command) error {
|
|||||||
var cancel context.CancelFunc
|
var cancel context.CancelFunc
|
||||||
ctx, cancel = context.WithCancel(ctx)
|
ctx, cancel = context.WithCancel(ctx)
|
||||||
SetupCloseHandler(ctx, cancel)
|
SetupCloseHandler(ctx, cancel)
|
||||||
return internal.RunClient(ctx, config, peer.NewRecorder(config.ManagementURL.String()))
|
|
||||||
|
r := peer.NewRecorder(config.ManagementURL.String())
|
||||||
|
r.GetFullStatus()
|
||||||
|
|
||||||
|
connectClient := internal.NewConnectClient(ctx, config, r)
|
||||||
|
return connectClient.Run()
|
||||||
}
|
}
|
||||||
|
|
||||||
func runInDaemonMode(ctx context.Context, cmd *cobra.Command) error {
|
func runInDaemonMode(ctx context.Context, cmd *cobra.Command) error {
|
||||||
|
|
||||||
customDNSAddressConverted, err := parseCustomDNSAddress(cmd.Flag(dnsResolverAddress).Changed)
|
customDNSAddressConverted, err := parseCustomDNSAddress(cmd.Flag(dnsResolverAddress).Changed)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@@ -190,6 +211,7 @@ func runInDaemonMode(ctx context.Context, cmd *cobra.Command) error {
|
|||||||
CustomDNSAddress: customDNSAddressConverted,
|
CustomDNSAddress: customDNSAddressConverted,
|
||||||
IsLinuxDesktopClient: isLinuxRunningDesktop(),
|
IsLinuxDesktopClient: isLinuxRunningDesktop(),
|
||||||
Hostname: hostName,
|
Hostname: hostName,
|
||||||
|
ExtraIFaceBlacklist: extraIFaceBlackList,
|
||||||
}
|
}
|
||||||
|
|
||||||
if rootCmd.PersistentFlags().Changed(preSharedKeyFlag) {
|
if rootCmd.PersistentFlags().Changed(preSharedKeyFlag) {
|
||||||
@@ -224,6 +246,14 @@ func runInDaemonMode(ctx context.Context, cmd *cobra.Command) error {
|
|||||||
loginRequest.WireguardPort = &wp
|
loginRequest.WireguardPort = &wp
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if cmd.Flag(networkMonitorFlag).Changed {
|
||||||
|
loginRequest.NetworkMonitor = &networkMonitor
|
||||||
|
}
|
||||||
|
|
||||||
|
if cmd.Flag(dnsRouteIntervalFlag).Changed {
|
||||||
|
loginRequest.DnsRouteInterval = durationpb.New(dnsRouteInterval)
|
||||||
|
}
|
||||||
|
|
||||||
var loginErr error
|
var loginErr error
|
||||||
|
|
||||||
var loginResp *proto.LoginResponse
|
var loginResp *proto.LoginResponse
|
||||||
|
|||||||
30
client/errors/errors.go
Normal file
30
client/errors/errors.go
Normal file
@@ -0,0 +1,30 @@
|
|||||||
|
package errors
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"strings"
|
||||||
|
|
||||||
|
"github.com/hashicorp/go-multierror"
|
||||||
|
)
|
||||||
|
|
||||||
|
func formatError(es []error) string {
|
||||||
|
if len(es) == 0 {
|
||||||
|
return fmt.Sprintf("0 error occurred:\n\t* %s", es[0])
|
||||||
|
}
|
||||||
|
|
||||||
|
points := make([]string, len(es))
|
||||||
|
for i, err := range es {
|
||||||
|
points[i] = fmt.Sprintf("* %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return fmt.Sprintf(
|
||||||
|
"%d errors occurred:\n\t%s",
|
||||||
|
len(es), strings.Join(points, "\n\t"))
|
||||||
|
}
|
||||||
|
|
||||||
|
func FormatErrorOrNil(err *multierror.Error) error {
|
||||||
|
if err != nil {
|
||||||
|
err.ErrorFormat = formatError
|
||||||
|
}
|
||||||
|
return err.ErrorOrNil()
|
||||||
|
}
|
||||||
@@ -42,20 +42,20 @@ func NewFirewall(context context.Context, iface IFaceMapper) (firewall.Manager,
|
|||||||
|
|
||||||
switch check() {
|
switch check() {
|
||||||
case IPTABLES:
|
case IPTABLES:
|
||||||
log.Debug("creating an iptables firewall manager")
|
log.Info("creating an iptables firewall manager")
|
||||||
fm, errFw = nbiptables.Create(context, iface)
|
fm, errFw = nbiptables.Create(context, iface)
|
||||||
if errFw != nil {
|
if errFw != nil {
|
||||||
log.Errorf("failed to create iptables manager: %s", errFw)
|
log.Errorf("failed to create iptables manager: %s", errFw)
|
||||||
}
|
}
|
||||||
case NFTABLES:
|
case NFTABLES:
|
||||||
log.Debug("creating an nftables firewall manager")
|
log.Info("creating an nftables firewall manager")
|
||||||
fm, errFw = nbnftables.Create(context, iface)
|
fm, errFw = nbnftables.Create(context, iface)
|
||||||
if errFw != nil {
|
if errFw != nil {
|
||||||
log.Errorf("failed to create nftables manager: %s", errFw)
|
log.Errorf("failed to create nftables manager: %s", errFw)
|
||||||
}
|
}
|
||||||
default:
|
default:
|
||||||
errFw = fmt.Errorf("no firewall manager found")
|
errFw = fmt.Errorf("no firewall manager found")
|
||||||
log.Debug("no firewall manager found, try to use userspace packet filtering firewall")
|
log.Info("no firewall manager found, trying to use userspace packet filtering firewall")
|
||||||
}
|
}
|
||||||
|
|
||||||
if iface.IsUserspaceBind() {
|
if iface.IsUserspaceBind() {
|
||||||
@@ -85,16 +85,58 @@ func NewFirewall(context context.Context, iface IFaceMapper) (firewall.Manager,
|
|||||||
|
|
||||||
// check returns the firewall type based on common lib checks. It returns UNKNOWN if no firewall is found.
|
// check returns the firewall type based on common lib checks. It returns UNKNOWN if no firewall is found.
|
||||||
func check() FWType {
|
func check() FWType {
|
||||||
nf := nftables.Conn{}
|
useIPTABLES := false
|
||||||
if _, err := nf.ListChains(); err == nil && os.Getenv(SKIP_NFTABLES_ENV) != "true" {
|
var iptablesChains []string
|
||||||
return NFTABLES
|
ip, err := iptables.NewWithProtocol(iptables.ProtocolIPv4)
|
||||||
|
if err == nil && isIptablesClientAvailable(ip) {
|
||||||
|
major, minor, _ := ip.GetIptablesVersion()
|
||||||
|
// use iptables when its version is lower than 1.8.0 which doesn't work well with our nftables manager
|
||||||
|
if major < 1 || (major == 1 && minor < 8) {
|
||||||
|
return IPTABLES
|
||||||
|
}
|
||||||
|
|
||||||
|
useIPTABLES = true
|
||||||
|
|
||||||
|
iptablesChains, err = ip.ListChains("filter")
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("failed to list iptables chains: %s", err)
|
||||||
|
useIPTABLES = false
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
ip, err := iptables.NewWithProtocol(iptables.ProtocolIPv4)
|
nf := nftables.Conn{}
|
||||||
if err != nil {
|
if chains, err := nf.ListChains(); err == nil && os.Getenv(SKIP_NFTABLES_ENV) != "true" {
|
||||||
return UNKNOWN
|
if !useIPTABLES {
|
||||||
|
return NFTABLES
|
||||||
|
}
|
||||||
|
|
||||||
|
// search for chains where table is filter
|
||||||
|
// if we find one, we assume that nftables manager can be used with iptables
|
||||||
|
for _, chain := range chains {
|
||||||
|
if chain.Table.Name == "filter" {
|
||||||
|
return NFTABLES
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// check tables for the following constraints:
|
||||||
|
// 1. there is no chain in nftables for the filter table and there is at least one chain in iptables, we assume that nftables manager can not be used
|
||||||
|
// 2. there is no tables or more than one table, we assume that nftables manager can be used
|
||||||
|
// 3. there is only one table and its name is filter, we assume that nftables manager can not be used, since there was no chain in it
|
||||||
|
// 4. if we find an error we log and continue with iptables check
|
||||||
|
nbTablesList, err := nf.ListTables()
|
||||||
|
switch {
|
||||||
|
case err == nil && len(iptablesChains) > 0:
|
||||||
|
return IPTABLES
|
||||||
|
case err == nil && len(nbTablesList) != 1:
|
||||||
|
return NFTABLES
|
||||||
|
case err == nil && len(nbTablesList) == 1 && nbTablesList[0].Name == "filter":
|
||||||
|
return IPTABLES
|
||||||
|
case err != nil:
|
||||||
|
log.Errorf("failed to list nftables tables on fw manager discovery: %s", err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
if isIptablesClientAvailable(ip) {
|
|
||||||
|
if useIPTABLES {
|
||||||
return IPTABLES
|
return IPTABLES
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -74,12 +74,12 @@ func (i *routerManager) InsertRoutingRules(pair firewall.RouterPair) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
err = i.insertRoutingRule(firewall.NatFormat, tableNat, chainRTNAT, routingFinalNatJump, pair)
|
err = i.addNATRule(firewall.NatFormat, tableNat, chainRTNAT, routingFinalNatJump, pair)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
err = i.insertRoutingRule(firewall.InNatFormat, tableNat, chainRTNAT, routingFinalNatJump, firewall.GetInPair(pair))
|
err = i.addNATRule(firewall.InNatFormat, tableNat, chainRTNAT, routingFinalNatJump, firewall.GetInPair(pair))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -87,12 +87,12 @@ func (i *routerManager) InsertRoutingRules(pair firewall.RouterPair) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// insertRoutingRule inserts an iptable rule
|
// insertRoutingRule inserts an iptables rule
|
||||||
func (i *routerManager) insertRoutingRule(keyFormat, table, chain, jump string, pair firewall.RouterPair) error {
|
func (i *routerManager) insertRoutingRule(keyFormat, table, chain, jump string, pair firewall.RouterPair) error {
|
||||||
var err error
|
var err error
|
||||||
|
|
||||||
ruleKey := firewall.GenKey(keyFormat, pair.ID)
|
ruleKey := firewall.GenKey(keyFormat, pair.ID)
|
||||||
rule := genRuleSpec(jump, ruleKey, pair.Source, pair.Destination)
|
rule := genRuleSpec(jump, pair.Source, pair.Destination)
|
||||||
existingRule, found := i.rules[ruleKey]
|
existingRule, found := i.rules[ruleKey]
|
||||||
if found {
|
if found {
|
||||||
err = i.iptablesClient.DeleteIfExists(table, chain, existingRule...)
|
err = i.iptablesClient.DeleteIfExists(table, chain, existingRule...)
|
||||||
@@ -101,6 +101,7 @@ func (i *routerManager) insertRoutingRule(keyFormat, table, chain, jump string,
|
|||||||
}
|
}
|
||||||
delete(i.rules, ruleKey)
|
delete(i.rules, ruleKey)
|
||||||
}
|
}
|
||||||
|
|
||||||
err = i.iptablesClient.Insert(table, chain, 1, rule...)
|
err = i.iptablesClient.Insert(table, chain, 1, rule...)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("error while adding new %s rule for %s: %v", getIptablesRuleType(table), pair.Destination, err)
|
return fmt.Errorf("error while adding new %s rule for %s: %v", getIptablesRuleType(table), pair.Destination, err)
|
||||||
@@ -317,6 +318,13 @@ func (i *routerManager) createChain(table, newChain string) error {
|
|||||||
return fmt.Errorf("couldn't create chain %s in %s table, error: %v", newChain, table, err)
|
return fmt.Errorf("couldn't create chain %s in %s table, error: %v", newChain, table, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Add the loopback return rule to the NAT chain
|
||||||
|
loopbackRule := []string{"-o", "lo", "-j", "RETURN"}
|
||||||
|
err = i.iptablesClient.Insert(table, newChain, 1, loopbackRule...)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to add loopback return rule to %s: %v", chainRTNAT, err)
|
||||||
|
}
|
||||||
|
|
||||||
err = i.iptablesClient.Append(table, newChain, "-j", "RETURN")
|
err = i.iptablesClient.Append(table, newChain, "-j", "RETURN")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("couldn't create chain %s default rule, error: %v", newChain, err)
|
return fmt.Errorf("couldn't create chain %s default rule, error: %v", newChain, err)
|
||||||
@@ -326,9 +334,33 @@ func (i *routerManager) createChain(table, newChain string) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// genRuleSpec generates rule specification with comment identifier
|
// addNATRule appends an iptables rule pair to the nat chain
|
||||||
func genRuleSpec(jump, id, source, destination string) []string {
|
func (i *routerManager) addNATRule(keyFormat, table, chain, jump string, pair firewall.RouterPair) error {
|
||||||
return []string{"-s", source, "-d", destination, "-j", jump, "-m", "comment", "--comment", id}
|
ruleKey := firewall.GenKey(keyFormat, pair.ID)
|
||||||
|
rule := genRuleSpec(jump, pair.Source, pair.Destination)
|
||||||
|
existingRule, found := i.rules[ruleKey]
|
||||||
|
if found {
|
||||||
|
err := i.iptablesClient.DeleteIfExists(table, chain, existingRule...)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error while removing existing NAT rule for %s: %v", pair.Destination, err)
|
||||||
|
}
|
||||||
|
delete(i.rules, ruleKey)
|
||||||
|
}
|
||||||
|
|
||||||
|
// inserting after loopback ignore rule
|
||||||
|
err := i.iptablesClient.Insert(table, chain, 2, rule...)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("error while appending new NAT rule for %s: %v", pair.Destination, err)
|
||||||
|
}
|
||||||
|
|
||||||
|
i.rules[ruleKey] = rule
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// genRuleSpec generates rule specification
|
||||||
|
func genRuleSpec(jump, source, destination string) []string {
|
||||||
|
return []string{"-s", source, "-d", destination, "-j", jump}
|
||||||
}
|
}
|
||||||
|
|
||||||
func getIptablesRuleType(table string) string {
|
func getIptablesRuleType(table string) string {
|
||||||
|
|||||||
@@ -51,14 +51,12 @@ func TestIptablesManager_RestoreOrCreateContainers(t *testing.T) {
|
|||||||
Destination: "100.100.100.0/24",
|
Destination: "100.100.100.0/24",
|
||||||
Masquerade: true,
|
Masquerade: true,
|
||||||
}
|
}
|
||||||
forward4RuleKey := firewall.GenKey(firewall.ForwardingFormat, pair.ID)
|
forward4Rule := genRuleSpec(routingFinalForwardJump, pair.Source, pair.Destination)
|
||||||
forward4Rule := genRuleSpec(routingFinalForwardJump, forward4RuleKey, pair.Source, pair.Destination)
|
|
||||||
|
|
||||||
err = manager.iptablesClient.Insert(tableFilter, chainRTFWD, 1, forward4Rule...)
|
err = manager.iptablesClient.Insert(tableFilter, chainRTFWD, 1, forward4Rule...)
|
||||||
require.NoError(t, err, "inserting rule should not return error")
|
require.NoError(t, err, "inserting rule should not return error")
|
||||||
|
|
||||||
nat4RuleKey := firewall.GenKey(firewall.NatFormat, pair.ID)
|
nat4Rule := genRuleSpec(routingFinalNatJump, pair.Source, pair.Destination)
|
||||||
nat4Rule := genRuleSpec(routingFinalNatJump, nat4RuleKey, pair.Source, pair.Destination)
|
|
||||||
|
|
||||||
err = manager.iptablesClient.Insert(tableNat, chainRTNAT, 1, nat4Rule...)
|
err = manager.iptablesClient.Insert(tableNat, chainRTNAT, 1, nat4Rule...)
|
||||||
require.NoError(t, err, "inserting rule should not return error")
|
require.NoError(t, err, "inserting rule should not return error")
|
||||||
@@ -92,7 +90,7 @@ func TestIptablesManager_InsertRoutingRules(t *testing.T) {
|
|||||||
require.NoError(t, err, "forwarding pair should be inserted")
|
require.NoError(t, err, "forwarding pair should be inserted")
|
||||||
|
|
||||||
forwardRuleKey := firewall.GenKey(firewall.ForwardingFormat, testCase.InputPair.ID)
|
forwardRuleKey := firewall.GenKey(firewall.ForwardingFormat, testCase.InputPair.ID)
|
||||||
forwardRule := genRuleSpec(routingFinalForwardJump, forwardRuleKey, testCase.InputPair.Source, testCase.InputPair.Destination)
|
forwardRule := genRuleSpec(routingFinalForwardJump, testCase.InputPair.Source, testCase.InputPair.Destination)
|
||||||
|
|
||||||
exists, err := iptablesClient.Exists(tableFilter, chainRTFWD, forwardRule...)
|
exists, err := iptablesClient.Exists(tableFilter, chainRTFWD, forwardRule...)
|
||||||
require.NoError(t, err, "should be able to query the iptables %s table and %s chain", tableFilter, chainRTFWD)
|
require.NoError(t, err, "should be able to query the iptables %s table and %s chain", tableFilter, chainRTFWD)
|
||||||
@@ -103,7 +101,7 @@ func TestIptablesManager_InsertRoutingRules(t *testing.T) {
|
|||||||
require.Equal(t, forwardRule[:4], foundRule[:4], "stored forwarding rule should match")
|
require.Equal(t, forwardRule[:4], foundRule[:4], "stored forwarding rule should match")
|
||||||
|
|
||||||
inForwardRuleKey := firewall.GenKey(firewall.InForwardingFormat, testCase.InputPair.ID)
|
inForwardRuleKey := firewall.GenKey(firewall.InForwardingFormat, testCase.InputPair.ID)
|
||||||
inForwardRule := genRuleSpec(routingFinalForwardJump, inForwardRuleKey, firewall.GetInPair(testCase.InputPair).Source, firewall.GetInPair(testCase.InputPair).Destination)
|
inForwardRule := genRuleSpec(routingFinalForwardJump, firewall.GetInPair(testCase.InputPair).Source, firewall.GetInPair(testCase.InputPair).Destination)
|
||||||
|
|
||||||
exists, err = iptablesClient.Exists(tableFilter, chainRTFWD, inForwardRule...)
|
exists, err = iptablesClient.Exists(tableFilter, chainRTFWD, inForwardRule...)
|
||||||
require.NoError(t, err, "should be able to query the iptables %s table and %s chain", tableFilter, chainRTFWD)
|
require.NoError(t, err, "should be able to query the iptables %s table and %s chain", tableFilter, chainRTFWD)
|
||||||
@@ -114,7 +112,7 @@ func TestIptablesManager_InsertRoutingRules(t *testing.T) {
|
|||||||
require.Equal(t, inForwardRule[:4], foundRule[:4], "stored income forwarding rule should match")
|
require.Equal(t, inForwardRule[:4], foundRule[:4], "stored income forwarding rule should match")
|
||||||
|
|
||||||
natRuleKey := firewall.GenKey(firewall.NatFormat, testCase.InputPair.ID)
|
natRuleKey := firewall.GenKey(firewall.NatFormat, testCase.InputPair.ID)
|
||||||
natRule := genRuleSpec(routingFinalNatJump, natRuleKey, testCase.InputPair.Source, testCase.InputPair.Destination)
|
natRule := genRuleSpec(routingFinalNatJump, testCase.InputPair.Source, testCase.InputPair.Destination)
|
||||||
|
|
||||||
exists, err = iptablesClient.Exists(tableNat, chainRTNAT, natRule...)
|
exists, err = iptablesClient.Exists(tableNat, chainRTNAT, natRule...)
|
||||||
require.NoError(t, err, "should be able to query the iptables %s table and %s chain", tableNat, chainRTNAT)
|
require.NoError(t, err, "should be able to query the iptables %s table and %s chain", tableNat, chainRTNAT)
|
||||||
@@ -130,7 +128,7 @@ func TestIptablesManager_InsertRoutingRules(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
inNatRuleKey := firewall.GenKey(firewall.InNatFormat, testCase.InputPair.ID)
|
inNatRuleKey := firewall.GenKey(firewall.InNatFormat, testCase.InputPair.ID)
|
||||||
inNatRule := genRuleSpec(routingFinalNatJump, inNatRuleKey, firewall.GetInPair(testCase.InputPair).Source, firewall.GetInPair(testCase.InputPair).Destination)
|
inNatRule := genRuleSpec(routingFinalNatJump, firewall.GetInPair(testCase.InputPair).Source, firewall.GetInPair(testCase.InputPair).Destination)
|
||||||
|
|
||||||
exists, err = iptablesClient.Exists(tableNat, chainRTNAT, inNatRule...)
|
exists, err = iptablesClient.Exists(tableNat, chainRTNAT, inNatRule...)
|
||||||
require.NoError(t, err, "should be able to query the iptables %s table and %s chain", tableNat, chainRTNAT)
|
require.NoError(t, err, "should be able to query the iptables %s table and %s chain", tableNat, chainRTNAT)
|
||||||
@@ -167,25 +165,25 @@ func TestIptablesManager_RemoveRoutingRules(t *testing.T) {
|
|||||||
require.NoError(t, err, "shouldn't return error")
|
require.NoError(t, err, "shouldn't return error")
|
||||||
|
|
||||||
forwardRuleKey := firewall.GenKey(firewall.ForwardingFormat, testCase.InputPair.ID)
|
forwardRuleKey := firewall.GenKey(firewall.ForwardingFormat, testCase.InputPair.ID)
|
||||||
forwardRule := genRuleSpec(routingFinalForwardJump, forwardRuleKey, testCase.InputPair.Source, testCase.InputPair.Destination)
|
forwardRule := genRuleSpec(routingFinalForwardJump, testCase.InputPair.Source, testCase.InputPair.Destination)
|
||||||
|
|
||||||
err = iptablesClient.Insert(tableFilter, chainRTFWD, 1, forwardRule...)
|
err = iptablesClient.Insert(tableFilter, chainRTFWD, 1, forwardRule...)
|
||||||
require.NoError(t, err, "inserting rule should not return error")
|
require.NoError(t, err, "inserting rule should not return error")
|
||||||
|
|
||||||
inForwardRuleKey := firewall.GenKey(firewall.InForwardingFormat, testCase.InputPair.ID)
|
inForwardRuleKey := firewall.GenKey(firewall.InForwardingFormat, testCase.InputPair.ID)
|
||||||
inForwardRule := genRuleSpec(routingFinalForwardJump, inForwardRuleKey, firewall.GetInPair(testCase.InputPair).Source, firewall.GetInPair(testCase.InputPair).Destination)
|
inForwardRule := genRuleSpec(routingFinalForwardJump, firewall.GetInPair(testCase.InputPair).Source, firewall.GetInPair(testCase.InputPair).Destination)
|
||||||
|
|
||||||
err = iptablesClient.Insert(tableFilter, chainRTFWD, 1, inForwardRule...)
|
err = iptablesClient.Insert(tableFilter, chainRTFWD, 1, inForwardRule...)
|
||||||
require.NoError(t, err, "inserting rule should not return error")
|
require.NoError(t, err, "inserting rule should not return error")
|
||||||
|
|
||||||
natRuleKey := firewall.GenKey(firewall.NatFormat, testCase.InputPair.ID)
|
natRuleKey := firewall.GenKey(firewall.NatFormat, testCase.InputPair.ID)
|
||||||
natRule := genRuleSpec(routingFinalNatJump, natRuleKey, testCase.InputPair.Source, testCase.InputPair.Destination)
|
natRule := genRuleSpec(routingFinalNatJump, testCase.InputPair.Source, testCase.InputPair.Destination)
|
||||||
|
|
||||||
err = iptablesClient.Insert(tableNat, chainRTNAT, 1, natRule...)
|
err = iptablesClient.Insert(tableNat, chainRTNAT, 1, natRule...)
|
||||||
require.NoError(t, err, "inserting rule should not return error")
|
require.NoError(t, err, "inserting rule should not return error")
|
||||||
|
|
||||||
inNatRuleKey := firewall.GenKey(firewall.InNatFormat, testCase.InputPair.ID)
|
inNatRuleKey := firewall.GenKey(firewall.InNatFormat, testCase.InputPair.ID)
|
||||||
inNatRule := genRuleSpec(routingFinalNatJump, inNatRuleKey, firewall.GetInPair(testCase.InputPair).Source, firewall.GetInPair(testCase.InputPair).Destination)
|
inNatRule := genRuleSpec(routingFinalNatJump, firewall.GetInPair(testCase.InputPair).Source, firewall.GetInPair(testCase.InputPair).Destination)
|
||||||
|
|
||||||
err = iptablesClient.Insert(tableNat, chainRTNAT, 1, inNatRule...)
|
err = iptablesClient.Insert(tableNat, chainRTNAT, 1, inNatRule...)
|
||||||
require.NoError(t, err, "inserting rule should not return error")
|
require.NoError(t, err, "inserting rule should not return error")
|
||||||
|
|||||||
@@ -95,7 +95,7 @@ func (m *Manager) InsertRoutingRules(pair firewall.RouterPair) error {
|
|||||||
m.mutex.Lock()
|
m.mutex.Lock()
|
||||||
defer m.mutex.Unlock()
|
defer m.mutex.Unlock()
|
||||||
|
|
||||||
return m.router.InsertRoutingRules(pair)
|
return m.router.AddRoutingRules(pair)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (m *Manager) RemoveRoutingRules(pair firewall.RouterPair) error {
|
func (m *Manager) RemoveRoutingRules(pair firewall.RouterPair) error {
|
||||||
|
|||||||
@@ -22,6 +22,8 @@ const (
|
|||||||
|
|
||||||
userDataAcceptForwardRuleSrc = "frwacceptsrc"
|
userDataAcceptForwardRuleSrc = "frwacceptsrc"
|
||||||
userDataAcceptForwardRuleDst = "frwacceptdst"
|
userDataAcceptForwardRuleDst = "frwacceptdst"
|
||||||
|
|
||||||
|
loopbackInterface = "lo\x00"
|
||||||
)
|
)
|
||||||
|
|
||||||
// some presets for building nftable rules
|
// some presets for building nftable rules
|
||||||
@@ -126,6 +128,22 @@ func (r *router) createContainers() error {
|
|||||||
Type: nftables.ChainTypeNAT,
|
Type: nftables.ChainTypeNAT,
|
||||||
})
|
})
|
||||||
|
|
||||||
|
// Add RETURN rule for loopback interface
|
||||||
|
loRule := &nftables.Rule{
|
||||||
|
Table: r.workTable,
|
||||||
|
Chain: r.chains[chainNameRoutingNat],
|
||||||
|
Exprs: []expr.Any{
|
||||||
|
&expr.Meta{Key: expr.MetaKeyOIFNAME, Register: 1},
|
||||||
|
&expr.Cmp{
|
||||||
|
Op: expr.CmpOpEq,
|
||||||
|
Register: 1,
|
||||||
|
Data: []byte(loopbackInterface),
|
||||||
|
},
|
||||||
|
&expr.Verdict{Kind: expr.VerdictReturn},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
r.conn.InsertRule(loRule)
|
||||||
|
|
||||||
err := r.refreshRulesMap()
|
err := r.refreshRulesMap()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("failed to clean up rules from FORWARD chain: %s", err)
|
log.Errorf("failed to clean up rules from FORWARD chain: %s", err)
|
||||||
@@ -138,28 +156,28 @@ func (r *router) createContainers() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// InsertRoutingRules inserts a nftable rule pair to the forwarding chain and if enabled, to the nat chain
|
// AddRoutingRules appends a nftable rule pair to the forwarding chain and if enabled, to the nat chain
|
||||||
func (r *router) InsertRoutingRules(pair manager.RouterPair) error {
|
func (r *router) AddRoutingRules(pair manager.RouterPair) error {
|
||||||
err := r.refreshRulesMap()
|
err := r.refreshRulesMap()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
err = r.insertRoutingRule(manager.ForwardingFormat, chainNameRouteingFw, pair, false)
|
err = r.addRoutingRule(manager.ForwardingFormat, chainNameRouteingFw, pair, false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
err = r.insertRoutingRule(manager.InForwardingFormat, chainNameRouteingFw, manager.GetInPair(pair), false)
|
err = r.addRoutingRule(manager.InForwardingFormat, chainNameRouteingFw, manager.GetInPair(pair), false)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if pair.Masquerade {
|
if pair.Masquerade {
|
||||||
err = r.insertRoutingRule(manager.NatFormat, chainNameRoutingNat, pair, true)
|
err = r.addRoutingRule(manager.NatFormat, chainNameRoutingNat, pair, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
err = r.insertRoutingRule(manager.InNatFormat, chainNameRoutingNat, manager.GetInPair(pair), true)
|
err = r.addRoutingRule(manager.InNatFormat, chainNameRoutingNat, manager.GetInPair(pair), true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@@ -177,8 +195,8 @@ func (r *router) InsertRoutingRules(pair manager.RouterPair) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// insertRoutingRule inserts a nftable rule to the conn client flush queue
|
// addRoutingRule inserts a nftable rule to the conn client flush queue
|
||||||
func (r *router) insertRoutingRule(format, chainName string, pair manager.RouterPair, isNat bool) error {
|
func (r *router) addRoutingRule(format, chainName string, pair manager.RouterPair, isNat bool) error {
|
||||||
sourceExp := generateCIDRMatcherExpressions(true, pair.Source)
|
sourceExp := generateCIDRMatcherExpressions(true, pair.Source)
|
||||||
destExp := generateCIDRMatcherExpressions(false, pair.Destination)
|
destExp := generateCIDRMatcherExpressions(false, pair.Destination)
|
||||||
|
|
||||||
@@ -199,7 +217,7 @@ func (r *router) insertRoutingRule(format, chainName string, pair manager.Router
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
r.rules[ruleKey] = r.conn.InsertRule(&nftables.Rule{
|
r.rules[ruleKey] = r.conn.AddRule(&nftables.Rule{
|
||||||
Table: r.workTable,
|
Table: r.workTable,
|
||||||
Chain: r.chains[chainName],
|
Chain: r.chains[chainName],
|
||||||
Exprs: expression,
|
Exprs: expression,
|
||||||
|
|||||||
@@ -47,7 +47,7 @@ func TestNftablesManager_InsertRoutingRules(t *testing.T) {
|
|||||||
|
|
||||||
require.NoError(t, err, "shouldn't return error")
|
require.NoError(t, err, "shouldn't return error")
|
||||||
|
|
||||||
err = manager.InsertRoutingRules(testCase.InputPair)
|
err = manager.AddRoutingRules(testCase.InputPair)
|
||||||
defer func() {
|
defer func() {
|
||||||
_ = manager.RemoveRoutingRules(testCase.InputPair)
|
_ = manager.RemoveRoutingRules(testCase.InputPair)
|
||||||
}()
|
}()
|
||||||
|
|||||||
@@ -64,15 +64,18 @@ func manageFirewallRule(ruleName string, action action, extraArgs ...string) err
|
|||||||
if action == addRule {
|
if action == addRule {
|
||||||
args = append(args, extraArgs...)
|
args = append(args, extraArgs...)
|
||||||
}
|
}
|
||||||
|
netshCmd := GetSystem32Command("netsh")
|
||||||
cmd := exec.Command("netsh", args...)
|
cmd := exec.Command(netshCmd, args...)
|
||||||
cmd.SysProcAttr = &syscall.SysProcAttr{HideWindow: true}
|
cmd.SysProcAttr = &syscall.SysProcAttr{HideWindow: true}
|
||||||
return cmd.Run()
|
return cmd.Run()
|
||||||
}
|
}
|
||||||
|
|
||||||
func isWindowsFirewallReachable() bool {
|
func isWindowsFirewallReachable() bool {
|
||||||
args := []string{"advfirewall", "show", "allprofiles", "state"}
|
args := []string{"advfirewall", "show", "allprofiles", "state"}
|
||||||
cmd := exec.Command("netsh", args...)
|
|
||||||
|
netshCmd := GetSystem32Command("netsh")
|
||||||
|
|
||||||
|
cmd := exec.Command(netshCmd, args...)
|
||||||
cmd.SysProcAttr = &syscall.SysProcAttr{HideWindow: true}
|
cmd.SysProcAttr = &syscall.SysProcAttr{HideWindow: true}
|
||||||
|
|
||||||
_, err := cmd.Output()
|
_, err := cmd.Output()
|
||||||
@@ -87,8 +90,23 @@ func isWindowsFirewallReachable() bool {
|
|||||||
func isFirewallRuleActive(ruleName string) bool {
|
func isFirewallRuleActive(ruleName string) bool {
|
||||||
args := []string{"advfirewall", "firewall", "show", "rule", "name=" + ruleName}
|
args := []string{"advfirewall", "firewall", "show", "rule", "name=" + ruleName}
|
||||||
|
|
||||||
cmd := exec.Command("netsh", args...)
|
netshCmd := GetSystem32Command("netsh")
|
||||||
|
|
||||||
|
cmd := exec.Command(netshCmd, args...)
|
||||||
cmd.SysProcAttr = &syscall.SysProcAttr{HideWindow: true}
|
cmd.SysProcAttr = &syscall.SysProcAttr{HideWindow: true}
|
||||||
_, err := cmd.Output()
|
_, err := cmd.Output()
|
||||||
return err == nil
|
return err == nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GetSystem32Command checks if a command can be found in the system path and returns it. In case it can't find it
|
||||||
|
// in the path it will return the full path of a command assuming C:\windows\system32 as the base path.
|
||||||
|
func GetSystem32Command(command string) string {
|
||||||
|
_, err := exec.LookPath(command)
|
||||||
|
if err == nil {
|
||||||
|
return command
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Tracef("Command %s not found in PATH, using C:\\windows\\system32\\%s.exe path", command, command)
|
||||||
|
|
||||||
|
return "C:\\windows\\system32\\" + command + ".exe"
|
||||||
|
}
|
||||||
|
|||||||
@@ -337,7 +337,6 @@ func validateRule(ip net.IP, packetData []byte, rules map[string]Rule, d *decode
|
|||||||
if rule.dPort != 0 && rule.dPort == uint16(d.udp.DstPort) {
|
if rule.dPort != 0 && rule.dPort == uint16(d.udp.DstPort) {
|
||||||
return rule.drop, true
|
return rule.drop, true
|
||||||
}
|
}
|
||||||
return rule.drop, true
|
|
||||||
case layers.LayerTypeICMPv4, layers.LayerTypeICMPv6:
|
case layers.LayerTypeICMPv4, layers.LayerTypeICMPv6:
|
||||||
return rule.drop, true
|
return rule.drop, true
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -69,6 +69,11 @@ func NewOAuthFlow(ctx context.Context, config *internal.Config, isLinuxDesktopCl
|
|||||||
return authenticateWithDeviceCodeFlow(ctx, config)
|
return authenticateWithDeviceCodeFlow(ctx, config)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// On FreeBSD we currently do not support desktop environments and offer only Device Code Flow (#2384)
|
||||||
|
if runtime.GOOS == "freebsd" {
|
||||||
|
return authenticateWithDeviceCodeFlow(ctx, config)
|
||||||
|
}
|
||||||
|
|
||||||
pkceFlow, err := authenticateWithPKCEFlow(ctx, config)
|
pkceFlow, err := authenticateWithPKCEFlow(ctx, config)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// fallback to device code flow
|
// fallback to device code flow
|
||||||
|
|||||||
@@ -5,12 +5,17 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"net/url"
|
"net/url"
|
||||||
"os"
|
"os"
|
||||||
|
"reflect"
|
||||||
|
"runtime"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
|
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
|
||||||
"google.golang.org/grpc/codes"
|
"google.golang.org/grpc/codes"
|
||||||
"google.golang.org/grpc/status"
|
"google.golang.org/grpc/status"
|
||||||
|
|
||||||
|
"github.com/netbirdio/netbird/client/internal/routemanager/dynamic"
|
||||||
"github.com/netbirdio/netbird/client/ssh"
|
"github.com/netbirdio/netbird/client/ssh"
|
||||||
"github.com/netbirdio/netbird/iface"
|
"github.com/netbirdio/netbird/iface"
|
||||||
mgm "github.com/netbirdio/netbird/management/client"
|
mgm "github.com/netbirdio/netbird/management/client"
|
||||||
@@ -30,8 +35,10 @@ const (
|
|||||||
DefaultAdminURL = "https://app.netbird.io:443"
|
DefaultAdminURL = "https://app.netbird.io:443"
|
||||||
)
|
)
|
||||||
|
|
||||||
var defaultInterfaceBlacklist = []string{iface.WgInterfaceDefault, "wt", "utun", "tun0", "zt", "ZeroTier", "wg", "ts",
|
var defaultInterfaceBlacklist = []string{
|
||||||
"Tailscale", "tailscale", "docker", "veth", "br-", "lo"}
|
iface.WgInterfaceDefault, "wt", "utun", "tun0", "zt", "ZeroTier", "wg", "ts",
|
||||||
|
"Tailscale", "tailscale", "docker", "veth", "br-", "lo",
|
||||||
|
}
|
||||||
|
|
||||||
// ConfigInput carries configuration changes to the client
|
// ConfigInput carries configuration changes to the client
|
||||||
type ConfigInput struct {
|
type ConfigInput struct {
|
||||||
@@ -46,7 +53,10 @@ type ConfigInput struct {
|
|||||||
RosenpassPermissive *bool
|
RosenpassPermissive *bool
|
||||||
InterfaceName *string
|
InterfaceName *string
|
||||||
WireguardPort *int
|
WireguardPort *int
|
||||||
|
NetworkMonitor *bool
|
||||||
DisableAutoConnect *bool
|
DisableAutoConnect *bool
|
||||||
|
ExtraIFaceBlackList []string
|
||||||
|
DNSRouteInterval *time.Duration
|
||||||
}
|
}
|
||||||
|
|
||||||
// Config Configuration type
|
// Config Configuration type
|
||||||
@@ -58,6 +68,7 @@ type Config struct {
|
|||||||
AdminURL *url.URL
|
AdminURL *url.URL
|
||||||
WgIface string
|
WgIface string
|
||||||
WgPort int
|
WgPort int
|
||||||
|
NetworkMonitor *bool
|
||||||
IFaceBlackList []string
|
IFaceBlackList []string
|
||||||
DisableIPv6Discovery bool
|
DisableIPv6Discovery bool
|
||||||
RosenpassEnabled bool
|
RosenpassEnabled bool
|
||||||
@@ -88,6 +99,9 @@ type Config struct {
|
|||||||
// DisableAutoConnect determines whether the client should not start with the service
|
// DisableAutoConnect determines whether the client should not start with the service
|
||||||
// it's set to false by default due to backwards compatibility
|
// it's set to false by default due to backwards compatibility
|
||||||
DisableAutoConnect bool
|
DisableAutoConnect bool
|
||||||
|
|
||||||
|
// DNSRouteInterval is the interval in which the DNS routes are updated
|
||||||
|
DNSRouteInterval time.Duration
|
||||||
}
|
}
|
||||||
|
|
||||||
// ReadConfig read config file and return with Config. If it is not exists create a new with default values
|
// ReadConfig read config file and return with Config. If it is not exists create a new with default values
|
||||||
@@ -97,6 +111,14 @@ func ReadConfig(configPath string) (*Config, error) {
|
|||||||
if _, err := util.ReadJson(configPath, config); err != nil {
|
if _, err := util.ReadJson(configPath, config); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
// initialize through apply() without changes
|
||||||
|
if changed, err := config.apply(ConfigInput{}); err != nil {
|
||||||
|
return nil, err
|
||||||
|
} else if changed {
|
||||||
|
if err = WriteOutConfig(configPath, config); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
return config, nil
|
return config, nil
|
||||||
}
|
}
|
||||||
@@ -149,78 +171,15 @@ func WriteOutConfig(path string, config *Config) error {
|
|||||||
|
|
||||||
// createNewConfig creates a new config generating a new Wireguard key and saving to file
|
// createNewConfig creates a new config generating a new Wireguard key and saving to file
|
||||||
func createNewConfig(input ConfigInput) (*Config, error) {
|
func createNewConfig(input ConfigInput) (*Config, error) {
|
||||||
wgKey := generateKey()
|
|
||||||
pem, err := ssh.GeneratePrivateKey(ssh.ED25519)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
config := &Config{
|
config := &Config{
|
||||||
SSHKey: string(pem),
|
// defaults to false only for new (post 0.26) configurations
|
||||||
PrivateKey: wgKey,
|
ServerSSHAllowed: util.False(),
|
||||||
IFaceBlackList: []string{},
|
|
||||||
DisableIPv6Discovery: false,
|
|
||||||
NATExternalIPs: input.NATExternalIPs,
|
|
||||||
CustomDNSAddress: string(input.CustomDNSAddress),
|
|
||||||
ServerSSHAllowed: util.False(),
|
|
||||||
DisableAutoConnect: false,
|
|
||||||
}
|
}
|
||||||
|
|
||||||
defaultManagementURL, err := parseURL("Management URL", DefaultManagementURL)
|
if _, err := config.apply(input); err != nil {
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
config.ManagementURL = defaultManagementURL
|
|
||||||
if input.ManagementURL != "" {
|
|
||||||
URL, err := parseURL("Management URL", input.ManagementURL)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
config.ManagementURL = URL
|
|
||||||
}
|
|
||||||
|
|
||||||
config.WgPort = iface.DefaultWgPort
|
|
||||||
if input.WireguardPort != nil {
|
|
||||||
config.WgPort = *input.WireguardPort
|
|
||||||
}
|
|
||||||
|
|
||||||
config.WgIface = iface.WgInterfaceDefault
|
|
||||||
if input.InterfaceName != nil {
|
|
||||||
config.WgIface = *input.InterfaceName
|
|
||||||
}
|
|
||||||
|
|
||||||
if input.PreSharedKey != nil {
|
|
||||||
config.PreSharedKey = *input.PreSharedKey
|
|
||||||
}
|
|
||||||
|
|
||||||
if input.RosenpassEnabled != nil {
|
|
||||||
config.RosenpassEnabled = *input.RosenpassEnabled
|
|
||||||
}
|
|
||||||
|
|
||||||
if input.RosenpassPermissive != nil {
|
|
||||||
config.RosenpassPermissive = *input.RosenpassPermissive
|
|
||||||
}
|
|
||||||
|
|
||||||
if input.ServerSSHAllowed != nil {
|
|
||||||
config.ServerSSHAllowed = input.ServerSSHAllowed
|
|
||||||
}
|
|
||||||
|
|
||||||
defaultAdminURL, err := parseURL("Admin URL", DefaultAdminURL)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
config.AdminURL = defaultAdminURL
|
|
||||||
if input.AdminURL != "" {
|
|
||||||
newURL, err := parseURL("Admin Panel URL", input.AdminURL)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
config.AdminURL = newURL
|
|
||||||
}
|
|
||||||
|
|
||||||
config.IFaceBlackList = defaultInterfaceBlacklist
|
|
||||||
return config, nil
|
return config, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -231,97 +190,12 @@ func update(input ConfigInput) (*Config, error) {
|
|||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
refresh := false
|
updated, err := config.apply(input)
|
||||||
|
if err != nil {
|
||||||
if input.ManagementURL != "" && config.ManagementURL.String() != input.ManagementURL {
|
return nil, err
|
||||||
log.Infof("new Management URL provided, updated to %s (old value %s)",
|
|
||||||
input.ManagementURL, config.ManagementURL)
|
|
||||||
newURL, err := parseURL("Management URL", input.ManagementURL)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
config.ManagementURL = newURL
|
|
||||||
refresh = true
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if input.AdminURL != "" && (config.AdminURL == nil || config.AdminURL.String() != input.AdminURL) {
|
if updated {
|
||||||
log.Infof("new Admin Panel URL provided, updated to %s (old value %s)",
|
|
||||||
input.AdminURL, config.AdminURL)
|
|
||||||
newURL, err := parseURL("Admin Panel URL", input.AdminURL)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
config.AdminURL = newURL
|
|
||||||
refresh = true
|
|
||||||
}
|
|
||||||
|
|
||||||
if input.PreSharedKey != nil && config.PreSharedKey != *input.PreSharedKey {
|
|
||||||
log.Infof("new pre-shared key provided, replacing old key")
|
|
||||||
config.PreSharedKey = *input.PreSharedKey
|
|
||||||
refresh = true
|
|
||||||
}
|
|
||||||
|
|
||||||
if config.SSHKey == "" {
|
|
||||||
pem, err := ssh.GeneratePrivateKey(ssh.ED25519)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
config.SSHKey = string(pem)
|
|
||||||
refresh = true
|
|
||||||
}
|
|
||||||
|
|
||||||
if config.WgPort == 0 {
|
|
||||||
config.WgPort = iface.DefaultWgPort
|
|
||||||
refresh = true
|
|
||||||
}
|
|
||||||
|
|
||||||
if input.WireguardPort != nil {
|
|
||||||
config.WgPort = *input.WireguardPort
|
|
||||||
refresh = true
|
|
||||||
}
|
|
||||||
|
|
||||||
if input.InterfaceName != nil {
|
|
||||||
config.WgIface = *input.InterfaceName
|
|
||||||
refresh = true
|
|
||||||
}
|
|
||||||
|
|
||||||
if input.NATExternalIPs != nil && len(config.NATExternalIPs) != len(input.NATExternalIPs) {
|
|
||||||
config.NATExternalIPs = input.NATExternalIPs
|
|
||||||
refresh = true
|
|
||||||
}
|
|
||||||
|
|
||||||
if input.CustomDNSAddress != nil {
|
|
||||||
config.CustomDNSAddress = string(input.CustomDNSAddress)
|
|
||||||
refresh = true
|
|
||||||
}
|
|
||||||
|
|
||||||
if input.RosenpassEnabled != nil {
|
|
||||||
config.RosenpassEnabled = *input.RosenpassEnabled
|
|
||||||
refresh = true
|
|
||||||
}
|
|
||||||
|
|
||||||
if input.RosenpassPermissive != nil {
|
|
||||||
config.RosenpassPermissive = *input.RosenpassPermissive
|
|
||||||
refresh = true
|
|
||||||
}
|
|
||||||
|
|
||||||
if input.DisableAutoConnect != nil {
|
|
||||||
config.DisableAutoConnect = *input.DisableAutoConnect
|
|
||||||
refresh = true
|
|
||||||
}
|
|
||||||
|
|
||||||
if input.ServerSSHAllowed != nil {
|
|
||||||
config.ServerSSHAllowed = input.ServerSSHAllowed
|
|
||||||
refresh = true
|
|
||||||
}
|
|
||||||
|
|
||||||
if config.ServerSSHAllowed == nil {
|
|
||||||
config.ServerSSHAllowed = util.True()
|
|
||||||
refresh = true
|
|
||||||
}
|
|
||||||
|
|
||||||
if refresh {
|
|
||||||
// since we have new management URL, we need to update config file
|
|
||||||
if err := util.WriteJson(input.ConfigPath, config); err != nil {
|
if err := util.WriteJson(input.ConfigPath, config); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@@ -330,6 +204,190 @@ func update(input ConfigInput) (*Config, error) {
|
|||||||
return config, nil
|
return config, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (config *Config) apply(input ConfigInput) (updated bool, err error) {
|
||||||
|
if config.ManagementURL == nil {
|
||||||
|
log.Infof("using default Management URL %s", DefaultManagementURL)
|
||||||
|
config.ManagementURL, err = parseURL("Management URL", DefaultManagementURL)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if input.ManagementURL != "" && input.ManagementURL != config.ManagementURL.String() {
|
||||||
|
log.Infof("new Management URL provided, updated to %#v (old value %#v)",
|
||||||
|
input.ManagementURL, config.ManagementURL.String())
|
||||||
|
URL, err := parseURL("Management URL", input.ManagementURL)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
config.ManagementURL = URL
|
||||||
|
updated = true
|
||||||
|
} else if config.ManagementURL == nil {
|
||||||
|
log.Infof("using default Management URL %s", DefaultManagementURL)
|
||||||
|
config.ManagementURL, err = parseURL("Management URL", DefaultManagementURL)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if config.AdminURL == nil {
|
||||||
|
log.Infof("using default Admin URL %s", DefaultManagementURL)
|
||||||
|
config.AdminURL, err = parseURL("Admin URL", DefaultAdminURL)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if input.AdminURL != "" && input.AdminURL != config.AdminURL.String() {
|
||||||
|
log.Infof("new Admin Panel URL provided, updated to %#v (old value %#v)",
|
||||||
|
input.AdminURL, config.AdminURL.String())
|
||||||
|
newURL, err := parseURL("Admin Panel URL", input.AdminURL)
|
||||||
|
if err != nil {
|
||||||
|
return updated, err
|
||||||
|
}
|
||||||
|
config.AdminURL = newURL
|
||||||
|
updated = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if config.PrivateKey == "" {
|
||||||
|
log.Infof("generated new Wireguard key")
|
||||||
|
config.PrivateKey = generateKey()
|
||||||
|
updated = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if config.SSHKey == "" {
|
||||||
|
log.Infof("generated new SSH key")
|
||||||
|
pem, err := ssh.GeneratePrivateKey(ssh.ED25519)
|
||||||
|
if err != nil {
|
||||||
|
return false, err
|
||||||
|
}
|
||||||
|
config.SSHKey = string(pem)
|
||||||
|
updated = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if input.WireguardPort != nil && *input.WireguardPort != config.WgPort {
|
||||||
|
log.Infof("updating Wireguard port %d (old value %d)",
|
||||||
|
*input.WireguardPort, config.WgPort)
|
||||||
|
config.WgPort = *input.WireguardPort
|
||||||
|
updated = true
|
||||||
|
} else if config.WgPort == 0 {
|
||||||
|
config.WgPort = iface.DefaultWgPort
|
||||||
|
log.Infof("using default Wireguard port %d", config.WgPort)
|
||||||
|
updated = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if input.InterfaceName != nil && *input.InterfaceName != config.WgIface {
|
||||||
|
log.Infof("updating Wireguard interface %#v (old value %#v)",
|
||||||
|
*input.InterfaceName, config.WgIface)
|
||||||
|
config.WgIface = *input.InterfaceName
|
||||||
|
updated = true
|
||||||
|
} else if config.WgIface == "" {
|
||||||
|
config.WgIface = iface.WgInterfaceDefault
|
||||||
|
log.Infof("using default Wireguard interface %s", config.WgIface)
|
||||||
|
updated = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if input.NATExternalIPs != nil && !reflect.DeepEqual(config.NATExternalIPs, input.NATExternalIPs) {
|
||||||
|
log.Infof("updating NAT External IP [ %s ] (old value: [ %s ])",
|
||||||
|
strings.Join(input.NATExternalIPs, " "),
|
||||||
|
strings.Join(config.NATExternalIPs, " "))
|
||||||
|
config.NATExternalIPs = input.NATExternalIPs
|
||||||
|
updated = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if input.PreSharedKey != nil && *input.PreSharedKey != config.PreSharedKey {
|
||||||
|
log.Infof("new pre-shared key provided, replacing old key")
|
||||||
|
config.PreSharedKey = *input.PreSharedKey
|
||||||
|
updated = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if input.RosenpassEnabled != nil && *input.RosenpassEnabled != config.RosenpassEnabled {
|
||||||
|
log.Infof("switching Rosenpass to %t", *input.RosenpassEnabled)
|
||||||
|
config.RosenpassEnabled = *input.RosenpassEnabled
|
||||||
|
updated = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if input.RosenpassPermissive != nil && *input.RosenpassPermissive != config.RosenpassPermissive {
|
||||||
|
log.Infof("switching Rosenpass permissive to %t", *input.RosenpassPermissive)
|
||||||
|
config.RosenpassPermissive = *input.RosenpassPermissive
|
||||||
|
updated = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if input.NetworkMonitor != nil && input.NetworkMonitor != config.NetworkMonitor {
|
||||||
|
log.Infof("switching Network Monitor to %t", *input.NetworkMonitor)
|
||||||
|
config.NetworkMonitor = input.NetworkMonitor
|
||||||
|
updated = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if config.NetworkMonitor == nil {
|
||||||
|
// enable network monitoring by default on windows and darwin clients
|
||||||
|
if runtime.GOOS == "windows" || runtime.GOOS == "darwin" {
|
||||||
|
enabled := true
|
||||||
|
config.NetworkMonitor = &enabled
|
||||||
|
updated = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if input.CustomDNSAddress != nil && string(input.CustomDNSAddress) != config.CustomDNSAddress {
|
||||||
|
log.Infof("updating custom DNS address %#v (old value %#v)",
|
||||||
|
string(input.CustomDNSAddress), config.CustomDNSAddress)
|
||||||
|
config.CustomDNSAddress = string(input.CustomDNSAddress)
|
||||||
|
updated = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(config.IFaceBlackList) == 0 {
|
||||||
|
log.Infof("filling in interface blacklist with defaults: [ %s ]",
|
||||||
|
strings.Join(defaultInterfaceBlacklist, " "))
|
||||||
|
config.IFaceBlackList = append(config.IFaceBlackList, defaultInterfaceBlacklist...)
|
||||||
|
updated = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(input.ExtraIFaceBlackList) > 0 {
|
||||||
|
for _, iFace := range util.SliceDiff(input.ExtraIFaceBlackList, config.IFaceBlackList) {
|
||||||
|
log.Infof("adding new entry to interface blacklist: %s", iFace)
|
||||||
|
config.IFaceBlackList = append(config.IFaceBlackList, iFace)
|
||||||
|
updated = true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if input.DisableAutoConnect != nil && *input.DisableAutoConnect != config.DisableAutoConnect {
|
||||||
|
if *input.DisableAutoConnect {
|
||||||
|
log.Infof("turning off automatic connection on startup")
|
||||||
|
} else {
|
||||||
|
log.Infof("enabling automatic connection on startup")
|
||||||
|
}
|
||||||
|
config.DisableAutoConnect = *input.DisableAutoConnect
|
||||||
|
updated = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if input.ServerSSHAllowed != nil && *input.ServerSSHAllowed != *config.ServerSSHAllowed {
|
||||||
|
if *input.ServerSSHAllowed {
|
||||||
|
log.Infof("enabling SSH server")
|
||||||
|
} else {
|
||||||
|
log.Infof("disabling SSH server")
|
||||||
|
}
|
||||||
|
config.ServerSSHAllowed = input.ServerSSHAllowed
|
||||||
|
updated = true
|
||||||
|
} else if config.ServerSSHAllowed == nil {
|
||||||
|
// enables SSH for configs from old versions to preserve backwards compatibility
|
||||||
|
log.Infof("falling back to enabled SSH server for pre-existing configuration")
|
||||||
|
config.ServerSSHAllowed = util.True()
|
||||||
|
updated = true
|
||||||
|
}
|
||||||
|
|
||||||
|
if input.DNSRouteInterval != nil && *input.DNSRouteInterval != config.DNSRouteInterval {
|
||||||
|
log.Infof("updating DNS route interval to %s (old value %s)",
|
||||||
|
input.DNSRouteInterval.String(), config.DNSRouteInterval.String())
|
||||||
|
config.DNSRouteInterval = *input.DNSRouteInterval
|
||||||
|
updated = true
|
||||||
|
} else if config.DNSRouteInterval == 0 {
|
||||||
|
config.DNSRouteInterval = dynamic.DefaultInterval
|
||||||
|
log.Infof("using default DNS route interval %s", config.DNSRouteInterval)
|
||||||
|
updated = true
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
return updated, nil
|
||||||
|
}
|
||||||
|
|
||||||
// parseURL parses and validates a service URL
|
// parseURL parses and validates a service URL
|
||||||
func parseURL(serviceName, serviceURL string) (*url.URL, error) {
|
func parseURL(serviceName, serviceURL string) (*url.URL, error) {
|
||||||
parsedMgmtURL, err := url.ParseRequestURI(serviceURL)
|
parsedMgmtURL, err := url.ParseRequestURI(serviceURL)
|
||||||
@@ -384,7 +442,6 @@ func configFileIsExists(path string) bool {
|
|||||||
// If it can switch, then it updates the config and returns a new one. Otherwise, it returns the provided config.
|
// If it can switch, then it updates the config and returns a new one. Otherwise, it returns the provided config.
|
||||||
// The check is performed only for the NetBird's managed version.
|
// The check is performed only for the NetBird's managed version.
|
||||||
func UpdateOldManagementURL(ctx context.Context, config *Config, configPath string) (*Config, error) {
|
func UpdateOldManagementURL(ctx context.Context, config *Config, configPath string) (*Config, error) {
|
||||||
|
|
||||||
defaultManagementURL, err := parseURL("Management URL", DefaultManagementURL)
|
defaultManagementURL, err := parseURL("Management URL", DefaultManagementURL)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
|||||||
@@ -18,7 +18,6 @@ func TestGetConfig(t *testing.T) {
|
|||||||
config, err := UpdateOrCreateConfig(ConfigInput{
|
config, err := UpdateOrCreateConfig(ConfigInput{
|
||||||
ConfigPath: filepath.Join(t.TempDir(), "config.json"),
|
ConfigPath: filepath.Join(t.TempDir(), "config.json"),
|
||||||
})
|
})
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -86,6 +85,26 @@ func TestGetConfig(t *testing.T) {
|
|||||||
assert.Equal(t, readConf.(*Config).ManagementURL.String(), newManagementURL)
|
assert.Equal(t, readConf.(*Config).ManagementURL.String(), newManagementURL)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestExtraIFaceBlackList(t *testing.T) {
|
||||||
|
extraIFaceBlackList := []string{"eth1"}
|
||||||
|
path := filepath.Join(t.TempDir(), "config.json")
|
||||||
|
config, err := UpdateOrCreateConfig(ConfigInput{
|
||||||
|
ConfigPath: path,
|
||||||
|
ExtraIFaceBlackList: extraIFaceBlackList,
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
assert.Contains(t, config.IFaceBlackList, "eth1")
|
||||||
|
readConf, err := util.ReadJson(path, config)
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
assert.Contains(t, readConf.(*Config).IFaceBlackList, "eth1")
|
||||||
|
}
|
||||||
|
|
||||||
func TestHiddenPreSharedKey(t *testing.T) {
|
func TestHiddenPreSharedKey(t *testing.T) {
|
||||||
hidden := "**********"
|
hidden := "**********"
|
||||||
samplePreSharedKey := "mysecretpresharedkey"
|
samplePreSharedKey := "mysecretpresharedkey"
|
||||||
@@ -111,7 +130,6 @@ func TestHiddenPreSharedKey(t *testing.T) {
|
|||||||
ConfigPath: cfgFile,
|
ConfigPath: cfgFile,
|
||||||
PreSharedKey: tt.preSharedKey,
|
PreSharedKey: tt.preSharedKey,
|
||||||
})
|
})
|
||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("failed to get cfg: %s", err)
|
t.Fatalf("failed to get cfg: %s", err)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -4,7 +4,11 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"runtime"
|
||||||
|
"runtime/debug"
|
||||||
"strings"
|
"strings"
|
||||||
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/cenkalti/backoff/v4"
|
"github.com/cenkalti/backoff/v4"
|
||||||
@@ -22,34 +26,52 @@ import (
|
|||||||
"github.com/netbirdio/netbird/iface"
|
"github.com/netbirdio/netbird/iface"
|
||||||
mgm "github.com/netbirdio/netbird/management/client"
|
mgm "github.com/netbirdio/netbird/management/client"
|
||||||
mgmProto "github.com/netbirdio/netbird/management/proto"
|
mgmProto "github.com/netbirdio/netbird/management/proto"
|
||||||
|
"github.com/netbirdio/netbird/relay/auth/hmac"
|
||||||
|
relayClient "github.com/netbirdio/netbird/relay/client"
|
||||||
signal "github.com/netbirdio/netbird/signal/client"
|
signal "github.com/netbirdio/netbird/signal/client"
|
||||||
"github.com/netbirdio/netbird/util"
|
"github.com/netbirdio/netbird/util"
|
||||||
"github.com/netbirdio/netbird/version"
|
"github.com/netbirdio/netbird/version"
|
||||||
)
|
)
|
||||||
|
|
||||||
// RunClient with main logic.
|
type ConnectClient struct {
|
||||||
func RunClient(ctx context.Context, config *Config, statusRecorder *peer.Status) error {
|
ctx context.Context
|
||||||
return runClient(ctx, config, statusRecorder, MobileDependency{}, nil, nil, nil, nil)
|
config *Config
|
||||||
|
statusRecorder *peer.Status
|
||||||
|
engine *Engine
|
||||||
|
engineMutex sync.Mutex
|
||||||
}
|
}
|
||||||
|
|
||||||
// RunClientWithProbes runs the client's main logic with probes attached
|
func NewConnectClient(
|
||||||
func RunClientWithProbes(
|
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
config *Config,
|
config *Config,
|
||||||
statusRecorder *peer.Status,
|
statusRecorder *peer.Status,
|
||||||
|
|
||||||
|
) *ConnectClient {
|
||||||
|
return &ConnectClient{
|
||||||
|
ctx: ctx,
|
||||||
|
config: config,
|
||||||
|
statusRecorder: statusRecorder,
|
||||||
|
engineMutex: sync.Mutex{},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Run with main logic.
|
||||||
|
func (c *ConnectClient) Run() error {
|
||||||
|
return c.run(MobileDependency{}, nil, nil, nil, nil)
|
||||||
|
}
|
||||||
|
|
||||||
|
// RunWithProbes runs the client's main logic with probes attached
|
||||||
|
func (c *ConnectClient) RunWithProbes(
|
||||||
mgmProbe *Probe,
|
mgmProbe *Probe,
|
||||||
signalProbe *Probe,
|
signalProbe *Probe,
|
||||||
relayProbe *Probe,
|
relayProbe *Probe,
|
||||||
wgProbe *Probe,
|
wgProbe *Probe,
|
||||||
) error {
|
) error {
|
||||||
return runClient(ctx, config, statusRecorder, MobileDependency{}, mgmProbe, signalProbe, relayProbe, wgProbe)
|
return c.run(MobileDependency{}, mgmProbe, signalProbe, relayProbe, wgProbe)
|
||||||
}
|
}
|
||||||
|
|
||||||
// RunClientMobile with main logic on mobile system
|
// RunOnAndroid with main logic on mobile system
|
||||||
func RunClientMobile(
|
func (c *ConnectClient) RunOnAndroid(
|
||||||
ctx context.Context,
|
|
||||||
config *Config,
|
|
||||||
statusRecorder *peer.Status,
|
|
||||||
tunAdapter iface.TunAdapter,
|
tunAdapter iface.TunAdapter,
|
||||||
iFaceDiscover stdnet.ExternalIFaceDiscover,
|
iFaceDiscover stdnet.ExternalIFaceDiscover,
|
||||||
networkChangeListener listener.NetworkChangeListener,
|
networkChangeListener listener.NetworkChangeListener,
|
||||||
@@ -64,40 +86,43 @@ func RunClientMobile(
|
|||||||
HostDNSAddresses: dnsAddresses,
|
HostDNSAddresses: dnsAddresses,
|
||||||
DnsReadyListener: dnsReadyListener,
|
DnsReadyListener: dnsReadyListener,
|
||||||
}
|
}
|
||||||
return runClient(ctx, config, statusRecorder, mobileDependency, nil, nil, nil, nil)
|
return c.run(mobileDependency, nil, nil, nil, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
func RunClientiOS(
|
func (c *ConnectClient) RunOniOS(
|
||||||
ctx context.Context,
|
|
||||||
config *Config,
|
|
||||||
statusRecorder *peer.Status,
|
|
||||||
fileDescriptor int32,
|
fileDescriptor int32,
|
||||||
networkChangeListener listener.NetworkChangeListener,
|
networkChangeListener listener.NetworkChangeListener,
|
||||||
dnsManager dns.IosDnsManager,
|
dnsManager dns.IosDnsManager,
|
||||||
) error {
|
) error {
|
||||||
|
// Set GC percent to 5% to reduce memory usage as iOS only allows 50MB of memory for the extension.
|
||||||
|
debug.SetGCPercent(5)
|
||||||
|
|
||||||
mobileDependency := MobileDependency{
|
mobileDependency := MobileDependency{
|
||||||
FileDescriptor: fileDescriptor,
|
FileDescriptor: fileDescriptor,
|
||||||
NetworkChangeListener: networkChangeListener,
|
NetworkChangeListener: networkChangeListener,
|
||||||
DnsManager: dnsManager,
|
DnsManager: dnsManager,
|
||||||
}
|
}
|
||||||
return runClient(ctx, config, statusRecorder, mobileDependency, nil, nil, nil, nil)
|
return c.run(mobileDependency, nil, nil, nil, nil)
|
||||||
}
|
}
|
||||||
|
|
||||||
func runClient(
|
func (c *ConnectClient) run(
|
||||||
ctx context.Context,
|
|
||||||
config *Config,
|
|
||||||
statusRecorder *peer.Status,
|
|
||||||
mobileDependency MobileDependency,
|
mobileDependency MobileDependency,
|
||||||
mgmProbe *Probe,
|
mgmProbe *Probe,
|
||||||
signalProbe *Probe,
|
signalProbe *Probe,
|
||||||
relayProbe *Probe,
|
relayProbe *Probe,
|
||||||
wgProbe *Probe,
|
wgProbe *Probe,
|
||||||
) error {
|
) error {
|
||||||
log.Infof("starting NetBird client version %s", version.NetbirdVersion())
|
defer func() {
|
||||||
|
if r := recover(); r != nil {
|
||||||
|
log.Panicf("Panic occurred: %v, stack trace: %s", r, string(debug.Stack()))
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
log.Infof("starting NetBird client version %s on %s/%s", version.NetbirdVersion(), runtime.GOOS, runtime.GOARCH)
|
||||||
|
|
||||||
// Check if client was not shut down in a clean way and restore DNS config if required.
|
// Check if client was not shut down in a clean way and restore DNS config if required.
|
||||||
// Otherwise, we might not be able to connect to the management server to retrieve new config.
|
// Otherwise, we might not be able to connect to the management server to retrieve new config.
|
||||||
if err := dns.CheckUncleanShutdown(config.WgIface); err != nil {
|
if err := dns.CheckUncleanShutdown(c.config.WgIface); err != nil {
|
||||||
log.Errorf("checking unclean shutdown error: %s", err)
|
log.Errorf("checking unclean shutdown error: %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -111,7 +136,7 @@ func runClient(
|
|||||||
Clock: backoff.SystemClock,
|
Clock: backoff.SystemClock,
|
||||||
}
|
}
|
||||||
|
|
||||||
state := CtxGetState(ctx)
|
state := CtxGetState(c.ctx)
|
||||||
defer func() {
|
defer func() {
|
||||||
s, err := state.Status()
|
s, err := state.Status()
|
||||||
if err != nil || s != StatusNeedsLogin {
|
if err != nil || s != StatusNeedsLogin {
|
||||||
@@ -120,52 +145,49 @@ func runClient(
|
|||||||
}()
|
}()
|
||||||
|
|
||||||
wrapErr := state.Wrap
|
wrapErr := state.Wrap
|
||||||
myPrivateKey, err := wgtypes.ParseKey(config.PrivateKey)
|
myPrivateKey, err := wgtypes.ParseKey(c.config.PrivateKey)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("failed parsing Wireguard key %s: [%s]", config.PrivateKey, err.Error())
|
log.Errorf("failed parsing Wireguard key %s: [%s]", c.config.PrivateKey, err.Error())
|
||||||
return wrapErr(err)
|
return wrapErr(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
var mgmTlsEnabled bool
|
var mgmTlsEnabled bool
|
||||||
if config.ManagementURL.Scheme == "https" {
|
if c.config.ManagementURL.Scheme == "https" {
|
||||||
mgmTlsEnabled = true
|
mgmTlsEnabled = true
|
||||||
}
|
}
|
||||||
|
|
||||||
publicSSHKey, err := ssh.GeneratePublicKey([]byte(config.SSHKey))
|
publicSSHKey, err := ssh.GeneratePublicKey([]byte(c.config.SSHKey))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
defer statusRecorder.ClientStop()
|
defer c.statusRecorder.ClientStop()
|
||||||
operation := func() error {
|
operation := func() error {
|
||||||
// if context cancelled we not start new backoff cycle
|
// if context cancelled we not start new backoff cycle
|
||||||
select {
|
if c.isContextCancelled() {
|
||||||
case <-ctx.Done():
|
|
||||||
return nil
|
return nil
|
||||||
default:
|
|
||||||
}
|
}
|
||||||
|
|
||||||
state.Set(StatusConnecting)
|
state.Set(StatusConnecting)
|
||||||
|
|
||||||
engineCtx, cancel := context.WithCancel(ctx)
|
engineCtx, cancel := context.WithCancel(c.ctx)
|
||||||
defer func() {
|
defer func() {
|
||||||
statusRecorder.MarkManagementDisconnected(state.err)
|
c.statusRecorder.MarkManagementDisconnected(state.err)
|
||||||
statusRecorder.CleanLocalPeerState()
|
c.statusRecorder.CleanLocalPeerState()
|
||||||
cancel()
|
cancel()
|
||||||
}()
|
}()
|
||||||
|
|
||||||
log.Debugf("connecting to the Management service %s", config.ManagementURL.Host)
|
log.Debugf("connecting to the Management service %s", c.config.ManagementURL.Host)
|
||||||
mgmClient, err := mgm.NewClient(engineCtx, config.ManagementURL.Host, myPrivateKey, mgmTlsEnabled)
|
mgmClient, err := mgm.NewClient(engineCtx, c.config.ManagementURL.Host, myPrivateKey, mgmTlsEnabled)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return wrapErr(gstatus.Errorf(codes.FailedPrecondition, "failed connecting to Management Service : %s", err))
|
return wrapErr(gstatus.Errorf(codes.FailedPrecondition, "failed connecting to Management Service : %s", err))
|
||||||
}
|
}
|
||||||
mgmNotifier := statusRecorderToMgmConnStateNotifier(statusRecorder)
|
mgmNotifier := statusRecorderToMgmConnStateNotifier(c.statusRecorder)
|
||||||
mgmClient.SetConnStateListener(mgmNotifier)
|
mgmClient.SetConnStateListener(mgmNotifier)
|
||||||
|
|
||||||
log.Debugf("connected to the Management service %s", config.ManagementURL.Host)
|
log.Debugf("connected to the Management service %s", c.config.ManagementURL.Host)
|
||||||
defer func() {
|
defer func() {
|
||||||
err = mgmClient.Close()
|
if err = mgmClient.Close(); err != nil {
|
||||||
if err != nil {
|
|
||||||
log.Warnf("failed to close the Management service client %v", err)
|
log.Warnf("failed to close the Management service client %v", err)
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
@@ -180,7 +202,7 @@ func runClient(
|
|||||||
}
|
}
|
||||||
return wrapErr(err)
|
return wrapErr(err)
|
||||||
}
|
}
|
||||||
statusRecorder.MarkManagementConnected()
|
c.statusRecorder.MarkManagementConnected()
|
||||||
|
|
||||||
localPeerState := peer.LocalPeerState{
|
localPeerState := peer.LocalPeerState{
|
||||||
IP: loginResp.GetPeerConfig().GetAddress(),
|
IP: loginResp.GetPeerConfig().GetAddress(),
|
||||||
@@ -188,19 +210,18 @@ func runClient(
|
|||||||
KernelInterface: iface.WireGuardModuleIsLoaded(),
|
KernelInterface: iface.WireGuardModuleIsLoaded(),
|
||||||
FQDN: loginResp.GetPeerConfig().GetFqdn(),
|
FQDN: loginResp.GetPeerConfig().GetFqdn(),
|
||||||
}
|
}
|
||||||
|
c.statusRecorder.UpdateLocalPeerState(localPeerState)
|
||||||
statusRecorder.UpdateLocalPeerState(localPeerState)
|
|
||||||
|
|
||||||
signalURL := fmt.Sprintf("%s://%s",
|
signalURL := fmt.Sprintf("%s://%s",
|
||||||
strings.ToLower(loginResp.GetWiretrusteeConfig().GetSignal().GetProtocol().String()),
|
strings.ToLower(loginResp.GetWiretrusteeConfig().GetSignal().GetProtocol().String()),
|
||||||
loginResp.GetWiretrusteeConfig().GetSignal().GetUri(),
|
loginResp.GetWiretrusteeConfig().GetSignal().GetUri(),
|
||||||
)
|
)
|
||||||
|
|
||||||
statusRecorder.UpdateSignalAddress(signalURL)
|
c.statusRecorder.UpdateSignalAddress(signalURL)
|
||||||
|
|
||||||
statusRecorder.MarkSignalDisconnected(nil)
|
c.statusRecorder.MarkSignalDisconnected(nil)
|
||||||
defer func() {
|
defer func() {
|
||||||
statusRecorder.MarkSignalDisconnected(state.err)
|
c.statusRecorder.MarkSignalDisconnected(state.err)
|
||||||
}()
|
}()
|
||||||
|
|
||||||
// with the global Wiretrustee config in hand connect (just a connection, no stream yet) Signal
|
// with the global Wiretrustee config in hand connect (just a connection, no stream yet) Signal
|
||||||
@@ -216,35 +237,53 @@ func runClient(
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
signalNotifier := statusRecorderToSignalConnStateNotifier(statusRecorder)
|
signalNotifier := statusRecorderToSignalConnStateNotifier(c.statusRecorder)
|
||||||
signalClient.SetConnStateListener(signalNotifier)
|
signalClient.SetConnStateListener(signalNotifier)
|
||||||
|
|
||||||
statusRecorder.MarkSignalConnected()
|
c.statusRecorder.MarkSignalConnected()
|
||||||
|
|
||||||
|
relayURL, token := parseRelayInfo(loginResp)
|
||||||
|
relayManager := relayClient.NewManager(engineCtx, relayURL, myPrivateKey.PublicKey().String())
|
||||||
|
if relayURL != "" {
|
||||||
|
if token != nil {
|
||||||
|
relayManager.UpdateToken(token)
|
||||||
|
}
|
||||||
|
log.Infof("connecting to the Relay service %s", relayURL)
|
||||||
|
if err = relayManager.Serve(); err != nil {
|
||||||
|
log.Error(err)
|
||||||
|
return wrapErr(err)
|
||||||
|
}
|
||||||
|
c.statusRecorder.SetRelayMgr(relayManager)
|
||||||
|
}
|
||||||
|
|
||||||
peerConfig := loginResp.GetPeerConfig()
|
peerConfig := loginResp.GetPeerConfig()
|
||||||
|
|
||||||
engineConfig, err := createEngineConfig(myPrivateKey, config, peerConfig)
|
engineConfig, err := createEngineConfig(myPrivateKey, c.config, peerConfig)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error(err)
|
log.Error(err)
|
||||||
return wrapErr(err)
|
return wrapErr(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
engine := NewEngineWithProbes(engineCtx, cancel, signalClient, mgmClient, engineConfig, mobileDependency, statusRecorder, mgmProbe, signalProbe, relayProbe, wgProbe)
|
checks := loginResp.GetChecks()
|
||||||
err = engine.Start()
|
|
||||||
if err != nil {
|
c.engineMutex.Lock()
|
||||||
|
c.engine = NewEngineWithProbes(engineCtx, cancel, signalClient, mgmClient, relayManager, engineConfig, mobileDependency, c.statusRecorder, mgmProbe, signalProbe, relayProbe, wgProbe, checks)
|
||||||
|
c.engineMutex.Unlock()
|
||||||
|
|
||||||
|
if err := c.engine.Start(); err != nil {
|
||||||
log.Errorf("error while starting Netbird Connection Engine: %s", err)
|
log.Errorf("error while starting Netbird Connection Engine: %s", err)
|
||||||
return wrapErr(err)
|
return wrapErr(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Print("Netbird engine started, my IP is: ", peerConfig.Address)
|
log.Infof("Netbird engine started, the IP is: %s", peerConfig.GetAddress())
|
||||||
state.Set(StatusConnected)
|
state.Set(StatusConnected)
|
||||||
|
|
||||||
<-engineCtx.Done()
|
<-engineCtx.Done()
|
||||||
statusRecorder.ClientTeardown()
|
c.statusRecorder.ClientTeardown()
|
||||||
|
|
||||||
backOff.Reset()
|
backOff.Reset()
|
||||||
|
|
||||||
err = engine.Stop()
|
err = c.engine.Stop()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("failed stopping engine %v", err)
|
log.Errorf("failed stopping engine %v", err)
|
||||||
return wrapErr(err)
|
return wrapErr(err)
|
||||||
@@ -259,7 +298,7 @@ func runClient(
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
statusRecorder.ClientStart()
|
c.statusRecorder.ClientStart()
|
||||||
err = backoff.Retry(operation, backOff)
|
err = backoff.Retry(operation, backOff)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Debugf("exiting client retry loop due to unrecoverable error: %s", err)
|
log.Debugf("exiting client retry loop due to unrecoverable error: %s", err)
|
||||||
@@ -271,8 +310,48 @@ func runClient(
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func parseRelayInfo(resp *mgmProto.LoginResponse) (string, *hmac.Token) {
|
||||||
|
msg := resp.GetWiretrusteeConfig().GetRelay()
|
||||||
|
if msg == nil {
|
||||||
|
return "", nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var url string
|
||||||
|
if msg.GetUrls() != nil && len(msg.GetUrls()) > 0 {
|
||||||
|
url = msg.GetUrls()[0]
|
||||||
|
}
|
||||||
|
|
||||||
|
token := &hmac.Token{
|
||||||
|
Payload: msg.GetTokenPayload(),
|
||||||
|
Signature: msg.GetTokenSignature(),
|
||||||
|
}
|
||||||
|
|
||||||
|
return url, token
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *ConnectClient) Engine() *Engine {
|
||||||
|
var e *Engine
|
||||||
|
c.engineMutex.Lock()
|
||||||
|
e = c.engine
|
||||||
|
c.engineMutex.Unlock()
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *ConnectClient) isContextCancelled() bool {
|
||||||
|
select {
|
||||||
|
case <-c.ctx.Done():
|
||||||
|
return true
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// createEngineConfig converts configuration received from Management Service to EngineConfig
|
// createEngineConfig converts configuration received from Management Service to EngineConfig
|
||||||
func createEngineConfig(key wgtypes.Key, config *Config, peerConfig *mgmProto.PeerConfig) (*EngineConfig, error) {
|
func createEngineConfig(key wgtypes.Key, config *Config, peerConfig *mgmProto.PeerConfig) (*EngineConfig, error) {
|
||||||
|
nm := false
|
||||||
|
if config.NetworkMonitor != nil {
|
||||||
|
nm = *config.NetworkMonitor
|
||||||
|
}
|
||||||
engineConf := &EngineConfig{
|
engineConf := &EngineConfig{
|
||||||
WgIfaceName: config.WgIface,
|
WgIfaceName: config.WgIface,
|
||||||
WgAddr: peerConfig.Address,
|
WgAddr: peerConfig.Address,
|
||||||
@@ -280,12 +359,14 @@ func createEngineConfig(key wgtypes.Key, config *Config, peerConfig *mgmProto.Pe
|
|||||||
DisableIPv6Discovery: config.DisableIPv6Discovery,
|
DisableIPv6Discovery: config.DisableIPv6Discovery,
|
||||||
WgPrivateKey: key,
|
WgPrivateKey: key,
|
||||||
WgPort: config.WgPort,
|
WgPort: config.WgPort,
|
||||||
|
NetworkMonitor: nm,
|
||||||
SSHKey: []byte(config.SSHKey),
|
SSHKey: []byte(config.SSHKey),
|
||||||
NATExternalIPs: config.NATExternalIPs,
|
NATExternalIPs: config.NATExternalIPs,
|
||||||
CustomDNSAddress: config.CustomDNSAddress,
|
CustomDNSAddress: config.CustomDNSAddress,
|
||||||
RosenpassEnabled: config.RosenpassEnabled,
|
RosenpassEnabled: config.RosenpassEnabled,
|
||||||
RosenpassPermissive: config.RosenpassPermissive,
|
RosenpassPermissive: config.RosenpassPermissive,
|
||||||
ServerSSHAllowed: util.ReturnBoolWithDefaultTrue(config.ServerSSHAllowed),
|
ServerSSHAllowed: util.ReturnBoolWithDefaultTrue(config.ServerSSHAllowed),
|
||||||
|
DNSRouteInterval: config.DNSRouteInterval,
|
||||||
}
|
}
|
||||||
|
|
||||||
if config.PreSharedKey != "" {
|
if config.PreSharedKey != "" {
|
||||||
@@ -296,6 +377,15 @@ func createEngineConfig(key wgtypes.Key, config *Config, peerConfig *mgmProto.Pe
|
|||||||
engineConf.PreSharedKey = &preSharedKey
|
engineConf.PreSharedKey = &preSharedKey
|
||||||
}
|
}
|
||||||
|
|
||||||
|
port, err := freePort(config.WgPort)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
if port != config.WgPort {
|
||||||
|
log.Infof("using %d as wireguard port: %d is in use", port, config.WgPort)
|
||||||
|
}
|
||||||
|
engineConf.WgPort = port
|
||||||
|
|
||||||
return engineConf, nil
|
return engineConf, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -345,3 +435,20 @@ func statusRecorderToSignalConnStateNotifier(statusRecorder *peer.Status) signal
|
|||||||
notifier, _ := sri.(signal.ConnStateNotifier)
|
notifier, _ := sri.(signal.ConnStateNotifier)
|
||||||
return notifier
|
return notifier
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func freePort(start int) (int, error) {
|
||||||
|
addr := net.UDPAddr{}
|
||||||
|
if start == 0 {
|
||||||
|
start = iface.DefaultWgPort
|
||||||
|
}
|
||||||
|
for x := start; x <= 65535; x++ {
|
||||||
|
addr.Port = x
|
||||||
|
conn, err := net.ListenUDP("udp", &addr)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
conn.Close()
|
||||||
|
return x, nil
|
||||||
|
}
|
||||||
|
return 0, errors.New("no free ports")
|
||||||
|
}
|
||||||
|
|||||||
57
client/internal/connect_test.go
Normal file
57
client/internal/connect_test.go
Normal file
@@ -0,0 +1,57 @@
|
|||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func Test_freePort(t *testing.T) {
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
port int
|
||||||
|
want int
|
||||||
|
wantErr bool
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "available",
|
||||||
|
port: 51820,
|
||||||
|
want: 51820,
|
||||||
|
wantErr: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "notavailable",
|
||||||
|
port: 51830,
|
||||||
|
want: 51831,
|
||||||
|
wantErr: false,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "noports",
|
||||||
|
port: 65535,
|
||||||
|
want: 0,
|
||||||
|
wantErr: true,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
for _, tt := range tests {
|
||||||
|
|
||||||
|
c1, err := net.ListenUDP("udp", &net.UDPAddr{Port: 51830})
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("freePort error = %v", err)
|
||||||
|
}
|
||||||
|
c2, err := net.ListenUDP("udp", &net.UDPAddr{Port: 65535})
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("freePort error = %v", err)
|
||||||
|
}
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
got, err := freePort(tt.port)
|
||||||
|
if (err != nil) != tt.wantErr {
|
||||||
|
t.Errorf("freePort() error = %v, wantErr %v", err, tt.wantErr)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if got != tt.want {
|
||||||
|
t.Errorf("freePort() = %v, want %v", got, tt.want)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
c1.Close()
|
||||||
|
c2.Close()
|
||||||
|
}
|
||||||
|
}
|
||||||
6
client/internal/dns/consts_freebsd.go
Normal file
6
client/internal/dns/consts_freebsd.go
Normal file
@@ -0,0 +1,6 @@
|
|||||||
|
package dns
|
||||||
|
|
||||||
|
const (
|
||||||
|
fileUncleanShutdownResolvConfLocation = "/var/db/netbird/resolv.conf"
|
||||||
|
fileUncleanShutdownManagerTypeLocation = "/var/db/netbird/manager"
|
||||||
|
)
|
||||||
8
client/internal/dns/consts_linux.go
Normal file
8
client/internal/dns/consts_linux.go
Normal file
@@ -0,0 +1,8 @@
|
|||||||
|
//go:build !android
|
||||||
|
|
||||||
|
package dns
|
||||||
|
|
||||||
|
const (
|
||||||
|
fileUncleanShutdownResolvConfLocation = "/var/lib/netbird/resolv.conf"
|
||||||
|
fileUncleanShutdownManagerTypeLocation = "/var/lib/netbird/manager"
|
||||||
|
)
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
//go:build !android
|
//go:build (linux && !android) || freebsd
|
||||||
|
|
||||||
package dns
|
package dns
|
||||||
|
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
//go:build !android
|
//go:build (linux && !android) || freebsd
|
||||||
|
|
||||||
package dns
|
package dns
|
||||||
|
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
//go:build !android
|
//go:build (linux && !android) || freebsd
|
||||||
|
|
||||||
package dns
|
package dns
|
||||||
|
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
//go:build !android
|
//go:build (linux && !android) || freebsd
|
||||||
|
|
||||||
package dns
|
package dns
|
||||||
|
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
//go:build !android
|
//go:build (linux && !android) || freebsd
|
||||||
|
|
||||||
package dns
|
package dns
|
||||||
|
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
//go:build !android
|
//go:build (linux && !android) || freebsd
|
||||||
|
|
||||||
package dns
|
package dns
|
||||||
|
|
||||||
@@ -47,24 +47,20 @@ func (f *fileConfigurator) supportCustomPort() bool {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (f *fileConfigurator) applyDNSConfig(config HostDNSConfig) error {
|
func (f *fileConfigurator) applyDNSConfig(config HostDNSConfig) error {
|
||||||
backupFileExist := false
|
backupFileExist := f.isBackupFileExist()
|
||||||
_, err := os.Stat(fileDefaultResolvConfBackupLocation)
|
|
||||||
if err == nil {
|
|
||||||
backupFileExist = true
|
|
||||||
}
|
|
||||||
|
|
||||||
if !config.RouteAll {
|
if !config.RouteAll {
|
||||||
if backupFileExist {
|
if backupFileExist {
|
||||||
err = f.restore()
|
f.repair.stopWatchFileChanges()
|
||||||
|
err := f.restore()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("unable to configure DNS for this peer using file manager without a Primary nameserver group. Restoring the original file return err: %w", err)
|
return fmt.Errorf("restoring the original resolv.conf file return err: %w", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return fmt.Errorf("unable to configure DNS for this peer using file manager without a nameserver group with all domains configured")
|
return fmt.Errorf("unable to configure DNS for this peer using file manager without a nameserver group with all domains configured")
|
||||||
}
|
}
|
||||||
|
|
||||||
if !backupFileExist {
|
if !backupFileExist {
|
||||||
err = f.backup()
|
err := f.backup()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("unable to backup the resolv.conf file: %w", err)
|
return fmt.Errorf("unable to backup the resolv.conf file: %w", err)
|
||||||
}
|
}
|
||||||
@@ -184,6 +180,11 @@ func (f *fileConfigurator) restoreUncleanShutdownDNS(storedDNSAddress *netip.Add
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (f *fileConfigurator) isBackupFileExist() bool {
|
||||||
|
_, err := os.Stat(fileDefaultResolvConfBackupLocation)
|
||||||
|
return err == nil
|
||||||
|
}
|
||||||
|
|
||||||
func restoreResolvConfFile() error {
|
func restoreResolvConfFile() error {
|
||||||
log.Debugf("restoring unclean shutdown: restoring %s from %s", defaultResolvConfPath, fileUncleanShutdownResolvConfLocation)
|
log.Debugf("restoring unclean shutdown: restoring %s from %s", defaultResolvConfPath, fileUncleanShutdownResolvConfLocation)
|
||||||
|
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
//go:build !android
|
//go:build (linux && !android) || freebsd
|
||||||
|
|
||||||
package dns
|
package dns
|
||||||
|
|
||||||
@@ -15,6 +15,12 @@ type hostManager interface {
|
|||||||
restoreUncleanShutdownDNS(storedDNSAddress *netip.Addr) error
|
restoreUncleanShutdownDNS(storedDNSAddress *netip.Addr) error
|
||||||
}
|
}
|
||||||
|
|
||||||
|
type SystemDNSSettings struct {
|
||||||
|
Domains []string
|
||||||
|
ServerIP string
|
||||||
|
ServerPort int
|
||||||
|
}
|
||||||
|
|
||||||
type HostDNSConfig struct {
|
type HostDNSConfig struct {
|
||||||
Domains []DomainConfig `json:"domains"`
|
Domains []DomainConfig `json:"domains"`
|
||||||
RouteAll bool `json:"routeAll"`
|
RouteAll bool `json:"routeAll"`
|
||||||
|
|||||||
@@ -7,6 +7,7 @@ import (
|
|||||||
"bytes"
|
"bytes"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
"net"
|
||||||
"net/netip"
|
"net/netip"
|
||||||
"os/exec"
|
"os/exec"
|
||||||
"strconv"
|
"strconv"
|
||||||
@@ -18,7 +19,7 @@ import (
|
|||||||
const (
|
const (
|
||||||
netbirdDNSStateKeyFormat = "State:/Network/Service/NetBird-%s/DNS"
|
netbirdDNSStateKeyFormat = "State:/Network/Service/NetBird-%s/DNS"
|
||||||
globalIPv4State = "State:/Network/Global/IPv4"
|
globalIPv4State = "State:/Network/Global/IPv4"
|
||||||
primaryServiceSetupKeyFormat = "Setup:/Network/Service/%s/DNS"
|
primaryServiceStateKeyFormat = "State:/Network/Service/%s/DNS"
|
||||||
keySupplementalMatchDomains = "SupplementalMatchDomains"
|
keySupplementalMatchDomains = "SupplementalMatchDomains"
|
||||||
keySupplementalMatchDomainsNoSearch = "SupplementalMatchDomainsNoSearch"
|
keySupplementalMatchDomainsNoSearch = "SupplementalMatchDomainsNoSearch"
|
||||||
keyServerAddresses = "ServerAddresses"
|
keyServerAddresses = "ServerAddresses"
|
||||||
@@ -28,12 +29,12 @@ const (
|
|||||||
scutilPath = "/usr/sbin/scutil"
|
scutilPath = "/usr/sbin/scutil"
|
||||||
searchSuffix = "Search"
|
searchSuffix = "Search"
|
||||||
matchSuffix = "Match"
|
matchSuffix = "Match"
|
||||||
|
localSuffix = "Local"
|
||||||
)
|
)
|
||||||
|
|
||||||
type systemConfigurator struct {
|
type systemConfigurator struct {
|
||||||
// primaryServiceID primary interface in the system. AKA the interface with the default route
|
createdKeys map[string]struct{}
|
||||||
primaryServiceID string
|
systemDNSSettings SystemDNSSettings
|
||||||
createdKeys map[string]struct{}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func newHostManager() (hostManager, error) {
|
func newHostManager() (hostManager, error) {
|
||||||
@@ -49,20 +50,6 @@ func (s *systemConfigurator) supportCustomPort() bool {
|
|||||||
func (s *systemConfigurator) applyDNSConfig(config HostDNSConfig) error {
|
func (s *systemConfigurator) applyDNSConfig(config HostDNSConfig) error {
|
||||||
var err error
|
var err error
|
||||||
|
|
||||||
if config.RouteAll {
|
|
||||||
err = s.addDNSSetupForAll(config.ServerIP, config.ServerPort)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("add dns setup for all: %w", err)
|
|
||||||
}
|
|
||||||
} else if s.primaryServiceID != "" {
|
|
||||||
err = s.removeKeyFromSystemConfig(getKeyWithInput(primaryServiceSetupKeyFormat, s.primaryServiceID))
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("remote key from system config: %w", err)
|
|
||||||
}
|
|
||||||
s.primaryServiceID = ""
|
|
||||||
log.Infof("removed %s:%d as main DNS resolver for this peer", config.ServerIP, config.ServerPort)
|
|
||||||
}
|
|
||||||
|
|
||||||
// create a file for unclean shutdown detection
|
// create a file for unclean shutdown detection
|
||||||
if err := createUncleanShutdownIndicator(); err != nil {
|
if err := createUncleanShutdownIndicator(); err != nil {
|
||||||
log.Errorf("failed to create unclean shutdown file: %s", err)
|
log.Errorf("failed to create unclean shutdown file: %s", err)
|
||||||
@@ -73,6 +60,19 @@ func (s *systemConfigurator) applyDNSConfig(config HostDNSConfig) error {
|
|||||||
matchDomains []string
|
matchDomains []string
|
||||||
)
|
)
|
||||||
|
|
||||||
|
err = s.recordSystemDNSSettings(true)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("unable to update record of System's DNS config: %s", err.Error())
|
||||||
|
}
|
||||||
|
|
||||||
|
if config.RouteAll {
|
||||||
|
searchDomains = append(searchDomains, "\"\"")
|
||||||
|
err = s.addLocalDNS()
|
||||||
|
if err != nil {
|
||||||
|
log.Infof("failed to enable split DNS")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
for _, dConf := range config.Domains {
|
for _, dConf := range config.Domains {
|
||||||
if dConf.Disabled {
|
if dConf.Disabled {
|
||||||
continue
|
continue
|
||||||
@@ -110,23 +110,17 @@ func (s *systemConfigurator) applyDNSConfig(config HostDNSConfig) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (s *systemConfigurator) restoreHostDNS() error {
|
func (s *systemConfigurator) restoreHostDNS() error {
|
||||||
lines := ""
|
keys := s.getRemovableKeysWithDefaults()
|
||||||
for key := range s.createdKeys {
|
for _, key := range keys {
|
||||||
lines += buildRemoveKeyOperation(key)
|
|
||||||
keyType := "search"
|
keyType := "search"
|
||||||
if strings.Contains(key, matchSuffix) {
|
if strings.Contains(key, matchSuffix) {
|
||||||
keyType = "match"
|
keyType = "match"
|
||||||
}
|
}
|
||||||
log.Infof("removing %s domains from system", keyType)
|
log.Infof("removing %s domains from system", keyType)
|
||||||
}
|
err := s.removeKeyFromSystemConfig(key)
|
||||||
if s.primaryServiceID != "" {
|
if err != nil {
|
||||||
lines += buildRemoveKeyOperation(getKeyWithInput(primaryServiceSetupKeyFormat, s.primaryServiceID))
|
log.Errorf("failed to remove %s domains from system: %s", keyType, err)
|
||||||
log.Infof("restoring DNS resolver configuration for system")
|
}
|
||||||
}
|
|
||||||
_, err := runSystemConfigCommand(wrapCommand(lines))
|
|
||||||
if err != nil {
|
|
||||||
log.Errorf("got an error while cleaning the system configuration: %s", err)
|
|
||||||
return fmt.Errorf("clean system: %w", err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := removeUncleanShutdownIndicator(); err != nil {
|
if err := removeUncleanShutdownIndicator(); err != nil {
|
||||||
@@ -136,6 +130,19 @@ func (s *systemConfigurator) restoreHostDNS() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (s *systemConfigurator) getRemovableKeysWithDefaults() []string {
|
||||||
|
if len(s.createdKeys) == 0 {
|
||||||
|
// return defaults for startup calls
|
||||||
|
return []string{getKeyWithInput(netbirdDNSStateKeyFormat, searchSuffix), getKeyWithInput(netbirdDNSStateKeyFormat, matchSuffix)}
|
||||||
|
}
|
||||||
|
|
||||||
|
keys := make([]string, 0, len(s.createdKeys))
|
||||||
|
for key := range s.createdKeys {
|
||||||
|
keys = append(keys, key)
|
||||||
|
}
|
||||||
|
return keys
|
||||||
|
}
|
||||||
|
|
||||||
func (s *systemConfigurator) removeKeyFromSystemConfig(key string) error {
|
func (s *systemConfigurator) removeKeyFromSystemConfig(key string) error {
|
||||||
line := buildRemoveKeyOperation(key)
|
line := buildRemoveKeyOperation(key)
|
||||||
_, err := runSystemConfigCommand(wrapCommand(line))
|
_, err := runSystemConfigCommand(wrapCommand(line))
|
||||||
@@ -148,6 +155,97 @@ func (s *systemConfigurator) removeKeyFromSystemConfig(key string) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (s *systemConfigurator) addLocalDNS() error {
|
||||||
|
if s.systemDNSSettings.ServerIP == "" || len(s.systemDNSSettings.Domains) == 0 {
|
||||||
|
err := s.recordSystemDNSSettings(true)
|
||||||
|
log.Errorf("Unable to get system DNS configuration")
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
localKey := getKeyWithInput(netbirdDNSStateKeyFormat, localSuffix)
|
||||||
|
if s.systemDNSSettings.ServerIP != "" && len(s.systemDNSSettings.Domains) != 0 {
|
||||||
|
err := s.addSearchDomains(localKey, strings.Join(s.systemDNSSettings.Domains, " "), s.systemDNSSettings.ServerIP, s.systemDNSSettings.ServerPort)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("couldn't add local network DNS conf: %w", err)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
log.Info("Not enabling local DNS server")
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *systemConfigurator) recordSystemDNSSettings(force bool) error {
|
||||||
|
if s.systemDNSSettings.ServerIP != "" && len(s.systemDNSSettings.Domains) != 0 && !force {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
systemDNSSettings, err := s.getSystemDNSSettings()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("couldn't get current DNS config: %w", err)
|
||||||
|
}
|
||||||
|
s.systemDNSSettings = systemDNSSettings
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *systemConfigurator) getSystemDNSSettings() (SystemDNSSettings, error) {
|
||||||
|
primaryServiceKey, _, err := s.getPrimaryService()
|
||||||
|
if err != nil || primaryServiceKey == "" {
|
||||||
|
return SystemDNSSettings{}, fmt.Errorf("couldn't find the primary service key: %w", err)
|
||||||
|
}
|
||||||
|
dnsServiceKey := getKeyWithInput(primaryServiceStateKeyFormat, primaryServiceKey)
|
||||||
|
line := buildCommandLine("show", dnsServiceKey, "")
|
||||||
|
stdinCommands := wrapCommand(line)
|
||||||
|
|
||||||
|
b, err := runSystemConfigCommand(stdinCommands)
|
||||||
|
if err != nil {
|
||||||
|
return SystemDNSSettings{}, fmt.Errorf("sending the command: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
var dnsSettings SystemDNSSettings
|
||||||
|
inSearchDomainsArray := false
|
||||||
|
inServerAddressesArray := false
|
||||||
|
|
||||||
|
scanner := bufio.NewScanner(bytes.NewReader(b))
|
||||||
|
for scanner.Scan() {
|
||||||
|
line := strings.TrimSpace(scanner.Text())
|
||||||
|
switch {
|
||||||
|
case strings.HasPrefix(line, "DomainName :"):
|
||||||
|
domainName := strings.TrimSpace(strings.Split(line, ":")[1])
|
||||||
|
dnsSettings.Domains = append(dnsSettings.Domains, domainName)
|
||||||
|
case line == "SearchDomains : <array> {":
|
||||||
|
inSearchDomainsArray = true
|
||||||
|
continue
|
||||||
|
case line == "ServerAddresses : <array> {":
|
||||||
|
inServerAddressesArray = true
|
||||||
|
continue
|
||||||
|
case line == "}":
|
||||||
|
inSearchDomainsArray = false
|
||||||
|
inServerAddressesArray = false
|
||||||
|
}
|
||||||
|
|
||||||
|
if inSearchDomainsArray {
|
||||||
|
searchDomain := strings.Split(line, " : ")[1]
|
||||||
|
dnsSettings.Domains = append(dnsSettings.Domains, searchDomain)
|
||||||
|
} else if inServerAddressesArray {
|
||||||
|
address := strings.Split(line, " : ")[1]
|
||||||
|
if ip := net.ParseIP(address); ip != nil && ip.To4() != nil {
|
||||||
|
dnsSettings.ServerIP = address
|
||||||
|
inServerAddressesArray = false // Stop reading after finding the first IPv4 address
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := scanner.Err(); err != nil {
|
||||||
|
return dnsSettings, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// default to 53 port
|
||||||
|
dnsSettings.ServerPort = 53
|
||||||
|
|
||||||
|
return dnsSettings, nil
|
||||||
|
}
|
||||||
|
|
||||||
func (s *systemConfigurator) addSearchDomains(key, domains string, ip string, port int) error {
|
func (s *systemConfigurator) addSearchDomains(key, domains string, ip string, port int) error {
|
||||||
err := s.addDNSState(key, domains, ip, port, true)
|
err := s.addDNSState(key, domains, ip, port, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -194,23 +292,6 @@ func (s *systemConfigurator) addDNSState(state, domains, dnsServer string, port
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *systemConfigurator) addDNSSetupForAll(dnsServer string, port int) error {
|
|
||||||
primaryServiceKey, existingNameserver, err := s.getPrimaryService()
|
|
||||||
if err != nil || primaryServiceKey == "" {
|
|
||||||
return fmt.Errorf("couldn't find the primary service key: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
err = s.addDNSSetup(getKeyWithInput(primaryServiceSetupKeyFormat, primaryServiceKey), dnsServer, port, existingNameserver)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("add dns setup: %w", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
log.Infof("configured %s:%d as main DNS resolver for this peer", dnsServer, port)
|
|
||||||
s.primaryServiceID = primaryServiceKey
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *systemConfigurator) getPrimaryService() (string, string, error) {
|
func (s *systemConfigurator) getPrimaryService() (string, string, error) {
|
||||||
line := buildCommandLine("show", globalIPv4State, "")
|
line := buildCommandLine("show", globalIPv4State, "")
|
||||||
stdinCommands := wrapCommand(line)
|
stdinCommands := wrapCommand(line)
|
||||||
@@ -239,19 +320,6 @@ func (s *systemConfigurator) getPrimaryService() (string, string, error) {
|
|||||||
return primaryService, router, nil
|
return primaryService, router, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *systemConfigurator) addDNSSetup(setupKey, dnsServer string, port int, existingDNSServer string) error {
|
|
||||||
lines := buildAddCommandLine(keySupplementalMatchDomainsNoSearch, digitSymbol+strconv.Itoa(0))
|
|
||||||
lines += buildAddCommandLine(keyServerAddresses, arraySymbol+dnsServer+" "+existingDNSServer)
|
|
||||||
lines += buildAddCommandLine(keyServerPort, digitSymbol+strconv.Itoa(port))
|
|
||||||
addDomainCommand := buildCreateStateWithOperation(setupKey, lines)
|
|
||||||
stdinCommands := wrapCommand(addDomainCommand)
|
|
||||||
_, err := runSystemConfigCommand(stdinCommands)
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("applying dns setup, error: %w", err)
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s *systemConfigurator) restoreUncleanShutdownDNS(*netip.Addr) error {
|
func (s *systemConfigurator) restoreUncleanShutdownDNS(*netip.Addr) error {
|
||||||
if err := s.restoreHostDNS(); err != nil {
|
if err := s.restoreHostDNS(); err != nil {
|
||||||
return fmt.Errorf("restoring dns via scutil: %w", err)
|
return fmt.Errorf("restoring dns via scutil: %w", err)
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
//go:build !android
|
//go:build (linux && !android) || freebsd
|
||||||
|
|
||||||
package dns
|
package dns
|
||||||
|
|
||||||
@@ -108,7 +108,7 @@ func getOSDNSManagerType() (osManagerType, error) {
|
|||||||
if strings.Contains(text, "NetworkManager") && isDbusListenerRunning(networkManagerDest, networkManagerDbusObjectNode) && isNetworkManagerSupported() {
|
if strings.Contains(text, "NetworkManager") && isDbusListenerRunning(networkManagerDest, networkManagerDbusObjectNode) && isNetworkManagerSupported() {
|
||||||
return networkManager, nil
|
return networkManager, nil
|
||||||
}
|
}
|
||||||
if strings.Contains(text, "systemd-resolved") && isDbusListenerRunning(systemdResolvedDest, systemdDbusObjectNode) {
|
if strings.Contains(text, "systemd-resolved") && isSystemdResolvedRunning() {
|
||||||
if checkStub() {
|
if checkStub() {
|
||||||
return systemdManager, nil
|
return systemdManager, nil
|
||||||
} else {
|
} else {
|
||||||
@@ -116,16 +116,10 @@ func getOSDNSManagerType() (osManagerType, error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
if strings.Contains(text, "resolvconf") {
|
if strings.Contains(text, "resolvconf") {
|
||||||
if isDbusListenerRunning(systemdResolvedDest, systemdDbusObjectNode) {
|
if isSystemdResolveConfMode() {
|
||||||
var value string
|
return systemdManager, nil
|
||||||
err = getSystemdDbusProperty(systemdDbusResolvConfModeProperty, &value)
|
|
||||||
if err == nil {
|
|
||||||
if value == systemdDbusResolvConfModeForeign {
|
|
||||||
return systemdManager, nil
|
|
||||||
}
|
|
||||||
}
|
|
||||||
log.Errorf("got an error while checking systemd resolv conf mode, error: %s", err)
|
|
||||||
}
|
}
|
||||||
|
|
||||||
return resolvConfManager, nil
|
return resolvConfManager, nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
63
client/internal/dns/hosts_dns_holder.go
Normal file
63
client/internal/dns/hosts_dns_holder.go
Normal file
@@ -0,0 +1,63 @@
|
|||||||
|
package dns
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"net/netip"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
log "github.com/sirupsen/logrus"
|
||||||
|
)
|
||||||
|
|
||||||
|
type hostsDNSHolder struct {
|
||||||
|
unprotectedDNSList map[string]struct{}
|
||||||
|
mutex sync.RWMutex
|
||||||
|
}
|
||||||
|
|
||||||
|
func newHostsDNSHolder() *hostsDNSHolder {
|
||||||
|
return &hostsDNSHolder{
|
||||||
|
unprotectedDNSList: make(map[string]struct{}),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *hostsDNSHolder) set(list []string) {
|
||||||
|
h.mutex.Lock()
|
||||||
|
h.unprotectedDNSList = make(map[string]struct{})
|
||||||
|
for _, dns := range list {
|
||||||
|
dnsAddr, err := h.normalizeAddress(dns)
|
||||||
|
if err != nil {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
h.unprotectedDNSList[dnsAddr] = struct{}{}
|
||||||
|
}
|
||||||
|
h.mutex.Unlock()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *hostsDNSHolder) get() map[string]struct{} {
|
||||||
|
h.mutex.RLock()
|
||||||
|
l := h.unprotectedDNSList
|
||||||
|
h.mutex.RUnlock()
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
|
||||||
|
//nolint:unused
|
||||||
|
func (h *hostsDNSHolder) isContain(upstream string) bool {
|
||||||
|
h.mutex.RLock()
|
||||||
|
defer h.mutex.RUnlock()
|
||||||
|
|
||||||
|
_, ok := h.unprotectedDNSList[upstream]
|
||||||
|
return ok
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *hostsDNSHolder) normalizeAddress(addr string) (string, error) {
|
||||||
|
a, err := netip.ParseAddr(addr)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("invalid upstream IP address: %s, error: %s", addr, err)
|
||||||
|
return "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
if a.Is4() {
|
||||||
|
return fmt.Sprintf("%s:53", addr), nil
|
||||||
|
} else {
|
||||||
|
return fmt.Sprintf("[%s]:53", addr), nil
|
||||||
|
}
|
||||||
|
}
|
||||||
@@ -31,6 +31,8 @@ func (d *localResolver) ServeDNS(w dns.ResponseWriter, r *dns.Msg) {
|
|||||||
response := d.lookupRecord(r)
|
response := d.lookupRecord(r)
|
||||||
if response != nil {
|
if response != nil {
|
||||||
replyMessage.Answer = append(replyMessage.Answer, response)
|
replyMessage.Answer = append(replyMessage.Answer, response)
|
||||||
|
} else {
|
||||||
|
replyMessage.Rcode = dns.RcodeNameError
|
||||||
}
|
}
|
||||||
|
|
||||||
err := w.WriteMsg(replyMessage)
|
err := w.WriteMsg(replyMessage)
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
//go:build !android
|
//go:build (linux && !android) || freebsd
|
||||||
|
|
||||||
package dns
|
package dns
|
||||||
|
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
//go:build !android
|
//go:build (linux && !android) || freebsd
|
||||||
|
|
||||||
package dns
|
package dns
|
||||||
|
|
||||||
@@ -4,6 +4,7 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/netip"
|
"net/netip"
|
||||||
|
"runtime"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
@@ -54,9 +55,8 @@ type DefaultServer struct {
|
|||||||
currentConfig HostDNSConfig
|
currentConfig HostDNSConfig
|
||||||
|
|
||||||
// permanent related properties
|
// permanent related properties
|
||||||
permanent bool
|
permanent bool
|
||||||
hostsDnsList []string
|
hostsDNSHolder *hostsDNSHolder
|
||||||
hostsDnsListLock sync.Mutex
|
|
||||||
|
|
||||||
// make sense on mobile only
|
// make sense on mobile only
|
||||||
searchDomainNotifier *notifier
|
searchDomainNotifier *notifier
|
||||||
@@ -94,7 +94,7 @@ func NewDefaultServer(
|
|||||||
|
|
||||||
var dnsService service
|
var dnsService service
|
||||||
if wgInterface.IsUserspaceBind() {
|
if wgInterface.IsUserspaceBind() {
|
||||||
dnsService = newServiceViaMemory(wgInterface)
|
dnsService = NewServiceViaMemory(wgInterface)
|
||||||
} else {
|
} else {
|
||||||
dnsService = newServiceViaListener(wgInterface, addrPort)
|
dnsService = newServiceViaListener(wgInterface, addrPort)
|
||||||
}
|
}
|
||||||
@@ -112,9 +112,9 @@ func NewDefaultServerPermanentUpstream(
|
|||||||
statusRecorder *peer.Status,
|
statusRecorder *peer.Status,
|
||||||
) *DefaultServer {
|
) *DefaultServer {
|
||||||
log.Debugf("host dns address list is: %v", hostsDnsList)
|
log.Debugf("host dns address list is: %v", hostsDnsList)
|
||||||
ds := newDefaultServer(ctx, wgInterface, newServiceViaMemory(wgInterface), statusRecorder)
|
ds := newDefaultServer(ctx, wgInterface, NewServiceViaMemory(wgInterface), statusRecorder)
|
||||||
|
ds.hostsDNSHolder.set(hostsDnsList)
|
||||||
ds.permanent = true
|
ds.permanent = true
|
||||||
ds.hostsDnsList = hostsDnsList
|
|
||||||
ds.addHostRootZone()
|
ds.addHostRootZone()
|
||||||
ds.currentConfig = dnsConfigToHostDNSConfig(config, ds.service.RuntimeIP(), ds.service.RuntimePort())
|
ds.currentConfig = dnsConfigToHostDNSConfig(config, ds.service.RuntimeIP(), ds.service.RuntimePort())
|
||||||
ds.searchDomainNotifier = newNotifier(ds.SearchDomains())
|
ds.searchDomainNotifier = newNotifier(ds.SearchDomains())
|
||||||
@@ -130,7 +130,7 @@ func NewDefaultServerIos(
|
|||||||
iosDnsManager IosDnsManager,
|
iosDnsManager IosDnsManager,
|
||||||
statusRecorder *peer.Status,
|
statusRecorder *peer.Status,
|
||||||
) *DefaultServer {
|
) *DefaultServer {
|
||||||
ds := newDefaultServer(ctx, wgInterface, newServiceViaMemory(wgInterface), statusRecorder)
|
ds := newDefaultServer(ctx, wgInterface, NewServiceViaMemory(wgInterface), statusRecorder)
|
||||||
ds.iosDnsManager = iosDnsManager
|
ds.iosDnsManager = iosDnsManager
|
||||||
return ds
|
return ds
|
||||||
}
|
}
|
||||||
@@ -147,6 +147,7 @@ func newDefaultServer(ctx context.Context, wgInterface WGIface, dnsService servi
|
|||||||
},
|
},
|
||||||
wgInterface: wgInterface,
|
wgInterface: wgInterface,
|
||||||
statusRecorder: statusRecorder,
|
statusRecorder: statusRecorder,
|
||||||
|
hostsDNSHolder: newHostsDNSHolder(),
|
||||||
}
|
}
|
||||||
|
|
||||||
return defaultServer
|
return defaultServer
|
||||||
@@ -202,10 +203,8 @@ func (s *DefaultServer) Stop() {
|
|||||||
// OnUpdatedHostDNSServer update the DNS servers addresses for root zones
|
// OnUpdatedHostDNSServer update the DNS servers addresses for root zones
|
||||||
// It will be applied if the mgm server do not enforce DNS settings for root zone
|
// It will be applied if the mgm server do not enforce DNS settings for root zone
|
||||||
func (s *DefaultServer) OnUpdatedHostDNSServer(hostsDnsList []string) {
|
func (s *DefaultServer) OnUpdatedHostDNSServer(hostsDnsList []string) {
|
||||||
s.hostsDnsListLock.Lock()
|
s.hostsDNSHolder.set(hostsDnsList)
|
||||||
defer s.hostsDnsListLock.Unlock()
|
|
||||||
|
|
||||||
s.hostsDnsList = hostsDnsList
|
|
||||||
_, ok := s.dnsMuxMap[nbdns.RootZone]
|
_, ok := s.dnsMuxMap[nbdns.RootZone]
|
||||||
if ok {
|
if ok {
|
||||||
log.Debugf("on new host DNS config but skip to apply it")
|
log.Debugf("on new host DNS config but skip to apply it")
|
||||||
@@ -278,9 +277,15 @@ func (s *DefaultServer) SearchDomains() []string {
|
|||||||
// ProbeAvailability tests each upstream group's servers for availability
|
// ProbeAvailability tests each upstream group's servers for availability
|
||||||
// and deactivates the group if no server responds
|
// and deactivates the group if no server responds
|
||||||
func (s *DefaultServer) ProbeAvailability() {
|
func (s *DefaultServer) ProbeAvailability() {
|
||||||
|
var wg sync.WaitGroup
|
||||||
for _, mux := range s.dnsMuxMap {
|
for _, mux := range s.dnsMuxMap {
|
||||||
mux.probeAvailability()
|
wg.Add(1)
|
||||||
|
go func(mux handlerWithStop) {
|
||||||
|
defer wg.Done()
|
||||||
|
mux.probeAvailability()
|
||||||
|
}(mux)
|
||||||
}
|
}
|
||||||
|
wg.Wait()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *DefaultServer) applyConfiguration(update nbdns.Config) error {
|
func (s *DefaultServer) applyConfiguration(update nbdns.Config) error {
|
||||||
@@ -368,6 +373,7 @@ func (s *DefaultServer) buildUpstreamHandlerUpdate(nameServerGroups []*nbdns.Nam
|
|||||||
s.wgInterface.Address().IP,
|
s.wgInterface.Address().IP,
|
||||||
s.wgInterface.Address().Network,
|
s.wgInterface.Address().Network,
|
||||||
s.statusRecorder,
|
s.statusRecorder,
|
||||||
|
s.hostsDNSHolder,
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, fmt.Errorf("unable to create a new upstream resolver, error: %v", err)
|
return nil, fmt.Errorf("unable to create a new upstream resolver, error: %v", err)
|
||||||
@@ -446,9 +452,7 @@ func (s *DefaultServer) updateMux(muxUpdates []muxUpdate) {
|
|||||||
_, found := muxUpdateMap[key]
|
_, found := muxUpdateMap[key]
|
||||||
if !found {
|
if !found {
|
||||||
if !isContainRootUpdate && key == nbdns.RootZone {
|
if !isContainRootUpdate && key == nbdns.RootZone {
|
||||||
s.hostsDnsListLock.Lock()
|
|
||||||
s.addHostRootZone()
|
s.addHostRootZone()
|
||||||
s.hostsDnsListLock.Unlock()
|
|
||||||
existingHandler.stop()
|
existingHandler.stop()
|
||||||
} else {
|
} else {
|
||||||
existingHandler.stop()
|
existingHandler.stop()
|
||||||
@@ -506,6 +510,7 @@ func (s *DefaultServer) upstreamCallbacks(
|
|||||||
if nsGroup.Primary {
|
if nsGroup.Primary {
|
||||||
removeIndex[nbdns.RootZone] = -1
|
removeIndex[nbdns.RootZone] = -1
|
||||||
s.currentConfig.RouteAll = false
|
s.currentConfig.RouteAll = false
|
||||||
|
s.service.DeregisterMux(nbdns.RootZone)
|
||||||
}
|
}
|
||||||
|
|
||||||
for i, item := range s.currentConfig.Domains {
|
for i, item := range s.currentConfig.Domains {
|
||||||
@@ -515,10 +520,15 @@ func (s *DefaultServer) upstreamCallbacks(
|
|||||||
removeIndex[item.Domain] = i
|
removeIndex[item.Domain] = i
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := s.hostManager.applyDNSConfig(s.currentConfig); err != nil {
|
if err := s.hostManager.applyDNSConfig(s.currentConfig); err != nil {
|
||||||
l.Errorf("Failed to apply nameserver deactivation on the host: %v", err)
|
l.Errorf("Failed to apply nameserver deactivation on the host: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if runtime.GOOS == "android" && nsGroup.Primary && len(s.hostsDNSHolder.get()) > 0 {
|
||||||
|
s.addHostRootZone()
|
||||||
|
}
|
||||||
|
|
||||||
s.updateNSState(nsGroup, err, false)
|
s.updateNSState(nsGroup, err, false)
|
||||||
|
|
||||||
}
|
}
|
||||||
@@ -539,6 +549,7 @@ func (s *DefaultServer) upstreamCallbacks(
|
|||||||
|
|
||||||
if nsGroup.Primary {
|
if nsGroup.Primary {
|
||||||
s.currentConfig.RouteAll = true
|
s.currentConfig.RouteAll = true
|
||||||
|
s.service.RegisterMux(nbdns.RootZone, handler)
|
||||||
}
|
}
|
||||||
if err := s.hostManager.applyDNSConfig(s.currentConfig); err != nil {
|
if err := s.hostManager.applyDNSConfig(s.currentConfig); err != nil {
|
||||||
l.WithError(err).Error("reactivate temporary disabled nameserver group, DNS update apply")
|
l.WithError(err).Error("reactivate temporary disabled nameserver group, DNS update apply")
|
||||||
@@ -556,25 +567,16 @@ func (s *DefaultServer) addHostRootZone() {
|
|||||||
s.wgInterface.Address().IP,
|
s.wgInterface.Address().IP,
|
||||||
s.wgInterface.Address().Network,
|
s.wgInterface.Address().Network,
|
||||||
s.statusRecorder,
|
s.statusRecorder,
|
||||||
|
s.hostsDNSHolder,
|
||||||
)
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("unable to create a new upstream resolver, error: %v", err)
|
log.Errorf("unable to create a new upstream resolver, error: %v", err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
handler.upstreamServers = make([]string, len(s.hostsDnsList))
|
|
||||||
for n, ua := range s.hostsDnsList {
|
|
||||||
a, err := netip.ParseAddr(ua)
|
|
||||||
if err != nil {
|
|
||||||
log.Errorf("invalid upstream IP address: %s, error: %s", ua, err)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
ipString := ua
|
handler.upstreamServers = make([]string, 0)
|
||||||
if !a.Is4() {
|
for k := range s.hostsDNSHolder.get() {
|
||||||
ipString = fmt.Sprintf("[%s]", ua)
|
handler.upstreamServers = append(handler.upstreamServers, k)
|
||||||
}
|
|
||||||
|
|
||||||
handler.upstreamServers[n] = fmt.Sprintf("%s:53", ipString)
|
|
||||||
}
|
}
|
||||||
handler.deactivate = func(error) {}
|
handler.deactivate = func(error) {}
|
||||||
handler.reactivate = func() {}
|
handler.reactivate = func() {}
|
||||||
|
|||||||
@@ -39,6 +39,10 @@ func (w *mocWGIface) Address() iface.WGAddress {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (w *mocWGIface) ToInterface() *net.Interface {
|
||||||
|
panic("implement me")
|
||||||
|
}
|
||||||
|
|
||||||
func (w *mocWGIface) GetFilter() iface.PacketFilter {
|
func (w *mocWGIface) GetFilter() iface.PacketFilter {
|
||||||
return w.filter
|
return w.filter
|
||||||
}
|
}
|
||||||
@@ -261,7 +265,7 @@ func TestUpdateDNSServer(t *testing.T) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
wgIface, err := iface.NewWGIFace(fmt.Sprintf("utun230%d", n), fmt.Sprintf("100.66.100.%d/32", n+1), 33100, privKey.String(), iface.DefaultMTU, newNet, nil)
|
wgIface, err := iface.NewWGIFace(fmt.Sprintf("utun230%d", n), fmt.Sprintf("100.66.100.%d/32", n+1), 33100, privKey.String(), iface.DefaultMTU, newNet, nil, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
@@ -339,7 +343,7 @@ func TestDNSFakeResolverHandleUpdates(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
privKey, _ := wgtypes.GeneratePrivateKey()
|
privKey, _ := wgtypes.GeneratePrivateKey()
|
||||||
wgIface, err := iface.NewWGIFace("utun2301", "100.66.100.1/32", 33100, privKey.String(), iface.DefaultMTU, newNet, nil)
|
wgIface, err := iface.NewWGIFace("utun2301", "100.66.100.1/32", 33100, privKey.String(), iface.DefaultMTU, newNet, nil, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Errorf("build interface wireguard: %v", err)
|
t.Errorf("build interface wireguard: %v", err)
|
||||||
return
|
return
|
||||||
@@ -530,7 +534,7 @@ func TestDNSServerStartStop(t *testing.T) {
|
|||||||
func TestDNSServerUpstreamDeactivateCallback(t *testing.T) {
|
func TestDNSServerUpstreamDeactivateCallback(t *testing.T) {
|
||||||
hostManager := &mockHostConfigurator{}
|
hostManager := &mockHostConfigurator{}
|
||||||
server := DefaultServer{
|
server := DefaultServer{
|
||||||
service: newServiceViaMemory(&mocWGIface{}),
|
service: NewServiceViaMemory(&mocWGIface{}),
|
||||||
localResolver: &localResolver{
|
localResolver: &localResolver{
|
||||||
registeredMap: make(registrationMap),
|
registeredMap: make(registrationMap),
|
||||||
},
|
},
|
||||||
@@ -750,6 +754,11 @@ func TestDNSPermanent_matchOnly(t *testing.T) {
|
|||||||
NSType: nbdns.UDPNameServerType,
|
NSType: nbdns.UDPNameServerType,
|
||||||
Port: 53,
|
Port: 53,
|
||||||
},
|
},
|
||||||
|
{
|
||||||
|
IP: netip.MustParseAddr("9.9.9.9"),
|
||||||
|
NSType: nbdns.UDPNameServerType,
|
||||||
|
Port: 53,
|
||||||
|
},
|
||||||
},
|
},
|
||||||
Domains: []string{"customdomain.com"},
|
Domains: []string{"customdomain.com"},
|
||||||
Primary: false,
|
Primary: false,
|
||||||
@@ -792,7 +801,7 @@ func createWgInterfaceWithBind(t *testing.T) (*iface.WGIface, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
privKey, _ := wgtypes.GeneratePrivateKey()
|
privKey, _ := wgtypes.GeneratePrivateKey()
|
||||||
wgIface, err := iface.NewWGIFace("utun2301", "100.66.100.2/24", 33100, privKey.String(), iface.DefaultMTU, newNet, nil)
|
wgIface, err := iface.NewWGIFace("utun2301", "100.66.100.2/24", 33100, privKey.String(), iface.DefaultMTU, newNet, nil, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatalf("build interface wireguard: %v", err)
|
t.Fatalf("build interface wireguard: %v", err)
|
||||||
return nil, err
|
return nil, err
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
//go:build !android
|
//go:build (linux && !android) || freebsd
|
||||||
|
|
||||||
package dns
|
package dns
|
||||||
|
|
||||||
@@ -128,6 +128,9 @@ func (s *serviceViaListener) RuntimeIP() string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (s *serviceViaListener) setListenerStatus(running bool) {
|
func (s *serviceViaListener) setListenerStatus(running bool) {
|
||||||
|
s.listenerFlagLock.Lock()
|
||||||
|
defer s.listenerFlagLock.Unlock()
|
||||||
|
|
||||||
s.listenerIsRunning = running
|
s.listenerIsRunning = running
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -12,7 +12,7 @@ import (
|
|||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
)
|
)
|
||||||
|
|
||||||
type serviceViaMemory struct {
|
type ServiceViaMemory struct {
|
||||||
wgInterface WGIface
|
wgInterface WGIface
|
||||||
dnsMux *dns.ServeMux
|
dnsMux *dns.ServeMux
|
||||||
runtimeIP string
|
runtimeIP string
|
||||||
@@ -22,8 +22,8 @@ type serviceViaMemory struct {
|
|||||||
listenerFlagLock sync.Mutex
|
listenerFlagLock sync.Mutex
|
||||||
}
|
}
|
||||||
|
|
||||||
func newServiceViaMemory(wgIface WGIface) *serviceViaMemory {
|
func NewServiceViaMemory(wgIface WGIface) *ServiceViaMemory {
|
||||||
s := &serviceViaMemory{
|
s := &ServiceViaMemory{
|
||||||
wgInterface: wgIface,
|
wgInterface: wgIface,
|
||||||
dnsMux: dns.NewServeMux(),
|
dnsMux: dns.NewServeMux(),
|
||||||
|
|
||||||
@@ -33,7 +33,7 @@ func newServiceViaMemory(wgIface WGIface) *serviceViaMemory {
|
|||||||
return s
|
return s
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *serviceViaMemory) Listen() error {
|
func (s *ServiceViaMemory) Listen() error {
|
||||||
s.listenerFlagLock.Lock()
|
s.listenerFlagLock.Lock()
|
||||||
defer s.listenerFlagLock.Unlock()
|
defer s.listenerFlagLock.Unlock()
|
||||||
|
|
||||||
@@ -52,7 +52,7 @@ func (s *serviceViaMemory) Listen() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *serviceViaMemory) Stop() {
|
func (s *ServiceViaMemory) Stop() {
|
||||||
s.listenerFlagLock.Lock()
|
s.listenerFlagLock.Lock()
|
||||||
defer s.listenerFlagLock.Unlock()
|
defer s.listenerFlagLock.Unlock()
|
||||||
|
|
||||||
@@ -67,23 +67,23 @@ func (s *serviceViaMemory) Stop() {
|
|||||||
s.listenerIsRunning = false
|
s.listenerIsRunning = false
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *serviceViaMemory) RegisterMux(pattern string, handler dns.Handler) {
|
func (s *ServiceViaMemory) RegisterMux(pattern string, handler dns.Handler) {
|
||||||
s.dnsMux.Handle(pattern, handler)
|
s.dnsMux.Handle(pattern, handler)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *serviceViaMemory) DeregisterMux(pattern string) {
|
func (s *ServiceViaMemory) DeregisterMux(pattern string) {
|
||||||
s.dnsMux.HandleRemove(pattern)
|
s.dnsMux.HandleRemove(pattern)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *serviceViaMemory) RuntimePort() int {
|
func (s *ServiceViaMemory) RuntimePort() int {
|
||||||
return s.runtimePort
|
return s.runtimePort
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *serviceViaMemory) RuntimeIP() string {
|
func (s *ServiceViaMemory) RuntimeIP() string {
|
||||||
return s.runtimeIP
|
return s.runtimeIP
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *serviceViaMemory) filterDNSTraffic() (string, error) {
|
func (s *ServiceViaMemory) filterDNSTraffic() (string, error) {
|
||||||
filter := s.wgInterface.GetFilter()
|
filter := s.wgInterface.GetFilter()
|
||||||
if filter == nil {
|
if filter == nil {
|
||||||
return "", fmt.Errorf("can't set DNS filter, filter not initialized")
|
return "", fmt.Errorf("can't set DNS filter, filter not initialized")
|
||||||
|
|||||||
20
client/internal/dns/systemd_freebsd.go
Normal file
20
client/internal/dns/systemd_freebsd.go
Normal file
@@ -0,0 +1,20 @@
|
|||||||
|
package dns
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
)
|
||||||
|
|
||||||
|
var errNotImplemented = errors.New("not implemented")
|
||||||
|
|
||||||
|
func newSystemdDbusConfigurator(wgInterface string) (hostManager, error) {
|
||||||
|
return nil, fmt.Errorf("systemd dns management: %w on freebsd", errNotImplemented)
|
||||||
|
}
|
||||||
|
|
||||||
|
func isSystemdResolvedRunning() bool {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func isSystemdResolveConfMode() bool {
|
||||||
|
return false
|
||||||
|
}
|
||||||
@@ -242,3 +242,25 @@ func getSystemdDbusProperty(property string, store any) error {
|
|||||||
|
|
||||||
return v.Store(store)
|
return v.Store(store)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func isSystemdResolvedRunning() bool {
|
||||||
|
return isDbusListenerRunning(systemdResolvedDest, systemdDbusObjectNode)
|
||||||
|
}
|
||||||
|
|
||||||
|
func isSystemdResolveConfMode() bool {
|
||||||
|
if !isDbusListenerRunning(systemdResolvedDest, systemdDbusObjectNode) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
var value string
|
||||||
|
if err := getSystemdDbusProperty(systemdDbusResolvConfModeProperty, &value); err != nil {
|
||||||
|
log.Errorf("got an error while checking systemd resolv conf mode, error: %s", err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if value == systemdDbusResolvConfModeForeign {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|||||||
@@ -1,4 +1,4 @@
|
|||||||
//go:build !android
|
//go:build (linux && !android) || freebsd
|
||||||
|
|
||||||
package dns
|
package dns
|
||||||
|
|
||||||
@@ -14,11 +14,6 @@ import (
|
|||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
|
||||||
fileUncleanShutdownResolvConfLocation = "/var/lib/netbird/resolv.conf"
|
|
||||||
fileUncleanShutdownManagerTypeLocation = "/var/lib/netbird/manager"
|
|
||||||
)
|
|
||||||
|
|
||||||
func CheckUncleanShutdown(wgIface string) error {
|
func CheckUncleanShutdown(wgIface string) error {
|
||||||
if _, err := os.Stat(fileUncleanShutdownResolvConfLocation); err != nil {
|
if _, err := os.Stat(fileUncleanShutdownResolvConfLocation); err != nil {
|
||||||
if errors.Is(err, fs.ErrNotExist) {
|
if errors.Is(err, fs.ErrNotExist) {
|
||||||
@@ -5,7 +5,6 @@ import (
|
|||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net"
|
"net"
|
||||||
"runtime"
|
|
||||||
"sync"
|
"sync"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
"time"
|
"time"
|
||||||
@@ -25,7 +24,7 @@ const (
|
|||||||
probeTimeout = 2 * time.Second
|
probeTimeout = 2 * time.Second
|
||||||
)
|
)
|
||||||
|
|
||||||
const testRecord = "."
|
const testRecord = "com."
|
||||||
|
|
||||||
type upstreamClient interface {
|
type upstreamClient interface {
|
||||||
exchange(ctx context.Context, upstream string, r *dns.Msg) (*dns.Msg, time.Duration, error)
|
exchange(ctx context.Context, upstream string, r *dns.Msg) (*dns.Msg, time.Duration, error)
|
||||||
@@ -43,6 +42,7 @@ type upstreamResolverBase struct {
|
|||||||
upstreamServers []string
|
upstreamServers []string
|
||||||
disabled bool
|
disabled bool
|
||||||
failsCount atomic.Int32
|
failsCount atomic.Int32
|
||||||
|
successCount atomic.Int32
|
||||||
failsTillDeact int32
|
failsTillDeact int32
|
||||||
mutex sync.Mutex
|
mutex sync.Mutex
|
||||||
reactivatePeriod time.Duration
|
reactivatePeriod time.Duration
|
||||||
@@ -79,6 +79,11 @@ func (u *upstreamResolverBase) ServeDNS(w dns.ResponseWriter, r *dns.Msg) {
|
|||||||
}()
|
}()
|
||||||
|
|
||||||
log.WithField("question", r.Question[0]).Trace("received an upstream question")
|
log.WithField("question", r.Question[0]).Trace("received an upstream question")
|
||||||
|
// set the AuthenticatedData flag and the EDNS0 buffer size to 4096 bytes to support larger dns records
|
||||||
|
if r.Extra == nil {
|
||||||
|
r.SetEdns0(4096, false)
|
||||||
|
r.MsgHdr.AuthenticatedData = true
|
||||||
|
}
|
||||||
|
|
||||||
select {
|
select {
|
||||||
case <-u.ctx.Done():
|
case <-u.ctx.Done():
|
||||||
@@ -120,6 +125,7 @@ func (u *upstreamResolverBase) ServeDNS(w dns.ResponseWriter, r *dns.Msg) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
u.successCount.Add(1)
|
||||||
log.Tracef("took %s to query the upstream %s", t, upstream)
|
log.Tracef("took %s to query the upstream %s", t, upstream)
|
||||||
|
|
||||||
err = w.WriteMsg(rm)
|
err = w.WriteMsg(rm)
|
||||||
@@ -168,6 +174,11 @@ func (u *upstreamResolverBase) probeAvailability() {
|
|||||||
default:
|
default:
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// avoid probe if upstreams could resolve at least one query and fails count is less than failsTillDeact
|
||||||
|
if u.successCount.Load() > 0 && u.failsCount.Load() < u.failsTillDeact {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
var success bool
|
var success bool
|
||||||
var mu sync.Mutex
|
var mu sync.Mutex
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
@@ -179,7 +190,7 @@ func (u *upstreamResolverBase) probeAvailability() {
|
|||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
go func() {
|
go func() {
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
err := u.testNameserver(upstream)
|
err := u.testNameserver(upstream, 500*time.Millisecond)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
errors = multierror.Append(errors, err)
|
errors = multierror.Append(errors, err)
|
||||||
log.Warnf("probing upstream nameserver %s: %s", upstream, err)
|
log.Warnf("probing upstream nameserver %s: %s", upstream, err)
|
||||||
@@ -220,7 +231,7 @@ func (u *upstreamResolverBase) waitUntilResponse() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
for _, upstream := range u.upstreamServers {
|
for _, upstream := range u.upstreamServers {
|
||||||
if err := u.testNameserver(upstream); err != nil {
|
if err := u.testNameserver(upstream, probeTimeout); err != nil {
|
||||||
log.Tracef("upstream check for %s: %s", upstream, err)
|
log.Tracef("upstream check for %s: %s", upstream, err)
|
||||||
} else {
|
} else {
|
||||||
// at least one upstream server is available, stop probing
|
// at least one upstream server is available, stop probing
|
||||||
@@ -240,6 +251,7 @@ func (u *upstreamResolverBase) waitUntilResponse() {
|
|||||||
|
|
||||||
log.Infof("upstreams %s are responsive again. Adding them back to system", u.upstreamServers)
|
log.Infof("upstreams %s are responsive again. Adding them back to system", u.upstreamServers)
|
||||||
u.failsCount.Store(0)
|
u.failsCount.Store(0)
|
||||||
|
u.successCount.Add(1)
|
||||||
u.reactivate()
|
u.reactivate()
|
||||||
u.disabled = false
|
u.disabled = false
|
||||||
}
|
}
|
||||||
@@ -260,17 +272,15 @@ func (u *upstreamResolverBase) disable(err error) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// todo test the deactivation logic, it seems to affect the client
|
log.Warnf("Upstream resolving is Disabled for %v", reactivatePeriod)
|
||||||
if runtime.GOOS != "ios" {
|
u.successCount.Store(0)
|
||||||
log.Warnf("Upstream resolving is Disabled for %v", reactivatePeriod)
|
u.deactivate(err)
|
||||||
u.deactivate(err)
|
u.disabled = true
|
||||||
u.disabled = true
|
go u.waitUntilResponse()
|
||||||
go u.waitUntilResponse()
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func (u *upstreamResolverBase) testNameserver(server string) error {
|
func (u *upstreamResolverBase) testNameserver(server string, timeout time.Duration) error {
|
||||||
ctx, cancel := context.WithTimeout(u.ctx, probeTimeout)
|
ctx, cancel := context.WithTimeout(u.ctx, timeout)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
r := new(dns.Msg).SetQuestion(testRecord, dns.TypeSOA)
|
r := new(dns.Msg).SetQuestion(testRecord, dns.TypeSOA)
|
||||||
|
|||||||
84
client/internal/dns/upstream_android.go
Normal file
84
client/internal/dns/upstream_android.go
Normal file
@@ -0,0 +1,84 @@
|
|||||||
|
package dns
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"net"
|
||||||
|
"syscall"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/miekg/dns"
|
||||||
|
|
||||||
|
"github.com/netbirdio/netbird/client/internal/peer"
|
||||||
|
nbnet "github.com/netbirdio/netbird/util/net"
|
||||||
|
)
|
||||||
|
|
||||||
|
type upstreamResolver struct {
|
||||||
|
*upstreamResolverBase
|
||||||
|
hostsDNSHolder *hostsDNSHolder
|
||||||
|
}
|
||||||
|
|
||||||
|
// newUpstreamResolver in Android we need to distinguish the DNS servers to available through VPN or outside of VPN
|
||||||
|
// In case if the assigned DNS address is available only in the protected network then the resolver will time out at the
|
||||||
|
// first time, and we need to wait for a while to start to use again the proper DNS resolver.
|
||||||
|
func newUpstreamResolver(
|
||||||
|
ctx context.Context,
|
||||||
|
_ string,
|
||||||
|
_ net.IP,
|
||||||
|
_ *net.IPNet,
|
||||||
|
statusRecorder *peer.Status,
|
||||||
|
hostsDNSHolder *hostsDNSHolder,
|
||||||
|
) (*upstreamResolver, error) {
|
||||||
|
upstreamResolverBase := newUpstreamResolverBase(ctx, statusRecorder)
|
||||||
|
c := &upstreamResolver{
|
||||||
|
upstreamResolverBase: upstreamResolverBase,
|
||||||
|
hostsDNSHolder: hostsDNSHolder,
|
||||||
|
}
|
||||||
|
upstreamResolverBase.upstreamClient = c
|
||||||
|
return c, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// exchange in case of Android if the upstream is a local resolver then we do not need to mark the socket as protected.
|
||||||
|
// In other case the DNS resolvation goes through the VPN, so we need to force to use the
|
||||||
|
func (u *upstreamResolver) exchange(ctx context.Context, upstream string, r *dns.Msg) (rm *dns.Msg, t time.Duration, err error) {
|
||||||
|
if u.isLocalResolver(upstream) {
|
||||||
|
return u.exchangeWithoutVPN(ctx, upstream, r)
|
||||||
|
} else {
|
||||||
|
return u.exchangeWithinVPN(ctx, upstream, r)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (u *upstreamResolver) exchangeWithinVPN(ctx context.Context, upstream string, r *dns.Msg) (rm *dns.Msg, t time.Duration, err error) {
|
||||||
|
upstreamExchangeClient := &dns.Client{}
|
||||||
|
return upstreamExchangeClient.ExchangeContext(ctx, r, upstream)
|
||||||
|
}
|
||||||
|
|
||||||
|
// exchangeWithoutVPN protect the UDP socket by Android SDK to avoid to goes through the VPN
|
||||||
|
func (u *upstreamResolver) exchangeWithoutVPN(ctx context.Context, upstream string, r *dns.Msg) (rm *dns.Msg, t time.Duration, err error) {
|
||||||
|
timeout := upstreamTimeout
|
||||||
|
if deadline, ok := ctx.Deadline(); ok {
|
||||||
|
timeout = time.Until(deadline)
|
||||||
|
}
|
||||||
|
dialTimeout := timeout
|
||||||
|
|
||||||
|
nbDialer := nbnet.NewDialer()
|
||||||
|
|
||||||
|
dialer := &net.Dialer{
|
||||||
|
Control: func(network, address string, c syscall.RawConn) error {
|
||||||
|
return nbDialer.Control(network, address, c)
|
||||||
|
},
|
||||||
|
Timeout: dialTimeout,
|
||||||
|
}
|
||||||
|
|
||||||
|
upstreamExchangeClient := &dns.Client{
|
||||||
|
Dialer: dialer,
|
||||||
|
}
|
||||||
|
|
||||||
|
return upstreamExchangeClient.Exchange(r, upstream)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (u *upstreamResolver) isLocalResolver(upstream string) bool {
|
||||||
|
if u.hostsDNSHolder.isContain(upstream) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
@@ -1,4 +1,4 @@
|
|||||||
//go:build !ios
|
//go:build !android && !ios
|
||||||
|
|
||||||
package dns
|
package dns
|
||||||
|
|
||||||
@@ -12,7 +12,7 @@ import (
|
|||||||
"github.com/netbirdio/netbird/client/internal/peer"
|
"github.com/netbirdio/netbird/client/internal/peer"
|
||||||
)
|
)
|
||||||
|
|
||||||
type upstreamResolverNonIOS struct {
|
type upstreamResolver struct {
|
||||||
*upstreamResolverBase
|
*upstreamResolverBase
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -22,16 +22,17 @@ func newUpstreamResolver(
|
|||||||
_ net.IP,
|
_ net.IP,
|
||||||
_ *net.IPNet,
|
_ *net.IPNet,
|
||||||
statusRecorder *peer.Status,
|
statusRecorder *peer.Status,
|
||||||
) (*upstreamResolverNonIOS, error) {
|
_ *hostsDNSHolder,
|
||||||
|
) (*upstreamResolver, error) {
|
||||||
upstreamResolverBase := newUpstreamResolverBase(ctx, statusRecorder)
|
upstreamResolverBase := newUpstreamResolverBase(ctx, statusRecorder)
|
||||||
nonIOS := &upstreamResolverNonIOS{
|
nonIOS := &upstreamResolver{
|
||||||
upstreamResolverBase: upstreamResolverBase,
|
upstreamResolverBase: upstreamResolverBase,
|
||||||
}
|
}
|
||||||
upstreamResolverBase.upstreamClient = nonIOS
|
upstreamResolverBase.upstreamClient = nonIOS
|
||||||
return nonIOS, nil
|
return nonIOS, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (u *upstreamResolverNonIOS) exchange(ctx context.Context, upstream string, r *dns.Msg) (rm *dns.Msg, t time.Duration, err error) {
|
func (u *upstreamResolver) exchange(ctx context.Context, upstream string, r *dns.Msg) (rm *dns.Msg, t time.Duration, err error) {
|
||||||
upstreamExchangeClient := &dns.Client{}
|
upstreamExchangeClient := &dns.Client{}
|
||||||
return upstreamExchangeClient.ExchangeContext(ctx, r, upstream)
|
return upstreamExchangeClient.ExchangeContext(ctx, r, upstream)
|
||||||
}
|
}
|
||||||
@@ -4,6 +4,7 @@ package dns
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"fmt"
|
||||||
"net"
|
"net"
|
||||||
"syscall"
|
"syscall"
|
||||||
"time"
|
"time"
|
||||||
@@ -17,9 +18,9 @@ import (
|
|||||||
|
|
||||||
type upstreamResolverIOS struct {
|
type upstreamResolverIOS struct {
|
||||||
*upstreamResolverBase
|
*upstreamResolverBase
|
||||||
lIP net.IP
|
lIP net.IP
|
||||||
lNet *net.IPNet
|
lNet *net.IPNet
|
||||||
iIndex int
|
interfaceName string
|
||||||
}
|
}
|
||||||
|
|
||||||
func newUpstreamResolver(
|
func newUpstreamResolver(
|
||||||
@@ -28,20 +29,15 @@ func newUpstreamResolver(
|
|||||||
ip net.IP,
|
ip net.IP,
|
||||||
net *net.IPNet,
|
net *net.IPNet,
|
||||||
statusRecorder *peer.Status,
|
statusRecorder *peer.Status,
|
||||||
|
_ *hostsDNSHolder,
|
||||||
) (*upstreamResolverIOS, error) {
|
) (*upstreamResolverIOS, error) {
|
||||||
upstreamResolverBase := newUpstreamResolverBase(ctx, statusRecorder)
|
upstreamResolverBase := newUpstreamResolverBase(ctx, statusRecorder)
|
||||||
|
|
||||||
index, err := getInterfaceIndex(interfaceName)
|
|
||||||
if err != nil {
|
|
||||||
log.Debugf("unable to get interface index for %s: %s", interfaceName, err)
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
ios := &upstreamResolverIOS{
|
ios := &upstreamResolverIOS{
|
||||||
upstreamResolverBase: upstreamResolverBase,
|
upstreamResolverBase: upstreamResolverBase,
|
||||||
lIP: ip,
|
lIP: ip,
|
||||||
lNet: net,
|
lNet: net,
|
||||||
iIndex: index,
|
interfaceName: interfaceName,
|
||||||
}
|
}
|
||||||
ios.upstreamClient = ios
|
ios.upstreamClient = ios
|
||||||
|
|
||||||
@@ -52,7 +48,7 @@ func (u *upstreamResolverIOS) exchange(ctx context.Context, upstream string, r *
|
|||||||
client := &dns.Client{}
|
client := &dns.Client{}
|
||||||
upstreamHost, _, err := net.SplitHostPort(upstream)
|
upstreamHost, _, err := net.SplitHostPort(upstream)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("error while parsing upstream host: %s", err)
|
return nil, 0, fmt.Errorf("error while parsing upstream host: %s", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
timeout := upstreamTimeout
|
timeout := upstreamTimeout
|
||||||
@@ -64,26 +60,35 @@ func (u *upstreamResolverIOS) exchange(ctx context.Context, upstream string, r *
|
|||||||
upstreamIP := net.ParseIP(upstreamHost)
|
upstreamIP := net.ParseIP(upstreamHost)
|
||||||
if u.lNet.Contains(upstreamIP) || net.IP.IsPrivate(upstreamIP) {
|
if u.lNet.Contains(upstreamIP) || net.IP.IsPrivate(upstreamIP) {
|
||||||
log.Debugf("using private client to query upstream: %s", upstream)
|
log.Debugf("using private client to query upstream: %s", upstream)
|
||||||
client = u.getClientPrivate(timeout)
|
client, err = GetClientPrivate(u.lIP, u.interfaceName, timeout)
|
||||||
|
if err != nil {
|
||||||
|
return nil, 0, fmt.Errorf("error while creating private client: %s", err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// Cannot use client.ExchangeContext because it overwrites our Dialer
|
// Cannot use client.ExchangeContext because it overwrites our Dialer
|
||||||
return client.Exchange(r, upstream)
|
return client.Exchange(r, upstream)
|
||||||
}
|
}
|
||||||
|
|
||||||
// getClientPrivate returns a new DNS client bound to the local IP address of the Netbird interface
|
// GetClientPrivate returns a new DNS client bound to the local IP address of the Netbird interface
|
||||||
// This method is needed for iOS
|
// This method is needed for iOS
|
||||||
func (u *upstreamResolverIOS) getClientPrivate(dialTimeout time.Duration) *dns.Client {
|
func GetClientPrivate(ip net.IP, interfaceName string, dialTimeout time.Duration) (*dns.Client, error) {
|
||||||
|
index, err := getInterfaceIndex(interfaceName)
|
||||||
|
if err != nil {
|
||||||
|
log.Debugf("unable to get interface index for %s: %s", interfaceName, err)
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
dialer := &net.Dialer{
|
dialer := &net.Dialer{
|
||||||
LocalAddr: &net.UDPAddr{
|
LocalAddr: &net.UDPAddr{
|
||||||
IP: u.lIP,
|
IP: ip,
|
||||||
Port: 0, // Let the OS pick a free port
|
Port: 0, // Let the OS pick a free port
|
||||||
},
|
},
|
||||||
Timeout: dialTimeout,
|
Timeout: dialTimeout,
|
||||||
Control: func(network, address string, c syscall.RawConn) error {
|
Control: func(network, address string, c syscall.RawConn) error {
|
||||||
var operr error
|
var operr error
|
||||||
fn := func(s uintptr) {
|
fn := func(s uintptr) {
|
||||||
operr = unix.SetsockoptInt(int(s), unix.IPPROTO_IP, unix.IP_BOUND_IF, u.iIndex)
|
operr = unix.SetsockoptInt(int(s), unix.IPPROTO_IP, unix.IP_BOUND_IF, index)
|
||||||
}
|
}
|
||||||
|
|
||||||
if err := c.Control(fn); err != nil {
|
if err := c.Control(fn); err != nil {
|
||||||
@@ -100,7 +105,7 @@ func (u *upstreamResolverIOS) getClientPrivate(dialTimeout time.Duration) *dns.C
|
|||||||
client := &dns.Client{
|
client := &dns.Client{
|
||||||
Dialer: dialer,
|
Dialer: dialer,
|
||||||
}
|
}
|
||||||
return client
|
return client, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func getInterfaceIndex(interfaceName string) (int, error) {
|
func getInterfaceIndex(interfaceName string) (int, error) {
|
||||||
|
|||||||
@@ -58,7 +58,7 @@ func TestUpstreamResolver_ServeDNS(t *testing.T) {
|
|||||||
for _, testCase := range testCases {
|
for _, testCase := range testCases {
|
||||||
t.Run(testCase.name, func(t *testing.T) {
|
t.Run(testCase.name, func(t *testing.T) {
|
||||||
ctx, cancel := context.WithCancel(context.TODO())
|
ctx, cancel := context.WithCancel(context.TODO())
|
||||||
resolver, _ := newUpstreamResolver(ctx, "", net.IP{}, &net.IPNet{}, nil)
|
resolver, _ := newUpstreamResolver(ctx, "", net.IP{}, &net.IPNet{}, nil, nil)
|
||||||
resolver.upstreamServers = testCase.InputServers
|
resolver.upstreamServers = testCase.InputServers
|
||||||
resolver.upstreamTimeout = testCase.timeout
|
resolver.upstreamTimeout = testCase.timeout
|
||||||
if testCase.cancelCTX {
|
if testCase.cancelCTX {
|
||||||
|
|||||||
@@ -2,12 +2,17 @@
|
|||||||
|
|
||||||
package dns
|
package dns
|
||||||
|
|
||||||
import "github.com/netbirdio/netbird/iface"
|
import (
|
||||||
|
"net"
|
||||||
|
|
||||||
|
"github.com/netbirdio/netbird/iface"
|
||||||
|
)
|
||||||
|
|
||||||
// WGIface defines subset methods of interface required for manager
|
// WGIface defines subset methods of interface required for manager
|
||||||
type WGIface interface {
|
type WGIface interface {
|
||||||
Name() string
|
Name() string
|
||||||
Address() iface.WGAddress
|
Address() iface.WGAddress
|
||||||
|
ToInterface() *net.Interface
|
||||||
IsUserspaceBind() bool
|
IsUserspaceBind() bool
|
||||||
GetFilter() iface.PacketFilter
|
GetFilter() iface.PacketFilter
|
||||||
GetDevice() *iface.DeviceWrapper
|
GetDevice() *iface.DeviceWrapper
|
||||||
|
|||||||
@@ -2,14 +2,18 @@ package internal
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"maps"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
"net"
|
"net"
|
||||||
"net/netip"
|
"net/netip"
|
||||||
"reflect"
|
"reflect"
|
||||||
"runtime"
|
"runtime"
|
||||||
|
"slices"
|
||||||
"strings"
|
"strings"
|
||||||
"sync"
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/pion/ice/v3"
|
"github.com/pion/ice/v3"
|
||||||
@@ -21,21 +25,29 @@ import (
|
|||||||
"github.com/netbirdio/netbird/client/firewall/manager"
|
"github.com/netbirdio/netbird/client/firewall/manager"
|
||||||
"github.com/netbirdio/netbird/client/internal/acl"
|
"github.com/netbirdio/netbird/client/internal/acl"
|
||||||
"github.com/netbirdio/netbird/client/internal/dns"
|
"github.com/netbirdio/netbird/client/internal/dns"
|
||||||
|
|
||||||
|
"github.com/netbirdio/netbird/client/internal/networkmonitor"
|
||||||
"github.com/netbirdio/netbird/client/internal/peer"
|
"github.com/netbirdio/netbird/client/internal/peer"
|
||||||
"github.com/netbirdio/netbird/client/internal/relay"
|
"github.com/netbirdio/netbird/client/internal/relay"
|
||||||
"github.com/netbirdio/netbird/client/internal/rosenpass"
|
"github.com/netbirdio/netbird/client/internal/rosenpass"
|
||||||
"github.com/netbirdio/netbird/client/internal/routemanager"
|
"github.com/netbirdio/netbird/client/internal/routemanager"
|
||||||
|
"github.com/netbirdio/netbird/client/internal/routemanager/systemops"
|
||||||
"github.com/netbirdio/netbird/client/internal/wgproxy"
|
"github.com/netbirdio/netbird/client/internal/wgproxy"
|
||||||
nbssh "github.com/netbirdio/netbird/client/ssh"
|
nbssh "github.com/netbirdio/netbird/client/ssh"
|
||||||
|
"github.com/netbirdio/netbird/client/system"
|
||||||
nbdns "github.com/netbirdio/netbird/dns"
|
nbdns "github.com/netbirdio/netbird/dns"
|
||||||
"github.com/netbirdio/netbird/iface"
|
"github.com/netbirdio/netbird/iface"
|
||||||
"github.com/netbirdio/netbird/iface/bind"
|
"github.com/netbirdio/netbird/iface/bind"
|
||||||
mgm "github.com/netbirdio/netbird/management/client"
|
mgm "github.com/netbirdio/netbird/management/client"
|
||||||
|
"github.com/netbirdio/netbird/management/domain"
|
||||||
mgmProto "github.com/netbirdio/netbird/management/proto"
|
mgmProto "github.com/netbirdio/netbird/management/proto"
|
||||||
|
auth "github.com/netbirdio/netbird/relay/auth/hmac"
|
||||||
|
relayClient "github.com/netbirdio/netbird/relay/client"
|
||||||
"github.com/netbirdio/netbird/route"
|
"github.com/netbirdio/netbird/route"
|
||||||
signal "github.com/netbirdio/netbird/signal/client"
|
signal "github.com/netbirdio/netbird/signal/client"
|
||||||
sProto "github.com/netbirdio/netbird/signal/proto"
|
sProto "github.com/netbirdio/netbird/signal/proto"
|
||||||
"github.com/netbirdio/netbird/util"
|
"github.com/netbirdio/netbird/util"
|
||||||
|
nbnet "github.com/netbirdio/netbird/util/net"
|
||||||
)
|
)
|
||||||
|
|
||||||
// PeerConnectionTimeoutMax is a timeout of an initial connection attempt to a remote peer.
|
// PeerConnectionTimeoutMax is a timeout of an initial connection attempt to a remote peer.
|
||||||
@@ -60,6 +72,9 @@ type EngineConfig struct {
|
|||||||
// WgPrivateKey is a Wireguard private key of our peer (it MUST never leave the machine)
|
// WgPrivateKey is a Wireguard private key of our peer (it MUST never leave the machine)
|
||||||
WgPrivateKey wgtypes.Key
|
WgPrivateKey wgtypes.Key
|
||||||
|
|
||||||
|
// NetworkMonitor is a flag to enable network monitoring
|
||||||
|
NetworkMonitor bool
|
||||||
|
|
||||||
// IFaceBlackList is a list of network interfaces to ignore when discovering connection candidates (ICE related)
|
// IFaceBlackList is a list of network interfaces to ignore when discovering connection candidates (ICE related)
|
||||||
IFaceBlackList []string
|
IFaceBlackList []string
|
||||||
DisableIPv6Discovery bool
|
DisableIPv6Discovery bool
|
||||||
@@ -83,16 +98,23 @@ type EngineConfig struct {
|
|||||||
RosenpassPermissive bool
|
RosenpassPermissive bool
|
||||||
|
|
||||||
ServerSSHAllowed bool
|
ServerSSHAllowed bool
|
||||||
|
|
||||||
|
DNSRouteInterval time.Duration
|
||||||
}
|
}
|
||||||
|
|
||||||
// Engine is a mechanism responsible for reacting on Signal and Management stream events and managing connections to the remote peers.
|
// Engine is a mechanism responsible for reacting on Signal and Management stream events and managing connections to the remote peers.
|
||||||
type Engine struct {
|
type Engine struct {
|
||||||
// signal is a Signal Service client
|
// signal is a Signal Service client
|
||||||
signal signal.Client
|
signal signal.Client
|
||||||
|
signaler *peer.Signaler
|
||||||
// mgmClient is a Management Service client
|
// mgmClient is a Management Service client
|
||||||
mgmClient mgm.Client
|
mgmClient mgm.Client
|
||||||
// peerConns is a map that holds all the peers that are known to this peer
|
// peerConns is a map that holds all the peers that are known to this peer
|
||||||
peerConns map[string]*peer.Conn
|
peerConns map[string]*peer.Conn
|
||||||
|
|
||||||
|
beforePeerHook nbnet.AddHookFunc
|
||||||
|
afterPeerHook nbnet.RemoveHookFunc
|
||||||
|
|
||||||
// rpManager is a Rosenpass manager
|
// rpManager is a Rosenpass manager
|
||||||
rpManager *rosenpass.Manager
|
rpManager *rosenpass.Manager
|
||||||
|
|
||||||
@@ -105,13 +127,20 @@ type Engine struct {
|
|||||||
// STUNs is a list of STUN servers used by ICE
|
// STUNs is a list of STUN servers used by ICE
|
||||||
STUNs []*stun.URI
|
STUNs []*stun.URI
|
||||||
// TURNs is a list of STUN servers used by ICE
|
// TURNs is a list of STUN servers used by ICE
|
||||||
TURNs []*stun.URI
|
TURNs []*stun.URI
|
||||||
|
StunTurn atomic.Value
|
||||||
|
|
||||||
|
// clientRoutes is the most recent list of clientRoutes received from the Management Service
|
||||||
|
clientRoutes route.HAMap
|
||||||
|
clientRoutesMu sync.RWMutex
|
||||||
|
|
||||||
|
clientCtx context.Context
|
||||||
|
clientCancel context.CancelFunc
|
||||||
|
|
||||||
|
ctx context.Context
|
||||||
cancel context.CancelFunc
|
cancel context.CancelFunc
|
||||||
|
|
||||||
ctx context.Context
|
wgInterface iface.IWGIface
|
||||||
|
|
||||||
wgInterface *iface.WGIface
|
|
||||||
wgProxyFactory *wgproxy.Factory
|
wgProxyFactory *wgproxy.Factory
|
||||||
|
|
||||||
udpMux *bind.UniversalUDPMuxDefault
|
udpMux *bind.UniversalUDPMuxDefault
|
||||||
@@ -119,6 +148,8 @@ type Engine struct {
|
|||||||
// networkSerial is the latest CurrentSerial (state ID) of the network sent by the Management service
|
// networkSerial is the latest CurrentSerial (state ID) of the network sent by the Management service
|
||||||
networkSerial uint64
|
networkSerial uint64
|
||||||
|
|
||||||
|
networkMonitor *networkmonitor.NetworkMonitor
|
||||||
|
|
||||||
sshServerFunc func(hostKeyPEM []byte, addr string) (nbssh.Server, error)
|
sshServerFunc func(hostKeyPEM []byte, addr string) (nbssh.Server, error)
|
||||||
sshServer nbssh.Server
|
sshServer nbssh.Server
|
||||||
|
|
||||||
@@ -134,6 +165,11 @@ type Engine struct {
|
|||||||
signalProbe *Probe
|
signalProbe *Probe
|
||||||
relayProbe *Probe
|
relayProbe *Probe
|
||||||
wgProbe *Probe
|
wgProbe *Probe
|
||||||
|
|
||||||
|
// checks are the client-applied posture checks that need to be evaluated on the client
|
||||||
|
checks []*mgmProto.Checks
|
||||||
|
|
||||||
|
relayManager *relayClient.Manager
|
||||||
}
|
}
|
||||||
|
|
||||||
// Peer is an instance of the Connection Peer
|
// Peer is an instance of the Connection Peer
|
||||||
@@ -144,19 +180,22 @@ type Peer struct {
|
|||||||
|
|
||||||
// NewEngine creates a new Connection Engine
|
// NewEngine creates a new Connection Engine
|
||||||
func NewEngine(
|
func NewEngine(
|
||||||
ctx context.Context,
|
clientCtx context.Context,
|
||||||
cancel context.CancelFunc,
|
clientCancel context.CancelFunc,
|
||||||
signalClient signal.Client,
|
signalClient signal.Client,
|
||||||
mgmClient mgm.Client,
|
mgmClient mgm.Client,
|
||||||
|
relayManager *relayClient.Manager,
|
||||||
config *EngineConfig,
|
config *EngineConfig,
|
||||||
mobileDep MobileDependency,
|
mobileDep MobileDependency,
|
||||||
statusRecorder *peer.Status,
|
statusRecorder *peer.Status,
|
||||||
|
checks []*mgmProto.Checks,
|
||||||
) *Engine {
|
) *Engine {
|
||||||
return NewEngineWithProbes(
|
return NewEngineWithProbes(
|
||||||
ctx,
|
clientCtx,
|
||||||
cancel,
|
clientCancel,
|
||||||
signalClient,
|
signalClient,
|
||||||
mgmClient,
|
mgmClient,
|
||||||
|
relayManager,
|
||||||
config,
|
config,
|
||||||
mobileDep,
|
mobileDep,
|
||||||
statusRecorder,
|
statusRecorder,
|
||||||
@@ -164,15 +203,17 @@ func NewEngine(
|
|||||||
nil,
|
nil,
|
||||||
nil,
|
nil,
|
||||||
nil,
|
nil,
|
||||||
|
checks,
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewEngineWithProbes creates a new Connection Engine with probes attached
|
// NewEngineWithProbes creates a new Connection Engine with probes attached
|
||||||
func NewEngineWithProbes(
|
func NewEngineWithProbes(
|
||||||
ctx context.Context,
|
clientCtx context.Context,
|
||||||
cancel context.CancelFunc,
|
clientCancel context.CancelFunc,
|
||||||
signalClient signal.Client,
|
signalClient signal.Client,
|
||||||
mgmClient mgm.Client,
|
mgmClient mgm.Client,
|
||||||
|
relayManager *relayClient.Manager,
|
||||||
config *EngineConfig,
|
config *EngineConfig,
|
||||||
mobileDep MobileDependency,
|
mobileDep MobileDependency,
|
||||||
statusRecorder *peer.Status,
|
statusRecorder *peer.Status,
|
||||||
@@ -180,12 +221,15 @@ func NewEngineWithProbes(
|
|||||||
signalProbe *Probe,
|
signalProbe *Probe,
|
||||||
relayProbe *Probe,
|
relayProbe *Probe,
|
||||||
wgProbe *Probe,
|
wgProbe *Probe,
|
||||||
|
checks []*mgmProto.Checks,
|
||||||
) *Engine {
|
) *Engine {
|
||||||
return &Engine{
|
return &Engine{
|
||||||
ctx: ctx,
|
clientCtx: clientCtx,
|
||||||
cancel: cancel,
|
clientCancel: clientCancel,
|
||||||
signal: signalClient,
|
signal: signalClient,
|
||||||
|
signaler: peer.NewSignaler(signalClient, config.WgPrivateKey),
|
||||||
mgmClient: mgmClient,
|
mgmClient: mgmClient,
|
||||||
|
relayManager: relayManager,
|
||||||
peerConns: make(map[string]*peer.Conn),
|
peerConns: make(map[string]*peer.Conn),
|
||||||
syncMsgMux: &sync.Mutex{},
|
syncMsgMux: &sync.Mutex{},
|
||||||
config: config,
|
config: config,
|
||||||
@@ -195,11 +239,11 @@ func NewEngineWithProbes(
|
|||||||
networkSerial: 0,
|
networkSerial: 0,
|
||||||
sshServerFunc: nbssh.DefaultSSHServer,
|
sshServerFunc: nbssh.DefaultSSHServer,
|
||||||
statusRecorder: statusRecorder,
|
statusRecorder: statusRecorder,
|
||||||
wgProxyFactory: wgproxy.NewFactory(config.WgPort),
|
|
||||||
mgmProbe: mgmProbe,
|
mgmProbe: mgmProbe,
|
||||||
signalProbe: signalProbe,
|
signalProbe: signalProbe,
|
||||||
relayProbe: relayProbe,
|
relayProbe: relayProbe,
|
||||||
wgProbe: wgProbe,
|
wgProbe: wgProbe,
|
||||||
|
checks: checks,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -207,13 +251,27 @@ func (e *Engine) Stop() error {
|
|||||||
e.syncMsgMux.Lock()
|
e.syncMsgMux.Lock()
|
||||||
defer e.syncMsgMux.Unlock()
|
defer e.syncMsgMux.Unlock()
|
||||||
|
|
||||||
|
if e.cancel != nil {
|
||||||
|
e.cancel()
|
||||||
|
}
|
||||||
|
|
||||||
|
// stopping network monitor first to avoid starting the engine again
|
||||||
|
if e.networkMonitor != nil {
|
||||||
|
e.networkMonitor.Stop()
|
||||||
|
}
|
||||||
|
log.Info("Network monitor: stopped")
|
||||||
|
|
||||||
err := e.removeAllPeers()
|
err := e.removeAllPeers()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
e.clientRoutesMu.Lock()
|
||||||
|
e.clientRoutes = nil
|
||||||
|
e.clientRoutesMu.Unlock()
|
||||||
|
|
||||||
// very ugly but we want to remove peers from the WireGuard interface first before removing interface.
|
// very ugly but we want to remove peers from the WireGuard interface first before removing interface.
|
||||||
// Removing peers happens in the conn.CLose() asynchronously
|
// Removing peers happens in the conn.Close() asynchronously
|
||||||
time.Sleep(500 * time.Millisecond)
|
time.Sleep(500 * time.Millisecond)
|
||||||
|
|
||||||
e.close()
|
e.close()
|
||||||
@@ -228,13 +286,21 @@ func (e *Engine) Start() error {
|
|||||||
e.syncMsgMux.Lock()
|
e.syncMsgMux.Lock()
|
||||||
defer e.syncMsgMux.Unlock()
|
defer e.syncMsgMux.Unlock()
|
||||||
|
|
||||||
|
if e.cancel != nil {
|
||||||
|
e.cancel()
|
||||||
|
}
|
||||||
|
e.ctx, e.cancel = context.WithCancel(e.clientCtx)
|
||||||
|
|
||||||
wgIface, err := e.newWgIface()
|
wgIface, err := e.newWgIface()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("failed creating wireguard interface instance %s: [%s]", e.config.WgIfaceName, err.Error())
|
log.Errorf("failed creating wireguard interface instance %s: [%s]", e.config.WgIfaceName, err)
|
||||||
return err
|
return fmt.Errorf("new wg interface: %w", err)
|
||||||
}
|
}
|
||||||
e.wgInterface = wgIface
|
e.wgInterface = wgIface
|
||||||
|
|
||||||
|
userspace := e.wgInterface.IsUserspaceBind()
|
||||||
|
e.wgProxyFactory = wgproxy.NewFactory(e.ctx, userspace, e.config.WgPort)
|
||||||
|
|
||||||
if e.config.RosenpassEnabled {
|
if e.config.RosenpassEnabled {
|
||||||
log.Infof("rosenpass is enabled")
|
log.Infof("rosenpass is enabled")
|
||||||
if e.config.RosenpassPermissive {
|
if e.config.RosenpassPermissive {
|
||||||
@@ -244,29 +310,37 @@ func (e *Engine) Start() error {
|
|||||||
}
|
}
|
||||||
e.rpManager, err = rosenpass.NewManager(e.config.PreSharedKey, e.config.WgIfaceName)
|
e.rpManager, err = rosenpass.NewManager(e.config.PreSharedKey, e.config.WgIfaceName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return fmt.Errorf("create rosenpass manager: %w", err)
|
||||||
}
|
}
|
||||||
err := e.rpManager.Run()
|
err := e.rpManager.Run()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return fmt.Errorf("run rosenpass manager: %w", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
initialRoutes, dnsServer, err := e.newDnsServer()
|
initialRoutes, dnsServer, err := e.newDnsServer()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
e.close()
|
e.close()
|
||||||
return err
|
return fmt.Errorf("create dns server: %w", err)
|
||||||
}
|
}
|
||||||
e.dnsServer = dnsServer
|
e.dnsServer = dnsServer
|
||||||
|
|
||||||
e.routeManager = routemanager.NewManager(e.ctx, e.config.WgPrivateKey.PublicKey().String(), e.wgInterface, e.statusRecorder, initialRoutes)
|
e.routeManager = routemanager.NewManager(e.ctx, e.config.WgPrivateKey.PublicKey().String(), e.config.DNSRouteInterval, e.wgInterface, e.statusRecorder, initialRoutes)
|
||||||
|
beforePeerHook, afterPeerHook, err := e.routeManager.Init()
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("Failed to initialize route manager: %s", err)
|
||||||
|
} else {
|
||||||
|
e.beforePeerHook = beforePeerHook
|
||||||
|
e.afterPeerHook = afterPeerHook
|
||||||
|
}
|
||||||
|
|
||||||
e.routeManager.SetRouteChangeListener(e.mobileDep.NetworkChangeListener)
|
e.routeManager.SetRouteChangeListener(e.mobileDep.NetworkChangeListener)
|
||||||
|
|
||||||
err = e.wgInterfaceCreate()
|
err = e.wgInterfaceCreate()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("failed creating tunnel interface %s: [%s]", e.config.WgIfaceName, err.Error())
|
log.Errorf("failed creating tunnel interface %s: [%s]", e.config.WgIfaceName, err.Error())
|
||||||
e.close()
|
e.close()
|
||||||
return err
|
return fmt.Errorf("create wg interface: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
e.firewall, err = firewall.NewFirewall(e.ctx, e.wgInterface)
|
e.firewall, err = firewall.NewFirewall(e.ctx, e.wgInterface)
|
||||||
@@ -278,7 +352,7 @@ func (e *Engine) Start() error {
|
|||||||
err = e.routeManager.EnableServerRouter(e.firewall)
|
err = e.routeManager.EnableServerRouter(e.firewall)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
e.close()
|
e.close()
|
||||||
return err
|
return fmt.Errorf("enable server router: %w", err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -286,7 +360,7 @@ func (e *Engine) Start() error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
log.Errorf("failed to pull up wgInterface [%s]: %s", e.wgInterface.Name(), err.Error())
|
log.Errorf("failed to pull up wgInterface [%s]: %s", e.wgInterface.Name(), err.Error())
|
||||||
e.close()
|
e.close()
|
||||||
return err
|
return fmt.Errorf("up wg interface: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
if e.firewall != nil {
|
if e.firewall != nil {
|
||||||
@@ -296,13 +370,16 @@ func (e *Engine) Start() error {
|
|||||||
err = e.dnsServer.Initialize()
|
err = e.dnsServer.Initialize()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
e.close()
|
e.close()
|
||||||
return err
|
return fmt.Errorf("initialize dns server: %w", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
e.receiveSignalEvents()
|
e.receiveSignalEvents()
|
||||||
e.receiveManagementEvents()
|
e.receiveManagementEvents()
|
||||||
e.receiveProbeEvents()
|
e.receiveProbeEvents()
|
||||||
|
|
||||||
|
// starting network monitor at the very last to avoid disruptions
|
||||||
|
e.startNetworkMonitor()
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -397,83 +474,49 @@ func (e *Engine) removePeer(peerKey string) error {
|
|||||||
conn, exists := e.peerConns[peerKey]
|
conn, exists := e.peerConns[peerKey]
|
||||||
if exists {
|
if exists {
|
||||||
delete(e.peerConns, peerKey)
|
delete(e.peerConns, peerKey)
|
||||||
err := conn.Close()
|
conn.Close()
|
||||||
if err != nil {
|
|
||||||
switch err.(type) {
|
|
||||||
case *peer.ConnectionAlreadyClosedError:
|
|
||||||
return nil
|
|
||||||
default:
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func signalCandidate(candidate ice.Candidate, myKey wgtypes.Key, remoteKey wgtypes.Key, s signal.Client) error {
|
|
||||||
err := s.Send(&sProto.Message{
|
|
||||||
Key: myKey.PublicKey().String(),
|
|
||||||
RemoteKey: remoteKey.String(),
|
|
||||||
Body: &sProto.Body{
|
|
||||||
Type: sProto.Body_CANDIDATE,
|
|
||||||
Payload: candidate.Marshal(),
|
|
||||||
},
|
|
||||||
})
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func sendSignal(message *sProto.Message, s signal.Client) error {
|
|
||||||
return s.Send(message)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SignalOfferAnswer signals either an offer or an answer to remote peer
|
|
||||||
func SignalOfferAnswer(offerAnswer peer.OfferAnswer, myKey wgtypes.Key, remoteKey wgtypes.Key, s signal.Client,
|
|
||||||
isAnswer bool) error {
|
|
||||||
var t sProto.Body_Type
|
|
||||||
if isAnswer {
|
|
||||||
t = sProto.Body_ANSWER
|
|
||||||
} else {
|
|
||||||
t = sProto.Body_OFFER
|
|
||||||
}
|
|
||||||
|
|
||||||
msg, err := signal.MarshalCredential(myKey, offerAnswer.WgListenPort, remoteKey, &signal.Credential{
|
|
||||||
UFrag: offerAnswer.IceCredentials.UFrag,
|
|
||||||
Pwd: offerAnswer.IceCredentials.Pwd,
|
|
||||||
}, t, offerAnswer.RosenpassPubKey, offerAnswer.RosenpassAddr)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
err = s.Send(msg)
|
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Engine) handleSync(update *mgmProto.SyncResponse) error {
|
func (e *Engine) handleSync(update *mgmProto.SyncResponse) error {
|
||||||
e.syncMsgMux.Lock()
|
e.syncMsgMux.Lock()
|
||||||
defer e.syncMsgMux.Unlock()
|
defer e.syncMsgMux.Unlock()
|
||||||
|
|
||||||
if update.GetWiretrusteeConfig() != nil {
|
if update.GetWiretrusteeConfig() != nil {
|
||||||
err := e.updateTURNs(update.GetWiretrusteeConfig().GetTurns())
|
wCfg := update.GetWiretrusteeConfig()
|
||||||
|
err := e.updateTURNs(wCfg.GetTurns())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
err = e.updateSTUNs(update.GetWiretrusteeConfig().GetStuns())
|
err = e.updateSTUNs(wCfg.GetStuns())
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var stunTurn []*stun.URI
|
||||||
|
stunTurn = append(stunTurn, e.STUNs...)
|
||||||
|
stunTurn = append(stunTurn, e.TURNs...)
|
||||||
|
e.StunTurn.Store(stunTurn)
|
||||||
|
|
||||||
|
relayMsg := wCfg.GetRelay()
|
||||||
|
if relayMsg != nil {
|
||||||
|
c := &auth.Token{
|
||||||
|
Payload: relayMsg.GetTokenPayload(),
|
||||||
|
Signature: relayMsg.GetTokenSignature(),
|
||||||
|
}
|
||||||
|
e.relayManager.UpdateToken(c)
|
||||||
|
}
|
||||||
|
|
||||||
|
// todo update relay address in the relay manager
|
||||||
// todo update signal
|
// todo update signal
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if err := e.updateChecksIfNew(update.Checks); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
if update.GetNetworkMap() != nil {
|
if update.GetNetworkMap() != nil {
|
||||||
// only apply new changes and ignore old ones
|
// only apply new changes and ignore old ones
|
||||||
err := e.updateNetworkMap(update.GetNetworkMap())
|
err := e.updateNetworkMap(update.GetNetworkMap())
|
||||||
@@ -481,7 +524,27 @@ func (e *Engine) handleSync(update *mgmProto.SyncResponse) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// updateChecksIfNew updates checks if there are changes and sync new meta with management
|
||||||
|
func (e *Engine) updateChecksIfNew(checks []*mgmProto.Checks) error {
|
||||||
|
// if checks are equal, we skip the update
|
||||||
|
if isChecksEqual(e.checks, checks) {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
e.checks = checks
|
||||||
|
|
||||||
|
info, err := system.GetInfoWithChecks(e.ctx, checks)
|
||||||
|
if err != nil {
|
||||||
|
log.Warnf("failed to get system info with checks: %v", err)
|
||||||
|
info = system.GetInfo(e.ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
if err := e.mgmClient.SyncMeta(info); err != nil {
|
||||||
|
log.Errorf("could not sync meta: error %s", err)
|
||||||
|
return err
|
||||||
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -497,8 +560,8 @@ func (e *Engine) updateSSH(sshConf *mgmProto.SSHConfig) error {
|
|||||||
} else {
|
} else {
|
||||||
|
|
||||||
if sshConf.GetSshEnabled() {
|
if sshConf.GetSshEnabled() {
|
||||||
if runtime.GOOS == "windows" {
|
if runtime.GOOS == "windows" || runtime.GOOS == "freebsd" {
|
||||||
log.Warnf("running SSH server on Windows is not supported")
|
log.Warnf("running SSH server on %s is not supported", runtime.GOOS)
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
// start SSH server if it wasn't running
|
// start SSH server if it wasn't running
|
||||||
@@ -571,12 +634,19 @@ func (e *Engine) updateConfig(conf *mgmProto.PeerConfig) error {
|
|||||||
// E.g. when a new peer has been registered and we are allowed to connect to it.
|
// E.g. when a new peer has been registered and we are allowed to connect to it.
|
||||||
func (e *Engine) receiveManagementEvents() {
|
func (e *Engine) receiveManagementEvents() {
|
||||||
go func() {
|
go func() {
|
||||||
err := e.mgmClient.Sync(e.handleSync)
|
info, err := system.GetInfoWithChecks(e.ctx, e.checks)
|
||||||
|
if err != nil {
|
||||||
|
log.Warnf("failed to get system info with checks: %v", err)
|
||||||
|
info = system.GetInfo(e.ctx)
|
||||||
|
}
|
||||||
|
|
||||||
|
// err = e.mgmClient.Sync(info, e.handleSync)
|
||||||
|
err = e.mgmClient.Sync(e.ctx, info, e.handleSync)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// happens if management is unavailable for a long time.
|
// happens if management is unavailable for a long time.
|
||||||
// We want to cancel the operation of the whole client
|
// We want to cancel the operation of the whole client
|
||||||
_ = CtxGetState(e.ctx).Wrap(ErrResetConnection)
|
_ = CtxGetState(e.ctx).Wrap(ErrResetConnection)
|
||||||
e.cancel()
|
e.clientCancel()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
log.Debugf("stopped receiving updates from Management Service")
|
log.Debugf("stopped receiving updates from Management Service")
|
||||||
@@ -638,6 +708,20 @@ func (e *Engine) updateNetworkMap(networkMap *mgmProto.NetworkMap) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
protoRoutes := networkMap.GetRoutes()
|
||||||
|
if protoRoutes == nil {
|
||||||
|
protoRoutes = []*mgmProto.Route{}
|
||||||
|
}
|
||||||
|
|
||||||
|
_, clientRoutes, err := e.routeManager.UpdateRoutes(serial, toRoutes(protoRoutes))
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("failed to update clientRoutes, err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
e.clientRoutesMu.Lock()
|
||||||
|
e.clientRoutes = clientRoutes
|
||||||
|
e.clientRoutesMu.Unlock()
|
||||||
|
|
||||||
log.Debugf("got peers update from Management Service, total peers to connect to = %d", len(networkMap.GetRemotePeers()))
|
log.Debugf("got peers update from Management Service, total peers to connect to = %d", len(networkMap.GetRemotePeers()))
|
||||||
|
|
||||||
e.updateOfflinePeers(networkMap.GetOfflinePeers())
|
e.updateOfflinePeers(networkMap.GetOfflinePeers())
|
||||||
@@ -679,14 +763,6 @@ func (e *Engine) updateNetworkMap(networkMap *mgmProto.NetworkMap) error {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
protoRoutes := networkMap.GetRoutes()
|
|
||||||
if protoRoutes == nil {
|
|
||||||
protoRoutes = []*mgmProto.Route{}
|
|
||||||
}
|
|
||||||
err := e.routeManager.UpdateRoutes(serial, toRoutes(protoRoutes))
|
|
||||||
if err != nil {
|
|
||||||
log.Errorf("failed to update routes, err: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
protoDNSConfig := networkMap.GetDNSConfig()
|
protoDNSConfig := networkMap.GetDNSConfig()
|
||||||
if protoDNSConfig == nil {
|
if protoDNSConfig == nil {
|
||||||
@@ -698,30 +774,40 @@ func (e *Engine) updateNetworkMap(networkMap *mgmProto.NetworkMap) error {
|
|||||||
log.Errorf("failed to update dns server, err: %v", err)
|
log.Errorf("failed to update dns server, err: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Test received (upstream) servers for availability right away instead of upon usage.
|
|
||||||
// If no server of a server group responds this will disable the respective handler and retry later.
|
|
||||||
e.dnsServer.ProbeAvailability()
|
|
||||||
|
|
||||||
if e.acl != nil {
|
if e.acl != nil {
|
||||||
e.acl.ApplyFiltering(networkMap)
|
e.acl.ApplyFiltering(networkMap)
|
||||||
}
|
}
|
||||||
|
|
||||||
e.networkSerial = serial
|
e.networkSerial = serial
|
||||||
|
|
||||||
|
// Test received (upstream) servers for availability right away instead of upon usage.
|
||||||
|
// If no server of a server group responds this will disable the respective handler and retry later.
|
||||||
|
e.dnsServer.ProbeAvailability()
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func toRoutes(protoRoutes []*mgmProto.Route) []*route.Route {
|
func toRoutes(protoRoutes []*mgmProto.Route) []*route.Route {
|
||||||
routes := make([]*route.Route, 0)
|
routes := make([]*route.Route, 0)
|
||||||
for _, protoRoute := range protoRoutes {
|
for _, protoRoute := range protoRoutes {
|
||||||
_, prefix, _ := route.ParseNetwork(protoRoute.Network)
|
var prefix netip.Prefix
|
||||||
|
if len(protoRoute.Domains) == 0 {
|
||||||
|
var err error
|
||||||
|
if prefix, err = netip.ParsePrefix(protoRoute.Network); err != nil {
|
||||||
|
log.Errorf("Failed to parse prefix %s: %v", protoRoute.Network, err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
}
|
||||||
convertedRoute := &route.Route{
|
convertedRoute := &route.Route{
|
||||||
ID: protoRoute.ID,
|
ID: route.ID(protoRoute.ID),
|
||||||
Network: prefix,
|
Network: prefix,
|
||||||
NetID: protoRoute.NetID,
|
Domains: domain.FromPunycodeList(protoRoute.Domains),
|
||||||
|
NetID: route.NetID(protoRoute.NetID),
|
||||||
NetworkType: route.NetworkType(protoRoute.NetworkType),
|
NetworkType: route.NetworkType(protoRoute.NetworkType),
|
||||||
Peer: protoRoute.Peer,
|
Peer: protoRoute.Peer,
|
||||||
Metric: int(protoRoute.Metric),
|
Metric: int(protoRoute.Metric),
|
||||||
Masquerade: protoRoute.Masquerade,
|
Masquerade: protoRoute.Masquerade,
|
||||||
|
KeepRoute: protoRoute.KeepRoute,
|
||||||
}
|
}
|
||||||
routes = append(routes, convertedRoute)
|
routes = append(routes, convertedRoute)
|
||||||
}
|
}
|
||||||
@@ -781,6 +867,7 @@ func (e *Engine) updateOfflinePeers(offlinePeers []*mgmProto.RemotePeerConfig) {
|
|||||||
FQDN: offlinePeer.GetFqdn(),
|
FQDN: offlinePeer.GetFqdn(),
|
||||||
ConnStatus: peer.StatusDisconnected,
|
ConnStatus: peer.StatusDisconnected,
|
||||||
ConnStatusUpdate: time.Now(),
|
ConnStatusUpdate: time.Now(),
|
||||||
|
Mux: new(sync.RWMutex),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
e.statusRecorder.ReplaceOfflinePeers(replacement)
|
e.statusRecorder.ReplaceOfflinePeers(replacement)
|
||||||
@@ -804,69 +891,27 @@ func (e *Engine) addNewPeer(peerConfig *mgmProto.RemotePeerConfig) error {
|
|||||||
if _, ok := e.peerConns[peerKey]; !ok {
|
if _, ok := e.peerConns[peerKey]; !ok {
|
||||||
conn, err := e.createPeerConn(peerKey, strings.Join(peerIPs, ","))
|
conn, err := e.createPeerConn(peerKey, strings.Join(peerIPs, ","))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return fmt.Errorf("create peer connection: %w", err)
|
||||||
}
|
}
|
||||||
e.peerConns[peerKey] = conn
|
e.peerConns[peerKey] = conn
|
||||||
|
|
||||||
|
if e.beforePeerHook != nil && e.afterPeerHook != nil {
|
||||||
|
conn.AddBeforeAddPeerHook(e.beforePeerHook)
|
||||||
|
conn.AddAfterRemovePeerHook(e.afterPeerHook)
|
||||||
|
}
|
||||||
|
|
||||||
err = e.statusRecorder.AddPeer(peerKey, peerConfig.Fqdn)
|
err = e.statusRecorder.AddPeer(peerKey, peerConfig.Fqdn)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warnf("error adding peer %s to status recorder, got error: %v", peerKey, err)
|
log.Warnf("error adding peer %s to status recorder, got error: %v", peerKey, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
go e.connWorker(conn, peerKey)
|
conn.Open()
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *Engine) connWorker(conn *peer.Conn, peerKey string) {
|
|
||||||
for {
|
|
||||||
|
|
||||||
// randomize starting time a bit
|
|
||||||
min := 500
|
|
||||||
max := 2000
|
|
||||||
time.Sleep(time.Duration(rand.Intn(max-min)+min) * time.Millisecond)
|
|
||||||
|
|
||||||
// if peer has been removed -> give up
|
|
||||||
if !e.peerExists(peerKey) {
|
|
||||||
log.Debugf("peer %s doesn't exist anymore, won't retry connection", peerKey)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if !e.signal.Ready() {
|
|
||||||
log.Infof("signal client isn't ready, skipping connection attempt %s", peerKey)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
|
|
||||||
// we might have received new STUN and TURN servers meanwhile, so update them
|
|
||||||
e.syncMsgMux.Lock()
|
|
||||||
conn.UpdateStunTurn(append(e.STUNs, e.TURNs...))
|
|
||||||
e.syncMsgMux.Unlock()
|
|
||||||
|
|
||||||
err := conn.Open()
|
|
||||||
if err != nil {
|
|
||||||
log.Debugf("connection to peer %s failed: %v", peerKey, err)
|
|
||||||
switch err.(type) {
|
|
||||||
case *peer.ConnectionClosedError:
|
|
||||||
// conn has been forced to close, so we exit the loop
|
|
||||||
return
|
|
||||||
default:
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Engine) peerExists(peerKey string) bool {
|
|
||||||
e.syncMsgMux.Lock()
|
|
||||||
defer e.syncMsgMux.Unlock()
|
|
||||||
_, ok := e.peerConns[peerKey]
|
|
||||||
return ok
|
|
||||||
}
|
|
||||||
|
|
||||||
func (e *Engine) createPeerConn(pubKey string, allowedIPs string) (*peer.Conn, error) {
|
func (e *Engine) createPeerConn(pubKey string, allowedIPs string) (*peer.Conn, error) {
|
||||||
log.Debugf("creating peer connection %s", pubKey)
|
log.Debugf("creating peer connection %s", pubKey)
|
||||||
var stunTurn []*stun.URI
|
|
||||||
stunTurn = append(stunTurn, e.STUNs...)
|
|
||||||
stunTurn = append(stunTurn, e.TURNs...)
|
|
||||||
|
|
||||||
wgConfig := peer.WgConfig{
|
wgConfig := peer.WgConfig{
|
||||||
RemoteKey: pubKey,
|
RemoteKey: pubKey,
|
||||||
@@ -899,53 +944,29 @@ func (e *Engine) createPeerConn(pubKey string, allowedIPs string) (*peer.Conn, e
|
|||||||
// randomize connection timeout
|
// randomize connection timeout
|
||||||
timeout := time.Duration(rand.Intn(PeerConnectionTimeoutMax-PeerConnectionTimeoutMin)+PeerConnectionTimeoutMin) * time.Millisecond
|
timeout := time.Duration(rand.Intn(PeerConnectionTimeoutMax-PeerConnectionTimeoutMin)+PeerConnectionTimeoutMin) * time.Millisecond
|
||||||
config := peer.ConnConfig{
|
config := peer.ConnConfig{
|
||||||
Key: pubKey,
|
Key: pubKey,
|
||||||
LocalKey: e.config.WgPrivateKey.PublicKey().String(),
|
LocalKey: e.config.WgPrivateKey.PublicKey().String(),
|
||||||
StunTurn: stunTurn,
|
Timeout: timeout,
|
||||||
InterfaceBlackList: e.config.IFaceBlackList,
|
WgConfig: wgConfig,
|
||||||
DisableIPv6Discovery: e.config.DisableIPv6Discovery,
|
LocalWgPort: e.config.WgPort,
|
||||||
Timeout: timeout,
|
RosenpassPubKey: e.getRosenpassPubKey(),
|
||||||
UDPMux: e.udpMux.UDPMuxDefault,
|
RosenpassAddr: e.getRosenpassAddr(),
|
||||||
UDPMuxSrflx: e.udpMux,
|
ICEConfig: peer.ICEConfig{
|
||||||
WgConfig: wgConfig,
|
StunTurn: e.StunTurn,
|
||||||
LocalWgPort: e.config.WgPort,
|
InterfaceBlackList: e.config.IFaceBlackList,
|
||||||
NATExternalIPs: e.parseNATExternalIPMappings(),
|
DisableIPv6Discovery: e.config.DisableIPv6Discovery,
|
||||||
UserspaceBind: e.wgInterface.IsUserspaceBind(),
|
UDPMux: e.udpMux.UDPMuxDefault,
|
||||||
RosenpassPubKey: e.getRosenpassPubKey(),
|
UDPMuxSrflx: e.udpMux,
|
||||||
RosenpassAddr: e.getRosenpassAddr(),
|
NATExternalIPs: e.parseNATExternalIPMappings(),
|
||||||
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
peerConn, err := peer.NewConn(config, e.statusRecorder, e.wgProxyFactory, e.mobileDep.TunAdapter, e.mobileDep.IFaceDiscover)
|
peerConn, err := peer.NewConn(e.ctx, config, e.statusRecorder, e.wgProxyFactory, e.signaler, e.mobileDep.IFaceDiscover, e.relayManager)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
||||||
wgPubKey, err := wgtypes.ParseKey(pubKey)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
|
|
||||||
signalOffer := func(offerAnswer peer.OfferAnswer) error {
|
|
||||||
return SignalOfferAnswer(offerAnswer, e.config.WgPrivateKey, wgPubKey, e.signal, false)
|
|
||||||
}
|
|
||||||
|
|
||||||
signalCandidate := func(candidate ice.Candidate) error {
|
|
||||||
return signalCandidate(candidate, e.config.WgPrivateKey, wgPubKey, e.signal)
|
|
||||||
}
|
|
||||||
|
|
||||||
signalAnswer := func(offerAnswer peer.OfferAnswer) error {
|
|
||||||
return SignalOfferAnswer(offerAnswer, e.config.WgPrivateKey, wgPubKey, e.signal, true)
|
|
||||||
}
|
|
||||||
|
|
||||||
peerConn.SetSignalCandidate(signalCandidate)
|
|
||||||
peerConn.SetSignalOffer(signalOffer)
|
|
||||||
peerConn.SetSignalAnswer(signalAnswer)
|
|
||||||
peerConn.SetSendSignalMessage(func(message *sProto.Message) error {
|
|
||||||
return sendSignal(message, e.signal)
|
|
||||||
})
|
|
||||||
|
|
||||||
if e.rpManager != nil {
|
if e.rpManager != nil {
|
||||||
|
|
||||||
peerConn.SetOnConnected(e.rpManager.OnConnected)
|
peerConn.SetOnConnected(e.rpManager.OnConnected)
|
||||||
peerConn.SetOnDisconnected(e.rpManager.OnDisconnected)
|
peerConn.SetOnDisconnected(e.rpManager.OnDisconnected)
|
||||||
}
|
}
|
||||||
@@ -957,7 +978,7 @@ func (e *Engine) createPeerConn(pubKey string, allowedIPs string) (*peer.Conn, e
|
|||||||
func (e *Engine) receiveSignalEvents() {
|
func (e *Engine) receiveSignalEvents() {
|
||||||
go func() {
|
go func() {
|
||||||
// connect to a stream of messages coming from the signal server
|
// connect to a stream of messages coming from the signal server
|
||||||
err := e.signal.Receive(func(msg *sProto.Message) error {
|
err := e.signal.Receive(e.ctx, func(msg *sProto.Message) error {
|
||||||
e.syncMsgMux.Lock()
|
e.syncMsgMux.Lock()
|
||||||
defer e.syncMsgMux.Unlock()
|
defer e.syncMsgMux.Unlock()
|
||||||
|
|
||||||
@@ -973,8 +994,6 @@ func (e *Engine) receiveSignalEvents() {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
conn.RegisterProtoSupportMeta(msg.Body.GetFeaturesSupported())
|
|
||||||
|
|
||||||
var rosenpassPubKey []byte
|
var rosenpassPubKey []byte
|
||||||
rosenpassAddr := ""
|
rosenpassAddr := ""
|
||||||
if msg.GetBody().GetRosenpassConfig() != nil {
|
if msg.GetBody().GetRosenpassConfig() != nil {
|
||||||
@@ -990,6 +1009,7 @@ func (e *Engine) receiveSignalEvents() {
|
|||||||
Version: msg.GetBody().GetNetBirdVersion(),
|
Version: msg.GetBody().GetNetBirdVersion(),
|
||||||
RosenpassPubKey: rosenpassPubKey,
|
RosenpassPubKey: rosenpassPubKey,
|
||||||
RosenpassAddr: rosenpassAddr,
|
RosenpassAddr: rosenpassAddr,
|
||||||
|
RelaySrvAddress: msg.GetBody().GetRelayServerAddress(),
|
||||||
})
|
})
|
||||||
case sProto.Body_ANSWER:
|
case sProto.Body_ANSWER:
|
||||||
remoteCred, err := signal.UnMarshalCredential(msg)
|
remoteCred, err := signal.UnMarshalCredential(msg)
|
||||||
@@ -997,8 +1017,6 @@ func (e *Engine) receiveSignalEvents() {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
conn.RegisterProtoSupportMeta(msg.GetBody().GetFeaturesSupported())
|
|
||||||
|
|
||||||
var rosenpassPubKey []byte
|
var rosenpassPubKey []byte
|
||||||
rosenpassAddr := ""
|
rosenpassAddr := ""
|
||||||
if msg.GetBody().GetRosenpassConfig() != nil {
|
if msg.GetBody().GetRosenpassConfig() != nil {
|
||||||
@@ -1014,6 +1032,7 @@ func (e *Engine) receiveSignalEvents() {
|
|||||||
Version: msg.GetBody().GetNetBirdVersion(),
|
Version: msg.GetBody().GetNetBirdVersion(),
|
||||||
RosenpassPubKey: rosenpassPubKey,
|
RosenpassPubKey: rosenpassPubKey,
|
||||||
RosenpassAddr: rosenpassAddr,
|
RosenpassAddr: rosenpassAddr,
|
||||||
|
RelaySrvAddress: msg.GetBody().GetRelayServerAddress(),
|
||||||
})
|
})
|
||||||
case sProto.Body_CANDIDATE:
|
case sProto.Body_CANDIDATE:
|
||||||
candidate, err := ice.UnmarshalCandidate(msg.GetBody().Payload)
|
candidate, err := ice.UnmarshalCandidate(msg.GetBody().Payload)
|
||||||
@@ -1021,7 +1040,8 @@ func (e *Engine) receiveSignalEvents() {
|
|||||||
log.Errorf("failed on parsing remote candidate %s -> %s", candidate, err)
|
log.Errorf("failed on parsing remote candidate %s -> %s", candidate, err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
conn.OnRemoteCandidate(candidate)
|
|
||||||
|
go conn.OnRemoteCandidate(candidate, e.GetClientRoutes())
|
||||||
case sProto.Body_MODE:
|
case sProto.Body_MODE:
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -1031,7 +1051,7 @@ func (e *Engine) receiveSignalEvents() {
|
|||||||
// happens if signal is unavailable for a long time.
|
// happens if signal is unavailable for a long time.
|
||||||
// We want to cancel the operation of the whole client
|
// We want to cancel the operation of the whole client
|
||||||
_ = CtxGetState(e.ctx).Wrap(ErrResetConnection)
|
_ = CtxGetState(e.ctx).Wrap(ErrResetConnection)
|
||||||
e.cancel()
|
e.clientCancel()
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
@@ -1092,13 +1112,20 @@ func (e *Engine) parseNATExternalIPMappings() []string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (e *Engine) close() {
|
func (e *Engine) close() {
|
||||||
if err := e.wgProxyFactory.Free(); err != nil {
|
if e.wgProxyFactory != nil {
|
||||||
log.Errorf("failed closing ebpf proxy: %s", err)
|
if err := e.wgProxyFactory.Free(); err != nil {
|
||||||
|
log.Errorf("failed closing ebpf proxy: %s", err)
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// stop/restore DNS first so dbus and friends don't complain because of a missing interface
|
// stop/restore DNS first so dbus and friends don't complain because of a missing interface
|
||||||
if e.dnsServer != nil {
|
if e.dnsServer != nil {
|
||||||
e.dnsServer.Stop()
|
e.dnsServer.Stop()
|
||||||
|
e.dnsServer = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if e.routeManager != nil {
|
||||||
|
e.routeManager.Stop()
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Debugf("removing Netbird interface %s", e.config.WgIfaceName)
|
log.Debugf("removing Netbird interface %s", e.config.WgIfaceName)
|
||||||
@@ -1115,10 +1142,6 @@ func (e *Engine) close() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if e.routeManager != nil {
|
|
||||||
e.routeManager.Stop()
|
|
||||||
}
|
|
||||||
|
|
||||||
if e.firewall != nil {
|
if e.firewall != nil {
|
||||||
err := e.firewall.Reset()
|
err := e.firewall.Reset()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -1132,7 +1155,8 @@ func (e *Engine) close() {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (e *Engine) readInitialSettings() ([]*route.Route, *nbdns.Config, error) {
|
func (e *Engine) readInitialSettings() ([]*route.Route, *nbdns.Config, error) {
|
||||||
netMap, err := e.mgmClient.GetNetworkMap()
|
info := system.GetInfo(e.ctx)
|
||||||
|
netMap, err := e.mgmClient.GetNetworkMap(info)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, err
|
return nil, nil, err
|
||||||
}
|
}
|
||||||
@@ -1161,7 +1185,7 @@ func (e *Engine) newWgIface() (*iface.WGIface, error) {
|
|||||||
default:
|
default:
|
||||||
}
|
}
|
||||||
|
|
||||||
return iface.NewWGIFace(e.config.WgIfaceName, e.config.WgAddr, e.config.WgPort, e.config.WgPrivateKey.String(), iface.DefaultMTU, transportNet, mArgs)
|
return iface.NewWGIFace(e.config.WgIfaceName, e.config.WgAddr, e.config.WgPort, e.config.WgPrivateKey.String(), iface.DefaultMTU, transportNet, mArgs, e.addrViaRoutes)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (e *Engine) wgInterfaceCreate() (err error) {
|
func (e *Engine) wgInterfaceCreate() (err error) {
|
||||||
@@ -1210,6 +1234,31 @@ func (e *Engine) newDnsServer() ([]*route.Route, dns.Server, error) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// GetClientRoutes returns the current routes from the route map
|
||||||
|
func (e *Engine) GetClientRoutes() route.HAMap {
|
||||||
|
e.clientRoutesMu.RLock()
|
||||||
|
defer e.clientRoutesMu.RUnlock()
|
||||||
|
|
||||||
|
return maps.Clone(e.clientRoutes)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetClientRoutesWithNetID returns the current routes from the route map, but the keys consist of the network ID only
|
||||||
|
func (e *Engine) GetClientRoutesWithNetID() map[route.NetID][]*route.Route {
|
||||||
|
e.clientRoutesMu.RLock()
|
||||||
|
defer e.clientRoutesMu.RUnlock()
|
||||||
|
|
||||||
|
routes := make(map[route.NetID][]*route.Route, len(e.clientRoutes))
|
||||||
|
for id, v := range e.clientRoutes {
|
||||||
|
routes[id.NetID()] = v
|
||||||
|
}
|
||||||
|
return routes
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetRouteManager returns the route manager
|
||||||
|
func (e *Engine) GetRouteManager() routemanager.Manager {
|
||||||
|
return e.routeManager
|
||||||
|
}
|
||||||
|
|
||||||
func findIPFromInterfaceName(ifaceName string) (net.IP, error) {
|
func findIPFromInterfaceName(ifaceName string) (net.IP, error) {
|
||||||
iface, err := net.InterfaceByName(ifaceName)
|
iface, err := net.InterfaceByName(ifaceName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -1288,7 +1337,7 @@ func (e *Engine) receiveProbeEvents() {
|
|||||||
|
|
||||||
for _, peer := range e.peerConns {
|
for _, peer := range e.peerConns {
|
||||||
key := peer.GetKey()
|
key := peer.GetKey()
|
||||||
wgStats, err := peer.GetConf().WgConfig.WgInterface.GetStats(key)
|
wgStats, err := peer.WgConfig().WgInterface.GetStats(key)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Debugf("failed to get wg stats for peer %s: %s", key, err)
|
log.Debugf("failed to get wg stats for peer %s: %s", key, err)
|
||||||
}
|
}
|
||||||
@@ -1310,3 +1359,89 @@ func (e *Engine) probeSTUNs() []relay.ProbeResult {
|
|||||||
func (e *Engine) probeTURNs() []relay.ProbeResult {
|
func (e *Engine) probeTURNs() []relay.ProbeResult {
|
||||||
return relay.ProbeAll(e.ctx, relay.ProbeTURN, e.TURNs)
|
return relay.ProbeAll(e.ctx, relay.ProbeTURN, e.TURNs)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (e *Engine) restartEngine() {
|
||||||
|
if err := e.Stop(); err != nil {
|
||||||
|
log.Errorf("Failed to stop engine: %v", err)
|
||||||
|
}
|
||||||
|
if err := e.Start(); err != nil {
|
||||||
|
log.Errorf("Failed to start engine: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *Engine) startNetworkMonitor() {
|
||||||
|
if !e.config.NetworkMonitor {
|
||||||
|
log.Infof("Network monitor is disabled, not starting")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
e.networkMonitor = networkmonitor.New()
|
||||||
|
go func() {
|
||||||
|
var mu sync.Mutex
|
||||||
|
var debounceTimer *time.Timer
|
||||||
|
|
||||||
|
// Start the network monitor with a callback, Start will block until the monitor is stopped,
|
||||||
|
// a network change is detected, or an error occurs on start up
|
||||||
|
err := e.networkMonitor.Start(e.ctx, func() {
|
||||||
|
// This function is called when a network change is detected
|
||||||
|
mu.Lock()
|
||||||
|
defer mu.Unlock()
|
||||||
|
|
||||||
|
if debounceTimer != nil {
|
||||||
|
debounceTimer.Stop()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Set a new timer to debounce rapid network changes
|
||||||
|
debounceTimer = time.AfterFunc(1*time.Second, func() {
|
||||||
|
// This function is called after the debounce period
|
||||||
|
mu.Lock()
|
||||||
|
defer mu.Unlock()
|
||||||
|
|
||||||
|
log.Infof("Network monitor detected network change, restarting engine")
|
||||||
|
e.restartEngine()
|
||||||
|
})
|
||||||
|
})
|
||||||
|
if err != nil && !errors.Is(err, networkmonitor.ErrStopped) {
|
||||||
|
log.Errorf("Network monitor: %v", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *Engine) addrViaRoutes(addr netip.Addr) (bool, netip.Prefix, error) {
|
||||||
|
var vpnRoutes []netip.Prefix
|
||||||
|
for _, routes := range e.GetClientRoutes() {
|
||||||
|
if len(routes) > 0 && routes[0] != nil {
|
||||||
|
vpnRoutes = append(vpnRoutes, routes[0].Network)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if isVpn, prefix := systemops.IsAddrRouted(addr, vpnRoutes); isVpn {
|
||||||
|
return true, prefix, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return false, netip.Prefix{}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// isChecksEqual checks if two slices of checks are equal.
|
||||||
|
func isChecksEqual(checks []*mgmProto.Checks, oChecks []*mgmProto.Checks) bool {
|
||||||
|
return slices.EqualFunc(checks, oChecks, func(checks, oChecks *mgmProto.Checks) bool {
|
||||||
|
return slices.Equal(checks.Files, oChecks.Files)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *Engine) IsWGIfaceUp() bool {
|
||||||
|
if e == nil || e.wgInterface == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
iface, err := net.InterfaceByName(e.wgInterface.Name())
|
||||||
|
if err != nil {
|
||||||
|
log.Debugf("failed to get interface by name %s: %v", e.wgInterface.Name(), err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if iface.Flags&net.FlagUp != 0 {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|||||||
@@ -17,10 +17,13 @@ import (
|
|||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
"github.com/stretchr/testify/require"
|
"github.com/stretchr/testify/require"
|
||||||
|
"go.opentelemetry.io/otel"
|
||||||
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
|
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
|
||||||
"google.golang.org/grpc"
|
"google.golang.org/grpc"
|
||||||
"google.golang.org/grpc/keepalive"
|
"google.golang.org/grpc/keepalive"
|
||||||
|
|
||||||
|
"github.com/netbirdio/management-integrations/integrations"
|
||||||
|
|
||||||
"github.com/netbirdio/netbird/client/internal/dns"
|
"github.com/netbirdio/netbird/client/internal/dns"
|
||||||
"github.com/netbirdio/netbird/client/internal/peer"
|
"github.com/netbirdio/netbird/client/internal/peer"
|
||||||
"github.com/netbirdio/netbird/client/internal/routemanager"
|
"github.com/netbirdio/netbird/client/internal/routemanager"
|
||||||
@@ -33,6 +36,7 @@ import (
|
|||||||
mgmtProto "github.com/netbirdio/netbird/management/proto"
|
mgmtProto "github.com/netbirdio/netbird/management/proto"
|
||||||
"github.com/netbirdio/netbird/management/server"
|
"github.com/netbirdio/netbird/management/server"
|
||||||
"github.com/netbirdio/netbird/management/server/activity"
|
"github.com/netbirdio/netbird/management/server/activity"
|
||||||
|
relayClient "github.com/netbirdio/netbird/relay/client"
|
||||||
"github.com/netbirdio/netbird/route"
|
"github.com/netbirdio/netbird/route"
|
||||||
signal "github.com/netbirdio/netbird/signal/client"
|
signal "github.com/netbirdio/netbird/signal/client"
|
||||||
"github.com/netbirdio/netbird/signal/proto"
|
"github.com/netbirdio/netbird/signal/proto"
|
||||||
@@ -54,10 +58,16 @@ var (
|
|||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestEngine_SSH(t *testing.T) {
|
func TestMain(m *testing.M) {
|
||||||
|
_ = util.InitLog("debug", "console")
|
||||||
|
code := m.Run()
|
||||||
|
os.Exit(code)
|
||||||
|
}
|
||||||
|
|
||||||
if runtime.GOOS == "windows" {
|
func TestEngine_SSH(t *testing.T) {
|
||||||
t.Skip("skipping TestEngine_SSH on Windows")
|
// todo resolve test execution on freebsd
|
||||||
|
if runtime.GOOS == "windows" || runtime.GOOS == "freebsd" {
|
||||||
|
t.Skip("skipping TestEngine_SSH")
|
||||||
}
|
}
|
||||||
|
|
||||||
key, err := wgtypes.GeneratePrivateKey()
|
key, err := wgtypes.GeneratePrivateKey()
|
||||||
@@ -69,13 +79,23 @@ func TestEngine_SSH(t *testing.T) {
|
|||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
engine := NewEngine(ctx, cancel, &signal.MockClient{}, &mgmt.MockClient{}, &EngineConfig{
|
relayMgr := relayClient.NewManager(ctx, "", key.PublicKey().String())
|
||||||
WgIfaceName: "utun101",
|
engine := NewEngine(
|
||||||
WgAddr: "100.64.0.1/24",
|
ctx, cancel,
|
||||||
WgPrivateKey: key,
|
&signal.MockClient{},
|
||||||
WgPort: 33100,
|
&mgmt.MockClient{},
|
||||||
ServerSSHAllowed: true,
|
relayMgr,
|
||||||
}, MobileDependency{}, peer.NewRecorder("https://mgm"))
|
&EngineConfig{
|
||||||
|
WgIfaceName: "utun101",
|
||||||
|
WgAddr: "100.64.0.1/24",
|
||||||
|
WgPrivateKey: key,
|
||||||
|
WgPort: 33100,
|
||||||
|
ServerSSHAllowed: true,
|
||||||
|
},
|
||||||
|
MobileDependency{},
|
||||||
|
peer.NewRecorder("https://mgm"),
|
||||||
|
nil,
|
||||||
|
)
|
||||||
|
|
||||||
engine.dnsServer = &dns.MockServer{
|
engine.dnsServer = &dns.MockServer{
|
||||||
UpdateDNSServerFunc: func(serial uint64, update nbdns.Config) error { return nil },
|
UpdateDNSServerFunc: func(serial uint64, update nbdns.Config) error { return nil },
|
||||||
@@ -171,7 +191,7 @@ func TestEngine_SSH(t *testing.T) {
|
|||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
//time.Sleep(250 * time.Millisecond)
|
// time.Sleep(250 * time.Millisecond)
|
||||||
assert.NotNil(t, engine.sshServer)
|
assert.NotNil(t, engine.sshServer)
|
||||||
assert.Contains(t, sshPeersRemoved, "MNHf3Ma6z6mdLbriAJbqhX7+nM/B71lgw2+91q3LfhU=")
|
assert.Contains(t, sshPeersRemoved, "MNHf3Ma6z6mdLbriAJbqhX7+nM/B71lgw2+91q3LfhU=")
|
||||||
|
|
||||||
@@ -204,21 +224,29 @@ func TestEngine_UpdateNetworkMap(t *testing.T) {
|
|||||||
ctx, cancel := context.WithCancel(context.Background())
|
ctx, cancel := context.WithCancel(context.Background())
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
engine := NewEngine(ctx, cancel, &signal.MockClient{}, &mgmt.MockClient{}, &EngineConfig{
|
relayMgr := relayClient.NewManager(ctx, "", key.PublicKey().String())
|
||||||
WgIfaceName: "utun102",
|
engine := NewEngine(
|
||||||
WgAddr: "100.64.0.1/24",
|
ctx, cancel,
|
||||||
WgPrivateKey: key,
|
&signal.MockClient{},
|
||||||
WgPort: 33100,
|
&mgmt.MockClient{},
|
||||||
}, MobileDependency{}, peer.NewRecorder("https://mgm"))
|
relayMgr,
|
||||||
newNet, err := stdnet.NewNet()
|
&EngineConfig{
|
||||||
if err != nil {
|
WgIfaceName: "utun102",
|
||||||
t.Fatal(err)
|
WgAddr: "100.64.0.1/24",
|
||||||
|
WgPrivateKey: key,
|
||||||
|
WgPort: 33100,
|
||||||
|
},
|
||||||
|
MobileDependency{},
|
||||||
|
peer.NewRecorder("https://mgm"),
|
||||||
|
nil)
|
||||||
|
|
||||||
|
wgIface := &iface.MockWGIface{
|
||||||
|
RemovePeerFunc: func(peerKey string) error {
|
||||||
|
return nil
|
||||||
|
},
|
||||||
}
|
}
|
||||||
engine.wgInterface, err = iface.NewWGIFace("utun102", "100.64.0.1/24", engine.config.WgPort, key.String(), iface.DefaultMTU, newNet, nil)
|
engine.wgInterface = wgIface
|
||||||
if err != nil {
|
engine.routeManager = routemanager.NewManager(ctx, key.PublicKey().String(), time.Minute, engine.wgInterface, engine.statusRecorder, nil)
|
||||||
t.Fatal(err)
|
|
||||||
}
|
|
||||||
engine.routeManager = routemanager.NewManager(ctx, key.PublicKey().String(), engine.wgInterface, engine.statusRecorder, nil)
|
|
||||||
engine.dnsServer = &dns.MockServer{
|
engine.dnsServer = &dns.MockServer{
|
||||||
UpdateDNSServerFunc: func(serial uint64, update nbdns.Config) error { return nil },
|
UpdateDNSServerFunc: func(serial uint64, update nbdns.Config) error { return nil },
|
||||||
}
|
}
|
||||||
@@ -227,6 +255,7 @@ func TestEngine_UpdateNetworkMap(t *testing.T) {
|
|||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
engine.udpMux = bind.NewUniversalUDPMuxDefault(bind.UniversalUDPMuxParams{UDPConn: conn})
|
engine.udpMux = bind.NewUniversalUDPMuxDefault(bind.UniversalUDPMuxParams{UDPConn: conn})
|
||||||
|
engine.ctx = ctx
|
||||||
|
|
||||||
type testCase struct {
|
type testCase struct {
|
||||||
name string
|
name string
|
||||||
@@ -390,7 +419,7 @@ func TestEngine_Sync(t *testing.T) {
|
|||||||
// feed updates to Engine via mocked Management client
|
// feed updates to Engine via mocked Management client
|
||||||
updates := make(chan *mgmtProto.SyncResponse)
|
updates := make(chan *mgmtProto.SyncResponse)
|
||||||
defer close(updates)
|
defer close(updates)
|
||||||
syncFunc := func(msgHandler func(msg *mgmtProto.SyncResponse) error) error {
|
syncFunc := func(ctx context.Context, info *system.Info, msgHandler func(msg *mgmtProto.SyncResponse) error) error {
|
||||||
for msg := range updates {
|
for msg := range updates {
|
||||||
err := msgHandler(msg)
|
err := msgHandler(msg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -399,13 +428,14 @@ func TestEngine_Sync(t *testing.T) {
|
|||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
relayMgr := relayClient.NewManager(ctx, "", key.PublicKey().String())
|
||||||
engine := NewEngine(ctx, cancel, &signal.MockClient{}, &mgmt.MockClient{SyncFunc: syncFunc}, &EngineConfig{
|
engine := NewEngine(ctx, cancel, &signal.MockClient{}, &mgmt.MockClient{SyncFunc: syncFunc}, relayMgr, &EngineConfig{
|
||||||
WgIfaceName: "utun103",
|
WgIfaceName: "utun103",
|
||||||
WgAddr: "100.64.0.1/24",
|
WgAddr: "100.64.0.1/24",
|
||||||
WgPrivateKey: key,
|
WgPrivateKey: key,
|
||||||
WgPort: 33100,
|
WgPort: 33100,
|
||||||
}, MobileDependency{}, peer.NewRecorder("https://mgm"))
|
}, MobileDependency{}, peer.NewRecorder("https://mgm"), nil)
|
||||||
|
engine.ctx = ctx
|
||||||
|
|
||||||
engine.dnsServer = &dns.MockServer{
|
engine.dnsServer = &dns.MockServer{
|
||||||
UpdateDNSServerFunc: func(serial uint64, update nbdns.Config) error { return nil },
|
UpdateDNSServerFunc: func(serial uint64, update nbdns.Config) error { return nil },
|
||||||
@@ -558,17 +588,19 @@ func TestEngine_UpdateNetworkMapWithRoutes(t *testing.T) {
|
|||||||
wgIfaceName := fmt.Sprintf("utun%d", 104+n)
|
wgIfaceName := fmt.Sprintf("utun%d", 104+n)
|
||||||
wgAddr := fmt.Sprintf("100.66.%d.1/24", n)
|
wgAddr := fmt.Sprintf("100.66.%d.1/24", n)
|
||||||
|
|
||||||
engine := NewEngine(ctx, cancel, &signal.MockClient{}, &mgmt.MockClient{}, &EngineConfig{
|
relayMgr := relayClient.NewManager(ctx, "", key.PublicKey().String())
|
||||||
|
engine := NewEngine(ctx, cancel, &signal.MockClient{}, &mgmt.MockClient{}, relayMgr, &EngineConfig{
|
||||||
WgIfaceName: wgIfaceName,
|
WgIfaceName: wgIfaceName,
|
||||||
WgAddr: wgAddr,
|
WgAddr: wgAddr,
|
||||||
WgPrivateKey: key,
|
WgPrivateKey: key,
|
||||||
WgPort: 33100,
|
WgPort: 33100,
|
||||||
}, MobileDependency{}, peer.NewRecorder("https://mgm"))
|
}, MobileDependency{}, peer.NewRecorder("https://mgm"), nil)
|
||||||
|
engine.ctx = ctx
|
||||||
newNet, err := stdnet.NewNet()
|
newNet, err := stdnet.NewNet()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
engine.wgInterface, err = iface.NewWGIFace(wgIfaceName, wgAddr, engine.config.WgPort, key.String(), iface.DefaultMTU, newNet, nil)
|
engine.wgInterface, err = iface.NewWGIFace(wgIfaceName, wgAddr, engine.config.WgPort, key.String(), iface.DefaultMTU, newNet, nil, nil)
|
||||||
assert.NoError(t, err, "shouldn't return error")
|
assert.NoError(t, err, "shouldn't return error")
|
||||||
input := struct {
|
input := struct {
|
||||||
inputSerial uint64
|
inputSerial uint64
|
||||||
@@ -576,10 +608,10 @@ func TestEngine_UpdateNetworkMapWithRoutes(t *testing.T) {
|
|||||||
}{}
|
}{}
|
||||||
|
|
||||||
mockRouteManager := &routemanager.MockManager{
|
mockRouteManager := &routemanager.MockManager{
|
||||||
UpdateRoutesFunc: func(updateSerial uint64, newRoutes []*route.Route) error {
|
UpdateRoutesFunc: func(updateSerial uint64, newRoutes []*route.Route) (map[route.ID]*route.Route, route.HAMap, error) {
|
||||||
input.inputSerial = updateSerial
|
input.inputSerial = updateSerial
|
||||||
input.inputRoutes = newRoutes
|
input.inputRoutes = newRoutes
|
||||||
return testCase.inputErr
|
return nil, nil, testCase.inputErr
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -596,8 +628,8 @@ func TestEngine_UpdateNetworkMapWithRoutes(t *testing.T) {
|
|||||||
err = engine.updateNetworkMap(testCase.networkMap)
|
err = engine.updateNetworkMap(testCase.networkMap)
|
||||||
assert.NoError(t, err, "shouldn't return error")
|
assert.NoError(t, err, "shouldn't return error")
|
||||||
assert.Equal(t, testCase.expectedSerial, input.inputSerial, "serial should match")
|
assert.Equal(t, testCase.expectedSerial, input.inputSerial, "serial should match")
|
||||||
assert.Len(t, input.inputRoutes, testCase.expectedLen, "routes len should match")
|
assert.Len(t, input.inputRoutes, testCase.expectedLen, "clientRoutes len should match")
|
||||||
assert.Equal(t, testCase.expectedRoutes, input.inputRoutes, "routes should match")
|
assert.Equal(t, testCase.expectedRoutes, input.inputRoutes, "clientRoutes should match")
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -727,22 +759,25 @@ func TestEngine_UpdateNetworkMapWithDNSUpdate(t *testing.T) {
|
|||||||
wgIfaceName := fmt.Sprintf("utun%d", 104+n)
|
wgIfaceName := fmt.Sprintf("utun%d", 104+n)
|
||||||
wgAddr := fmt.Sprintf("100.66.%d.1/24", n)
|
wgAddr := fmt.Sprintf("100.66.%d.1/24", n)
|
||||||
|
|
||||||
engine := NewEngine(ctx, cancel, &signal.MockClient{}, &mgmt.MockClient{}, &EngineConfig{
|
relayMgr := relayClient.NewManager(ctx, "", key.PublicKey().String())
|
||||||
|
engine := NewEngine(ctx, cancel, &signal.MockClient{}, &mgmt.MockClient{}, relayMgr, &EngineConfig{
|
||||||
WgIfaceName: wgIfaceName,
|
WgIfaceName: wgIfaceName,
|
||||||
WgAddr: wgAddr,
|
WgAddr: wgAddr,
|
||||||
WgPrivateKey: key,
|
WgPrivateKey: key,
|
||||||
WgPort: 33100,
|
WgPort: 33100,
|
||||||
}, MobileDependency{}, peer.NewRecorder("https://mgm"))
|
}, MobileDependency{}, peer.NewRecorder("https://mgm"), nil)
|
||||||
|
engine.ctx = ctx
|
||||||
|
|
||||||
newNet, err := stdnet.NewNet()
|
newNet, err := stdnet.NewNet()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
}
|
}
|
||||||
engine.wgInterface, err = iface.NewWGIFace(wgIfaceName, wgAddr, 33100, key.String(), iface.DefaultMTU, newNet, nil)
|
engine.wgInterface, err = iface.NewWGIFace(wgIfaceName, wgAddr, 33100, key.String(), iface.DefaultMTU, newNet, nil, nil)
|
||||||
assert.NoError(t, err, "shouldn't return error")
|
assert.NoError(t, err, "shouldn't return error")
|
||||||
|
|
||||||
mockRouteManager := &routemanager.MockManager{
|
mockRouteManager := &routemanager.MockManager{
|
||||||
UpdateRoutesFunc: func(updateSerial uint64, newRoutes []*route.Route) error {
|
UpdateRoutesFunc: func(updateSerial uint64, newRoutes []*route.Route) (map[route.ID]*route.Route, route.HAMap, error) {
|
||||||
return nil
|
return nil, nil, nil
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -803,13 +838,13 @@ func TestEngine_MultiplePeers(t *testing.T) {
|
|||||||
ctx, cancel := context.WithCancel(CtxInitState(context.Background()))
|
ctx, cancel := context.WithCancel(CtxInitState(context.Background()))
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
sigServer, signalAddr, err := startSignal()
|
sigServer, signalAddr, err := startSignal(t)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
defer sigServer.Stop()
|
defer sigServer.Stop()
|
||||||
mgmtServer, mgmtAddr, err := startManagement(dir)
|
mgmtServer, mgmtAddr, err := startManagement(t, dir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
t.Fatal(err)
|
t.Fatal(err)
|
||||||
return
|
return
|
||||||
@@ -1001,10 +1036,15 @@ func createEngine(ctx context.Context, cancel context.CancelFunc, setupKey strin
|
|||||||
WgPort: wgPort,
|
WgPort: wgPort,
|
||||||
}
|
}
|
||||||
|
|
||||||
return NewEngine(ctx, cancel, signalClient, mgmtClient, conf, MobileDependency{}, peer.NewRecorder("https://mgm")), nil
|
relayMgr := relayClient.NewManager(ctx, "", key.PublicKey().String())
|
||||||
|
e, err := NewEngine(ctx, cancel, signalClient, mgmtClient, relayMgr, conf, MobileDependency{}, peer.NewRecorder("https://mgm"), nil), nil
|
||||||
|
e.ctx = ctx
|
||||||
|
return e, err
|
||||||
}
|
}
|
||||||
|
|
||||||
func startSignal() (*grpc.Server, string, error) {
|
func startSignal(t *testing.T) (*grpc.Server, string, error) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
s := grpc.NewServer(grpc.KeepaliveEnforcementPolicy(kaep), grpc.KeepaliveParams(kasp))
|
s := grpc.NewServer(grpc.KeepaliveEnforcementPolicy(kaep), grpc.KeepaliveParams(kasp))
|
||||||
|
|
||||||
lis, err := net.Listen("tcp", "localhost:0")
|
lis, err := net.Listen("tcp", "localhost:0")
|
||||||
@@ -1012,7 +1052,9 @@ func startSignal() (*grpc.Server, string, error) {
|
|||||||
log.Fatalf("failed to listen: %v", err)
|
log.Fatalf("failed to listen: %v", err)
|
||||||
}
|
}
|
||||||
|
|
||||||
proto.RegisterSignalExchangeServer(s, signalServer.NewServer())
|
srv, err := signalServer.NewServer(otel.Meter(""))
|
||||||
|
require.NoError(t, err)
|
||||||
|
proto.RegisterSignalExchangeServer(s, srv)
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
if err = s.Serve(lis); err != nil {
|
if err = s.Serve(lis); err != nil {
|
||||||
@@ -1023,7 +1065,9 @@ func startSignal() (*grpc.Server, string, error) {
|
|||||||
return s, lis.Addr().String(), nil
|
return s, lis.Addr().String(), nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func startManagement(dataDir string) (*grpc.Server, string, error) {
|
func startManagement(t *testing.T, dataDir string) (*grpc.Server, string, error) {
|
||||||
|
t.Helper()
|
||||||
|
|
||||||
config := &server.Config{
|
config := &server.Config{
|
||||||
Stuns: []*server.Host{},
|
Stuns: []*server.Host{},
|
||||||
TURNConfig: &server.TURNConfig{},
|
TURNConfig: &server.TURNConfig{},
|
||||||
@@ -1040,22 +1084,28 @@ func startManagement(dataDir string) (*grpc.Server, string, error) {
|
|||||||
return nil, "", err
|
return nil, "", err
|
||||||
}
|
}
|
||||||
s := grpc.NewServer(grpc.KeepaliveEnforcementPolicy(kaep), grpc.KeepaliveParams(kasp))
|
s := grpc.NewServer(grpc.KeepaliveEnforcementPolicy(kaep), grpc.KeepaliveParams(kasp))
|
||||||
store, err := server.NewStoreFromJson(config.Datadir, nil)
|
|
||||||
|
store, cleanUp, err := server.NewTestStoreFromJson(context.Background(), config.Datadir)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, "", err
|
return nil, "", err
|
||||||
}
|
}
|
||||||
|
t.Cleanup(cleanUp)
|
||||||
|
|
||||||
peersUpdateManager := server.NewPeersUpdateManager(nil)
|
peersUpdateManager := server.NewPeersUpdateManager(nil)
|
||||||
eventStore := &activity.InMemoryEventStore{}
|
eventStore := &activity.InMemoryEventStore{}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, "", err
|
return nil, "", err
|
||||||
}
|
}
|
||||||
accountManager, err := server.BuildManager(store, peersUpdateManager, nil, "", "", eventStore, nil, false)
|
ia, _ := integrations.NewIntegratedValidator(context.Background(), eventStore)
|
||||||
|
accountManager, err := server.BuildManager(context.Background(), store, peersUpdateManager, nil, "", "netbird.selfhosted", eventStore, nil, false, ia)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, "", err
|
return nil, "", err
|
||||||
}
|
}
|
||||||
turnManager := server.NewTimeBasedAuthSecretsManager(peersUpdateManager, config.TURNConfig)
|
rc := &server.RelayConfig{
|
||||||
mgmtServer, err := server.NewServer(config, accountManager, peersUpdateManager, turnManager, nil, nil)
|
Address: "127.0.0.1:1234",
|
||||||
|
}
|
||||||
|
turnManager := server.NewTimeBasedAuthSecretsManager(peersUpdateManager, config.TURNConfig, rc)
|
||||||
|
mgmtServer, err := server.NewServer(context.Background(), config, accountManager, peersUpdateManager, turnManager, nil, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, "", err
|
return nil, "", err
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -68,7 +68,7 @@ func Login(ctx context.Context, config *Config, setupKey string, jwtToken string
|
|||||||
}
|
}
|
||||||
|
|
||||||
serverKey, err := doMgmLogin(ctx, mgmClient, pubSSHKey)
|
serverKey, err := doMgmLogin(ctx, mgmClient, pubSSHKey)
|
||||||
if isRegistrationNeeded(err) {
|
if serverKey != nil && isRegistrationNeeded(err) {
|
||||||
log.Debugf("peer registration required")
|
log.Debugf("peer registration required")
|
||||||
_, err = registerPeer(ctx, *serverKey, mgmClient, setupKey, jwtToken, pubSSHKey)
|
_, err = registerPeer(ctx, *serverKey, mgmClient, setupKey, jwtToken, pubSSHKey)
|
||||||
return err
|
return err
|
||||||
|
|||||||
21
client/internal/networkmonitor/monitor.go
Normal file
21
client/internal/networkmonitor/monitor.go
Normal file
@@ -0,0 +1,21 @@
|
|||||||
|
package networkmonitor
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"sync"
|
||||||
|
)
|
||||||
|
|
||||||
|
var ErrStopped = errors.New("monitor has been stopped")
|
||||||
|
|
||||||
|
// NetworkMonitor watches for changes in network configuration.
|
||||||
|
type NetworkMonitor struct {
|
||||||
|
cancel context.CancelFunc
|
||||||
|
wg sync.WaitGroup
|
||||||
|
mu sync.Mutex
|
||||||
|
}
|
||||||
|
|
||||||
|
// New creates a new network monitor.
|
||||||
|
func New() *NetworkMonitor {
|
||||||
|
return &NetworkMonitor{}
|
||||||
|
}
|
||||||
107
client/internal/networkmonitor/monitor_bsd.go
Normal file
107
client/internal/networkmonitor/monitor_bsd.go
Normal file
@@ -0,0 +1,107 @@
|
|||||||
|
//go:build (darwin && !ios) || dragonfly || freebsd || netbsd || openbsd
|
||||||
|
|
||||||
|
package networkmonitor
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"syscall"
|
||||||
|
"unsafe"
|
||||||
|
|
||||||
|
log "github.com/sirupsen/logrus"
|
||||||
|
"golang.org/x/net/route"
|
||||||
|
"golang.org/x/sys/unix"
|
||||||
|
|
||||||
|
"github.com/netbirdio/netbird/client/internal/routemanager/systemops"
|
||||||
|
)
|
||||||
|
|
||||||
|
func checkChange(ctx context.Context, nexthopv4, nexthopv6 systemops.Nexthop, callback func()) error {
|
||||||
|
fd, err := unix.Socket(syscall.AF_ROUTE, syscall.SOCK_RAW, syscall.AF_UNSPEC)
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("failed to open routing socket: %v", err)
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
err := unix.Close(fd)
|
||||||
|
if err != nil && !errors.Is(err, unix.EBADF) {
|
||||||
|
log.Errorf("Network monitor: failed to close routing socket: %v", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
<-ctx.Done()
|
||||||
|
err := unix.Close(fd)
|
||||||
|
if err != nil && !errors.Is(err, unix.EBADF) {
|
||||||
|
log.Debugf("Network monitor: closed routing socket")
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return ErrStopped
|
||||||
|
default:
|
||||||
|
buf := make([]byte, 2048)
|
||||||
|
n, err := unix.Read(fd, buf)
|
||||||
|
if err != nil {
|
||||||
|
if !errors.Is(err, unix.EBADF) && !errors.Is(err, unix.EINVAL) {
|
||||||
|
log.Errorf("Network monitor: failed to read from routing socket: %v", err)
|
||||||
|
}
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if n < unix.SizeofRtMsghdr {
|
||||||
|
log.Errorf("Network monitor: read from routing socket returned less than expected: %d bytes", n)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
msg := (*unix.RtMsghdr)(unsafe.Pointer(&buf[0]))
|
||||||
|
|
||||||
|
switch msg.Type {
|
||||||
|
// handle route changes
|
||||||
|
case unix.RTM_ADD, syscall.RTM_DELETE:
|
||||||
|
route, err := parseRouteMessage(buf[:n])
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("Network monitor: error parsing routing message: %v", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if !route.Dst.Addr().IsUnspecified() {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
intf := "<nil>"
|
||||||
|
if route.Interface != nil {
|
||||||
|
intf = route.Interface.Name
|
||||||
|
}
|
||||||
|
switch msg.Type {
|
||||||
|
case unix.RTM_ADD:
|
||||||
|
log.Infof("Network monitor: default route changed: via %s, interface %s", route.Gw, intf)
|
||||||
|
go callback()
|
||||||
|
case unix.RTM_DELETE:
|
||||||
|
if nexthopv4.Intf != nil && route.Gw.Compare(nexthopv4.IP) == 0 || nexthopv6.Intf != nil && route.Gw.Compare(nexthopv6.IP) == 0 {
|
||||||
|
log.Infof("Network monitor: default route removed: via %s, interface %s", route.Gw, intf)
|
||||||
|
go callback()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func parseRouteMessage(buf []byte) (*systemops.Route, error) {
|
||||||
|
msgs, err := route.ParseRIB(route.RIBTypeRoute, buf)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("parse RIB: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(msgs) != 1 {
|
||||||
|
return nil, fmt.Errorf("unexpected RIB message msgs: %v", msgs)
|
||||||
|
}
|
||||||
|
|
||||||
|
msg, ok := msgs[0].(*route.RouteMessage)
|
||||||
|
if !ok {
|
||||||
|
return nil, fmt.Errorf("unexpected RIB message type: %T", msgs[0])
|
||||||
|
}
|
||||||
|
|
||||||
|
return systemops.MsgToRoute(msg)
|
||||||
|
}
|
||||||
82
client/internal/networkmonitor/monitor_generic.go
Normal file
82
client/internal/networkmonitor/monitor_generic.go
Normal file
@@ -0,0 +1,82 @@
|
|||||||
|
//go:build !ios && !android
|
||||||
|
|
||||||
|
package networkmonitor
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"net/netip"
|
||||||
|
"runtime/debug"
|
||||||
|
|
||||||
|
"github.com/cenkalti/backoff/v4"
|
||||||
|
log "github.com/sirupsen/logrus"
|
||||||
|
|
||||||
|
"github.com/netbirdio/netbird/client/internal/routemanager/systemops"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Start begins monitoring network changes. When a change is detected, it calls the callback asynchronously and returns.
|
||||||
|
func (nw *NetworkMonitor) Start(ctx context.Context, callback func()) (err error) {
|
||||||
|
if ctx.Err() != nil {
|
||||||
|
return ctx.Err()
|
||||||
|
}
|
||||||
|
|
||||||
|
nw.mu.Lock()
|
||||||
|
ctx, nw.cancel = context.WithCancel(ctx)
|
||||||
|
nw.mu.Unlock()
|
||||||
|
|
||||||
|
nw.wg.Add(1)
|
||||||
|
defer nw.wg.Done()
|
||||||
|
|
||||||
|
var nexthop4, nexthop6 systemops.Nexthop
|
||||||
|
|
||||||
|
operation := func() error {
|
||||||
|
var errv4, errv6 error
|
||||||
|
nexthop4, errv4 = systemops.GetNextHop(netip.IPv4Unspecified())
|
||||||
|
nexthop6, errv6 = systemops.GetNextHop(netip.IPv6Unspecified())
|
||||||
|
|
||||||
|
if errv4 != nil && errv6 != nil {
|
||||||
|
return errors.New("failed to get default next hops")
|
||||||
|
}
|
||||||
|
|
||||||
|
if errv4 == nil {
|
||||||
|
log.Debugf("Network monitor: IPv4 default route: %s, interface: %s", nexthop4.IP, nexthop4.Intf.Name)
|
||||||
|
}
|
||||||
|
if errv6 == nil {
|
||||||
|
log.Debugf("Network monitor: IPv6 default route: %s, interface: %s", nexthop6.IP, nexthop6.Intf.Name)
|
||||||
|
}
|
||||||
|
|
||||||
|
// continue if either route was found
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
expBackOff := backoff.WithContext(backoff.NewExponentialBackOff(), ctx)
|
||||||
|
|
||||||
|
if err := backoff.Retry(operation, expBackOff); err != nil {
|
||||||
|
return fmt.Errorf("failed to get default next hops: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// recover in case sys ops panic
|
||||||
|
defer func() {
|
||||||
|
if r := recover(); r != nil {
|
||||||
|
err = fmt.Errorf("panic occurred: %v, stack trace: %s", r, string(debug.Stack()))
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
if err := checkChange(ctx, nexthop4, nexthop6, callback); err != nil {
|
||||||
|
return fmt.Errorf("check change: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Stop stops the network monitor.
|
||||||
|
func (nw *NetworkMonitor) Stop() {
|
||||||
|
nw.mu.Lock()
|
||||||
|
defer nw.mu.Unlock()
|
||||||
|
|
||||||
|
if nw.cancel != nil {
|
||||||
|
nw.cancel()
|
||||||
|
nw.wg.Wait()
|
||||||
|
}
|
||||||
|
}
|
||||||
57
client/internal/networkmonitor/monitor_linux.go
Normal file
57
client/internal/networkmonitor/monitor_linux.go
Normal file
@@ -0,0 +1,57 @@
|
|||||||
|
//go:build !android
|
||||||
|
|
||||||
|
package networkmonitor
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"syscall"
|
||||||
|
|
||||||
|
log "github.com/sirupsen/logrus"
|
||||||
|
"github.com/vishvananda/netlink"
|
||||||
|
|
||||||
|
"github.com/netbirdio/netbird/client/internal/routemanager/systemops"
|
||||||
|
)
|
||||||
|
|
||||||
|
func checkChange(ctx context.Context, nexthopv4, nexthopv6 systemops.Nexthop, callback func()) error {
|
||||||
|
if nexthopv4.Intf == nil && nexthopv6.Intf == nil {
|
||||||
|
return errors.New("no interfaces available")
|
||||||
|
}
|
||||||
|
|
||||||
|
done := make(chan struct{})
|
||||||
|
defer close(done)
|
||||||
|
|
||||||
|
routeChan := make(chan netlink.RouteUpdate)
|
||||||
|
if err := netlink.RouteSubscribe(routeChan, done); err != nil {
|
||||||
|
return fmt.Errorf("subscribe to route updates: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Info("Network monitor: started")
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return ErrStopped
|
||||||
|
|
||||||
|
// handle route changes
|
||||||
|
case route := <-routeChan:
|
||||||
|
// default route and main table
|
||||||
|
if route.Dst != nil || route.Table != syscall.RT_TABLE_MAIN {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
switch route.Type {
|
||||||
|
// triggered on added/replaced routes
|
||||||
|
case syscall.RTM_NEWROUTE:
|
||||||
|
log.Infof("Network monitor: default route changed: via %s, interface %d", route.Gw, route.LinkIndex)
|
||||||
|
go callback()
|
||||||
|
return nil
|
||||||
|
case syscall.RTM_DELROUTE:
|
||||||
|
if nexthopv4.Intf != nil && route.Gw.Equal(nexthopv4.IP.AsSlice()) || nexthopv6.Intf != nil && route.Gw.Equal(nexthopv6.IP.AsSlice()) {
|
||||||
|
log.Infof("Network monitor: default route removed: via %s, interface %d", route.Gw, route.LinkIndex)
|
||||||
|
go callback()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
12
client/internal/networkmonitor/monitor_mobile.go
Normal file
12
client/internal/networkmonitor/monitor_mobile.go
Normal file
@@ -0,0 +1,12 @@
|
|||||||
|
//go:build ios || android
|
||||||
|
|
||||||
|
package networkmonitor
|
||||||
|
|
||||||
|
import "context"
|
||||||
|
|
||||||
|
func (nw *NetworkMonitor) Start(context.Context, func()) error {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (nw *NetworkMonitor) Stop() {
|
||||||
|
}
|
||||||
254
client/internal/networkmonitor/monitor_windows.go
Normal file
254
client/internal/networkmonitor/monitor_windows.go
Normal file
@@ -0,0 +1,254 @@
|
|||||||
|
package networkmonitor
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"net/netip"
|
||||||
|
"strings"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
log "github.com/sirupsen/logrus"
|
||||||
|
|
||||||
|
"github.com/netbirdio/netbird/client/internal/routemanager/systemops"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
unreachable = 0
|
||||||
|
incomplete = 1
|
||||||
|
probe = 2
|
||||||
|
delay = 3
|
||||||
|
stale = 4
|
||||||
|
reachable = 5
|
||||||
|
permanent = 6
|
||||||
|
tbd = 7
|
||||||
|
)
|
||||||
|
|
||||||
|
const interval = 10 * time.Second
|
||||||
|
|
||||||
|
func checkChange(ctx context.Context, nexthopv4, nexthopv6 systemops.Nexthop, callback func()) error {
|
||||||
|
var neighborv4, neighborv6 *systemops.Neighbor
|
||||||
|
{
|
||||||
|
initialNeighbors, err := getNeighbors()
|
||||||
|
if err != nil {
|
||||||
|
return fmt.Errorf("get neighbors: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
neighborv4 = assignNeighbor(nexthopv4, initialNeighbors)
|
||||||
|
neighborv6 = assignNeighbor(nexthopv6, initialNeighbors)
|
||||||
|
}
|
||||||
|
log.Debugf("Network monitor: initial IPv4 neighbor: %v, IPv6 neighbor: %v", neighborv4, neighborv6)
|
||||||
|
|
||||||
|
ticker := time.NewTicker(interval)
|
||||||
|
defer ticker.Stop()
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
return ErrStopped
|
||||||
|
case <-ticker.C:
|
||||||
|
if changed(nexthopv4, neighborv4, nexthopv6, neighborv6) {
|
||||||
|
go callback()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func assignNeighbor(nexthop systemops.Nexthop, initialNeighbors map[netip.Addr]systemops.Neighbor) *systemops.Neighbor {
|
||||||
|
if n, ok := initialNeighbors[nexthop.IP]; ok &&
|
||||||
|
n.State != unreachable &&
|
||||||
|
n.State != incomplete &&
|
||||||
|
n.State != tbd {
|
||||||
|
return &n
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func changed(
|
||||||
|
nexthopv4 systemops.Nexthop,
|
||||||
|
neighborv4 *systemops.Neighbor,
|
||||||
|
nexthopv6 systemops.Nexthop,
|
||||||
|
neighborv6 *systemops.Neighbor,
|
||||||
|
) bool {
|
||||||
|
neighbors, err := getNeighbors()
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("network monitor: error fetching current neighbors: %v", err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
if neighborChanged(nexthopv4, neighborv4, neighbors) || neighborChanged(nexthopv6, neighborv6, neighbors) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
routes, err := getRoutes()
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("network monitor: error fetching current routes: %v", err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if routeChanged(nexthopv4, nexthopv4.Intf, routes) || routeChanged(nexthopv6, nexthopv6.Intf, routes) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// routeChanged checks if the default routes still point to our nexthop/interface
|
||||||
|
func routeChanged(nexthop systemops.Nexthop, intf *net.Interface, routes []systemops.Route) bool {
|
||||||
|
if !nexthop.IP.IsValid() {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
if isSoftInterface(nexthop.Intf.Name) {
|
||||||
|
log.Tracef("network monitor: ignoring default route change for soft interface %s", nexthop.Intf.Name)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
unspec := getUnspecifiedPrefix(nexthop.IP)
|
||||||
|
defaultRoutes, foundMatchingRoute := processRoutes(nexthop, intf, routes, unspec)
|
||||||
|
|
||||||
|
log.Tracef("network monitor: all default routes:\n%s", strings.Join(defaultRoutes, "\n"))
|
||||||
|
|
||||||
|
if !foundMatchingRoute {
|
||||||
|
logRouteChange(nexthop.IP, intf)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func getUnspecifiedPrefix(ip netip.Addr) netip.Prefix {
|
||||||
|
if ip.Is6() {
|
||||||
|
return netip.PrefixFrom(netip.IPv6Unspecified(), 0)
|
||||||
|
}
|
||||||
|
return netip.PrefixFrom(netip.IPv4Unspecified(), 0)
|
||||||
|
}
|
||||||
|
|
||||||
|
func processRoutes(nexthop systemops.Nexthop, nexthopIntf *net.Interface, routes []systemops.Route, unspec netip.Prefix) ([]string, bool) {
|
||||||
|
var defaultRoutes []string
|
||||||
|
foundMatchingRoute := false
|
||||||
|
|
||||||
|
for _, r := range routes {
|
||||||
|
if r.Destination == unspec {
|
||||||
|
routeInfo := formatRouteInfo(r)
|
||||||
|
defaultRoutes = append(defaultRoutes, routeInfo)
|
||||||
|
|
||||||
|
if r.Nexthop == nexthop.IP && compareIntf(r.Interface, nexthopIntf) == 0 {
|
||||||
|
foundMatchingRoute = true
|
||||||
|
log.Debugf("network monitor: found matching default route: %s", routeInfo)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return defaultRoutes, foundMatchingRoute
|
||||||
|
}
|
||||||
|
|
||||||
|
func formatRouteInfo(r systemops.Route) string {
|
||||||
|
newIntf := "<nil>"
|
||||||
|
if r.Interface != nil {
|
||||||
|
newIntf = r.Interface.Name
|
||||||
|
}
|
||||||
|
return fmt.Sprintf("Nexthop: %s, Interface: %s", r.Nexthop, newIntf)
|
||||||
|
}
|
||||||
|
|
||||||
|
func logRouteChange(ip netip.Addr, intf *net.Interface) {
|
||||||
|
oldIntf := "<nil>"
|
||||||
|
if intf != nil {
|
||||||
|
oldIntf = intf.Name
|
||||||
|
}
|
||||||
|
log.Infof("network monitor: default route for %s (%s) is gone or changed", ip, oldIntf)
|
||||||
|
}
|
||||||
|
|
||||||
|
func neighborChanged(nexthop systemops.Nexthop, neighbor *systemops.Neighbor, neighbors map[netip.Addr]systemops.Neighbor) bool {
|
||||||
|
if neighbor == nil {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: consider non-local nexthops, e.g. on point-to-point interfaces
|
||||||
|
if n, ok := neighbors[nexthop.IP]; ok {
|
||||||
|
if n.State == unreachable || n.State == incomplete {
|
||||||
|
log.Infof("network monitor: neighbor %s (%s) is not reachable: %s", neighbor.IPAddress, neighbor.LinkLayerAddress, stateFromInt(n.State))
|
||||||
|
return true
|
||||||
|
} else if n.InterfaceIndex != neighbor.InterfaceIndex {
|
||||||
|
log.Infof(
|
||||||
|
"network monitor: neighbor %s (%s) changed interface from '%s' (%d) to '%s' (%d): %s",
|
||||||
|
neighbor.IPAddress,
|
||||||
|
neighbor.LinkLayerAddress,
|
||||||
|
neighbor.InterfaceAlias,
|
||||||
|
neighbor.InterfaceIndex,
|
||||||
|
n.InterfaceAlias,
|
||||||
|
n.InterfaceIndex,
|
||||||
|
stateFromInt(n.State),
|
||||||
|
)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
log.Infof("network monitor: neighbor %s (%s) is gone", neighbor.IPAddress, neighbor.LinkLayerAddress)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func getNeighbors() (map[netip.Addr]systemops.Neighbor, error) {
|
||||||
|
entries, err := systemops.GetNeighbors()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("get neighbors: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
neighbours := make(map[netip.Addr]systemops.Neighbor, len(entries))
|
||||||
|
for _, entry := range entries {
|
||||||
|
neighbours[entry.IPAddress] = entry
|
||||||
|
}
|
||||||
|
|
||||||
|
return neighbours, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getRoutes() ([]systemops.Route, error) {
|
||||||
|
entries, err := systemops.GetRoutes()
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("get routes: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return entries, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func stateFromInt(state uint8) string {
|
||||||
|
switch state {
|
||||||
|
case unreachable:
|
||||||
|
return "unreachable"
|
||||||
|
case incomplete:
|
||||||
|
return "incomplete"
|
||||||
|
case probe:
|
||||||
|
return "probe"
|
||||||
|
case delay:
|
||||||
|
return "delay"
|
||||||
|
case stale:
|
||||||
|
return "stale"
|
||||||
|
case reachable:
|
||||||
|
return "reachable"
|
||||||
|
case permanent:
|
||||||
|
return "permanent"
|
||||||
|
case tbd:
|
||||||
|
return "tbd"
|
||||||
|
default:
|
||||||
|
return "unknown"
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func compareIntf(a, b *net.Interface) int {
|
||||||
|
switch {
|
||||||
|
case a == nil && b == nil:
|
||||||
|
return 0
|
||||||
|
case a == nil:
|
||||||
|
return -1
|
||||||
|
case b == nil:
|
||||||
|
return 1
|
||||||
|
default:
|
||||||
|
return a.Index - b.Index
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func isSoftInterface(name string) bool {
|
||||||
|
return strings.Contains(strings.ToLower(name), "isatap") || strings.Contains(strings.ToLower(name), "teredo")
|
||||||
|
}
|
||||||
File diff suppressed because it is too large
Load Diff
@@ -1,25 +1,34 @@
|
|||||||
package peer
|
package peer
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
|
"os"
|
||||||
"sync"
|
"sync"
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/magiconair/properties/assert"
|
"github.com/magiconair/properties/assert"
|
||||||
"github.com/pion/stun/v2"
|
|
||||||
|
|
||||||
"github.com/netbirdio/netbird/client/internal/stdnet"
|
"github.com/netbirdio/netbird/client/internal/stdnet"
|
||||||
"github.com/netbirdio/netbird/client/internal/wgproxy"
|
"github.com/netbirdio/netbird/client/internal/wgproxy"
|
||||||
"github.com/netbirdio/netbird/iface"
|
"github.com/netbirdio/netbird/iface"
|
||||||
|
"github.com/netbirdio/netbird/util"
|
||||||
)
|
)
|
||||||
|
|
||||||
var connConf = ConnConfig{
|
var connConf = ConnConfig{
|
||||||
Key: "LLHf3Ma6z6mdLbriAJbqhX7+nM/B71lgw2+91q3LfhU=",
|
Key: "LLHf3Ma6z6mdLbriAJbqhX7+nM/B71lgw2+91q3LfhU=",
|
||||||
LocalKey: "RRHf3Ma6z6mdLbriAJbqhX7+nM/B71lgw2+91q3LfhU=",
|
LocalKey: "RRHf3Ma6z6mdLbriAJbqhX7+nM/B71lgw2+91q3LfhU=",
|
||||||
StunTurn: []*stun.URI{},
|
Timeout: time.Second,
|
||||||
InterfaceBlackList: nil,
|
LocalWgPort: 51820,
|
||||||
Timeout: time.Second,
|
ICEConfig: ICEConfig{
|
||||||
LocalWgPort: 51820,
|
InterfaceBlackList: nil,
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestMain(m *testing.M) {
|
||||||
|
_ = util.InitLog("trace", "console")
|
||||||
|
code := m.Run()
|
||||||
|
os.Exit(code)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestNewConn_interfaceFilter(t *testing.T) {
|
func TestNewConn_interfaceFilter(t *testing.T) {
|
||||||
@@ -35,11 +44,11 @@ func TestNewConn_interfaceFilter(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestConn_GetKey(t *testing.T) {
|
func TestConn_GetKey(t *testing.T) {
|
||||||
wgProxyFactory := wgproxy.NewFactory(connConf.LocalWgPort)
|
wgProxyFactory := wgproxy.NewFactory(context.Background(), false, connConf.LocalWgPort)
|
||||||
defer func() {
|
defer func() {
|
||||||
_ = wgProxyFactory.Free()
|
_ = wgProxyFactory.Free()
|
||||||
}()
|
}()
|
||||||
conn, err := NewConn(connConf, nil, wgProxyFactory, nil, nil)
|
conn, err := NewConn(context.Background(), connConf, nil, wgProxyFactory, nil, nil, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -50,11 +59,11 @@ func TestConn_GetKey(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestConn_OnRemoteOffer(t *testing.T) {
|
func TestConn_OnRemoteOffer(t *testing.T) {
|
||||||
wgProxyFactory := wgproxy.NewFactory(connConf.LocalWgPort)
|
wgProxyFactory := wgproxy.NewFactory(context.Background(), false, connConf.LocalWgPort)
|
||||||
defer func() {
|
defer func() {
|
||||||
_ = wgProxyFactory.Free()
|
_ = wgProxyFactory.Free()
|
||||||
}()
|
}()
|
||||||
conn, err := NewConn(connConf, NewRecorder("https://mgm"), wgProxyFactory, nil, nil)
|
conn, err := NewConn(context.Background(), connConf, NewRecorder("https://mgm"), wgProxyFactory, nil, nil, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -62,7 +71,7 @@ func TestConn_OnRemoteOffer(t *testing.T) {
|
|||||||
wg := sync.WaitGroup{}
|
wg := sync.WaitGroup{}
|
||||||
wg.Add(2)
|
wg.Add(2)
|
||||||
go func() {
|
go func() {
|
||||||
<-conn.remoteOffersCh
|
<-conn.handshaker.remoteOffersCh
|
||||||
wg.Done()
|
wg.Done()
|
||||||
}()
|
}()
|
||||||
|
|
||||||
@@ -87,11 +96,11 @@ func TestConn_OnRemoteOffer(t *testing.T) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func TestConn_OnRemoteAnswer(t *testing.T) {
|
func TestConn_OnRemoteAnswer(t *testing.T) {
|
||||||
wgProxyFactory := wgproxy.NewFactory(connConf.LocalWgPort)
|
wgProxyFactory := wgproxy.NewFactory(context.Background(), false, connConf.LocalWgPort)
|
||||||
defer func() {
|
defer func() {
|
||||||
_ = wgProxyFactory.Free()
|
_ = wgProxyFactory.Free()
|
||||||
}()
|
}()
|
||||||
conn, err := NewConn(connConf, NewRecorder("https://mgm"), wgProxyFactory, nil, nil)
|
conn, err := NewConn(context.Background(), connConf, NewRecorder("https://mgm"), wgProxyFactory, nil, nil, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@@ -99,7 +108,7 @@ func TestConn_OnRemoteAnswer(t *testing.T) {
|
|||||||
wg := sync.WaitGroup{}
|
wg := sync.WaitGroup{}
|
||||||
wg.Add(2)
|
wg.Add(2)
|
||||||
go func() {
|
go func() {
|
||||||
<-conn.remoteAnswerCh
|
<-conn.handshaker.remoteAnswerCh
|
||||||
wg.Done()
|
wg.Done()
|
||||||
}()
|
}()
|
||||||
|
|
||||||
@@ -123,62 +132,37 @@ func TestConn_OnRemoteAnswer(t *testing.T) {
|
|||||||
wg.Wait()
|
wg.Wait()
|
||||||
}
|
}
|
||||||
func TestConn_Status(t *testing.T) {
|
func TestConn_Status(t *testing.T) {
|
||||||
wgProxyFactory := wgproxy.NewFactory(connConf.LocalWgPort)
|
wgProxyFactory := wgproxy.NewFactory(context.Background(), false, connConf.LocalWgPort)
|
||||||
defer func() {
|
defer func() {
|
||||||
_ = wgProxyFactory.Free()
|
_ = wgProxyFactory.Free()
|
||||||
}()
|
}()
|
||||||
conn, err := NewConn(connConf, NewRecorder("https://mgm"), wgProxyFactory, nil, nil)
|
conn, err := NewConn(context.Background(), connConf, NewRecorder("https://mgm"), wgProxyFactory, nil, nil, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
tables := []struct {
|
tables := []struct {
|
||||||
name string
|
name string
|
||||||
status ConnStatus
|
statusIce ConnStatus
|
||||||
want ConnStatus
|
statusRelay ConnStatus
|
||||||
|
want ConnStatus
|
||||||
}{
|
}{
|
||||||
{"StatusConnected", StatusConnected, StatusConnected},
|
{"StatusConnected", StatusConnected, StatusConnected, StatusConnected},
|
||||||
{"StatusDisconnected", StatusDisconnected, StatusDisconnected},
|
{"StatusDisconnected", StatusDisconnected, StatusDisconnected, StatusDisconnected},
|
||||||
{"StatusConnecting", StatusConnecting, StatusConnecting},
|
{"StatusConnecting", StatusConnecting, StatusConnecting, StatusConnecting},
|
||||||
|
{"StatusConnectingIce", StatusConnecting, StatusDisconnected, StatusConnecting},
|
||||||
|
{"StatusConnectingIceAlternative", StatusConnecting, StatusConnected, StatusConnected},
|
||||||
|
{"StatusConnectingRelay", StatusDisconnected, StatusConnecting, StatusConnecting},
|
||||||
|
{"StatusConnectingRelayAlternative", StatusConnected, StatusConnecting, StatusConnected},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, table := range tables {
|
for _, table := range tables {
|
||||||
t.Run(table.name, func(t *testing.T) {
|
t.Run(table.name, func(t *testing.T) {
|
||||||
conn.status = table.status
|
conn.statusICE = table.statusIce
|
||||||
|
conn.statusRelay = table.statusRelay
|
||||||
|
|
||||||
got := conn.Status()
|
got := conn.Status()
|
||||||
assert.Equal(t, got, table.want, "they should be equal")
|
assert.Equal(t, got, table.want, "they should be equal")
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestConn_Close(t *testing.T) {
|
|
||||||
wgProxyFactory := wgproxy.NewFactory(connConf.LocalWgPort)
|
|
||||||
defer func() {
|
|
||||||
_ = wgProxyFactory.Free()
|
|
||||||
}()
|
|
||||||
conn, err := NewConn(connConf, NewRecorder("https://mgm"), wgProxyFactory, nil, nil)
|
|
||||||
if err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
wg := sync.WaitGroup{}
|
|
||||||
wg.Add(1)
|
|
||||||
go func() {
|
|
||||||
<-conn.closeCh
|
|
||||||
wg.Done()
|
|
||||||
}()
|
|
||||||
|
|
||||||
go func() {
|
|
||||||
for {
|
|
||||||
err := conn.Close()
|
|
||||||
if err != nil {
|
|
||||||
continue
|
|
||||||
} else {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
wg.Wait()
|
|
||||||
}
|
|
||||||
|
|||||||
@@ -10,9 +10,10 @@ import (
|
|||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
envICEKeepAliveIntervalSec = "NB_ICE_KEEP_ALIVE_INTERVAL_SEC"
|
envICEKeepAliveIntervalSec = "NB_ICE_KEEP_ALIVE_INTERVAL_SEC"
|
||||||
envICEDisconnectedTimeoutSec = "NB_ICE_DISCONNECTED_TIMEOUT_SEC"
|
envICEDisconnectedTimeoutSec = "NB_ICE_DISCONNECTED_TIMEOUT_SEC"
|
||||||
envICEForceRelayConn = "NB_ICE_FORCE_RELAY_CONN"
|
envICERelayAcceptanceMinWaitSec = "NB_ICE_RELAY_ACCEPTANCE_MIN_WAIT_SEC"
|
||||||
|
envICEForceRelayConn = "NB_ICE_FORCE_RELAY_CONN"
|
||||||
)
|
)
|
||||||
|
|
||||||
func iceKeepAlive() time.Duration {
|
func iceKeepAlive() time.Duration {
|
||||||
@@ -21,7 +22,7 @@ func iceKeepAlive() time.Duration {
|
|||||||
return iceKeepAliveDefault
|
return iceKeepAliveDefault
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Debugf("setting ICE keep alive interval to %s seconds", keepAliveEnv)
|
log.Infof("setting ICE keep alive interval to %s seconds", keepAliveEnv)
|
||||||
keepAliveEnvSec, err := strconv.Atoi(keepAliveEnv)
|
keepAliveEnvSec, err := strconv.Atoi(keepAliveEnv)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warnf("invalid value %s set for %s, using default %v", keepAliveEnv, envICEKeepAliveIntervalSec, iceKeepAliveDefault)
|
log.Warnf("invalid value %s set for %s, using default %v", keepAliveEnv, envICEKeepAliveIntervalSec, iceKeepAliveDefault)
|
||||||
@@ -37,7 +38,7 @@ func iceDisconnectedTimeout() time.Duration {
|
|||||||
return iceDisconnectedTimeoutDefault
|
return iceDisconnectedTimeoutDefault
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Debugf("setting ICE disconnected timeout to %s seconds", disconnectedTimeoutEnv)
|
log.Infof("setting ICE disconnected timeout to %s seconds", disconnectedTimeoutEnv)
|
||||||
disconnectedTimeoutSec, err := strconv.Atoi(disconnectedTimeoutEnv)
|
disconnectedTimeoutSec, err := strconv.Atoi(disconnectedTimeoutEnv)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Warnf("invalid value %s set for %s, using default %v", disconnectedTimeoutEnv, envICEDisconnectedTimeoutSec, iceDisconnectedTimeoutDefault)
|
log.Warnf("invalid value %s set for %s, using default %v", disconnectedTimeoutEnv, envICEDisconnectedTimeoutSec, iceDisconnectedTimeoutDefault)
|
||||||
@@ -47,6 +48,22 @@ func iceDisconnectedTimeout() time.Duration {
|
|||||||
return time.Duration(disconnectedTimeoutSec) * time.Second
|
return time.Duration(disconnectedTimeoutSec) * time.Second
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func iceRelayAcceptanceMinWait() time.Duration {
|
||||||
|
iceRelayAcceptanceMinWaitEnv := os.Getenv(envICERelayAcceptanceMinWaitSec)
|
||||||
|
if iceRelayAcceptanceMinWaitEnv == "" {
|
||||||
|
return iceRelayAcceptanceMinWaitDefault
|
||||||
|
}
|
||||||
|
|
||||||
|
log.Infof("setting ICE relay acceptance min wait to %s seconds", iceRelayAcceptanceMinWaitEnv)
|
||||||
|
disconnectedTimeoutSec, err := strconv.Atoi(iceRelayAcceptanceMinWaitEnv)
|
||||||
|
if err != nil {
|
||||||
|
log.Warnf("invalid value %s set for %s, using default %v", iceRelayAcceptanceMinWaitEnv, envICERelayAcceptanceMinWaitSec, iceRelayAcceptanceMinWaitDefault)
|
||||||
|
return iceRelayAcceptanceMinWaitDefault
|
||||||
|
}
|
||||||
|
|
||||||
|
return time.Duration(disconnectedTimeoutSec) * time.Second
|
||||||
|
}
|
||||||
|
|
||||||
func hasICEForceRelayConn() bool {
|
func hasICEForceRelayConn() bool {
|
||||||
disconnectedTimeoutEnv := os.Getenv(envICEForceRelayConn)
|
disconnectedTimeoutEnv := os.Getenv(envICEForceRelayConn)
|
||||||
return strings.ToLower(disconnectedTimeoutEnv) == "true"
|
return strings.ToLower(disconnectedTimeoutEnv) == "true"
|
||||||
|
|||||||
191
client/internal/peer/handshaker.go
Normal file
191
client/internal/peer/handshaker.go
Normal file
@@ -0,0 +1,191 @@
|
|||||||
|
package peer
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
log "github.com/sirupsen/logrus"
|
||||||
|
|
||||||
|
"github.com/netbirdio/netbird/version"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
ErrSignalIsNotReady = errors.New("signal is not ready")
|
||||||
|
)
|
||||||
|
|
||||||
|
// IceCredentials ICE protocol credentials struct
|
||||||
|
type IceCredentials struct {
|
||||||
|
UFrag string
|
||||||
|
Pwd string
|
||||||
|
}
|
||||||
|
|
||||||
|
// OfferAnswer represents a session establishment offer or answer
|
||||||
|
type OfferAnswer struct {
|
||||||
|
IceCredentials IceCredentials
|
||||||
|
// WgListenPort is a remote WireGuard listen port.
|
||||||
|
// This field is used when establishing a direct WireGuard connection without any proxy.
|
||||||
|
// We can set the remote peer's endpoint with this port.
|
||||||
|
WgListenPort int
|
||||||
|
|
||||||
|
// Version of NetBird Agent
|
||||||
|
Version string
|
||||||
|
// RosenpassPubKey is the Rosenpass public key of the remote peer when receiving this message
|
||||||
|
// This value is the local Rosenpass server public key when sending the message
|
||||||
|
RosenpassPubKey []byte
|
||||||
|
// RosenpassAddr is the Rosenpass server address (IP:port) of the remote peer when receiving this message
|
||||||
|
// This value is the local Rosenpass server address when sending the message
|
||||||
|
RosenpassAddr string
|
||||||
|
|
||||||
|
// relay server address
|
||||||
|
RelaySrvAddress string
|
||||||
|
}
|
||||||
|
|
||||||
|
type Handshaker struct {
|
||||||
|
mu sync.Mutex
|
||||||
|
ctx context.Context
|
||||||
|
log *log.Entry
|
||||||
|
config ConnConfig
|
||||||
|
signaler *Signaler
|
||||||
|
ice *WorkerICE
|
||||||
|
relay *WorkerRelay
|
||||||
|
onNewOfferListeners []func(*OfferAnswer)
|
||||||
|
|
||||||
|
// remoteOffersCh is a channel used to wait for remote credentials to proceed with the connection
|
||||||
|
remoteOffersCh chan OfferAnswer
|
||||||
|
// remoteAnswerCh is a channel used to wait for remote credentials answer (confirmation of our offer) to proceed with the connection
|
||||||
|
remoteAnswerCh chan OfferAnswer
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewHandshaker(ctx context.Context, log *log.Entry, config ConnConfig, signaler *Signaler, ice *WorkerICE, relay *WorkerRelay) *Handshaker {
|
||||||
|
return &Handshaker{
|
||||||
|
ctx: ctx,
|
||||||
|
log: log,
|
||||||
|
config: config,
|
||||||
|
signaler: signaler,
|
||||||
|
ice: ice,
|
||||||
|
relay: relay,
|
||||||
|
remoteOffersCh: make(chan OfferAnswer),
|
||||||
|
remoteAnswerCh: make(chan OfferAnswer),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Handshaker) AddOnNewOfferListener(offer func(remoteOfferAnswer *OfferAnswer)) {
|
||||||
|
h.onNewOfferListeners = append(h.onNewOfferListeners, offer)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Handshaker) Listen() {
|
||||||
|
for {
|
||||||
|
h.log.Debugf("wait for remote offer confirmation")
|
||||||
|
remoteOfferAnswer, err := h.waitForRemoteOfferConfirmation()
|
||||||
|
if err != nil {
|
||||||
|
if _, ok := err.(*ConnectionClosedError); ok {
|
||||||
|
h.log.Tracef("stop handshaker")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
h.log.Errorf("failed to received remote offer confirmation: %s", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
h.log.Debugf("received connection confirmation, running version %s and with remote WireGuard listen port %d", remoteOfferAnswer.Version, remoteOfferAnswer.WgListenPort)
|
||||||
|
for _, listener := range h.onNewOfferListeners {
|
||||||
|
go listener(remoteOfferAnswer)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Handshaker) SendOffer() error {
|
||||||
|
h.mu.Lock()
|
||||||
|
defer h.mu.Unlock()
|
||||||
|
return h.sendOffer()
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnRemoteOffer handles an offer from the remote peer and returns true if the message was accepted, false otherwise
|
||||||
|
// doesn't block, discards the message if connection wasn't ready
|
||||||
|
func (h *Handshaker) OnRemoteOffer(offer OfferAnswer) bool {
|
||||||
|
select {
|
||||||
|
case h.remoteOffersCh <- offer:
|
||||||
|
return true
|
||||||
|
default:
|
||||||
|
h.log.Debugf("OnRemoteOffer skipping message because is not ready")
|
||||||
|
// connection might not be ready yet to receive so we ignore the message
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnRemoteAnswer handles an offer from the remote peer and returns true if the message was accepted, false otherwise
|
||||||
|
// doesn't block, discards the message if connection wasn't ready
|
||||||
|
func (h *Handshaker) OnRemoteAnswer(answer OfferAnswer) bool {
|
||||||
|
select {
|
||||||
|
case h.remoteAnswerCh <- answer:
|
||||||
|
return true
|
||||||
|
default:
|
||||||
|
// connection might not be ready yet to receive so we ignore the message
|
||||||
|
h.log.Debugf("OnRemoteAnswer skipping message because is not ready")
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Handshaker) waitForRemoteOfferConfirmation() (*OfferAnswer, error) {
|
||||||
|
select {
|
||||||
|
case remoteOfferAnswer := <-h.remoteOffersCh:
|
||||||
|
// received confirmation from the remote peer -> ready to proceed
|
||||||
|
err := h.sendAnswer()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return &remoteOfferAnswer, nil
|
||||||
|
case remoteOfferAnswer := <-h.remoteAnswerCh:
|
||||||
|
return &remoteOfferAnswer, nil
|
||||||
|
case <-h.ctx.Done():
|
||||||
|
// closed externally
|
||||||
|
return nil, NewConnectionClosedError(h.config.Key)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// sendOffer prepares local user credentials and signals them to the remote peer
|
||||||
|
func (h *Handshaker) sendOffer() error {
|
||||||
|
if !h.signaler.Ready() {
|
||||||
|
return ErrSignalIsNotReady
|
||||||
|
}
|
||||||
|
|
||||||
|
iceUFrag, icePwd := h.ice.GetLocalUserCredentials()
|
||||||
|
offer := OfferAnswer{
|
||||||
|
IceCredentials: IceCredentials{iceUFrag, icePwd},
|
||||||
|
WgListenPort: h.config.LocalWgPort,
|
||||||
|
Version: version.NetbirdVersion(),
|
||||||
|
RosenpassPubKey: h.config.RosenpassPubKey,
|
||||||
|
RosenpassAddr: h.config.RosenpassAddr,
|
||||||
|
}
|
||||||
|
|
||||||
|
addr, err := h.relay.RelayInstanceAddress()
|
||||||
|
if err == nil {
|
||||||
|
offer.RelaySrvAddress = addr
|
||||||
|
}
|
||||||
|
|
||||||
|
return h.signaler.SignalOffer(offer, h.config.Key)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *Handshaker) sendAnswer() error {
|
||||||
|
h.log.Debugf("sending answer")
|
||||||
|
uFrag, pwd := h.ice.GetLocalUserCredentials()
|
||||||
|
|
||||||
|
answer := OfferAnswer{
|
||||||
|
IceCredentials: IceCredentials{uFrag, pwd},
|
||||||
|
WgListenPort: h.config.LocalWgPort,
|
||||||
|
Version: version.NetbirdVersion(),
|
||||||
|
RosenpassPubKey: h.config.RosenpassPubKey,
|
||||||
|
RosenpassAddr: h.config.RosenpassAddr,
|
||||||
|
}
|
||||||
|
addr, err := h.relay.RelayInstanceAddress()
|
||||||
|
if err == nil {
|
||||||
|
answer.RelaySrvAddress = addr
|
||||||
|
}
|
||||||
|
|
||||||
|
err = h.signaler.SignalAnswer(answer, h.config.Key)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
70
client/internal/peer/signaler.go
Normal file
70
client/internal/peer/signaler.go
Normal file
@@ -0,0 +1,70 @@
|
|||||||
|
package peer
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/pion/ice/v3"
|
||||||
|
"golang.zx2c4.com/wireguard/wgctrl/wgtypes"
|
||||||
|
|
||||||
|
signal "github.com/netbirdio/netbird/signal/client"
|
||||||
|
sProto "github.com/netbirdio/netbird/signal/proto"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Signaler struct {
|
||||||
|
signal signal.Client
|
||||||
|
wgPrivateKey wgtypes.Key
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewSignaler(signal signal.Client, wgPrivateKey wgtypes.Key) *Signaler {
|
||||||
|
return &Signaler{
|
||||||
|
signal: signal,
|
||||||
|
wgPrivateKey: wgPrivateKey,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Signaler) SignalOffer(offer OfferAnswer, remoteKey string) error {
|
||||||
|
return s.signalOfferAnswer(offer, remoteKey, sProto.Body_OFFER)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Signaler) SignalAnswer(offer OfferAnswer, remoteKey string) error {
|
||||||
|
return s.signalOfferAnswer(offer, remoteKey, sProto.Body_ANSWER)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Signaler) SignalICECandidate(candidate ice.Candidate, remoteKey string) error {
|
||||||
|
return s.signal.Send(&sProto.Message{
|
||||||
|
Key: s.wgPrivateKey.PublicKey().String(),
|
||||||
|
RemoteKey: remoteKey,
|
||||||
|
Body: &sProto.Body{
|
||||||
|
Type: sProto.Body_CANDIDATE,
|
||||||
|
Payload: candidate.Marshal(),
|
||||||
|
},
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (s *Signaler) Ready() bool {
|
||||||
|
return s.signal.Ready()
|
||||||
|
}
|
||||||
|
|
||||||
|
// SignalOfferAnswer signals either an offer or an answer to remote peer
|
||||||
|
func (s *Signaler) signalOfferAnswer(offerAnswer OfferAnswer, remoteKey string, bodyType sProto.Body_Type) error {
|
||||||
|
msg, err := signal.MarshalCredential(
|
||||||
|
s.wgPrivateKey,
|
||||||
|
offerAnswer.WgListenPort,
|
||||||
|
remoteKey,
|
||||||
|
&signal.Credential{
|
||||||
|
UFrag: offerAnswer.IceCredentials.UFrag,
|
||||||
|
Pwd: offerAnswer.IceCredentials.Pwd,
|
||||||
|
},
|
||||||
|
bodyType,
|
||||||
|
offerAnswer.RosenpassPubKey,
|
||||||
|
offerAnswer.RosenpassAddr,
|
||||||
|
offerAnswer.RelaySrvAddress)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = s.signal.Send(msg)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
@@ -2,34 +2,71 @@ package peer
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
|
"net/netip"
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"golang.org/x/exp/maps"
|
||||||
"google.golang.org/grpc/codes"
|
"google.golang.org/grpc/codes"
|
||||||
gstatus "google.golang.org/grpc/status"
|
gstatus "google.golang.org/grpc/status"
|
||||||
|
|
||||||
"github.com/netbirdio/netbird/client/internal/relay"
|
"github.com/netbirdio/netbird/client/internal/relay"
|
||||||
"github.com/netbirdio/netbird/iface"
|
"github.com/netbirdio/netbird/iface"
|
||||||
|
"github.com/netbirdio/netbird/management/domain"
|
||||||
|
relayClient "github.com/netbirdio/netbird/relay/client"
|
||||||
)
|
)
|
||||||
|
|
||||||
// State contains the latest state of a peer
|
// State contains the latest state of a peer
|
||||||
type State struct {
|
type State struct {
|
||||||
|
Mux *sync.RWMutex
|
||||||
IP string
|
IP string
|
||||||
PubKey string
|
PubKey string
|
||||||
FQDN string
|
FQDN string
|
||||||
ConnStatus ConnStatus
|
ConnStatus ConnStatus
|
||||||
ConnStatusUpdate time.Time
|
ConnStatusUpdate time.Time
|
||||||
Relayed bool
|
Relayed bool
|
||||||
Direct bool
|
|
||||||
LocalIceCandidateType string
|
LocalIceCandidateType string
|
||||||
RemoteIceCandidateType string
|
RemoteIceCandidateType string
|
||||||
LocalIceCandidateEndpoint string
|
LocalIceCandidateEndpoint string
|
||||||
RemoteIceCandidateEndpoint string
|
RemoteIceCandidateEndpoint string
|
||||||
|
RelayServerAddress string
|
||||||
LastWireguardHandshake time.Time
|
LastWireguardHandshake time.Time
|
||||||
BytesTx int64
|
BytesTx int64
|
||||||
BytesRx int64
|
BytesRx int64
|
||||||
|
Latency time.Duration
|
||||||
RosenpassEnabled bool
|
RosenpassEnabled bool
|
||||||
Routes map[string]struct{}
|
routes map[string]struct{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddRoute add a single route to routes map
|
||||||
|
func (s *State) AddRoute(network string) {
|
||||||
|
s.Mux.Lock()
|
||||||
|
defer s.Mux.Unlock()
|
||||||
|
if s.routes == nil {
|
||||||
|
s.routes = make(map[string]struct{})
|
||||||
|
}
|
||||||
|
s.routes[network] = struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
|
// SetRoutes set state routes
|
||||||
|
func (s *State) SetRoutes(routes map[string]struct{}) {
|
||||||
|
s.Mux.Lock()
|
||||||
|
defer s.Mux.Unlock()
|
||||||
|
s.routes = routes
|
||||||
|
}
|
||||||
|
|
||||||
|
// DeleteRoute removes a route from the network amp
|
||||||
|
func (s *State) DeleteRoute(network string) {
|
||||||
|
s.Mux.Lock()
|
||||||
|
defer s.Mux.Unlock()
|
||||||
|
delete(s.routes, network)
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetRoutes return routes map
|
||||||
|
func (s *State) GetRoutes() map[string]struct{} {
|
||||||
|
s.Mux.RLock()
|
||||||
|
defer s.Mux.RUnlock()
|
||||||
|
return s.routes
|
||||||
}
|
}
|
||||||
|
|
||||||
// LocalPeerState contains the latest state of the local peer
|
// LocalPeerState contains the latest state of the local peer
|
||||||
@@ -84,40 +121,50 @@ type FullStatus struct {
|
|||||||
|
|
||||||
// Status holds a state of peers, signal, management connections and relays
|
// Status holds a state of peers, signal, management connections and relays
|
||||||
type Status struct {
|
type Status struct {
|
||||||
mux sync.Mutex
|
mux sync.Mutex
|
||||||
peers map[string]State
|
peers map[string]State
|
||||||
changeNotify map[string]chan struct{}
|
changeNotify map[string]chan struct{}
|
||||||
signalState bool
|
signalState bool
|
||||||
signalError error
|
signalError error
|
||||||
managementState bool
|
managementState bool
|
||||||
managementError error
|
managementError error
|
||||||
relayStates []relay.ProbeResult
|
relayStates []relay.ProbeResult
|
||||||
localPeer LocalPeerState
|
localPeer LocalPeerState
|
||||||
offlinePeers []State
|
offlinePeers []State
|
||||||
mgmAddress string
|
mgmAddress string
|
||||||
signalAddress string
|
signalAddress string
|
||||||
notifier *notifier
|
notifier *notifier
|
||||||
rosenpassEnabled bool
|
rosenpassEnabled bool
|
||||||
rosenpassPermissive bool
|
rosenpassPermissive bool
|
||||||
nsGroupStates []NSGroupState
|
nsGroupStates []NSGroupState
|
||||||
|
resolvedDomainsStates map[domain.Domain][]netip.Prefix
|
||||||
|
|
||||||
// To reduce the number of notification invocation this bool will be true when need to call the notification
|
// To reduce the number of notification invocation this bool will be true when need to call the notification
|
||||||
// Some Peer actions mostly used by in a batch when the network map has been synchronized. In these type of events
|
// Some Peer actions mostly used by in a batch when the network map has been synchronized. In these type of events
|
||||||
// set to true this variable and at the end of the processing we will reset it by the FinishPeerListModifications()
|
// set to true this variable and at the end of the processing we will reset it by the FinishPeerListModifications()
|
||||||
peerListChangedForNotification bool
|
peerListChangedForNotification bool
|
||||||
|
|
||||||
|
relayMgr *relayClient.Manager
|
||||||
}
|
}
|
||||||
|
|
||||||
// NewRecorder returns a new Status instance
|
// NewRecorder returns a new Status instance
|
||||||
func NewRecorder(mgmAddress string) *Status {
|
func NewRecorder(mgmAddress string) *Status {
|
||||||
return &Status{
|
return &Status{
|
||||||
peers: make(map[string]State),
|
peers: make(map[string]State),
|
||||||
changeNotify: make(map[string]chan struct{}),
|
changeNotify: make(map[string]chan struct{}),
|
||||||
offlinePeers: make([]State, 0),
|
offlinePeers: make([]State, 0),
|
||||||
notifier: newNotifier(),
|
notifier: newNotifier(),
|
||||||
mgmAddress: mgmAddress,
|
mgmAddress: mgmAddress,
|
||||||
|
resolvedDomainsStates: make(map[domain.Domain][]netip.Prefix),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *Status) SetRelayMgr(manager *relayClient.Manager) {
|
||||||
|
d.mux.Lock()
|
||||||
|
defer d.mux.Unlock()
|
||||||
|
d.relayMgr = manager
|
||||||
|
}
|
||||||
|
|
||||||
// ReplaceOfflinePeers replaces
|
// ReplaceOfflinePeers replaces
|
||||||
func (d *Status) ReplaceOfflinePeers(replacement []State) {
|
func (d *Status) ReplaceOfflinePeers(replacement []State) {
|
||||||
d.mux.Lock()
|
d.mux.Lock()
|
||||||
@@ -142,6 +189,7 @@ func (d *Status) AddPeer(peerPubKey string, fqdn string) error {
|
|||||||
PubKey: peerPubKey,
|
PubKey: peerPubKey,
|
||||||
ConnStatus: StatusDisconnected,
|
ConnStatus: StatusDisconnected,
|
||||||
FQDN: fqdn,
|
FQDN: fqdn,
|
||||||
|
Mux: new(sync.RWMutex),
|
||||||
}
|
}
|
||||||
d.peerListChangedForNotification = true
|
d.peerListChangedForNotification = true
|
||||||
return nil
|
return nil
|
||||||
@@ -154,7 +202,7 @@ func (d *Status) GetPeer(peerPubKey string) (State, error) {
|
|||||||
|
|
||||||
state, ok := d.peers[peerPubKey]
|
state, ok := d.peers[peerPubKey]
|
||||||
if !ok {
|
if !ok {
|
||||||
return State{}, errors.New("peer not found")
|
return State{}, iface.ErrPeerNotFound
|
||||||
}
|
}
|
||||||
return state, nil
|
return state, nil
|
||||||
}
|
}
|
||||||
@@ -188,21 +236,21 @@ func (d *Status) UpdatePeerState(receivedState State) error {
|
|||||||
peerState.IP = receivedState.IP
|
peerState.IP = receivedState.IP
|
||||||
}
|
}
|
||||||
|
|
||||||
if receivedState.Routes != nil {
|
if receivedState.GetRoutes() != nil {
|
||||||
peerState.Routes = receivedState.Routes
|
peerState.SetRoutes(receivedState.GetRoutes())
|
||||||
}
|
}
|
||||||
|
|
||||||
skipNotification := shouldSkipNotify(receivedState, peerState)
|
skipNotification := shouldSkipNotify(receivedState.ConnStatus, peerState)
|
||||||
|
|
||||||
if receivedState.ConnStatus != peerState.ConnStatus {
|
if receivedState.ConnStatus != peerState.ConnStatus {
|
||||||
peerState.ConnStatus = receivedState.ConnStatus
|
peerState.ConnStatus = receivedState.ConnStatus
|
||||||
peerState.ConnStatusUpdate = receivedState.ConnStatusUpdate
|
peerState.ConnStatusUpdate = receivedState.ConnStatusUpdate
|
||||||
peerState.Direct = receivedState.Direct
|
|
||||||
peerState.Relayed = receivedState.Relayed
|
peerState.Relayed = receivedState.Relayed
|
||||||
peerState.LocalIceCandidateType = receivedState.LocalIceCandidateType
|
peerState.LocalIceCandidateType = receivedState.LocalIceCandidateType
|
||||||
peerState.RemoteIceCandidateType = receivedState.RemoteIceCandidateType
|
peerState.RemoteIceCandidateType = receivedState.RemoteIceCandidateType
|
||||||
peerState.LocalIceCandidateEndpoint = receivedState.LocalIceCandidateEndpoint
|
peerState.LocalIceCandidateEndpoint = receivedState.LocalIceCandidateEndpoint
|
||||||
peerState.RemoteIceCandidateEndpoint = receivedState.RemoteIceCandidateEndpoint
|
peerState.RemoteIceCandidateEndpoint = receivedState.RemoteIceCandidateEndpoint
|
||||||
|
peerState.RelayServerAddress = receivedState.RelayServerAddress
|
||||||
peerState.RosenpassEnabled = receivedState.RosenpassEnabled
|
peerState.RosenpassEnabled = receivedState.RosenpassEnabled
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -222,6 +270,146 @@ func (d *Status) UpdatePeerState(receivedState State) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *Status) UpdatePeerICEState(receivedState State) error {
|
||||||
|
d.mux.Lock()
|
||||||
|
defer d.mux.Unlock()
|
||||||
|
|
||||||
|
peerState, ok := d.peers[receivedState.PubKey]
|
||||||
|
if !ok {
|
||||||
|
return errors.New("peer doesn't exist")
|
||||||
|
}
|
||||||
|
|
||||||
|
if receivedState.IP != "" {
|
||||||
|
peerState.IP = receivedState.IP
|
||||||
|
}
|
||||||
|
|
||||||
|
skipNotification := shouldSkipNotify(receivedState.ConnStatus, peerState)
|
||||||
|
|
||||||
|
peerState.ConnStatus = receivedState.ConnStatus
|
||||||
|
peerState.ConnStatusUpdate = receivedState.ConnStatusUpdate
|
||||||
|
peerState.Relayed = receivedState.Relayed
|
||||||
|
peerState.LocalIceCandidateType = receivedState.LocalIceCandidateType
|
||||||
|
peerState.RemoteIceCandidateType = receivedState.RemoteIceCandidateType
|
||||||
|
peerState.LocalIceCandidateEndpoint = receivedState.LocalIceCandidateEndpoint
|
||||||
|
peerState.RemoteIceCandidateEndpoint = receivedState.RemoteIceCandidateEndpoint
|
||||||
|
peerState.RosenpassEnabled = receivedState.RosenpassEnabled
|
||||||
|
|
||||||
|
d.peers[receivedState.PubKey] = peerState
|
||||||
|
|
||||||
|
if skipNotification {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
ch, found := d.changeNotify[receivedState.PubKey]
|
||||||
|
if found && ch != nil {
|
||||||
|
close(ch)
|
||||||
|
d.changeNotify[receivedState.PubKey] = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
d.notifyPeerListChanged()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Status) UpdatePeerRelayedState(receivedState State) error {
|
||||||
|
d.mux.Lock()
|
||||||
|
defer d.mux.Unlock()
|
||||||
|
|
||||||
|
peerState, ok := d.peers[receivedState.PubKey]
|
||||||
|
if !ok {
|
||||||
|
return errors.New("peer doesn't exist")
|
||||||
|
}
|
||||||
|
|
||||||
|
skipNotification := shouldSkipNotify(receivedState.ConnStatus, peerState)
|
||||||
|
|
||||||
|
peerState.ConnStatus = receivedState.ConnStatus
|
||||||
|
peerState.ConnStatusUpdate = receivedState.ConnStatusUpdate
|
||||||
|
peerState.Relayed = receivedState.Relayed
|
||||||
|
peerState.RelayServerAddress = receivedState.RelayServerAddress
|
||||||
|
peerState.RosenpassEnabled = receivedState.RosenpassEnabled
|
||||||
|
|
||||||
|
d.peers[receivedState.PubKey] = peerState
|
||||||
|
|
||||||
|
if skipNotification {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
ch, found := d.changeNotify[receivedState.PubKey]
|
||||||
|
if found && ch != nil {
|
||||||
|
close(ch)
|
||||||
|
d.changeNotify[receivedState.PubKey] = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
d.notifyPeerListChanged()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Status) UpdatePeerRelayedStateToDisconnected(receivedState State) error {
|
||||||
|
d.mux.Lock()
|
||||||
|
defer d.mux.Unlock()
|
||||||
|
|
||||||
|
peerState, ok := d.peers[receivedState.PubKey]
|
||||||
|
if !ok {
|
||||||
|
return errors.New("peer doesn't exist")
|
||||||
|
}
|
||||||
|
|
||||||
|
skipNotification := shouldSkipNotify(receivedState.ConnStatus, peerState)
|
||||||
|
|
||||||
|
peerState.ConnStatus = receivedState.ConnStatus
|
||||||
|
peerState.Relayed = receivedState.Relayed
|
||||||
|
peerState.ConnStatusUpdate = receivedState.ConnStatusUpdate
|
||||||
|
peerState.RelayServerAddress = ""
|
||||||
|
|
||||||
|
d.peers[receivedState.PubKey] = peerState
|
||||||
|
|
||||||
|
if skipNotification {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
ch, found := d.changeNotify[receivedState.PubKey]
|
||||||
|
if found && ch != nil {
|
||||||
|
close(ch)
|
||||||
|
d.changeNotify[receivedState.PubKey] = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
d.notifyPeerListChanged()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Status) UpdatePeerICEStateToDisconnected(receivedState State) error {
|
||||||
|
d.mux.Lock()
|
||||||
|
defer d.mux.Unlock()
|
||||||
|
|
||||||
|
peerState, ok := d.peers[receivedState.PubKey]
|
||||||
|
if !ok {
|
||||||
|
return errors.New("peer doesn't exist")
|
||||||
|
}
|
||||||
|
|
||||||
|
skipNotification := shouldSkipNotify(receivedState.ConnStatus, peerState)
|
||||||
|
|
||||||
|
peerState.ConnStatus = receivedState.ConnStatus
|
||||||
|
peerState.Relayed = receivedState.Relayed
|
||||||
|
peerState.ConnStatusUpdate = receivedState.ConnStatusUpdate
|
||||||
|
peerState.LocalIceCandidateType = receivedState.LocalIceCandidateType
|
||||||
|
peerState.RemoteIceCandidateType = receivedState.RemoteIceCandidateType
|
||||||
|
peerState.LocalIceCandidateEndpoint = receivedState.LocalIceCandidateEndpoint
|
||||||
|
peerState.RemoteIceCandidateEndpoint = receivedState.RemoteIceCandidateEndpoint
|
||||||
|
|
||||||
|
d.peers[receivedState.PubKey] = peerState
|
||||||
|
|
||||||
|
if skipNotification {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
ch, found := d.changeNotify[receivedState.PubKey]
|
||||||
|
if found && ch != nil {
|
||||||
|
close(ch)
|
||||||
|
d.changeNotify[receivedState.PubKey] = nil
|
||||||
|
}
|
||||||
|
|
||||||
|
d.notifyPeerListChanged()
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// UpdateWireGuardPeerState updates the WireGuard bits of the peer state
|
// UpdateWireGuardPeerState updates the WireGuard bits of the peer state
|
||||||
func (d *Status) UpdateWireGuardPeerState(pubKey string, wgStats iface.WGStats) error {
|
func (d *Status) UpdateWireGuardPeerState(pubKey string, wgStats iface.WGStats) error {
|
||||||
d.mux.Lock()
|
d.mux.Lock()
|
||||||
@@ -241,13 +429,13 @@ func (d *Status) UpdateWireGuardPeerState(pubKey string, wgStats iface.WGStats)
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func shouldSkipNotify(received, curr State) bool {
|
func shouldSkipNotify(receivedConnStatus ConnStatus, curr State) bool {
|
||||||
switch {
|
switch {
|
||||||
case received.ConnStatus == StatusConnecting:
|
case receivedConnStatus == StatusConnecting:
|
||||||
return true
|
return true
|
||||||
case received.ConnStatus == StatusDisconnected && curr.ConnStatus == StatusConnecting:
|
case receivedConnStatus == StatusDisconnected && curr.ConnStatus == StatusConnecting:
|
||||||
return true
|
return true
|
||||||
case received.ConnStatus == StatusDisconnected && curr.ConnStatus == StatusDisconnected:
|
case receivedConnStatus == StatusDisconnected && curr.ConnStatus == StatusDisconnected:
|
||||||
return curr.IP != ""
|
return curr.IP != ""
|
||||||
default:
|
default:
|
||||||
return false
|
return false
|
||||||
@@ -395,6 +583,18 @@ func (d *Status) UpdateDNSStates(dnsStates []NSGroupState) {
|
|||||||
d.nsGroupStates = dnsStates
|
d.nsGroupStates = dnsStates
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *Status) UpdateResolvedDomainsStates(domain domain.Domain, prefixes []netip.Prefix) {
|
||||||
|
d.mux.Lock()
|
||||||
|
defer d.mux.Unlock()
|
||||||
|
d.resolvedDomainsStates[domain] = prefixes
|
||||||
|
}
|
||||||
|
|
||||||
|
func (d *Status) DeleteResolvedDomainsStates(domain domain.Domain) {
|
||||||
|
d.mux.Lock()
|
||||||
|
defer d.mux.Unlock()
|
||||||
|
delete(d.resolvedDomainsStates, domain)
|
||||||
|
}
|
||||||
|
|
||||||
func (d *Status) GetRosenpassState() RosenpassState {
|
func (d *Status) GetRosenpassState() RosenpassState {
|
||||||
return RosenpassState{
|
return RosenpassState{
|
||||||
d.rosenpassEnabled,
|
d.rosenpassEnabled,
|
||||||
@@ -410,6 +610,22 @@ func (d *Status) GetManagementState() ManagementState {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *Status) UpdateLatency(pubKey string, latency time.Duration) error {
|
||||||
|
if latency <= 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
d.mux.Lock()
|
||||||
|
defer d.mux.Unlock()
|
||||||
|
peerState, ok := d.peers[pubKey]
|
||||||
|
if !ok {
|
||||||
|
return errors.New("peer doesn't exist")
|
||||||
|
}
|
||||||
|
peerState.Latency = latency
|
||||||
|
d.peers[pubKey] = peerState
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
// IsLoginRequired determines if a peer's login has expired.
|
// IsLoginRequired determines if a peer's login has expired.
|
||||||
func (d *Status) IsLoginRequired() bool {
|
func (d *Status) IsLoginRequired() bool {
|
||||||
d.mux.Lock()
|
d.mux.Lock()
|
||||||
@@ -423,7 +639,6 @@ func (d *Status) IsLoginRequired() bool {
|
|||||||
s, ok := gstatus.FromError(d.managementError)
|
s, ok := gstatus.FromError(d.managementError)
|
||||||
if ok && (s.Code() == codes.InvalidArgument || s.Code() == codes.PermissionDenied) {
|
if ok && (s.Code() == codes.InvalidArgument || s.Code() == codes.PermissionDenied) {
|
||||||
return true
|
return true
|
||||||
|
|
||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
@@ -437,13 +652,40 @@ func (d *Status) GetSignalState() SignalState {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (d *Status) GetRelayStates() []relay.ProbeResult {
|
func (d *Status) GetRelayStates() []relay.ProbeResult {
|
||||||
return d.relayStates
|
if d.relayMgr == nil {
|
||||||
|
return d.relayStates
|
||||||
|
}
|
||||||
|
|
||||||
|
// extend the list of stun, turn servers with relay address
|
||||||
|
relaysState := make([]relay.ProbeResult, len(d.relayStates), len(d.relayStates)+1)
|
||||||
|
copy(relaysState, d.relayStates)
|
||||||
|
|
||||||
|
relayState := relay.ProbeResult{}
|
||||||
|
|
||||||
|
// if the server connection is not established then we will use the general address
|
||||||
|
// in case of connection we will use the instance specific address
|
||||||
|
instanceAddr, err := d.relayMgr.RelayInstanceAddress()
|
||||||
|
if err != nil {
|
||||||
|
relayState.URI = d.relayMgr.ServerURL()
|
||||||
|
relayState.Err = err
|
||||||
|
} else {
|
||||||
|
relayState.URI = instanceAddr
|
||||||
|
}
|
||||||
|
|
||||||
|
relaysState = append(relaysState, relayState)
|
||||||
|
return relaysState
|
||||||
}
|
}
|
||||||
|
|
||||||
func (d *Status) GetDNSStates() []NSGroupState {
|
func (d *Status) GetDNSStates() []NSGroupState {
|
||||||
return d.nsGroupStates
|
return d.nsGroupStates
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *Status) GetResolvedDomainsStates() map[domain.Domain][]netip.Prefix {
|
||||||
|
d.mux.Lock()
|
||||||
|
defer d.mux.Unlock()
|
||||||
|
return maps.Clone(d.resolvedDomainsStates)
|
||||||
|
}
|
||||||
|
|
||||||
// GetFullStatus gets full status
|
// GetFullStatus gets full status
|
||||||
func (d *Status) GetFullStatus() FullStatus {
|
func (d *Status) GetFullStatus() FullStatus {
|
||||||
d.mux.Lock()
|
d.mux.Lock()
|
||||||
@@ -463,7 +705,6 @@ func (d *Status) GetFullStatus() FullStatus {
|
|||||||
}
|
}
|
||||||
|
|
||||||
fullStatus.Peers = append(fullStatus.Peers, d.offlinePeers...)
|
fullStatus.Peers = append(fullStatus.Peers, d.offlinePeers...)
|
||||||
|
|
||||||
return fullStatus
|
return fullStatus
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|||||||
@@ -2,6 +2,7 @@ package peer
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
|
"sync"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
@@ -42,6 +43,7 @@ func TestUpdatePeerState(t *testing.T) {
|
|||||||
status := NewRecorder("https://mgm")
|
status := NewRecorder("https://mgm")
|
||||||
peerState := State{
|
peerState := State{
|
||||||
PubKey: key,
|
PubKey: key,
|
||||||
|
Mux: new(sync.RWMutex),
|
||||||
}
|
}
|
||||||
|
|
||||||
status.peers[key] = peerState
|
status.peers[key] = peerState
|
||||||
@@ -62,6 +64,7 @@ func TestStatus_UpdatePeerFQDN(t *testing.T) {
|
|||||||
status := NewRecorder("https://mgm")
|
status := NewRecorder("https://mgm")
|
||||||
peerState := State{
|
peerState := State{
|
||||||
PubKey: key,
|
PubKey: key,
|
||||||
|
Mux: new(sync.RWMutex),
|
||||||
}
|
}
|
||||||
|
|
||||||
status.peers[key] = peerState
|
status.peers[key] = peerState
|
||||||
@@ -80,6 +83,7 @@ func TestGetPeerStateChangeNotifierLogic(t *testing.T) {
|
|||||||
status := NewRecorder("https://mgm")
|
status := NewRecorder("https://mgm")
|
||||||
peerState := State{
|
peerState := State{
|
||||||
PubKey: key,
|
PubKey: key,
|
||||||
|
Mux: new(sync.RWMutex),
|
||||||
}
|
}
|
||||||
|
|
||||||
status.peers[key] = peerState
|
status.peers[key] = peerState
|
||||||
@@ -104,6 +108,7 @@ func TestRemovePeer(t *testing.T) {
|
|||||||
status := NewRecorder("https://mgm")
|
status := NewRecorder("https://mgm")
|
||||||
peerState := State{
|
peerState := State{
|
||||||
PubKey: key,
|
PubKey: key,
|
||||||
|
Mux: new(sync.RWMutex),
|
||||||
}
|
}
|
||||||
|
|
||||||
status.peers[key] = peerState
|
status.peers[key] = peerState
|
||||||
|
|||||||
@@ -6,6 +6,6 @@ import (
|
|||||||
"github.com/netbirdio/netbird/client/internal/stdnet"
|
"github.com/netbirdio/netbird/client/internal/stdnet"
|
||||||
)
|
)
|
||||||
|
|
||||||
func (conn *Conn) newStdNet() (*stdnet.Net, error) {
|
func (w *WorkerICE) newStdNet() (*stdnet.Net, error) {
|
||||||
return stdnet.NewNet(conn.config.InterfaceBlackList)
|
return stdnet.NewNet(w.config.ICEConfig.InterfaceBlackList)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -2,6 +2,6 @@ package peer
|
|||||||
|
|
||||||
import "github.com/netbirdio/netbird/client/internal/stdnet"
|
import "github.com/netbirdio/netbird/client/internal/stdnet"
|
||||||
|
|
||||||
func (conn *Conn) newStdNet() (*stdnet.Net, error) {
|
func (w *WorkerICE) newStdNet() (*stdnet.Net, error) {
|
||||||
return stdnet.NewNetWithDiscover(conn.iFaceDiscover, conn.config.InterfaceBlackList)
|
return stdnet.NewNetWithDiscover(w.iFaceDiscover, w.config.ICEConfig.InterfaceBlackList)
|
||||||
}
|
}
|
||||||
|
|||||||
457
client/internal/peer/worker_ice.go
Normal file
457
client/internal/peer/worker_ice.go
Normal file
@@ -0,0 +1,457 @@
|
|||||||
|
package peer
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"net/netip"
|
||||||
|
"runtime"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/pion/ice/v3"
|
||||||
|
"github.com/pion/randutil"
|
||||||
|
"github.com/pion/stun/v2"
|
||||||
|
log "github.com/sirupsen/logrus"
|
||||||
|
|
||||||
|
"github.com/netbirdio/netbird/client/internal/stdnet"
|
||||||
|
"github.com/netbirdio/netbird/iface"
|
||||||
|
"github.com/netbirdio/netbird/iface/bind"
|
||||||
|
"github.com/netbirdio/netbird/route"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
iceKeepAliveDefault = 4 * time.Second
|
||||||
|
iceDisconnectedTimeoutDefault = 6 * time.Second
|
||||||
|
// iceRelayAcceptanceMinWaitDefault is the same as in the Pion ICE package
|
||||||
|
iceRelayAcceptanceMinWaitDefault = 2 * time.Second
|
||||||
|
|
||||||
|
lenUFrag = 16
|
||||||
|
lenPwd = 32
|
||||||
|
runesAlpha = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
failedTimeout = 6 * time.Second
|
||||||
|
)
|
||||||
|
|
||||||
|
type ICEConfig struct {
|
||||||
|
// StunTurn is a list of STUN and TURN URLs
|
||||||
|
StunTurn atomic.Value // []*stun.URI
|
||||||
|
|
||||||
|
// InterfaceBlackList is a list of machine interfaces that should be filtered out by ICE Candidate gathering
|
||||||
|
// (e.g. if eth0 is in the list, host candidate of this interface won't be used)
|
||||||
|
InterfaceBlackList []string
|
||||||
|
DisableIPv6Discovery bool
|
||||||
|
|
||||||
|
UDPMux ice.UDPMux
|
||||||
|
UDPMuxSrflx ice.UniversalUDPMux
|
||||||
|
|
||||||
|
NATExternalIPs []string
|
||||||
|
}
|
||||||
|
|
||||||
|
type ICEConnInfo struct {
|
||||||
|
RemoteConn net.Conn
|
||||||
|
RosenpassPubKey []byte
|
||||||
|
RosenpassAddr string
|
||||||
|
LocalIceCandidateType string
|
||||||
|
RemoteIceCandidateType string
|
||||||
|
RemoteIceCandidateEndpoint string
|
||||||
|
LocalIceCandidateEndpoint string
|
||||||
|
Relayed bool
|
||||||
|
RelayedOnLocal bool
|
||||||
|
}
|
||||||
|
|
||||||
|
type WorkerICECallbacks struct {
|
||||||
|
OnConnReady func(ConnPriority, ICEConnInfo)
|
||||||
|
OnStatusChanged func(ConnStatus)
|
||||||
|
}
|
||||||
|
|
||||||
|
type WorkerICE struct {
|
||||||
|
ctx context.Context
|
||||||
|
log *log.Entry
|
||||||
|
config ConnConfig
|
||||||
|
signaler *Signaler
|
||||||
|
iFaceDiscover stdnet.ExternalIFaceDiscover
|
||||||
|
statusRecorder *Status
|
||||||
|
hasRelayOnLocally bool
|
||||||
|
conn WorkerICECallbacks
|
||||||
|
|
||||||
|
selectedPriority ConnPriority
|
||||||
|
|
||||||
|
agent *ice.Agent
|
||||||
|
muxAgent sync.Mutex
|
||||||
|
|
||||||
|
StunTurn []*stun.URI
|
||||||
|
|
||||||
|
sentExtraSrflx bool
|
||||||
|
|
||||||
|
localUfrag string
|
||||||
|
localPwd string
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewWorkerICE(ctx context.Context, log *log.Entry, config ConnConfig, signaler *Signaler, ifaceDiscover stdnet.ExternalIFaceDiscover, statusRecorder *Status, hasRelayOnLocally bool, callBacks WorkerICECallbacks) (*WorkerICE, error) {
|
||||||
|
w := &WorkerICE{
|
||||||
|
ctx: ctx,
|
||||||
|
log: log,
|
||||||
|
config: config,
|
||||||
|
signaler: signaler,
|
||||||
|
iFaceDiscover: ifaceDiscover,
|
||||||
|
statusRecorder: statusRecorder,
|
||||||
|
hasRelayOnLocally: hasRelayOnLocally,
|
||||||
|
conn: callBacks,
|
||||||
|
}
|
||||||
|
|
||||||
|
localUfrag, localPwd, err := generateICECredentials()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
w.localUfrag = localUfrag
|
||||||
|
w.localPwd = localPwd
|
||||||
|
return w, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *WorkerICE) OnNewOffer(remoteOfferAnswer *OfferAnswer) {
|
||||||
|
w.log.Debugf("OnNewOffer for ICE")
|
||||||
|
w.muxAgent.Lock()
|
||||||
|
|
||||||
|
if w.agent != nil {
|
||||||
|
w.log.Debugf("agent already exists, skipping the offer")
|
||||||
|
w.muxAgent.Unlock()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
var preferredCandidateTypes []ice.CandidateType
|
||||||
|
if w.hasRelayOnLocally && remoteOfferAnswer.RelaySrvAddress != "" {
|
||||||
|
w.selectedPriority = connPriorityICEP2P
|
||||||
|
preferredCandidateTypes = candidateTypesP2P()
|
||||||
|
} else {
|
||||||
|
w.selectedPriority = connPriorityICETurn
|
||||||
|
preferredCandidateTypes = candidateTypes()
|
||||||
|
}
|
||||||
|
|
||||||
|
w.log.Debugf("recreate ICE agent")
|
||||||
|
agentCtx, agentCancel := context.WithCancel(w.ctx)
|
||||||
|
agent, err := w.reCreateAgent(agentCancel, preferredCandidateTypes)
|
||||||
|
if err != nil {
|
||||||
|
w.log.Errorf("failed to recreate ICE Agent: %s", err)
|
||||||
|
w.muxAgent.Unlock()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
w.agent = agent
|
||||||
|
w.muxAgent.Unlock()
|
||||||
|
|
||||||
|
w.log.Debugf("gather candidates")
|
||||||
|
err = w.agent.GatherCandidates()
|
||||||
|
if err != nil {
|
||||||
|
w.log.Debugf("failed to gather candidates: %s", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// will block until connection succeeded
|
||||||
|
// but it won't release if ICE Agent went into Disconnected or Failed state,
|
||||||
|
// so we have to cancel it with the provided context once agent detected a broken connection
|
||||||
|
w.log.Debugf("turn agent dial")
|
||||||
|
remoteConn, err := w.turnAgentDial(agentCtx, remoteOfferAnswer)
|
||||||
|
if err != nil {
|
||||||
|
w.log.Debugf("failed to dial the remote peer: %s", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
w.log.Debugf("agent dial succeeded")
|
||||||
|
|
||||||
|
pair, err := w.agent.GetSelectedCandidatePair()
|
||||||
|
if err != nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if !isRelayCandidate(pair.Local) {
|
||||||
|
// dynamically set remote WireGuard port if other side specified a different one from the default one
|
||||||
|
remoteWgPort := iface.DefaultWgPort
|
||||||
|
if remoteOfferAnswer.WgListenPort != 0 {
|
||||||
|
remoteWgPort = remoteOfferAnswer.WgListenPort
|
||||||
|
}
|
||||||
|
|
||||||
|
// To support old version's with direct mode we attempt to punch an additional role with the remote WireGuard port
|
||||||
|
go w.punchRemoteWGPort(pair, remoteWgPort)
|
||||||
|
}
|
||||||
|
|
||||||
|
ci := ICEConnInfo{
|
||||||
|
RemoteConn: remoteConn,
|
||||||
|
RosenpassPubKey: remoteOfferAnswer.RosenpassPubKey,
|
||||||
|
RosenpassAddr: remoteOfferAnswer.RosenpassAddr,
|
||||||
|
LocalIceCandidateType: pair.Local.Type().String(),
|
||||||
|
RemoteIceCandidateType: pair.Remote.Type().String(),
|
||||||
|
LocalIceCandidateEndpoint: fmt.Sprintf("%s:%d", pair.Local.Address(), pair.Local.Port()),
|
||||||
|
RemoteIceCandidateEndpoint: fmt.Sprintf("%s:%d", pair.Remote.Address(), pair.Remote.Port()),
|
||||||
|
Relayed: isRelayed(pair),
|
||||||
|
RelayedOnLocal: isRelayCandidate(pair.Local),
|
||||||
|
}
|
||||||
|
w.log.Debugf("on ICE conn read to use ready")
|
||||||
|
go w.conn.OnConnReady(w.selectedPriority, ci)
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnRemoteCandidate Handles ICE connection Candidate provided by the remote peer.
|
||||||
|
func (w *WorkerICE) OnRemoteCandidate(candidate ice.Candidate, haRoutes route.HAMap) {
|
||||||
|
w.muxAgent.Lock()
|
||||||
|
defer w.muxAgent.Unlock()
|
||||||
|
w.log.Debugf("OnRemoteCandidate from peer %s -> %s", w.config.Key, candidate.String())
|
||||||
|
if w.agent == nil {
|
||||||
|
w.log.Warnf("ICE Agent is not initialized yet")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
if candidateViaRoutes(candidate, haRoutes) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
err := w.agent.AddRemoteCandidate(candidate)
|
||||||
|
if err != nil {
|
||||||
|
w.log.Errorf("error while handling remote candidate")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *WorkerICE) GetLocalUserCredentials() (frag string, pwd string) {
|
||||||
|
w.muxAgent.Lock()
|
||||||
|
defer w.muxAgent.Unlock()
|
||||||
|
return w.localUfrag, w.localPwd
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *WorkerICE) reCreateAgent(agentCancel context.CancelFunc, relaySupport []ice.CandidateType) (*ice.Agent, error) {
|
||||||
|
transportNet, err := w.newStdNet()
|
||||||
|
if err != nil {
|
||||||
|
w.log.Errorf("failed to create pion's stdnet: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
iceKeepAlive := iceKeepAlive()
|
||||||
|
iceDisconnectedTimeout := iceDisconnectedTimeout()
|
||||||
|
iceRelayAcceptanceMinWait := iceRelayAcceptanceMinWait()
|
||||||
|
|
||||||
|
agentConfig := &ice.AgentConfig{
|
||||||
|
MulticastDNSMode: ice.MulticastDNSModeDisabled,
|
||||||
|
NetworkTypes: []ice.NetworkType{ice.NetworkTypeUDP4, ice.NetworkTypeUDP6},
|
||||||
|
Urls: w.config.ICEConfig.StunTurn.Load().([]*stun.URI),
|
||||||
|
CandidateTypes: relaySupport,
|
||||||
|
InterfaceFilter: stdnet.InterfaceFilter(w.config.ICEConfig.InterfaceBlackList),
|
||||||
|
UDPMux: w.config.ICEConfig.UDPMux,
|
||||||
|
UDPMuxSrflx: w.config.ICEConfig.UDPMuxSrflx,
|
||||||
|
NAT1To1IPs: w.config.ICEConfig.NATExternalIPs,
|
||||||
|
Net: transportNet,
|
||||||
|
FailedTimeout: &failedTimeout,
|
||||||
|
DisconnectedTimeout: &iceDisconnectedTimeout,
|
||||||
|
KeepaliveInterval: &iceKeepAlive,
|
||||||
|
RelayAcceptanceMinWait: &iceRelayAcceptanceMinWait,
|
||||||
|
LocalUfrag: w.localUfrag,
|
||||||
|
LocalPwd: w.localPwd,
|
||||||
|
}
|
||||||
|
|
||||||
|
if w.config.ICEConfig.DisableIPv6Discovery {
|
||||||
|
agentConfig.NetworkTypes = []ice.NetworkType{ice.NetworkTypeUDP4}
|
||||||
|
}
|
||||||
|
|
||||||
|
w.sentExtraSrflx = false
|
||||||
|
agent, err := ice.NewAgent(agentConfig)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = agent.OnCandidate(w.onICECandidate)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = agent.OnConnectionStateChange(func(state ice.ConnectionState) {
|
||||||
|
w.log.Debugf("ICE ConnectionState has changed to %s", state.String())
|
||||||
|
if state == ice.ConnectionStateFailed || state == ice.ConnectionStateDisconnected {
|
||||||
|
w.conn.OnStatusChanged(StatusDisconnected)
|
||||||
|
|
||||||
|
w.muxAgent.Lock()
|
||||||
|
agentCancel()
|
||||||
|
_ = agent.Close()
|
||||||
|
w.agent = nil
|
||||||
|
|
||||||
|
w.muxAgent.Unlock()
|
||||||
|
}
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = agent.OnSelectedCandidatePairChange(w.onICESelectedCandidatePair)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
err = agent.OnSuccessfulSelectedPairBindingResponse(func(p *ice.CandidatePair) {
|
||||||
|
err := w.statusRecorder.UpdateLatency(w.config.Key, p.Latency())
|
||||||
|
if err != nil {
|
||||||
|
w.log.Debugf("failed to update latency for peer: %s", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("failed setting binding response callback: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return agent, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *WorkerICE) punchRemoteWGPort(pair *ice.CandidatePair, remoteWgPort int) {
|
||||||
|
// wait local endpoint configuration
|
||||||
|
time.Sleep(time.Second)
|
||||||
|
addr, err := net.ResolveUDPAddr("udp", fmt.Sprintf("%s:%d", pair.Remote.Address(), remoteWgPort))
|
||||||
|
if err != nil {
|
||||||
|
w.log.Warnf("got an error while resolving the udp address, err: %s", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
mux, ok := w.config.ICEConfig.UDPMuxSrflx.(*bind.UniversalUDPMuxDefault)
|
||||||
|
if !ok {
|
||||||
|
w.log.Warn("invalid udp mux conversion")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
_, err = mux.GetSharedConn().WriteTo([]byte{0x6e, 0x62}, addr)
|
||||||
|
if err != nil {
|
||||||
|
w.log.Warnf("got an error while sending the punch packet, err: %s", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// onICECandidate is a callback attached to an ICE Agent to receive new local connection candidates
|
||||||
|
// and then signals them to the remote peer
|
||||||
|
func (w *WorkerICE) onICECandidate(candidate ice.Candidate) {
|
||||||
|
// nil means candidate gathering has been ended
|
||||||
|
if candidate == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: reported port is incorrect for CandidateTypeHost, makes understanding ICE use via logs confusing as port is ignored
|
||||||
|
w.log.Debugf("discovered local candidate %s", candidate.String())
|
||||||
|
go func() {
|
||||||
|
err := w.signaler.SignalICECandidate(candidate, w.config.Key)
|
||||||
|
if err != nil {
|
||||||
|
w.log.Errorf("failed signaling candidate to the remote peer %s %s", w.config.Key, err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
|
||||||
|
if !w.shouldSendExtraSrflxCandidate(candidate) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// sends an extra server reflexive candidate to the remote peer with our related port (usually the wireguard port)
|
||||||
|
// this is useful when network has an existing port forwarding rule for the wireguard port and this peer
|
||||||
|
extraSrflx, err := extraSrflxCandidate(candidate)
|
||||||
|
if err != nil {
|
||||||
|
w.log.Errorf("failed creating extra server reflexive candidate %s", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
w.sentExtraSrflx = true
|
||||||
|
|
||||||
|
go func() {
|
||||||
|
err = w.signaler.SignalICECandidate(extraSrflx, w.config.Key)
|
||||||
|
if err != nil {
|
||||||
|
w.log.Errorf("failed signaling the extra server reflexive candidate: %s", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *WorkerICE) onICESelectedCandidatePair(c1 ice.Candidate, c2 ice.Candidate) {
|
||||||
|
w.log.Debugf("selected candidate pair [local <-> remote] -> [%s <-> %s], peer %s", c1.String(), c2.String(),
|
||||||
|
w.config.Key)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *WorkerICE) shouldSendExtraSrflxCandidate(candidate ice.Candidate) bool {
|
||||||
|
if !w.sentExtraSrflx && candidate.Type() == ice.CandidateTypeServerReflexive && candidate.Port() != candidate.RelatedAddress().Port {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *WorkerICE) turnAgentDial(ctx context.Context, remoteOfferAnswer *OfferAnswer) (*ice.Conn, error) {
|
||||||
|
isControlling := w.config.LocalKey > w.config.Key
|
||||||
|
if isControlling {
|
||||||
|
return w.agent.Dial(ctx, remoteOfferAnswer.IceCredentials.UFrag, remoteOfferAnswer.IceCredentials.Pwd)
|
||||||
|
} else {
|
||||||
|
return w.agent.Accept(ctx, remoteOfferAnswer.IceCredentials.UFrag, remoteOfferAnswer.IceCredentials.Pwd)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func extraSrflxCandidate(candidate ice.Candidate) (*ice.CandidateServerReflexive, error) {
|
||||||
|
relatedAdd := candidate.RelatedAddress()
|
||||||
|
return ice.NewCandidateServerReflexive(&ice.CandidateServerReflexiveConfig{
|
||||||
|
Network: candidate.NetworkType().String(),
|
||||||
|
Address: candidate.Address(),
|
||||||
|
Port: relatedAdd.Port,
|
||||||
|
Component: candidate.Component(),
|
||||||
|
RelAddr: relatedAdd.Address,
|
||||||
|
RelPort: relatedAdd.Port,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func candidateViaRoutes(candidate ice.Candidate, clientRoutes route.HAMap) bool {
|
||||||
|
var routePrefixes []netip.Prefix
|
||||||
|
for _, routes := range clientRoutes {
|
||||||
|
if len(routes) > 0 && routes[0] != nil {
|
||||||
|
routePrefixes = append(routePrefixes, routes[0].Network)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
addr, err := netip.ParseAddr(candidate.Address())
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("Failed to parse IP address %s: %v", candidate.Address(), err)
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, prefix := range routePrefixes {
|
||||||
|
// default route is
|
||||||
|
if prefix.Bits() == 0 {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
if prefix.Contains(addr) {
|
||||||
|
log.Debugf("Ignoring candidate [%s], its address is part of routed network %s", candidate.String(), prefix)
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func candidateTypes() []ice.CandidateType {
|
||||||
|
if hasICEForceRelayConn() {
|
||||||
|
return []ice.CandidateType{ice.CandidateTypeRelay}
|
||||||
|
}
|
||||||
|
// TODO: remove this once we have refactored userspace proxy into the bind package
|
||||||
|
if runtime.GOOS == "ios" {
|
||||||
|
return []ice.CandidateType{ice.CandidateTypeHost, ice.CandidateTypeServerReflexive}
|
||||||
|
}
|
||||||
|
return []ice.CandidateType{ice.CandidateTypeHost, ice.CandidateTypeServerReflexive, ice.CandidateTypeRelay}
|
||||||
|
}
|
||||||
|
|
||||||
|
func candidateTypesP2P() []ice.CandidateType {
|
||||||
|
return []ice.CandidateType{ice.CandidateTypeHost, ice.CandidateTypeServerReflexive}
|
||||||
|
}
|
||||||
|
|
||||||
|
func isRelayCandidate(candidate ice.Candidate) bool {
|
||||||
|
return candidate.Type() == ice.CandidateTypeRelay
|
||||||
|
}
|
||||||
|
|
||||||
|
func isRelayed(pair *ice.CandidatePair) bool {
|
||||||
|
if pair.Local.Type() == ice.CandidateTypeRelay || pair.Remote.Type() == ice.CandidateTypeRelay {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func generateICECredentials() (string, string, error) {
|
||||||
|
ufrag, err := randutil.GenerateCryptoRandomString(lenUFrag, runesAlpha)
|
||||||
|
if err != nil {
|
||||||
|
return "", "", err
|
||||||
|
}
|
||||||
|
|
||||||
|
pwd, err := randutil.GenerateCryptoRandomString(lenPwd, runesAlpha)
|
||||||
|
if err != nil {
|
||||||
|
return "", "", err
|
||||||
|
}
|
||||||
|
return ufrag, pwd, nil
|
||||||
|
|
||||||
|
}
|
||||||
173
client/internal/peer/worker_relay.go
Normal file
173
client/internal/peer/worker_relay.go
Normal file
@@ -0,0 +1,173 @@
|
|||||||
|
package peer
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"net"
|
||||||
|
"sync/atomic"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
log "github.com/sirupsen/logrus"
|
||||||
|
|
||||||
|
relayClient "github.com/netbirdio/netbird/relay/client"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
wgHandshakePeriod = 2 * time.Minute
|
||||||
|
wgHandshakeOvertime = 30000 * time.Millisecond
|
||||||
|
)
|
||||||
|
|
||||||
|
type RelayConnInfo struct {
|
||||||
|
relayedConn net.Conn
|
||||||
|
rosenpassPubKey []byte
|
||||||
|
rosenpassAddr string
|
||||||
|
}
|
||||||
|
|
||||||
|
type WorkerRelayCallbacks struct {
|
||||||
|
OnConnReady func(RelayConnInfo)
|
||||||
|
OnDisconnected func()
|
||||||
|
}
|
||||||
|
|
||||||
|
type WorkerRelay struct {
|
||||||
|
parentCtx context.Context
|
||||||
|
log *log.Entry
|
||||||
|
config ConnConfig
|
||||||
|
relayManager relayClient.ManagerService
|
||||||
|
conn WorkerRelayCallbacks
|
||||||
|
|
||||||
|
ctxCancel context.CancelFunc
|
||||||
|
relaySupportedOnRemotePeer atomic.Bool
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewWorkerRelay(ctx context.Context, log *log.Entry, config ConnConfig, relayManager relayClient.ManagerService, callbacks WorkerRelayCallbacks) *WorkerRelay {
|
||||||
|
r := &WorkerRelay{
|
||||||
|
parentCtx: ctx,
|
||||||
|
log: log,
|
||||||
|
config: config,
|
||||||
|
relayManager: relayManager,
|
||||||
|
conn: callbacks,
|
||||||
|
}
|
||||||
|
return r
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *WorkerRelay) OnNewOffer(remoteOfferAnswer *OfferAnswer) {
|
||||||
|
if !w.isRelaySupported(remoteOfferAnswer) {
|
||||||
|
w.log.Infof("Relay is not supported by remote peer")
|
||||||
|
w.relaySupportedOnRemotePeer.Store(false)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
w.relaySupportedOnRemotePeer.Store(true)
|
||||||
|
|
||||||
|
// the relayManager will return with error in case if the connection has lost with relay server
|
||||||
|
currentRelayAddress, err := w.relayManager.RelayInstanceAddress()
|
||||||
|
if err != nil {
|
||||||
|
w.log.Errorf("failed to handle new offer: %s", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
srv := w.preferredRelayServer(currentRelayAddress, remoteOfferAnswer.RelaySrvAddress)
|
||||||
|
|
||||||
|
relayedConn, err := w.relayManager.OpenConn(srv, w.config.Key)
|
||||||
|
if err != nil {
|
||||||
|
// todo handle all type errors
|
||||||
|
if errors.Is(err, relayClient.ErrConnAlreadyExists) {
|
||||||
|
w.log.Infof("do not need to reopen relay connection")
|
||||||
|
return
|
||||||
|
}
|
||||||
|
w.log.Errorf("failed to open connection via Relay: %s", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx, ctxCancel := context.WithCancel(w.parentCtx)
|
||||||
|
w.ctxCancel = ctxCancel
|
||||||
|
|
||||||
|
err = w.relayManager.AddCloseListener(srv, w.disconnected)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("failed to add close listener: %s", err)
|
||||||
|
_ = relayedConn.Close()
|
||||||
|
ctxCancel()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
go w.wgStateCheck(ctx, relayedConn)
|
||||||
|
|
||||||
|
w.log.Debugf("peer conn opened via Relay: %s", srv)
|
||||||
|
go w.conn.OnConnReady(RelayConnInfo{
|
||||||
|
relayedConn: relayedConn,
|
||||||
|
rosenpassPubKey: remoteOfferAnswer.RosenpassPubKey,
|
||||||
|
rosenpassAddr: remoteOfferAnswer.RosenpassAddr,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *WorkerRelay) RelayInstanceAddress() (string, error) {
|
||||||
|
return w.relayManager.RelayInstanceAddress()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *WorkerRelay) IsRelayConnectionSupportedWithPeer() bool {
|
||||||
|
return w.relaySupportedOnRemotePeer.Load() && w.RelayIsSupportedLocally()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *WorkerRelay) IsController() bool {
|
||||||
|
return w.config.LocalKey > w.config.Key
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *WorkerRelay) RelayIsSupportedLocally() bool {
|
||||||
|
return w.relayManager.HasRelayAddress()
|
||||||
|
}
|
||||||
|
|
||||||
|
// wgStateCheck help to check the state of the wireguard handshake and relay connection
|
||||||
|
func (w *WorkerRelay) wgStateCheck(ctx context.Context, conn net.Conn) {
|
||||||
|
timer := time.NewTimer(wgHandshakeOvertime)
|
||||||
|
defer timer.Stop()
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-timer.C:
|
||||||
|
lastHandshake, err := w.wgState()
|
||||||
|
if err != nil {
|
||||||
|
w.log.Errorf("failed to read wg stats: %v", err)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
w.log.Tracef("last handshake: %v", lastHandshake)
|
||||||
|
|
||||||
|
if time.Since(lastHandshake) > wgHandshakePeriod {
|
||||||
|
w.log.Infof("Wireguard handshake timed out, closing relay connection")
|
||||||
|
_ = conn.Close()
|
||||||
|
w.conn.OnDisconnected()
|
||||||
|
return
|
||||||
|
}
|
||||||
|
resetTime := time.Until(lastHandshake.Add(wgHandshakeOvertime + wgHandshakePeriod))
|
||||||
|
timer.Reset(resetTime)
|
||||||
|
case <-ctx.Done():
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *WorkerRelay) isRelaySupported(answer *OfferAnswer) bool {
|
||||||
|
if !w.relayManager.HasRelayAddress() {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
return answer.RelaySrvAddress != ""
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *WorkerRelay) preferredRelayServer(myRelayAddress, remoteRelayAddress string) string {
|
||||||
|
if w.IsController() {
|
||||||
|
return myRelayAddress
|
||||||
|
}
|
||||||
|
return remoteRelayAddress
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *WorkerRelay) wgState() (time.Time, error) {
|
||||||
|
wgState, err := w.config.WgConfig.WgInterface.GetStats(w.config.Key)
|
||||||
|
if err != nil {
|
||||||
|
return time.Time{}, err
|
||||||
|
}
|
||||||
|
return wgState.LastHandshake, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w *WorkerRelay) disconnected() {
|
||||||
|
if w.ctxCancel != nil {
|
||||||
|
w.ctxCancel()
|
||||||
|
}
|
||||||
|
w.conn.OnDisconnected()
|
||||||
|
}
|
||||||
@@ -10,11 +10,14 @@ import (
|
|||||||
"github.com/pion/stun/v2"
|
"github.com/pion/stun/v2"
|
||||||
"github.com/pion/turn/v3"
|
"github.com/pion/turn/v3"
|
||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
|
|
||||||
|
"github.com/netbirdio/netbird/client/internal/stdnet"
|
||||||
|
nbnet "github.com/netbirdio/netbird/util/net"
|
||||||
)
|
)
|
||||||
|
|
||||||
// ProbeResult holds the info about the result of a relay probe request
|
// ProbeResult holds the info about the result of a relay probe request
|
||||||
type ProbeResult struct {
|
type ProbeResult struct {
|
||||||
URI *stun.URI
|
URI string
|
||||||
Err error
|
Err error
|
||||||
Addr string
|
Addr string
|
||||||
}
|
}
|
||||||
@@ -27,7 +30,15 @@ func ProbeSTUN(ctx context.Context, uri *stun.URI) (addr string, probeErr error)
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
client, err := stun.DialURI(uri, &stun.DialConfig{})
|
net, err := stdnet.NewNet(nil)
|
||||||
|
if err != nil {
|
||||||
|
probeErr = fmt.Errorf("new net: %w", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
client, err := stun.DialURI(uri, &stun.DialConfig{
|
||||||
|
Net: net,
|
||||||
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
probeErr = fmt.Errorf("dial: %w", err)
|
probeErr = fmt.Errorf("dial: %w", err)
|
||||||
return
|
return
|
||||||
@@ -85,14 +96,13 @@ func ProbeTURN(ctx context.Context, uri *stun.URI) (addr string, probeErr error)
|
|||||||
switch uri.Proto {
|
switch uri.Proto {
|
||||||
case stun.ProtoTypeUDP:
|
case stun.ProtoTypeUDP:
|
||||||
var err error
|
var err error
|
||||||
conn, err = net.ListenPacket("udp", "")
|
conn, err = nbnet.NewListener().ListenPacket(ctx, "udp", "")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
probeErr = fmt.Errorf("listen: %w", err)
|
probeErr = fmt.Errorf("listen: %w", err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
case stun.ProtoTypeTCP:
|
case stun.ProtoTypeTCP:
|
||||||
dialer := net.Dialer{}
|
tcpConn, err := nbnet.NewDialer().DialContext(ctx, "tcp", turnServerAddr)
|
||||||
tcpConn, err := dialer.DialContext(ctx, "tcp", turnServerAddr)
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
probeErr = fmt.Errorf("dial: %w", err)
|
probeErr = fmt.Errorf("dial: %w", err)
|
||||||
return
|
return
|
||||||
@@ -109,12 +119,18 @@ func ProbeTURN(ctx context.Context, uri *stun.URI) (addr string, probeErr error)
|
|||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
|
net, err := stdnet.NewNet(nil)
|
||||||
|
if err != nil {
|
||||||
|
probeErr = fmt.Errorf("new net: %w", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
cfg := &turn.ClientConfig{
|
cfg := &turn.ClientConfig{
|
||||||
STUNServerAddr: turnServerAddr,
|
STUNServerAddr: turnServerAddr,
|
||||||
TURNServerAddr: turnServerAddr,
|
TURNServerAddr: turnServerAddr,
|
||||||
Conn: conn,
|
Conn: conn,
|
||||||
Username: uri.Username,
|
Username: uri.Username,
|
||||||
Password: uri.Password,
|
Password: uri.Password,
|
||||||
|
Net: net,
|
||||||
}
|
}
|
||||||
client, err := turn.NewClient(cfg)
|
client, err := turn.NewClient(cfg)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -154,13 +170,13 @@ func ProbeAll(
|
|||||||
|
|
||||||
var wg sync.WaitGroup
|
var wg sync.WaitGroup
|
||||||
for i, uri := range relays {
|
for i, uri := range relays {
|
||||||
ctx, cancel := context.WithTimeout(ctx, 1*time.Second)
|
ctx, cancel := context.WithTimeout(ctx, 2*time.Second)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
|
|
||||||
wg.Add(1)
|
wg.Add(1)
|
||||||
go func(res *ProbeResult, stunURI *stun.URI) {
|
go func(res *ProbeResult, stunURI *stun.URI) {
|
||||||
defer wg.Done()
|
defer wg.Done()
|
||||||
res.URI = stunURI
|
res.URI = stunURI.String()
|
||||||
res.Addr, res.Err = fn(ctx, stunURI)
|
res.Addr, res.Err = fn(ctx, stunURI)
|
||||||
}(&results[i], uri)
|
}(&results[i], uri)
|
||||||
}
|
}
|
||||||
|
|||||||
@@ -3,21 +3,26 @@ package routemanager
|
|||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/netip"
|
"reflect"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/hashicorp/go-multierror"
|
||||||
log "github.com/sirupsen/logrus"
|
log "github.com/sirupsen/logrus"
|
||||||
|
|
||||||
|
nberrors "github.com/netbirdio/netbird/client/errors"
|
||||||
|
nbdns "github.com/netbirdio/netbird/client/internal/dns"
|
||||||
"github.com/netbirdio/netbird/client/internal/peer"
|
"github.com/netbirdio/netbird/client/internal/peer"
|
||||||
|
"github.com/netbirdio/netbird/client/internal/routemanager/dynamic"
|
||||||
|
"github.com/netbirdio/netbird/client/internal/routemanager/refcounter"
|
||||||
|
"github.com/netbirdio/netbird/client/internal/routemanager/static"
|
||||||
"github.com/netbirdio/netbird/iface"
|
"github.com/netbirdio/netbird/iface"
|
||||||
"github.com/netbirdio/netbird/route"
|
"github.com/netbirdio/netbird/route"
|
||||||
)
|
)
|
||||||
|
|
||||||
const minRangeBits = 7
|
|
||||||
|
|
||||||
type routerPeerStatus struct {
|
type routerPeerStatus struct {
|
||||||
connected bool
|
connected bool
|
||||||
relayed bool
|
relayed bool
|
||||||
direct bool
|
latency time.Duration
|
||||||
}
|
}
|
||||||
|
|
||||||
type routesUpdate struct {
|
type routesUpdate struct {
|
||||||
@@ -25,38 +30,48 @@ type routesUpdate struct {
|
|||||||
routes []*route.Route
|
routes []*route.Route
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// RouteHandler defines the interface for handling routes
|
||||||
|
type RouteHandler interface {
|
||||||
|
String() string
|
||||||
|
AddRoute(ctx context.Context) error
|
||||||
|
RemoveRoute() error
|
||||||
|
AddAllowedIPs(peerKey string) error
|
||||||
|
RemoveAllowedIPs() error
|
||||||
|
}
|
||||||
|
|
||||||
type clientNetwork struct {
|
type clientNetwork struct {
|
||||||
ctx context.Context
|
ctx context.Context
|
||||||
stop context.CancelFunc
|
cancel context.CancelFunc
|
||||||
statusRecorder *peer.Status
|
statusRecorder *peer.Status
|
||||||
wgInterface *iface.WGIface
|
wgInterface iface.IWGIface
|
||||||
routes map[string]*route.Route
|
routes map[route.ID]*route.Route
|
||||||
routeUpdate chan routesUpdate
|
routeUpdate chan routesUpdate
|
||||||
peerStateUpdate chan struct{}
|
peerStateUpdate chan struct{}
|
||||||
routePeersNotifiers map[string]chan struct{}
|
routePeersNotifiers map[string]chan struct{}
|
||||||
chosenRoute *route.Route
|
currentChosen *route.Route
|
||||||
network netip.Prefix
|
handler RouteHandler
|
||||||
updateSerial uint64
|
updateSerial uint64
|
||||||
}
|
}
|
||||||
|
|
||||||
func newClientNetworkWatcher(ctx context.Context, wgInterface *iface.WGIface, statusRecorder *peer.Status, network netip.Prefix) *clientNetwork {
|
func newClientNetworkWatcher(ctx context.Context, dnsRouteInterval time.Duration, wgInterface iface.IWGIface, statusRecorder *peer.Status, rt *route.Route, routeRefCounter *refcounter.RouteRefCounter, allowedIPsRefCounter *refcounter.AllowedIPsRefCounter) *clientNetwork {
|
||||||
ctx, cancel := context.WithCancel(ctx)
|
ctx, cancel := context.WithCancel(ctx)
|
||||||
|
|
||||||
client := &clientNetwork{
|
client := &clientNetwork{
|
||||||
ctx: ctx,
|
ctx: ctx,
|
||||||
stop: cancel,
|
cancel: cancel,
|
||||||
statusRecorder: statusRecorder,
|
statusRecorder: statusRecorder,
|
||||||
wgInterface: wgInterface,
|
wgInterface: wgInterface,
|
||||||
routes: make(map[string]*route.Route),
|
routes: make(map[route.ID]*route.Route),
|
||||||
routePeersNotifiers: make(map[string]chan struct{}),
|
routePeersNotifiers: make(map[string]chan struct{}),
|
||||||
routeUpdate: make(chan routesUpdate),
|
routeUpdate: make(chan routesUpdate),
|
||||||
peerStateUpdate: make(chan struct{}),
|
peerStateUpdate: make(chan struct{}),
|
||||||
network: network,
|
handler: handlerFromRoute(rt, routeRefCounter, allowedIPsRefCounter, dnsRouteInterval, statusRecorder, wgInterface),
|
||||||
}
|
}
|
||||||
return client
|
return client
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *clientNetwork) getRouterPeerStatuses() map[string]routerPeerStatus {
|
func (c *clientNetwork) getRouterPeerStatuses() map[route.ID]routerPeerStatus {
|
||||||
routePeerStatuses := make(map[string]routerPeerStatus)
|
routePeerStatuses := make(map[route.ID]routerPeerStatus)
|
||||||
for _, r := range c.routes {
|
for _, r := range c.routes {
|
||||||
peerStatus, err := c.statusRecorder.GetPeer(r.Peer)
|
peerStatus, err := c.statusRecorder.GetPeer(r.Peer)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
@@ -66,23 +81,37 @@ func (c *clientNetwork) getRouterPeerStatuses() map[string]routerPeerStatus {
|
|||||||
routePeerStatuses[r.ID] = routerPeerStatus{
|
routePeerStatuses[r.ID] = routerPeerStatus{
|
||||||
connected: peerStatus.ConnStatus == peer.StatusConnected,
|
connected: peerStatus.ConnStatus == peer.StatusConnected,
|
||||||
relayed: peerStatus.Relayed,
|
relayed: peerStatus.Relayed,
|
||||||
direct: peerStatus.Direct,
|
latency: peerStatus.Latency,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return routePeerStatuses
|
return routePeerStatuses
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *clientNetwork) getBestRouteFromStatuses(routePeerStatuses map[string]routerPeerStatus) string {
|
// getBestRouteFromStatuses determines the most optimal route from the available routes
|
||||||
chosen := ""
|
// within a clientNetwork, taking into account peer connection status, route metrics, and
|
||||||
chosenScore := 0
|
// preference for non-relayed and direct connections.
|
||||||
|
//
|
||||||
|
// It follows these prioritization rules:
|
||||||
|
// * Connected peers: Only routes with connected peers are considered.
|
||||||
|
// * Metric: Routes with lower metrics (better) are prioritized.
|
||||||
|
// * Non-relayed: Routes without relays are preferred.
|
||||||
|
// * Direct connections: Routes with direct peer connections are favored.
|
||||||
|
// * Latency: Routes with lower latency are prioritized.
|
||||||
|
// * Stability: In case of equal scores, the currently active route (if any) is maintained.
|
||||||
|
//
|
||||||
|
// It returns the ID of the selected optimal route.
|
||||||
|
func (c *clientNetwork) getBestRouteFromStatuses(routePeerStatuses map[route.ID]routerPeerStatus) route.ID {
|
||||||
|
chosen := route.ID("")
|
||||||
|
chosenScore := float64(0)
|
||||||
|
currScore := float64(0)
|
||||||
|
|
||||||
currID := ""
|
currID := route.ID("")
|
||||||
if c.chosenRoute != nil {
|
if c.currentChosen != nil {
|
||||||
currID = c.chosenRoute.ID
|
currID = c.currentChosen.ID
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, r := range c.routes {
|
for _, r := range c.routes {
|
||||||
tempScore := 0
|
tempScore := float64(0)
|
||||||
peerStatus, found := routePeerStatuses[r.ID]
|
peerStatus, found := routePeerStatuses[r.ID]
|
||||||
if !found || !peerStatus.connected {
|
if !found || !peerStatus.connected {
|
||||||
continue
|
continue
|
||||||
@@ -90,38 +119,59 @@ func (c *clientNetwork) getBestRouteFromStatuses(routePeerStatuses map[string]ro
|
|||||||
|
|
||||||
if r.Metric < route.MaxMetric {
|
if r.Metric < route.MaxMetric {
|
||||||
metricDiff := route.MaxMetric - r.Metric
|
metricDiff := route.MaxMetric - r.Metric
|
||||||
tempScore = metricDiff * 10
|
tempScore = float64(metricDiff) * 10
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// in some temporal cases, latency can be 0, so we set it to 1s to not block but try to avoid this route
|
||||||
|
latency := time.Second
|
||||||
|
if peerStatus.latency != 0 {
|
||||||
|
latency = peerStatus.latency
|
||||||
|
} else {
|
||||||
|
log.Warnf("peer %s has 0 latency", r.Peer)
|
||||||
|
}
|
||||||
|
tempScore += 1 - latency.Seconds()
|
||||||
|
|
||||||
if !peerStatus.relayed {
|
if !peerStatus.relayed {
|
||||||
tempScore++
|
tempScore++
|
||||||
}
|
}
|
||||||
|
|
||||||
if peerStatus.direct {
|
if tempScore > chosenScore || (tempScore == chosenScore && chosen == "") {
|
||||||
tempScore++
|
log.Infof("tempScore(%f) > chosenScore(%f) || (tempScore(%f) == chosenScore(%f) && chosen == \"\"(%s): chosen: %s", tempScore, chosenScore, tempScore, chosenScore, chosen, r.ID)
|
||||||
}
|
|
||||||
|
|
||||||
if tempScore > chosenScore || (tempScore == chosenScore && r.ID == currID) {
|
|
||||||
chosen = r.ID
|
chosen = r.ID
|
||||||
chosenScore = tempScore
|
chosenScore = tempScore
|
||||||
}
|
}
|
||||||
|
|
||||||
if chosen == "" && currID == "" {
|
if chosen == "" && currID == "" {
|
||||||
|
log.Infof("chosen == \"\" && currID == \"\" , chosen: %s", r.ID)
|
||||||
chosen = r.ID
|
chosen = r.ID
|
||||||
chosenScore = tempScore
|
chosenScore = tempScore
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if r.ID == currID {
|
||||||
|
currScore = tempScore
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if chosen == "" {
|
switch {
|
||||||
|
case chosen == "":
|
||||||
var peers []string
|
var peers []string
|
||||||
for _, r := range c.routes {
|
for _, r := range c.routes {
|
||||||
peers = append(peers, r.Peer)
|
peers = append(peers, r.Peer)
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Warnf("the network %s has not been assigned a routing peer as no peers from the list %s are currently connected", c.network, peers)
|
log.Warnf("The network [%v] has not been assigned a routing peer as no peers from the list %s are currently connected", c.handler, peers)
|
||||||
|
case chosen != currID:
|
||||||
} else if chosen != currID {
|
log.Infof("chosen != currID, chosen: %s", chosen)
|
||||||
log.Infof("new chosen route is %s with peer %s with score %d for network %s", chosen, c.routes[chosen].Peer, chosenScore, c.network)
|
// we compare the current score + 10ms to the chosen score to avoid flapping between routes
|
||||||
|
if currScore != 0 && currScore+0.01 > chosenScore {
|
||||||
|
log.Debugf("Keeping current routing peer because the score difference with latency is less than 0.01(10ms), current: %f, new: %f", currScore, chosenScore)
|
||||||
|
return currID
|
||||||
|
}
|
||||||
|
var p string
|
||||||
|
if rt := c.routes[chosen]; rt != nil {
|
||||||
|
p = rt.Peer
|
||||||
|
}
|
||||||
|
log.Infof("New chosen route is %s with peer %s with score %f for network [%v]", chosen, p, chosenScore, c.handler)
|
||||||
}
|
}
|
||||||
|
|
||||||
return chosen
|
return chosen
|
||||||
@@ -155,127 +205,136 @@ func (c *clientNetwork) startPeersStatusChangeWatcher() {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *clientNetwork) removeRouteFromWireguardPeer(peerKey string) error {
|
func (c *clientNetwork) removeRouteFromWireguardPeer() error {
|
||||||
state, err := c.statusRecorder.GetPeer(peerKey)
|
c.removeStateRoute()
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
delete(state.Routes, c.network.String())
|
if err := c.handler.RemoveAllowedIPs(); err != nil {
|
||||||
if err := c.statusRecorder.UpdatePeerState(state); err != nil {
|
return fmt.Errorf("remove allowed IPs: %w", err)
|
||||||
log.Warnf("Failed to update peer state: %v", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
if state.ConnStatus != peer.StatusConnected {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
err = c.wgInterface.RemoveAllowedIP(peerKey, c.network.String())
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("couldn't remove allowed IP %s removed for peer %s, err: %v",
|
|
||||||
c.network, c.chosenRoute.Peer, err)
|
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *clientNetwork) removeRouteFromPeerAndSystem() error {
|
func (c *clientNetwork) removeRouteFromPeerAndSystem() error {
|
||||||
if c.chosenRoute != nil {
|
if c.currentChosen == nil {
|
||||||
err := c.removeRouteFromWireguardPeer(c.chosenRoute.Peer)
|
return nil
|
||||||
if err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
err = removeFromRouteTableIfNonSystem(c.network, c.wgInterface.Address().IP.String())
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("couldn't remove route %s from system, err: %v",
|
|
||||||
c.network, err)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
return nil
|
|
||||||
|
var merr *multierror.Error
|
||||||
|
|
||||||
|
if err := c.removeRouteFromWireguardPeer(); err != nil {
|
||||||
|
merr = multierror.Append(merr, fmt.Errorf("remove allowed IPs for peer %s: %w", c.currentChosen.Peer, err))
|
||||||
|
}
|
||||||
|
if err := c.handler.RemoveRoute(); err != nil {
|
||||||
|
merr = multierror.Append(merr, fmt.Errorf("remove route: %w", err))
|
||||||
|
}
|
||||||
|
|
||||||
|
return nberrors.FormatErrorOrNil(merr)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *clientNetwork) recalculateRouteAndUpdatePeerAndSystem() error {
|
func (c *clientNetwork) recalculateRouteAndUpdatePeerAndSystem() error {
|
||||||
|
|
||||||
var err error
|
|
||||||
|
|
||||||
routerPeerStatuses := c.getRouterPeerStatuses()
|
routerPeerStatuses := c.getRouterPeerStatuses()
|
||||||
|
|
||||||
chosen := c.getBestRouteFromStatuses(routerPeerStatuses)
|
newChosenID := c.getBestRouteFromStatuses(routerPeerStatuses)
|
||||||
if chosen == "" {
|
|
||||||
err = c.removeRouteFromPeerAndSystem()
|
// If no route is chosen, remove the route from the peer and system
|
||||||
if err != nil {
|
if newChosenID == "" {
|
||||||
return err
|
if err := c.removeRouteFromPeerAndSystem(); err != nil {
|
||||||
|
return fmt.Errorf("remove route for peer %s: %w", c.currentChosen.Peer, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
c.chosenRoute = nil
|
c.currentChosen = nil
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
if c.chosenRoute != nil && c.chosenRoute.ID == chosen {
|
// If the chosen route is the same as the current route, do nothing
|
||||||
if c.chosenRoute.IsEqual(c.routes[chosen]) {
|
if c.currentChosen != nil && c.currentChosen.ID == newChosenID &&
|
||||||
return nil
|
c.currentChosen.IsEqual(c.routes[newChosenID]) {
|
||||||
}
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
if c.chosenRoute != nil {
|
if c.currentChosen == nil {
|
||||||
err = c.removeRouteFromWireguardPeer(c.chosenRoute.Peer)
|
// If they were not previously assigned to another peer, add routes to the system first
|
||||||
if err != nil {
|
if err := c.handler.AddRoute(c.ctx); err != nil {
|
||||||
return err
|
return fmt.Errorf("add route: %w", err)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
err = addToRouteTableIfNoExists(c.network, c.wgInterface.Address().IP.String())
|
// Otherwise, remove the allowed IPs from the previous peer first
|
||||||
if err != nil {
|
if err := c.removeRouteFromWireguardPeer(); err != nil {
|
||||||
return fmt.Errorf("route %s couldn't be added for peer %s, err: %v",
|
return fmt.Errorf("remove allowed IPs for peer %s: %w", c.currentChosen.Peer, err)
|
||||||
c.network.String(), c.wgInterface.Address().IP.String(), err)
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
c.chosenRoute = c.routes[chosen]
|
c.currentChosen = c.routes[newChosenID]
|
||||||
|
|
||||||
state, err := c.statusRecorder.GetPeer(c.chosenRoute.Peer)
|
if err := c.handler.AddAllowedIPs(c.currentChosen.Peer); err != nil {
|
||||||
if err != nil {
|
return fmt.Errorf("add allowed IPs for peer %s: %w", c.currentChosen.Peer, err)
|
||||||
log.Errorf("Failed to get peer state: %v", err)
|
|
||||||
} else {
|
|
||||||
if state.Routes == nil {
|
|
||||||
state.Routes = map[string]struct{}{}
|
|
||||||
}
|
|
||||||
state.Routes[c.network.String()] = struct{}{}
|
|
||||||
if err := c.statusRecorder.UpdatePeerState(state); err != nil {
|
|
||||||
log.Warnf("Failed to update peer state: %v", err)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
err = c.wgInterface.AddAllowedIP(c.chosenRoute.Peer, c.network.String())
|
c.addStateRoute()
|
||||||
if err != nil {
|
|
||||||
log.Errorf("couldn't add allowed IP %s added for peer %s, err: %v",
|
|
||||||
c.network, c.chosenRoute.Peer, err)
|
|
||||||
}
|
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (c *clientNetwork) addStateRoute() {
|
||||||
|
state, err := c.statusRecorder.GetPeer(c.currentChosen.Peer)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("Failed to get peer state: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
state.AddRoute(c.handler.String())
|
||||||
|
if err := c.statusRecorder.UpdatePeerState(state); err != nil {
|
||||||
|
log.Warnf("Failed to update peer state: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *clientNetwork) removeStateRoute() {
|
||||||
|
state, err := c.statusRecorder.GetPeer(c.currentChosen.Peer)
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("Failed to get peer state: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
state.DeleteRoute(c.handler.String())
|
||||||
|
if err := c.statusRecorder.UpdatePeerState(state); err != nil {
|
||||||
|
log.Warnf("Failed to update peer state: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func (c *clientNetwork) sendUpdateToClientNetworkWatcher(update routesUpdate) {
|
func (c *clientNetwork) sendUpdateToClientNetworkWatcher(update routesUpdate) {
|
||||||
go func() {
|
go func() {
|
||||||
c.routeUpdate <- update
|
c.routeUpdate <- update
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
|
||||||
func (c *clientNetwork) handleUpdate(update routesUpdate) {
|
func (c *clientNetwork) handleUpdate(update routesUpdate) bool {
|
||||||
updateMap := make(map[string]*route.Route)
|
isUpdateMapDifferent := false
|
||||||
|
updateMap := make(map[route.ID]*route.Route)
|
||||||
|
|
||||||
for _, r := range update.routes {
|
for _, r := range update.routes {
|
||||||
updateMap[r.ID] = r
|
updateMap[r.ID] = r
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if len(c.routes) != len(updateMap) {
|
||||||
|
isUpdateMapDifferent = true
|
||||||
|
}
|
||||||
|
|
||||||
for id, r := range c.routes {
|
for id, r := range c.routes {
|
||||||
_, found := updateMap[id]
|
_, found := updateMap[id]
|
||||||
if !found {
|
if !found {
|
||||||
close(c.routePeersNotifiers[r.Peer])
|
close(c.routePeersNotifiers[r.Peer])
|
||||||
delete(c.routePeersNotifiers, r.Peer)
|
delete(c.routePeersNotifiers, r.Peer)
|
||||||
|
isUpdateMapDifferent = true
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if !reflect.DeepEqual(c.routes[id], updateMap[id]) {
|
||||||
|
isUpdateMapDifferent = true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
c.routes = updateMap
|
c.routes = updateMap
|
||||||
|
return isUpdateMapDifferent
|
||||||
}
|
}
|
||||||
|
|
||||||
// peersStateAndUpdateWatcher is the main point of reacting on client network routing events.
|
// peersStateAndUpdateWatcher is the main point of reacting on client network routing events.
|
||||||
@@ -284,35 +343,48 @@ func (c *clientNetwork) peersStateAndUpdateWatcher() {
|
|||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case <-c.ctx.Done():
|
case <-c.ctx.Done():
|
||||||
log.Debugf("stopping watcher for network %s", c.network)
|
log.Debugf("Stopping watcher for network [%v]", c.handler)
|
||||||
err := c.removeRouteFromPeerAndSystem()
|
if err := c.removeRouteFromPeerAndSystem(); err != nil {
|
||||||
if err != nil {
|
log.Errorf("Failed to remove routes for [%v]: %v", c.handler, err)
|
||||||
log.Error(err)
|
|
||||||
}
|
}
|
||||||
return
|
return
|
||||||
case <-c.peerStateUpdate:
|
case <-c.peerStateUpdate:
|
||||||
err := c.recalculateRouteAndUpdatePeerAndSystem()
|
err := c.recalculateRouteAndUpdatePeerAndSystem()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error(err)
|
log.Errorf("Failed to recalculate routes for network [%v]: %v", c.handler, err)
|
||||||
}
|
}
|
||||||
case update := <-c.routeUpdate:
|
case update := <-c.routeUpdate:
|
||||||
if update.updateSerial < c.updateSerial {
|
if update.updateSerial < c.updateSerial {
|
||||||
log.Warnf("received a routes update with smaller serial number, ignoring it")
|
log.Warnf("Received a routes update with smaller serial number (%d -> %d), ignoring it", c.updateSerial, update.updateSerial)
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
log.Debugf("received a new client network route update for %s", c.network)
|
log.Debugf("Received a new client network route update for [%v]", c.handler)
|
||||||
|
|
||||||
c.handleUpdate(update)
|
// hash update somehow
|
||||||
|
isTrueRouteUpdate := c.handleUpdate(update)
|
||||||
|
|
||||||
c.updateSerial = update.updateSerial
|
c.updateSerial = update.updateSerial
|
||||||
|
|
||||||
err := c.recalculateRouteAndUpdatePeerAndSystem()
|
if isTrueRouteUpdate {
|
||||||
if err != nil {
|
log.Debug("Client network update contains different routes, recalculating routes")
|
||||||
log.Error(err)
|
err := c.recalculateRouteAndUpdatePeerAndSystem()
|
||||||
|
if err != nil {
|
||||||
|
log.Errorf("Failed to recalculate routes for network [%v]: %v", c.handler, err)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
log.Debug("Route update is not different, skipping route recalculation")
|
||||||
}
|
}
|
||||||
|
|
||||||
c.startPeersStatusChangeWatcher()
|
c.startPeersStatusChangeWatcher()
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func handlerFromRoute(rt *route.Route, routeRefCounter *refcounter.RouteRefCounter, allowedIPsRefCounter *refcounter.AllowedIPsRefCounter, dnsRouterInteval time.Duration, statusRecorder *peer.Status, wgInterface iface.IWGIface) RouteHandler {
|
||||||
|
if rt.IsDynamic() {
|
||||||
|
dns := nbdns.NewServiceViaMemory(wgInterface)
|
||||||
|
return dynamic.NewRoute(rt, routeRefCounter, allowedIPsRefCounter, dnsRouterInteval, statusRecorder, wgInterface, fmt.Sprintf("%s:%d", dns.RuntimeIP(), dns.RuntimePort()))
|
||||||
|
}
|
||||||
|
return static.NewRoute(rt, routeRefCounter, allowedIPsRefCounter)
|
||||||
|
}
|
||||||
|
|||||||
@@ -4,6 +4,9 @@ import (
|
|||||||
"net/netip"
|
"net/netip"
|
||||||
"testing"
|
"testing"
|
||||||
|
|
||||||
|
log "github.com/sirupsen/logrus"
|
||||||
|
|
||||||
|
"github.com/netbirdio/netbird/client/internal/routemanager/static"
|
||||||
"github.com/netbirdio/netbird/route"
|
"github.com/netbirdio/netbird/route"
|
||||||
)
|
)
|
||||||
|
|
||||||
@@ -11,160 +14,24 @@ func TestGetBestrouteFromStatuses(t *testing.T) {
|
|||||||
|
|
||||||
testCases := []struct {
|
testCases := []struct {
|
||||||
name string
|
name string
|
||||||
statuses map[string]routerPeerStatus
|
statuses map[route.ID]routerPeerStatus
|
||||||
expectedRouteID string
|
expectedRouteID route.ID
|
||||||
currentRoute *route.Route
|
currentRoute route.ID
|
||||||
existingRoutes map[string]*route.Route
|
existingRoutes map[route.ID]*route.Route
|
||||||
}{
|
}{
|
||||||
{
|
|
||||||
name: "one route",
|
|
||||||
statuses: map[string]routerPeerStatus{
|
|
||||||
"route1": {
|
|
||||||
connected: true,
|
|
||||||
relayed: false,
|
|
||||||
direct: true,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
existingRoutes: map[string]*route.Route{
|
|
||||||
"route1": {
|
|
||||||
ID: "route1",
|
|
||||||
Metric: route.MaxMetric,
|
|
||||||
Peer: "peer1",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
currentRoute: nil,
|
|
||||||
expectedRouteID: "route1",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "one connected routes with relayed and direct",
|
|
||||||
statuses: map[string]routerPeerStatus{
|
|
||||||
"route1": {
|
|
||||||
connected: true,
|
|
||||||
relayed: true,
|
|
||||||
direct: true,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
existingRoutes: map[string]*route.Route{
|
|
||||||
"route1": {
|
|
||||||
ID: "route1",
|
|
||||||
Metric: route.MaxMetric,
|
|
||||||
Peer: "peer1",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
currentRoute: nil,
|
|
||||||
expectedRouteID: "route1",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "one connected routes with relayed and no direct",
|
|
||||||
statuses: map[string]routerPeerStatus{
|
|
||||||
"route1": {
|
|
||||||
connected: true,
|
|
||||||
relayed: true,
|
|
||||||
direct: false,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
existingRoutes: map[string]*route.Route{
|
|
||||||
"route1": {
|
|
||||||
ID: "route1",
|
|
||||||
Metric: route.MaxMetric,
|
|
||||||
Peer: "peer1",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
currentRoute: nil,
|
|
||||||
expectedRouteID: "route1",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "no connected peers",
|
|
||||||
statuses: map[string]routerPeerStatus{
|
|
||||||
"route1": {
|
|
||||||
connected: false,
|
|
||||||
relayed: false,
|
|
||||||
direct: false,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
existingRoutes: map[string]*route.Route{
|
|
||||||
"route1": {
|
|
||||||
ID: "route1",
|
|
||||||
Metric: route.MaxMetric,
|
|
||||||
Peer: "peer1",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
currentRoute: nil,
|
|
||||||
expectedRouteID: "",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "multiple connected peers with different metrics",
|
|
||||||
statuses: map[string]routerPeerStatus{
|
|
||||||
"route1": {
|
|
||||||
connected: true,
|
|
||||||
relayed: false,
|
|
||||||
direct: true,
|
|
||||||
},
|
|
||||||
"route2": {
|
|
||||||
connected: true,
|
|
||||||
relayed: false,
|
|
||||||
direct: true,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
existingRoutes: map[string]*route.Route{
|
|
||||||
"route1": {
|
|
||||||
ID: "route1",
|
|
||||||
Metric: 9000,
|
|
||||||
Peer: "peer1",
|
|
||||||
},
|
|
||||||
"route2": {
|
|
||||||
ID: "route2",
|
|
||||||
Metric: route.MaxMetric,
|
|
||||||
Peer: "peer2",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
currentRoute: nil,
|
|
||||||
expectedRouteID: "route1",
|
|
||||||
},
|
|
||||||
{
|
|
||||||
name: "multiple connected peers with one relayed",
|
|
||||||
statuses: map[string]routerPeerStatus{
|
|
||||||
"route1": {
|
|
||||||
connected: true,
|
|
||||||
relayed: false,
|
|
||||||
direct: true,
|
|
||||||
},
|
|
||||||
"route2": {
|
|
||||||
connected: true,
|
|
||||||
relayed: true,
|
|
||||||
direct: true,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
existingRoutes: map[string]*route.Route{
|
|
||||||
"route1": {
|
|
||||||
ID: "route1",
|
|
||||||
Metric: route.MaxMetric,
|
|
||||||
Peer: "peer1",
|
|
||||||
},
|
|
||||||
"route2": {
|
|
||||||
ID: "route2",
|
|
||||||
Metric: route.MaxMetric,
|
|
||||||
Peer: "peer2",
|
|
||||||
},
|
|
||||||
},
|
|
||||||
currentRoute: nil,
|
|
||||||
expectedRouteID: "route1",
|
|
||||||
},
|
|
||||||
{
|
{
|
||||||
name: "multiple connected peers with one direct",
|
name: "multiple connected peers with one direct",
|
||||||
statuses: map[string]routerPeerStatus{
|
statuses: map[route.ID]routerPeerStatus{
|
||||||
"route1": {
|
"route1": {
|
||||||
connected: true,
|
connected: true,
|
||||||
relayed: false,
|
relayed: false,
|
||||||
direct: true,
|
|
||||||
},
|
},
|
||||||
"route2": {
|
"route2": {
|
||||||
connected: true,
|
connected: true,
|
||||||
relayed: false,
|
relayed: false,
|
||||||
direct: false,
|
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
existingRoutes: map[string]*route.Route{
|
existingRoutes: map[route.ID]*route.Route{
|
||||||
"route1": {
|
"route1": {
|
||||||
ID: "route1",
|
ID: "route1",
|
||||||
Metric: route.MaxMetric,
|
Metric: route.MaxMetric,
|
||||||
@@ -176,24 +43,34 @@ func TestGetBestrouteFromStatuses(t *testing.T) {
|
|||||||
Peer: "peer2",
|
Peer: "peer2",
|
||||||
},
|
},
|
||||||
},
|
},
|
||||||
currentRoute: nil,
|
currentRoute: "",
|
||||||
expectedRouteID: "route1",
|
expectedRouteID: "route1",
|
||||||
},
|
},
|
||||||
}
|
}
|
||||||
|
|
||||||
for _, tc := range testCases {
|
for i := 0; i < 10; i++ {
|
||||||
t.Run(tc.name, func(t *testing.T) {
|
log.Infof("Test iteration %d", i)
|
||||||
// create new clientNetwork
|
for _, tc := range testCases {
|
||||||
client := &clientNetwork{
|
t.Run(tc.name, func(t *testing.T) {
|
||||||
network: netip.MustParsePrefix("192.168.0.0/24"),
|
currentRoute := &route.Route{
|
||||||
routes: tc.existingRoutes,
|
ID: "routeDoesntExistAnymore",
|
||||||
chosenRoute: tc.currentRoute,
|
}
|
||||||
}
|
if tc.currentRoute != "" {
|
||||||
|
currentRoute = tc.existingRoutes[tc.currentRoute]
|
||||||
|
}
|
||||||
|
|
||||||
chosenRoute := client.getBestRouteFromStatuses(tc.statuses)
|
// create new clientNetwork
|
||||||
if chosenRoute != tc.expectedRouteID {
|
client := &clientNetwork{
|
||||||
t.Errorf("expected routeID %s, got %s", tc.expectedRouteID, chosenRoute)
|
handler: static.NewRoute(&route.Route{Network: netip.MustParsePrefix("192.168.0.0/24")}, nil, nil),
|
||||||
}
|
routes: tc.existingRoutes,
|
||||||
})
|
currentChosen: currentRoute,
|
||||||
|
}
|
||||||
|
|
||||||
|
chosenRoute := client.getBestRouteFromStatuses(tc.statuses)
|
||||||
|
if chosenRoute != tc.expectedRouteID {
|
||||||
|
t.Errorf("expected routeID %s, got %s", tc.expectedRouteID, chosenRoute)
|
||||||
|
}
|
||||||
|
})
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|||||||
396
client/internal/routemanager/dynamic/route.go
Normal file
396
client/internal/routemanager/dynamic/route.go
Normal file
@@ -0,0 +1,396 @@
|
|||||||
|
package dynamic
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"net/netip"
|
||||||
|
"strings"
|
||||||
|
"sync"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/hashicorp/go-multierror"
|
||||||
|
log "github.com/sirupsen/logrus"
|
||||||
|
|
||||||
|
nberrors "github.com/netbirdio/netbird/client/errors"
|
||||||
|
"github.com/netbirdio/netbird/client/internal/peer"
|
||||||
|
"github.com/netbirdio/netbird/client/internal/routemanager/refcounter"
|
||||||
|
"github.com/netbirdio/netbird/client/internal/routemanager/util"
|
||||||
|
"github.com/netbirdio/netbird/iface"
|
||||||
|
"github.com/netbirdio/netbird/management/domain"
|
||||||
|
"github.com/netbirdio/netbird/route"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
DefaultInterval = time.Minute
|
||||||
|
|
||||||
|
minInterval = 2 * time.Second
|
||||||
|
failureInterval = 5 * time.Second
|
||||||
|
|
||||||
|
addAllowedIP = "add allowed IP %s: %w"
|
||||||
|
)
|
||||||
|
|
||||||
|
type domainMap map[domain.Domain][]netip.Prefix
|
||||||
|
|
||||||
|
type resolveResult struct {
|
||||||
|
domain domain.Domain
|
||||||
|
prefix netip.Prefix
|
||||||
|
err error
|
||||||
|
}
|
||||||
|
|
||||||
|
type Route struct {
|
||||||
|
route *route.Route
|
||||||
|
routeRefCounter *refcounter.RouteRefCounter
|
||||||
|
allowedIPsRefcounter *refcounter.AllowedIPsRefCounter
|
||||||
|
interval time.Duration
|
||||||
|
dynamicDomains domainMap
|
||||||
|
mu sync.Mutex
|
||||||
|
currentPeerKey string
|
||||||
|
cancel context.CancelFunc
|
||||||
|
statusRecorder *peer.Status
|
||||||
|
wgInterface iface.IWGIface
|
||||||
|
resolverAddr string
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewRoute(
|
||||||
|
rt *route.Route,
|
||||||
|
routeRefCounter *refcounter.RouteRefCounter,
|
||||||
|
allowedIPsRefCounter *refcounter.AllowedIPsRefCounter,
|
||||||
|
interval time.Duration,
|
||||||
|
statusRecorder *peer.Status,
|
||||||
|
wgInterface iface.IWGIface,
|
||||||
|
resolverAddr string,
|
||||||
|
) *Route {
|
||||||
|
return &Route{
|
||||||
|
route: rt,
|
||||||
|
routeRefCounter: routeRefCounter,
|
||||||
|
allowedIPsRefcounter: allowedIPsRefCounter,
|
||||||
|
interval: interval,
|
||||||
|
dynamicDomains: domainMap{},
|
||||||
|
statusRecorder: statusRecorder,
|
||||||
|
wgInterface: wgInterface,
|
||||||
|
resolverAddr: resolverAddr,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Route) String() string {
|
||||||
|
s, err := r.route.Domains.String()
|
||||||
|
if err != nil {
|
||||||
|
return r.route.Domains.PunycodeString()
|
||||||
|
}
|
||||||
|
return s
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Route) AddRoute(ctx context.Context) error {
|
||||||
|
r.mu.Lock()
|
||||||
|
defer r.mu.Unlock()
|
||||||
|
|
||||||
|
if r.cancel != nil {
|
||||||
|
r.cancel()
|
||||||
|
}
|
||||||
|
|
||||||
|
ctx, r.cancel = context.WithCancel(ctx)
|
||||||
|
|
||||||
|
go r.startResolver(ctx)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// RemoveRoute will stop the dynamic resolver and remove all dynamic routes.
|
||||||
|
// It doesn't touch allowed IPs, these should be removed separately and before calling this method.
|
||||||
|
func (r *Route) RemoveRoute() error {
|
||||||
|
r.mu.Lock()
|
||||||
|
defer r.mu.Unlock()
|
||||||
|
|
||||||
|
if r.cancel != nil {
|
||||||
|
r.cancel()
|
||||||
|
}
|
||||||
|
|
||||||
|
var merr *multierror.Error
|
||||||
|
for domain, prefixes := range r.dynamicDomains {
|
||||||
|
for _, prefix := range prefixes {
|
||||||
|
if _, err := r.routeRefCounter.Decrement(prefix); err != nil {
|
||||||
|
merr = multierror.Append(merr, fmt.Errorf("remove dynamic route for IP %s: %w", prefix, err))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
log.Debugf("Removed dynamic route(s) for [%s]: %s", domain.SafeString(), strings.ReplaceAll(fmt.Sprintf("%s", prefixes), " ", ", "))
|
||||||
|
|
||||||
|
r.statusRecorder.DeleteResolvedDomainsStates(domain)
|
||||||
|
}
|
||||||
|
|
||||||
|
r.dynamicDomains = domainMap{}
|
||||||
|
|
||||||
|
return nberrors.FormatErrorOrNil(merr)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Route) AddAllowedIPs(peerKey string) error {
|
||||||
|
r.mu.Lock()
|
||||||
|
defer r.mu.Unlock()
|
||||||
|
|
||||||
|
var merr *multierror.Error
|
||||||
|
for domain, domainPrefixes := range r.dynamicDomains {
|
||||||
|
for _, prefix := range domainPrefixes {
|
||||||
|
if err := r.incrementAllowedIP(domain, prefix, peerKey); err != nil {
|
||||||
|
merr = multierror.Append(merr, fmt.Errorf(addAllowedIP, prefix, err))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
r.currentPeerKey = peerKey
|
||||||
|
return nberrors.FormatErrorOrNil(merr)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Route) RemoveAllowedIPs() error {
|
||||||
|
r.mu.Lock()
|
||||||
|
defer r.mu.Unlock()
|
||||||
|
|
||||||
|
var merr *multierror.Error
|
||||||
|
for _, domainPrefixes := range r.dynamicDomains {
|
||||||
|
for _, prefix := range domainPrefixes {
|
||||||
|
if _, err := r.allowedIPsRefcounter.Decrement(prefix); err != nil {
|
||||||
|
merr = multierror.Append(merr, fmt.Errorf("remove allowed IP %s: %w", prefix, err))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
r.currentPeerKey = ""
|
||||||
|
return nberrors.FormatErrorOrNil(merr)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Route) startResolver(ctx context.Context) {
|
||||||
|
log.Debugf("Starting dynamic route resolver for domains [%v]", r)
|
||||||
|
|
||||||
|
interval := r.interval
|
||||||
|
if interval < minInterval {
|
||||||
|
interval = minInterval
|
||||||
|
log.Warnf("Dynamic route resolver interval %s is too low, setting to minimum value %s", r.interval, minInterval)
|
||||||
|
}
|
||||||
|
|
||||||
|
ticker := time.NewTicker(interval)
|
||||||
|
defer ticker.Stop()
|
||||||
|
|
||||||
|
if err := r.update(ctx); err != nil {
|
||||||
|
log.Errorf("Failed to resolve domains for route [%v]: %v", r, err)
|
||||||
|
if interval > failureInterval {
|
||||||
|
ticker.Reset(failureInterval)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for {
|
||||||
|
select {
|
||||||
|
case <-ctx.Done():
|
||||||
|
log.Debugf("Stopping dynamic route resolver for domains [%v]", r)
|
||||||
|
return
|
||||||
|
case <-ticker.C:
|
||||||
|
if err := r.update(ctx); err != nil {
|
||||||
|
log.Errorf("Failed to resolve domains for route [%v]: %v", r, err)
|
||||||
|
// Use a lower ticker interval if the update fails
|
||||||
|
if interval > failureInterval {
|
||||||
|
ticker.Reset(failureInterval)
|
||||||
|
}
|
||||||
|
} else if interval > failureInterval {
|
||||||
|
// Reset to the original interval if the update succeeds
|
||||||
|
ticker.Reset(interval)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Route) update(ctx context.Context) error {
|
||||||
|
resolved, err := r.resolveDomains()
|
||||||
|
if err != nil {
|
||||||
|
if len(resolved) == 0 {
|
||||||
|
return fmt.Errorf("resolve domains: %w", err)
|
||||||
|
}
|
||||||
|
log.Warnf("Failed to resolve domains: %v", err)
|
||||||
|
}
|
||||||
|
if err := r.updateDynamicRoutes(ctx, resolved); err != nil {
|
||||||
|
return fmt.Errorf("update dynamic routes: %w", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Route) resolveDomains() (domainMap, error) {
|
||||||
|
results := make(chan resolveResult)
|
||||||
|
go r.resolve(results)
|
||||||
|
|
||||||
|
resolved := domainMap{}
|
||||||
|
var merr *multierror.Error
|
||||||
|
|
||||||
|
for result := range results {
|
||||||
|
if result.err != nil {
|
||||||
|
merr = multierror.Append(merr, result.err)
|
||||||
|
} else {
|
||||||
|
resolved[result.domain] = append(resolved[result.domain], result.prefix)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return resolved, nberrors.FormatErrorOrNil(merr)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Route) resolve(results chan resolveResult) {
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
|
||||||
|
for _, d := range r.route.Domains {
|
||||||
|
wg.Add(1)
|
||||||
|
go func(domain domain.Domain) {
|
||||||
|
defer wg.Done()
|
||||||
|
|
||||||
|
ips, err := r.getIPsFromResolver(domain)
|
||||||
|
if err != nil {
|
||||||
|
log.Tracef("Failed to resolve domain %s with private resolver: %v", domain.SafeString(), err)
|
||||||
|
ips, err = net.LookupIP(string(domain))
|
||||||
|
if err != nil {
|
||||||
|
results <- resolveResult{domain: domain, err: fmt.Errorf("resolve d %s: %w", domain.SafeString(), err)}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for _, ip := range ips {
|
||||||
|
prefix, err := util.GetPrefixFromIP(ip)
|
||||||
|
if err != nil {
|
||||||
|
results <- resolveResult{domain: domain, err: fmt.Errorf("get prefix from IP %s: %w", ip.String(), err)}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
results <- resolveResult{domain: domain, prefix: prefix}
|
||||||
|
}
|
||||||
|
}(d)
|
||||||
|
}
|
||||||
|
|
||||||
|
wg.Wait()
|
||||||
|
close(results)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Route) updateDynamicRoutes(ctx context.Context, newDomains domainMap) error {
|
||||||
|
r.mu.Lock()
|
||||||
|
defer r.mu.Unlock()
|
||||||
|
|
||||||
|
if ctx.Err() != nil {
|
||||||
|
return ctx.Err()
|
||||||
|
}
|
||||||
|
|
||||||
|
var merr *multierror.Error
|
||||||
|
|
||||||
|
for domain, newPrefixes := range newDomains {
|
||||||
|
oldPrefixes := r.dynamicDomains[domain]
|
||||||
|
toAdd, toRemove := determinePrefixChanges(oldPrefixes, newPrefixes)
|
||||||
|
|
||||||
|
addedPrefixes, err := r.addRoutes(domain, toAdd)
|
||||||
|
if err != nil {
|
||||||
|
merr = multierror.Append(merr, err)
|
||||||
|
} else if len(addedPrefixes) > 0 {
|
||||||
|
log.Debugf("Added dynamic route(s) for [%s]: %s", domain.SafeString(), strings.ReplaceAll(fmt.Sprintf("%s", addedPrefixes), " ", ", "))
|
||||||
|
}
|
||||||
|
|
||||||
|
removedPrefixes, err := r.removeRoutes(toRemove)
|
||||||
|
if err != nil {
|
||||||
|
merr = multierror.Append(merr, err)
|
||||||
|
} else if len(removedPrefixes) > 0 {
|
||||||
|
log.Debugf("Removed dynamic route(s) for [%s]: %s", domain.SafeString(), strings.ReplaceAll(fmt.Sprintf("%s", removedPrefixes), " ", ", "))
|
||||||
|
}
|
||||||
|
|
||||||
|
updatedPrefixes := combinePrefixes(oldPrefixes, removedPrefixes, addedPrefixes)
|
||||||
|
r.dynamicDomains[domain] = updatedPrefixes
|
||||||
|
|
||||||
|
r.statusRecorder.UpdateResolvedDomainsStates(domain, updatedPrefixes)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nberrors.FormatErrorOrNil(merr)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Route) addRoutes(domain domain.Domain, prefixes []netip.Prefix) ([]netip.Prefix, error) {
|
||||||
|
var addedPrefixes []netip.Prefix
|
||||||
|
var merr *multierror.Error
|
||||||
|
|
||||||
|
for _, prefix := range prefixes {
|
||||||
|
if _, err := r.routeRefCounter.Increment(prefix, nil); err != nil {
|
||||||
|
merr = multierror.Append(merr, fmt.Errorf("add dynamic route for IP %s: %w", prefix, err))
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
if r.currentPeerKey != "" {
|
||||||
|
if err := r.incrementAllowedIP(domain, prefix, r.currentPeerKey); err != nil {
|
||||||
|
merr = multierror.Append(merr, fmt.Errorf(addAllowedIP, prefix, err))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
addedPrefixes = append(addedPrefixes, prefix)
|
||||||
|
}
|
||||||
|
|
||||||
|
return addedPrefixes, merr.ErrorOrNil()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Route) removeRoutes(prefixes []netip.Prefix) ([]netip.Prefix, error) {
|
||||||
|
if r.route.KeepRoute {
|
||||||
|
return nil, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var removedPrefixes []netip.Prefix
|
||||||
|
var merr *multierror.Error
|
||||||
|
|
||||||
|
for _, prefix := range prefixes {
|
||||||
|
if _, err := r.routeRefCounter.Decrement(prefix); err != nil {
|
||||||
|
merr = multierror.Append(merr, fmt.Errorf("remove dynamic route for IP %s: %w", prefix, err))
|
||||||
|
}
|
||||||
|
if r.currentPeerKey != "" {
|
||||||
|
if _, err := r.allowedIPsRefcounter.Decrement(prefix); err != nil {
|
||||||
|
merr = multierror.Append(merr, fmt.Errorf("remove allowed IP %s: %w", prefix, err))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
removedPrefixes = append(removedPrefixes, prefix)
|
||||||
|
}
|
||||||
|
|
||||||
|
return removedPrefixes, merr.ErrorOrNil()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (r *Route) incrementAllowedIP(domain domain.Domain, prefix netip.Prefix, peerKey string) error {
|
||||||
|
if ref, err := r.allowedIPsRefcounter.Increment(prefix, peerKey); err != nil {
|
||||||
|
return fmt.Errorf(addAllowedIP, prefix, err)
|
||||||
|
} else if ref.Count > 1 && ref.Out != peerKey {
|
||||||
|
log.Warnf("IP [%s] for domain [%s] is already routed by peer [%s]. HA routing disabled",
|
||||||
|
prefix.Addr(),
|
||||||
|
domain.SafeString(),
|
||||||
|
ref.Out,
|
||||||
|
)
|
||||||
|
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func determinePrefixChanges(oldPrefixes, newPrefixes []netip.Prefix) (toAdd, toRemove []netip.Prefix) {
|
||||||
|
prefixSet := make(map[netip.Prefix]bool)
|
||||||
|
for _, prefix := range oldPrefixes {
|
||||||
|
prefixSet[prefix] = false
|
||||||
|
}
|
||||||
|
for _, prefix := range newPrefixes {
|
||||||
|
if _, exists := prefixSet[prefix]; exists {
|
||||||
|
prefixSet[prefix] = true
|
||||||
|
} else {
|
||||||
|
toAdd = append(toAdd, prefix)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for prefix, inUse := range prefixSet {
|
||||||
|
if !inUse {
|
||||||
|
toRemove = append(toRemove, prefix)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
func combinePrefixes(oldPrefixes, removedPrefixes, addedPrefixes []netip.Prefix) []netip.Prefix {
|
||||||
|
prefixSet := make(map[netip.Prefix]struct{})
|
||||||
|
for _, prefix := range oldPrefixes {
|
||||||
|
prefixSet[prefix] = struct{}{}
|
||||||
|
}
|
||||||
|
for _, prefix := range removedPrefixes {
|
||||||
|
delete(prefixSet, prefix)
|
||||||
|
}
|
||||||
|
for _, prefix := range addedPrefixes {
|
||||||
|
prefixSet[prefix] = struct{}{}
|
||||||
|
}
|
||||||
|
|
||||||
|
var combinedPrefixes []netip.Prefix
|
||||||
|
for prefix := range prefixSet {
|
||||||
|
combinedPrefixes = append(combinedPrefixes, prefix)
|
||||||
|
}
|
||||||
|
|
||||||
|
return combinedPrefixes
|
||||||
|
}
|
||||||
13
client/internal/routemanager/dynamic/route_generic.go
Normal file
13
client/internal/routemanager/dynamic/route_generic.go
Normal file
@@ -0,0 +1,13 @@
|
|||||||
|
//go:build !ios
|
||||||
|
|
||||||
|
package dynamic
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net"
|
||||||
|
|
||||||
|
"github.com/netbirdio/netbird/management/domain"
|
||||||
|
)
|
||||||
|
|
||||||
|
// getIPsFromResolver resolves the given domain via the operating system's
// default resolver (this is the non-iOS build; route_ios.go provides the
// iOS variant that queries a private DNS client instead).
// net.LookupIP returns both the IPv4 and IPv6 addresses of the host, or an
// error if the lookup fails.
func (r *Route) getIPsFromResolver(domain domain.Domain) ([]net.IP, error) {
	return net.LookupIP(string(domain))
}
|
||||||
55
client/internal/routemanager/dynamic/route_ios.go
Normal file
55
client/internal/routemanager/dynamic/route_ios.go
Normal file
@@ -0,0 +1,55 @@
|
|||||||
|
//go:build ios
|
||||||
|
|
||||||
|
package dynamic
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"net"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/miekg/dns"
|
||||||
|
|
||||||
|
nbdns "github.com/netbirdio/netbird/client/internal/dns"
|
||||||
|
|
||||||
|
"github.com/netbirdio/netbird/management/domain"
|
||||||
|
)
|
||||||
|
|
||||||
|
// dialTimeout caps how long the private DNS client may spend dialing the resolver.
const dialTimeout = 10 * time.Second
||||||
|
|
||||||
|
func (r *Route) getIPsFromResolver(domain domain.Domain) ([]net.IP, error) {
|
||||||
|
privateClient, err := nbdns.GetClientPrivate(r.wgInterface.Address().IP, r.wgInterface.Name(), dialTimeout)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("error while creating private client: %s", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
msg := new(dns.Msg)
|
||||||
|
msg.SetQuestion(dns.Fqdn(string(domain)), dns.TypeA)
|
||||||
|
|
||||||
|
startTime := time.Now()
|
||||||
|
|
||||||
|
response, _, err := privateClient.Exchange(msg, r.resolverAddr)
|
||||||
|
if err != nil {
|
||||||
|
return nil, fmt.Errorf("DNS query for %s failed after %s: %s ", domain.SafeString(), time.Since(startTime), err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if response.Rcode != dns.RcodeSuccess {
|
||||||
|
return nil, fmt.Errorf("dns response code: %s", dns.RcodeToString[response.Rcode])
|
||||||
|
}
|
||||||
|
|
||||||
|
ips := make([]net.IP, 0)
|
||||||
|
|
||||||
|
for _, answ := range response.Answer {
|
||||||
|
if aRecord, ok := answ.(*dns.A); ok {
|
||||||
|
ips = append(ips, aRecord.A)
|
||||||
|
}
|
||||||
|
if aaaaRecord, ok := answ.(*dns.AAAA); ok {
|
||||||
|
ips = append(ips, aaaaRecord.AAAA)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(ips) == 0 {
|
||||||
|
return nil, fmt.Errorf("no A or AAAA records found for %s", domain.SafeString())
|
||||||
|
}
|
||||||
|
|
||||||
|
return ips, nil
|
||||||
|
}
|
||||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user