Compare commits
16 Commits
main...release-br
SHA1 |
---|
066193c18d |
0438c67e25 |
6842c3c194 |
576b08e5ee |
a8231b18cc |
6d98b5c9a8 |
fe33b17db3 |
5a98bbcbbb |
1e1c16bc24 |
ad504be066 |
6bdb9daec0 |
a3ce35d0c6 |
d1fc9bba7e |
9271e8a062 |
50cf21a779 |
ab998de989 |
@@ -12,6 +12,7 @@ body:
    attributes:
      label: What is the issue?
      description: What happened? What did you expect to happen?
      placeholder: oh no
    validations:
      required: true
  - type: textarea

@@ -60,13 +61,6 @@ body:
      placeholder: e.g., 1.14.4
    validations:
      required: false
  - type: textarea
    id: other-software
    attributes:
      label: Other software
      description: What [other software](https://github.com/tailscale/tailscale/wiki/OtherSoftwareInterop) (networking, security, etc) are you running?
    validations:
      required: false
  - type: input
    id: bug-report
    attributes:
@@ -0,0 +1,31 @@
name: CIFuzz
on: [pull_request]

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  Fuzzing:
    runs-on: ubuntu-latest
    steps:
    - name: Build Fuzzers
      id: build
      uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@master
      with:
        oss-fuzz-project-name: 'tailscale'
        dry-run: false
        language: go
    - name: Run Fuzzers
      uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@master
      with:
        oss-fuzz-project-name: 'tailscale'
        fuzz-seconds: 300
        dry-run: false
        language: go
    - name: Upload Crash
      uses: actions/upload-artifact@v3
      if: failure() && steps.build.outcome == 'success'
      with:
        name: artifacts
        path: ./out/artifacts
@@ -17,8 +17,6 @@ on:
  pull_request:
    # The branches below must be a subset of the branches above
    branches: [ main ]
  merge_group:
    branches: [ main ]
  schedule:
    - cron: '31 14 * * 5'

@@ -49,7 +47,7 @@ jobs:

    # Initializes the CodeQL tools for scanning.
    - name: Initialize CodeQL
      uses: github/codeql-action/init@v2
      uses: github/codeql-action/init@v1
      with:
        languages: ${{ matrix.language }}
        # If you wish to specify custom queries, you can do so here or in a config file.

@@ -60,7 +58,7 @@ jobs:
    # Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
    # If this step fails, then you should remove it and run the build manually (see below)
    - name: Autobuild
      uses: github/codeql-action/autobuild@v2
      uses: github/codeql-action/autobuild@v1

    # ℹ️ Command-line programs to run using the OS shell.
    # 📚 https://git.io/JvXDl

@@ -74,4 +72,4 @@ jobs:
    # make release

    - name: Perform CodeQL Analysis
      uses: github/codeql-action/analyze@v2
      uses: github/codeql-action/analyze@v1
@@ -0,0 +1,55 @@
name: Android-Cross

on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - '*'
      - 'release-branch/*'

concurrency:
  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  build:
    runs-on: ubuntu-latest

    if: "!contains(github.event.head_commit.message, '[ci skip]')"

    steps:
    - name: Check out code into the Go module directory
      uses: actions/checkout@v3

    - name: Set up Go
      uses: actions/setup-go@v3
      with:
        go-version-file: go.mod
      id: go

    - name: Android smoke build
      # Super minimal Android build that doesn't even use CGO and doesn't build everything that's needed
      # and is only arm64. But it's a smoke build: it's not meant to catch everything. But it'll catch
      # some Android breakages early.
      # TODO(bradfitz): better; see https://github.com/tailscale/tailscale/issues/4482
      env:
        GOOS: android
        GOARCH: arm64
      run: go install ./net/netns ./ipn/ipnlocal ./wgengine/magicsock/ ./wgengine/ ./wgengine/router/ ./wgengine/netstack ./util/dnsname/ ./ipn/ ./net/interfaces ./wgengine/router/ ./tailcfg/ ./types/logger/ ./net/dns ./hostinfo ./version

    - uses: k0kubun/action-slack@v2.0.0
      with:
        payload: |
          {
            "attachments": [{
              "text": "${{ job.status }}: ${{ github.workflow }} <https://github.com/${{ github.repository }}/commit/${{ github.sha }}/checks|${{ env.COMMIT_DATE }} #${{ env.COMMIT_NUMBER_OF_DAY }}> " +
              "(<https://github.com/${{ github.repository }}/commit/${{ github.sha }}|" + "${{ github.sha }}".substring(0, 10) + ">) " +
              "of ${{ github.repository }}@" + "${{ github.ref }}".split('/').reverse()[0] + " by ${{ github.event.head_commit.committer.name }}",
              "color": "danger"
            }]
          }
      env:
        SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
      if: failure() && github.event_name == 'push'
@@ -0,0 +1,63 @@
name: Darwin-Cross

on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - '*'
      - 'release-branch/*'

concurrency:
  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  build:
    runs-on: ubuntu-latest

    if: "!contains(github.event.head_commit.message, '[ci skip]')"

    steps:
    - name: Check out code into the Go module directory
      uses: actions/checkout@v3

    - name: Set up Go
      uses: actions/setup-go@v3
      with:
        go-version-file: go.mod
      id: go

    - name: macOS build cmd
      env:
        GOOS: darwin
        GOARCH: amd64
      run: go build ./cmd/...

    - name: macOS build tests
      env:
        GOOS: darwin
        GOARCH: amd64
      run: for d in $(go list -f '{{if .TestGoFiles}}{{.Dir}}{{end}}' ./... ); do (echo $d; cd $d && go test -c ); done

    - name: iOS build most
      env:
        GOOS: ios
        GOARCH: arm64
      run: go install ./ipn/... ./wgengine/ ./types/... ./control/controlclient

    - uses: k0kubun/action-slack@v2.0.0
      with:
        payload: |
          {
            "attachments": [{
              "text": "${{ job.status }}: ${{ github.workflow }} <https://github.com/${{ github.repository }}/commit/${{ github.sha }}/checks|${{ env.COMMIT_DATE }} #${{ env.COMMIT_NUMBER_OF_DAY }}> " +
              "(<https://github.com/${{ github.repository }}/commit/${{ github.sha }}|" + "${{ github.sha }}".substring(0, 10) + ">) " +
              "of ${{ github.repository }}@" + "${{ github.ref }}".split('/').reverse()[0] + " by ${{ github.event.head_commit.committer.name }}",
              "color": "danger"
            }]
          }
      env:
        SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
      if: failure() && github.event_name == 'push'
@@ -0,0 +1,57 @@
name: FreeBSD-Cross

on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - '*'
      - 'release-branch/*'

concurrency:
  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  build:
    runs-on: ubuntu-latest

    if: "!contains(github.event.head_commit.message, '[ci skip]')"

    steps:
    - name: Check out code into the Go module directory
      uses: actions/checkout@v3

    - name: Set up Go
      uses: actions/setup-go@v3
      with:
        go-version-file: go.mod
      id: go

    - name: FreeBSD build cmd
      env:
        GOOS: freebsd
        GOARCH: amd64
      run: go build ./cmd/...

    - name: FreeBSD build tests
      env:
        GOOS: freebsd
        GOARCH: amd64
      run: for d in $(go list -f '{{if .TestGoFiles}}{{.Dir}}{{end}}' ./... ); do (echo $d; cd $d && go test -c ); done

    - uses: k0kubun/action-slack@v2.0.0
      with:
        payload: |
          {
            "attachments": [{
              "text": "${{ job.status }}: ${{ github.workflow }} <https://github.com/${{ github.repository }}/commit/${{ github.sha }}/checks|${{ env.COMMIT_DATE }} #${{ env.COMMIT_NUMBER_OF_DAY }}> " +
              "(<https://github.com/${{ github.repository }}/commit/${{ github.sha }}|" + "${{ github.sha }}".substring(0, 10) + ">) " +
              "of ${{ github.repository }}@" + "${{ github.ref }}".split('/').reverse()[0] + " by ${{ github.event.head_commit.committer.name }}",
              "color": "danger"
            }]
          }
      env:
        SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
      if: failure() && github.event_name == 'push'
@@ -0,0 +1,57 @@
name: Loongnix-Cross

on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - '*'
      - 'release-branch/*'

concurrency:
  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  build:
    runs-on: ubuntu-latest

    if: "!contains(github.event.head_commit.message, '[ci skip]')"

    steps:
    - name: Check out code into the Go module directory
      uses: actions/checkout@v3

    - name: Set up Go
      uses: actions/setup-go@v3
      with:
        go-version-file: go.mod
      id: go

    - name: Loongnix build cmd
      env:
        GOOS: linux
        GOARCH: loong64
      run: go build ./cmd/...

    - name: Loongnix build tests
      env:
        GOOS: linux
        GOARCH: loong64
      run: for d in $(go list -f '{{if .TestGoFiles}}{{.Dir}}{{end}}' ./... ); do (echo $d; cd $d && go test -c ); done

    - uses: k0kubun/action-slack@v2.0.0
      with:
        payload: |
          {
            "attachments": [{
              "text": "${{ job.status }}: ${{ github.workflow }} <https://github.com/${{ github.repository }}/commit/${{ github.sha }}/checks|${{ env.COMMIT_DATE }} #${{ env.COMMIT_NUMBER_OF_DAY }}> " +
              "(<https://github.com/${{ github.repository }}/commit/${{ github.sha }}|" + "${{ github.sha }}".substring(0, 10) + ">) " +
              "of ${{ github.repository }}@" + "${{ github.ref }}".split('/').reverse()[0] + " by ${{ github.event.head_commit.committer.name }}",
              "color": "danger"
            }]
          }
      env:
        SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
      if: failure() && github.event_name == 'push'
@@ -0,0 +1,57 @@
name: OpenBSD-Cross

on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - '*'
      - 'release-branch/*'

concurrency:
  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  build:
    runs-on: ubuntu-latest

    if: "!contains(github.event.head_commit.message, '[ci skip]')"

    steps:
    - name: Check out code into the Go module directory
      uses: actions/checkout@v3

    - name: Set up Go
      uses: actions/setup-go@v3
      with:
        go-version-file: go.mod
      id: go

    - name: OpenBSD build cmd
      env:
        GOOS: openbsd
        GOARCH: amd64
      run: go build ./cmd/...

    - name: OpenBSD build tests
      env:
        GOOS: openbsd
        GOARCH: amd64
      run: for d in $(go list -f '{{if .TestGoFiles}}{{.Dir}}{{end}}' ./... ); do (echo $d; cd $d && go test -c ); done

    - uses: k0kubun/action-slack@v2.0.0
      with:
        payload: |
          {
            "attachments": [{
              "text": "${{ job.status }}: ${{ github.workflow }} <https://github.com/${{ github.repository }}/commit/${{ github.sha }}/checks|${{ env.COMMIT_DATE }} #${{ env.COMMIT_NUMBER_OF_DAY }}> " +
              "(<https://github.com/${{ github.repository }}/commit/${{ github.sha }}|" + "${{ github.sha }}".substring(0, 10) + ">) " +
              "of ${{ github.repository }}@" + "${{ github.ref }}".split('/').reverse()[0] + " by ${{ github.event.head_commit.committer.name }}",
              "color": "danger"
            }]
          }
      env:
        SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
      if: failure() && github.event_name == 'push'
@@ -0,0 +1,58 @@
name: Wasm-Cross

on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - '*'
      - 'release-branch/*'

concurrency:
  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  build:
    runs-on: ubuntu-latest

    if: "!contains(github.event.head_commit.message, '[ci skip]')"

    steps:
    - name: Check out code into the Go module directory
      uses: actions/checkout@v3

    - name: Set up Go
      uses: actions/setup-go@v3
      with:
        go-version-file: go.mod
      id: go

    - name: Wasm client build
      env:
        GOOS: js
        GOARCH: wasm
      run: go build ./cmd/tsconnect/wasm ./cmd/tailscale/cli

    - name: tsconnect static build
      # Use our custom Go toolchain, we set build tags (to control binary size)
      # that depend on it.
      run: |
        ./tool/go run ./cmd/tsconnect --fast-compression build
        ./tool/go run ./cmd/tsconnect --fast-compression build-pkg

    - uses: k0kubun/action-slack@v2.0.0
      with:
        payload: |
          {
            "attachments": [{
              "text": "${{ job.status }}: ${{ github.workflow }} <https://github.com/${{ github.repository }}/commit/${{ github.sha }}/checks|${{ env.COMMIT_DATE }} #${{ env.COMMIT_NUMBER_OF_DAY }}> " +
              "(<https://github.com/${{ github.repository }}/commit/${{ github.sha }}|" + "${{ github.sha }}".substring(0, 10) + ">) " +
              "of ${{ github.repository }}@" + "${{ github.ref }}".split('/').reverse()[0] + " by ${{ github.event.head_commit.committer.name }}",
              "color": "danger"
            }]
          }
      env:
        SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
      if: failure() && github.event_name == 'push'
@@ -0,0 +1,57 @@
name: Windows-Cross

on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - '*'
      - 'release-branch/*'

concurrency:
  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  build:
    runs-on: ubuntu-latest

    if: "!contains(github.event.head_commit.message, '[ci skip]')"

    steps:
    - name: Check out code into the Go module directory
      uses: actions/checkout@v3

    - name: Set up Go
      uses: actions/setup-go@v3
      with:
        go-version-file: go.mod
      id: go

    - name: Windows build cmd
      env:
        GOOS: windows
        GOARCH: amd64
      run: go build ./cmd/...

    - name: Windows build tests
      env:
        GOOS: windows
        GOARCH: amd64
      run: for d in $(go list -f '{{if .TestGoFiles}}{{.Dir}}{{end}}' ./... ); do (echo $d; cd $d && go test -c ); done

    - uses: k0kubun/action-slack@v2.0.0
      with:
        payload: |
          {
            "attachments": [{
              "text": "${{ job.status }}: ${{ github.workflow }} <https://github.com/${{ github.repository }}/commit/${{ github.sha }}/checks|${{ env.COMMIT_DATE }} #${{ env.COMMIT_NUMBER_OF_DAY }}> " +
              "(<https://github.com/${{ github.repository }}/commit/${{ github.sha }}|" + "${{ github.sha }}".substring(0, 10) + ">) " +
              "of ${{ github.repository }}@" + "${{ github.ref }}".split('/').reverse()[0] + " by ${{ github.event.head_commit.committer.name }}",
              "color": "danger"
            }]
          }
      env:
        SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
      if: failure() && github.event_name == 'push'
@@ -0,0 +1,33 @@
name: depaware

on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - '*'
      - 'release-branch/*'

concurrency:
  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  build:
    runs-on: ubuntu-latest

    steps:
    - name: Check out code
      uses: actions/checkout@v3

    - name: Set up Go
      uses: actions/setup-go@v3
      with:
        go-version-file: go.mod

    - name: depaware
      run: go run github.com/tailscale/depaware --check
        tailscale.com/cmd/tailscaled
        tailscale.com/cmd/tailscale
        tailscale.com/cmd/derper
@@ -1,15 +0,0 @@
name: "Dockerfile build"
on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - "*"
jobs:
  deploy:
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@v3
    - name: "Build Docker image"
      run: docker build .
@@ -17,7 +17,7 @@ concurrency:
  cancel-in-progress: true

jobs:
  update-licenses:
  tailscale:
    runs-on: ubuntu-latest

    steps:

@@ -25,7 +25,7 @@ jobs:
      uses: actions/checkout@v3

    - name: Set up Go
      uses: actions/setup-go@v4
      uses: actions/setup-go@v3
      with:
        go-version-file: go.mod

@@ -42,7 +42,7 @@ jobs:
        go-licenses report tailscale.com/cmd/tailscale tailscale.com/cmd/tailscaled > licenses/tailscale.md --template .github/licenses.tmpl

    - name: Get access token
      uses: tibdex/github-app-token@b62528385c34dbc9f38e5f4225ac829252d1ea92 # v1.8.0
      uses: tibdex/github-app-token@f717b5ecd4534d3c4df4ce9b5c1c2214f0f7cd06 # v1.6.0
      id: generate-token
      with:
        app_id: ${{ secrets.LICENSING_APP_ID }}

@@ -50,11 +50,11 @@ jobs:
        private_key: ${{ secrets.LICENSING_APP_PRIVATE_KEY }}

    - name: Send pull request
      uses: peter-evans/create-pull-request@284f54f989303d2699d373481a0cfa13ad5a6666 #v5.0.1
      uses: peter-evans/create-pull-request@ad43dccb4d726ca8514126628bec209b8354b6dd #v4.1.4
      with:
        token: ${{ steps.generate-token.outputs.token }}
        author: License Updater <noreply+license-updater@tailscale.com>
        committer: License Updater <noreply+license-updater@tailscale.com>
        author: License Updater <noreply@tailscale.com>
        committer: License Updater <noreply@tailscale.com>
        branch: licenses/cli
        commit-message: "licenses: update tailscale{,d} licenses"
        title: "licenses: update tailscale{,d} licenses"
@@ -0,0 +1,42 @@
name: go generate

on:
  push:
    branches:
      - main
      - "release-branch/*"
  pull_request:
    branches:
      - "*"

concurrency:
  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  check:
    runs-on: ubuntu-latest

    steps:
    - name: Check out code
      uses: actions/checkout@v3
      with:
        fetch-depth: 0

    - name: Set up Go
      uses: actions/setup-go@v3
      with:
        go-version-file: go.mod

    - name: check 'go generate' is clean
      run: |
        if [[ "${{github.ref}}" == release-branch/* ]]
        then
          pkgs=$(go list ./... | grep -v dnsfallback)
        else
          pkgs=$(go list ./... | grep -v dnsfallback)
        fi
        go generate $pkgs
        echo
        echo
        git diff --name-only --exit-code || (echo "The files above need updating. Please run 'go generate'."; exit 1)
@@ -0,0 +1,35 @@
name: go mod tidy

on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - "*"

concurrency:
  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  check:
    runs-on: ubuntu-latest

    steps:
    - name: Check out code
      uses: actions/checkout@v3
      with:
        fetch-depth: 0

    - name: Set up Go
      uses: actions/setup-go@v3
      with:
        go-version-file: go.mod

    - name: check 'go mod tidy' is clean
      run: |
        go mod tidy
        echo
        echo
        git diff --name-only --exit-code || (echo "Please run 'go mod tidy'."; exit 1)
@@ -1,40 +0,0 @@
name: golangci-lint
on:
  # For now, only lint pull requests, not the main branches.
  pull_request:

  # TODO(andrew): enable for main branch after an initial waiting period.
  #push:
  #  branches:
  #    - main

  workflow_dispatch:

permissions:
  contents: read
  pull-requests: read

concurrency:
  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  golangci:
    name: lint
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@v3

    - uses: actions/setup-go@v4
      with:
        go-version-file: go.mod
        cache: false

    - name: golangci-lint
      # Note: this is the 'v3' tag as of 2023-04-17
      uses: golangci/golangci-lint-action@639cd343e1d3b897ff35927a75193d57cfcba299
      with:
        version: v1.52.2

        # Show only new issues if it's a pull request.
        only-new-issues: true
@@ -1,102 +0,0 @@
name: test installer.sh

on:
  push:
    branches:
      - "main"
    paths:
      - scripts/installer.sh
  pull_request:
    branches:
      - "*"
    paths:
      - scripts/installer.sh

jobs:
  test:
    strategy:
      # Don't abort the entire matrix if one element fails.
      fail-fast: false
      # Don't start all of these at once, which could saturate Github workers.
      max-parallel: 4
      matrix:
        image:
          # This is a list of Docker images against which we test our installer.
          # If you find that some of these no longer exist, please feel free
          # to remove them from the list.
          # When adding new images, please only use official ones.
          - "debian:oldstable-slim"
          - "debian:stable-slim"
          - "debian:testing-slim"
          - "debian:sid-slim"
          - "ubuntu:18.04"
          - "ubuntu:20.04"
          - "ubuntu:22.04"
          - "ubuntu:22.10"
          - "ubuntu:23.04"
          - "elementary/docker:stable"
          - "elementary/docker:unstable"
          - "parrotsec/core:lts-amd64"
          - "parrotsec/core:latest"
          - "kalilinux/kali-rolling"
          - "kalilinux/kali-dev"
          - "oraclelinux:9"
          - "oraclelinux:8"
          - "fedora:latest"
          - "rockylinux:8.7"
          - "rockylinux:9"
          - "amazonlinux:latest"
          - "opensuse/leap:latest"
          - "opensuse/tumbleweed:latest"
          - "archlinux:latest"
          - "alpine:3.14"
          - "alpine:latest"
          - "alpine:edge"
        deps:
          # Run all images installing curl as a dependency.
          - curl
        include:
          # Check a few images with wget rather than curl.
          - { image: "debian:oldstable-slim", deps: "wget" }
          - { image: "debian:sid-slim", deps: "wget" }
          - { image: "ubuntu:23.04", deps: "wget" }
          # Ubuntu 16.04 also needs apt-transport-https installed.
          - { image: "ubuntu:16.04", deps: "curl apt-transport-https" }
          - { image: "ubuntu:16.04", deps: "wget apt-transport-https" }
    runs-on: ubuntu-latest
    container:
      image: ${{ matrix.image }}
      options: --user root
    steps:
    - name: install dependencies (yum)
      # tar and gzip are needed by the actions/checkout below.
      run: yum install -y --allowerasing tar gzip ${{ matrix.deps }}
      if: |
        contains(matrix.image, 'centos')
        || contains(matrix.image, 'oraclelinux')
        || contains(matrix.image, 'fedora')
        || contains(matrix.image, 'amazonlinux')
    - name: install dependencies (zypper)
      # tar and gzip are needed by the actions/checkout below.
      run: zypper --non-interactive install tar gzip
      if: contains(matrix.image, 'opensuse')
    - name: install dependencies (apt-get)
      run: |
        apt-get update
        apt-get install -y ${{ matrix.deps }}
      if: |
        contains(matrix.image, 'debian')
        || contains(matrix.image, 'ubuntu')
        || contains(matrix.image, 'elementary')
        || contains(matrix.image, 'parrotsec')
        || contains(matrix.image, 'kalilinux')
    - name: checkout
      uses: actions/checkout@v3
    - name: run installer
      run: scripts/installer.sh
      # Package installation can fail in docker because systemd is not running
      # as PID 1, so ignore errors at this step. The real check is the
      # `tailscale --version` command below.
      continue-on-error: true
    - name: check tailscale version
      run: tailscale --version
@@ -0,0 +1,45 @@
name: license

on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - '*'
      - 'release-branch/*'

concurrency:
  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  build:
    runs-on: ubuntu-latest

    steps:
    - name: Check out code
      uses: actions/checkout@v3

    - name: Set up Go
      uses: actions/setup-go@v3
      with:
        go-version-file: go.mod

    - name: Run license checker
      run: ./scripts/check_license_headers.sh .

    - uses: k0kubun/action-slack@v2.0.0
      with:
        payload: |
          {
            "attachments": [{
              "text": "${{ job.status }}: ${{ github.workflow }} <https://github.com/${{ github.repository }}/commit/${{ github.sha }}/checks|${{ env.COMMIT_DATE }} #${{ env.COMMIT_NUMBER_OF_DAY }}> " +
              "(<https://github.com/${{ github.repository }}/commit/${{ github.sha }}|" + "${{ github.sha }}".substring(0, 10) + ">) " +
              "of ${{ github.repository }}@" + "${{ github.ref }}".split('/').reverse()[0] + " by ${{ github.event.head_commit.committer.name }}",
              "color": "danger"
            }]
          }
      env:
        SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
      if: failure() && github.event_name == 'push'
@@ -0,0 +1,70 @@
name: Linux race

on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - '*'
      - 'release-branch/*'

concurrency:
  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  build:
    runs-on: ubuntu-latest

    if: "!contains(github.event.head_commit.message, '[ci skip]')"

    steps:
    - name: Check out code into the Go module directory
      uses: actions/checkout@v3

    - name: Set up Go
      uses: actions/setup-go@v3
      with:
        go-version-file: go.mod
      id: go

    - name: Build test wrapper
      run: ./tool/go build -o /tmp/testwrapper ./cmd/testwrapper

    - name: Basic build
      run: go build ./cmd/...

    - name: Run tests and benchmarks with -race flag on linux
      run: go test -exec=/tmp/testwrapper -race -bench=. -benchtime=1x ./...

    - name: Check that no tracked files in the repo have been modified
      run: git diff --no-ext-diff --name-only --exit-code || (echo "Build/test modified the files above."; exit 1)

    - name: Check that no files have been added to the repo
      run: |
        # Note: The "error: pathspec..." you see below is normal!
        # In the success case in which there are no new untracked files,
        # git ls-files complains about the pathspec not matching anything.
        # That's OK. It's not worth the effort to suppress. Please ignore it.
        if git ls-files --others --exclude-standard --directory --no-empty-directory --error-unmatch -- ':/*'
        then
          echo "Build/test created untracked files in the repo (file names above)."
          exit 1
        fi

    - uses: k0kubun/action-slack@v2.0.0
      with:
        payload: |
          {
            "attachments": [{
              "text": "${{ job.status }}: ${{ github.workflow }} <https://github.com/${{ github.repository }}/commit/${{ github.sha }}/checks|${{ env.COMMIT_DATE }} #${{ env.COMMIT_NUMBER_OF_DAY }}> " +
              "(<https://github.com/${{ github.repository }}/commit/${{ github.sha }}|" + "${{ github.sha }}".substring(0, 10) + ">) " +
              "of ${{ github.repository }}@" + "${{ github.ref }}".split('/').reverse()[0] + " by ${{ github.event.head_commit.committer.name }}",
              "color": "danger"
            }]
          }
      env:
        SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
      if: failure() && github.event_name == 'push'
@@ -0,0 +1,80 @@
name: Linux

on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - '*'
      - 'release-branch/*'

concurrency:
  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  build:
    runs-on: ubuntu-22.04

    if: "!contains(github.event.head_commit.message, '[ci skip]')"

    steps:
    - name: Check out code into the Go module directory
      uses: actions/checkout@v3

    - name: Set up Go
      uses: actions/setup-go@v3
      with:
        go-version-file: go.mod
      id: go

    - name: Basic build
      run: go build ./cmd/...

    - name: Build test wrapper
      run: ./tool/go build -o /tmp/testwrapper ./cmd/testwrapper

    - name: Build variants
      run: |
        go install --tags=ts_include_cli ./cmd/tailscaled
        go install --tags=ts_omit_aws ./cmd/tailscaled

    - name: Get QEMU
      run: |
        sudo apt-get -y update
        sudo apt-get -y install qemu-user

    - name: Run tests on linux
      run: go test -exec=/tmp/testwrapper -bench=. -benchtime=1x ./...

    - name: Check that no tracked files in the repo have been modified
      run: git diff --no-ext-diff --name-only --exit-code || (echo "Build/test modified the files above."; exit 1)

    - name: Check that no files have been added to the repo
      run: |
        # Note: The "error: pathspec..." you see below is normal!
        # In the success case in which there are no new untracked files,
        # git ls-files complains about the pathspec not matching anything.
        # That's OK. It's not worth the effort to suppress. Please ignore it.
        if git ls-files --others --exclude-standard --directory --no-empty-directory --error-unmatch -- ':/*'
        then
          echo "Build/test created untracked files in the repo (file names above)."
          exit 1
        fi

    - uses: k0kubun/action-slack@v2.0.0
      with:
        payload: |
          {
            "attachments": [{
              "text": "${{ job.status }}: ${{ github.workflow }} <https://github.com/${{ github.repository }}/commit/${{ github.sha }}/checks|${{ env.COMMIT_DATE }} #${{ env.COMMIT_NUMBER_OF_DAY }}> " +
              "(<https://github.com/${{ github.repository }}/commit/${{ github.sha }}|" + "${{ github.sha }}".substring(0, 10) + ">) " +
              "of ${{ github.repository }}@" + "${{ github.ref }}".split('/').reverse()[0] + " by ${{ github.event.head_commit.committer.name }}",
              "color": "danger"
            }]
          }
      env:
        SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
      if: failure() && github.event_name == 'push'
@@ -0,0 +1,67 @@
name: Linux 32-bit

on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - '*'
      - 'release-branch/*'

concurrency:
  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  build:
    runs-on: ubuntu-latest

    if: "!contains(github.event.head_commit.message, '[ci skip]')"

    steps:
    - name: Check out code into the Go module directory
      uses: actions/checkout@v3

    - name: Set up Go
      uses: actions/setup-go@v3
      with:
        go-version-file: go.mod
      id: go

    - name: Basic build
      run: GOARCH=386 go build ./cmd/...

    - name: Run tests on linux
      run: GOARCH=386 go test -bench=. -benchtime=1x ./...

    - name: Check that no tracked files in the repo have been modified
      run: git diff --no-ext-diff --name-only --exit-code || (echo "Build/test modified the files above."; exit 1)

    - name: Check that no files have been added to the repo
      run: |
        # Note: The "error: pathspec..." you see below is normal!
        # In the success case in which there are no new untracked files,
        # git ls-files complains about the pathspec not matching anything.
        # That's OK. It's not worth the effort to suppress. Please ignore it.
        if git ls-files --others --exclude-standard --directory --no-empty-directory --error-unmatch -- ':/*'
        then
          echo "Build/test created untracked files in the repo (file names above)."
          exit 1
        fi

    - uses: k0kubun/action-slack@v2.0.0
      with:
        payload: |
          {
            "attachments": [{
              "text": "${{ job.status }}: ${{ github.workflow }} <https://github.com/${{ github.repository }}/commit/${{ github.sha }}/checks|${{ env.COMMIT_DATE }} #${{ env.COMMIT_NUMBER_OF_DAY }}> " +
              "(<https://github.com/${{ github.repository }}/commit/${{ github.sha }}|" + "${{ github.sha }}".substring(0, 10) + ">) " +
              "of ${{ github.repository }}@" + "${{ github.ref }}".split('/').reverse()[0] + " by ${{ github.event.head_commit.committer.name }}",
              "color": "danger"
            }]
          }
      env:
        SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
      if: failure() && github.event_name == 'push'
@@ -0,0 +1,113 @@
name: static-analysis

on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - '*'
      - 'release-branch/*'

concurrency:
  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  gofmt:
    runs-on: ubuntu-latest
    steps:
    - name: Check out code
      uses: actions/checkout@v3
    - name: Set up Go
      uses: actions/setup-go@v3
      with:
        go-version-file: go.mod
    - name: Run gofmt (goimports)
      run: go run golang.org/x/tools/cmd/goimports -d --format-only .
    - uses: k0kubun/action-slack@v2.0.0
      with:
        payload: |
          {
            "attachments": [{
              "text": "${{ job.status }}: ${{ github.workflow }} <https://github.com/${{ github.repository }}/commit/${{ github.sha }}/checks|${{ env.COMMIT_DATE }} #${{ env.COMMIT_NUMBER_OF_DAY }}> " +
              "(<https://github.com/${{ github.repository }}/commit/${{ github.sha }}|" + "${{ github.sha }}".substring(0, 10) + ">) " +
              "of ${{ github.repository }}@" + "${{ github.ref }}".split('/').reverse()[0] + " by ${{ github.event.head_commit.committer.name }}",
              "color": "danger"
            }]
          }
      env:
        SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
      if: failure() && github.event_name == 'push'

  vet:
    runs-on: ubuntu-latest
    steps:
    - name: Set up Go
      uses: actions/setup-go@v3
      with:
        go-version: 1.19
    - name: Check out code
      uses: actions/checkout@v3
    - name: Run go vet
      run: go vet ./...
    - uses: k0kubun/action-slack@v2.0.0
      with:
        payload: |
          {
            "attachments": [{
              "text": "${{ job.status }}: ${{ github.workflow }} <https://github.com/${{ github.repository }}/commit/${{ github.sha }}/checks|${{ env.COMMIT_DATE }} #${{ env.COMMIT_NUMBER_OF_DAY }}> " +
              "(<https://github.com/${{ github.repository }}/commit/${{ github.sha }}|" + "${{ github.sha }}".substring(0, 10) + ">) " +
              "of ${{ github.repository }}@" + "${{ github.ref }}".split('/').reverse()[0] + " by ${{ github.event.head_commit.committer.name }}",
              "color": "danger"
            }]
          }
      env:
        SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
      if: failure() && github.event_name == 'push'

  staticcheck:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        goos: [linux, windows, darwin]
        goarch: [amd64]
        include:
          - goos: windows
            goarch: 386

    steps:
    - name: Set up Go
      uses: actions/setup-go@v3
      with:
        go-version: 1.19

    - name: Check out code
      uses: actions/checkout@v3

    - name: Install staticcheck
      run: "GOBIN=~/.local/bin go install honnef.co/go/tools/cmd/staticcheck"

    - name: Print staticcheck version
      run: "staticcheck -version"

    - name: "Run staticcheck (${{ matrix.goos }}/${{ matrix.goarch }})"
      env:
        GOOS: ${{ matrix.goos }}
        GOARCH: ${{ matrix.goarch }}
      run: "staticcheck -- $(go list ./... | grep -v tempfork)"

    - uses: k0kubun/action-slack@v2.0.0
      with:
        payload: |
          {
            "attachments": [{
              "text": "${{ job.status }}: ${{ github.workflow }} <https://github.com/${{ github.repository }}/commit/${{ github.sha }}/checks|${{ env.COMMIT_DATE }} #${{ env.COMMIT_NUMBER_OF_DAY }}> " +
              "(<https://github.com/${{ github.repository }}/commit/${{ github.sha }}|" + "${{ github.sha }}".substring(0, 10) + ">) " +
              "of ${{ github.repository }}@" + "${{ github.ref }}".split('/').reverse()[0] + " by ${{ github.event.head_commit.committer.name }}",
              "color": "danger"
            }]
          }
      env:
        SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
      if: failure() && github.event_name == 'push'
@@ -1,500 +0,0 @@
# This is our main "CI tests" workflow. It runs everything that should run on
# both PRs and merged commits, and for the latter reports failures to slack.
name: CI

env:
  # Our fuzz job, powered by OSS-Fuzz, fails periodically because we upgrade to
  # new Go versions very eagerly. OSS-Fuzz is a little more conservative, and
  # ends up being unable to compile our code.
  #
  # When this happens, we want to disable the fuzz target until OSS-Fuzz catches
  # up. However, we also don't want to forget to turn it back on when OSS-Fuzz
  # can once again build our code.
  #
  # This variable toggles the fuzz job between two modes:
  # - false: we expect fuzzing to be happy, and should report failure if it's not.
  # - true: we expect fuzzing is broken, and should report failure if it start working.
  TS_FUZZ_CURRENTLY_BROKEN: false

on:
  push:
    branches:
      - "main"
      - "release-branch/*"
  pull_request:
    branches:
      - "*"
  merge_group:
    branches:
      - "main"

concurrency:
  # For PRs, later CI runs preempt previous ones. e.g. a force push on a PR
  # cancels running CI jobs and starts all new ones.
  #
  # For non-PR pushes, concurrency.group needs to be unique for every distinct
  # CI run we want to have happen. Use run_id, which in practice means all
  # non-PR CI runs will be allowed to run without preempting each other.
  group: ${{ github.workflow }}-$${{ github.pull_request.number || github.run_id }}
  cancel-in-progress: true

jobs:
  test:
    strategy:
      fail-fast: false # don't abort the entire matrix if one element fails
      matrix:
        include:
          - goarch: amd64
          - goarch: amd64
            buildflags: "-race"
          - goarch: "386" # thanks yaml
    runs-on: ubuntu-22.04
    steps:
    - name: checkout
      uses: actions/checkout@v3
    - name: Restore Cache
      uses: actions/cache@v3
      with:
        # Note: unlike the other setups, this is only grabbing the mod download
        # cache, rather than the whole mod directory, as the download cache
        # contains zips that can be unpacked in parallel faster than they can be
        # fetched and extracted by tar
        path: |
          ~/.cache/go-build
          ~/go/pkg/mod/cache
          ~\AppData\Local\go-build
        # The -2- here should be incremented when the scheme of data to be
        # cached changes (e.g. path above changes).
        key: ${{ github.job }}-${{ runner.os }}-${{ matrix.goarch }}-${{ matrix.buildflags }}-go-2-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
        restore-keys: |
          ${{ github.job }}-${{ runner.os }}-${{ matrix.goarch }}-${{ matrix.buildflags }}-go-2-${{ hashFiles('**/go.sum') }}
          ${{ github.job }}-${{ runner.os }}-${{ matrix.goarch }}-${{ matrix.buildflags }}-go-2-
    - name: build all
      run: ./tool/go build ${{matrix.buildflags}} ./...
      env:
        GOARCH: ${{ matrix.goarch }}
    - name: build variant CLIs
      run: |
        export TS_USE_TOOLCHAIN=1
        ./build_dist.sh --extra-small ./cmd/tailscaled
        ./build_dist.sh --box ./cmd/tailscaled
        ./build_dist.sh --extra-small --box ./cmd/tailscaled
        rm -f tailscaled
      env:
        GOARCH: ${{ matrix.goarch }}
    - name: get qemu # for tstest/archtest
      if: matrix.goarch == 'amd64' && matrix.variant == ''
      run: |
        sudo apt-get -y update
        sudo apt-get -y install qemu-user
    - name: build test wrapper
      run: ./tool/go build -o /tmp/testwrapper ./cmd/testwrapper
    - name: test all
      run: PATH=$PWD/tool:$PATH /tmp/testwrapper ./... ${{matrix.buildflags}}
      env:
        GOARCH: ${{ matrix.goarch }}
    - name: bench all
      run: PATH=$PWD/tool:$PATH /tmp/testwrapper ./... ${{matrix.buildflags}} -bench=. -benchtime=1x -run=^$
      env:
        GOARCH: ${{ matrix.goarch }}
    - name: check that no tracked files changed
      run: git diff --no-ext-diff --name-only --exit-code || (echo "Build/test modified the files above."; exit 1)
    - name: check that no new files were added
      run: |
        # Note: The "error: pathspec..." you see below is normal!
        # In the success case in which there are no new untracked files,
        # git ls-files complains about the pathspec not matching anything.
        # That's OK. It's not worth the effort to suppress. Please ignore it.
        if git ls-files --others --exclude-standard --directory --no-empty-directory --error-unmatch -- ':/*'
        then
          echo "Build/test created untracked files in the repo (file names above)."
          exit 1
        fi

  windows:
    runs-on: windows-2022
    steps:
    - name: checkout
      uses: actions/checkout@v3

    - name: Install Go
      uses: actions/setup-go@v4
      with:
        go-version-file: go.mod
        cache: false

    - name: Restore Cache
      uses: actions/cache@v3
      with:
        # Note: unlike the other setups, this is only grabbing the mod download
        # cache, rather than the whole mod directory, as the download cache
        # contains zips that can be unpacked in parallel faster than they can be
        # fetched and extracted by tar
        path: |
          ~/.cache/go-build
          ~/go/pkg/mod/cache
          ~\AppData\Local\go-build
        # The -2- here should be incremented when the scheme of data to be
        # cached changes (e.g. path above changes).
        key: ${{ github.job }}-${{ runner.os }}-go-2-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
        restore-keys: |
          ${{ github.job }}-${{ runner.os }}-go-2-${{ hashFiles('**/go.sum') }}
          ${{ github.job }}-${{ runner.os }}-go-2-
    - name: test
      # Don't use -bench=. -benchtime=1x.
      # Somewhere in the layers (powershell?)
      # the equals signs cause great confusion.
      run: go test -bench . -benchtime 1x ./...

  vm:
    runs-on: ["self-hosted", "linux", "vm"]
    # VM tests run with some privileges, don't let them run on 3p PRs.
    if: github.repository == 'tailscale/tailscale'
    steps:
    - name: checkout
      uses: actions/checkout@v3
    - name: Run VM tests
      run: ./tool/go test ./tstest/integration/vms -v -no-s3 -run-vm-tests -run=TestRunUbuntu2004
      env:
        HOME: "/tmp"
        TMPDIR: "/tmp"
        XDB_CACHE_HOME: "/var/lib/ghrunner/cache"

  cross: # cross-compile checks, build only.
    strategy:
      fail-fast: false # don't abort the entire matrix if one element fails
      matrix:
        include:
          # Note: linux/amd64 is not in this matrix, because that goos/goarch is
          # tested more exhaustively in the 'test' job above.
          - goos: linux
            goarch: arm64
          - goos: linux
            goarch: "386" # thanks yaml
          - goos: linux
            goarch: loong64
          - goos: linux
            goarch: arm
            goarm: "5"
          - goos: linux
            goarch: arm
            goarm: "7"
          # macOS
          - goos: darwin
            goarch: amd64
          - goos: darwin
            goarch: arm64
          # Windows
          - goos: windows
            goarch: amd64
          - goos: windows
            goarch: arm64
          # BSDs
          - goos: freebsd
            goarch: amd64
          - goos: openbsd
            goarch: amd64

    runs-on: ubuntu-22.04
    steps:
    - name: checkout
      uses: actions/checkout@v3
    - name: Restore Cache
      uses: actions/cache@v3
      with:
        # Note: unlike the other setups, this is only grabbing the mod download
        # cache, rather than the whole mod directory, as the download cache
        # contains zips that can be unpacked in parallel faster than they can be
        # fetched and extracted by tar
        path: |
          ~/.cache/go-build
          ~/go/pkg/mod/cache
          ~\AppData\Local\go-build
        # The -2- here should be incremented when the scheme of data to be
        # cached changes (e.g. path above changes).
        key: ${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
        restore-keys: |
          ${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2-${{ hashFiles('**/go.sum') }}
          ${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2-
    - name: build all
      run: ./tool/go build ./cmd/...
      env:
        GOOS: ${{ matrix.goos }}
        GOARCH: ${{ matrix.goarch }}
        GOARM: ${{ matrix.goarm }}
        CGO_ENABLED: "0"
    - name: build tests
      run: ./tool/go test -exec=true ./...
      env:
        GOOS: ${{ matrix.goos }}
        GOARCH: ${{ matrix.goarch }}
        CGO_ENABLED: "0"

  ios: # similar to cross above, but iOS can't build most of the repo. So, just
    #make it build a few smoke packages.
    runs-on: ubuntu-22.04
    steps:
    - name: checkout
      uses: actions/checkout@v3
    - name: build some
      run: ./tool/go build ./ipn/... ./wgengine/ ./types/... ./control/controlclient
      env:
        GOOS: ios
        GOARCH: arm64

  android:
    # similar to cross above, but android fails to build a few pieces of the
    # repo. We should fix those pieces, they're small, but as a stepping stone,
    # only test the subset of android that our past smoke test checked.
    runs-on: ubuntu-22.04
    steps:
    - name: checkout
      uses: actions/checkout@v3
    # Super minimal Android build that doesn't even use CGO and doesn't build everything that's needed
    # and is only arm64. But it's a smoke build: it's not meant to catch everything. But it'll catch
    # some Android breakages early.
    # TODO(bradfitz): better; see https://github.com/tailscale/tailscale/issues/4482
    - name: build some
      run: ./tool/go install ./net/netns ./ipn/ipnlocal ./wgengine/magicsock/ ./wgengine/ ./wgengine/router/ ./wgengine/netstack ./util/dnsname/ ./ipn/ ./net/interfaces ./wgengine/router/ ./tailcfg/ ./types/logger/ ./net/dns ./hostinfo ./version
      env:
        GOOS: android
        GOARCH: arm64

  wasm: # builds tsconnect, which is the only wasm build we support
    runs-on: ubuntu-22.04
    steps:
    - name: checkout
      uses: actions/checkout@v3
    - name: Restore Cache
      uses: actions/cache@v3
      with:
        # Note: unlike the other setups, this is only grabbing the mod download
        # cache, rather than the whole mod directory, as the download cache
        # contains zips that can be unpacked in parallel faster than they can be
        # fetched and extracted by tar
        path: |
          ~/.cache/go-build
          ~/go/pkg/mod/cache
          ~\AppData\Local\go-build
        # The -2- here should be incremented when the scheme of data to be
        # cached changes (e.g. path above changes).
        key: ${{ github.job }}-${{ runner.os }}-go-2-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
        restore-keys: |
          ${{ github.job }}-${{ runner.os }}-go-2-${{ hashFiles('**/go.sum') }}
          ${{ github.job }}-${{ runner.os }}-go-2-
    - name: build tsconnect client
      run: ./tool/go build ./cmd/tsconnect/wasm ./cmd/tailscale/cli
      env:
        GOOS: js
        GOARCH: wasm
    - name: build tsconnect server
      # Note, no GOOS/GOARCH in env on this build step, we're running a build
      # tool that handles the build itself.
      run: |
        ./tool/go run ./cmd/tsconnect --fast-compression build
        ./tool/go run ./cmd/tsconnect --fast-compression build-pkg

  tailscale_go: # Subset of tests that depend on our custom Go toolchain.
    runs-on: ubuntu-22.04
    steps:
    - name: checkout
      uses: actions/checkout@v3
    - name: test tailscale_go
      run: ./tool/go test -tags=tailscale_go,ts_enable_sockstats ./net/sockstats/...

  fuzz:
    # This target periodically breaks (see TS_FUZZ_CURRENTLY_BROKEN at the top
    # of the file), so it's more complex than usual: the 'build fuzzers' step
    # might fail, and depending on the value of 'TS_FUZZ_CURRENTLY_BROKEN', that
    # might or might not be fine. The steps after the build figure out whether
    # the success/failure is expected, and appropriately pass/fail the job
    # overall accordingly.
    #
    # Practically, this means that all steps after 'build fuzzers' must have an
    # explicit 'if' condition, because the default condition for steps is
    # 'success()', meaning "only run this if no previous steps failed".
    if: github.event_name == 'pull_request'
    runs-on: ubuntu-22.04
    steps:
    - name: build fuzzers
      id: build
      uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@master
      # continue-on-error makes steps.build.conclusion be 'success' even if
      # steps.build.outcome is 'failure'. This means this step does not
      # contribute to the job's overall pass/fail evaluation.
      continue-on-error: true
      with:
        oss-fuzz-project-name: 'tailscale'
        dry-run: false
        language: go
    - name: report unexpectedly broken fuzz build
      if: steps.build.outcome == 'failure' && env.TS_FUZZ_CURRENTLY_BROKEN != 'true'
      run: |
        echo "fuzzer build failed, see above for why"
        echo "if the failure is due to OSS-Fuzz not being on the latest Go yet,"
        echo "set TS_FUZZ_CURRENTLY_BROKEN=true in .github/workflows/test.yml"
        echo "to temporarily disable fuzzing until OSS-Fuzz works again."
        exit 1
    - name: report unexpectedly working fuzz build
      if: steps.build.outcome == 'success' && env.TS_FUZZ_CURRENTLY_BROKEN == 'true'
      run: |
        echo "fuzzer build succeeded, but we expect it to be broken"
        echo "please set TS_FUZZ_CURRENTLY_BROKEN=false in .github/workflows/test.yml"
        echo "to reenable fuzz testing"
        exit 1
    - name: run fuzzers
      id: run
      # Run the fuzzers whenever they're able to build, even if we're going to
      # report a failure because TS_FUZZ_CURRENTLY_BROKEN is set to the wrong
      # value.
      if: steps.build.outcome == 'success'
      uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@master
      with:
        oss-fuzz-project-name: 'tailscale'
        fuzz-seconds: 300
        dry-run: false
        language: go
    - name: upload crash
      uses: actions/upload-artifact@v3
      if: steps.run.outcome != 'success' && steps.build.outcome == 'success'
      with:
        name: artifacts
        path: ./out/artifacts

  depaware:
    runs-on: ubuntu-22.04
    steps:
    - name: checkout
      uses: actions/checkout@v3
    - name: check depaware
      run: |
        export PATH=$(./tool/go env GOROOT)/bin:$PATH
        find . -name 'depaware.txt' | xargs -n1 dirname | xargs ./tool/go run github.com/tailscale/depaware --check

  go_generate:
    runs-on: ubuntu-22.04
    steps:
    - name: checkout
      uses: actions/checkout@v3
    - name: check that 'go generate' is clean
      run: |
        pkgs=$(./tool/go list ./... | grep -v dnsfallback)
        ./tool/go generate $pkgs
        echo
        echo
        git diff --name-only --exit-code || (echo "The files above need updating. Please run 'go generate'."; exit 1)

  go_mod_tidy:
    runs-on: ubuntu-22.04
    steps:
    - name: checkout
      uses: actions/checkout@v3
    - name: check that 'go mod tidy' is clean
      run: |
        ./tool/go mod tidy
        echo
        echo
        git diff --name-only --exit-code || (echo "Please run 'go mod tidy'."; exit 1)

  licenses:
    runs-on: ubuntu-22.04
    steps:
    - name: checkout
      uses: actions/checkout@v3
    - name: check licenses
      run: ./scripts/check_license_headers.sh .

  staticcheck:
    runs-on: ubuntu-22.04
    strategy:
      fail-fast: false # don't abort the entire matrix if one element fails
      matrix:
        goos: ["linux", "windows", "darwin"]
        goarch: ["amd64"]
        include:
          - goos: "windows"
            goarch: "386"
    steps:
    - name: checkout
      uses: actions/checkout@v3
    - name: install staticcheck
      run: GOBIN=~/.local/bin ./tool/go install honnef.co/go/tools/cmd/staticcheck
    - name: run staticcheck
      run: |
        export GOROOT=$(./tool/go env GOROOT)
        export PATH=$GOROOT/bin:$PATH
        staticcheck -- $(./tool/go list ./... | grep -v tempfork)
      env:
        GOOS: ${{ matrix.goos }}
        GOARCH: ${{ matrix.goarch }}

  notify_slack:
    if: always()
    # Any of these jobs failing causes a slack notification.
    needs:
      - android
      - test
      - windows
      - vm
      - cross
      - ios
      - wasm
      - tailscale_go
      - fuzz
      - depaware
      - go_generate
      - go_mod_tidy
      - licenses
      - staticcheck
    runs-on: ubuntu-22.04
    steps:
    - name: notify
      # Only notify slack for merged commits, not PR failures.
      #
      # It may be tempting to move this condition into the job's 'if' block, but
      # don't: Github only collapses the test list into "everything is OK" if
      # all jobs succeeded. A skipped job results in the list staying expanded.
      # By having the job always run, but skipping its only step as needed, we
      # let the CI output collapse nicely in PRs.
      if: failure() && github.event_name == 'push'
      uses: ruby/action-slack@v3.2.1
      with:
        payload: |
          {
            "attachments": [{
              "title": "Failure: ${{ github.workflow }}",
              "title_link": "https://github.com/${{ github.repository }}/commit/${{ github.sha }}/checks",
              "text": "${{ github.repository }}@${{ github.ref_name }}: <https://github.com/${{ github.repository }}/commit/${{ github.sha }}|${{ github.sha }}>",
              "fields": [{ "value": ${{ toJson(github.event.head_commit.message) }}, "short": false }],
              "footer": "${{ github.event.head_commit.committer.name }} at ${{ github.event.head_commit.timestamp }}",
              "color": "danger"
            }]
          }
      env:
        SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}

  check_mergeability:
    if: always()
    runs-on: ubuntu-22.04
    needs:
      - android
      - test
      - windows
      - vm
      - cross
      - ios
      - wasm
      - tailscale_go
      - fuzz
      - depaware
      - go_generate
      - go_mod_tidy
      - licenses
      - staticcheck
    steps:
    - name: Decide if change is okay to merge
      if: github.event_name != 'push'
      uses: re-actors/alls-green@release/v1
      with:
        jobs: ${{ toJSON(needs) }}
@@ -0,0 +1,30 @@
name: "@tailscale/connect npm publish"

on: workflow_dispatch

jobs:
  build:
    runs-on: ubuntu-latest

    steps:
      - name: Checkout code
        uses: actions/checkout@v3

      - name: Set up node
        uses: actions/setup-node@v3
        with:
          node-version: "16.x"
          registry-url: "https://registry.npmjs.org"

      - name: Build package
        # Build with build_dist.sh to ensure that version information is embedded.
        # GOROOT is specified so that the Go/Wasm build that is triggered by build-pkg
        # also picks up our custom Go toolchain.
        run: |
          ./build_dist.sh tailscale.com/cmd/tsconnect
          GOROOT="${HOME}/.cache/tailscale-go" ./tsconnect build-pkg

      - name: Publish
        env:
          NODE_AUTH_TOKEN: ${{ secrets.TSCONNECT_NPM_PUBLISH_AUTH_TOKEN }}
        run: ./tool/yarn --cwd ./cmd/tsconnect/pkg publish --access public
@@ -1,49 +0,0 @@
name: update-flake

on:
  # run action when a change lands in the main branch which updates go.mod. Also
  # allow manual triggering.
  push:
    branches:
      - main
    paths:
      - go.mod
      - .github/workflows/update-flakes.yml
  workflow_dispatch:

concurrency:
  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  update-flake:
    runs-on: ubuntu-latest

    steps:
      - name: Check out code
        uses: actions/checkout@v3

      - name: Run update-flakes
        run: ./update-flake.sh

      - name: Get access token
        uses: tibdex/github-app-token@b62528385c34dbc9f38e5f4225ac829252d1ea92 # v1.8.0
        id: generate-token
        with:
          app_id: ${{ secrets.LICENSING_APP_ID }}
          installation_id: ${{ secrets.LICENSING_APP_INSTALLATION_ID }}
          private_key: ${{ secrets.LICENSING_APP_PRIVATE_KEY }}

      - name: Send pull request
        uses: peter-evans/create-pull-request@284f54f989303d2699d373481a0cfa13ad5a6666 #v5.0.1
        with:
          token: ${{ steps.generate-token.outputs.token }}
          author: Flakes Updater <noreply+flakes-updater@tailscale.com>
          committer: Flakes Updater <noreply+flakes-updater@tailscale.com>
          branch: flakes
          commit-message: "go.mod.sri: update SRI hash for go.mod changes"
          title: "go.mod.sri: update SRI hash for go.mod changes"
          body: Triggered by ${{ github.repository }}@${{ github.sha }}
          signoff: true
          delete-branch: true
          reviewers: danderson
@@ -0,0 +1,51 @@
name: VM

on:
  pull_request:
    branches:
      - '*'
      - 'release-branch/*'

concurrency:
  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  ubuntu2004-LTS-cloud-base:
    runs-on: [ self-hosted, linux, vm ]

    if: "(github.repository == 'tailscale/tailscale') && !contains(github.event.head_commit.message, '[ci skip]')"

    steps:
      - name: Set GOPATH
        run: echo "GOPATH=$HOME/go" >> $GITHUB_ENV

      - name: Checkout Code
        uses: actions/checkout@v3

      - name: Set up Go
        uses: actions/setup-go@v3
        with:
          go-version-file: go.mod

      - name: Run VM tests
        run: go test ./tstest/integration/vms -v -no-s3 -run-vm-tests -run=TestRunUbuntu2004
        env:
          HOME: "/tmp"
          TMPDIR: "/tmp"
          XDG_CACHE_HOME: "/var/lib/ghrunner/cache"

      - uses: k0kubun/action-slack@v2.0.0
        with:
          payload: |
            {
              "attachments": [{
                "text": "${{ job.status }}: ${{ github.workflow }} <https://github.com/${{ github.repository }}/commit/${{ github.sha }}/checks|${{ env.COMMIT_DATE }} #${{ env.COMMIT_NUMBER_OF_DAY }}> " +
                        "(<https://github.com/${{ github.repository }}/commit/${{ github.sha }}|" + "${{ github.sha }}".substring(0, 10) + ">) " +
                        "of ${{ github.repository }}@" + "${{ github.ref }}".split('/').reverse()[0] + " by ${{ github.event.head_commit.committer.name }}",
                "color": "danger"
              }]
            }
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
        if: failure() && github.event_name == 'push'
@@ -0,0 +1,67 @@
name: Windows

on:
  push:
    branches:
      - main
  pull_request:
    branches:
      - '*'
      - 'release-branch/*'

concurrency:
  group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
  cancel-in-progress: true

jobs:
  test:
    runs-on: windows-latest

    if: "!contains(github.event.head_commit.message, '[ci skip]')"

    steps:
      - name: Checkout code
        uses: actions/checkout@v3

      - name: Install Go
        uses: actions/setup-go@v3
        with:
          go-version-file: go.mod

      - name: Restore Cache
        uses: actions/cache@v3
        with:
          # Note: unlike some other setups, this is only grabbing the mod download
          # cache, rather than the whole mod directory, as the download cache
          # contains zips that can be unpacked in parallel faster than they can be
          # fetched and extracted by tar
          path: |
            ~/go/pkg/mod/cache
            ~\AppData\Local\go-build

          # The -2- here should be incremented when the scheme of data to be
          # cached changes (e.g. path above changes).
          # TODO(raggi): add a go version here.
          key: ${{ runner.os }}-go-2-${{ hashFiles('**/go.sum') }}

      - name: Test
        # Don't use -bench=. -benchtime=1x.
        # Somewhere in the layers (powershell?)
        # the equals signs cause great confusion.
        run: go test -bench . -benchtime 1x ./...

      - uses: k0kubun/action-slack@v2.0.0
        with:
          payload: |
            {
              "attachments": [{
                "text": "${{ job.status }}: ${{ github.workflow }} <https://github.com/${{ github.repository }}/commit/${{ github.sha }}/checks|${{ env.COMMIT_DATE }} #${{ env.COMMIT_NUMBER_OF_DAY }}> " +
                        "(<https://github.com/${{ github.repository }}/commit/${{ github.sha }}|" + "${{ github.sha }}".substring(0, 10) + ">) " +
                        "of ${{ github.repository }}@" + "${{ github.ref }}".split('/').reverse()[0] + " by ${{ github.event.head_commit.committer.name }}",
                "color": "danger"
              }]
            }
        env:
          SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
        if: failure() && github.event_name == 'push'
@@ -26,14 +26,5 @@ cmd/tailscaled/tailscaled
# Ignore personal VS Code settings
.vscode/

# Support personal project-specific GOPATH
.gopath/

# Ignore nix build result path
/result

# Ignore direnv nix-shell environment cache
.direnv/

/gocross
/dist
@@ -1,61 +0,0 @@
linters:
  # Don't enable any linters by default; just the ones that we explicitly
  # enable in the list below.
  disable-all: true
  enable:
    - bidichk
    - gofmt
    - goimports
    - misspell
    - revive

# Configuration for how we run golangci-lint
run:
  timeout: 5m

issues:
  # Excluding configuration per-path, per-linter, per-text and per-source
  exclude-rules:
    # These are forks of an upstream package and thus are exempt from stylistic
    # changes that would make pulling in upstream changes harder.
    - path: tempfork/.*\.go
      text: "File is not `gofmt`-ed with `-s` `-r 'interface{} -> any'`"
    - path: util/singleflight/.*\.go
      text: "File is not `gofmt`-ed with `-s` `-r 'interface{} -> any'`"

# Per-linter settings are contained in this top-level key
linters-settings:
  # Enable all rules by default; we don't use invisible unicode runes.
  bidichk:

  gofmt:
    rewrite-rules:
      - pattern: 'interface{}'
        replacement: 'any'

  goimports:

  misspell:

  revive:
    enable-all-rules: false
    ignore-generated-header: true
    rules:
      - name: atomic
      - name: context-keys-type
      - name: defer
        arguments: [[
          # Calling 'recover' at the time a defer is registered (i.e. "defer recover()") has no effect.
          "immediate-recover",
          # Calling 'recover' outside of a deferred function has no effect
          "recover",
          # Returning values from a deferred function has no effect
          "return",
        ]]
      - name: duplicated-imports
      - name: errorf
      - name: string-of-int
      - name: time-equal
      - name: unconditional-recursion
      - name: useless-break
      - name: waitgroup-by-value
Dockerfile

@@ -1,5 +1,6 @@
# Copyright (c) Tailscale Inc & AUTHORS
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.

############################################################################
#

@@ -31,7 +32,7 @@
# $ docker exec tailscaled tailscale status

FROM golang:1.20-alpine AS build-env
FROM golang:1.19-alpine AS build-env

WORKDIR /go/src/tailscale

@@ -47,7 +48,8 @@ RUN go install \
    golang.org/x/crypto/ssh \
    golang.org/x/crypto/acme \
    nhooyr.io/websocket \
    github.com/mdlayher/netlink
    github.com/mdlayher/netlink \
    golang.zx2c4.com/wireguard/device

COPY . .

@@ -61,9 +63,9 @@ ENV VERSION_GIT_HASH=$VERSION_GIT_HASH
ARG TARGETARCH

RUN GOARCH=$TARGETARCH go install -ldflags="\
      -X tailscale.com/version.longStamp=$VERSION_LONG \
      -X tailscale.com/version.shortStamp=$VERSION_SHORT \
      -X tailscale.com/version.gitCommitStamp=$VERSION_GIT_HASH" \
      -X tailscale.com/version.Long=$VERSION_LONG \
      -X tailscale.com/version.Short=$VERSION_SHORT \
      -X tailscale.com/version.GitCommit=$VERSION_GIT_HASH" \
      -v ./cmd/tailscale ./cmd/tailscaled ./cmd/containerboot

FROM alpine:3.16

@@ -72,4 +74,4 @@ RUN apk add --no-cache ca-certificates iptables iproute2 ip6tables
COPY --from=build-env /go/bin/* /usr/local/bin/
# For compat with the previous run.sh, although ideally you should be
# using build_docker.sh which sets an entrypoint for the image.
RUN mkdir /tailscale && ln -s /usr/local/bin/containerboot /tailscale/run.sh
RUN ln -s /usr/local/bin/containerboot /tailscale/run.sh
@@ -1,5 +1,6 @@
# Copyright (c) Tailscale Inc & AUTHORS
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2021 Tailscale Inc & AUTHORS All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.

FROM alpine:3.16
RUN apk add --no-cache ca-certificates iptables iproute2 ip6tables iputils
RUN apk add --no-cache ca-certificates iptables iproute2 ip6tables
LICENSE

@@ -1,6 +1,7 @@
BSD 3-Clause License

Copyright (c) 2020 Tailscale Inc & AUTHORS.
Copyright (c) 2020 Tailscale & AUTHORS.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Makefile
|
@ -2,63 +2,63 @@ IMAGE_REPO ?= tailscale/tailscale
|
|||
SYNO_ARCH ?= "amd64"
|
||||
SYNO_DSM ?= "7"
|
||||
|
||||
vet: ## Run go vet
|
||||
usage:
|
||||
echo "See Makefile"
|
||||
|
||||
vet:
|
||||
./tool/go vet ./...
|
||||
|
||||
tidy: ## Run go mod tidy
|
||||
tidy:
|
||||
./tool/go mod tidy
|
||||
|
||||
updatedeps: ## Update depaware deps
|
||||
# depaware (via x/tools/go/packages) shells back to "go", so make sure the "go"
|
||||
# it finds in its $$PATH is the right one.
|
||||
PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update \
|
||||
updatedeps:
|
||||
./tool/go run github.com/tailscale/depaware --update \
|
||||
tailscale.com/cmd/tailscaled \
|
||||
tailscale.com/cmd/tailscale \
|
||||
tailscale.com/cmd/derper
|
||||
|
||||
depaware: ## Run depaware checks
|
||||
# depaware (via x/tools/go/packages) shells back to "go", so make sure the "go"
|
||||
# it finds in its $$PATH is the right one.
|
||||
PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check \
|
||||
depaware:
|
||||
./tool/go run github.com/tailscale/depaware --check \
|
||||
tailscale.com/cmd/tailscaled \
|
||||
tailscale.com/cmd/tailscale \
|
||||
tailscale.com/cmd/derper
|
||||
|
||||
buildwindows: ## Build tailscale CLI for windows/amd64
|
||||
buildwindows:
|
||||
GOOS=windows GOARCH=amd64 ./tool/go install tailscale.com/cmd/tailscale tailscale.com/cmd/tailscaled
|
||||
|
||||
build386: ## Build tailscale CLI for linux/386
|
||||
build386:
|
||||
GOOS=linux GOARCH=386 ./tool/go install tailscale.com/cmd/tailscale tailscale.com/cmd/tailscaled
|
||||
|
||||
buildlinuxarm: ## Build tailscale CLI for linux/arm
|
||||
buildlinuxarm:
|
||||
GOOS=linux GOARCH=arm ./tool/go install tailscale.com/cmd/tailscale tailscale.com/cmd/tailscaled
|
||||
|
||||
buildwasm: ## Build tailscale CLI for js/wasm
|
||||
buildwasm:
|
||||
GOOS=js GOARCH=wasm ./tool/go install ./cmd/tsconnect/wasm ./cmd/tailscale/cli
|
||||
|
||||
buildlinuxloong64: ## Build tailscale CLI for linux/loong64
|
||||
buildlinuxloong64:
|
||||
GOOS=linux GOARCH=loong64 ./tool/go install tailscale.com/cmd/tailscale tailscale.com/cmd/tailscaled
|
||||
|
||||
buildmultiarchimage: ## Build (and optionally push) multiarch docker image
|
||||
buildmultiarchimage:
|
||||
./build_docker.sh
|
||||
|
||||
check: staticcheck vet depaware buildwindows build386 buildlinuxarm buildwasm ## Perform basic checks and compilation tests
|
||||
check: staticcheck vet depaware buildwindows build386 buildlinuxarm buildwasm
|
||||
|
||||
staticcheck: ## Run staticcheck.io checks
|
||||
staticcheck:
|
||||
./tool/go run honnef.co/go/tools/cmd/staticcheck -- $$(./tool/go list ./... | grep -v tempfork)
|
||||
|
||||
spk: ## Build synology package for ${SYNO_ARCH} architecture and ${SYNO_DSM} DSM version
|
||||
./tool/go run ./cmd/dist build synology/dsm${SYNO_DSM}/${SYNO_ARCH}
|
||||
spk:
|
||||
PATH="${PWD}/tool:${PATH}" ./tool/go run github.com/tailscale/tailscale-synology@main -o tailscale.spk --source=. --goarch=${SYNO_ARCH} --dsm-version=${SYNO_DSM}
|
||||
|
||||
spkall: ## Build synology packages for all architectures and DSM versions
|
||||
./tool/go run ./cmd/dist build synology
|
||||
spkall:
|
||||
mkdir -p spks
|
||||
PATH="${PWD}/tool:${PATH}" ./tool/go run github.com/tailscale/tailscale-synology@main -o spks --source=. --goarch=all --dsm-version=all
|
||||
|
||||
pushspk: spk ## Push and install synology package on ${SYNO_HOST} host
|
||||
pushspk: spk
|
||||
echo "Pushing SPK to root@${SYNO_HOST} (env var SYNO_HOST) ..."
|
||||
scp tailscale.spk root@${SYNO_HOST}:
|
||||
ssh root@${SYNO_HOST} /usr/syno/bin/synopkg install tailscale.spk
|
||||
|
||||
publishdevimage: ## Build and publish tailscale image to location specified by ${REPO}
|
||||
publishdevimage:
|
||||
@test -n "${REPO}" || (echo "REPO=... required; e.g. REPO=ghcr.io/${USER}/tailscale" && exit 1)
|
||||
@test "${REPO}" != "tailscale/tailscale" || (echo "REPO=... must not be tailscale/tailscale" && exit 1)
|
||||
@test "${REPO}" != "ghcr.io/tailscale/tailscale" || (echo "REPO=... must not be ghcr.io/tailscale/tailscale" && exit 1)
|
||||
|
@ -66,18 +66,10 @@ publishdevimage: ## Build and publish tailscale image to location specified by $
|
|||
@test "${REPO}" != "ghcr.io/tailscale/k8s-operator" || (echo "REPO=... must not be ghcr.io/tailscale/k8s-operator" && exit 1)
|
||||
TAGS=latest REPOS=${REPO} PUSH=true TARGET=client ./build_docker.sh
|
||||
|
||||
publishdevoperator: ## Build and publish k8s-operator image to location specified by ${REPO}
|
||||
publishdevoperator:
|
||||
@test -n "${REPO}" || (echo "REPO=... required; e.g. REPO=ghcr.io/${USER}/tailscale" && exit 1)
|
||||
@test "${REPO}" != "tailscale/tailscale" || (echo "REPO=... must not be tailscale/tailscale" && exit 1)
|
||||
@test "${REPO}" != "ghcr.io/tailscale/tailscale" || (echo "REPO=... must not be ghcr.io/tailscale/tailscale" && exit 1)
|
||||
@test "${REPO}" != "tailscale/k8s-operator" || (echo "REPO=... must not be tailscale/k8s-operator" && exit 1)
|
||||
@test "${REPO}" != "ghcr.io/tailscale/k8s-operator" || (echo "REPO=... must not be ghcr.io/tailscale/k8s-operator" && exit 1)
|
||||
TAGS=latest REPOS=${REPO} PUSH=true TARGET=operator ./build_docker.sh
|
||||
|
||||
help: ## Show this help
|
||||
@echo "\nSpecify a command. The choices are:\n"
|
||||
@grep -hE '^[0-9a-zA-Z_-]+:.*?## .*$$' ${MAKEFILE_LIST} | awk 'BEGIN {FS = ":.*?## "}; {printf " \033[0;36m%-20s\033[m %s\n", $$1, $$2}'
|
||||
@echo ""
|
||||
.PHONY: help
|
||||
|
||||
.DEFAULT_GOAL := help
|
||||
|
|
|
@@ -37,7 +37,7 @@ not open source.

## Building

We always require the latest Go release, currently Go 1.20. (While we build
We always require the latest Go release, currently Go 1.19. (While we build
releases with our [Go fork](https://github.com/tailscale/go/), its use is not
required.)

@@ -1 +1 @@
1.45.0
1.36.2
@@ -1,5 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2019 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package atomicfile contains code related to writing to filesystems
// atomically.

@@ -8,20 +9,14 @@
package atomicfile // import "tailscale.com/atomicfile"

import (
	"fmt"
	"os"
	"path/filepath"
	"runtime"
)

// WriteFile writes data to filename+some suffix, then renames it into filename.
// The perm argument is ignored on Windows. If the target filename already
// exists but is not a regular file, WriteFile returns an error.
// WriteFile writes data to filename+some suffix, then renames it
// into filename. The perm argument is ignored on Windows.
func WriteFile(filename string, data []byte, perm os.FileMode) (err error) {
	fi, err := os.Stat(filename)
	if err == nil && !fi.Mode().IsRegular() {
		return fmt.Errorf("%s already exists and is not a regular file", filename)
	}
	f, err := os.CreateTemp(filepath.Dir(filename), filepath.Base(filename)+".tmp")
	if err != nil {
		return err
|
|||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
//go:build !js && !windows
|
||||
|
||||
package atomicfile
|
||||
|
||||
import (
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestDoesNotOverwriteIrregularFiles(t *testing.T) {
|
||||
// Per tailscale/tailscale#7658 as one example, almost any imagined use of
|
||||
// atomicfile.Write should likely not attempt to overwrite an irregular file
|
||||
// such as a device node, socket, or named pipe.
|
||||
|
||||
const filename = "TestDoesNotOverwriteIrregularFiles"
|
||||
var path string
|
||||
// macOS private temp does not allow unix socket creation, but /tmp does.
|
||||
if runtime.GOOS == "darwin" {
|
||||
path = filepath.Join("/tmp", filename)
|
||||
t.Cleanup(func() { os.Remove(path) })
|
||||
} else {
|
||||
path = filepath.Join(t.TempDir(), filename)
|
||||
}
|
||||
|
||||
// The least troublesome thing to make that is not a file is a unix socket.
|
||||
// Making a null device sadly requires root.
|
||||
l, err := net.ListenUnix("unix", &net.UnixAddr{Name: path, Net: "unix"})
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer l.Close()
|
||||
|
||||
err = WriteFile(path, []byte("hello"), 0644)
|
||||
if err == nil {
|
||||
t.Fatal("expected error, got nil")
|
||||
}
|
||||
if !strings.Contains(err.Error(), "is not a regular file") {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
}
|
|
@ -11,25 +11,42 @@
|
|||
|
||||
set -eu
|
||||
|
||||
go="go"
|
||||
if [ -n "${TS_USE_TOOLCHAIN:-}" ]; then
|
||||
go="./tool/go"
|
||||
IFS=".$IFS" read -r major minor patch <VERSION.txt
|
||||
git_hash=$(git rev-parse HEAD)
|
||||
if ! git diff-index --quiet HEAD; then
|
||||
git_hash="${git_hash}-dirty"
|
||||
fi
|
||||
base_hash=$(git rev-list --max-count=1 HEAD -- VERSION.txt)
|
||||
change_count=$(git rev-list --count HEAD "^$base_hash")
|
||||
short_hash=$(echo "$git_hash" | cut -c1-9)
|
||||
|
||||
if expr "$minor" : "[0-9]*[13579]$" >/dev/null; then
|
||||
patch="$change_count"
|
||||
change_suffix=""
|
||||
elif [ "$change_count" != "0" ]; then
|
||||
change_suffix="-$change_count"
|
||||
else
|
||||
change_suffix=""
|
||||
fi
|
||||
|
||||
eval `CGO_ENABLED=0 GOOS=$($go env GOHOSTOS) GOARCH=$($go env GOHOSTARCH) $go run ./cmd/mkversion`
|
||||
long_suffix="$change_suffix-t$short_hash"
|
||||
MINOR="$major.$minor"
|
||||
SHORT="$MINOR.$patch"
|
||||
LONG="${SHORT}$long_suffix"
|
||||
GIT_HASH="$git_hash"
|
||||
|
||||
if [ "$1" = "shellvars" ]; then
|
||||
cat <<EOF
|
||||
VERSION_MINOR="$VERSION_MINOR"
|
||||
VERSION_SHORT="$VERSION_SHORT"
|
||||
VERSION_LONG="$VERSION_LONG"
|
||||
VERSION_GIT_HASH="$VERSION_GIT_HASH"
|
||||
VERSION_MINOR="$MINOR"
|
||||
VERSION_SHORT="$SHORT"
|
||||
VERSION_LONG="$LONG"
|
||||
VERSION_GIT_HASH="$GIT_HASH"
|
||||
EOF
|
||||
exit 0
|
||||
fi
|
||||
|
||||
tags=""
|
||||
ldflags="-X tailscale.com/version.longStamp=${VERSION_LONG} -X tailscale.com/version.shortStamp=${VERSION_SHORT}"
|
||||
ldflags="-X tailscale.com/version.Long=${LONG} -X tailscale.com/version.Short=${SHORT} -X tailscale.com/version.GitCommit=${GIT_HASH}"
|
||||
|
||||
# build_dist.sh arguments must precede go build arguments.
|
||||
while [ "$#" -gt 1 ]; do
|
||||
|
@ -37,7 +54,7 @@ while [ "$#" -gt 1 ]; do
|
|||
--extra-small)
|
||||
shift
|
||||
ldflags="$ldflags -w -s"
|
||||
tags="${tags:+$tags,}ts_omit_aws,ts_omit_bird,ts_omit_tap,ts_omit_kube"
|
||||
tags="${tags:+$tags,}ts_omit_aws,ts_omit_bird,ts_omit_tap"
|
||||
;;
|
||||
--box)
|
||||
shift
|
||||
|
@ -49,4 +66,4 @@ while [ "$#" -gt 1 ]; do
|
|||
esac
|
||||
done
|
||||
|
||||
exec $go build ${tags:+-tags=$tags} -ldflags "$ldflags" "$@"
|
||||
exec ./tool/go build ${tags:+-tags=$tags} -ldflags "$ldflags" "$@"
|
||||
|
|
|
@ -23,29 +23,28 @@ set -eu
|
|||
export PATH=$PWD/tool:$PATH
|
||||
|
||||
eval $(./build_dist.sh shellvars)
|
||||
|
||||
DEFAULT_TARGET="client"
|
||||
DEFAULT_TAGS="v${VERSION_SHORT},v${VERSION_MINOR}"
|
||||
DEFAULT_BASE="tailscale/alpine-base:3.16"
|
||||
DEFAULT_REPOS="tailscale/tailscale,ghcr.io/tailscale/tailscale"
|
||||
DEFAULT_BASE="ghcr.io/tailscale/alpine-base:3.16"
|
||||
DEFAULT_TARGET="client"
|
||||
|
||||
PUSH="${PUSH:-false}"
|
||||
TARGET="${TARGET:-${DEFAULT_TARGET}}"
|
||||
REPOS="${REPOS:-${DEFAULT_REPOS}}"
|
||||
TAGS="${TAGS:-${DEFAULT_TAGS}}"
|
||||
BASE="${BASE:-${DEFAULT_BASE}}"
|
||||
TARGET="${TARGET:-${DEFAULT_TARGET}}"
|
||||
|
||||
case "$TARGET" in
|
||||
client)
|
||||
DEFAULT_REPOS="tailscale/tailscale"
|
||||
REPOS="${REPOS:-${DEFAULT_REPOS}}"
|
||||
go run github.com/tailscale/mkctr \
|
||||
--gopaths="\
|
||||
tailscale.com/cmd/tailscale:/usr/local/bin/tailscale, \
|
||||
tailscale.com/cmd/tailscaled:/usr/local/bin/tailscaled, \
|
||||
tailscale.com/cmd/containerboot:/usr/local/bin/containerboot" \
|
||||
--ldflags="\
|
||||
-X tailscale.com/version.longStamp=${VERSION_LONG} \
|
||||
-X tailscale.com/version.shortStamp=${VERSION_SHORT} \
|
||||
-X tailscale.com/version.gitCommitStamp=${VERSION_GIT_HASH}" \
|
||||
-X tailscale.com/version.Long=${VERSION_LONG} \
|
||||
-X tailscale.com/version.Short=${VERSION_SHORT} \
|
||||
-X tailscale.com/version.GitCommit=${VERSION_GIT_HASH}" \
|
||||
--base="${BASE}" \
|
||||
--tags="${TAGS}" \
|
||||
--repos="${REPOS}" \
|
||||
|
@ -53,14 +52,12 @@ case "$TARGET" in
|
|||
/usr/local/bin/containerboot
|
||||
;;
|
||||
operator)
|
||||
DEFAULT_REPOS="tailscale/k8s-operator"
|
||||
REPOS="${REPOS:-${DEFAULT_REPOS}}"
|
||||
go run github.com/tailscale/mkctr \
|
||||
--gopaths="tailscale.com/cmd/k8s-operator:/usr/local/bin/operator" \
|
||||
--ldflags="\
|
||||
-X tailscale.com/version.longStamp=${VERSION_LONG} \
|
||||
-X tailscale.com/version.shortStamp=${VERSION_SHORT} \
|
||||
-X tailscale.com/version.gitCommitStamp=${VERSION_GIT_HASH}" \
|
||||
-X tailscale.com/version.Long=${VERSION_LONG} \
|
||||
-X tailscale.com/version.Short=${VERSION_SHORT} \
|
||||
-X tailscale.com/version.GitCommit=${VERSION_GIT_HASH}" \
|
||||
--base="${BASE}" \
|
||||
--tags="${TAGS}" \
|
||||
--repos="${REPOS}" \
|
||||
|
|
|
@@ -1,5 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2021 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package chirp implements a client to communicate with the BIRD Internet
// Routing Daemon.

@@ -1,5 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package chirp

import (

@@ -1,5 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build go1.19

@@ -103,7 +104,7 @@ func (c *Client) ACL(ctx context.Context) (acl *ACL, err error) {
// it as a string.
// HuJSON is JSON with a few modifications to make it more human-friendly. The primary
// changes are allowing comments and trailing commas. See the following links for more info:
// https://tailscale.com/s/acl-format
// https://tailscale.com/kb/1018/acls?q=acl#tailscale-acl-policy-format
// https://github.com/tailscale/hujson
func (c *Client) ACLHuJSON(ctx context.Context) (acl *ACLHuJSON, err error) {
	// Format return errors to be descriptive.

@@ -436,7 +437,7 @@ func (c *Client) ValidateACLJSON(ctx context.Context, source, dest string) (test
	}
	}()

	tests := []ACLTest{{User: source, Allow: []string{dest}}}
	tests := []ACLTest{ACLTest{User: source, Allow: []string{dest}}}
	postData, err := json.Marshal(tests)
	if err != nil {
		return nil, err
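As an aside on the HuJSON format referenced in the ACLHuJSON docs above, the sketch below illustrates the two relaxations it adds over plain JSON, comments and trailing commas; the policy content itself is a made-up example, not taken from this repository.

```go
package main

import "fmt"

func main() {
	// HuJSON (github.com/tailscale/hujson) is JSON plus comments and trailing commas.
	const policy = `{
	// Comments like this one are allowed.
	"acls": [
		{"action": "accept", "src": ["*"], "dst": ["*:*"]},
	], // ...and so is the trailing comma above.
}`
	fmt.Println(policy)
}
```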
@ -1,5 +1,6 @@
|
|||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
// Copyright (c) 2021 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Package apitype contains types for the Tailscale LocalAPI and control plane API.
|
||||
package apitype
|
||||
|
@ -32,9 +33,3 @@ type WaitingFile struct {
|
|||
Name string
|
||||
Size int64
|
||||
}
|
||||
|
||||
// SetPushDeviceTokenRequest is the body POSTed to the LocalAPI endpoint /set-device-token.
|
||||
type SetPushDeviceTokenRequest struct {
|
||||
// PushDeviceToken is the iOS/macOS APNs device token (and any future Android equivalent).
|
||||
PushDeviceToken string
|
||||
}
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package apitype
|
||||
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build go1.19
|
||||
|
||||
|
@ -12,6 +13,7 @@ import (
|
|||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"tailscale.com/types/opt"
|
||||
)
|
||||
|
@ -43,18 +45,17 @@ type Device struct {
|
|||
Name string `json:"name"`
|
||||
Hostname string `json:"hostname"`
|
||||
|
||||
ClientVersion string `json:"clientVersion"` // Empty for external devices.
|
||||
UpdateAvailable bool `json:"updateAvailable"` // Empty for external devices.
|
||||
OS string `json:"os"`
|
||||
Tags []string `json:"tags"`
|
||||
Created string `json:"created"` // Empty for external devices.
|
||||
LastSeen string `json:"lastSeen"`
|
||||
KeyExpiryDisabled bool `json:"keyExpiryDisabled"`
|
||||
Expires string `json:"expires"`
|
||||
Authorized bool `json:"authorized"`
|
||||
IsExternal bool `json:"isExternal"`
|
||||
MachineKey string `json:"machineKey"` // Empty for external devices.
|
||||
NodeKey string `json:"nodeKey"`
|
||||
ClientVersion string `json:"clientVersion"` // Empty for external devices.
|
||||
UpdateAvailable bool `json:"updateAvailable"` // Empty for external devices.
|
||||
OS string `json:"os"`
|
||||
Created string `json:"created"` // Empty for external devices.
|
||||
LastSeen string `json:"lastSeen"`
|
||||
KeyExpiryDisabled bool `json:"keyExpiryDisabled"`
|
||||
Expires string `json:"expires"`
|
||||
Authorized bool `json:"authorized"`
|
||||
IsExternal bool `json:"isExternal"`
|
||||
MachineKey string `json:"machineKey"` // Empty for external devices.
|
||||
NodeKey string `json:"nodeKey"`
|
||||
|
||||
// BlocksIncomingConnections is configured via the device's
|
||||
// Tailscale client preferences. This field is only reported
|
||||
|
@ -212,20 +213,8 @@ func (c *Client) DeleteDevice(ctx context.Context, deviceID string) (err error)
|
|||
|
||||
// AuthorizeDevice marks a device as authorized.
|
||||
func (c *Client) AuthorizeDevice(ctx context.Context, deviceID string) error {
|
||||
return c.SetAuthorized(ctx, deviceID, true)
|
||||
}
|
||||
|
||||
// SetAuthorized marks a device as authorized or not.
|
||||
func (c *Client) SetAuthorized(ctx context.Context, deviceID string, authorized bool) error {
|
||||
params := &struct {
|
||||
Authorized bool `json:"authorized"`
|
||||
}{Authorized: authorized}
|
||||
data, err := json.Marshal(params)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
path := fmt.Sprintf("%s/api/v2/device/%s/authorized", c.baseURL(), url.PathEscape(deviceID))
|
||||
req, err := http.NewRequestWithContext(ctx, "POST", path, bytes.NewBuffer(data))
|
||||
req, err := http.NewRequestWithContext(ctx, "POST", path, strings.NewReader(`{"authorized":true}`))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
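The hunk above replaces the hard-coded `{"authorized":true}` request body with a SetAuthorized method that can also de-authorize a device. A hypothetical caller, sketched against the signature shown in the diff (the client is assumed to be configured elsewhere and the device ID is a placeholder):

```go
package example

import (
	"context"

	"tailscale.com/client/tailscale"
)

// deauthorize flips a device to unauthorized via the SetAuthorized method shown above.
// c is an already-configured *tailscale.Client; deviceID is a placeholder value.
func deauthorize(ctx context.Context, c *tailscale.Client, deviceID string) error {
	return c.SetAuthorized(ctx, deviceID, false)
}
```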
|
|
@ -1,5 +1,6 @@
|
|||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build go1.19
|
||||
|
||||
|
@ -63,7 +64,7 @@ func (c *Client) dnsGETRequest(ctx context.Context, endpoint string) ([]byte, er
|
|||
return b, nil
|
||||
}
|
||||
|
||||
func (c *Client) dnsPOSTRequest(ctx context.Context, endpoint string, postData any) ([]byte, error) {
|
||||
func (c *Client) dnsPOSTRequest(ctx context.Context, endpoint string, postData interface{}) ([]byte, error) {
|
||||
path := fmt.Sprintf("%s/api/v2/tailnet/%s/dns/%s", c.baseURL(), c.tailnet, endpoint)
|
||||
data, err := json.Marshal(&postData)
|
||||
if err != nil {
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
// Copyright (c) 2021 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// The servetls program shows how to run an HTTPS server
|
||||
// using a Tailscale cert via LetsEncrypt.
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package tailscale
|
||||
|
||||
|
@ -68,32 +69,12 @@ func (c *Client) Keys(ctx context.Context) ([]string, error) {
|
|||
}
|
||||
|
||||
// CreateKey creates a new key for the current user. Currently, only auth keys
|
||||
// can be created. It returns the secret key itself, which cannot be retrieved again
|
||||
// can be created. Returns the key itself, which cannot be retrieved again
|
||||
// later, and the key metadata.
|
||||
//
|
||||
// To create a key with a specific expiry, use CreateKeyWithExpiry.
|
||||
func (c *Client) CreateKey(ctx context.Context, caps KeyCapabilities) (keySecret string, keyMeta *Key, _ error) {
|
||||
return c.CreateKeyWithExpiry(ctx, caps, 0)
|
||||
}
|
||||
|
||||
// CreateKeyWithExpiry is like CreateKey, but allows specifying an expiration time.
|
||||
//
|
||||
// The time is truncated to a whole number of seconds. If zero, that means no expiration.
|
||||
func (c *Client) CreateKeyWithExpiry(ctx context.Context, caps KeyCapabilities, expiry time.Duration) (keySecret string, keyMeta *Key, _ error) {
|
||||
|
||||
// convert expirySeconds to an int64 (seconds)
|
||||
expirySeconds := int64(expiry.Seconds())
|
||||
if expirySeconds < 0 {
|
||||
return "", nil, fmt.Errorf("expiry must be positive")
|
||||
}
|
||||
if expirySeconds == 0 && expiry != 0 {
|
||||
return "", nil, fmt.Errorf("non-zero expiry must be at least one second")
|
||||
}
|
||||
|
||||
func (c *Client) CreateKey(ctx context.Context, caps KeyCapabilities) (string, *Key, error) {
|
||||
keyRequest := struct {
|
||||
Capabilities KeyCapabilities `json:"capabilities"`
|
||||
ExpirySeconds int64 `json:"expirySeconds,omitempty"`
|
||||
}{caps, int64(expirySeconds)}
|
||||
Capabilities KeyCapabilities `json:"capabilities"`
|
||||
}{caps}
|
||||
bs, err := json.Marshal(keyRequest)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
|
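CreateKeyWithExpiry, added above, takes the expiry as a time.Duration that is truncated to whole seconds, with zero meaning no expiration. A sketch of calling it, written against the signature in this hunk; the client and the KeyCapabilities value are assumed to be set up elsewhere and the zero value below is only a placeholder.

```go
package example

import (
	"context"
	"fmt"
	"time"

	"tailscale.com/client/tailscale"
)

// mintDayKey creates an auth key that expires after 24 hours.
func mintDayKey(ctx context.Context, c *tailscale.Client) (string, error) {
	var caps tailscale.KeyCapabilities // placeholder; fill in real capabilities
	secret, meta, err := c.CreateKeyWithExpiry(ctx, caps, 24*time.Hour)
	if err != nil {
		return "", err
	}
	fmt.Println(meta) // key metadata; the secret itself cannot be retrieved again later
	return secret, nil
}
```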
|
|
@ -1,5 +1,6 @@
|
|||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
// Copyright (c) 2021 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build go1.19
|
||||
|
||||
|
@ -36,7 +37,6 @@ import (
|
|||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/tka"
|
||||
"tailscale.com/types/key"
|
||||
"tailscale.com/types/tkatype"
|
||||
)
|
||||
|
||||
// defaultLocalClient is the default LocalClient when using the legacy
|
||||
|
@ -96,9 +96,8 @@ func (lc *LocalClient) defaultDialer(ctx context.Context, network, addr string)
|
|||
// a TCP server on a random port, find the random port. For HTTP connections,
|
||||
// we don't send the token. It gets added in an HTTP Basic-Auth header.
|
||||
if port, _, err := safesocket.LocalTCPPortAndToken(); err == nil {
|
||||
// We use 127.0.0.1 and not "localhost" (issue 7851).
|
||||
var d net.Dialer
|
||||
return d.DialContext(ctx, "tcp", "127.0.0.1:"+strconv.Itoa(port))
|
||||
return d.DialContext(ctx, "tcp", "localhost:"+strconv.Itoa(port))
|
||||
}
|
||||
}
|
||||
s := safesocket.DefaultConnectionStrategy(lc.socket())
|
||||
|
@ -369,34 +368,6 @@ func (lc *LocalClient) DebugAction(ctx context.Context, action string) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// DebugPortmap invokes the debug-portmap endpoint, and returns an
|
||||
// io.ReadCloser that can be used to read the logs that are printed during this
|
||||
// process.
|
||||
func (lc *LocalClient) DebugPortmap(ctx context.Context, duration time.Duration, ty, gwSelf string) (io.ReadCloser, error) {
|
||||
vals := make(url.Values)
|
||||
vals.Set("duration", duration.String())
|
||||
vals.Set("type", ty)
|
||||
if gwSelf != "" {
|
||||
vals.Set("gateway_and_self", gwSelf)
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", "http://"+apitype.LocalAPIHost+"/localapi/v0/debug-portmap?"+vals.Encode(), nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
res, err := lc.doLocalRequestNiceError(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if res.StatusCode != 200 {
|
||||
body, _ := io.ReadAll(res.Body)
|
||||
res.Body.Close()
|
||||
return nil, fmt.Errorf("HTTP %s: %s", res.Status, body)
|
||||
}
|
||||
|
||||
return res.Body, nil
|
||||
}
|
||||
|
||||
// SetDevStoreKeyValue set a statestore key/value. It's only meant for development.
|
||||
// The schema (including when keys are re-read) is not a stable interface.
|
||||
func (lc *LocalClient) SetDevStoreKeyValue(ctx context.Context, key, value string) error {
|
||||
|
@ -851,30 +822,6 @@ func (lc *LocalClient) NetworkLockInit(ctx context.Context, keys []tka.Key, disa
|
|||
return decodeJSON[*ipnstate.NetworkLockStatus](body)
|
||||
}
|
||||
|
||||
// NetworkLockWrapPreauthKey wraps a pre-auth key with information to
|
||||
// enable unattended bringup in the locked tailnet.
|
||||
func (lc *LocalClient) NetworkLockWrapPreauthKey(ctx context.Context, preauthKey string, tkaKey key.NLPrivate) (string, error) {
|
||||
encodedPrivate, err := tkaKey.MarshalText()
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
var b bytes.Buffer
|
||||
type wrapRequest struct {
|
||||
TSKey string
|
||||
TKAKey string // key.NLPrivate.MarshalText
|
||||
}
|
||||
if err := json.NewEncoder(&b).Encode(wrapRequest{TSKey: preauthKey, TKAKey: string(encodedPrivate)}); err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
body, err := lc.send(ctx, "POST", "/localapi/v0/tka/wrap-preauth-key", 200, &b)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("error: %w", err)
|
||||
}
|
||||
return string(body), nil
|
||||
}
|
||||
|
||||
// NetworkLockModify adds and/or removes key(s) to the tailnet key authority.
|
||||
func (lc *LocalClient) NetworkLockModify(ctx context.Context, addKeys, removeKeys []tka.Key) error {
|
||||
var b bytes.Buffer
|
||||
|
@ -912,15 +859,6 @@ func (lc *LocalClient) NetworkLockSign(ctx context.Context, nodeKey key.NodePubl
|
|||
return nil
|
||||
}
|
||||
|
||||
// NetworkLockAffectedSigs returns all signatures signed by the specified keyID.
|
||||
func (lc *LocalClient) NetworkLockAffectedSigs(ctx context.Context, keyID tkatype.KeyID) ([]tkatype.MarshaledSignature, error) {
|
||||
body, err := lc.send(ctx, "POST", "/localapi/v0/tka/affected-sigs", 200, bytes.NewReader(keyID))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("error: %w", err)
|
||||
}
|
||||
return decodeJSON[[]tkatype.MarshaledSignature](body)
|
||||
}
|
||||
|
||||
// NetworkLockLog returns up to maxEntries number of changes to network-lock state.
|
||||
func (lc *LocalClient) NetworkLockLog(ctx context.Context, maxEntries int) ([]ipnstate.NetworkLockUpdate, error) {
|
||||
v := url.Values{}
|
||||
|
@ -946,21 +884,6 @@ func (lc *LocalClient) NetworkLockForceLocalDisable(ctx context.Context) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// NetworkLockVerifySigningDeeplink verifies the network lock deeplink contained
|
||||
// in url and returns information extracted from it.
|
||||
func (lc *LocalClient) NetworkLockVerifySigningDeeplink(ctx context.Context, url string) (*tka.DeeplinkValidationResult, error) {
|
||||
vr := struct {
|
||||
URL string
|
||||
}{url}
|
||||
|
||||
body, err := lc.send(ctx, "POST", "/localapi/v0/tka/verify-deeplink", 200, jsonBody(vr))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("sending verify-deeplink: %w", err)
|
||||
}
|
||||
|
||||
return decodeJSON[*tka.DeeplinkValidationResult](body)
|
||||
}
|
||||
|
||||
// SetServeConfig sets or replaces the serving settings.
|
||||
// If config is nil, settings are cleared and serving is disabled.
|
||||
func (lc *LocalClient) SetServeConfig(ctx context.Context, config *ipn.ServeConfig) error {
|
||||
|
@ -1106,26 +1029,6 @@ func (lc *LocalClient) DebugSetExpireIn(ctx context.Context, d time.Duration) er
|
|||
return err
|
||||
}
|
||||
|
||||
// StreamDebugCapture streams a pcap-formatted packet capture.
|
||||
//
|
||||
// The provided context does not determine the lifetime of the
|
||||
// returned io.ReadCloser.
|
||||
func (lc *LocalClient) StreamDebugCapture(ctx context.Context) (io.ReadCloser, error) {
|
||||
req, err := http.NewRequestWithContext(ctx, "POST", "http://"+apitype.LocalAPIHost+"/localapi/v0/debug-capture", nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
res, err := lc.doLocalRequestNiceError(req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if res.StatusCode != 200 {
|
||||
res.Body.Close()
|
||||
return nil, errors.New(res.Status)
|
||||
}
|
||||
return res.Body, nil
|
||||
}
|
||||
|
||||
// WatchIPNBus subscribes to the IPN notification bus. It returns a watcher
|
||||
// once the bus is connected successfully.
|
||||
//
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build go1.19
|
||||
|
||||
|
|
|
@ -1,10 +1,11 @@
|
|||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build !go1.20
|
||||
//go:build !go1.19
|
||||
|
||||
package tailscale
|
||||
|
||||
func init() {
|
||||
you_need_Go_1_20_to_compile_Tailscale()
|
||||
you_need_Go_1_19_to_compile_Tailscale()
|
||||
}
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build go1.19
|
||||
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build go1.19
|
||||
|
||||
|
@ -10,8 +11,6 @@ import (
|
|||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
|
||||
"tailscale.com/util/httpm"
|
||||
)
|
||||
|
||||
// TailnetDeleteRequest handles sending a DELETE request for a tailnet to control.
|
||||
|
@ -23,7 +22,7 @@ func (c *Client) TailnetDeleteRequest(ctx context.Context, tailnetID string) (er
|
|||
}()
|
||||
|
||||
path := fmt.Sprintf("%s/api/v2/tailnet/%s", c.baseURL(), url.PathEscape(string(tailnetID)))
|
||||
req, err := http.NewRequestWithContext(ctx, httpm.DELETE, path, nil)
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodDelete, path, nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build go1.19
|
||||
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
// Copyright (c) 2021 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Program addlicense adds a license header to a file.
|
||||
// It is intended for use with 'go generate',
|
||||
|
@ -14,24 +15,26 @@ import (
|
|||
)
|
||||
|
||||
var (
|
||||
year = flag.Int("year", 0, "copyright year")
|
||||
file = flag.String("file", "", "file to modify")
|
||||
)
|
||||
|
||||
func usage() {
|
||||
fmt.Fprintf(os.Stderr, `
|
||||
usage: addlicense -file FILE <subcommand args...>
|
||||
usage: addlicense -year YEAR -file FILE <subcommand args...>
|
||||
`[1:])
|
||||
|
||||
flag.PrintDefaults()
|
||||
fmt.Fprintf(os.Stderr, `
|
||||
addlicense adds a Tailscale license to the beginning of file.
|
||||
addlicense adds a Tailscale license to the beginning of file,
|
||||
using year as the copyright year.
|
||||
|
||||
It is intended for use with 'go generate', so it also runs a subcommand,
|
||||
which presumably creates the file.
|
||||
|
||||
Sample usage:
|
||||
|
||||
addlicense -file pull_strings.go stringer -type=pull
|
||||
addlicense -year 2021 -file pull_strings.go stringer -type=pull
|
||||
`[1:])
|
||||
os.Exit(2)
|
||||
}
|
||||
|
@ -51,7 +54,7 @@ func main() {
|
|||
check(err)
|
||||
f, err := os.OpenFile(*file, os.O_TRUNC|os.O_WRONLY, 0644)
|
||||
check(err)
|
||||
_, err = fmt.Fprint(f, license)
|
||||
_, err = fmt.Fprintf(f, license, *year)
|
||||
check(err)
|
||||
_, err = f.Write(b)
|
||||
check(err)
|
||||
|
@ -67,7 +70,8 @@ func check(err error) {
|
|||
}
|
||||
|
||||
var license = `
|
||||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
// Copyright (c) %d Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
`[1:]
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
// Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Cloner is a tool to automate the creation of a Clone method.
|
||||
//
|
||||
|
@ -79,7 +80,7 @@ func main() {
|
|||
w("}")
|
||||
}
|
||||
cloneOutput := pkg.Name + "_clone.go"
|
||||
if err := codegen.WritePackageFile("tailscale.com/cmd/cloner", pkg, cloneOutput, it, buf); err != nil {
|
||||
if err := codegen.WritePackageFile("tailscale.com/cmd/cloner", pkg, cloneOutput, codegen.CopyrightYear("."), it, buf); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -1,33 +1,144 @@
|
|||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build linux
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"encoding/base64"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"log"
|
||||
"net/http"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"tailscale.com/kube"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/util/multierr"
|
||||
)
|
||||
|
||||
// checkSecretPermissions checks the secret access permissions of the current
|
||||
// pod. It returns an error if the basic permissions tailscale needs are
|
||||
// missing, and reports whether the patch permission is additionally present.
|
||||
//
|
||||
// Errors encountered during the access checking process are logged, but ignored
|
||||
// so that the pod tries to fail alive if the permissions exist and there's just
|
||||
// something wrong with SelfSubjectAccessReviews. There shouldn't be, pods
|
||||
// should always be able to use SSARs to assess their own permissions, but since
|
||||
// we didn't use to check permissions this way we'll be cautious in case some
|
||||
// old version of k8s deviates from the current behavior.
|
||||
func checkSecretPermissions(ctx context.Context, secretName string) (canPatch bool, err error) {
|
||||
var errs []error
|
||||
for _, verb := range []string{"get", "update"} {
|
||||
ok, err := checkPermission(ctx, verb, secretName)
|
||||
if err != nil {
|
||||
log.Printf("error checking %s permission on secret %s: %v", verb, secretName, err)
|
||||
} else if !ok {
|
||||
errs = append(errs, fmt.Errorf("missing %s permission on secret %q", verb, secretName))
|
||||
}
|
||||
}
|
||||
if len(errs) > 0 {
|
||||
return false, multierr.New(errs...)
|
||||
}
|
||||
ok, err := checkPermission(ctx, "patch", secretName)
|
||||
if err != nil {
|
||||
log.Printf("error checking patch permission on secret %s: %v", secretName, err)
|
||||
return false, nil
|
||||
}
|
||||
return ok, nil
|
||||
}
|
||||
|
||||
// checkPermission reports whether the current pod has permission to use the
|
||||
// given verb (e.g. get, update, patch) on secretName.
|
||||
func checkPermission(ctx context.Context, verb, secretName string) (bool, error) {
|
||||
sar := map[string]any{
|
||||
"apiVersion": "authorization.k8s.io/v1",
|
||||
"kind": "SelfSubjectAccessReview",
|
||||
"spec": map[string]any{
|
||||
"resourceAttributes": map[string]any{
|
||||
"namespace": kubeNamespace,
|
||||
"verb": verb,
|
||||
"resource": "secrets",
|
||||
"name": secretName,
|
||||
},
|
||||
},
|
||||
}
|
||||
bs, err := json.Marshal(sar)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
req, err := http.NewRequest("POST", "/apis/authorization.k8s.io/v1/selfsubjectaccessreviews", bytes.NewReader(bs))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
resp, err := doKubeRequest(ctx, req)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
bs, err = io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
var res struct {
|
||||
Status struct {
|
||||
Allowed bool `json:"allowed"`
|
||||
} `json:"status"`
|
||||
}
|
||||
if err := json.Unmarshal(bs, &res); err != nil {
|
||||
return false, err
|
||||
}
|
||||
return res.Status.Allowed, nil
|
||||
}
|
||||
|
||||
// findKeyInKubeSecret inspects the kube secret secretName for a data
|
||||
// field called "authkey", and returns its value if present.
|
||||
func findKeyInKubeSecret(ctx context.Context, secretName string) (string, error) {
|
||||
s, err := kc.GetSecret(ctx, secretName)
|
||||
req, err := http.NewRequest("GET", fmt.Sprintf("/api/v1/namespaces/%s/secrets/%s", kubeNamespace, secretName), nil)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
ak, ok := s.Data["authkey"]
|
||||
if !ok {
|
||||
return "", nil
|
||||
resp, err := doKubeRequest(ctx, req)
|
||||
if err != nil {
|
||||
if resp != nil && resp.StatusCode == http.StatusNotFound {
|
||||
// Kube secret doesn't exist yet, can't have an authkey.
|
||||
return "", nil
|
||||
}
|
||||
return "", err
|
||||
}
|
||||
return string(ak), nil
|
||||
defer resp.Body.Close()
|
||||
|
||||
bs, err := io.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// We use a map[string]any here rather than import corev1.Secret,
|
||||
// because we only do very limited things to the secret, and
|
||||
// importing corev1 adds 12MiB to the compiled binary.
|
||||
var s map[string]any
|
||||
if err := json.Unmarshal(bs, &s); err != nil {
|
||||
return "", err
|
||||
}
|
||||
if d, ok := s["data"].(map[string]any); ok {
|
||||
if v, ok := d["authkey"].(string); ok {
|
||||
bs, err := base64.StdEncoding.DecodeString(v)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return string(bs), nil
|
||||
}
|
||||
}
|
||||
return "", nil
|
||||
}
|
||||
|
||||
// storeDeviceInfo writes deviceID into the "device_id" data field of the kube
|
||||
|
@ -35,38 +146,65 @@ func findKeyInKubeSecret(ctx context.Context, secretName string) (string, error)
|
|||
func storeDeviceInfo(ctx context.Context, secretName string, deviceID tailcfg.StableNodeID, fqdn string) error {
|
||||
// First check if the secret exists at all. Even if running on
|
||||
// kubernetes, we do not necessarily store state in a k8s secret.
|
||||
if _, err := kc.GetSecret(ctx, secretName); err != nil {
|
||||
if s, ok := err.(*kube.Status); ok {
|
||||
if s.Code >= 400 && s.Code <= 499 {
|
||||
// Assume the secret doesn't exist, or we don't have
|
||||
// permission to access it.
|
||||
return nil
|
||||
}
|
||||
req, err := http.NewRequest("GET", fmt.Sprintf("/api/v1/namespaces/%s/secrets/%s", kubeNamespace, secretName), nil)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
resp, err := doKubeRequest(ctx, req)
|
||||
if err != nil {
|
||||
if resp != nil && resp.StatusCode >= 400 && resp.StatusCode <= 499 {
|
||||
// Assume the secret doesn't exist, or we don't have
|
||||
// permission to access it.
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
m := &kube.Secret{
|
||||
Data: map[string][]byte{
|
||||
"device_id": []byte(deviceID),
|
||||
"device_fqdn": []byte(fqdn),
|
||||
m := map[string]map[string]string{
|
||||
"stringData": {
|
||||
"device_id": string(deviceID),
|
||||
"device_fqdn": fqdn,
|
||||
},
|
||||
}
|
||||
return kc.StrategicMergePatchSecret(ctx, secretName, m, "tailscale-container")
|
||||
var b bytes.Buffer
|
||||
if err := json.NewEncoder(&b).Encode(m); err != nil {
|
||||
return err
|
||||
}
|
||||
req, err = http.NewRequest("PATCH", fmt.Sprintf("/api/v1/namespaces/%s/secrets/%s?fieldManager=tailscale-container", kubeNamespace, secretName), &b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req.Header.Set("Content-Type", "application/strategic-merge-patch+json")
|
||||
if _, err := doKubeRequest(ctx, req); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
// deleteAuthKey deletes the 'authkey' field of the given kube
// secret. No-op if there is no authkey in the secret.
func deleteAuthKey(ctx context.Context, secretName string) error {
	// m is a JSON Patch data structure, see https://jsonpatch.com/ or RFC 6902.
	m := []kube.JSONPatch{
	m := []struct {
		Op   string `json:"op"`
		Path string `json:"path"`
	}{
		{
			Op:   "remove",
			Path: "/data/authkey",
		},
	}
	if err := kc.JSONPatchSecret(ctx, secretName, m); err != nil {
		if s, ok := err.(*kube.Status); ok && s.Code == http.StatusUnprocessableEntity {
	var b bytes.Buffer
	if err := json.NewEncoder(&b).Encode(m); err != nil {
		return err
	}
	req, err := http.NewRequest("PATCH", fmt.Sprintf("/api/v1/namespaces/%s/secrets/%s?fieldManager=tailscale-container", kubeNamespace, secretName), &b)
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/json-patch+json")
	if resp, err := doKubeRequest(ctx, req); err != nil {
		if resp != nil && resp.StatusCode == http.StatusUnprocessableEntity {
			// This is kubernetes-ese for "the field you asked to
			// delete already doesn't exist", aka no-op.
			return nil
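For comparison with the merge patch shown earlier, the RFC 6902 document built in deleteAuthKey serializes to a single remove operation. A short sketch using the same shape as the anonymous struct above:

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	patch := []struct {
		Op   string `json:"op"`
		Path string `json:"path"`
	}{
		{Op: "remove", Path: "/data/authkey"},
	}
	b, _ := json.Marshal(patch)
	fmt.Println(string(b)) // [{"op":"remove","path":"/data/authkey"}]
}

This body is sent with Content-Type application/json-patch+json; the apiserver answers 422 Unprocessable Entity when the path does not exist, which is why the caller treats that status as a no-op.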
@@ -76,22 +214,65 @@ func deleteAuthKey(ctx context.Context, secretName string) error {
	return nil
}

var kc *kube.Client
var (
	kubeHost      string
	kubeNamespace string
	kubeToken     string
	kubeHTTP      *http.Transport
)

func initKube(root string) {
	if root != "/" {
		// If we are running in a test, we need to set the root path to the fake
		// service account directory.
		kube.SetRootPathForTesting(root)
	// If running in Kubernetes, set things up so that doKubeRequest
	// can talk successfully to the kube apiserver.
	if os.Getenv("KUBERNETES_SERVICE_HOST") == "" {
		return
	}
	var err error
	kc, err = kube.New()

	kubeHost = os.Getenv("KUBERNETES_SERVICE_HOST") + ":" + os.Getenv("KUBERNETES_SERVICE_PORT_HTTPS")

	bs, err := os.ReadFile(filepath.Join(root, "var/run/secrets/kubernetes.io/serviceaccount/namespace"))
	if err != nil {
		log.Fatalf("Error creating kube client: %v", err)
		log.Fatalf("Error reading kube namespace: %v", err)
	}
	if root != "/" {
		// If we are running in a test, we need to set the URL to the
		// httptest server.
		kc.SetURL(fmt.Sprintf("https://%s:%s", os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT_HTTPS")))
	kubeNamespace = strings.TrimSpace(string(bs))

	bs, err = os.ReadFile(filepath.Join(root, "var/run/secrets/kubernetes.io/serviceaccount/token"))
	if err != nil {
		log.Fatalf("Error reading kube token: %v", err)
	}
	kubeToken = strings.TrimSpace(string(bs))

	bs, err = os.ReadFile(filepath.Join(root, "var/run/secrets/kubernetes.io/serviceaccount/ca.crt"))
	if err != nil {
		log.Fatalf("Error reading kube CA cert: %v", err)
	}
	cp := x509.NewCertPool()
	cp.AppendCertsFromPEM(bs)
	kubeHTTP = &http.Transport{
		TLSClientConfig: &tls.Config{
			RootCAs: cp,
		},
		IdleConnTimeout: time.Second,
	}
}

// doKubeRequest sends r to the kube apiserver.
func doKubeRequest(ctx context.Context, r *http.Request) (*http.Response, error) {
	if kubeHTTP == nil {
		panic("not in kubernetes")
	}

	r.URL.Scheme = "https"
	r.URL.Host = kubeHost
	r.Header.Set("Authorization", "Bearer "+kubeToken)
	r.Header.Set("Accept", "application/json")

	resp, err := kubeHTTP.RoundTrip(r)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated {
		return resp, fmt.Errorf("got non-200/201 status code %d", resp.StatusCode)
	}
	return resp, nil
}
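A rough sketch of how the root override in initKube can be exercised from a test. The file names mirror the reads above, but the directory layout, env values, and assertions here are assumptions for illustration, not the repo's actual test:

// Assumes the usual test imports: os, path/filepath, testing.
func TestInitKubeFakeRoot(t *testing.T) {
	root := t.TempDir()
	sa := filepath.Join(root, "var/run/secrets/kubernetes.io/serviceaccount")
	if err := os.MkdirAll(sa, 0o755); err != nil {
		t.Fatal(err)
	}
	// Fake service account files; contents are arbitrary for the test.
	os.WriteFile(filepath.Join(sa, "namespace"), []byte("default\n"), 0o644)
	os.WriteFile(filepath.Join(sa, "token"), []byte("fake-token\n"), 0o644)
	os.WriteFile(filepath.Join(sa, "ca.crt"), nil, 0o644) // empty is fine; AppendCertsFromPEM ignores it
	t.Setenv("KUBERNETES_SERVICE_HOST", "127.0.0.1")
	t.Setenv("KUBERNETES_SERVICE_PORT_HTTPS", "8443")

	initKube(root)

	if kubeNamespace != "default" || kubeToken != "fake-token" {
		t.Fatalf("got namespace %q, token %q", kubeNamespace, kubeToken)
	}
}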
@ -1,5 +1,6 @@
|
|||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build linux
|
||||
|
||||
|
@ -12,7 +13,6 @@
|
|||
// variables. All configuration is optional.
|
||||
//
|
||||
// - TS_AUTHKEY: the authkey to use for login.
|
||||
// - TS_HOSTNAME: the hostname to request for the node.
|
||||
// - TS_ROUTES: subnet routes to advertise.
|
||||
// - TS_DEST_IP: proxy all incoming Tailscale traffic to the given
|
||||
// destination.
|
||||
|
@ -74,7 +74,6 @@ func main() {
|
|||
|
||||
cfg := &settings{
|
||||
AuthKey: defaultEnvs([]string{"TS_AUTHKEY", "TS_AUTH_KEY"}, ""),
|
||||
Hostname: defaultEnv("TS_HOSTNAME", ""),
|
||||
Routes: defaultEnv("TS_ROUTES", ""),
|
||||
ProxyTo: defaultEnv("TS_DEST_IP", ""),
|
||||
DaemonExtraArgs: defaultEnv("TS_TAILSCALED_EXTRA_ARGS", ""),
|
||||
|
@ -123,7 +122,7 @@ func main() {
|
|||
defer cancel()
|
||||
|
||||
if cfg.InKubernetes && cfg.KubeSecret != "" {
|
||||
canPatch, err := kc.CheckSecretPermissions(ctx, cfg.KubeSecret)
|
||||
canPatch, err := checkSecretPermissions(ctx, cfg.KubeSecret)
|
||||
if err != nil {
|
||||
log.Fatalf("Some Kubernetes permissions are missing, please check your RBAC configuration: %v", err)
|
||||
}
|
||||
|
@ -356,11 +355,7 @@ func tailscaledArgs(cfg *settings) []string {
|
|||
args := []string{"--socket=" + cfg.Socket}
|
||||
switch {
|
||||
case cfg.InKubernetes && cfg.KubeSecret != "":
|
||||
args = append(args, "--state=kube:"+cfg.KubeSecret)
|
||||
if cfg.StateDir == "" {
|
||||
cfg.StateDir = "/tmp"
|
||||
}
|
||||
fallthrough
|
||||
args = append(args, "--state=kube:"+cfg.KubeSecret, "--statedir=/tmp")
|
||||
case cfg.StateDir != "":
|
||||
args = append(args, "--statedir="+cfg.StateDir)
|
||||
default:
|
||||
|
@ -399,9 +394,6 @@ func tailscaleUp(ctx context.Context, cfg *settings) error {
|
|||
if cfg.Routes != "" {
|
||||
args = append(args, "--advertise-routes="+cfg.Routes)
|
||||
}
|
||||
if cfg.Hostname != "" {
|
||||
args = append(args, "--hostname="+cfg.Hostname)
|
||||
}
|
||||
if cfg.ExtraArgs != "" {
|
||||
args = append(args, strings.Fields(cfg.ExtraArgs)...)
|
||||
}
|
||||
|
@ -530,7 +522,6 @@ func installIPTablesRule(ctx context.Context, dstStr string, tsIPs []netip.Prefi
|
|||
// settings is all the configuration for containerboot.
|
||||
type settings struct {
|
||||
AuthKey string
|
||||
Hostname string
|
||||
Routes string
|
||||
ProxyTo string
|
||||
DaemonExtraArgs string
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build linux
|
||||
|
||||
|
@ -549,22 +550,6 @@ func TestContainerBoot(t *testing.T) {
|
|||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "hostname",
|
||||
Env: map[string]string{
|
||||
"TS_HOSTNAME": "my-server",
|
||||
},
|
||||
Phases: []phase{
|
||||
{
|
||||
WantCmds: []string{
|
||||
"/usr/bin/tailscaled --socket=/tmp/tailscaled.sock --state=mem: --statedir=/tmp --tun=userspace-networking",
|
||||
"/usr/bin/tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --hostname=my-server",
|
||||
},
|
||||
}, {
|
||||
Notify: runningNotify,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
|
@ -607,7 +592,7 @@ func TestContainerBoot(t *testing.T) {
|
|||
}()
|
||||
|
||||
var wantCmds []string
|
||||
for i, p := range test.Phases {
|
||||
for _, p := range test.Phases {
|
||||
lapi.Notify(p.Notify)
|
||||
wantCmds = append(wantCmds, p.WantCmds...)
|
||||
waitArgs(t, 2*time.Second, d, argFile, strings.Join(wantCmds, "\n"))
|
||||
|
@ -626,7 +611,7 @@ func TestContainerBoot(t *testing.T) {
|
|||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
t.Fatalf("phase %d: %v", i, err)
|
||||
t.Fatal(err)
|
||||
}
|
||||
err = tstest.WaitFor(2*time.Second, func() error {
|
||||
for path, want := range p.WantFiles {
|
||||
|
@ -983,13 +968,13 @@ func (k *kubeServer) serveSecret(w http.ResponseWriter, r *http.Request) {
|
|||
}
|
||||
case "application/strategic-merge-patch+json":
|
||||
req := struct {
|
||||
Data map[string][]byte `json:"data"`
|
||||
Data map[string]string `json:"stringData"`
|
||||
}{}
|
||||
if err := json.Unmarshal(bs, &req); err != nil {
|
||||
panic(fmt.Sprintf("json decode failed: %v. Body:\n\n%s", err, string(bs)))
|
||||
}
|
||||
for key, val := range req.Data {
|
||||
k.secret[key] = string(val)
|
||||
k.secret[key] = val
|
||||
}
|
||||
default:
|
||||
panic(fmt.Sprintf("unknown content type %q", r.Header.Get("Content-Type")))
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
// Copyright (c) 2021 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
|
@ -14,7 +15,6 @@ import (
|
|||
"time"
|
||||
|
||||
"tailscale.com/syncs"
|
||||
"tailscale.com/util/slicesx"
|
||||
)
|
||||
|
||||
const refreshTimeout = time.Minute
|
||||
|
@ -53,13 +53,6 @@ func refreshBootstrapDNS() {
|
|||
ctx, cancel := context.WithTimeout(context.Background(), refreshTimeout)
|
||||
defer cancel()
|
||||
dnsEntries := resolveList(ctx, strings.Split(*bootstrapDNS, ","))
|
||||
// Randomize the order of the IPs for each name to avoid the client biasing
|
||||
// to IPv6
|
||||
for k := range dnsEntries {
|
||||
ips := dnsEntries[k]
|
||||
slicesx.Shuffle(ips)
|
||||
dnsEntries[k] = ips
|
||||
}
|
||||
j, err := json.MarshalIndent(dnsEntries, "", "\t")
|
||||
if err != nil {
|
||||
// leave the old values in place
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
|
@ -11,12 +12,14 @@ import (
|
|||
"net/url"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"tailscale.com/tstest"
|
||||
)
|
||||
|
||||
func BenchmarkHandleBootstrapDNS(b *testing.B) {
|
||||
tstest.Replace(b, bootstrapDNS, "log.tailscale.io,login.tailscale.com,controlplane.tailscale.com,login.us.tailscale.com")
|
||||
prev := *bootstrapDNS
|
||||
*bootstrapDNS = "log.tailscale.io,login.tailscale.com,controlplane.tailscale.com,login.us.tailscale.com"
|
||||
defer func() {
|
||||
*bootstrapDNS = prev
|
||||
}()
|
||||
refreshBootstrapDNS()
|
||||
w := new(bitbucketResponseWriter)
|
||||
req, _ := http.NewRequest("GET", "https://localhost/bootstrap-dns?q="+url.QueryEscape("log.tailscale.io"), nil)
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
// Copyright (c) 2021 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
|
@ -72,7 +73,7 @@ func NewManualCertManager(certdir, hostname string) (certProvider, error) {
|
|||
return nil, fmt.Errorf("can not load cert: %w", err)
|
||||
}
|
||||
if err := x509Cert.VerifyHostname(hostname); err != nil {
|
||||
// return nil, fmt.Errorf("cert invalid for hostname %q: %w", hostname, err)
|
||||
return nil, fmt.Errorf("cert invalid for hostname %q: %w", hostname, err)
|
||||
}
|
||||
return &manualCertManager{cert: &cert, hostname: hostname}, nil
|
||||
}
|
||||
|
@ -81,7 +82,7 @@ func (m *manualCertManager) TLSConfig() *tls.Config {
|
|||
return &tls.Config{
|
||||
Certificates: nil,
|
||||
NextProtos: []string{
|
||||
"http/1.1",
|
||||
"h2", "http/1.1", // enable HTTP/2
|
||||
},
|
||||
GetCertificate: m.getCertificate,
|
||||
}
|
||||
|
@ -89,7 +90,7 @@ func (m *manualCertManager) TLSConfig() *tls.Config {
|
|||
|
||||
func (m *manualCertManager) getCertificate(hi *tls.ClientHelloInfo) (*tls.Certificate, error) {
|
||||
if hi.ServerName != m.hostname {
|
||||
//return nil, fmt.Errorf("cert mismatch with hostname: %q", hi.ServerName)
|
||||
return nil, fmt.Errorf("cert mismatch with hostname: %q", hi.ServerName)
|
||||
}
|
||||
|
||||
// Return a shallow copy of the cert so the caller can append to its
|
||||
|
|
|
@ -3,94 +3,26 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
|
|||
filippo.io/edwards25519 from github.com/hdevalence/ed25519consensus
|
||||
filippo.io/edwards25519/field from filippo.io/edwards25519
|
||||
W 💣 github.com/Microsoft/go-winio from tailscale.com/safesocket
|
||||
W 💣 github.com/Microsoft/go-winio/internal/fs from github.com/Microsoft/go-winio
|
||||
W 💣 github.com/Microsoft/go-winio/internal/socket from github.com/Microsoft/go-winio
|
||||
W github.com/Microsoft/go-winio/internal/stringbuffer from github.com/Microsoft/go-winio/internal/fs
|
||||
W github.com/Microsoft/go-winio/pkg/guid from github.com/Microsoft/go-winio+
|
||||
W 💣 github.com/alexbrainman/sspi from github.com/alexbrainman/sspi/internal/common+
|
||||
W github.com/alexbrainman/sspi/internal/common from github.com/alexbrainman/sspi/negotiate
|
||||
W 💣 github.com/alexbrainman/sspi/negotiate from tailscale.com/net/tshttpproxy
|
||||
github.com/beorn7/perks/quantile from github.com/prometheus/client_golang/prometheus
|
||||
💣 github.com/cespare/xxhash/v2 from github.com/prometheus/client_golang/prometheus
|
||||
L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw
|
||||
github.com/fxamacker/cbor/v2 from tailscale.com/tka
|
||||
github.com/golang/groupcache/lru from tailscale.com/net/dnscache
|
||||
github.com/golang/protobuf/proto from github.com/matttproud/golang_protobuf_extensions/pbutil+
|
||||
L github.com/google/nftables from tailscale.com/util/linuxfw
|
||||
L 💣 github.com/google/nftables/alignedbuff from github.com/google/nftables/xt
|
||||
L 💣 github.com/google/nftables/binaryutil from github.com/google/nftables+
|
||||
L github.com/google/nftables/expr from github.com/google/nftables+
|
||||
L github.com/google/nftables/internal/parseexprfunc from github.com/google/nftables+
|
||||
L github.com/google/nftables/xt from github.com/google/nftables/expr+
|
||||
github.com/hdevalence/ed25519consensus from tailscale.com/tka
|
||||
L github.com/josharian/native from github.com/mdlayher/netlink+
|
||||
L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/interfaces+
|
||||
LW github.com/josharian/native from github.com/mdlayher/netlink+
|
||||
L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/interfaces
|
||||
L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink
|
||||
github.com/klauspost/compress/flate from nhooyr.io/websocket
|
||||
github.com/matttproud/golang_protobuf_extensions/pbutil from github.com/prometheus/common/expfmt
|
||||
L 💣 github.com/mdlayher/netlink from github.com/jsimonetti/rtnetlink+
|
||||
L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+
|
||||
L github.com/mdlayher/netlink/nltest from github.com/google/nftables
|
||||
L 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink
|
||||
💣 github.com/mitchellh/go-ps from tailscale.com/safesocket
|
||||
💣 github.com/prometheus/client_golang/prometheus from tailscale.com/tsweb/promvarz
|
||||
github.com/prometheus/client_golang/prometheus/internal from github.com/prometheus/client_golang/prometheus
|
||||
github.com/prometheus/client_model/go from github.com/prometheus/client_golang/prometheus+
|
||||
github.com/prometheus/common/expfmt from github.com/prometheus/client_golang/prometheus+
|
||||
github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg from github.com/prometheus/common/expfmt
|
||||
github.com/prometheus/common/model from github.com/prometheus/client_golang/prometheus+
|
||||
LD github.com/prometheus/procfs from github.com/prometheus/client_golang/prometheus
|
||||
LD github.com/prometheus/procfs/internal/fs from github.com/prometheus/procfs
|
||||
LD github.com/prometheus/procfs/internal/util from github.com/prometheus/procfs
|
||||
L 💣 github.com/tailscale/netlink from tailscale.com/util/linuxfw
|
||||
L 💣 github.com/vishvananda/netlink/nl from github.com/tailscale/netlink
|
||||
L github.com/vishvananda/netns from github.com/tailscale/netlink+
|
||||
github.com/x448/float16 from github.com/fxamacker/cbor/v2
|
||||
💣 go4.org/mem from tailscale.com/client/tailscale+
|
||||
go4.org/netipx from tailscale.com/wgengine/filter
|
||||
W 💣 golang.zx2c4.com/wireguard/windows/tunnel/winipcfg from tailscale.com/net/interfaces+
|
||||
google.golang.org/protobuf/encoding/prototext from github.com/golang/protobuf/proto+
|
||||
google.golang.org/protobuf/encoding/protowire from github.com/golang/protobuf/proto+
|
||||
google.golang.org/protobuf/internal/descfmt from google.golang.org/protobuf/internal/filedesc
|
||||
google.golang.org/protobuf/internal/descopts from google.golang.org/protobuf/internal/filedesc+
|
||||
google.golang.org/protobuf/internal/detrand from google.golang.org/protobuf/internal/descfmt+
|
||||
google.golang.org/protobuf/internal/encoding/defval from google.golang.org/protobuf/internal/encoding/tag+
|
||||
google.golang.org/protobuf/internal/encoding/messageset from google.golang.org/protobuf/encoding/prototext+
|
||||
google.golang.org/protobuf/internal/encoding/tag from google.golang.org/protobuf/internal/impl
|
||||
google.golang.org/protobuf/internal/encoding/text from google.golang.org/protobuf/encoding/prototext+
|
||||
google.golang.org/protobuf/internal/errors from google.golang.org/protobuf/encoding/prototext+
|
||||
google.golang.org/protobuf/internal/filedesc from google.golang.org/protobuf/internal/encoding/tag+
|
||||
google.golang.org/protobuf/internal/filetype from google.golang.org/protobuf/runtime/protoimpl
|
||||
google.golang.org/protobuf/internal/flags from google.golang.org/protobuf/encoding/prototext+
|
||||
google.golang.org/protobuf/internal/genid from google.golang.org/protobuf/encoding/prototext+
|
||||
💣 google.golang.org/protobuf/internal/impl from google.golang.org/protobuf/internal/filetype+
|
||||
google.golang.org/protobuf/internal/order from google.golang.org/protobuf/encoding/prototext+
|
||||
google.golang.org/protobuf/internal/pragma from google.golang.org/protobuf/encoding/prototext+
|
||||
google.golang.org/protobuf/internal/set from google.golang.org/protobuf/encoding/prototext
|
||||
💣 google.golang.org/protobuf/internal/strs from google.golang.org/protobuf/encoding/prototext+
|
||||
google.golang.org/protobuf/internal/version from google.golang.org/protobuf/runtime/protoimpl
|
||||
google.golang.org/protobuf/proto from github.com/golang/protobuf/proto+
|
||||
google.golang.org/protobuf/reflect/protodesc from github.com/golang/protobuf/proto
|
||||
💣 google.golang.org/protobuf/reflect/protoreflect from github.com/golang/protobuf/proto+
|
||||
google.golang.org/protobuf/reflect/protoregistry from github.com/golang/protobuf/proto+
|
||||
google.golang.org/protobuf/runtime/protoiface from github.com/golang/protobuf/proto+
|
||||
google.golang.org/protobuf/runtime/protoimpl from github.com/golang/protobuf/proto+
|
||||
google.golang.org/protobuf/types/descriptorpb from google.golang.org/protobuf/reflect/protodesc
|
||||
google.golang.org/protobuf/types/known/timestamppb from github.com/prometheus/client_golang/prometheus+
|
||||
L gvisor.dev/gvisor/pkg/abi from gvisor.dev/gvisor/pkg/abi/linux
|
||||
L 💣 gvisor.dev/gvisor/pkg/abi/linux from tailscale.com/util/linuxfw
|
||||
L gvisor.dev/gvisor/pkg/bits from gvisor.dev/gvisor/pkg/abi/linux
|
||||
L gvisor.dev/gvisor/pkg/context from gvisor.dev/gvisor/pkg/abi/linux
|
||||
L 💣 gvisor.dev/gvisor/pkg/gohacks from gvisor.dev/gvisor/pkg/abi/linux+
|
||||
L 💣 gvisor.dev/gvisor/pkg/hostarch from gvisor.dev/gvisor/pkg/abi/linux+
|
||||
L gvisor.dev/gvisor/pkg/linewriter from gvisor.dev/gvisor/pkg/log
|
||||
L gvisor.dev/gvisor/pkg/log from gvisor.dev/gvisor/pkg/context
|
||||
L gvisor.dev/gvisor/pkg/marshal from gvisor.dev/gvisor/pkg/abi/linux+
|
||||
L 💣 gvisor.dev/gvisor/pkg/marshal/primitive from gvisor.dev/gvisor/pkg/abi/linux
|
||||
L 💣 gvisor.dev/gvisor/pkg/state from gvisor.dev/gvisor/pkg/abi/linux+
|
||||
L gvisor.dev/gvisor/pkg/state/wire from gvisor.dev/gvisor/pkg/state
|
||||
L 💣 gvisor.dev/gvisor/pkg/sync from gvisor.dev/gvisor/pkg/linewriter+
|
||||
L gvisor.dev/gvisor/pkg/waiter from gvisor.dev/gvisor/pkg/context
|
||||
nhooyr.io/websocket from tailscale.com/cmd/derper+
|
||||
nhooyr.io/websocket/internal/errd from nhooyr.io/websocket
|
||||
nhooyr.io/websocket/internal/xsync from nhooyr.io/websocket
|
||||
|
@ -102,7 +34,6 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
|
|||
tailscale.com/derp/derphttp from tailscale.com/cmd/derper
|
||||
tailscale.com/disco from tailscale.com/derp
|
||||
tailscale.com/envknob from tailscale.com/derp+
|
||||
tailscale.com/health from tailscale.com/net/tlsdial
|
||||
tailscale.com/hostinfo from tailscale.com/net/interfaces+
|
||||
tailscale.com/ipn from tailscale.com/client/tailscale
|
||||
tailscale.com/ipn/ipnstate from tailscale.com/client/tailscale+
|
||||
|
@ -112,13 +43,10 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
|
|||
💣 tailscale.com/net/interfaces from tailscale.com/net/netns+
|
||||
tailscale.com/net/netaddr from tailscale.com/ipn+
|
||||
tailscale.com/net/netknob from tailscale.com/net/netns
|
||||
tailscale.com/net/netmon from tailscale.com/net/sockstats+
|
||||
tailscale.com/net/netns from tailscale.com/derp/derphttp
|
||||
tailscale.com/net/netutil from tailscale.com/client/tailscale
|
||||
tailscale.com/net/packet from tailscale.com/wgengine/filter
|
||||
tailscale.com/net/sockstats from tailscale.com/derp/derphttp
|
||||
tailscale.com/net/stun from tailscale.com/cmd/derper
|
||||
L tailscale.com/net/tcpinfo from tailscale.com/derp
|
||||
tailscale.com/net/tlsdial from tailscale.com/derp/derphttp
|
||||
tailscale.com/net/tsaddr from tailscale.com/ipn+
|
||||
💣 tailscale.com/net/tshttpproxy from tailscale.com/derp/derphttp+
|
||||
|
@ -130,15 +58,12 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
|
|||
tailscale.com/tka from tailscale.com/client/tailscale+
|
||||
W tailscale.com/tsconst from tailscale.com/net/interfaces
|
||||
💣 tailscale.com/tstime/mono from tailscale.com/tstime/rate
|
||||
tailscale.com/tstime/rate from tailscale.com/wgengine/filter+
|
||||
tailscale.com/tstime/rate from tailscale.com/wgengine/filter
|
||||
tailscale.com/tsweb from tailscale.com/cmd/derper
|
||||
tailscale.com/tsweb/promvarz from tailscale.com/tsweb
|
||||
tailscale.com/tsweb/varz from tailscale.com/tsweb+
|
||||
tailscale.com/types/dnstype from tailscale.com/tailcfg
|
||||
tailscale.com/types/empty from tailscale.com/ipn
|
||||
tailscale.com/types/ipproto from tailscale.com/net/flowtrack+
|
||||
tailscale.com/types/key from tailscale.com/cmd/derper+
|
||||
tailscale.com/types/lazy from tailscale.com/version+
|
||||
tailscale.com/types/logger from tailscale.com/cmd/derper+
|
||||
tailscale.com/types/netmap from tailscale.com/ipn
|
||||
tailscale.com/types/opt from tailscale.com/client/tailscale+
|
||||
|
@ -151,18 +76,12 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
|
|||
W tailscale.com/util/clientmetric from tailscale.com/net/tshttpproxy
|
||||
tailscale.com/util/cloudenv from tailscale.com/hostinfo+
|
||||
W tailscale.com/util/cmpver from tailscale.com/net/tshttpproxy
|
||||
tailscale.com/util/cmpx from tailscale.com/cmd/derper+
|
||||
L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics
|
||||
tailscale.com/util/dnsname from tailscale.com/hostinfo+
|
||||
tailscale.com/util/httpm from tailscale.com/client/tailscale
|
||||
tailscale.com/util/lineread from tailscale.com/hostinfo+
|
||||
L 💣 tailscale.com/util/linuxfw from tailscale.com/net/netns
|
||||
tailscale.com/util/mak from tailscale.com/syncs+
|
||||
tailscale.com/util/multierr from tailscale.com/health+
|
||||
tailscale.com/util/set from tailscale.com/health+
|
||||
tailscale.com/util/singleflight from tailscale.com/net/dnscache
|
||||
tailscale.com/util/slicesx from tailscale.com/cmd/derper+
|
||||
tailscale.com/util/vizerror from tailscale.com/tsweb
|
||||
tailscale.com/util/strs from tailscale.com/hostinfo+
|
||||
W 💣 tailscale.com/util/winutil from tailscale.com/hostinfo+
|
||||
tailscale.com/version from tailscale.com/derp+
|
||||
tailscale.com/version/distro from tailscale.com/hostinfo+
|
||||
|
@ -176,18 +95,17 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
|
|||
golang.org/x/crypto/chacha20poly1305 from crypto/tls
|
||||
golang.org/x/crypto/cryptobyte from crypto/ecdsa+
|
||||
golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+
|
||||
golang.org/x/crypto/curve25519 from golang.org/x/crypto/nacl/box+
|
||||
golang.org/x/crypto/curve25519 from crypto/tls+
|
||||
golang.org/x/crypto/hkdf from crypto/tls
|
||||
golang.org/x/crypto/nacl/box from tailscale.com/types/key
|
||||
golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box
|
||||
golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+
|
||||
golang.org/x/exp/constraints from golang.org/x/exp/slices
|
||||
golang.org/x/exp/maps from tailscale.com/types/views
|
||||
golang.org/x/exp/slices from tailscale.com/net/tsaddr+
|
||||
L golang.org/x/net/bpf from github.com/mdlayher/netlink+
|
||||
golang.org/x/net/dns/dnsmessage from net+
|
||||
golang.org/x/net/http/httpguts from net/http
|
||||
golang.org/x/net/http/httpproxy from net/http+
|
||||
golang.org/x/net/http/httpproxy from net/http
|
||||
golang.org/x/net/http2/hpack from net/http
|
||||
golang.org/x/net/idna from golang.org/x/crypto/acme/autocert+
|
||||
golang.org/x/net/proxy from tailscale.com/net/netns
|
||||
|
@ -208,7 +126,6 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
|
|||
bytes from bufio+
|
||||
compress/flate from compress/gzip+
|
||||
compress/gzip from internal/profile+
|
||||
L compress/zlib from debug/elf
|
||||
container/list from crypto/tls+
|
||||
context from crypto/tls+
|
||||
crypto from crypto/ecdsa+
|
||||
|
@ -216,7 +133,6 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
|
|||
crypto/cipher from crypto/aes+
|
||||
crypto/des from crypto/tls+
|
||||
crypto/dsa from crypto/x509
|
||||
crypto/ecdh from crypto/ecdsa+
|
||||
crypto/ecdsa from crypto/tls+
|
||||
crypto/ed25519 from crypto/tls+
|
||||
crypto/elliptic from crypto/ecdsa+
|
||||
|
@ -232,8 +148,6 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
|
|||
crypto/tls from golang.org/x/crypto/acme+
|
||||
crypto/x509 from crypto/tls+
|
||||
crypto/x509/pkix from crypto/x509+
|
||||
L debug/dwarf from debug/elf
|
||||
L debug/elf from golang.org/x/sys/unix
|
||||
embed from crypto/internal/nistec+
|
||||
encoding from encoding/json+
|
||||
encoding/asn1 from crypto/x509+
|
||||
|
@ -247,18 +161,14 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
|
|||
expvar from tailscale.com/cmd/derper+
|
||||
flag from tailscale.com/cmd/derper
|
||||
fmt from compress/flate+
|
||||
go/token from google.golang.org/protobuf/internal/strs
|
||||
hash from crypto+
|
||||
L hash/adler32 from compress/zlib
|
||||
hash/crc32 from compress/gzip+
|
||||
hash/fnv from google.golang.org/protobuf/internal/detrand
|
||||
hash/maphash from go4.org/mem
|
||||
html from net/http/pprof+
|
||||
io from bufio+
|
||||
io/fs from crypto/x509+
|
||||
io/ioutil from github.com/mitchellh/go-ps+
|
||||
log from expvar+
|
||||
log/internal from log
|
||||
math from compress/flate+
|
||||
math/big from crypto/dsa+
|
||||
math/bits from compress/flate+
|
||||
|
@ -270,7 +180,7 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
|
|||
net/http from expvar+
|
||||
net/http/httptrace from net/http+
|
||||
net/http/internal from net/http
|
||||
net/http/pprof from tailscale.com/tsweb+
|
||||
net/http/pprof from tailscale.com/tsweb
|
||||
net/netip from go4.org/netipx+
|
||||
net/textproto from golang.org/x/net/http/httpguts+
|
||||
net/url from crypto/x509+
|
||||
|
@ -283,7 +193,6 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
|
|||
regexp from internal/profile+
|
||||
regexp/syntax from regexp
|
||||
runtime/debug from golang.org/x/crypto/acme+
|
||||
runtime/metrics from github.com/prometheus/client_golang/prometheus+
|
||||
runtime/pprof from net/http/pprof
|
||||
runtime/trace from net/http/pprof
|
||||
sort from compress/flate+
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
// Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// The derper binary is a simple DERP server.
|
||||
package main // import "tailscale.com/cmd/derper"
|
||||
|
@ -33,12 +34,11 @@ import (
|
|||
"tailscale.com/net/stun"
|
||||
"tailscale.com/tsweb"
|
||||
"tailscale.com/types/key"
|
||||
"tailscale.com/util/cmpx"
|
||||
)
|
||||
|
||||
var (
|
||||
dev = flag.Bool("dev", false, "run in localhost development mode (overrides -a)")
|
||||
addr = flag.String("a", ":443", "server HTTP/HTTPS listen address, in form \":port\", \"ip:port\", or for IPv6 \"[ip]:port\". If the IP is omitted, it defaults to all interfaces. Serves HTTPS if the port is 443 and/or -certmode is manual, otherwise HTTP.")
|
||||
dev = flag.Bool("dev", false, "run in localhost development mode")
|
||||
addr = flag.String("a", ":443", "server HTTPS listen address, in form \":port\", \"ip:port\", or for IPv6 \"[ip]:port\". If the IP is omitted, it defaults to all interfaces.")
|
||||
httpPort = flag.Int("http-port", 80, "The port on which to serve HTTP. Set to -1 to disable. The listener is bound to the same IP (if any) as specified in the -a flag.")
|
||||
stunPort = flag.Int("stun-port", 3478, "The UDP port on which to serve STUN. The listener is bound to the same IP (if any) as specified in the -a flag.")
|
||||
configPath = flag.String("c", "", "config file path")
|
||||
|
@ -437,7 +437,11 @@ func defaultMeshPSKFile() string {
|
|||
}
|
||||
|
||||
func rateLimitedListenAndServeTLS(srv *http.Server) error {
|
||||
ln, err := net.Listen("tcp", cmpx.Or(srv.Addr, ":https"))
|
||||
addr := srv.Addr
|
||||
if addr == "" {
|
||||
addr = ":https"
|
||||
}
|
||||
ln, err := net.Listen("tcp", addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
// Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
// Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
|
@ -16,6 +17,7 @@ import (
|
|||
"tailscale.com/derp/derphttp"
|
||||
"tailscale.com/types/key"
|
||||
"tailscale.com/types/logger"
|
||||
"tailscale.com/util/strs"
|
||||
)
|
||||
|
||||
func startMesh(s *derp.Server) error {
|
||||
|
@ -49,7 +51,7 @@ func startMeshWithHost(s *derp.Server, host string) error {
|
|||
}
|
||||
var d net.Dialer
|
||||
var r net.Resolver
|
||||
if base, ok := strings.CutSuffix(host, ".tailscale.com"); ok && port == "443" {
|
||||
if base, ok := strs.CutSuffix(host, ".tailscale.com"); ok && port == "443" {
|
||||
subCtx, cancel := context.WithTimeout(ctx, 2*time.Second)
|
||||
defer cancel()
|
||||
vpcHost := base + "-vpc.tailscale.com"
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
// Copyright (c) 2021 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
|
|
|
@ -1,46 +1,74 @@
|
|||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
// Copyright (c) 2021 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// The derpprobe binary probes derpers.
|
||||
package main
|
||||
package main // import "tailscale.com/cmd/derper/derpprobe"
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
crand "crypto/rand"
|
||||
"crypto/x509"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"html"
|
||||
"io"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
"os"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"tailscale.com/prober"
|
||||
"tailscale.com/tsweb"
|
||||
"tailscale.com/derp"
|
||||
"tailscale.com/derp/derphttp"
|
||||
"tailscale.com/net/stun"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/key"
|
||||
)
|
||||
|
||||
var (
|
||||
derpMapURL = flag.String("derp-map", "https://login.tailscale.com/derpmap/default", "URL to DERP map (https:// or file://)")
|
||||
listen = flag.String("listen", ":8030", "HTTP listen address")
|
||||
probeOnce = flag.Bool("once", false, "probe once and print results, then exit; ignores the listen flag")
|
||||
spread = flag.Bool("spread", true, "whether to spread probing over time")
|
||||
interval = flag.Duration("interval", 15*time.Second, "probe interval")
|
||||
)
|
||||
|
||||
// certReissueAfter is the time after which we expect all certs to be
|
||||
// reissued, at minimum.
|
||||
//
|
||||
// This is currently set to the date of the LetsEncrypt ALPN revocation event of Jan 2022:
|
||||
// https://community.letsencrypt.org/t/questions-about-renewing-before-tls-alpn-01-revocations/170449
|
||||
//
|
||||
// If there's another revocation event, bump this again.
|
||||
var certReissueAfter = time.Unix(1643226768, 0)
|
||||
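As a quick sanity check (not part of the original source), the pinned timestamp above decodes to 26 January 2022 UTC, the revocation event the comment refers to:

package main

import (
	"fmt"
	"time"
)

func main() {
	fmt.Println(time.Unix(1643226768, 0).UTC().Format(time.RFC3339))
	// 2022-01-26T19:52:48Z
}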
|
||||
var (
|
||||
mu sync.Mutex
|
||||
state = map[nodePair]pairStatus{}
|
||||
lastDERPMap *tailcfg.DERPMap
|
||||
lastDERPMapAt time.Time
|
||||
certs = map[string]*x509.Certificate{}
|
||||
)
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
p := prober.New().WithSpread(*spread).WithOnce(*probeOnce).WithMetricNamespace("derpprobe")
|
||||
dp, err := prober.DERP(p, *derpMapURL, *interval, *interval, *interval)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
p.Run("derpmap-probe", *interval, nil, dp.ProbeMap)
|
||||
// proactively load the DERP map. Nothing terrible happens if this fails, so we ignore
|
||||
// the error. The Slack bot will print a notification that the DERP map was empty.
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
||||
defer cancel()
|
||||
_, _ = getDERPMap(ctx)
|
||||
|
||||
if *probeOnce {
|
||||
log.Printf("Waiting for all probes (may take up to 1m)")
|
||||
p.Wait()
|
||||
|
||||
st := getOverallStatus(p)
|
||||
log.Printf("Starting probe (may take up to 1m)")
|
||||
probe()
|
||||
log.Printf("Probe results:")
|
||||
st := getOverallStatus()
|
||||
for _, s := range st.good {
|
||||
log.Printf("good: %s", s)
|
||||
}
|
||||
|
@ -50,10 +78,15 @@ func main() {
|
|||
return
|
||||
}
|
||||
|
||||
mux := http.NewServeMux()
|
||||
tsweb.Debugger(mux)
|
||||
mux.HandleFunc("/", http.HandlerFunc(serveFunc(p)))
|
||||
log.Fatal(http.ListenAndServe(*listen, mux))
|
||||
go probeLoop()
|
||||
go slackLoop()
|
||||
log.Fatal(http.ListenAndServe(*listen, http.HandlerFunc(serve)))
|
||||
}
|
||||
|
||||
func setCert(name string, cert *x509.Certificate) {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
certs[name] = cert
|
||||
}
|
||||
|
||||
type overallStatus struct {
|
||||
|
@ -68,43 +101,471 @@ func (st *overallStatus) addGoodf(format string, a ...any) {
|
|||
st.good = append(st.good, fmt.Sprintf(format, a...))
|
||||
}
|
||||
|
||||
func getOverallStatus(p *prober.Prober) (o overallStatus) {
|
||||
for p, i := range p.ProbeInfo() {
|
||||
if i.End.IsZero() {
|
||||
// Do not show probes that have not finished yet.
|
||||
continue
|
||||
}
|
||||
if i.Result {
|
||||
o.addGoodf("%s: %s", p, i.Latency)
|
||||
} else {
|
||||
o.addBadf("%s: %s", p, i.Error)
|
||||
func getOverallStatus() (o overallStatus) {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
if lastDERPMap == nil {
|
||||
o.addBadf("no DERP map")
|
||||
return
|
||||
}
|
||||
now := time.Now()
|
||||
if age := now.Sub(lastDERPMapAt); age > time.Minute {
|
||||
o.addBadf("DERPMap hasn't been successfully refreshed in %v", age.Round(time.Second))
|
||||
}
|
||||
|
||||
addPairMeta := func(pair nodePair) {
|
||||
st, ok := state[pair]
|
||||
age := now.Sub(st.at).Round(time.Second)
|
||||
switch {
|
||||
case !ok:
|
||||
o.addBadf("no state for %v", pair)
|
||||
case st.err != nil:
|
||||
o.addBadf("%v: %v", pair, st.err)
|
||||
case age > 90*time.Second:
|
||||
o.addBadf("%v: update is %v old", pair, age)
|
||||
default:
|
||||
o.addGoodf("%v: %v, %v ago", pair, st.latency.Round(time.Millisecond), age)
|
||||
}
|
||||
}
|
||||
|
||||
sort.Strings(o.bad)
|
||||
sort.Strings(o.good)
|
||||
for _, reg := range sortedRegions(lastDERPMap) {
|
||||
for _, from := range reg.Nodes {
|
||||
addPairMeta(nodePair{"UDP", from.Name})
|
||||
for _, to := range reg.Nodes {
|
||||
addPairMeta(nodePair{from.Name, to.Name})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var subjs []string
|
||||
for k := range certs {
|
||||
subjs = append(subjs, k)
|
||||
}
|
||||
sort.Strings(subjs)
|
||||
|
||||
soon := time.Now().Add(14 * 24 * time.Hour) // in 2 weeks; autocert does 30 days by default
|
||||
for _, s := range subjs {
|
||||
cert := certs[s]
|
||||
if cert.NotBefore.Before(certReissueAfter) {
|
||||
o.addBadf("cert %q needs reissuing; NotBefore=%v", s, cert.NotBefore.Format(time.RFC3339))
|
||||
continue
|
||||
}
|
||||
if cert.NotAfter.Before(soon) {
|
||||
o.addBadf("cert %q expiring soon (%v); wasn't auto-refreshed", s, cert.NotAfter.Format(time.RFC3339))
|
||||
continue
|
||||
}
|
||||
o.addGoodf("cert %q good %v - %v", s, cert.NotBefore.Format(time.RFC3339), cert.NotAfter.Format(time.RFC3339))
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func serveFunc(p *prober.Prober) func(w http.ResponseWriter, r *http.Request) {
|
||||
return func(w http.ResponseWriter, r *http.Request) {
|
||||
st := getOverallStatus(p)
|
||||
summary := "All good"
|
||||
if (float64(len(st.bad)) / float64(len(st.bad)+len(st.good))) > 0.25 {
|
||||
// Returning a 500 allows monitoring this server externally and configuring
|
||||
// an alert on HTTP response code.
|
||||
w.WriteHeader(500)
|
||||
summary = fmt.Sprintf("%d problems", len(st.bad))
|
||||
func serve(w http.ResponseWriter, r *http.Request) {
|
||||
st := getOverallStatus()
|
||||
summary := "All good"
|
||||
if (float64(len(st.bad)) / float64(len(st.bad)+len(st.good))) > 0.25 {
|
||||
// This will generate an alert and page a human.
|
||||
// It also ends up in Slack, but as part of the alert handling pipeline not
|
||||
// because we generated a Slack notification from here.
|
||||
w.WriteHeader(500)
|
||||
summary = fmt.Sprintf("%d problems", len(st.bad))
|
||||
}
|
||||
|
||||
io.WriteString(w, "<html><head><style>.bad { font-weight: bold; color: #700; }</style></head>\n")
|
||||
fmt.Fprintf(w, "<body><h1>derp probe</h1>\n%s:<ul>", summary)
|
||||
for _, s := range st.bad {
|
||||
fmt.Fprintf(w, "<li class=bad>%s</li>\n", html.EscapeString(s))
|
||||
}
|
||||
for _, s := range st.good {
|
||||
fmt.Fprintf(w, "<li>%s</li>\n", html.EscapeString(s))
|
||||
}
|
||||
io.WriteString(w, "</ul></body></html>\n")
|
||||
}
|
||||
|
||||
func notifySlack(text string) error {
|
||||
type SlackRequestBody struct {
|
||||
Text string `json:"text"`
|
||||
}
|
||||
|
||||
slackBody, err := json.Marshal(SlackRequestBody{Text: text})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
webhookUrl := os.Getenv("SLACK_WEBHOOK")
|
||||
if webhookUrl == "" {
|
||||
return errors.New("No SLACK_WEBHOOK configured")
|
||||
}
|
||||
|
||||
req, err := http.NewRequest("POST", webhookUrl, bytes.NewReader(slackBody))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
req.Header.Add("Content-Type", "application/json")
|
||||
|
||||
client := &http.Client{Timeout: 10 * time.Second}
|
||||
resp, err := client.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode != 200 {
|
||||
return errors.New(resp.Status)
|
||||
}
|
||||
|
||||
body, _ := io.ReadAll(resp.Body)
|
||||
if string(body) != "ok" {
|
||||
return errors.New("Non-ok response returned from Slack")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// We only page a human if it looks like there is a significant outage across multiple regions.
|
||||
// To Slack, we report all failures great and small.
|
||||
func slackLoop() {
|
||||
inBadState := false
|
||||
for {
|
||||
time.Sleep(time.Second * 30)
|
||||
st := getOverallStatus()
|
||||
|
||||
if len(st.bad) > 0 && !inBadState {
|
||||
err := notifySlack(strings.Join(st.bad, "\n"))
|
||||
if err == nil {
|
||||
inBadState = true
|
||||
} else {
|
||||
log.Printf("%d problems, notify Slack failed: %v", len(st.bad), err)
|
||||
}
|
||||
}
|
||||
|
||||
io.WriteString(w, "<html><head><style>.bad { font-weight: bold; color: #700; }</style></head>\n")
|
||||
fmt.Fprintf(w, "<body><h1>derp probe</h1>\n%s:<ul>", summary)
|
||||
for _, s := range st.bad {
|
||||
fmt.Fprintf(w, "<li class=bad>%s</li>\n", html.EscapeString(s))
|
||||
if len(st.bad) == 0 && inBadState {
|
||||
err := notifySlack("All DERPs recovered.")
|
||||
if err == nil {
|
||||
inBadState = false
|
||||
}
|
||||
}
|
||||
for _, s := range st.good {
|
||||
fmt.Fprintf(w, "<li>%s</li>\n", html.EscapeString(s))
|
||||
}
|
||||
io.WriteString(w, "</ul></body></html>\n")
|
||||
}
|
||||
}
|
||||
|
||||
func sortedRegions(dm *tailcfg.DERPMap) []*tailcfg.DERPRegion {
|
||||
ret := make([]*tailcfg.DERPRegion, 0, len(dm.Regions))
|
||||
for _, r := range dm.Regions {
|
||||
ret = append(ret, r)
|
||||
}
|
||||
sort.Slice(ret, func(i, j int) bool { return ret[i].RegionID < ret[j].RegionID })
|
||||
return ret
|
||||
}
|
||||
|
||||
type nodePair struct {
|
||||
from string // DERPNode.Name, or "UDP" for a STUN query to 'to'
|
||||
to string // DERPNode.Name
|
||||
}
|
||||
|
||||
func (p nodePair) String() string { return fmt.Sprintf("(%s→%s)", p.from, p.to) }
|
||||
|
||||
type pairStatus struct {
|
||||
err error
|
||||
latency time.Duration
|
||||
at time.Time
|
||||
}
|
||||
|
||||
func setDERPMap(dm *tailcfg.DERPMap) {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
lastDERPMap = dm
|
||||
lastDERPMapAt = time.Now()
|
||||
}
|
||||
|
||||
func setState(p nodePair, latency time.Duration, err error) {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
st := pairStatus{
|
||||
err: err,
|
||||
latency: latency,
|
||||
at: time.Now(),
|
||||
}
|
||||
state[p] = st
|
||||
if err != nil {
|
||||
log.Printf("%+v error: %v", p, err)
|
||||
} else {
|
||||
log.Printf("%+v: %v", p, latency.Round(time.Millisecond))
|
||||
}
|
||||
}
|
||||
|
||||
func probeLoop() {
|
||||
ticker := time.NewTicker(15 * time.Second)
|
||||
for {
|
||||
err := probe()
|
||||
if err != nil {
|
||||
log.Printf("probe: %v", err)
|
||||
}
|
||||
<-ticker.C
|
||||
}
|
||||
}
|
||||
|
||||
func probe() error {
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
|
||||
defer cancel()
|
||||
dm, err := getDERPMap(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(len(dm.Regions))
|
||||
for _, reg := range dm.Regions {
|
||||
reg := reg
|
||||
go func() {
|
||||
defer wg.Done()
|
||||
for _, from := range reg.Nodes {
|
||||
latency, err := probeUDP(ctx, dm, from)
|
||||
setState(nodePair{"UDP", from.Name}, latency, err)
|
||||
for _, to := range reg.Nodes {
|
||||
latency, err := probeNodePair(ctx, dm, from, to)
|
||||
setState(nodePair{from.Name, to.Name}, latency, err)
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
func probeUDP(ctx context.Context, dm *tailcfg.DERPMap, n *tailcfg.DERPNode) (latency time.Duration, err error) {
|
||||
pc, err := net.ListenPacket("udp", ":0")
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer pc.Close()
|
||||
uc := pc.(*net.UDPConn)
|
||||
|
||||
tx := stun.NewTxID()
|
||||
req := stun.Request(tx)
|
||||
|
||||
for _, ipStr := range []string{n.IPv4, n.IPv6} {
|
||||
if ipStr == "" {
|
||||
continue
|
||||
}
|
||||
port := n.STUNPort
|
||||
if port == -1 {
|
||||
continue
|
||||
}
|
||||
if port == 0 {
|
||||
port = 3478
|
||||
}
|
||||
for {
|
||||
ip := net.ParseIP(ipStr)
|
||||
_, err := uc.WriteToUDP(req, &net.UDPAddr{IP: ip, Port: port})
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
buf := make([]byte, 1500)
|
||||
uc.SetReadDeadline(time.Now().Add(2 * time.Second))
|
||||
t0 := time.Now()
|
||||
n, _, err := uc.ReadFromUDP(buf)
|
||||
d := time.Since(t0)
|
||||
if err != nil {
|
||||
if ctx.Err() != nil {
|
||||
return 0, fmt.Errorf("timeout reading from %v: %v", ip, err)
|
||||
}
|
||||
if d < time.Second {
|
||||
return 0, fmt.Errorf("error reading from %v: %v", ip, err)
|
||||
}
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
continue
|
||||
}
|
||||
txBack, _, err := stun.ParseResponse(buf[:n])
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("parsing STUN response from %v: %v", ip, err)
|
||||
}
|
||||
if txBack != tx {
|
||||
return 0, fmt.Errorf("read wrong tx back from %v", ip)
|
||||
}
|
||||
if latency == 0 || d < latency {
|
||||
latency = d
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
return latency, nil
|
||||
}
|
||||
|
||||
func probeNodePair(ctx context.Context, dm *tailcfg.DERPMap, from, to *tailcfg.DERPNode) (latency time.Duration, err error) {
|
||||
// The passed in context is a minute for the whole region. The
|
||||
// idea is that each node pair in the region will be done
|
||||
// serially and regularly in the future, reusing connections
|
||||
// (at least in the happy path). For now they don't reuse
|
||||
// connections and probe at most once every 15 seconds. We
|
||||
// bound the duration of a single node pair within a region
|
||||
// so one bad one can't starve others.
|
||||
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
|
||||
defer cancel()
|
||||
|
||||
fromc, err := newConn(ctx, dm, from)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer fromc.Close()
|
||||
toc, err := newConn(ctx, dm, to)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer toc.Close()
|
||||
|
||||
// Wait a bit for from's node to hear about to existing on the
|
||||
// other node in the region, in the case where the two nodes
|
||||
// are different.
|
||||
if from.Name != to.Name {
|
||||
time.Sleep(100 * time.Millisecond) // pretty arbitrary
|
||||
}
|
||||
|
||||
// Make a random packet
|
||||
pkt := make([]byte, 8)
|
||||
crand.Read(pkt)
|
||||
|
||||
t0 := time.Now()
|
||||
|
||||
// Send the random packet.
|
||||
sendc := make(chan error, 1)
|
||||
go func() {
|
||||
sendc <- fromc.Send(toc.SelfPublicKey(), pkt)
|
||||
}()
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return 0, fmt.Errorf("timeout sending via %q: %w", from.Name, ctx.Err())
|
||||
case err := <-sendc:
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("error sending via %q: %w", from.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Receive the random packet.
|
||||
recvc := make(chan any, 1) // either derp.ReceivedPacket or error
|
||||
go func() {
|
||||
for {
|
||||
m, err := toc.Recv()
|
||||
if err != nil {
|
||||
recvc <- err
|
||||
return
|
||||
}
|
||||
switch v := m.(type) {
|
||||
case derp.ReceivedPacket:
|
||||
recvc <- v
|
||||
default:
|
||||
log.Printf("%v: ignoring Recv frame type %T", to.Name, v)
|
||||
// Loop.
|
||||
}
|
||||
}
|
||||
}()
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return 0, fmt.Errorf("timeout receiving from %q: %w", to.Name, ctx.Err())
|
||||
case v := <-recvc:
|
||||
if err, ok := v.(error); ok {
|
||||
return 0, fmt.Errorf("error receiving from %q: %w", to.Name, err)
|
||||
}
|
||||
p := v.(derp.ReceivedPacket)
|
||||
if p.Source != fromc.SelfPublicKey() {
|
||||
return 0, fmt.Errorf("got data packet from unexpected source, %v", p.Source)
|
||||
}
|
||||
if !bytes.Equal(p.Data, pkt) {
|
||||
return 0, fmt.Errorf("unexpected data packet %q", p.Data)
|
||||
}
|
||||
}
|
||||
return time.Since(t0), nil
|
||||
}
|
||||
|
||||
func newConn(ctx context.Context, dm *tailcfg.DERPMap, n *tailcfg.DERPNode) (*derphttp.Client, error) {
|
||||
priv := key.NewNode()
|
||||
dc := derphttp.NewRegionClient(priv, log.Printf, func() *tailcfg.DERPRegion {
|
||||
rid := n.RegionID
|
||||
return &tailcfg.DERPRegion{
|
||||
RegionID: rid,
|
||||
RegionCode: fmt.Sprintf("%s-%s", dm.Regions[rid].RegionCode, n.Name),
|
||||
RegionName: dm.Regions[rid].RegionName,
|
||||
Nodes: []*tailcfg.DERPNode{n},
|
||||
}
|
||||
})
|
||||
dc.IsProber = true
|
||||
err := dc.Connect(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cs, ok := dc.TLSConnectionState()
|
||||
if !ok {
|
||||
dc.Close()
|
||||
return nil, errors.New("no TLS state")
|
||||
}
|
||||
if len(cs.PeerCertificates) == 0 {
|
||||
dc.Close()
|
||||
return nil, errors.New("no peer certificates")
|
||||
}
|
||||
if cs.ServerName != n.HostName {
|
||||
dc.Close()
|
||||
return nil, fmt.Errorf("TLS server name %q != derp hostname %q", cs.ServerName, n.HostName)
|
||||
}
|
||||
setCert(cs.ServerName, cs.PeerCertificates[0])
|
||||
|
||||
errc := make(chan error, 1)
|
||||
go func() {
|
||||
m, err := dc.Recv()
|
||||
if err != nil {
|
||||
errc <- err
|
||||
return
|
||||
}
|
||||
switch m.(type) {
|
||||
case derp.ServerInfoMessage:
|
||||
errc <- nil
|
||||
default:
|
||||
errc <- fmt.Errorf("unexpected first message type %T", errc)
|
||||
}
|
||||
}()
|
||||
select {
|
||||
case err := <-errc:
|
||||
if err != nil {
|
||||
go dc.Close()
|
||||
return nil, err
|
||||
}
|
||||
case <-ctx.Done():
|
||||
go dc.Close()
|
||||
return nil, fmt.Errorf("timeout waiting for ServerInfoMessage: %w", ctx.Err())
|
||||
}
|
||||
return dc, nil
|
||||
}
|
||||
|
||||
var httpOrFileClient = &http.Client{Transport: httpOrFileTransport()}
|
||||
|
||||
func httpOrFileTransport() http.RoundTripper {
|
||||
tr := http.DefaultTransport.(*http.Transport).Clone()
|
||||
tr.RegisterProtocol("file", http.NewFileTransport(http.Dir("/")))
|
||||
return tr
|
||||
}
|
||||
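Because the transport above registers a handler for the file scheme, the -derp-map flag also accepts file:// URLs. A small sketch of fetching a local copy with the same kind of client; the path is hypothetical:

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	tr := http.DefaultTransport.(*http.Transport).Clone()
	tr.RegisterProtocol("file", http.NewFileTransport(http.Dir("/")))
	c := &http.Client{Transport: tr}

	res, err := c.Get("file:///tmp/derpmap.json") // hypothetical local copy of the DERP map
	if err != nil {
		panic(err)
	}
	defer res.Body.Close()
	b, _ := io.ReadAll(res.Body)
	fmt.Printf("read %d bytes\n", len(b))
}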
|
||||
func getDERPMap(ctx context.Context) (*tailcfg.DERPMap, error) {
|
||||
req, err := http.NewRequestWithContext(ctx, "GET", *derpMapURL, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
res, err := httpOrFileClient.Do(req)
|
||||
if err != nil {
|
||||
mu.Lock()
|
||||
defer mu.Unlock()
|
||||
if lastDERPMap != nil && time.Since(lastDERPMapAt) < 10*time.Minute {
|
||||
// Assume that control is restarting and use
|
||||
// the same one for a bit.
|
||||
return lastDERPMap, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode != 200 {
|
||||
return nil, fmt.Errorf("fetching %s: %s", *derpMapURL, res.Status)
|
||||
}
|
||||
dm := new(tailcfg.DERPMap)
|
||||
if err := json.NewDecoder(res.Body).Decode(dm); err != nil {
|
||||
return nil, fmt.Errorf("decoding %s JSON: %v", *derpMapURL, err)
|
||||
}
|
||||
setDERPMap(dm)
|
||||
return dm, nil
|
||||
}
|
||||
|
|
|
@ -1,51 +0,0 @@
|
|||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
// The dist command builds Tailscale release packages for distribution.
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"flag"
|
||||
"log"
|
||||
"os"
|
||||
|
||||
"tailscale.com/release/dist"
|
||||
"tailscale.com/release/dist/cli"
|
||||
"tailscale.com/release/dist/synology"
|
||||
"tailscale.com/release/dist/unixpkgs"
|
||||
)
|
||||
|
||||
var synologyPackageCenter bool
|
||||
|
||||
func getTargets() ([]dist.Target, error) {
|
||||
var ret []dist.Target
|
||||
|
||||
ret = append(ret, unixpkgs.Targets()...)
|
||||
// Synology packages can be built either for sideloading, or for
|
||||
// distribution by Synology in their package center. When
|
||||
// distributed through the package center, apps can request
|
||||
// additional permissions to use a tuntap interface and control
|
||||
// the NAS's network stack, rather than be forced to run in
|
||||
// userspace mode.
|
||||
//
|
||||
// Since only we can provide packages to Synology for
|
||||
// distribution, we default to building the "sideload" variant of
|
||||
// packages that we distribute on pkgs.tailscale.com.
|
||||
ret = append(ret, synology.Targets(synologyPackageCenter)...)
|
||||
return ret, nil
|
||||
}
|
||||
|
||||
func main() {
|
||||
cmd := cli.CLI(getTargets)
|
||||
for _, subcmd := range cmd.Subcommands {
|
||||
if subcmd.Name == "build" {
|
||||
subcmd.FlagSet.BoolVar(&synologyPackageCenter, "synology-package-center", false, "build synology packages with extra metadata for the official package center")
|
||||
}
|
||||
}
|
||||
|
||||
if err := cmd.ParseAndRun(context.Background(), os.Args[1:]); err != nil && !errors.Is(err, flag.ErrHelp) {
|
||||
log.Fatal(err)
|
||||
}
|
||||
}
|
|
@ -2,7 +2,7 @@
|
|||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
// get-authkey allocates an authkey using an OAuth API client
|
||||
// https://tailscale.com/s/oauth-clients and prints it
|
||||
// https://tailscale.com/kb/1215/oauth-clients/ and prints it
|
||||
// to stdout for scripts to capture and use.
|
||||
package main
|
||||
|
||||
|
@ -16,7 +16,6 @@ import (
|
|||
|
||||
"golang.org/x/oauth2/clientcredentials"
|
||||
"tailscale.com/client/tailscale"
|
||||
"tailscale.com/util/cmpx"
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
@ -30,29 +29,28 @@ func main() {
|
|||
tags := flag.String("tags", "", "comma-separated list of tags to apply to the authkey")
|
||||
flag.Parse()
|
||||
|
||||
clientID := os.Getenv("TS_API_CLIENT_ID")
|
||||
clientId := os.Getenv("TS_API_CLIENT_ID")
|
||||
clientSecret := os.Getenv("TS_API_CLIENT_SECRET")
|
||||
if clientID == "" || clientSecret == "" {
|
||||
if clientId == "" || clientSecret == "" {
|
||||
log.Fatal("TS_API_CLIENT_ID and TS_API_CLIENT_SECRET must be set")
|
||||
}
|
||||
|
||||
if *tags == "" {
|
||||
log.Fatal("at least one tag must be specified")
|
||||
baseUrl := os.Getenv("TS_BASE_URL")
|
||||
if baseUrl == "" {
|
||||
baseUrl = "https://api.tailscale.com"
|
||||
}
|
||||
|
||||
baseURL := cmpx.Or(os.Getenv("TS_BASE_URL"), "https://api.tailscale.com")
|
||||
|
||||
credentials := clientcredentials.Config{
|
||||
ClientID: clientID,
|
||||
ClientID: clientId,
|
||||
ClientSecret: clientSecret,
|
||||
TokenURL: baseURL + "/api/v2/oauth/token",
|
||||
TokenURL: baseUrl + "/api/v2/oauth/token",
|
||||
Scopes: []string{"device"},
|
||||
}
|
||||
|
||||
ctx := context.Background()
|
||||
tsClient := tailscale.NewClient("-", nil)
|
||||
tsClient.HTTPClient = credentials.Client(ctx)
|
||||
tsClient.BaseURL = baseURL
|
||||
tsClient.BaseURL = baseUrl
|
||||
|
||||
caps := tailscale.KeyCapabilities{
|
||||
Devices: tailscale.KeyDeviceCapabilities{
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Command gitops-pusher allows users to use a GitOps flow for managing Tailscale ACLs.
|
||||
//
|
||||
|
@ -22,8 +23,6 @@ import (
|
|||
|
||||
"github.com/peterbourgon/ff/v3/ffcli"
|
||||
"github.com/tailscale/hujson"
|
||||
"golang.org/x/oauth2/clientcredentials"
|
||||
"tailscale.com/util/httpm"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -43,9 +42,9 @@ func modifiedExternallyError() {
|
|||
}
|
||||
}
|
||||
|
||||
func apply(cache *Cache, client *http.Client, tailnet, apiKey string) func(context.Context, []string) error {
|
||||
func apply(cache *Cache, tailnet, apiKey string) func(context.Context, []string) error {
|
||||
return func(ctx context.Context, args []string) error {
|
||||
controlEtag, err := getACLETag(ctx, client, tailnet, apiKey)
|
||||
controlEtag, err := getACLETag(ctx, tailnet, apiKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -74,7 +73,7 @@ func apply(cache *Cache, client *http.Client, tailnet, apiKey string) func(conte
|
|||
return nil
|
||||
}
|
||||
|
||||
if err := applyNewACL(ctx, client, tailnet, apiKey, *policyFname, controlEtag); err != nil {
|
||||
if err := applyNewACL(ctx, tailnet, apiKey, *policyFname, controlEtag); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -84,9 +83,9 @@ func apply(cache *Cache, client *http.Client, tailnet, apiKey string) func(conte
|
|||
}
|
||||
}
|
||||
|
||||
func test(cache *Cache, client *http.Client, tailnet, apiKey string) func(context.Context, []string) error {
|
||||
func test(cache *Cache, tailnet, apiKey string) func(context.Context, []string) error {
|
||||
return func(ctx context.Context, args []string) error {
|
||||
controlEtag, err := getACLETag(ctx, client, tailnet, apiKey)
|
||||
controlEtag, err := getACLETag(ctx, tailnet, apiKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -114,16 +113,16 @@ func test(cache *Cache, client *http.Client, tailnet, apiKey string) func(contex
|
|||
return nil
|
||||
}
|
||||
|
||||
if err := testNewACLs(ctx, client, tailnet, apiKey, *policyFname); err != nil {
|
||||
if err := testNewACLs(ctx, tailnet, apiKey, *policyFname); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
func getChecksums(cache *Cache, client *http.Client, tailnet, apiKey string) func(context.Context, []string) error {
|
||||
func getChecksums(cache *Cache, tailnet, apiKey string) func(context.Context, []string) error {
|
||||
return func(ctx context.Context, args []string) error {
|
||||
controlEtag, err := getACLETag(ctx, client, tailnet, apiKey)
|
||||
controlEtag, err := getACLETag(ctx, tailnet, apiKey)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -152,24 +151,8 @@ func main() {
|
|||
log.Fatal("set envvar TS_TAILNET to your tailnet's name")
|
||||
}
|
||||
apiKey, ok := os.LookupEnv("TS_API_KEY")
|
||||
oauthId, oiok := os.LookupEnv("TS_OAUTH_ID")
|
||||
oauthSecret, osok := os.LookupEnv("TS_OAUTH_SECRET")
|
||||
if !ok && (!oiok || !osok) {
|
||||
log.Fatal("set envvar TS_API_KEY to your Tailscale API key or TS_OAUTH_ID and TS_OAUTH_SECRET to your Tailscale OAuth ID and Secret")
|
||||
}
|
||||
if ok && (oiok || osok) {
|
||||
log.Fatal("set either the envvar TS_API_KEY or TS_OAUTH_ID and TS_OAUTH_SECRET")
|
||||
}
|
||||
var client *http.Client
|
||||
if oiok {
|
||||
oauthConfig := &clientcredentials.Config{
|
||||
ClientID: oauthId,
|
||||
ClientSecret: oauthSecret,
|
||||
TokenURL: fmt.Sprintf("https://%s/api/v2/oauth/token", *apiServer),
|
||||
}
|
||||
client = oauthConfig.Client(context.Background())
|
||||
} else {
|
||||
client = http.DefaultClient
|
||||
if !ok {
|
||||
log.Fatal("set envvar TS_API_KEY to your Tailscale API key")
|
||||
}
|
||||
cache, err := LoadCache(*cacheFname)
|
||||
if err != nil {
|
||||
|
@ -186,7 +169,7 @@ func main() {
|
|||
ShortUsage: "gitops-pusher [options] apply",
|
||||
ShortHelp: "Pushes changes to CONTROL",
|
||||
LongHelp: `Pushes changes to CONTROL`,
|
||||
Exec: apply(cache, client, tailnet, apiKey),
|
||||
Exec: apply(cache, tailnet, apiKey),
|
||||
}
|
||||
|
||||
testCmd := &ffcli.Command{
|
||||
|
@ -194,7 +177,7 @@ func main() {
|
|||
ShortUsage: "gitops-pusher [options] test",
|
||||
ShortHelp: "Tests ACL changes",
|
||||
LongHelp: "Tests ACL changes",
|
||||
Exec: test(cache, client, tailnet, apiKey),
|
||||
Exec: test(cache, tailnet, apiKey),
|
||||
}
|
||||
|
||||
cksumCmd := &ffcli.Command{
|
||||
|
@ -202,7 +185,7 @@ func main() {
|
|||
ShortUsage: "Shows checksums of ACL files",
|
||||
ShortHelp: "Fetch checksum of CONTROL's ACL and the local ACL for comparison",
|
||||
LongHelp: "Fetch checksum of CONTROL's ACL and the local ACL for comparison",
|
||||
Exec: getChecksums(cache, client, tailnet, apiKey),
|
||||
Exec: getChecksums(cache, tailnet, apiKey),
|
||||
}
|
||||
|
||||
root := &ffcli.Command{
|
||||
|
@ -245,14 +228,14 @@ func sumFile(fname string) (string, error) {
|
|||
return fmt.Sprintf("%x", h.Sum(nil)), nil
|
||||
}
|
||||
|
||||
func applyNewACL(ctx context.Context, client *http.Client, tailnet, apiKey, policyFname, oldEtag string) error {
|
||||
func applyNewACL(ctx context.Context, tailnet, apiKey, policyFname, oldEtag string) error {
|
||||
fin, err := os.Open(policyFname)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer fin.Close()
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, httpm.POST, fmt.Sprintf("https://%s/api/v2/tailnet/%s/acl", *apiServer, tailnet), fin)
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("https://%s/api/v2/tailnet/%s/acl", *apiServer, tailnet), fin)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -261,7 +244,7 @@ func applyNewACL(ctx context.Context, client *http.Client, tailnet, apiKey, poli
|
|||
req.Header.Set("Content-Type", "application/hujson")
|
||||
req.Header.Set("If-Match", `"`+oldEtag+`"`)
|
||||
|
||||
resp, err := client.Do(req)
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -282,7 +265,7 @@ func applyNewACL(ctx context.Context, client *http.Client, tailnet, apiKey, poli
|
|||
return nil
|
||||
}
|
||||
|
||||
func testNewACLs(ctx context.Context, client *http.Client, tailnet, apiKey, policyFname string) error {
|
||||
func testNewACLs(ctx context.Context, tailnet, apiKey, policyFname string) error {
|
||||
data, err := os.ReadFile(policyFname)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -292,7 +275,7 @@ func testNewACLs(ctx context.Context, client *http.Client, tailnet, apiKey, poli
|
|||
return err
|
||||
}
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, httpm.POST, fmt.Sprintf("https://%s/api/v2/tailnet/%s/acl/validate", *apiServer, tailnet), bytes.NewBuffer(data))
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("https://%s/api/v2/tailnet/%s/acl/validate", *apiServer, tailnet), bytes.NewBuffer(data))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -300,7 +283,7 @@ func testNewACLs(ctx context.Context, client *http.Client, tailnet, apiKey, poli
|
|||
req.SetBasicAuth(apiKey, "")
|
||||
req.Header.Set("Content-Type", "application/hujson")
|
||||
|
||||
resp, err := client.Do(req)
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -363,8 +346,8 @@ type ACLTestErrorDetail struct {
|
|||
Errors []string `json:"errors"`
|
||||
}
|
||||
|
||||
func getACLETag(ctx context.Context, client *http.Client, tailnet, apiKey string) (string, error) {
|
||||
req, err := http.NewRequestWithContext(ctx, httpm.GET, fmt.Sprintf("https://%s/api/v2/tailnet/%s/acl", *apiServer, tailnet), nil)
|
||||
func getACLETag(ctx context.Context, tailnet, apiKey string) (string, error) {
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("https://%s/api/v2/tailnet/%s/acl", *apiServer, tailnet), nil)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
@ -372,7 +355,7 @@ func getACLETag(ctx context.Context, client *http.Client, tailnet, apiKey string
|
|||
req.SetBasicAuth(apiKey, "")
|
||||
req.Header.Set("Accept", "application/hujson")
|
||||
|
||||
resp, err := client.Do(req)
|
||||
resp, err := http.DefaultClient.Do(req)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
// Copyright (c) 2021 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// The hello binary runs hello.ts.net.
|
||||
package main // import "tailscale.com/cmd/hello"
|
||||
|
|
|
@ -1,24 +0,0 @@
|
|||
# Copyright (c) Tailscale Inc & AUTHORS
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: tailscale-auth-proxy
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["users", "groups"]
|
||||
verbs: ["impersonate"]
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: tailscale-auth-proxy
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: operator
|
||||
namespace: tailscale
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: tailscale-auth-proxy
|
||||
apiGroup: rbac.authorization.k8s.io
|
|
@ -1,5 +1,6 @@
|
|||
# Copyright (c) Tailscale Inc & AUTHORS
|
||||
# SPDX-License-Identifier: BSD-3-Clause
|
||||
# Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
|
||||
# Use of this source code is governed by a BSD-style
|
||||
# license that can be found in the LICENSE file.
|
||||
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
|
@ -124,7 +125,7 @@ spec:
|
|||
secretName: operator-oauth
|
||||
containers:
|
||||
- name: operator
|
||||
image: tailscale/k8s-operator:unstable
|
||||
image: tailscale/k8s-operator:latest
|
||||
resources:
|
||||
requests:
|
||||
cpu: 500m
|
||||
|
@ -145,11 +146,9 @@ spec:
|
|||
- name: CLIENT_SECRET_FILE
|
||||
value: /oauth/client_secret
|
||||
- name: PROXY_IMAGE
|
||||
value: tailscale/tailscale:unstable
|
||||
value: tailscale/tailscale:latest
|
||||
- name: PROXY_TAGS
|
||||
value: tag:k8s
|
||||
- name: AUTH_PROXY
|
||||
value: "false"
|
||||
volumeMounts:
|
||||
- name: oauth
|
||||
mountPath: /oauth
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// tailscale-operator provides a way to expose services running in a Kubernetes
|
||||
// cluster to your Tailnet.
|
||||
|
@ -7,10 +8,8 @@ package main
|
|||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
_ "embed"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
@ -25,8 +24,8 @@ import (
|
|||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
"k8s.io/client-go/transport"
|
||||
"sigs.k8s.io/controller-runtime/pkg/builder"
|
||||
"sigs.k8s.io/controller-runtime/pkg/cache"
|
||||
"sigs.k8s.io/controller-runtime/pkg/client"
|
||||
|
@ -37,16 +36,13 @@ import (
|
|||
"sigs.k8s.io/controller-runtime/pkg/manager"
|
||||
"sigs.k8s.io/controller-runtime/pkg/manager/signals"
|
||||
"sigs.k8s.io/controller-runtime/pkg/reconcile"
|
||||
"sigs.k8s.io/controller-runtime/pkg/source"
|
||||
"sigs.k8s.io/yaml"
|
||||
"tailscale.com/client/tailscale"
|
||||
"tailscale.com/hostinfo"
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/ipn/store/kubestore"
|
||||
"tailscale.com/tsnet"
|
||||
"tailscale.com/types/logger"
|
||||
"tailscale.com/types/opt"
|
||||
"tailscale.com/util/dnsname"
|
||||
"tailscale.com/version"
|
||||
)
|
||||
|
||||
func main() {
|
||||
|
@ -55,17 +51,15 @@ func main() {
|
|||
tailscale.I_Acknowledge_This_API_Is_Unstable = true
|
||||
|
||||
var (
|
||||
hostname = defaultEnv("OPERATOR_HOSTNAME", "tailscale-operator")
|
||||
kubeSecret = defaultEnv("OPERATOR_SECRET", "")
|
||||
operatorTags = defaultEnv("OPERATOR_INITIAL_TAGS", "tag:k8s-operator")
|
||||
tsNamespace = defaultEnv("OPERATOR_NAMESPACE", "")
|
||||
tslogging = defaultEnv("OPERATOR_LOGGING", "info")
|
||||
clientIDPath = defaultEnv("CLIENT_ID_FILE", "")
|
||||
clientSecretPath = defaultEnv("CLIENT_SECRET_FILE", "")
|
||||
image = defaultEnv("PROXY_IMAGE", "tailscale/tailscale:latest")
|
||||
priorityClassName = defaultEnv("PROXY_PRIORITY_CLASS_NAME", "")
|
||||
tags = defaultEnv("PROXY_TAGS", "tag:k8s")
|
||||
shouldRunAuthProxy = defaultBool("AUTH_PROXY", false)
|
||||
hostname = defaultEnv("OPERATOR_HOSTNAME", "tailscale-operator")
|
||||
kubeSecret = defaultEnv("OPERATOR_SECRET", "")
|
||||
operatorTags = defaultEnv("OPERATOR_INITIAL_TAGS", "tag:k8s-operator")
|
||||
tsNamespace = defaultEnv("OPERATOR_NAMESPACE", "")
|
||||
tslogging = defaultEnv("OPERATOR_LOGGING", "info")
|
||||
clientIDPath = defaultEnv("CLIENT_ID_FILE", "")
|
||||
clientSecretPath = defaultEnv("CLIENT_SECRET_FILE", "")
|
||||
image = defaultEnv("PROXY_IMAGE", "tailscale/tailscale:latest")
|
||||
tags = defaultEnv("PROXY_TAGS", "tag:k8s")
|
||||
)
|
||||
|
||||
var opts []kzap.Opts
|
||||
|
@ -99,13 +93,6 @@ func main() {
|
|||
}
|
||||
tsClient := tailscale.NewClient("-", nil)
|
||||
tsClient.HTTPClient = credentials.Client(context.Background())
|
||||
|
||||
if shouldRunAuthProxy {
|
||||
hostinfo.SetApp("k8s-operator-proxy")
|
||||
} else {
|
||||
hostinfo.SetApp("k8s-operator")
|
||||
}
|
||||
|
||||
s := &tsnet.Server{
|
||||
Hostname: hostname,
|
||||
Logf: zlog.Named("tailscaled").Debugf,
|
||||
|
@ -168,7 +155,7 @@ waitOnline:
|
|||
loginDone = true
|
||||
case "NeedsMachineAuth":
|
||||
if !machineAuthShown {
|
||||
startlog.Infof("Machine approval required, please visit the admin panel to approve")
|
||||
startlog.Infof("Machine authorization required, please visit the admin panel to authorize")
|
||||
machineAuthShown = true
|
||||
}
|
||||
default:
|
||||
|
@ -183,33 +170,31 @@ waitOnline:
|
|||
// the cache that sits a few layers below the builder stuff, which will
|
||||
// implicitly filter what parts of the world the builder code gets to see at
|
||||
// all.
|
||||
nsFilter := cache.ByObject{
|
||||
Field: client.InNamespace(tsNamespace).AsSelector(),
|
||||
nsFilter := cache.ObjectSelector{
|
||||
Field: fields.SelectorFromSet(fields.Set{"metadata.namespace": tsNamespace}),
|
||||
}
|
||||
restConfig := config.GetConfigOrDie()
|
||||
mgr, err := manager.New(restConfig, manager.Options{
|
||||
Cache: cache.Options{
|
||||
ByObject: map[client.Object]cache.ByObject{
|
||||
mgr, err := manager.New(config.GetConfigOrDie(), manager.Options{
|
||||
NewCache: cache.BuilderWithOptions(cache.Options{
|
||||
SelectorsByObject: map[client.Object]cache.ObjectSelector{
|
||||
&corev1.Secret{}: nsFilter,
|
||||
&appsv1.StatefulSet{}: nsFilter,
|
||||
},
|
||||
},
|
||||
}),
|
||||
})
|
||||
if err != nil {
|
||||
startlog.Fatalf("could not create manager: %v", err)
|
||||
}
|
||||
|
||||
sr := &ServiceReconciler{
|
||||
Client: mgr.GetClient(),
|
||||
tsClient: tsClient,
|
||||
defaultTags: strings.Split(tags, ","),
|
||||
operatorNamespace: tsNamespace,
|
||||
proxyImage: image,
|
||||
proxyPriorityClassName: priorityClassName,
|
||||
logger: zlog.Named("service-reconciler"),
|
||||
Client: mgr.GetClient(),
|
||||
tsClient: tsClient,
|
||||
defaultTags: strings.Split(tags, ","),
|
||||
operatorNamespace: tsNamespace,
|
||||
proxyImage: image,
|
||||
logger: zlog.Named("service-reconciler"),
|
||||
}
|
||||
|
||||
reconcileFilter := handler.EnqueueRequestsFromMapFunc(func(_ context.Context, o client.Object) []reconcile.Request {
|
||||
reconcileFilter := handler.EnqueueRequestsFromMapFunc(func(o client.Object) []reconcile.Request {
|
||||
ls := o.GetLabels()
|
||||
if ls[LabelManaged] != "true" {
|
||||
return nil
|
||||
|
@ -229,35 +214,14 @@ waitOnline:
|
|||
err = builder.
|
||||
ControllerManagedBy(mgr).
|
||||
For(&corev1.Service{}).
|
||||
Watches(&appsv1.StatefulSet{}, reconcileFilter).
|
||||
Watches(&corev1.Secret{}, reconcileFilter).
|
||||
Watches(&source.Kind{Type: &appsv1.StatefulSet{}}, reconcileFilter).
|
||||
Watches(&source.Kind{Type: &corev1.Secret{}}, reconcileFilter).
|
||||
Complete(sr)
|
||||
if err != nil {
|
||||
startlog.Fatalf("could not create controller: %v", err)
|
||||
}
|
||||
|
||||
startlog.Infof("Startup complete, operator running, version: %s", version.Long())
|
||||
if shouldRunAuthProxy {
|
||||
cfg, err := restConfig.TransportConfig()
|
||||
if err != nil {
|
||||
startlog.Fatalf("could not get rest.TransportConfig(): %v", err)
|
||||
}
|
||||
|
||||
// Kubernetes uses SPDY for exec and port-forward, however SPDY is
|
||||
// incompatible with HTTP/2; so disable HTTP/2 in the proxy.
|
||||
tr := http.DefaultTransport.(*http.Transport).Clone()
|
||||
tr.TLSClientConfig, err = transport.TLSConfigFor(cfg)
|
||||
if err != nil {
|
||||
startlog.Fatalf("could not get transport.TLSConfigFor(): %v", err)
|
||||
}
|
||||
tr.TLSNextProto = make(map[string]func(authority string, c *tls.Conn) http.RoundTripper)
|
||||
|
||||
rt, err := transport.HTTPWrappersForConfig(cfg, tr)
|
||||
if err != nil {
|
||||
startlog.Fatalf("could not get rest.TransportConfig(): %v", err)
|
||||
}
|
||||
go runAuthProxy(s, rt, zlog.Named("auth-proxy").Infof)
|
||||
}
|
||||
startlog.Infof("Startup complete, operator running")
|
||||
if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
|
||||
startlog.Fatalf("could not start manager: %v", err)
|
||||
}
|
||||
|
@ -271,20 +235,18 @@ const (
|
|||
|
||||
FinalizerName = "tailscale.com/finalizer"
|
||||
|
||||
AnnotationExpose = "tailscale.com/expose"
|
||||
AnnotationTags = "tailscale.com/tags"
|
||||
AnnotationHostname = "tailscale.com/hostname"
|
||||
AnnotationExpose = "tailscale.com/expose"
|
||||
AnnotationTags = "tailscale.com/tags"
|
||||
)
|
||||
|
||||
// ServiceReconciler is a simple ControllerManagedBy example implementation.
|
||||
type ServiceReconciler struct {
|
||||
client.Client
|
||||
tsClient tsClient
|
||||
defaultTags []string
|
||||
operatorNamespace string
|
||||
proxyImage string
|
||||
proxyPriorityClassName string
|
||||
logger *zap.SugaredLogger
|
||||
tsClient tsClient
|
||||
defaultTags []string
|
||||
operatorNamespace string
|
||||
proxyImage string
|
||||
logger *zap.SugaredLogger
|
||||
}
|
||||
|
||||
type tsClient interface {
|
||||
|
@ -408,11 +370,6 @@ func (a *ServiceReconciler) maybeCleanup(ctx context.Context, logger *zap.Sugare
|
|||
// This function adds a finalizer to svc, ensuring that we can handle orderly
|
||||
// deprovisioning later.
|
||||
func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.SugaredLogger, svc *corev1.Service) error {
|
||||
hostname, err := nameForService(svc)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if !slices.Contains(svc.Finalizers, FinalizerName) {
|
||||
// This log line is printed exactly once during initial provisioning,
|
||||
// because once the finalizer is in place this block gets skipped. So,
|
||||
|
@ -439,7 +396,7 @@ func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.Suga
|
|||
if err != nil {
|
||||
return fmt.Errorf("failed to create or get API key secret: %w", err)
|
||||
}
|
||||
_, err = a.reconcileSTS(ctx, logger, svc, hsvc, secretName, hostname)
|
||||
_, err = a.reconcileSTS(ctx, logger, svc, hsvc, secretName)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to reconcile statefulset: %w", err)
|
||||
}
|
||||
|
@ -568,9 +525,6 @@ func (a *ServiceReconciler) getDeviceInfo(ctx context.Context, svc *corev1.Servi
|
|||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
if sec == nil {
|
||||
return "", "", nil
|
||||
}
|
||||
id = string(sec.Data["device_id"])
|
||||
if id == "" {
|
||||
return "", "", nil
|
||||
|
@ -594,7 +548,6 @@ func (a *ServiceReconciler) newAuthKey(ctx context.Context, tags []string) (stri
|
|||
},
|
||||
},
|
||||
}
|
||||
|
||||
key, _, err := a.tsClient.CreateKey(ctx, caps)
|
||||
if err != nil {
|
||||
return "", err
|
||||
|
@ -605,7 +558,7 @@ func (a *ServiceReconciler) newAuthKey(ctx context.Context, tags []string) (stri
|
|||
//go:embed manifests/proxy.yaml
|
||||
var proxyYaml []byte
|
||||
|
||||
func (a *ServiceReconciler) reconcileSTS(ctx context.Context, logger *zap.SugaredLogger, parentSvc, headlessSvc *corev1.Service, authKeySecret, hostname string) (*appsv1.StatefulSet, error) {
|
||||
func (a *ServiceReconciler) reconcileSTS(ctx context.Context, logger *zap.SugaredLogger, parentSvc, headlessSvc *corev1.Service, authKeySecret string) (*appsv1.StatefulSet, error) {
|
||||
var ss appsv1.StatefulSet
|
||||
if err := yaml.Unmarshal(proxyYaml, &ss); err != nil {
|
||||
return nil, fmt.Errorf("failed to unmarshal proxy spec: %w", err)
|
||||
|
@ -620,10 +573,6 @@ func (a *ServiceReconciler) reconcileSTS(ctx context.Context, logger *zap.Sugare
|
|||
corev1.EnvVar{
|
||||
Name: "TS_KUBE_SECRET",
|
||||
Value: authKeySecret,
|
||||
},
|
||||
corev1.EnvVar{
|
||||
Name: "TS_HOSTNAME",
|
||||
Value: hostname,
|
||||
})
|
||||
ss.ObjectMeta = metav1.ObjectMeta{
|
||||
Name: headlessSvc.Name,
|
||||
|
@ -639,7 +588,6 @@ func (a *ServiceReconciler) reconcileSTS(ctx context.Context, logger *zap.Sugare
|
|||
ss.Spec.Template.ObjectMeta.Labels = map[string]string{
|
||||
"app": string(parentSvc.UID),
|
||||
}
|
||||
ss.Spec.Template.Spec.PriorityClassName = a.proxyPriorityClassName
|
||||
logger.Debugf("reconciling statefulset %s/%s", ss.GetNamespace(), ss.GetName())
|
||||
return createOrUpdate(ctx, a.Client, a.operatorNamespace, &ss, func(s *appsv1.StatefulSet) { s.Spec = ss.Spec })
|
||||
}
|
||||
|
@ -724,15 +672,6 @@ func getSingleObject[T any, O ptrObject[T]](ctx context.Context, c client.Client
|
|||
return ret, nil
|
||||
}
|
||||
|
||||
func defaultBool(envName string, defVal bool) bool {
|
||||
vs := os.Getenv(envName)
|
||||
if vs == "" {
|
||||
return defVal
|
||||
}
|
||||
v, _ := opt.Bool(vs).Get()
|
||||
return v
|
||||
}
|
||||
|
||||
func defaultEnv(envName, defVal string) string {
|
||||
v := os.Getenv(envName)
|
||||
if v == "" {
|
||||
|
@ -740,13 +679,3 @@ func defaultEnv(envName, defVal string) string {
|
|||
}
|
||||
return v
|
||||
}
|
||||
|
||||
func nameForService(svc *corev1.Service) (string, error) {
|
||||
if h, ok := svc.Annotations[AnnotationHostname]; ok {
|
||||
if err := dnsname.ValidLabel(h); err != nil {
|
||||
return "", fmt.Errorf("invalid Tailscale hostname %q: %w", h, err)
|
||||
}
|
||||
return h, nil
|
||||
}
|
||||
return svc.Namespace + "-" + svc.Name, nil
|
||||
}
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package main
|
||||
|
||||
|
@ -14,6 +15,7 @@ import (
|
|||
"go.uber.org/zap"
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
corev1 "k8s.io/api/core/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
|
@ -64,7 +66,7 @@ func TestLoadBalancerClass(t *testing.T) {
|
|||
|
||||
expectEqual(t, fc, expectedSecret(fullName))
|
||||
expectEqual(t, fc, expectedHeadlessService(shortName))
|
||||
expectEqual(t, fc, expectedSTS(shortName, fullName, "default-test", ""))
|
||||
expectEqual(t, fc, expectedSTS(shortName, fullName))
|
||||
|
||||
// Normally the Tailscale proxy pod would come up here and write its info
|
||||
// into the secret. Simulate that, then verify reconcile again and verify
|
||||
|
@ -110,8 +112,6 @@ func TestLoadBalancerClass(t *testing.T) {
|
|||
mustUpdate(t, fc, "default", "test", func(s *corev1.Service) {
|
||||
s.Spec.Type = corev1.ServiceTypeClusterIP
|
||||
s.Spec.LoadBalancerClass = nil
|
||||
})
|
||||
mustUpdateStatus(t, fc, "default", "test", func(s *corev1.Service) {
|
||||
// Fake client doesn't automatically delete the LoadBalancer status when
|
||||
// changing away from the LoadBalancer type, we have to do
|
||||
// controller-manager's work by hand.
|
||||
|
@ -187,7 +187,7 @@ func TestAnnotations(t *testing.T) {
|
|||
|
||||
expectEqual(t, fc, expectedSecret(fullName))
|
||||
expectEqual(t, fc, expectedHeadlessService(shortName))
|
||||
expectEqual(t, fc, expectedSTS(shortName, fullName, "default-test", ""))
|
||||
expectEqual(t, fc, expectedSTS(shortName, fullName))
|
||||
want := &corev1.Service{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Service",
|
||||
|
@ -284,7 +284,7 @@ func TestAnnotationIntoLB(t *testing.T) {
|
|||
|
||||
expectEqual(t, fc, expectedSecret(fullName))
|
||||
expectEqual(t, fc, expectedHeadlessService(shortName))
|
||||
expectEqual(t, fc, expectedSTS(shortName, fullName, "default-test", ""))
|
||||
expectEqual(t, fc, expectedSTS(shortName, fullName))
|
||||
|
||||
// Normally the Tailscale proxy pod would come up here and write its info
|
||||
// into the secret. Simulate that, since it would have normally happened at
|
||||
|
@ -328,7 +328,7 @@ func TestAnnotationIntoLB(t *testing.T) {
|
|||
expectReconciled(t, sr, "default", "test")
|
||||
// None of the proxy machinery should have changed...
|
||||
expectEqual(t, fc, expectedHeadlessService(shortName))
|
||||
expectEqual(t, fc, expectedSTS(shortName, fullName, "default-test", ""))
|
||||
expectEqual(t, fc, expectedSTS(shortName, fullName))
|
||||
// ... but the service should have a LoadBalancer status.
|
||||
|
||||
want = &corev1.Service{
|
||||
|
@ -400,7 +400,7 @@ func TestLBIntoAnnotation(t *testing.T) {
|
|||
|
||||
expectEqual(t, fc, expectedSecret(fullName))
|
||||
expectEqual(t, fc, expectedHeadlessService(shortName))
|
||||
expectEqual(t, fc, expectedSTS(shortName, fullName, "default-test", ""))
|
||||
expectEqual(t, fc, expectedSTS(shortName, fullName))
|
||||
|
||||
// Normally the Tailscale proxy pod would come up here and write its info
|
||||
// into the secret. Simulate that, then verify reconcile again and verify
|
||||
|
@ -449,8 +449,6 @@ func TestLBIntoAnnotation(t *testing.T) {
|
|||
}
|
||||
s.Spec.Type = corev1.ServiceTypeClusterIP
|
||||
s.Spec.LoadBalancerClass = nil
|
||||
})
|
||||
mustUpdateStatus(t, fc, "default", "test", func(s *corev1.Service) {
|
||||
// Fake client doesn't automatically delete the LoadBalancer status when
|
||||
// changing away from the LoadBalancer type, we have to do
|
||||
// controller-manager's work by hand.
|
||||
|
@ -459,7 +457,7 @@ func TestLBIntoAnnotation(t *testing.T) {
|
|||
expectReconciled(t, sr, "default", "test")
|
||||
|
||||
expectEqual(t, fc, expectedHeadlessService(shortName))
|
||||
expectEqual(t, fc, expectedSTS(shortName, fullName, "default-test", ""))
|
||||
expectEqual(t, fc, expectedSTS(shortName, fullName))
|
||||
|
||||
want = &corev1.Service{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
|
@ -483,153 +481,6 @@ func TestLBIntoAnnotation(t *testing.T) {
|
|||
expectEqual(t, fc, want)
|
||||
}
|
||||
|
||||
func TestCustomHostname(t *testing.T) {
|
||||
fc := fake.NewFakeClient()
|
||||
ft := &fakeTSClient{}
|
||||
zl, err := zap.NewDevelopment()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
sr := &ServiceReconciler{
|
||||
Client: fc,
|
||||
tsClient: ft,
|
||||
defaultTags: []string{"tag:k8s"},
|
||||
operatorNamespace: "operator-ns",
|
||||
proxyImage: "tailscale/tailscale",
|
||||
logger: zl.Sugar(),
|
||||
}
|
||||
|
||||
// Create a service that we should manage, and check that the initial round
|
||||
// of objects looks right.
|
||||
mustCreate(t, fc, &corev1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test",
|
||||
Namespace: "default",
|
||||
// The apiserver is supposed to set the UID, but the fake client
|
||||
// doesn't. So, set it explicitly because other code later depends
|
||||
// on it being set.
|
||||
UID: types.UID("1234-UID"),
|
||||
Annotations: map[string]string{
|
||||
"tailscale.com/expose": "true",
|
||||
"tailscale.com/hostname": "reindeer-flotilla",
|
||||
},
|
||||
},
|
||||
Spec: corev1.ServiceSpec{
|
||||
ClusterIP: "10.20.30.40",
|
||||
Type: corev1.ServiceTypeClusterIP,
|
||||
},
|
||||
})
|
||||
|
||||
expectReconciled(t, sr, "default", "test")
|
||||
|
||||
fullName, shortName := findGenName(t, fc, "default", "test")
|
||||
|
||||
expectEqual(t, fc, expectedSecret(fullName))
|
||||
expectEqual(t, fc, expectedHeadlessService(shortName))
|
||||
expectEqual(t, fc, expectedSTS(shortName, fullName, "reindeer-flotilla", ""))
|
||||
want := &corev1.Service{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Service",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test",
|
||||
Namespace: "default",
|
||||
Finalizers: []string{"tailscale.com/finalizer"},
|
||||
UID: types.UID("1234-UID"),
|
||||
Annotations: map[string]string{
|
||||
"tailscale.com/expose": "true",
|
||||
"tailscale.com/hostname": "reindeer-flotilla",
|
||||
},
|
||||
},
|
||||
Spec: corev1.ServiceSpec{
|
||||
ClusterIP: "10.20.30.40",
|
||||
Type: corev1.ServiceTypeClusterIP,
|
||||
},
|
||||
}
|
||||
expectEqual(t, fc, want)
|
||||
|
||||
// Turn the service back into a ClusterIP service, which should make the
|
||||
// operator clean up.
|
||||
mustUpdate(t, fc, "default", "test", func(s *corev1.Service) {
|
||||
delete(s.ObjectMeta.Annotations, "tailscale.com/expose")
|
||||
})
|
||||
// synchronous StatefulSet deletion triggers a requeue. But, the StatefulSet
|
||||
// didn't create any child resources since this is all faked, so the
|
||||
// deletion goes through immediately.
|
||||
expectReconciled(t, sr, "default", "test")
|
||||
expectMissing[appsv1.StatefulSet](t, fc, "operator-ns", shortName)
|
||||
// Second time around, the rest of cleanup happens.
|
||||
expectReconciled(t, sr, "default", "test")
|
||||
expectMissing[appsv1.StatefulSet](t, fc, "operator-ns", shortName)
|
||||
expectMissing[corev1.Service](t, fc, "operator-ns", shortName)
|
||||
expectMissing[corev1.Secret](t, fc, "operator-ns", fullName)
|
||||
want = &corev1.Service{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Service",
|
||||
APIVersion: "v1",
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test",
|
||||
Namespace: "default",
|
||||
UID: types.UID("1234-UID"),
|
||||
Annotations: map[string]string{
|
||||
"tailscale.com/hostname": "reindeer-flotilla",
|
||||
},
|
||||
},
|
||||
Spec: corev1.ServiceSpec{
|
||||
ClusterIP: "10.20.30.40",
|
||||
Type: corev1.ServiceTypeClusterIP,
|
||||
},
|
||||
}
|
||||
expectEqual(t, fc, want)
|
||||
}
|
||||
|
||||
func TestCustomPriorityClassName(t *testing.T) {
|
||||
fc := fake.NewFakeClient()
|
||||
ft := &fakeTSClient{}
|
||||
zl, err := zap.NewDevelopment()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
sr := &ServiceReconciler{
|
||||
Client: fc,
|
||||
tsClient: ft,
|
||||
defaultTags: []string{"tag:k8s"},
|
||||
operatorNamespace: "operator-ns",
|
||||
proxyImage: "tailscale/tailscale",
|
||||
proxyPriorityClassName: "tailscale-critical",
|
||||
logger: zl.Sugar(),
|
||||
}
|
||||
|
||||
// Create a service that we should manage, and check that the initial round
|
||||
// of objects looks right.
|
||||
mustCreate(t, fc, &corev1.Service{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test",
|
||||
Namespace: "default",
|
||||
// The apiserver is supposed to set the UID, but the fake client
|
||||
// doesn't. So, set it explicitly because other code later depends
|
||||
// on it being set.
|
||||
UID: types.UID("1234-UID"),
|
||||
Annotations: map[string]string{
|
||||
"tailscale.com/expose": "true",
|
||||
"tailscale.com/hostname": "custom-priority-class-name",
|
||||
},
|
||||
},
|
||||
Spec: corev1.ServiceSpec{
|
||||
ClusterIP: "10.20.30.40",
|
||||
Type: corev1.ServiceTypeClusterIP,
|
||||
},
|
||||
})
|
||||
|
||||
expectReconciled(t, sr, "default", "test")
|
||||
|
||||
fullName, shortName := findGenName(t, fc, "default", "test")
|
||||
|
||||
expectEqual(t, fc, expectedSTS(shortName, fullName, "custom-priority-class-name", "tailscale-critical"))
|
||||
}
|
||||
|
||||
func expectedSecret(name string) *corev1.Secret {
|
||||
return &corev1.Secret{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
|
@ -678,7 +529,7 @@ func expectedHeadlessService(name string) *corev1.Service {
|
|||
}
|
||||
}
|
||||
|
||||
func expectedSTS(stsName, secretName, hostname, priorityClassName string) *appsv1.StatefulSet {
|
||||
func expectedSTS(stsName, secretName string) *appsv1.StatefulSet {
|
||||
return &appsv1.StatefulSet{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "StatefulSet",
|
||||
|
@ -707,7 +558,6 @@ func expectedSTS(stsName, secretName, hostname, priorityClassName string) *appsv
|
|||
},
|
||||
Spec: corev1.PodSpec{
|
||||
ServiceAccountName: "proxies",
|
||||
PriorityClassName: priorityClassName,
|
||||
InitContainers: []corev1.Container{
|
||||
{
|
||||
Name: "sysctler",
|
||||
|
@ -719,16 +569,15 @@ func expectedSTS(stsName, secretName, hostname, priorityClassName string) *appsv
|
|||
},
|
||||
},
|
||||
},
|
||||
Containers: []corev1.Container{
|
||||
Containers: []v1.Container{
|
||||
{
|
||||
Name: "tailscale",
|
||||
Image: "tailscale/tailscale",
|
||||
Env: []corev1.EnvVar{
|
||||
Env: []v1.EnvVar{
|
||||
{Name: "TS_USERSPACE", Value: "false"},
|
||||
{Name: "TS_AUTH_ONCE", Value: "true"},
|
||||
{Name: "TS_DEST_IP", Value: "10.20.30.40"},
|
||||
{Name: "TS_KUBE_SECRET", Value: secretName},
|
||||
{Name: "TS_HOSTNAME", Value: hostname},
|
||||
},
|
||||
SecurityContext: &corev1.SecurityContext{
|
||||
Capabilities: &corev1.Capabilities{
|
||||
|
@ -781,21 +630,6 @@ func mustUpdate[T any, O ptrObject[T]](t *testing.T, client client.Client, ns, n
|
|||
}
|
||||
}
|
||||
|
||||
func mustUpdateStatus[T any, O ptrObject[T]](t *testing.T, client client.Client, ns, name string, update func(O)) {
|
||||
t.Helper()
|
||||
obj := O(new(T))
|
||||
if err := client.Get(context.Background(), types.NamespacedName{
|
||||
Name: name,
|
||||
Namespace: ns,
|
||||
}, obj); err != nil {
|
||||
t.Fatalf("getting %q: %v", name, err)
|
||||
}
|
||||
update(obj)
|
||||
if err := client.Status().Update(context.Background(), obj); err != nil {
|
||||
t.Fatalf("updating %q: %v", name, err)
|
||||
}
|
||||
}
|
||||
|
||||
func expectEqual[T any, O ptrObject[T]](t *testing.T, client client.Client, want O) {
|
||||
t.Helper()
|
||||
got := O(new(T))
|
||||
|
@ -879,6 +713,7 @@ func (c *fakeTSClient) CreateKey(ctx context.Context, caps tailscale.KeyCapabili
|
|||
k := &tailscale.Key{
|
||||
ID: "key",
|
||||
Created: time.Now(),
|
||||
Expires: time.Now().Add(24 * time.Hour),
|
||||
Capabilities: caps,
|
||||
}
|
||||
return "secret-authkey", k, nil
|
||||
|
|
|
@ -1,120 +0,0 @@
|
|||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"log"
|
||||
"net/http"
|
||||
"net/http/httputil"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"tailscale.com/client/tailscale"
|
||||
"tailscale.com/client/tailscale/apitype"
|
||||
"tailscale.com/tsnet"
|
||||
"tailscale.com/types/logger"
|
||||
)
|
||||
|
||||
type whoIsKey struct{}
|
||||
|
||||
// authProxy is an http.Handler that authenticates requests using the Tailscale
|
||||
// LocalAPI and then proxies them to the Kubernetes API.
|
||||
type authProxy struct {
|
||||
logf logger.Logf
|
||||
lc *tailscale.LocalClient
|
||||
rp *httputil.ReverseProxy
|
||||
}
|
||||
|
||||
func (h *authProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
who, err := h.lc.WhoIs(r.Context(), r.RemoteAddr)
|
||||
if err != nil {
|
||||
h.logf("failed to authenticate caller: %v", err)
|
||||
http.Error(w, "failed to authenticate caller", http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
r = r.WithContext(context.WithValue(r.Context(), whoIsKey{}, who))
|
||||
h.rp.ServeHTTP(w, r)
|
||||
}
|
||||
|
||||
// runAuthProxy runs an HTTP server that authenticates requests using the
|
||||
// Tailscale LocalAPI and then proxies them to the Kubernetes API.
|
||||
// It listens on :443 and uses the Tailscale HTTPS certificate.
|
||||
// s will be started if it is not already running.
|
||||
// rt is used to proxy requests to the Kubernetes API.
|
||||
//
|
||||
// It never returns.
|
||||
func runAuthProxy(s *tsnet.Server, rt http.RoundTripper, logf logger.Logf) {
|
||||
ln, err := s.Listen("tcp", ":443")
|
||||
if err != nil {
|
||||
log.Fatalf("could not listen on :443: %v", err)
|
||||
}
|
||||
u, err := url.Parse(fmt.Sprintf("https://%s:%s", os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT_HTTPS")))
|
||||
if err != nil {
|
||||
log.Fatalf("runAuthProxy: failed to parse URL %v", err)
|
||||
}
|
||||
|
||||
lc, err := s.LocalClient()
|
||||
if err != nil {
|
||||
log.Fatalf("could not get local client: %v", err)
|
||||
}
|
||||
ap := &authProxy{
|
||||
logf: logf,
|
||||
lc: lc,
|
||||
rp: &httputil.ReverseProxy{
|
||||
Director: func(r *http.Request) {
|
||||
// We want to proxy to the Kubernetes API, but we want to use
|
||||
// the caller's identity to do so. We do this by impersonating
|
||||
// the caller using the Kubernetes User Impersonation feature:
|
||||
// https://kubernetes.io/docs/reference/access-authn-authz/authentication/#user-impersonation
|
||||
|
||||
// Out of paranoia, remove all authentication headers that might
|
||||
// have been set by the client.
|
||||
r.Header.Del("Authorization")
|
||||
r.Header.Del("Impersonate-Group")
|
||||
r.Header.Del("Impersonate-User")
|
||||
r.Header.Del("Impersonate-Uid")
|
||||
for k := range r.Header {
|
||||
if strings.HasPrefix(k, "Impersonate-Extra-") {
|
||||
r.Header.Del(k)
|
||||
}
|
||||
}
|
||||
|
||||
// Now add the impersonation headers that we want.
|
||||
who := r.Context().Value(whoIsKey{}).(*apitype.WhoIsResponse)
|
||||
if who.Node.IsTagged() {
|
||||
// Use the nodes FQDN as the username, and the nodes tags as the groups.
|
||||
// "Impersonate-Group" requires "Impersonate-User" to be set.
|
||||
r.Header.Set("Impersonate-User", strings.TrimSuffix(who.Node.Name, "."))
|
||||
for _, tag := range who.Node.Tags {
|
||||
r.Header.Add("Impersonate-Group", tag)
|
||||
}
|
||||
} else {
|
||||
r.Header.Set("Impersonate-User", who.UserProfile.LoginName)
|
||||
}
|
||||
|
||||
// Replace the URL with the Kubernetes APIServer.
|
||||
r.URL.Scheme = u.Scheme
|
||||
r.URL.Host = u.Host
|
||||
},
|
||||
Transport: rt,
|
||||
},
|
||||
}
|
||||
hs := &http.Server{
|
||||
// Kubernetes uses SPDY for exec and port-forward, however SPDY is
|
||||
// incompatible with HTTP/2; so disable HTTP/2 in the proxy.
|
||||
TLSConfig: &tls.Config{
|
||||
GetCertificate: lc.GetCertificate,
|
||||
NextProtos: []string{"http/1.1"},
|
||||
},
|
||||
TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)),
|
||||
Handler: ap,
|
||||
}
|
||||
if err := hs.ServeTLS(ln, "", ""); err != nil {
|
||||
log.Fatalf("runAuthProxy: failed to serve %v", err)
|
||||
}
|
||||
}
|
|
@ -1,5 +1,6 @@
|
|||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// The mkmanifest command is a simple helper utility to create a '.syso' file
|
||||
// that contains a Windows manifest file.
|
||||
|
@ -19,7 +20,7 @@ func main() {
|
|||
|
||||
arch := winres.Arch(os.Args[1])
|
||||
switch arch {
|
||||
case winres.ArchAMD64, winres.ArchARM64, winres.ArchI386:
|
||||
case winres.ArchAMD64, winres.ArchARM64, winres.ArchI386, winres.ArchARM:
|
||||
default:
|
||||
log.Fatalf("unsupported arch: %s", arch)
|
||||
}
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
// Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// mkpkg builds the Tailscale rpm and deb packages.
|
||||
package main
|
||||
|
|
|
@ -1,44 +0,0 @@
|
|||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
// mkversion gets version info from git and outputs a bunch of shell variables
|
||||
// that get used elsewhere in the build system to embed version numbers into
|
||||
// binaries.
|
||||
package main
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/version/mkversion"
|
||||
)
|
||||
|
||||
func main() {
|
||||
prefix := ""
|
||||
if len(os.Args) > 1 {
|
||||
if os.Args[1] == "--export" {
|
||||
prefix = "export "
|
||||
} else {
|
||||
fmt.Println("usage: mkversion [--export|-h|--help]")
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
var b bytes.Buffer
|
||||
io.WriteString(&b, mkversion.Info().String())
|
||||
// Copyright and the client capability are not part of the version
|
||||
// information, but similarly used in Xcode builds to embed in the metadata,
|
||||
// thus generate them now.
|
||||
copyright := fmt.Sprintf("Copyright © %d Tailscale Inc. All Rights Reserved.", time.Now().Year())
|
||||
fmt.Fprintf(&b, "VERSION_COPYRIGHT=%q\n", copyright)
|
||||
fmt.Fprintf(&b, "VERSION_CAPABILITY=%d\n", tailcfg.CurrentCapabilityVersion)
|
||||
s := bufio.NewScanner(&b)
|
||||
for s.Scan() {
|
||||
fmt.Println(prefix + s.Text())
|
||||
}
|
||||
}
|
|
@ -1,5 +1,6 @@
|
|||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// nardump is like nix-store --dump, but in Go, writing a NAR
|
||||
// file (tar-like, but focused on being reproducible) to stdout
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// netlogfmt parses a stream of JSON log messages from stdin and
|
||||
// formats the network traffic logs produced by "tailscale.com/wgengine/netlog"
|
||||
|
@ -43,7 +44,7 @@ import (
|
|||
jsonv2 "github.com/go-json-experiment/json"
|
||||
"golang.org/x/exp/maps"
|
||||
"golang.org/x/exp/slices"
|
||||
"tailscale.com/types/logid"
|
||||
"tailscale.com/logtail"
|
||||
"tailscale.com/types/netlogtype"
|
||||
"tailscale.com/util/must"
|
||||
)
|
||||
|
@ -136,8 +137,8 @@ func processObject(dec *jsonv2.Decoder) {
|
|||
|
||||
type message struct {
|
||||
Logtail struct {
|
||||
ID logid.PublicID `json:"id"`
|
||||
Logged time.Time `json:"server_time"`
|
||||
ID logtail.PublicID `json:"id"`
|
||||
Logged time.Time `json:"server_time"`
|
||||
} `json:"logtail"`
|
||||
Logged time.Time `json:"logged"`
|
||||
netlogtype.Message
|
||||
|
|
|
@ -129,7 +129,7 @@ the `Expected-Tailnet` header to your auth request:

```nginx
location /auth {
    # ...
    proxy_set_header Expected-Tailnet "tailnet012345.ts.net";
    proxy_set_header Expected-Tailnet "tailscale.com";
}
```

@ -146,8 +146,6 @@ generic "forbidden" error page:

</html>
```

You can get the tailnet name from [the admin panel](https://login.tailscale.com/admin/dns).
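
For reference, a minimal, illustrative sketch of how an `/auth` handler can compare the caller's tailnet against the `Expected-Tailnet` header using the public `tailscale.com/client/tailscale` LocalClient. The header names and listen address here are assumptions for illustration, not the exact protocol spoken by `tailscale.nginx-auth`:

```go
package main

import (
	"log"
	"net"
	"net/http"
	"strings"

	"tailscale.com/client/tailscale"
)

func main() {
	var lc tailscale.LocalClient // talks to the local tailscaled
	http.HandleFunc("/auth", func(w http.ResponseWriter, r *http.Request) {
		// NGINX is assumed to forward the original client address in
		// Remote-Addr/Remote-Port headers (illustrative names).
		remote := net.JoinHostPort(r.Header.Get("Remote-Addr"), r.Header.Get("Remote-Port"))
		who, err := lc.WhoIs(r.Context(), remote)
		if err != nil {
			http.Error(w, "can't identify caller", http.StatusInternalServerError)
			return
		}
		// who.Node.Name is the node's MagicDNS FQDN, e.g. "host.tailnet012345.ts.net.".
		nodeFQDN := strings.TrimSuffix(who.Node.Name, ".")
		if want := r.Header.Get("Expected-Tailnet"); want != "" && !strings.HasSuffix(nodeFQDN, want) {
			// Tailnet mismatch: NGINX then renders its own "forbidden" page.
			w.WriteHeader(http.StatusForbidden)
			return
		}
		w.WriteHeader(http.StatusNoContent)
	})
	log.Fatal(http.ListenAndServe("127.0.0.1:8318", nil))
}
```

A fuller handler would also reject tagged nodes and pass identity details back to NGINX; this sketch covers only the tailnet comparison.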

## Building

Install `cmd/mkpkg`:
@ -2,31 +2,30 @@
|
|||
|
||||
set -e
|
||||
|
||||
VERSION=0.1.3
|
||||
for ARCH in amd64 arm64; do
|
||||
CGO_ENABLED=0 GOARCH=${ARCH} GOOS=linux go build -o tailscale.nginx-auth .
|
||||
CGO_ENABLED=0 GOARCH=amd64 GOOS=linux go build -o tailscale.nginx-auth .
|
||||
|
||||
mkpkg \
|
||||
--out=tailscale-nginx-auth-${VERSION}-${ARCH}.deb \
|
||||
--name=tailscale-nginx-auth \
|
||||
--version=${VERSION} \
|
||||
--type=deb \
|
||||
--arch=${ARCH} \
|
||||
--postinst=deb/postinst.sh \
|
||||
--postrm=deb/postrm.sh \
|
||||
--prerm=deb/prerm.sh \
|
||||
--description="Tailscale NGINX authentication protocol handler" \
|
||||
--files=./tailscale.nginx-auth:/usr/sbin/tailscale.nginx-auth,./tailscale.nginx-auth.socket:/lib/systemd/system/tailscale.nginx-auth.socket,./tailscale.nginx-auth.service:/lib/systemd/system/tailscale.nginx-auth.service,./README.md:/usr/share/tailscale/nginx-auth/README.md
|
||||
VERSION=0.1.2
|
||||
|
||||
mkpkg \
|
||||
--out=tailscale-nginx-auth-${VERSION}-${ARCH}.rpm \
|
||||
--name=tailscale-nginx-auth \
|
||||
--version=${VERSION} \
|
||||
--type=rpm \
|
||||
--arch=${ARCH} \
|
||||
--postinst=rpm/postinst.sh \
|
||||
--postrm=rpm/postrm.sh \
|
||||
--prerm=rpm/prerm.sh \
|
||||
--description="Tailscale NGINX authentication protocol handler" \
|
||||
--files=./tailscale.nginx-auth:/usr/sbin/tailscale.nginx-auth,./tailscale.nginx-auth.socket:/lib/systemd/system/tailscale.nginx-auth.socket,./tailscale.nginx-auth.service:/lib/systemd/system/tailscale.nginx-auth.service,./README.md:/usr/share/tailscale/nginx-auth/README.md
|
||||
done
|
||||
mkpkg \
|
||||
--out=tailscale-nginx-auth-${VERSION}-amd64.deb \
|
||||
--name=tailscale-nginx-auth \
|
||||
--version=${VERSION} \
|
||||
--type=deb \
|
||||
--arch=amd64 \
|
||||
--postinst=deb/postinst.sh \
|
||||
--postrm=deb/postrm.sh \
|
||||
--prerm=deb/prerm.sh \
|
||||
--description="Tailscale NGINX authentication protocol handler" \
|
||||
--files=./tailscale.nginx-auth:/usr/sbin/tailscale.nginx-auth,./tailscale.nginx-auth.socket:/lib/systemd/system/tailscale.nginx-auth.socket,./tailscale.nginx-auth.service:/lib/systemd/system/tailscale.nginx-auth.service,./README.md:/usr/share/tailscale/nginx-auth/README.md
|
||||
|
||||
mkpkg \
|
||||
--out=tailscale-nginx-auth-${VERSION}-amd64.rpm \
|
||||
--name=tailscale-nginx-auth \
|
||||
--version=${VERSION} \
|
||||
--type=rpm \
|
||||
--arch=amd64 \
|
||||
--postinst=rpm/postinst.sh \
|
||||
--postrm=rpm/postrm.sh \
|
||||
--prerm=rpm/prerm.sh \
|
||||
--description="Tailscale NGINX authentication protocol handler" \
|
||||
--files=./tailscale.nginx-auth:/usr/sbin/tailscale.nginx-auth,./tailscale.nginx-auth.socket:/lib/systemd/system/tailscale.nginx-auth.socket,./tailscale.nginx-auth.service:/lib/systemd/system/tailscale.nginx-auth.service,./README.md:/usr/share/tailscale/nginx-auth/README.md
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build linux
|
||||
|
||||
|
@ -56,7 +57,7 @@ func main() {
|
|||
return
|
||||
}
|
||||
|
||||
if info.Node.IsTagged() {
|
||||
if len(info.Node.Tags) != 0 {
|
||||
w.WriteHeader(http.StatusForbidden)
|
||||
log.Printf("node %s is tagged", info.Node.Hostinfo.Hostname())
|
||||
return
|
||||
|
|
|
@ -1,5 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// The pgproxy server is a proxy for the Postgres wire protocol.
package main

@ -272,7 +273,7 @@ func (p *proxy) serve(sessionID int64, c net.Conn) error {
}
if buf[0] != 'S' {
p.errors.Add("upstream-bad-protocol", 1)
return fmt.Errorf("upstream didn't acknowledge start-ssl, said %q", buf[0])
return fmt.Errorf("upstream didn't acknowldge start-ssl, said %q", buf[0])
}
tlsConf := &tls.Config{
ServerName: p.upstreamHost,
@ -1,5 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2021 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// The printdep command is a build system tool for printing out information
// about dependencies.

@ -18,6 +19,7 @@ import (
var (
goToolchain = flag.Bool("go", false, "print the supported Go toolchain git hash (a github.com/tailscale/go commit)")
goToolchainURL = flag.Bool("go-url", false, "print the URL to the tarball of the Tailscale Go toolchain")
goToolchainSRI = flag.Bool("go-sri", false, "print the SRI hash of the Tailscale Go toolchain")
alpine = flag.Bool("alpine", false, "print the tag of alpine docker image")
)

@ -31,11 +33,23 @@ func main() {
fmt.Println(strings.TrimSpace(ts.GoToolchainRev))
}
if *goToolchainURL {
var suffix string
switch runtime.GOARCH {
case "amd64":
// None
case "arm64":
suffix = "-" + runtime.GOARCH
default:
log.Fatalf("unsupported GOARCH %q", runtime.GOARCH)
}
switch runtime.GOOS {
case "linux", "darwin":
default:
log.Fatalf("unsupported GOOS %q", runtime.GOOS)
}
fmt.Printf("https://github.com/tailscale/go/releases/download/build-%s/%s-%s.tar.gz\n", strings.TrimSpace(ts.GoToolchainRev), runtime.GOOS, runtime.GOARCH)
fmt.Printf("https://github.com/tailscale/go/releases/download/build-%s/%s%s.tar.gz\n", strings.TrimSpace(ts.GoToolchainRev), runtime.GOOS, suffix)
}
if *goToolchainSRI {
fmt.Println(strings.TrimSpace(ts.GoToolchainSRI))
}
}

@ -1,5 +1,6 @@
|
|||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// proxy-to-grafana is a reverse proxy which identifies users based on their
|
||||
// originating Tailscale identity and maps them to corresponding Grafana
|
||||
|
@ -147,7 +148,7 @@ func getTailscaleUser(ctx context.Context, localClient *tailscale.LocalClient, i
|
|||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to identify remote host: %w", err)
|
||||
}
|
||||
if whois.Node.IsTagged() {
|
||||
if len(whois.Node.Tags) != 0 {
|
||||
return nil, fmt.Errorf("tagged nodes are not users")
|
||||
}
|
||||
if whois.UserProfile == nil || whois.UserProfile.LoginName == "" {
|
||||
|
|
|
@ -1,224 +0,0 @@
|
|||
// Copyright (c) Tailscale Inc & AUTHORS
|
||||
// SPDX-License-Identifier: BSD-3-Clause
|
||||
|
||||
// The sniproxy is an outbound SNI proxy. It receives TLS connections over
|
||||
// Tailscale on one or more TCP ports and sends them out to the same SNI
|
||||
// hostname & port on the internet. It only does TCP.
|
||||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"log"
|
||||
"net"
|
||||
"net/http"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/dns/dnsmessage"
|
||||
"inet.af/tcpproxy"
|
||||
"tailscale.com/client/tailscale"
|
||||
"tailscale.com/hostinfo"
|
||||
"tailscale.com/net/netutil"
|
||||
"tailscale.com/tsnet"
|
||||
"tailscale.com/types/nettype"
|
||||
)
|
||||
|
||||
var (
|
||||
ports = flag.String("ports", "443", "comma-separated list of ports to proxy")
|
||||
wgPort = flag.Int("wg-listen-port", 0, "UDP port to listen on for WireGuard and peer-to-peer traffic; 0 means automatically select")
|
||||
promoteHTTPS = flag.Bool("promote-https", true, "promote HTTP to HTTPS")
|
||||
)
|
||||
|
||||
var tsMBox = dnsmessage.MustNewName("support.tailscale.com.")
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
if *ports == "" {
|
||||
log.Fatal("no ports")
|
||||
}
|
||||
|
||||
hostinfo.SetApp("sniproxy")
|
||||
|
||||
var s server
|
||||
s.ts.Port = uint16(*wgPort)
|
||||
defer s.ts.Close()
|
||||
|
||||
lc, err := s.ts.LocalClient()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
s.lc = lc
|
||||
|
||||
for _, portStr := range strings.Split(*ports, ",") {
|
||||
ln, err := s.ts.Listen("tcp", ":"+portStr)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
log.Printf("Serving on port %v ...", portStr)
|
||||
go s.serve(ln)
|
||||
}
|
||||
|
||||
ln, err := s.ts.Listen("udp", ":53")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
go s.serveDNS(ln)
|
||||
|
||||
if *promoteHTTPS {
|
||||
ln, err := s.ts.Listen("tcp", ":80")
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
log.Printf("Promoting HTTP to HTTPS ...")
|
||||
go s.promoteHTTPS(ln)
|
||||
}
|
||||
|
||||
select {}
|
||||
}
|
||||
|
||||
type server struct {
|
||||
ts tsnet.Server
|
||||
lc *tailscale.LocalClient
|
||||
}
|
||||
|
||||
func (s *server) serve(ln net.Listener) {
|
||||
for {
|
||||
c, err := ln.Accept()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
go s.serveConn(c)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *server) serveDNS(ln net.Listener) {
|
||||
for {
|
||||
c, err := ln.Accept()
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
go s.serveDNSConn(c.(nettype.ConnPacketConn))
|
||||
}
|
||||
}
|
||||
|
||||
func (s *server) serveDNSConn(c nettype.ConnPacketConn) {
|
||||
defer c.Close()
|
||||
c.SetReadDeadline(time.Now().Add(5 * time.Second))
|
||||
buf := make([]byte, 1500)
|
||||
n, err := c.Read(buf)
|
||||
if err != nil {
|
||||
log.Printf("c.Read failed: %v\n ", err)
|
||||
return
|
||||
}
|
||||
|
||||
var msg dnsmessage.Message
|
||||
err = msg.Unpack(buf[:n])
|
||||
if err != nil {
|
||||
log.Printf("dnsmessage unpack failed: %v\n ", err)
|
||||
return
|
||||
}
|
||||
|
||||
buf, err = s.dnsResponse(&msg)
|
||||
if err != nil {
|
||||
log.Printf("s.dnsResponse failed: %v\n", err)
|
||||
return
|
||||
}
|
||||
|
||||
_, err = c.Write(buf)
|
||||
if err != nil {
|
||||
log.Printf("c.Write failed: %v\n", err)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (s *server) serveConn(c net.Conn) {
|
||||
addrPortStr := c.LocalAddr().String()
|
||||
_, port, err := net.SplitHostPort(addrPortStr)
|
||||
if err != nil {
|
||||
log.Printf("bogus addrPort %q", addrPortStr)
|
||||
c.Close()
|
||||
return
|
||||
}
|
||||
|
||||
var dialer net.Dialer
|
||||
dialer.Timeout = 5 * time.Second
|
||||
|
||||
var p tcpproxy.Proxy
|
||||
p.ListenFunc = func(net, laddr string) (net.Listener, error) {
|
||||
return netutil.NewOneConnListener(c, nil), nil
|
||||
}
|
||||
p.AddSNIRouteFunc(addrPortStr, func(ctx context.Context, sniName string) (t tcpproxy.Target, ok bool) {
|
||||
return &tcpproxy.DialProxy{
|
||||
Addr: net.JoinHostPort(sniName, port),
|
||||
DialContext: dialer.DialContext,
|
||||
}, true
|
||||
})
|
||||
p.Start()
|
||||
}
|
||||
|
||||
func (s *server) dnsResponse(req *dnsmessage.Message) (buf []byte, err error) {
|
||||
resp := dnsmessage.NewBuilder(buf,
|
||||
dnsmessage.Header{
|
||||
ID: req.Header.ID,
|
||||
Response: true,
|
||||
Authoritative: true,
|
||||
})
|
||||
resp.EnableCompression()
|
||||
|
||||
if len(req.Questions) == 0 {
|
||||
buf, _ = resp.Finish()
|
||||
return
|
||||
}
|
||||
|
||||
q := req.Questions[0]
|
||||
err = resp.StartQuestions()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
resp.Question(q)
|
||||
|
||||
ip4, ip6 := s.ts.TailscaleIPs()
|
||||
err = resp.StartAnswers()
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
switch q.Type {
|
||||
case dnsmessage.TypeAAAA:
|
||||
err = resp.AAAAResource(
|
||||
dnsmessage.ResourceHeader{Name: q.Name, Class: q.Class, TTL: 120},
|
||||
dnsmessage.AAAAResource{AAAA: ip6.As16()},
|
||||
)
|
||||
|
||||
case dnsmessage.TypeA:
|
||||
err = resp.AResource(
|
||||
dnsmessage.ResourceHeader{Name: q.Name, Class: q.Class, TTL: 120},
|
||||
dnsmessage.AResource{A: ip4.As4()},
|
||||
)
|
||||
case dnsmessage.TypeSOA:
|
||||
err = resp.SOAResource(
|
||||
dnsmessage.ResourceHeader{Name: q.Name, Class: q.Class, TTL: 120},
|
||||
dnsmessage.SOAResource{NS: q.Name, MBox: tsMBox, Serial: 2023030600,
|
||||
Refresh: 120, Retry: 120, Expire: 120, MinTTL: 60},
|
||||
)
|
||||
case dnsmessage.TypeNS:
|
||||
err = resp.NSResource(
|
||||
dnsmessage.ResourceHeader{Name: q.Name, Class: q.Class, TTL: 120},
|
||||
dnsmessage.NSResource{NS: tsMBox},
|
||||
)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
return resp.Finish()
|
||||
}
|
||||
|
||||
func (s *server) promoteHTTPS(ln net.Listener) {
|
||||
err := http.Serve(ln, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
http.Redirect(w, r, "https://"+r.Host+r.RequestURI, http.StatusFound)
|
||||
}))
|
||||
log.Fatalf("promoteHTTPS http.Serve: %v", err)
|
||||
}

@@ -1,5 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2021 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Program speedtest provides the speedtest command. The reason to keep it separate from
// the normal tailscale cli is because it is not yet ready to go in the tailscale binary.

@@ -1,5 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// ssh-auth-none-demo is a demo SSH server that's meant to run on the
// public internet (at 188.166.70.128 port 2222) and

@@ -1,5 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Command stunc makes a STUN request to a STUN server and prints the result.
package main

@@ -1,5 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// The sync-containers command synchronizes container image tags from one
// registry to another.

@@ -23,7 +24,6 @@ import (
	"strings"

	"github.com/google/go-containerregistry/pkg/authn"
	"github.com/google/go-containerregistry/pkg/authn/github"
	"github.com/google/go-containerregistry/pkg/name"
	v1 "github.com/google/go-containerregistry/pkg/v1"
	"github.com/google/go-containerregistry/pkg/v1/remote"

@@ -47,9 +47,8 @@ func main() {
		log.Fatalf("--dst is required")
	}

	keychain := authn.NewMultiKeychain(authn.DefaultKeychain, github.Keychain)
	opts := []remote.Option{
		remote.WithAuthFromKeychain(keychain),
		remote.WithAuthFromKeychain(authn.DefaultKeychain),
		remote.WithContext(context.Background()),
	}

@@ -85,15 +84,6 @@ func main() {
		log.Printf("%d tags to remove: %s\n", len(remove), strings.Join(remove, ", "))
		log.Printf("Not removing any tags for safety.\n")
	}

	var wellKnown = [...]string{"latest", "stable"}
	for _, tag := range wellKnown {
		if needsUpdate(*src, *dst, tag) {
			if err := copyTag(*src, *dst, tag, opts...); err != nil {
				log.Printf("Updating tag %q: progress error: %v", tag, err)
			}
		}
	}
}

func copyTag(srcStr, dstStr, tag string, opts ...remote.Option) error {

@@ -187,26 +177,3 @@ func diffTags(src, dst []string) (add, remove []string) {
	sort.Strings(remove)
	return add, remove
}

func needsUpdate(srcStr, dstStr, tag string) bool {
	src, err := name.ParseReference(fmt.Sprintf("%s:%s", srcStr, tag))
	if err != nil {
		return false
	}
	dst, err := name.ParseReference(fmt.Sprintf("%s:%s", dstStr, tag))
	if err != nil {
		return false
	}

	srcDesc, err := remote.Get(src)
	if err != nil {
		return false
	}

	dstDesc, err := remote.Get(dst)
	if err != nil {
		return true
	}

	return srcDesc.Digest != dstDesc.Digest
}
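Editor's note: needsUpdate only compares the manifest digests of a tag in the source and destination registries; the actual copy is done by copyTag, whose body is not shown in this hunk. A sketch of what such a copy can look like with go-containerregistry follows; the function name and flow here are illustrative, not the repository's exact implementation.

package tagsync

import (
	"fmt"

	"github.com/google/go-containerregistry/pkg/name"
	"github.com/google/go-containerregistry/pkg/v1/remote"
)

// copyTagSketch pulls the manifest for srcRef:tag and pushes it to dstRef:tag.
// It handles single-platform images only; multi-arch indexes would need
// desc.ImageIndex() and remote.WriteIndex instead.
func copyTagSketch(srcRef, dstRef, tag string, opts ...remote.Option) error {
	src, err := name.ParseReference(fmt.Sprintf("%s:%s", srcRef, tag))
	if err != nil {
		return err
	}
	dst, err := name.ParseReference(fmt.Sprintf("%s:%s", dstRef, tag))
	if err != nil {
		return err
	}
	desc, err := remote.Get(src, opts...)
	if err != nil {
		return err
	}
	img, err := desc.Image()
	if err != nil {
		return err
	}
	return remote.Write(dst, img, opts...)
}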

@@ -1,5 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2021 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package cli

@@ -1,5 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2021 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package cli

@@ -1,5 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package cli contains the cmd/tailscale CLI code in a package that can be included
// in other wrapper binaries such as the Mac and Windows clients.

@@ -14,6 +15,7 @@ import (
	"log"
	"os"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"text/tabwriter"

@@ -46,6 +48,52 @@ func outln(a ...any) {
	fmt.Fprintln(Stdout, a...)
}

// ActLikeCLI reports whether a GUI application should act like the
// CLI based on os.Args, GOOS, the context the process is running in
// (pty, parent PID), etc.
func ActLikeCLI() bool {
	// This function is only used on macOS.
	if runtime.GOOS != "darwin" {
		return false
	}

	// Escape hatch to let people force running the macOS
	// GUI Tailscale binary as the CLI.
	if v, _ := strconv.ParseBool(os.Getenv("TAILSCALE_BE_CLI")); v {
		return true
	}

	// If our parent is launchd, we're definitely not
	// being run as a CLI.
	if os.Getppid() == 1 {
		return false
	}

	// Xcode adds the -NSDocumentRevisionsDebugMode flag on execution.
	// If present, we are almost certainly being run as a GUI.
	for _, arg := range os.Args {
		if arg == "-NSDocumentRevisionsDebugMode" {
			return false
		}
	}

	// Looking at the environment of the GUI Tailscale app (ps eww
	// $PID), empirically none of these environment variables are
	// present. But all or some of these should be present with
	// Terminal.app and bash or zsh.
	for _, e := range []string{
		"SHLVL",
		"TERM",
		"TERM_PROGRAM",
		"PS1",
	} {
		if os.Getenv(e) != "" {
			return true
		}
	}
	return false
}
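Editor's note: package cli is designed to be embedded in wrapper binaries, and ActLikeCLI lets a GUI wrapper decide at startup whether to behave as the command-line client instead. A minimal sketch of such a wrapper, assuming the exported cli.Run entry point and with the GUI startup left as a placeholder:

package main

import (
	"fmt"
	"os"

	"tailscale.com/cmd/tailscale/cli"
)

func main() {
	// When launched from a terminal (or forced via TAILSCALE_BE_CLI),
	// hand the arguments to the embedded CLI and exit.
	if cli.ActLikeCLI() {
		if err := cli.Run(os.Args[1:]); err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		return
	}
	startGUI() // placeholder for the wrapper's GUI startup
}

func startGUI() {}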

func newFlagSet(name string) *flag.FlagSet {
	onError := flag.ExitOnError
	if runtime.GOOS == "js" {

@@ -113,15 +161,12 @@ change in the future.
		loginCmd,
		logoutCmd,
		switchCmd,
		configureCmd,
		netcheckCmd,
		ipCmd,
		statusCmd,
		pingCmd,
		ncCmd,
		sshCmd,
		funnelCmd,
		serveCmd,
		versionCmd,
		webCmd,
		fileCmd,

@@ -149,6 +194,8 @@ change in the future.
	switch {
	case slices.Contains(args, "debug"):
		rootCmd.Subcommands = append(rootCmd.Subcommands, debugCmd)
	case slices.Contains(args, "serve"):
		rootCmd.Subcommands = append(rootCmd.Subcommands, serveCmd)
	case slices.Contains(args, "update"):
		rootCmd.Subcommands = append(rootCmd.Subcommands, updateCmd)
	}

@@ -1,5 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2021 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package cli

@@ -22,7 +23,6 @@ import (
	"tailscale.com/tstest"
	"tailscale.com/types/persist"
	"tailscale.com/types/preftype"
	"tailscale.com/util/cmpx"
	"tailscale.com/version/distro"
)

@@ -622,16 +622,9 @@ func TestPrefsFromUpArgs(t *testing.T) {
		{
			name: "error_long_hostname",
			args: upArgsT{
				hostname: strings.Repeat(strings.Repeat("a", 63)+".", 4),
				hostname: strings.Repeat("a", 300),
			},
			wantErr: `"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa.aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" is too long to be a DNS name`,
		},
		{
			name: "error_long_label",
			args: upArgsT{
				hostname: strings.Repeat("a", 64) + ".example.com",
			},
			wantErr: `"aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" is not a valid DNS label`,
			wantErr: `hostname too long: 300 bytes (max 256)`,
		},
		{
			name: "error_linux_netfilter_empty",

@@ -720,7 +713,10 @@ func TestPrefsFromUpArgs(t *testing.T) {
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			var warnBuf tstest.MemLogger
			goos := cmpx.Or(tt.goos, "linux")
			goos := tt.goos
			if goos == "" {
				goos = "linux"
			}
			st := tt.st
			if st == nil {
				st = new(ipnstate.Status)

@@ -1076,42 +1072,13 @@ func TestUpdatePrefs(t *testing.T) {
			},
			env: upCheckEnv{backendState: "Running"},
		},
		{
			name:             "force_reauth_over_ssh_no_risk",
			flags:            []string{"--force-reauth"},
			sshOverTailscale: true,
			curPrefs: &ipn.Prefs{
				ControlURL:       "https://login.tailscale.com",
				AllowSingleHosts: true,
				CorpDNS:          true,
				NetfilterMode:    preftype.NetfilterOn,
			},
			env:          upCheckEnv{backendState: "Running"},
			wantErrSubtr: "aborted, no changes made",
		},
		{
			name:             "force_reauth_over_ssh",
			flags:            []string{"--force-reauth", "--accept-risk=lose-ssh"},
			sshOverTailscale: true,
			curPrefs: &ipn.Prefs{
				ControlURL:       "https://login.tailscale.com",
				AllowSingleHosts: true,
				CorpDNS:          true,
				NetfilterMode:    preftype.NetfilterOn,
			},
			wantJustEditMP: nil,
			env:            upCheckEnv{backendState: "Running"},
		},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if tt.sshOverTailscale {
				tstest.Replace(t, &getSSHClientEnvVar, func() string { return "100.100.100.100 1 1" })
			} else if isSSHOverTailscale() {
				// The test is being executed over a "real" tailscale SSH
				// session, but sshOverTailscale is unset. Make the test appear
				// as if it's not over tailscale SSH.
				tstest.Replace(t, &getSSHClientEnvVar, func() string { return "" })
				old := getSSHClientEnvVar
				getSSHClientEnvVar = func() string { return "100.100.100.100 1 1" }
				t.Cleanup(func() { getSSHClientEnvVar = old })
			}
			if tt.env.goos == "" {
				tt.env.goos = "linux"

@@ -1,5 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package cli

@@ -18,38 +19,26 @@ import (
	"tailscale.com/version/distro"
)

// configureHostCmd is the "tailscale configure-host" command which was once
// used to configure Synology devices, but is now a compatibility alias to
// "tailscale configure synology".
var configureHostCmd = &ffcli.Command{
	Name:      "configure-host",
	Exec:      runConfigureSynology,
	ShortHelp: synologyConfigureCmd.ShortHelp,
	LongHelp:  synologyConfigureCmd.LongHelp,
	Exec:      runConfigureHost,
	ShortHelp: "Configure Synology to enable more Tailscale features",
	LongHelp: strings.TrimSpace(`
The 'configure-host' command is intended to run at boot as root
to create the /dev/net/tun device and give the tailscaled binary
permission to use it.

See: https://tailscale.com/kb/1152/synology-outbound/
`),
	FlagSet: (func() *flag.FlagSet {
		fs := newFlagSet("configure-host")
		return fs
	})(),
}

var synologyConfigureCmd = &ffcli.Command{
	Name:      "synology",
	Exec:      runConfigureSynology,
	ShortHelp: "Configure Synology to enable outbound connections",
	LongHelp: strings.TrimSpace(`
This command is intended to run at boot as root on a Synology device to
create the /dev/net/tun device and give the tailscaled binary permission
to use it.
var configureHostArgs struct{}

See: https://tailscale.com/s/synology-outbound
`),
	FlagSet: (func() *flag.FlagSet {
		fs := newFlagSet("synology")
		return fs
	})(),
}

func runConfigureSynology(ctx context.Context, args []string) error {
func runConfigureHost(ctx context.Context, args []string) error {
	if len(args) > 0 {
		return errors.New("unknown arguments")
	}
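Editor's note: the hunk above mixes both sides of the change; on one side configure-host is a standalone command, on the other it has become a thin alias that borrows its help text and Exec from the synology subcommand. A minimal sketch of that aliasing pattern with ffcli follows; the command names and run function are illustrative, not the repository's exact code.

package main

import (
	"context"
	"fmt"

	"github.com/peterbourgon/ff/v3/ffcli"
)

func runSynology(ctx context.Context, args []string) error {
	fmt.Println("configuring device...")
	return nil
}

var synologyCmd = &ffcli.Command{
	Name:      "synology",
	ShortHelp: "Configure a Synology device",
	Exec:      runSynology,
}

// configureHostCmd stays around for backward compatibility and simply
// reuses the newer command's help text and implementation.
var configureHostCmd = &ffcli.Command{
	Name:      "configure-host",
	ShortHelp: synologyCmd.ShortHelp,
	LongHelp:  synologyCmd.LongHelp,
	Exec:      synologyCmd.Exec,
}

func main() {
	_ = configureHostCmd.ParseAndRun(context.Background(), nil)
}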
Some files were not shown because too many files have changed in this diff.