Compare commits


1 Commit

Author: Maisem Ali
SHA1: 63d58b3358
Message: ssh/tailssh: drop unused noPubKeyPolicyAuthError
    Signed-off-by: Maisem Ali <maisem@tailscale.com>
Date: 2022-10-11 14:55:06 -07:00
1072 changed files with 18368 additions and 72243 deletions

@@ -1,17 +1,18 @@
name: Bug report
description: File a bug report. If you need help, contact support instead
description: File a bug report
labels: [needs-triage, bug]
body:
- type: markdown
attributes:
value: |
Need help with your tailnet? [Contact support](https://tailscale.com/contact/support) instead.
Otherwise, please check if your bug is [already filed](https://github.com/tailscale/tailscale/issues) before filing a new one.
Please check if your bug is [already filed](https://github.com/tailscale/tailscale/issues).
Have an urgent issue? Let us know by emailing us at <support@tailscale.com>.
- type: textarea
id: what-happened
attributes:
label: What is the issue?
description: What happened? What did you expect to happen?
placeholder: oh no
validations:
required: true
- type: textarea
@@ -60,13 +61,6 @@ body:
placeholder: e.g., 1.14.4
validations:
required: false
- type: textarea
id: other-software
attributes:
label: Other software
description: What [other software](https://github.com/tailscale/tailscale/wiki/OtherSoftwareInterop) (networking, security, etc) are you running?
validations:
required: false
- type: input
id: bug-report
attributes:

@@ -5,4 +5,4 @@ contact_links:
about: Contact us for support
- name: Troubleshooting
url: https://tailscale.com/kb/1023/troubleshooting
about: See the troubleshooting guide for help addressing common issues
about: Troubleshoot common issues

.github/workflows/cifuzz.yml

@@ -0,0 +1,31 @@
name: CIFuzz
on: [pull_request]
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
Fuzzing:
runs-on: ubuntu-latest
steps:
- name: Build Fuzzers
id: build
uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@master
with:
oss-fuzz-project-name: 'tailscale'
dry-run: false
language: go
- name: Run Fuzzers
uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@master
with:
oss-fuzz-project-name: 'tailscale'
fuzz-seconds: 300
dry-run: false
language: go
- name: Upload Crash
uses: actions/upload-artifact@v3
if: failure() && steps.build.outcome == 'success'
with:
name: artifacts
path: ./out/artifacts
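
The CIFuzz job above is the repository's OSS-Fuzz integration: each pull request rebuilds the project's fuzz targets, runs them for 300 seconds, and uploads any crashing inputs as artifacts. A rough local approximation uses Go's native fuzzing instead of the OSS-Fuzz toolchain; in this sketch, FuzzExample and its package path are placeholders, not real targets in this repo:

    # Sketch only: native 'go test -fuzz', not the libFuzzer-style build OSS-Fuzz performs.
    go test -fuzz=FuzzExample -fuzztime=300s ./some/pkg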

@@ -17,8 +17,6 @@ on:
pull_request:
# The branches below must be a subset of the branches above
branches: [ main ]
merge_group:
branches: [ main ]
schedule:
- cron: '31 14 * * 5'
@@ -49,7 +47,7 @@ jobs:
# Initializes the CodeQL tools for scanning.
- name: Initialize CodeQL
uses: github/codeql-action/init@v2
uses: github/codeql-action/init@v1
with:
languages: ${{ matrix.language }}
# If you wish to specify custom queries, you can do so here or in a config file.
@@ -60,7 +58,7 @@ jobs:
# Autobuild attempts to build any compiled languages (C/C++, C#, or Java).
# If this step fails, then you should remove it and run the build manually (see below)
- name: Autobuild
uses: github/codeql-action/autobuild@v2
uses: github/codeql-action/autobuild@v1
# Command-line programs to run using the OS shell.
# 📚 https://git.io/JvXDl
@@ -74,4 +72,4 @@ jobs:
# make release
- name: Perform CodeQL Analysis
uses: github/codeql-action/analyze@v2
uses: github/codeql-action/analyze@v1

@@ -0,0 +1,54 @@
name: Android-Cross
on:
push:
branches:
- main
pull_request:
branches:
- '*'
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
build:
runs-on: ubuntu-latest
if: "!contains(github.event.head_commit.message, '[ci skip]')"
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v3
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version-file: go.mod
id: go
- name: Android smoke build
# Super minimal Android build that doesn't even use CGO and doesn't build everything that's needed
# and is only arm64. But it's a smoke build: it's not meant to catch everything. But it'll catch
# some Android breakages early.
# TODO(bradfitz): better; see https://github.com/tailscale/tailscale/issues/4482
env:
GOOS: android
GOARCH: arm64
run: go install ./net/netns ./ipn/ipnlocal ./wgengine/magicsock/ ./wgengine/ ./wgengine/router/ ./wgengine/netstack ./util/dnsname/ ./ipn/ ./net/interfaces ./wgengine/router/ ./tailcfg/ ./types/logger/ ./net/dns ./hostinfo ./version
- uses: k0kubun/action-slack@v2.0.0
with:
payload: |
{
"attachments": [{
"text": "${{ job.status }}: ${{ github.workflow }} <https://github.com/${{ github.repository }}/commit/${{ github.sha }}/checks|${{ env.COMMIT_DATE }} #${{ env.COMMIT_NUMBER_OF_DAY }}> " +
"(<https://github.com/${{ github.repository }}/commit/${{ github.sha }}|" + "${{ github.sha }}".substring(0, 10) + ">) " +
"of ${{ github.repository }}@" + "${{ github.ref }}".split('/').reverse()[0] + " by ${{ github.event.head_commit.committer.name }}",
"color": "danger"
}]
}
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
if: failure() && github.event_name == 'push'

@@ -0,0 +1,62 @@
name: Darwin-Cross
on:
push:
branches:
- main
pull_request:
branches:
- '*'
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
build:
runs-on: ubuntu-latest
if: "!contains(github.event.head_commit.message, '[ci skip]')"
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v3
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version-file: go.mod
id: go
- name: macOS build cmd
env:
GOOS: darwin
GOARCH: amd64
run: go build ./cmd/...
- name: macOS build tests
env:
GOOS: darwin
GOARCH: amd64
run: for d in $(go list -f '{{if .TestGoFiles}}{{.Dir}}{{end}}' ./... ); do (echo $d; cd $d && go test -c ); done
- name: iOS build most
env:
GOOS: ios
GOARCH: arm64
run: go install ./ipn/... ./wgengine/ ./types/... ./control/controlclient
- uses: k0kubun/action-slack@v2.0.0
with:
payload: |
{
"attachments": [{
"text": "${{ job.status }}: ${{ github.workflow }} <https://github.com/${{ github.repository }}/commit/${{ github.sha }}/checks|${{ env.COMMIT_DATE }} #${{ env.COMMIT_NUMBER_OF_DAY }}> " +
"(<https://github.com/${{ github.repository }}/commit/${{ github.sha }}|" + "${{ github.sha }}".substring(0, 10) + ">) " +
"of ${{ github.repository }}@" + "${{ github.ref }}".split('/').reverse()[0] + " by ${{ github.event.head_commit.committer.name }}",
"color": "danger"
}]
}
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
if: failure() && github.event_name == 'push'
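
Like the Android job above and the FreeBSD, OpenBSD, Windows, and Wasm jobs below, this is a compile-only smoke test: it cross-builds the commands and the per-package test binaries without executing anything. A local sketch of the same check (the main CI workflow later in this compare uses the equivalent 'go test -exec=true' form):

    # Cross-build the commands, then build (but don't run) the tests.
    GOOS=darwin GOARCH=amd64 go build ./cmd/...
    GOOS=darwin GOARCH=amd64 go test -exec=true ./...  # 'true' stands in for running the test binaries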

@@ -0,0 +1,56 @@
name: FreeBSD-Cross
on:
push:
branches:
- main
pull_request:
branches:
- '*'
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
build:
runs-on: ubuntu-latest
if: "!contains(github.event.head_commit.message, '[ci skip]')"
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v3
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version-file: go.mod
id: go
- name: FreeBSD build cmd
env:
GOOS: freebsd
GOARCH: amd64
run: go build ./cmd/...
- name: FreeBSD build tests
env:
GOOS: freebsd
GOARCH: amd64
run: for d in $(go list -f '{{if .TestGoFiles}}{{.Dir}}{{end}}' ./... ); do (echo $d; cd $d && go test -c ); done
- uses: k0kubun/action-slack@v2.0.0
with:
payload: |
{
"attachments": [{
"text": "${{ job.status }}: ${{ github.workflow }} <https://github.com/${{ github.repository }}/commit/${{ github.sha }}/checks|${{ env.COMMIT_DATE }} #${{ env.COMMIT_NUMBER_OF_DAY }}> " +
"(<https://github.com/${{ github.repository }}/commit/${{ github.sha }}|" + "${{ github.sha }}".substring(0, 10) + ">) " +
"of ${{ github.repository }}@" + "${{ github.ref }}".split('/').reverse()[0] + " by ${{ github.event.head_commit.committer.name }}",
"color": "danger"
}]
}
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
if: failure() && github.event_name == 'push'

@@ -0,0 +1,56 @@
name: OpenBSD-Cross
on:
push:
branches:
- main
pull_request:
branches:
- '*'
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
build:
runs-on: ubuntu-latest
if: "!contains(github.event.head_commit.message, '[ci skip]')"
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v3
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version-file: go.mod
id: go
- name: OpenBSD build cmd
env:
GOOS: openbsd
GOARCH: amd64
run: go build ./cmd/...
- name: OpenBSD build tests
env:
GOOS: openbsd
GOARCH: amd64
run: for d in $(go list -f '{{if .TestGoFiles}}{{.Dir}}{{end}}' ./... ); do (echo $d; cd $d && go test -c ); done
- uses: k0kubun/action-slack@v2.0.0
with:
payload: |
{
"attachments": [{
"text": "${{ job.status }}: ${{ github.workflow }} <https://github.com/${{ github.repository }}/commit/${{ github.sha }}/checks|${{ env.COMMIT_DATE }} #${{ env.COMMIT_NUMBER_OF_DAY }}> " +
"(<https://github.com/${{ github.repository }}/commit/${{ github.sha }}|" + "${{ github.sha }}".substring(0, 10) + ">) " +
"of ${{ github.repository }}@" + "${{ github.ref }}".split('/').reverse()[0] + " by ${{ github.event.head_commit.committer.name }}",
"color": "danger"
}]
}
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
if: failure() && github.event_name == 'push'

@@ -0,0 +1,57 @@
name: Wasm-Cross
on:
push:
branches:
- main
pull_request:
branches:
- '*'
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
build:
runs-on: ubuntu-latest
if: "!contains(github.event.head_commit.message, '[ci skip]')"
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v3
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version-file: go.mod
id: go
- name: Wasm client build
env:
GOOS: js
GOARCH: wasm
run: go build ./cmd/tsconnect/wasm ./cmd/tailscale/cli
- name: tsconnect static build
# Use our custom Go toolchain; we set build tags (to control binary size)
# that depend on it.
run: |
./tool/go run ./cmd/tsconnect --fast-compression build
./tool/go run ./cmd/tsconnect build-pkg
- uses: k0kubun/action-slack@v2.0.0
with:
payload: |
{
"attachments": [{
"text": "${{ job.status }}: ${{ github.workflow }} <https://github.com/${{ github.repository }}/commit/${{ github.sha }}/checks|${{ env.COMMIT_DATE }} #${{ env.COMMIT_NUMBER_OF_DAY }}> " +
"(<https://github.com/${{ github.repository }}/commit/${{ github.sha }}|" + "${{ github.sha }}".substring(0, 10) + ">) " +
"of ${{ github.repository }}@" + "${{ github.ref }}".split('/').reverse()[0] + " by ${{ github.event.head_commit.committer.name }}",
"color": "danger"
}]
}
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
if: failure() && github.event_name == 'push'

@@ -0,0 +1,56 @@
name: Windows-Cross
on:
push:
branches:
- main
pull_request:
branches:
- '*'
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
build:
runs-on: ubuntu-latest
if: "!contains(github.event.head_commit.message, '[ci skip]')"
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v3
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version-file: go.mod
id: go
- name: Windows build cmd
env:
GOOS: windows
GOARCH: amd64
run: go build ./cmd/...
- name: Windows build tests
env:
GOOS: windows
GOARCH: amd64
run: for d in $(go list -f '{{if .TestGoFiles}}{{.Dir}}{{end}}' ./... ); do (echo $d; cd $d && go test -c ); done
- uses: k0kubun/action-slack@v2.0.0
with:
payload: |
{
"attachments": [{
"text": "${{ job.status }}: ${{ github.workflow }} <https://github.com/${{ github.repository }}/commit/${{ github.sha }}/checks|${{ env.COMMIT_DATE }} #${{ env.COMMIT_NUMBER_OF_DAY }}> " +
"(<https://github.com/${{ github.repository }}/commit/${{ github.sha }}|" + "${{ github.sha }}".substring(0, 10) + ">) " +
"of ${{ github.repository }}@" + "${{ github.ref }}".split('/').reverse()[0] + " by ${{ github.event.head_commit.committer.name }}",
"color": "danger"
}]
}
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
if: failure() && github.event_name == 'push'

.github/workflows/depaware.yml

@@ -0,0 +1,32 @@
name: depaware
on:
push:
branches:
- main
pull_request:
branches:
- '*'
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Check out code
uses: actions/checkout@v3
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version-file: go.mod
- name: depaware
run: go run github.com/tailscale/depaware --check
tailscale.com/cmd/tailscaled
tailscale.com/cmd/tailscale
tailscale.com/cmd/derper
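
depaware compares each listed binary's transitive import graph against a depaware.txt file committed alongside the package, so an unexpected new dependency fails CI. When a dependency change is intentional, the expected list is regenerated rather than edited by hand, as in this sketch (run from the repo root; mirrors the Makefile's updatedeps target later in this compare):

    go run github.com/tailscale/depaware --update \
        tailscale.com/cmd/tailscaled \
        tailscale.com/cmd/tailscale \
        tailscale.com/cmd/derper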

@@ -1,15 +0,0 @@
name: "Dockerfile build"
on:
push:
branches:
- main
pull_request:
branches:
- "*"
jobs:
deploy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- name: "Build Docker image"
run: docker build .

@@ -17,7 +17,7 @@ concurrency:
cancel-in-progress: true
jobs:
update-licenses:
tailscale:
runs-on: ubuntu-latest
steps:
@@ -25,7 +25,7 @@ jobs:
uses: actions/checkout@v3
- name: Set up Go
uses: actions/setup-go@v4
uses: actions/setup-go@v3
with:
go-version-file: go.mod
@@ -42,7 +42,7 @@ jobs:
go-licenses report tailscale.com/cmd/tailscale tailscale.com/cmd/tailscaled > licenses/tailscale.md --template .github/licenses.tmpl
- name: Get access token
uses: tibdex/github-app-token@b62528385c34dbc9f38e5f4225ac829252d1ea92 # v1.8.0
uses: tibdex/github-app-token@f717b5ecd4534d3c4df4ce9b5c1c2214f0f7cd06 # v1.6.0
id: generate-token
with:
app_id: ${{ secrets.LICENSING_APP_ID }}
@@ -50,11 +50,11 @@ jobs:
private_key: ${{ secrets.LICENSING_APP_PRIVATE_KEY }}
- name: Send pull request
uses: peter-evans/create-pull-request@284f54f989303d2699d373481a0cfa13ad5a6666 #v5.0.1
uses: peter-evans/create-pull-request@18f90432bedd2afd6a825469ffd38aa24712a91d #v4.1.1
with:
token: ${{ steps.generate-token.outputs.token }}
author: License Updater <noreply+license-updater@tailscale.com>
committer: License Updater <noreply+license-updater@tailscale.com>
author: License Updater <noreply@tailscale.com>
committer: License Updater <noreply@tailscale.com>
branch: licenses/cli
commit-message: "licenses: update tailscale{,d} licenses"
title: "licenses: update tailscale{,d} licenses"

@@ -0,0 +1,42 @@
name: go generate
on:
push:
branches:
- main
- "release-branch/*"
pull_request:
branches:
- "*"
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
check:
runs-on: ubuntu-latest
steps:
- name: Check out code
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version-file: go.mod
- name: check 'go generate' is clean
run: |
if [[ "${{github.ref}}" == release-branch/* ]]
then
pkgs=$(go list ./... | grep -v dnsfallback)
else
pkgs=$(go list ./... | grep -v dnsfallback)
fi
go generate $pkgs
echo
echo
git diff --name-only --exit-code || (echo "The files above need updating. Please run 'go generate'."; exit 1)

@@ -0,0 +1,35 @@
name: go mod tidy
on:
push:
branches:
- main
pull_request:
branches:
- "*"
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
check:
runs-on: ubuntu-latest
steps:
- name: Check out code
uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version-file: go.mod
- name: check 'go mod tidy' is clean
run: |
go mod tidy
echo
echo
git diff --name-only --exit-code || (echo "Please run 'go mod tidy'."; exit 1)

@@ -1,40 +0,0 @@
name: golangci-lint
on:
# For now, only lint pull requests, not the main branches.
pull_request:
# TODO(andrew): enable for main branch after an initial waiting period.
#push:
# branches:
# - main
workflow_dispatch:
permissions:
contents: read
pull-requests: read
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
golangci:
name: lint
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version-file: go.mod
cache: false
- name: golangci-lint
# Note: this is the 'v3' tag as of 2023-04-17
uses: golangci/golangci-lint-action@639cd343e1d3b897ff35927a75193d57cfcba299
with:
version: v1.52.2
# Show only new issues if it's a pull request.
only-new-issues: true

@@ -1,102 +0,0 @@
name: test installer.sh
on:
push:
branches:
- "main"
paths:
- scripts/installer.sh
pull_request:
branches:
- "*"
paths:
- scripts/installer.sh
jobs:
test:
strategy:
# Don't abort the entire matrix if one element fails.
fail-fast: false
# Don't start all of these at once, which could saturate Github workers.
max-parallel: 4
matrix:
image:
# This is a list of Docker images against which we test our installer.
# If you find that some of these no longer exist, please feel free
# to remove them from the list.
# When adding new images, please only use official ones.
- "debian:oldstable-slim"
- "debian:stable-slim"
- "debian:testing-slim"
- "debian:sid-slim"
- "ubuntu:18.04"
- "ubuntu:20.04"
- "ubuntu:22.04"
- "ubuntu:22.10"
- "ubuntu:23.04"
- "elementary/docker:stable"
- "elementary/docker:unstable"
- "parrotsec/core:lts-amd64"
- "parrotsec/core:latest"
- "kalilinux/kali-rolling"
- "kalilinux/kali-dev"
- "oraclelinux:9"
- "oraclelinux:8"
- "fedora:latest"
- "rockylinux:8.7"
- "rockylinux:9"
- "amazonlinux:latest"
- "opensuse/leap:latest"
- "opensuse/tumbleweed:latest"
- "archlinux:latest"
- "alpine:3.14"
- "alpine:latest"
- "alpine:edge"
deps:
# Run all images installing curl as a dependency.
- curl
include:
# Check a few images with wget rather than curl.
- { image: "debian:oldstable-slim", deps: "wget" }
- { image: "debian:sid-slim", deps: "wget" }
- { image: "ubuntu:23.04", deps: "wget" }
# Ubuntu 16.04 also needs apt-transport-https installed.
- { image: "ubuntu:16.04", deps: "curl apt-transport-https" }
- { image: "ubuntu:16.04", deps: "wget apt-transport-https" }
runs-on: ubuntu-latest
container:
image: ${{ matrix.image }}
options: --user root
steps:
- name: install dependencies (yum)
# tar and gzip are needed by the actions/checkout below.
run: yum install -y --allowerasing tar gzip ${{ matrix.deps }}
if: |
contains(matrix.image, 'centos')
|| contains(matrix.image, 'oraclelinux')
|| contains(matrix.image, 'fedora')
|| contains(matrix.image, 'amazonlinux')
- name: install dependencies (zypper)
# tar and gzip are needed by the actions/checkout below.
run: zypper --non-interactive install tar gzip
if: contains(matrix.image, 'opensuse')
- name: install dependencies (apt-get)
run: |
apt-get update
apt-get install -y ${{ matrix.deps }}
if: |
contains(matrix.image, 'debian')
|| contains(matrix.image, 'ubuntu')
|| contains(matrix.image, 'elementary')
|| contains(matrix.image, 'parrotsec')
|| contains(matrix.image, 'kalilinux')
- name: checkout
uses: actions/checkout@v3
- name: run installer
run: scripts/installer.sh
# Package installation can fail in docker because systemd is not running
# as PID 1, so ignore errors at this step. The real check is the
# `tailscale --version` command below.
continue-on-error: true
- name: check tailscale version
run: tailscale --version
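
To spot-check the installer against a single image without running the whole matrix, one rough local equivalent mirrors the apt-get leg of the job above (assumes Docker and a checkout of this repository; package installation may still fail without systemd as PID 1, as the workflow itself notes):

    docker run --rm -v "$PWD:/src" -w /src debian:stable-slim sh -c '
      apt-get update && apt-get install -y curl &&
      scripts/installer.sh;
      tailscale --version'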

.github/workflows/license.yml

@@ -0,0 +1,44 @@
name: license
on:
push:
branches:
- main
pull_request:
branches:
- '*'
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Check out code
uses: actions/checkout@v3
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version-file: go.mod
- name: Run license checker
run: ./scripts/check_license_headers.sh .
- uses: k0kubun/action-slack@v2.0.0
with:
payload: |
{
"attachments": [{
"text": "${{ job.status }}: ${{ github.workflow }} <https://github.com/${{ github.repository }}/commit/${{ github.sha }}/checks|${{ env.COMMIT_DATE }} #${{ env.COMMIT_NUMBER_OF_DAY }}> " +
"(<https://github.com/${{ github.repository }}/commit/${{ github.sha }}|" + "${{ github.sha }}".substring(0, 10) + ">) " +
"of ${{ github.repository }}@" + "${{ github.ref }}".split('/').reverse()[0] + " by ${{ github.event.head_commit.committer.name }}",
"color": "danger"
}]
}
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
if: failure() && github.event_name == 'push'

@@ -0,0 +1,66 @@
name: Linux race
on:
push:
branches:
- main
pull_request:
branches:
- '*'
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
build:
runs-on: ubuntu-latest
if: "!contains(github.event.head_commit.message, '[ci skip]')"
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v3
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version-file: go.mod
id: go
- name: Basic build
run: go build ./cmd/...
- name: Run tests and benchmarks with -race flag on linux
run: go test -race -bench=. -benchtime=1x ./...
- name: Check that no tracked files in the repo have been modified
run: git diff --no-ext-diff --name-only --exit-code || (echo "Build/test modified the files above."; exit 1)
- name: Check that no files have been added to the repo
run: |
# Note: The "error: pathspec..." you see below is normal!
# In the success case in which there are no new untracked files,
# git ls-files complains about the pathspec not matching anything.
# That's OK. It's not worth the effort to suppress. Please ignore it.
if git ls-files --others --exclude-standard --directory --no-empty-directory --error-unmatch -- ':/*'
then
echo "Build/test created untracked files in the repo (file names above)."
exit 1
fi
- uses: k0kubun/action-slack@v2.0.0
with:
payload: |
{
"attachments": [{
"text": "${{ job.status }}: ${{ github.workflow }} <https://github.com/${{ github.repository }}/commit/${{ github.sha }}/checks|${{ env.COMMIT_DATE }} #${{ env.COMMIT_NUMBER_OF_DAY }}> " +
"(<https://github.com/${{ github.repository }}/commit/${{ github.sha }}|" + "${{ github.sha }}".substring(0, 10) + ">) " +
"of ${{ github.repository }}@" + "${{ github.ref }}".split('/').reverse()[0] + " by ${{ github.event.head_commit.committer.name }}",
"color": "danger"
}]
}
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
if: failure() && github.event_name == 'push'

.github/workflows/linux.yml

@@ -0,0 +1,80 @@
name: Linux
on:
push:
branches:
- main
pull_request:
branches:
- '*'
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
build:
runs-on: ubuntu-latest
if: "!contains(github.event.head_commit.message, '[ci skip]')"
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v3
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version-file: go.mod
id: go
- name: Basic build
run: go build ./cmd/...
- name: Build variants
run: |
go install --tags=ts_include_cli ./cmd/tailscaled
go install --tags=ts_omit_aws ./cmd/tailscaled
- name: Get QEMU
run: |
# The qemu in Ubuntu 20.04 (Focal) is too old; we need 5.x something
# to run Go binaries. 5.2.0 (Debian bullseye) empirically works, so we
# use this PPA, which brings in a modern qemu.
sudo add-apt-repository -y ppa:jacob/virtualisation
sudo apt-get -y update
sudo apt-get -y install qemu-user
- name: Run tests on linux
run: go test -bench=. -benchtime=1x ./...
- name: Check that no tracked files in the repo have been modified
run: git diff --no-ext-diff --name-only --exit-code || (echo "Build/test modified the files above."; exit 1)
- name: Check that no files have been added to the repo
run: |
# Note: The "error: pathspec..." you see below is normal!
# In the success case in which there are no new untracked files,
# git ls-files complains about the pathspec not matching anything.
# That's OK. It's not worth the effort to suppress. Please ignore it.
if git ls-files --others --exclude-standard --directory --no-empty-directory --error-unmatch -- ':/*'
then
echo "Build/test created untracked files in the repo (file names above)."
exit 1
fi
- uses: k0kubun/action-slack@v2.0.0
with:
payload: |
{
"attachments": [{
"text": "${{ job.status }}: ${{ github.workflow }} <https://github.com/${{ github.repository }}/commit/${{ github.sha }}/checks|${{ env.COMMIT_DATE }} #${{ env.COMMIT_NUMBER_OF_DAY }}> " +
"(<https://github.com/${{ github.repository }}/commit/${{ github.sha }}|" + "${{ github.sha }}".substring(0, 10) + ">) " +
"of ${{ github.repository }}@" + "${{ github.ref }}".split('/').reverse()[0] + " by ${{ github.event.head_commit.committer.name }}",
"color": "danger"
}]
}
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
if: failure() && github.event_name == 'push'
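
The QEMU step exists because parts of the test suite (for example tstest/archtest, mentioned in the main CI workflow later in this compare) execute cross-compiled Go binaries under user-mode emulation. A sketch of what qemu-user enables, assuming it is installed along with binfmt support:

    # Build an arm64 binary on an amd64 host, then run it under emulation.
    GOOS=linux GOARCH=arm64 CGO_ENABLED=0 go build -o /tmp/tailscale.arm64 ./cmd/tailscale
    qemu-aarch64 /tmp/tailscale.arm64 --help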

.github/workflows/linux32.yml

@@ -0,0 +1,66 @@
name: Linux 32-bit
on:
push:
branches:
- main
pull_request:
branches:
- '*'
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
build:
runs-on: ubuntu-latest
if: "!contains(github.event.head_commit.message, '[ci skip]')"
steps:
- name: Check out code into the Go module directory
uses: actions/checkout@v3
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version-file: go.mod
id: go
- name: Basic build
run: GOARCH=386 go build ./cmd/...
- name: Run tests on linux
run: GOARCH=386 go test -bench=. -benchtime=1x ./...
- name: Check that no tracked files in the repo have been modified
run: git diff --no-ext-diff --name-only --exit-code || (echo "Build/test modified the files above."; exit 1)
- name: Check that no files have been added to the repo
run: |
# Note: The "error: pathspec..." you see below is normal!
# In the success case in which there are no new untracked files,
# git ls-files complains about the pathspec not matching anything.
# That's OK. It's not worth the effort to suppress. Please ignore it.
if git ls-files --others --exclude-standard --directory --no-empty-directory --error-unmatch -- ':/*'
then
echo "Build/test created untracked files in the repo (file names above)."
exit 1
fi
- uses: k0kubun/action-slack@v2.0.0
with:
payload: |
{
"attachments": [{
"text": "${{ job.status }}: ${{ github.workflow }} <https://github.com/${{ github.repository }}/commit/${{ github.sha }}/checks|${{ env.COMMIT_DATE }} #${{ env.COMMIT_NUMBER_OF_DAY }}> " +
"(<https://github.com/${{ github.repository }}/commit/${{ github.sha }}|" + "${{ github.sha }}".substring(0, 10) + ">) " +
"of ${{ github.repository }}@" + "${{ github.ref }}".split('/').reverse()[0] + " by ${{ github.event.head_commit.committer.name }}",
"color": "danger"
}]
}
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
if: failure() && github.event_name == 'push'

@@ -0,0 +1,112 @@
name: static-analysis
on:
push:
branches:
- main
pull_request:
branches:
- '*'
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
gofmt:
runs-on: ubuntu-latest
steps:
- name: Check out code
uses: actions/checkout@v3
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version-file: go.mod
- name: Run gofmt (goimports)
run: go run golang.org/x/tools/cmd/goimports -d --format-only .
- uses: k0kubun/action-slack@v2.0.0
with:
payload: |
{
"attachments": [{
"text": "${{ job.status }}: ${{ github.workflow }} <https://github.com/${{ github.repository }}/commit/${{ github.sha }}/checks|${{ env.COMMIT_DATE }} #${{ env.COMMIT_NUMBER_OF_DAY }}> " +
"(<https://github.com/${{ github.repository }}/commit/${{ github.sha }}|" + "${{ github.sha }}".substring(0, 10) + ">) " +
"of ${{ github.repository }}@" + "${{ github.ref }}".split('/').reverse()[0] + " by ${{ github.event.head_commit.committer.name }}",
"color": "danger"
}]
}
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
if: failure() && github.event_name == 'push'
vet:
runs-on: ubuntu-latest
steps:
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: 1.19
- name: Check out code
uses: actions/checkout@v3
- name: Run go vet
run: go vet ./...
- uses: k0kubun/action-slack@v2.0.0
with:
payload: |
{
"attachments": [{
"text": "${{ job.status }}: ${{ github.workflow }} <https://github.com/${{ github.repository }}/commit/${{ github.sha }}/checks|${{ env.COMMIT_DATE }} #${{ env.COMMIT_NUMBER_OF_DAY }}> " +
"(<https://github.com/${{ github.repository }}/commit/${{ github.sha }}|" + "${{ github.sha }}".substring(0, 10) + ">) " +
"of ${{ github.repository }}@" + "${{ github.ref }}".split('/').reverse()[0] + " by ${{ github.event.head_commit.committer.name }}",
"color": "danger"
}]
}
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
if: failure() && github.event_name == 'push'
staticcheck:
runs-on: ubuntu-latest
strategy:
matrix:
goos: [linux, windows, darwin]
goarch: [amd64]
include:
- goos: windows
goarch: 386
steps:
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version: 1.19
- name: Check out code
uses: actions/checkout@v3
- name: Install staticcheck
run: "GOBIN=~/.local/bin go install honnef.co/go/tools/cmd/staticcheck"
- name: Print staticcheck version
run: "staticcheck -version"
- name: "Run staticcheck (${{ matrix.goos }}/${{ matrix.goarch }})"
env:
GOOS: ${{ matrix.goos }}
GOARCH: ${{ matrix.goarch }}
run: "staticcheck -- $(go list ./... | grep -v tempfork)"
- uses: k0kubun/action-slack@v2.0.0
with:
payload: |
{
"attachments": [{
"text": "${{ job.status }}: ${{ github.workflow }} <https://github.com/${{ github.repository }}/commit/${{ github.sha }}/checks|${{ env.COMMIT_DATE }} #${{ env.COMMIT_NUMBER_OF_DAY }}> " +
"(<https://github.com/${{ github.repository }}/commit/${{ github.sha }}|" + "${{ github.sha }}".substring(0, 10) + ">) " +
"of ${{ github.repository }}@" + "${{ github.ref }}".split('/').reverse()[0] + " by ${{ github.event.head_commit.committer.name }}",
"color": "danger"
}]
}
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
if: failure() && github.event_name == 'push'
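
All three jobs in this workflow are straightforward to reproduce locally. A sketch covering the amd64 targets, assuming Go is installed and GOBIN is writable:

    go run golang.org/x/tools/cmd/goimports -d --format-only .
    go vet ./...
    GOBIN="$HOME/.local/bin" go install honnef.co/go/tools/cmd/staticcheck
    for os in linux windows darwin; do
      GOOS=$os GOARCH=amd64 "$HOME/.local/bin/staticcheck" -- $(go list ./... | grep -v tempfork)
    done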

@@ -1,500 +0,0 @@
# This is our main "CI tests" workflow. It runs everything that should run on
# both PRs and merged commits, and for the latter reports failures to slack.
name: CI
env:
# Our fuzz job, powered by OSS-Fuzz, fails periodically because we upgrade to
# new Go versions very eagerly. OSS-Fuzz is a little more conservative, and
# ends up being unable to compile our code.
#
# When this happens, we want to disable the fuzz target until OSS-Fuzz catches
# up. However, we also don't want to forget to turn it back on when OSS-Fuzz
# can once again build our code.
#
# This variable toggles the fuzz job between two modes:
# - false: we expect fuzzing to be happy, and should report failure if it's not.
# - true: we expect fuzzing to be broken, and should report failure if it starts working.
TS_FUZZ_CURRENTLY_BROKEN: false
on:
push:
branches:
- "main"
- "release-branch/*"
pull_request:
branches:
- "*"
merge_group:
branches:
- "main"
concurrency:
# For PRs, later CI runs preempt previous ones. e.g. a force push on a PR
# cancels running CI jobs and starts all new ones.
#
# For non-PR pushes, concurrency.group needs to be unique for every distinct
# CI run we want to have happen. Use run_id, which in practice means all
# non-PR CI runs will be allowed to run without preempting each other.
group: ${{ github.workflow }}-$${{ github.pull_request.number || github.run_id }}
cancel-in-progress: true
jobs:
test:
strategy:
fail-fast: false # don't abort the entire matrix if one element fails
matrix:
include:
- goarch: amd64
- goarch: amd64
buildflags: "-race"
- goarch: "386" # thanks yaml
runs-on: ubuntu-22.04
steps:
- name: checkout
uses: actions/checkout@v3
- name: Restore Cache
uses: actions/cache@v3
with:
# Note: unlike the other setups, this is only grabbing the mod download
# cache, rather than the whole mod directory, as the download cache
# contains zips that can be unpacked in parallel faster than they can be
# fetched and extracted by tar
path: |
~/.cache/go-build
~/go/pkg/mod/cache
~\AppData\Local\go-build
# The -2- here should be incremented when the scheme of data to be
# cached changes (e.g. path above changes).
key: ${{ github.job }}-${{ runner.os }}-${{ matrix.goarch }}-${{ matrix.buildflags }}-go-2-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
restore-keys: |
${{ github.job }}-${{ runner.os }}-${{ matrix.goarch }}-${{ matrix.buildflags }}-go-2-${{ hashFiles('**/go.sum') }}
${{ github.job }}-${{ runner.os }}-${{ matrix.goarch }}-${{ matrix.buildflags }}-go-2-
- name: build all
run: ./tool/go build ${{matrix.buildflags}} ./...
env:
GOARCH: ${{ matrix.goarch }}
- name: build variant CLIs
run: |
export TS_USE_TOOLCHAIN=1
./build_dist.sh --extra-small ./cmd/tailscaled
./build_dist.sh --box ./cmd/tailscaled
./build_dist.sh --extra-small --box ./cmd/tailscaled
rm -f tailscaled
env:
GOARCH: ${{ matrix.goarch }}
- name: get qemu # for tstest/archtest
if: matrix.goarch == 'amd64' && matrix.variant == ''
run: |
sudo apt-get -y update
sudo apt-get -y install qemu-user
- name: build test wrapper
run: ./tool/go build -o /tmp/testwrapper ./cmd/testwrapper
- name: test all
run: PATH=$PWD/tool:$PATH /tmp/testwrapper ./... ${{matrix.buildflags}}
env:
GOARCH: ${{ matrix.goarch }}
- name: bench all
run: PATH=$PWD/tool:$PATH /tmp/testwrapper ./... ${{matrix.buildflags}} -bench=. -benchtime=1x -run=^$
env:
GOARCH: ${{ matrix.goarch }}
- name: check that no tracked files changed
run: git diff --no-ext-diff --name-only --exit-code || (echo "Build/test modified the files above."; exit 1)
- name: check that no new files were added
run: |
# Note: The "error: pathspec..." you see below is normal!
# In the success case in which there are no new untracked files,
# git ls-files complains about the pathspec not matching anything.
# That's OK. It's not worth the effort to suppress. Please ignore it.
if git ls-files --others --exclude-standard --directory --no-empty-directory --error-unmatch -- ':/*'
then
echo "Build/test created untracked files in the repo (file names above)."
exit 1
fi
windows:
runs-on: windows-2022
steps:
- name: checkout
uses: actions/checkout@v3
- name: Install Go
uses: actions/setup-go@v4
with:
go-version-file: go.mod
cache: false
- name: Restore Cache
uses: actions/cache@v3
with:
# Note: unlike the other setups, this is only grabbing the mod download
# cache, rather than the whole mod directory, as the download cache
# contains zips that can be unpacked in parallel faster than they can be
# fetched and extracted by tar
path: |
~/.cache/go-build
~/go/pkg/mod/cache
~\AppData\Local\go-build
# The -2- here should be incremented when the scheme of data to be
# cached changes (e.g. path above changes).
key: ${{ github.job }}-${{ runner.os }}-go-2-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
restore-keys: |
${{ github.job }}-${{ runner.os }}-go-2-${{ hashFiles('**/go.sum') }}
${{ github.job }}-${{ runner.os }}-go-2-
- name: test
# Don't use -bench=. -benchtime=1x.
# Somewhere in the layers (powershell?)
# the equals signs cause great confusion.
run: go test -bench . -benchtime 1x ./...
vm:
runs-on: ["self-hosted", "linux", "vm"]
# VM tests run with some privileges, don't let them run on 3p PRs.
if: github.repository == 'tailscale/tailscale'
steps:
- name: checkout
uses: actions/checkout@v3
- name: Run VM tests
run: ./tool/go test ./tstest/integration/vms -v -no-s3 -run-vm-tests -run=TestRunUbuntu2004
env:
HOME: "/tmp"
TMPDIR: "/tmp"
XDG_CACHE_HOME: "/var/lib/ghrunner/cache"
cross: # cross-compile checks, build only.
strategy:
fail-fast: false # don't abort the entire matrix if one element fails
matrix:
include:
# Note: linux/amd64 is not in this matrix, because that goos/goarch is
# tested more exhaustively in the 'test' job above.
- goos: linux
goarch: arm64
- goos: linux
goarch: "386" # thanks yaml
- goos: linux
goarch: loong64
- goos: linux
goarch: arm
goarm: "5"
- goos: linux
goarch: arm
goarm: "7"
# macOS
- goos: darwin
goarch: amd64
- goos: darwin
goarch: arm64
# Windows
- goos: windows
goarch: amd64
- goos: windows
goarch: arm64
# BSDs
- goos: freebsd
goarch: amd64
- goos: openbsd
goarch: amd64
runs-on: ubuntu-22.04
steps:
- name: checkout
uses: actions/checkout@v3
- name: Restore Cache
uses: actions/cache@v3
with:
# Note: unlike the other setups, this is only grabbing the mod download
# cache, rather than the whole mod directory, as the download cache
# contains zips that can be unpacked in parallel faster than they can be
# fetched and extracted by tar
path: |
~/.cache/go-build
~/go/pkg/mod/cache
~\AppData\Local\go-build
# The -2- here should be incremented when the scheme of data to be
# cached changes (e.g. path above changes).
key: ${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
restore-keys: |
${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2-${{ hashFiles('**/go.sum') }}
${{ github.job }}-${{ runner.os }}-${{ matrix.goos }}-${{ matrix.goarch }}-go-2-
- name: build all
run: ./tool/go build ./cmd/...
env:
GOOS: ${{ matrix.goos }}
GOARCH: ${{ matrix.goarch }}
GOARM: ${{ matrix.goarm }}
CGO_ENABLED: "0"
- name: build tests
run: ./tool/go test -exec=true ./...
env:
GOOS: ${{ matrix.goos }}
GOARCH: ${{ matrix.goarch }}
CGO_ENABLED: "0"
ios: # similar to cross above, but iOS can't build most of the repo. So, just
# make it build a few smoke packages.
runs-on: ubuntu-22.04
steps:
- name: checkout
uses: actions/checkout@v3
- name: build some
run: ./tool/go build ./ipn/... ./wgengine/ ./types/... ./control/controlclient
env:
GOOS: ios
GOARCH: arm64
android:
# similar to cross above, but android fails to build a few pieces of the
# repo. We should fix those pieces, they're small, but as a stepping stone,
# only test the subset of android that our past smoke test checked.
runs-on: ubuntu-22.04
steps:
- name: checkout
uses: actions/checkout@v3
# Super minimal Android build that doesn't even use CGO and doesn't build everything that's needed
# and is only arm64. But it's a smoke build: it's not meant to catch everything. But it'll catch
# some Android breakages early.
# TODO(bradfitz): better; see https://github.com/tailscale/tailscale/issues/4482
- name: build some
run: ./tool/go install ./net/netns ./ipn/ipnlocal ./wgengine/magicsock/ ./wgengine/ ./wgengine/router/ ./wgengine/netstack ./util/dnsname/ ./ipn/ ./net/interfaces ./wgengine/router/ ./tailcfg/ ./types/logger/ ./net/dns ./hostinfo ./version
env:
GOOS: android
GOARCH: arm64
wasm: # builds tsconnect, which is the only wasm build we support
runs-on: ubuntu-22.04
steps:
- name: checkout
uses: actions/checkout@v3
- name: Restore Cache
uses: actions/cache@v3
with:
# Note: unlike the other setups, this is only grabbing the mod download
# cache, rather than the whole mod directory, as the download cache
# contains zips that can be unpacked in parallel faster than they can be
# fetched and extracted by tar
path: |
~/.cache/go-build
~/go/pkg/mod/cache
~\AppData\Local\go-build
# The -2- here should be incremented when the scheme of data to be
# cached changes (e.g. path above changes).
key: ${{ github.job }}-${{ runner.os }}-go-2-${{ hashFiles('**/go.sum') }}-${{ github.run_id }}
restore-keys: |
${{ github.job }}-${{ runner.os }}-go-2-${{ hashFiles('**/go.sum') }}
${{ github.job }}-${{ runner.os }}-go-2-
- name: build tsconnect client
run: ./tool/go build ./cmd/tsconnect/wasm ./cmd/tailscale/cli
env:
GOOS: js
GOARCH: wasm
- name: build tsconnect server
# Note, no GOOS/GOARCH in env on this build step, we're running a build
# tool that handles the build itself.
run: |
./tool/go run ./cmd/tsconnect --fast-compression build
./tool/go run ./cmd/tsconnect --fast-compression build-pkg
tailscale_go: # Subset of tests that depend on our custom Go toolchain.
runs-on: ubuntu-22.04
steps:
- name: checkout
uses: actions/checkout@v3
- name: test tailscale_go
run: ./tool/go test -tags=tailscale_go,ts_enable_sockstats ./net/sockstats/...
fuzz:
# This target periodically breaks (see TS_FUZZ_CURRENTLY_BROKEN at the top
# of the file), so it's more complex than usual: the 'build fuzzers' step
# might fail, and depending on the value of 'TS_FUZZ_CURRENTLY_BROKEN', that
# might or might not be fine. The steps after the build figure out whether
# the success/failure is expected, and appropriately pass/fail the job
# overall accordingly.
#
# Practically, this means that all steps after 'build fuzzers' must have an
# explicit 'if' condition, because the default condition for steps is
# 'success()', meaning "only run this if no previous steps failed".
if: github.event_name == 'pull_request'
runs-on: ubuntu-22.04
steps:
- name: build fuzzers
id: build
uses: google/oss-fuzz/infra/cifuzz/actions/build_fuzzers@master
# continue-on-error makes steps.build.conclusion be 'success' even if
# steps.build.outcome is 'failure'. This means this step does not
# contribute to the job's overall pass/fail evaluation.
continue-on-error: true
with:
oss-fuzz-project-name: 'tailscale'
dry-run: false
language: go
- name: report unexpectedly broken fuzz build
if: steps.build.outcome == 'failure' && env.TS_FUZZ_CURRENTLY_BROKEN != 'true'
run: |
echo "fuzzer build failed, see above for why"
echo "if the failure is due to OSS-Fuzz not being on the latest Go yet,"
echo "set TS_FUZZ_CURRENTLY_BROKEN=true in .github/workflows/test.yml"
echo "to temporarily disable fuzzing until OSS-Fuzz works again."
exit 1
- name: report unexpectedly working fuzz build
if: steps.build.outcome == 'success' && env.TS_FUZZ_CURRENTLY_BROKEN == 'true'
run: |
echo "fuzzer build succeeded, but we expect it to be broken"
echo "please set TS_FUZZ_CURRENTLY_BROKEN=false in .github/workflows/test.yml"
echo "to reenable fuzz testing"
exit 1
- name: run fuzzers
id: run
# Run the fuzzers whenever they're able to build, even if we're going to
# report a failure because TS_FUZZ_CURRENTLY_BROKEN is set to the wrong
# value.
if: steps.build.outcome == 'success'
uses: google/oss-fuzz/infra/cifuzz/actions/run_fuzzers@master
with:
oss-fuzz-project-name: 'tailscale'
fuzz-seconds: 300
dry-run: false
language: go
- name: upload crash
uses: actions/upload-artifact@v3
if: steps.run.outcome != 'success' && steps.build.outcome == 'success'
with:
name: artifacts
path: ./out/artifacts
depaware:
runs-on: ubuntu-22.04
steps:
- name: checkout
uses: actions/checkout@v3
- name: check depaware
run: |
export PATH=$(./tool/go env GOROOT)/bin:$PATH
find . -name 'depaware.txt' | xargs -n1 dirname | xargs ./tool/go run github.com/tailscale/depaware --check
go_generate:
runs-on: ubuntu-22.04
steps:
- name: checkout
uses: actions/checkout@v3
- name: check that 'go generate' is clean
run: |
pkgs=$(./tool/go list ./... | grep -v dnsfallback)
./tool/go generate $pkgs
echo
echo
git diff --name-only --exit-code || (echo "The files above need updating. Please run 'go generate'."; exit 1)
go_mod_tidy:
runs-on: ubuntu-22.04
steps:
- name: checkout
uses: actions/checkout@v3
- name: check that 'go mod tidy' is clean
run: |
./tool/go mod tidy
echo
echo
git diff --name-only --exit-code || (echo "Please run 'go mod tidy'."; exit 1)
licenses:
runs-on: ubuntu-22.04
steps:
- name: checkout
uses: actions/checkout@v3
- name: check licenses
run: ./scripts/check_license_headers.sh .
staticcheck:
runs-on: ubuntu-22.04
strategy:
fail-fast: false # don't abort the entire matrix if one element fails
matrix:
goos: ["linux", "windows", "darwin"]
goarch: ["amd64"]
include:
- goos: "windows"
goarch: "386"
steps:
- name: checkout
uses: actions/checkout@v3
- name: install staticcheck
run: GOBIN=~/.local/bin ./tool/go install honnef.co/go/tools/cmd/staticcheck
- name: run staticcheck
run: |
export GOROOT=$(./tool/go env GOROOT)
export PATH=$GOROOT/bin:$PATH
staticcheck -- $(./tool/go list ./... | grep -v tempfork)
env:
GOOS: ${{ matrix.goos }}
GOARCH: ${{ matrix.goarch }}
notify_slack:
if: always()
# Any of these jobs failing causes a slack notification.
needs:
- android
- test
- windows
- vm
- cross
- ios
- wasm
- tailscale_go
- fuzz
- depaware
- go_generate
- go_mod_tidy
- licenses
- staticcheck
runs-on: ubuntu-22.04
steps:
- name: notify
# Only notify slack for merged commits, not PR failures.
#
# It may be tempting to move this condition into the job's 'if' block, but
# don't: Github only collapses the test list into "everything is OK" if
# all jobs succeeded. A skipped job results in the list staying expanded.
# By having the job always run, but skipping its only step as needed, we
# let the CI output collapse nicely in PRs.
if: failure() && github.event_name == 'push'
uses: ruby/action-slack@v3.2.1
with:
payload: |
{
"attachments": [{
"title": "Failure: ${{ github.workflow }}",
"title_link": "https://github.com/${{ github.repository }}/commit/${{ github.sha }}/checks",
"text": "${{ github.repository }}@${{ github.ref_name }}: <https://github.com/${{ github.repository }}/commit/${{ github.sha }}|${{ github.sha }}>",
"fields": [{ "value": ${{ toJson(github.event.head_commit.message) }}, "short": false }],
"footer": "${{ github.event.head_commit.committer.name }} at ${{ github.event.head_commit.timestamp }}",
"color": "danger"
}]
}
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
check_mergeability:
if: always()
runs-on: ubuntu-22.04
needs:
- android
- test
- windows
- vm
- cross
- ios
- wasm
- tailscale_go
- fuzz
- depaware
- go_generate
- go_mod_tidy
- licenses
- staticcheck
steps:
- name: Decide if change is okay to merge
if: github.event_name != 'push'
uses: re-actors/alls-green@release/v1
with:
jobs: ${{ toJSON(needs) }}

@@ -0,0 +1,30 @@
name: "@tailscale/connect npm publish"
on: workflow_dispatch
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v3
- name: Set up node
uses: actions/setup-node@v3
with:
node-version: "16.x"
registry-url: "https://registry.npmjs.org"
- name: Build package
# Build with build_dist.sh to ensure that version information is embedded.
# GOROOT is specified so that the Go/Wasm build that is triggered by build-pkg
# also picks up our custom Go toolchain.
run: |
./build_dist.sh tailscale.com/cmd/tsconnect
GOROOT="${HOME}/.cache/tailscale-go" ./tsconnect build-pkg
- name: Publish
env:
NODE_AUTH_TOKEN: ${{ secrets.TSCONNECT_NPM_PUBLISH_AUTH_TOKEN }}
run: ./tool/yarn --cwd ./cmd/tsconnect/pkg publish --access public

@@ -1,49 +0,0 @@
name: update-flake
on:
# run action when a change lands in the main branch which updates go.mod. Also
# allow manual triggering.
push:
branches:
- main
paths:
- go.mod
- .github/workflows/update-flakes.yml
workflow_dispatch:
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
update-flake:
runs-on: ubuntu-latest
steps:
- name: Check out code
uses: actions/checkout@v3
- name: Run update-flakes
run: ./update-flake.sh
- name: Get access token
uses: tibdex/github-app-token@b62528385c34dbc9f38e5f4225ac829252d1ea92 # v1.8.0
id: generate-token
with:
app_id: ${{ secrets.LICENSING_APP_ID }}
installation_id: ${{ secrets.LICENSING_APP_INSTALLATION_ID }}
private_key: ${{ secrets.LICENSING_APP_PRIVATE_KEY }}
- name: Send pull request
uses: peter-evans/create-pull-request@284f54f989303d2699d373481a0cfa13ad5a6666 #v5.0.1
with:
token: ${{ steps.generate-token.outputs.token }}
author: Flakes Updater <noreply+flakes-updater@tailscale.com>
committer: Flakes Updater <noreply+flakes-updater@tailscale.com>
branch: flakes
commit-message: "go.mod.sri: update SRI hash for go.mod changes"
title: "go.mod.sri: update SRI hash for go.mod changes"
body: Triggered by ${{ github.repository }}@${{ github.sha }}
signoff: true
delete-branch: true
reviewers: danderson

.github/workflows/vm.yml

@@ -0,0 +1,50 @@
name: VM
on:
pull_request:
branches:
- '*'
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
ubuntu2004-LTS-cloud-base:
runs-on: [ self-hosted, linux, vm ]
if: "!contains(github.event.head_commit.message, '[ci skip]')"
steps:
- name: Set GOPATH
run: echo "GOPATH=$HOME/go" >> $GITHUB_ENV
- name: Checkout Code
uses: actions/checkout@v3
- name: Set up Go
uses: actions/setup-go@v3
with:
go-version-file: go.mod
- name: Run VM tests
run: go test ./tstest/integration/vms -v -no-s3 -run-vm-tests -run=TestRunUbuntu2004
env:
HOME: "/tmp"
TMPDIR: "/tmp"
XDG_CACHE_HOME: "/var/lib/ghrunner/cache"
- uses: k0kubun/action-slack@v2.0.0
with:
payload: |
{
"attachments": [{
"text": "${{ job.status }}: ${{ github.workflow }} <https://github.com/${{ github.repository }}/commit/${{ github.sha }}/checks|${{ env.COMMIT_DATE }} #${{ env.COMMIT_NUMBER_OF_DAY }}> " +
"(<https://github.com/${{ github.repository }}/commit/${{ github.sha }}|" + "${{ github.sha }}".substring(0, 10) + ">) " +
"of ${{ github.repository }}@" + "${{ github.ref }}".split('/').reverse()[0] + " by ${{ github.event.head_commit.committer.name }}",
"color": "danger"
}]
}
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
if: failure() && github.event_name == 'push'

.github/workflows/windows.yml

@@ -0,0 +1,66 @@
name: Windows
on:
push:
branches:
- main
pull_request:
branches:
- '*'
concurrency:
group: ${{ github.workflow }}-$${{ github.head_ref || github.run_id }}
cancel-in-progress: true
jobs:
test:
runs-on: windows-latest
if: "!contains(github.event.head_commit.message, '[ci skip]')"
steps:
- name: Checkout code
uses: actions/checkout@v3
- name: Install Go
uses: actions/setup-go@v3
with:
go-version-file: go.mod
- name: Restore Cache
uses: actions/cache@v3
with:
# Note: unlike some other setups, this is only grabbing the mod download
# cache, rather than the whole mod directory, as the download cache
# contains zips that can be unpacked in parallel faster than they can be
# fetched and extracted by tar
path: |
~/go/pkg/mod/cache
~\AppData\Local\go-build
# The -2- here should be incremented when the scheme of data to be
# cached changes (e.g. path above changes).
# TODO(raggi): add a go version here.
key: ${{ runner.os }}-go-2-${{ hashFiles('**/go.sum') }}
- name: Test
# Don't use -bench=. -benchtime=1x.
# Somewhere in the layers (powershell?)
# the equals signs cause great confusion.
run: go test -bench . -benchtime 1x ./...
- uses: k0kubun/action-slack@v2.0.0
with:
payload: |
{
"attachments": [{
"text": "${{ job.status }}: ${{ github.workflow }} <https://github.com/${{ github.repository }}/commit/${{ github.sha }}/checks|${{ env.COMMIT_DATE }} #${{ env.COMMIT_NUMBER_OF_DAY }}> " +
"(<https://github.com/${{ github.repository }}/commit/${{ github.sha }}|" + "${{ github.sha }}".substring(0, 10) + ">) " +
"of ${{ github.repository }}@" + "${{ github.ref }}".split('/').reverse()[0] + " by ${{ github.event.head_commit.committer.name }}",
"color": "danger"
}]
}
env:
SLACK_WEBHOOK_URL: ${{ secrets.SLACK_WEBHOOK_URL }}
if: failure() && github.event_name == 'push'

.gitignore

@@ -22,18 +22,3 @@ cmd/tailscaled/tailscaled
# direnv config, this may be different for other people so it's probably safer
# to make this nonspecific.
.envrc
# Ignore personal VS Code settings
.vscode/
# Support personal project-specific GOPATH
.gopath/
# Ignore nix build result path
/result
# Ignore direnv nix-shell environment cache
.direnv/
/gocross
/dist

@@ -1,61 +0,0 @@
linters:
# Don't enable any linters by default; just the ones that we explicitly
# enable in the list below.
disable-all: true
enable:
- bidichk
- gofmt
- goimports
- misspell
- revive
# Configuration for how we run golangci-lint
run:
timeout: 5m
issues:
# Excluding configuration per-path, per-linter, per-text and per-source
exclude-rules:
# These are forks of an upstream package and thus are exempt from stylistic
# changes that would make pulling in upstream changes harder.
- path: tempfork/.*\.go
text: "File is not `gofmt`-ed with `-s` `-r 'interface{} -> any'`"
- path: util/singleflight/.*\.go
text: "File is not `gofmt`-ed with `-s` `-r 'interface{} -> any'`"
# Per-linter settings are contained in this top-level key
linters-settings:
# Enable all rules by default; we don't use invisible unicode runes.
bidichk:
gofmt:
rewrite-rules:
- pattern: 'interface{}'
replacement: 'any'
goimports:
misspell:
revive:
enable-all-rules: false
ignore-generated-header: true
rules:
- name: atomic
- name: context-keys-type
- name: defer
arguments: [[
# Calling 'recover' at the time a defer is registered (i.e. "defer recover()") has no effect.
"immediate-recover",
# Calling 'recover' outside of a deferred function has no effect
"recover",
# Returning values from a deferred function has no effect
"return",
]]
- name: duplicated-imports
- name: errorf
- name: string-of-int
- name: time-equal
- name: unconditional-recursion
- name: useless-break
- name: waitgroup-by-value
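
When this .golangci.yml sits at the repository root, golangci-lint picks it up automatically, so a local run needs no extra flags. A sketch, assuming golangci-lint v1.52.x (the version the removed workflow above pins) is on PATH:

    golangci-lint run ./...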

@@ -1,5 +1,6 @@
# Copyright (c) Tailscale Inc & AUTHORS
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
############################################################################
#
@@ -31,7 +32,7 @@
# $ docker exec tailscaled tailscale status
FROM golang:1.20-alpine AS build-env
FROM golang:1.19-alpine AS build-env
WORKDIR /go/src/tailscale
@@ -47,7 +48,8 @@ RUN go install \
golang.org/x/crypto/ssh \
golang.org/x/crypto/acme \
nhooyr.io/websocket \
github.com/mdlayher/netlink
github.com/mdlayher/netlink \
golang.zx2c4.com/wireguard/device
COPY . .
@@ -61,15 +63,13 @@ ENV VERSION_GIT_HASH=$VERSION_GIT_HASH
ARG TARGETARCH
RUN GOARCH=$TARGETARCH go install -ldflags="\
-X tailscale.com/version.longStamp=$VERSION_LONG \
-X tailscale.com/version.shortStamp=$VERSION_SHORT \
-X tailscale.com/version.gitCommitStamp=$VERSION_GIT_HASH" \
-v ./cmd/tailscale ./cmd/tailscaled ./cmd/containerboot
-X tailscale.com/version.Long=$VERSION_LONG \
-X tailscale.com/version.Short=$VERSION_SHORT \
-X tailscale.com/version.GitCommit=$VERSION_GIT_HASH" \
-v ./cmd/tailscale ./cmd/tailscaled
FROM alpine:3.16
RUN apk add --no-cache ca-certificates iptables iproute2 ip6tables
COPY --from=build-env /go/bin/* /usr/local/bin/
# For compat with the previous run.sh, although ideally you should be
# using build_docker.sh which sets an entrypoint for the image.
RUN mkdir /tailscale && ln -s /usr/local/bin/containerboot /tailscale/run.sh
COPY --from=build-env /go/src/tailscale/docs/k8s/run.sh /usr/local/bin/
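
The -ldflags "-X" version stamps above are normally supplied by build_docker.sh; the VERSION_* references suggest matching ARG declarations earlier in the Dockerfile. A hedged manual build sketch under that assumption, with placeholder version values:

    docker build \
      --build-arg VERSION_LONG=1.33.0-dev \
      --build-arg VERSION_SHORT=1.33.0 \
      --build-arg VERSION_GIT_HASH="$(git rev-parse HEAD)" \
      -t tailscale:dev .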

@@ -1,5 +1,6 @@
# Copyright (c) Tailscale Inc & AUTHORS
# SPDX-License-Identifier: BSD-3-Clause
# Copyright (c) 2021 Tailscale Inc & AUTHORS All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
FROM alpine:3.16
RUN apk add --no-cache ca-certificates iptables iproute2 ip6tables iputils
RUN apk add --no-cache ca-certificates iptables iproute2 ip6tables

@@ -1,6 +1,7 @@
BSD 3-Clause License
Copyright (c) 2020 Tailscale Inc & AUTHORS.
Copyright (c) 2020 Tailscale & AUTHORS.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

@@ -2,82 +2,55 @@ IMAGE_REPO ?= tailscale/tailscale
SYNO_ARCH ?= "amd64"
SYNO_DSM ?= "7"
vet: ## Run go vet
usage:
echo "See Makefile"
vet:
./tool/go vet ./...
tidy: ## Run go mod tidy
tidy:
./tool/go mod tidy
updatedeps: ## Update depaware deps
# depaware (via x/tools/go/packages) shells back to "go", so make sure the "go"
# it finds in its $$PATH is the right one.
PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --update \
updatedeps:
./tool/go run github.com/tailscale/depaware --update \
tailscale.com/cmd/tailscaled \
tailscale.com/cmd/tailscale \
tailscale.com/cmd/derper
depaware: ## Run depaware checks
# depaware (via x/tools/go/packages) shells back to "go", so make sure the "go"
# it finds in its $$PATH is the right one.
PATH="$$(./tool/go env GOROOT)/bin:$$PATH" ./tool/go run github.com/tailscale/depaware --check \
depaware:
./tool/go run github.com/tailscale/depaware --check \
tailscale.com/cmd/tailscaled \
tailscale.com/cmd/tailscale \
tailscale.com/cmd/derper
buildwindows: ## Build tailscale CLI for windows/amd64
buildwindows:
GOOS=windows GOARCH=amd64 ./tool/go install tailscale.com/cmd/tailscale tailscale.com/cmd/tailscaled
build386: ## Build tailscale CLI for linux/386
build386:
GOOS=linux GOARCH=386 ./tool/go install tailscale.com/cmd/tailscale tailscale.com/cmd/tailscaled
buildlinuxarm: ## Build tailscale CLI for linux/arm
buildlinuxarm:
GOOS=linux GOARCH=arm ./tool/go install tailscale.com/cmd/tailscale tailscale.com/cmd/tailscaled
buildwasm: ## Build tailscale CLI for js/wasm
buildwasm:
GOOS=js GOARCH=wasm ./tool/go install ./cmd/tsconnect/wasm ./cmd/tailscale/cli
buildlinuxloong64: ## Build tailscale CLI for linux/loong64
GOOS=linux GOARCH=loong64 ./tool/go install tailscale.com/cmd/tailscale tailscale.com/cmd/tailscaled
buildmultiarchimage: ## Build (and optionally push) multiarch docker image
buildmultiarchimage:
./build_docker.sh
check: staticcheck vet depaware buildwindows build386 buildlinuxarm buildwasm ## Perform basic checks and compilation tests
check: staticcheck vet depaware buildwindows build386 buildlinuxarm buildwasm
staticcheck: ## Run staticcheck.io checks
staticcheck:
./tool/go run honnef.co/go/tools/cmd/staticcheck -- $$(./tool/go list ./... | grep -v tempfork)
spk: ## Build synology package for ${SYNO_ARCH} architecture and ${SYNO_DSM} DSM version
./tool/go run ./cmd/dist build synology/dsm${SYNO_DSM}/${SYNO_ARCH}
spk:
PATH="${PWD}/tool:${PATH}" ./tool/go run github.com/tailscale/tailscale-synology@main -o tailscale.spk --source=. --goarch=${SYNO_ARCH} --dsm-version=${SYNO_DSM}
spkall: ## Build synology packages for all architectures and DSM versions
./tool/go run ./cmd/dist build synology
spkall:
mkdir -p spks
PATH="${PWD}/tool:${PATH}" ./tool/go run github.com/tailscale/tailscale-synology@main -o spks --source=. --goarch=all --dsm-version=all
pushspk: spk ## Push and install synology package on ${SYNO_HOST} host
pushspk: spk
echo "Pushing SPK to root@${SYNO_HOST} (env var SYNO_HOST) ..."
scp tailscale.spk root@${SYNO_HOST}:
ssh root@${SYNO_HOST} /usr/syno/bin/synopkg install tailscale.spk
publishdevimage: ## Build and publish tailscale image to location specified by ${REPO}
@test -n "${REPO}" || (echo "REPO=... required; e.g. REPO=ghcr.io/${USER}/tailscale" && exit 1)
@test "${REPO}" != "tailscale/tailscale" || (echo "REPO=... must not be tailscale/tailscale" && exit 1)
@test "${REPO}" != "ghcr.io/tailscale/tailscale" || (echo "REPO=... must not be ghcr.io/tailscale/tailscale" && exit 1)
@test "${REPO}" != "tailscale/k8s-operator" || (echo "REPO=... must not be tailscale/k8s-operator" && exit 1)
@test "${REPO}" != "ghcr.io/tailscale/k8s-operator" || (echo "REPO=... must not be ghcr.io/tailscale/k8s-operator" && exit 1)
TAGS=latest REPOS=${REPO} PUSH=true TARGET=client ./build_docker.sh
publishdevoperator: ## Build and publish k8s-operator image to location specified by ${REPO}
@test -n "${REPO}" || (echo "REPO=... required; e.g. REPO=ghcr.io/${USER}/tailscale" && exit 1)
@test "${REPO}" != "tailscale/tailscale" || (echo "REPO=... must not be tailscale/tailscale" && exit 1)
@test "${REPO}" != "ghcr.io/tailscale/tailscale" || (echo "REPO=... must not be ghcr.io/tailscale/tailscale" && exit 1)
@test "${REPO}" != "tailscale/k8s-operator" || (echo "REPO=... must not be tailscale/k8s-operator" && exit 1)
@test "${REPO}" != "ghcr.io/tailscale/k8s-operator" || (echo "REPO=... must not be ghcr.io/tailscale/k8s-operator" && exit 1)
TAGS=latest REPOS=${REPO} PUSH=true TARGET=operator ./build_docker.sh
help: ## Show this help
@echo "\nSpecify a command. The choices are:\n"
@grep -hE '^[0-9a-zA-Z_-]+:.*?## .*$$' ${MAKEFILE_LIST} | awk 'BEGIN {FS = ":.*?## "}; {printf " \033[0;36m%-20s\033[m %s\n", $$1, $$2}'
@echo ""
.PHONY: help
.DEFAULT_GOAL := help
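With these annotations, running plain `make` hits the default `help` goal, which greps the `##` comments out of the Makefile and prints each documented target with its description, e.g. `vet                  Run go vet`.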

View File

@ -6,41 +6,27 @@ Private WireGuard® networks made easy
## Overview
This repository contains the majority of Tailscale's open source code.
Notably, it includes the `tailscaled` daemon and
the `tailscale` CLI tool. The `tailscaled` daemon runs on Linux, Windows,
[macOS](https://tailscale.com/kb/1065/macos-variants/), and to varying degrees
on FreeBSD and OpenBSD. The Tailscale iOS and Android apps use this repo's
code, but this repo doesn't contain the mobile GUI code.
This repository contains all the open source Tailscale client code and
the `tailscaled` daemon and `tailscale` CLI tool. The `tailscaled`
daemon runs on Linux, Windows and [macOS](https://tailscale.com/kb/1065/macos-variants/), and to varying degrees on FreeBSD, OpenBSD, and Darwin. (The Tailscale iOS and Android apps use this repo's code, but this repo doesn't contain the mobile GUI code.)
Other [Tailscale repos](https://github.com/orgs/tailscale/repositories) of note:
The Android app is at https://github.com/tailscale/tailscale-android
* the Android app is at https://github.com/tailscale/tailscale-android
* the Synology package is at https://github.com/tailscale/tailscale-synology
* the QNAP package is at https://github.com/tailscale/tailscale-qpkg
* the Chocolatey packaging is at https://github.com/tailscale/tailscale-chocolatey
For background on which parts of Tailscale are open source and why,
see [https://tailscale.com/opensource/](https://tailscale.com/opensource/).
The Synology package is at https://github.com/tailscale/tailscale-synology
## Using
We serve packages for a variety of distros and platforms at
[https://pkgs.tailscale.com](https://pkgs.tailscale.com/).
We serve packages for a variety of distros at
https://pkgs.tailscale.com .
## Other clients
The [macOS, iOS, and Windows clients](https://tailscale.com/download)
use the code in this repository but additionally include small GUI
wrappers. The GUI wrappers on non-open source platforms are themselves
not open source.
wrappers that are not open source.
## Building
We always require the latest Go release, currently Go 1.20. (While we build
releases with our [Go fork](https://github.com/tailscale/go/), its use is not
required.)
```
go install tailscale.com/cmd/tailscale{,d}
```
@ -57,6 +43,8 @@ If your distro has conventions that preclude the use of
`build_dist.sh`, please do the equivalent of what it does in your
distro's way, so that bug reports contain useful version information.
We require the latest Go release, currently Go 1.19.
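For example, `./build_dist.sh tailscale.com/cmd/tailscale` builds the CLI with the version stamps described below filled in at link time.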
## Bugs
Please file any issues about this code or the hosted service on
@ -71,9 +59,6 @@ We require [Developer Certificate of
Origin](https://en.wikipedia.org/wiki/Developer_Certificate_of_Origin)
`Signed-off-by` lines in commits.
See `git log` for our commit message style. It's basically the same as
[Go's style](https://github.com/golang/go/wiki/CommitMessage).
## About Us
[Tailscale](https://tailscale.com/) is primarily developed by the

View File

@ -1 +1 @@
1.45.0
1.31.0

api.md (1919 lines): file diff suppressed because it is too large.

View File

@ -1,5 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2019 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package atomicfile contains code related to writing to filesystems
// atomically.
@ -8,20 +9,14 @@
package atomicfile // import "tailscale.com/atomicfile"
import (
"fmt"
"os"
"path/filepath"
"runtime"
)
// WriteFile writes data to filename+some suffix, then renames it into filename.
// The perm argument is ignored on Windows. If the target filename already
// exists but is not a regular file, WriteFile returns an error.
// WriteFile writes data to filename+some suffix, then renames it
// into filename. The perm argument is ignored on Windows.
func WriteFile(filename string, data []byte, perm os.FileMode) (err error) {
fi, err := os.Stat(filename)
if err == nil && !fi.Mode().IsRegular() {
return fmt.Errorf("%s already exists and is not a regular file", filename)
}
f, err := os.CreateTemp(filepath.Dir(filename), filepath.Base(filename)+".tmp")
if err != nil {
return err

View File

@ -1,47 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build !js && !windows
package atomicfile
import (
"net"
"os"
"path/filepath"
"runtime"
"strings"
"testing"
)
func TestDoesNotOverwriteIrregularFiles(t *testing.T) {
// Per tailscale/tailscale#7658 as one example, almost any imagined use of
// atomicfile.WriteFile should likely not attempt to overwrite an irregular file
// such as a device node, socket, or named pipe.
const filename = "TestDoesNotOverwriteIrregularFiles"
var path string
// macOS private temp does not allow unix socket creation, but /tmp does.
if runtime.GOOS == "darwin" {
path = filepath.Join("/tmp", filename)
t.Cleanup(func() { os.Remove(path) })
} else {
path = filepath.Join(t.TempDir(), filename)
}
// The least troublesome thing to make that is not a file is a unix socket.
// Making a null device sadly requires root.
l, err := net.ListenUnix("unix", &net.UnixAddr{Name: path, Net: "unix"})
if err != nil {
t.Fatal(err)
}
defer l.Close()
err = WriteFile(path, []byte("hello"), 0644)
if err == nil {
t.Fatal("expected error, got nil")
}
if !strings.Contains(err.Error(), "is not a regular file") {
t.Fatalf("unexpected error: %v", err)
}
}

View File

@ -11,25 +11,42 @@
set -eu
go="go"
if [ -n "${TS_USE_TOOLCHAIN:-}" ]; then
go="./tool/go"
IFS=".$IFS" read -r major minor patch <VERSION.txt
git_hash=$(git rev-parse HEAD)
if ! git diff-index --quiet HEAD; then
git_hash="${git_hash}-dirty"
fi
base_hash=$(git rev-list --max-count=1 HEAD -- VERSION.txt)
change_count=$(git rev-list --count HEAD "^$base_hash")
short_hash=$(echo "$git_hash" | cut -c1-9)
if expr "$minor" : "[0-9]*[13579]$" >/dev/null; then
patch="$change_count"
change_suffix=""
elif [ "$change_count" != "0" ]; then
change_suffix="-$change_count"
else
change_suffix=""
fi
eval `CGO_ENABLED=0 GOOS=$($go env GOHOSTOS) GOARCH=$($go env GOHOSTARCH) $go run ./cmd/mkversion`
long_suffix="$change_suffix-t$short_hash"
MINOR="$major.$minor"
SHORT="$MINOR.$patch"
LONG="${SHORT}$long_suffix"
GIT_HASH="$git_hash"
if [ "$1" = "shellvars" ]; then
cat <<EOF
VERSION_MINOR="$VERSION_MINOR"
VERSION_SHORT="$VERSION_SHORT"
VERSION_LONG="$VERSION_LONG"
VERSION_GIT_HASH="$VERSION_GIT_HASH"
VERSION_MINOR="$MINOR"
VERSION_SHORT="$SHORT"
VERSION_LONG="$LONG"
VERSION_GIT_HASH="$GIT_HASH"
EOF
exit 0
fi
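As a worked example of the version computation above (hash illustrative): with VERSION.txt containing 1.31.0, 25 commits since VERSION.txt last changed, and HEAD at abc123def0..., the odd minor 31 marks an unstable train, so the change count becomes the patch number: SHORT is 1.31.25 and LONG is 1.31.25-tabc123def. For an even (stable) minor such as 1.30.0 with three commits on top, SHORT stays 1.30.0 and LONG becomes 1.30.0-3-tabc123def.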
tags=""
ldflags="-X tailscale.com/version.longStamp=${VERSION_LONG} -X tailscale.com/version.shortStamp=${VERSION_SHORT}"
ldflags="-X tailscale.com/version.Long=${LONG} -X tailscale.com/version.Short=${SHORT} -X tailscale.com/version.GitCommit=${GIT_HASH}"
# build_dist.sh arguments must precede go build arguments.
while [ "$#" -gt 1 ]; do
@ -37,7 +54,7 @@ while [ "$#" -gt 1 ]; do
--extra-small)
shift
ldflags="$ldflags -w -s"
tags="${tags:+$tags,}ts_omit_aws,ts_omit_bird,ts_omit_tap,ts_omit_kube"
tags="${tags:+$tags,}ts_omit_aws"
;;
--box)
shift
@ -49,4 +66,4 @@ while [ "$#" -gt 1 ]; do
esac
done
exec $go build ${tags:+-tags=$tags} -ldflags "$ldflags" "$@"
exec ./tool/go build ${tags:+-tags=$tags} -ldflags "$ldflags" "$@"

View File

@ -23,52 +23,26 @@ set -eu
export PATH=$PWD/tool:$PATH
eval $(./build_dist.sh shellvars)
DEFAULT_TARGET="client"
DEFAULT_TAGS="v${VERSION_SHORT},v${VERSION_MINOR}"
DEFAULT_BASE="tailscale/alpine-base:3.16"
DEFAULT_REPOS="tailscale/tailscale,ghcr.io/tailscale/tailscale"
DEFAULT_BASE="ghcr.io/tailscale/alpine-base:3.16"
PUSH="${PUSH:-false}"
TARGET="${TARGET:-${DEFAULT_TARGET}}"
REPOS="${REPOS:-${DEFAULT_REPOS}}"
TAGS="${TAGS:-${DEFAULT_TAGS}}"
BASE="${BASE:-${DEFAULT_BASE}}"
case "$TARGET" in
client)
DEFAULT_REPOS="tailscale/tailscale"
REPOS="${REPOS:-${DEFAULT_REPOS}}"
go run github.com/tailscale/mkctr \
--gopaths="\
tailscale.com/cmd/tailscale:/usr/local/bin/tailscale, \
tailscale.com/cmd/tailscaled:/usr/local/bin/tailscaled, \
tailscale.com/cmd/containerboot:/usr/local/bin/containerboot" \
--ldflags="\
-X tailscale.com/version.longStamp=${VERSION_LONG} \
-X tailscale.com/version.shortStamp=${VERSION_SHORT} \
-X tailscale.com/version.gitCommitStamp=${VERSION_GIT_HASH}" \
--base="${BASE}" \
--tags="${TAGS}" \
--repos="${REPOS}" \
--push="${PUSH}" \
/usr/local/bin/containerboot
;;
operator)
DEFAULT_REPOS="tailscale/k8s-operator"
REPOS="${REPOS:-${DEFAULT_REPOS}}"
go run github.com/tailscale/mkctr \
--gopaths="tailscale.com/cmd/k8s-operator:/usr/local/bin/operator" \
--ldflags="\
-X tailscale.com/version.longStamp=${VERSION_LONG} \
-X tailscale.com/version.shortStamp=${VERSION_SHORT} \
-X tailscale.com/version.gitCommitStamp=${VERSION_GIT_HASH}" \
--base="${BASE}" \
--tags="${TAGS}" \
--repos="${REPOS}" \
--push="${PUSH}" \
/usr/local/bin/operator
;;
*)
echo "unknown target: $TARGET"
exit 1
;;
esac
go run github.com/tailscale/mkctr \
--gopaths="\
tailscale.com/cmd/tailscale:/usr/local/bin/tailscale, \
tailscale.com/cmd/tailscaled:/usr/local/bin/tailscaled" \
--ldflags="\
-X tailscale.com/version.Long=${VERSION_LONG} \
-X tailscale.com/version.Short=${VERSION_SHORT} \
-X tailscale.com/version.GitCommit=${VERSION_GIT_HASH}" \
--files="docs/k8s/run.sh:/tailscale/run.sh" \
--base="${BASE}" \
--tags="${TAGS}" \
--repos="${REPOS}" \
--push="${PUSH}" \
/bin/sh /tailscale/run.sh
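For reference, the Makefile's `publishdevimage` target invokes this script as `TAGS=latest REPOS=${REPO} PUSH=true TARGET=client ./build_docker.sh`.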

View File

@ -1,5 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2021 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package chirp implements a client to communicate with the BIRD Internet
// Routing Daemon.

View File

@ -1,5 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package chirp
import (

View File

@ -1,7 +1,9 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.19
// +build go1.19
package tailscale
@ -103,7 +105,7 @@ func (c *Client) ACL(ctx context.Context) (acl *ACL, err error) {
// it as a string.
// HuJSON is JSON with a few modifications to make it more human-friendly. The primary
// changes are allowing comments and trailing commas. See the following links for more info:
// https://tailscale.com/s/acl-format
// https://tailscale.com/kb/1018/acls?q=acl#tailscale-acl-policy-format
// https://github.com/tailscale/hujson
func (c *Client) ACLHuJSON(ctx context.Context) (acl *ACLHuJSON, err error) {
// Format return errors to be descriptive.
@ -436,7 +438,7 @@ func (c *Client) ValidateACLJSON(ctx context.Context, source, dest string) (test
}
}()
tests := []ACLTest{{User: source, Allow: []string{dest}}}
tests := []ACLTest{ACLTest{User: source, Allow: []string{dest}}}
postData, err := json.Marshal(tests)
if err != nil {
return nil, err
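For orientation, a hedged sketch of calling the client method above; it assumes an already-authenticated *tailscale.Client (construction elided) and a hypothetical package name:

```go
package tailscaleexample

import (
	"context"
	"fmt"

	"tailscale.com/client/tailscale"
)

// printPolicy fetches the tailnet policy file as HuJSON and prints it.
// Constructing the authenticated *tailscale.Client is out of scope here.
func printPolicy(ctx context.Context, c *tailscale.Client) error {
	acl, err := c.ACLHuJSON(ctx)
	if err != nil {
		return fmt.Errorf("fetching ACL: %w", err)
	}
	fmt.Printf("%+v\n", acl) // HuJSON body plus response metadata
	return nil
}
```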

View File

@ -1,14 +1,12 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2021 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package apitype contains types for the Tailscale LocalAPI and control plane API.
// Package apitype contains types for the Tailscale local API and control plane API.
package apitype
import "tailscale.com/tailcfg"
// LocalAPIHost is the Host header value used by the LocalAPI.
const LocalAPIHost = "local-tailscaled.sock"
// WhoIsResponse is the JSON type returned by tailscaled debug server's /whois?ip=$IP handler.
type WhoIsResponse struct {
Node *tailcfg.Node
@ -23,7 +21,7 @@ type WhoIsResponse struct {
type FileTarget struct {
Node *tailcfg.Node
// PeerAPI is the http://ip:port URL base of the node's PeerAPI,
// PeerAPI is the http://ip:port URL base of the node's peer API,
// without any path (not even a single slash).
PeerAPIURL string
}
@ -32,9 +30,3 @@ type WaitingFile struct {
Name string
Size int64
}
// SetPushDeviceTokenRequest is the body POSTed to the LocalAPI endpoint /set-device-token.
type SetPushDeviceTokenRequest struct {
// PushDeviceToken is the iOS/macOS APNs device token (and any future Android equivalent).
PushDeviceToken string
}

View File

@ -1,5 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package apitype
@ -10,6 +11,7 @@ type DNSConfig struct {
Domains []string `json:"domains"`
Nameservers []string `json:"nameservers"`
Proxied bool `json:"proxied"`
PerDomain bool `json:",omitempty"`
}
type DNSResolver struct {

View File

@ -1,7 +1,9 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.19
// +build go1.19
package tailscale
@ -12,6 +14,7 @@ import (
"fmt"
"net/http"
"net/url"
"strings"
"tailscale.com/types/opt"
)
@ -43,18 +46,17 @@ type Device struct {
Name string `json:"name"`
Hostname string `json:"hostname"`
ClientVersion string `json:"clientVersion"` // Empty for external devices.
UpdateAvailable bool `json:"updateAvailable"` // Empty for external devices.
OS string `json:"os"`
Tags []string `json:"tags"`
Created string `json:"created"` // Empty for external devices.
LastSeen string `json:"lastSeen"`
KeyExpiryDisabled bool `json:"keyExpiryDisabled"`
Expires string `json:"expires"`
Authorized bool `json:"authorized"`
IsExternal bool `json:"isExternal"`
MachineKey string `json:"machineKey"` // Empty for external devices.
NodeKey string `json:"nodeKey"`
ClientVersion string `json:"clientVersion"` // Empty for external devices.
UpdateAvailable bool `json:"updateAvailable"` // Empty for external devices.
OS string `json:"os"`
Created string `json:"created"` // Empty for external devices.
LastSeen string `json:"lastSeen"`
KeyExpiryDisabled bool `json:"keyExpiryDisabled"`
Expires string `json:"expires"`
Authorized bool `json:"authorized"`
IsExternal bool `json:"isExternal"`
MachineKey string `json:"machineKey"` // Empty for external devices.
NodeKey string `json:"nodeKey"`
// BlocksIncomingConnections is configured via the device's
// Tailscale client preferences. This field is only reported
@ -212,20 +214,8 @@ func (c *Client) DeleteDevice(ctx context.Context, deviceID string) (err error)
// AuthorizeDevice marks a device as authorized.
func (c *Client) AuthorizeDevice(ctx context.Context, deviceID string) error {
return c.SetAuthorized(ctx, deviceID, true)
}
// SetAuthorized marks a device as authorized or not.
func (c *Client) SetAuthorized(ctx context.Context, deviceID string, authorized bool) error {
params := &struct {
Authorized bool `json:"authorized"`
}{Authorized: authorized}
data, err := json.Marshal(params)
if err != nil {
return err
}
path := fmt.Sprintf("%s/api/v2/device/%s/authorized", c.baseURL(), url.PathEscape(deviceID))
req, err := http.NewRequestWithContext(ctx, "POST", path, bytes.NewBuffer(data))
req, err := http.NewRequestWithContext(ctx, "POST", path, strings.NewReader(`{"authorized":true}`))
if err != nil {
return err
}
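A brief sketch of the SetAuthorized method shown above; the wrapper and package name are illustrative:

```go
package tailscaleexample

import (
	"context"

	"tailscale.com/client/tailscale"
)

// setDeviceApproval flips a device's authorization state; unlike the
// older AuthorizeDevice, SetAuthorized can also de-authorize.
func setDeviceApproval(ctx context.Context, c *tailscale.Client, deviceID string, ok bool) error {
	return c.SetAuthorized(ctx, deviceID, ok)
}
```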

View File

@ -1,7 +1,9 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.19
// +build go1.19
package tailscale
@ -63,7 +65,7 @@ func (c *Client) dnsGETRequest(ctx context.Context, endpoint string) ([]byte, er
return b, nil
}
func (c *Client) dnsPOSTRequest(ctx context.Context, endpoint string, postData any) ([]byte, error) {
func (c *Client) dnsPOSTRequest(ctx context.Context, endpoint string, postData interface{}) ([]byte, error) {
path := fmt.Sprintf("%s/api/v2/tailnet/%s/dns/%s", c.baseURL(), c.tailnet, endpoint)
data, err := json.Marshal(&postData)
if err != nil {

View File

@ -1,5 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2021 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// The servetls program shows how to run an HTTPS server
// using a Tailscale cert via LetsEncrypt.

View File

@ -1,166 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package tailscale
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/http"
"time"
)
// Key represents a Tailscale API or auth key.
type Key struct {
ID string `json:"id"`
Created time.Time `json:"created"`
Expires time.Time `json:"expires"`
Capabilities KeyCapabilities `json:"capabilities"`
}
// KeyCapabilities are the capabilities of a Key.
type KeyCapabilities struct {
Devices KeyDeviceCapabilities `json:"devices,omitempty"`
}
// KeyDeviceCapabilities are the device-related capabilities of a Key.
type KeyDeviceCapabilities struct {
Create KeyDeviceCreateCapabilities `json:"create"`
}
// KeyDeviceCreateCapabilities are the device creation capabilities of a Key.
type KeyDeviceCreateCapabilities struct {
Reusable bool `json:"reusable"`
Ephemeral bool `json:"ephemeral"`
Preauthorized bool `json:"preauthorized"`
Tags []string `json:"tags,omitempty"`
}
// Keys returns the list of keys for the current user.
func (c *Client) Keys(ctx context.Context) ([]string, error) {
path := fmt.Sprintf("%s/api/v2/tailnet/%s/keys", c.baseURL(), c.tailnet)
req, err := http.NewRequestWithContext(ctx, "GET", path, nil)
if err != nil {
return nil, err
}
b, resp, err := c.sendRequest(req)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, handleErrorResponse(b, resp)
}
var keys struct {
Keys []*Key `json:"keys"`
}
if err := json.Unmarshal(b, &keys); err != nil {
return nil, err
}
ret := make([]string, 0, len(keys.Keys))
for _, k := range keys.Keys {
ret = append(ret, k.ID)
}
return ret, nil
}
// CreateKey creates a new key for the current user. Currently, only auth keys
// can be created. It returns the secret key itself, which cannot be retrieved again
// later, and the key metadata.
//
// To create a key with a specific expiry, use CreateKeyWithExpiry.
func (c *Client) CreateKey(ctx context.Context, caps KeyCapabilities) (keySecret string, keyMeta *Key, _ error) {
return c.CreateKeyWithExpiry(ctx, caps, 0)
}
// CreateKeyWithExpiry is like CreateKey, but allows specifying an expiration time.
//
// The time is truncated to a whole number of seconds. If zero, that means no expiration.
func (c *Client) CreateKeyWithExpiry(ctx context.Context, caps KeyCapabilities, expiry time.Duration) (keySecret string, keyMeta *Key, _ error) {
// convert expiry to an int64 number of seconds
expirySeconds := int64(expiry.Seconds())
if expirySeconds < 0 {
return "", nil, fmt.Errorf("expiry must be positive")
}
if expirySeconds == 0 && expiry != 0 {
return "", nil, fmt.Errorf("non-zero expiry must be at least one second")
}
keyRequest := struct {
Capabilities KeyCapabilities `json:"capabilities"`
ExpirySeconds int64 `json:"expirySeconds,omitempty"`
}{caps, int64(expirySeconds)}
bs, err := json.Marshal(keyRequest)
if err != nil {
return "", nil, err
}
path := fmt.Sprintf("%s/api/v2/tailnet/%s/keys", c.baseURL(), c.tailnet)
req, err := http.NewRequestWithContext(ctx, "POST", path, bytes.NewReader(bs))
if err != nil {
return "", nil, err
}
b, resp, err := c.sendRequest(req)
if err != nil {
return "", nil, err
}
if resp.StatusCode != http.StatusOK {
return "", nil, handleErrorResponse(b, resp)
}
var key struct {
Key
Secret string `json:"key"`
}
if err := json.Unmarshal(b, &key); err != nil {
return "", nil, err
}
return key.Secret, &key.Key, nil
}
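A hedged usage sketch of the key-creation API above, creating a reusable, preauthorized auth key that expires in 24 hours (package name illustrative):

```go
package tailscaleexample

import (
	"context"
	"time"

	"tailscale.com/client/tailscale"
)

func newAuthKey(ctx context.Context, c *tailscale.Client) (secret string, err error) {
	caps := tailscale.KeyCapabilities{
		Devices: tailscale.KeyDeviceCapabilities{
			Create: tailscale.KeyDeviceCreateCapabilities{
				Reusable:      true,
				Preauthorized: true,
			},
		},
	}
	// The secret is only returned at creation time; store it somewhere safe.
	secret, _, err = c.CreateKeyWithExpiry(ctx, caps, 24*time.Hour)
	return secret, err
}
```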
// Key returns the metadata for the given key ID. Currently, capabilities are
// only returned for auth keys; API keys return only general metadata.
func (c *Client) Key(ctx context.Context, id string) (*Key, error) {
path := fmt.Sprintf("%s/api/v2/tailnet/%s/keys/%s", c.baseURL(), c.tailnet, id)
req, err := http.NewRequestWithContext(ctx, "GET", path, nil)
if err != nil {
return nil, err
}
b, resp, err := c.sendRequest(req)
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
return nil, handleErrorResponse(b, resp)
}
var key Key
if err := json.Unmarshal(b, &key); err != nil {
return nil, err
}
return &key, nil
}
// DeleteKey deletes the key with the given ID.
func (c *Client) DeleteKey(ctx context.Context, id string) error {
path := fmt.Sprintf("%s/api/v2/tailnet/%s/keys/%s", c.baseURL(), c.tailnet, id)
req, err := http.NewRequestWithContext(ctx, "DELETE", path, nil)
if err != nil {
return err
}
b, resp, err := c.sendRequest(req)
if err != nil {
return err
}
if resp.StatusCode != http.StatusOK {
return handleErrorResponse(b, resp)
}
return nil
}

View File

@ -1,7 +1,9 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2021 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.19
// +build go1.19
package tailscale
@ -27,7 +29,6 @@ import (
"go4.org/mem"
"tailscale.com/client/tailscale/apitype"
"tailscale.com/envknob"
"tailscale.com/ipn"
"tailscale.com/ipn/ipnstate"
"tailscale.com/net/netutil"
@ -35,15 +36,13 @@ import (
"tailscale.com/safesocket"
"tailscale.com/tailcfg"
"tailscale.com/tka"
"tailscale.com/types/key"
"tailscale.com/types/tkatype"
)
// defaultLocalClient is the default LocalClient when using the legacy
// package-level functions.
var defaultLocalClient LocalClient
// LocalClient is a client to Tailscale's "LocalAPI", communicating with the
// LocalClient is a client to Tailscale's "local API", communicating with the
// Tailscale daemon on the local machine. Its API is not necessarily stable and
// subject to changes between releases. Some API calls have stricter
// compatibility guarantees, once they've been widely adopted. See method docs
@ -96,12 +95,14 @@ func (lc *LocalClient) defaultDialer(ctx context.Context, network, addr string)
// a TCP server on a random port, find the random port. For HTTP connections,
// we don't send the token. It gets added in an HTTP Basic-Auth header.
if port, _, err := safesocket.LocalTCPPortAndToken(); err == nil {
// We use 127.0.0.1 and not "localhost" (issue 7851).
var d net.Dialer
return d.DialContext(ctx, "tcp", "127.0.0.1:"+strconv.Itoa(port))
return d.DialContext(ctx, "tcp", "localhost:"+strconv.Itoa(port))
}
}
s := safesocket.DefaultConnectionStrategy(lc.socket())
// The user provided a non-default tailscaled socket address.
// Connect only to exactly what they provided.
s.UseFallback(false)
return safesocket.Connect(s)
}
@ -115,7 +116,6 @@ func (lc *LocalClient) defaultDialer(ctx context.Context, network, addr string)
//
// DoLocalRequest may mutate the request to add Authorization headers.
func (lc *LocalClient) DoLocalRequest(req *http.Request) (*http.Response, error) {
req.Header.Set("Tailscale-Cap", strconv.Itoa(int(tailcfg.CurrentCapabilityVersion)))
lc.tsClientOnce.Do(func() {
lc.tsClient = &http.Client{
Transport: &http.Transport{
@ -132,8 +132,8 @@ func (lc *LocalClient) DoLocalRequest(req *http.Request) (*http.Response, error)
func (lc *LocalClient) doLocalRequestNiceError(req *http.Request) (*http.Response, error) {
res, err := lc.DoLocalRequest(req)
if err == nil {
if server := res.Header.Get("Tailscale-Version"); server != "" && server != envknob.IPCVersion() && onVersionMismatch != nil {
onVersionMismatch(envknob.IPCVersion(), server)
if server := res.Header.Get("Tailscale-Version"); server != "" && server != ipn.IPCVersion() && onVersionMismatch != nil {
onVersionMismatch(ipn.IPCVersion(), server)
}
if res.StatusCode == 403 {
all, _ := io.ReadAll(res.Body)
@ -197,10 +197,7 @@ func SetVersionMismatchHandler(f func(clientVer, serverVer string)) {
}
func (lc *LocalClient) send(ctx context.Context, method, path string, wantStatus int, body io.Reader) ([]byte, error) {
if jr, ok := body.(jsonReader); ok && jr.err != nil {
return nil, jr.err // fail early if there was a JSON marshaling error
}
req, err := http.NewRequestWithContext(ctx, method, "http://"+apitype.LocalAPIHost+path, body)
req, err := http.NewRequestWithContext(ctx, method, "http://local-tailscaled.sock"+path, body)
if err != nil {
return nil, err
}
@ -231,21 +228,20 @@ func WhoIs(ctx context.Context, remoteAddr string) (*apitype.WhoIsResponse, erro
return defaultLocalClient.WhoIs(ctx, remoteAddr)
}
func decodeJSON[T any](b []byte) (ret T, err error) {
if err := json.Unmarshal(b, &ret); err != nil {
var zero T
return zero, fmt.Errorf("failed to unmarshal JSON into %T: %w", ret, err)
}
return ret, nil
}
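A standalone illustration of the generic helper above; decodeJSON is unexported, so this sketch would live in the same package, and the point type is made up:

```go
type point struct{ X, Y int }

func decodeExample() error {
	p, err := decodeJSON[point]([]byte(`{"X":1,"Y":2}`))
	if err != nil {
		return err
	}
	fmt.Println(p.X + p.Y) // prints 3
	return nil
}
```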
// WhoIs returns the owner of the remoteAddr, which must be an IP or IP:port.
func (lc *LocalClient) WhoIs(ctx context.Context, remoteAddr string) (*apitype.WhoIsResponse, error) {
body, err := lc.get200(ctx, "/localapi/v0/whois?addr="+url.QueryEscape(remoteAddr))
if err != nil {
return nil, err
}
return decodeJSON[*apitype.WhoIsResponse](body)
r := new(apitype.WhoIsResponse)
if err := json.Unmarshal(body, r); err != nil {
if max := 200; len(body) > max {
body = append(body[:max], "..."...)
}
return nil, fmt.Errorf("failed to parse JSON WhoIsResponse from %q", body)
}
return r, nil
}
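A hedged sketch of the classic WhoIs use case, identifying the tailnet node behind an incoming HTTP request (handler and package name illustrative):

```go
package tailscaleexample

import (
	"log"
	"net/http"

	"tailscale.com/client/tailscale"
)

func handler(w http.ResponseWriter, r *http.Request) {
	// r.RemoteAddr is an ip:port on the tailnet when served over Tailscale.
	who, err := tailscale.WhoIs(r.Context(), r.RemoteAddr)
	if err != nil {
		http.Error(w, "unknown peer", http.StatusForbidden)
		return
	}
	log.Printf("request from node %s", who.Node.Name)
}
```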
// Goroutines returns a dump of the Tailscale daemon's current goroutines.
@ -259,25 +255,8 @@ func (lc *LocalClient) DaemonMetrics(ctx context.Context) ([]byte, error) {
return lc.get200(ctx, "/localapi/v0/metrics")
}
// TailDaemonLogs returns a stream of the Tailscale daemon's logs as they arrive.
// Close the context to stop the stream.
func (lc *LocalClient) TailDaemonLogs(ctx context.Context) (io.Reader, error) {
req, err := http.NewRequestWithContext(ctx, "GET", "http://"+apitype.LocalAPIHost+"/localapi/v0/logtap", nil)
if err != nil {
return nil, err
}
res, err := lc.doLocalRequestNiceError(req)
if err != nil {
return nil, err
}
if res.StatusCode != 200 {
return nil, errors.New(res.Status)
}
return res.Body, nil
}
// Pprof returns a pprof profile of the Tailscale daemon.
func (lc *LocalClient) Pprof(ctx context.Context, pprofType string, sec int) ([]byte, error) {
// Profile returns a pprof profile of the Tailscale daemon.
func (lc *LocalClient) Profile(ctx context.Context, pprofType string, sec int) ([]byte, error) {
var secArg string
if sec < 0 || sec > 300 {
return nil, errors.New("duration out of range")
@ -285,7 +264,7 @@ func (lc *LocalClient) Pprof(ctx context.Context, pprofType string, sec int) ([]
if sec != 0 || pprofType == "profile" {
secArg = fmt.Sprint(sec)
}
return lc.get200(ctx, fmt.Sprintf("/localapi/v0/pprof?name=%s&seconds=%v", url.QueryEscape(pprofType), secArg))
return lc.get200(ctx, fmt.Sprintf("/localapi/v0/profile?name=%s&seconds=%v", url.QueryEscape(pprofType), secArg))
}
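A short sketch of fetching a 30-second CPU profile via the renamed Pprof method; the output filename is illustrative:

```go
package tailscaleexample

import (
	"context"
	"os"

	"tailscale.com/client/tailscale"
)

func saveCPUProfile(ctx context.Context, lc *tailscale.LocalClient) error {
	b, err := lc.Pprof(ctx, "profile", 30) // 30-second CPU profile
	if err != nil {
		return err
	}
	return os.WriteFile("tailscaled.pprof", b, 0600)
}
```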
// BugReportOpts contains options to pass to the Tailscale daemon when
@ -297,12 +276,6 @@ type BugReportOpts struct {
// Diagnose specifies whether to print additional diagnostic information to
// the logs when generating this bugreport.
Diagnose bool
// Record specifies, if non-nil, whether to perform a bugreport
// "recording"generating an initial log marker, then waiting for
// this channel to be closed before finishing the request, which
// generates another log marker.
Record <-chan struct{}
}
// BugReportWithOpts logs and returns a log marker that can be shared by the
@ -311,40 +284,16 @@ type BugReportOpts struct {
// The opts type specifies options to pass to the Tailscale daemon when
// generating this bug report.
func (lc *LocalClient) BugReportWithOpts(ctx context.Context, opts BugReportOpts) (string, error) {
qparams := make(url.Values)
var qparams url.Values
if opts.Note != "" {
qparams.Set("note", opts.Note)
}
if opts.Diagnose {
qparams.Set("diagnose", "true")
}
if opts.Record != nil {
qparams.Set("record", "true")
}
ctx, cancel := context.WithCancel(ctx)
defer cancel()
var requestBody io.Reader
if opts.Record != nil {
pr, pw := io.Pipe()
requestBody = pr
// This goroutine waits for the 'Record' channel to be closed,
// and then closes the write end of our pipe to unblock the
// reader.
go func() {
defer pw.Close()
select {
case <-opts.Record:
case <-ctx.Done():
}
}()
}
// lc.send might block if opts.Record != nil; see above.
uri := fmt.Sprintf("/localapi/v0/bugreport?%s", qparams.Encode())
body, err := lc.send(ctx, "POST", uri, 200, requestBody)
body, err := lc.send(ctx, "POST", uri, 200, nil)
if err != nil {
return "", err
}
@ -369,47 +318,6 @@ func (lc *LocalClient) DebugAction(ctx context.Context, action string) error {
return nil
}
// DebugPortmap invokes the debug-portmap endpoint, and returns an
// io.ReadCloser that can be used to read the logs that are printed during this
// process.
func (lc *LocalClient) DebugPortmap(ctx context.Context, duration time.Duration, ty, gwSelf string) (io.ReadCloser, error) {
vals := make(url.Values)
vals.Set("duration", duration.String())
vals.Set("type", ty)
if gwSelf != "" {
vals.Set("gateway_and_self", gwSelf)
}
req, err := http.NewRequestWithContext(ctx, "GET", "http://"+apitype.LocalAPIHost+"/localapi/v0/debug-portmap?"+vals.Encode(), nil)
if err != nil {
return nil, err
}
res, err := lc.doLocalRequestNiceError(req)
if err != nil {
return nil, err
}
if res.StatusCode != 200 {
body, _ := io.ReadAll(res.Body)
res.Body.Close()
return nil, fmt.Errorf("HTTP %s: %s", res.Status, body)
}
return res.Body, nil
}
// SetDevStoreKeyValue sets a statestore key/value. It's only meant for development.
// The schema (including when keys are re-read) is not a stable interface.
func (lc *LocalClient) SetDevStoreKeyValue(ctx context.Context, key, value string) error {
body, err := lc.send(ctx, "POST", "/localapi/v0/dev-set-state-store?"+(url.Values{
"key": {key},
"value": {value},
}).Encode(), 200, nil)
if err != nil {
return fmt.Errorf("error %w: %s", err, body)
}
return nil
}
// SetComponentDebugLogging enables debug logging for the given component for
// the provided duration. If the duration is in the past, the debug logging
// is disabled.
@ -457,7 +365,11 @@ func (lc *LocalClient) status(ctx context.Context, queryString string) (*ipnstat
if err != nil {
return nil, err
}
return decodeJSON[*ipnstate.Status](body)
st := new(ipnstate.Status)
if err := json.Unmarshal(body, st); err != nil {
return nil, err
}
return st, nil
}
// IDToken is a request to get an OIDC ID token for an audience.
@ -468,27 +380,23 @@ func (lc *LocalClient) IDToken(ctx context.Context, aud string) (*tailcfg.TokenR
if err != nil {
return nil, err
}
return decodeJSON[*tailcfg.TokenResponse](body)
tr := new(tailcfg.TokenResponse)
if err := json.Unmarshal(body, tr); err != nil {
return nil, err
}
return tr, nil
}
// WaitingFiles returns the list of Taildrop files that have been received
// by the Tailscale daemon in its staging/cache directory but not yet
// transferred by the user's CLI or GUI client and written to a user's home
// directory somewhere.
func (lc *LocalClient) WaitingFiles(ctx context.Context) ([]apitype.WaitingFile, error) {
return lc.AwaitWaitingFiles(ctx, 0)
}
// AwaitWaitingFiles is like WaitingFiles but takes a duration to wait for an answer.
// If the duration is 0, it will return immediately. The duration is respected at second
// granularity only. If no files are available, it returns (nil, nil).
func (lc *LocalClient) AwaitWaitingFiles(ctx context.Context, d time.Duration) ([]apitype.WaitingFile, error) {
path := "/localapi/v0/files/?waitsec=" + fmt.Sprint(int(d.Seconds()))
body, err := lc.get200(ctx, path)
body, err := lc.get200(ctx, "/localapi/v0/files/")
if err != nil {
return nil, err
}
return decodeJSON[[]apitype.WaitingFile](body)
var wfs []apitype.WaitingFile
if err := json.Unmarshal(body, &wfs); err != nil {
return nil, err
}
return wfs, nil
}
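A hedged sketch of waiting for incoming Taildrop files with the methods above (package name illustrative):

```go
package tailscaleexample

import (
	"context"
	"log"
	"time"

	"tailscale.com/client/tailscale"
)

// listPendingFiles waits up to a minute for Taildrop files to arrive and
// logs what is waiting in the daemon's staging directory.
func listPendingFiles(ctx context.Context, lc *tailscale.LocalClient) error {
	files, err := lc.AwaitWaitingFiles(ctx, time.Minute)
	if err != nil {
		return err
	}
	for _, f := range files {
		log.Printf("pending: %s (%d bytes)", f.Name, f.Size)
	}
	return nil
}
```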
func (lc *LocalClient) DeleteWaitingFile(ctx context.Context, baseName string) error {
@ -497,7 +405,7 @@ func (lc *LocalClient) DeleteWaitingFile(ctx context.Context, baseName string) e
}
func (lc *LocalClient) GetWaitingFile(ctx context.Context, baseName string) (rc io.ReadCloser, size int64, err error) {
req, err := http.NewRequestWithContext(ctx, "GET", "http://"+apitype.LocalAPIHost+"/localapi/v0/files/"+url.PathEscape(baseName), nil)
req, err := http.NewRequestWithContext(ctx, "GET", "http://local-tailscaled.sock/localapi/v0/files/"+url.PathEscape(baseName), nil)
if err != nil {
return nil, 0, err
}
@ -522,7 +430,11 @@ func (lc *LocalClient) FileTargets(ctx context.Context) ([]apitype.FileTarget, e
if err != nil {
return nil, err
}
return decodeJSON[[]apitype.FileTarget](body)
var fts []apitype.FileTarget
if err := json.Unmarshal(body, &fts); err != nil {
return nil, fmt.Errorf("invalid JSON: %w", err)
}
return fts, nil
}
// PushFile sends Taildrop file r to target.
@ -530,7 +442,7 @@ func (lc *LocalClient) FileTargets(ctx context.Context) ([]apitype.FileTarget, e
// A size of -1 means unknown.
// The name parameter is the original filename, not escaped.
func (lc *LocalClient) PushFile(ctx context.Context, target tailcfg.StableNodeID, size int64, name string, r io.Reader) error {
req, err := http.NewRequestWithContext(ctx, "PUT", "http://"+apitype.LocalAPIHost+"/localapi/v0/file-put/"+string(target)+"/"+url.PathEscape(name), r)
req, err := http.NewRequestWithContext(ctx, "PUT", "http://local-tailscaled.sock/localapi/v0/file-put/"+string(target)+"/"+url.PathEscape(name), r)
if err != nil {
return err
}
@ -576,7 +488,11 @@ func (lc *LocalClient) CheckIPForwarding(ctx context.Context) error {
// Note that EditPrefs does the same validation as this, so calling CheckPrefs before
// EditPrefs is not necessary.
func (lc *LocalClient) CheckPrefs(ctx context.Context, p *ipn.Prefs) error {
_, err := lc.send(ctx, "POST", "/localapi/v0/check-prefs", http.StatusOK, jsonBody(p))
pj, err := json.Marshal(p)
if err != nil {
return err
}
_, err = lc.send(ctx, "POST", "/localapi/v0/check-prefs", http.StatusOK, bytes.NewReader(pj))
return err
}
@ -593,27 +509,21 @@ func (lc *LocalClient) GetPrefs(ctx context.Context) (*ipn.Prefs, error) {
}
func (lc *LocalClient) EditPrefs(ctx context.Context, mp *ipn.MaskedPrefs) (*ipn.Prefs, error) {
body, err := lc.send(ctx, "PATCH", "/localapi/v0/prefs", http.StatusOK, jsonBody(mp))
mpj, err := json.Marshal(mp)
if err != nil {
return nil, err
}
return decodeJSON[*ipn.Prefs](body)
body, err := lc.send(ctx, "PATCH", "/localapi/v0/prefs", http.StatusOK, bytes.NewReader(mpj))
if err != nil {
return nil, err
}
var p ipn.Prefs
if err := json.Unmarshal(body, &p); err != nil {
return nil, fmt.Errorf("invalid prefs JSON: %w", err)
}
return &p, nil
}
// StartLoginInteractive starts an interactive login.
func (lc *LocalClient) StartLoginInteractive(ctx context.Context) error {
_, err := lc.send(ctx, "POST", "/localapi/v0/login-interactive", http.StatusNoContent, nil)
return err
}
// Start applies the configuration specified in opts, and starts the
// state machine.
func (lc *LocalClient) Start(ctx context.Context, opts ipn.Options) error {
_, err := lc.send(ctx, "POST", "/localapi/v0/start", http.StatusNoContent, jsonBody(opts))
return err
}
// Logout logs out the current node.
func (lc *LocalClient) Logout(ctx context.Context) error {
_, err := lc.send(ctx, "POST", "/localapi/v0/logout", http.StatusNoContent, nil)
return err
@ -655,7 +565,7 @@ func (lc *LocalClient) DialTCP(ctx context.Context, host string, port uint16) (n
},
}
ctx = httptrace.WithClientTrace(ctx, &trace)
req, err := http.NewRequestWithContext(ctx, "POST", "http://"+apitype.LocalAPIHost+"/localapi/v0/dial", nil)
req, err := http.NewRequestWithContext(ctx, "POST", "http://local-tailscaled.sock/localapi/v0/dial", nil)
if err != nil {
return nil, err
}
@ -817,7 +727,11 @@ func (lc *LocalClient) Ping(ctx context.Context, ip netip.Addr, pingtype tailcfg
if err != nil {
return nil, fmt.Errorf("error %w: %s", err, body)
}
return decodeJSON[*ipnstate.PingResult](body)
pr := new(ipnstate.PingResult)
if err := json.Unmarshal(body, pr); err != nil {
return nil, err
}
return pr, nil
}
// NetworkLockStatus fetches information about the tailnet key authority, if one is configured.
@ -826,21 +740,21 @@ func (lc *LocalClient) NetworkLockStatus(ctx context.Context) (*ipnstate.Network
if err != nil {
return nil, fmt.Errorf("error: %w", err)
}
return decodeJSON[*ipnstate.NetworkLockStatus](body)
pr := new(ipnstate.NetworkLockStatus)
if err := json.Unmarshal(body, pr); err != nil {
return nil, err
}
return pr, nil
}
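A hedged sketch of checking lock state; it assumes ipnstate.NetworkLockStatus exposes an Enabled field, which is not shown in this diff:

```go
package tailscaleexample

import (
	"context"

	"tailscale.com/client/tailscale"
)

// lockEnabled reports whether tailnet lock is active.
// (Assumes an Enabled field on ipnstate.NetworkLockStatus.)
func lockEnabled(ctx context.Context, lc *tailscale.LocalClient) (bool, error) {
	st, err := lc.NetworkLockStatus(ctx)
	if err != nil {
		return false, err
	}
	return st.Enabled, nil
}
```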
// NetworkLockInit initializes the tailnet key authority.
//
// TODO(tom): Plumb through disablement secrets.
func (lc *LocalClient) NetworkLockInit(ctx context.Context, keys []tka.Key, disablementValues [][]byte, supportDisablement []byte) (*ipnstate.NetworkLockStatus, error) {
func (lc *LocalClient) NetworkLockInit(ctx context.Context, keys []tka.Key) (*ipnstate.NetworkLockStatus, error) {
var b bytes.Buffer
type initRequest struct {
Keys []tka.Key
DisablementValues [][]byte
SupportDisablement []byte
Keys []tka.Key
}
if err := json.NewEncoder(&b).Encode(initRequest{Keys: keys, DisablementValues: disablementValues, SupportDisablement: supportDisablement}); err != nil {
if err := json.NewEncoder(&b).Encode(initRequest{Keys: keys}); err != nil {
return nil, err
}
@ -848,35 +762,16 @@ func (lc *LocalClient) NetworkLockInit(ctx context.Context, keys []tka.Key, disa
if err != nil {
return nil, fmt.Errorf("error: %w", err)
}
return decodeJSON[*ipnstate.NetworkLockStatus](body)
}
// NetworkLockWrapPreauthKey wraps a pre-auth key with information to
// enable unattended bringup in the locked tailnet.
func (lc *LocalClient) NetworkLockWrapPreauthKey(ctx context.Context, preauthKey string, tkaKey key.NLPrivate) (string, error) {
encodedPrivate, err := tkaKey.MarshalText()
if err != nil {
return "", err
pr := new(ipnstate.NetworkLockStatus)
if err := json.Unmarshal(body, pr); err != nil {
return nil, err
}
var b bytes.Buffer
type wrapRequest struct {
TSKey string
TKAKey string // key.NLPrivate.MarshalText
}
if err := json.NewEncoder(&b).Encode(wrapRequest{TSKey: preauthKey, TKAKey: string(encodedPrivate)}); err != nil {
return "", err
}
body, err := lc.send(ctx, "POST", "/localapi/v0/tka/wrap-preauth-key", 200, &b)
if err != nil {
return "", fmt.Errorf("error: %w", err)
}
return string(body), nil
return pr, nil
}
// NetworkLockModify adds and/or removes key(s) to the tailnet key authority.
func (lc *LocalClient) NetworkLockModify(ctx context.Context, addKeys, removeKeys []tka.Key) error {
func (lc *LocalClient) NetworkLockModify(ctx context.Context, addKeys, removeKeys []tka.Key) (*ipnstate.NetworkLockStatus, error) {
var b bytes.Buffer
type modifyRequest struct {
AddKeys []tka.Key
@ -884,117 +779,19 @@ func (lc *LocalClient) NetworkLockModify(ctx context.Context, addKeys, removeKey
}
if err := json.NewEncoder(&b).Encode(modifyRequest{AddKeys: addKeys, RemoveKeys: removeKeys}); err != nil {
return err
return nil, err
}
if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/modify", 204, &b); err != nil {
return fmt.Errorf("error: %w", err)
}
return nil
}
// NetworkLockSign signs the specified node-key and transmits that signature to the control plane.
// rotationPublic, if specified, must be an ed25519 public key.
func (lc *LocalClient) NetworkLockSign(ctx context.Context, nodeKey key.NodePublic, rotationPublic []byte) error {
var b bytes.Buffer
type signRequest struct {
NodeKey key.NodePublic
RotationPublic []byte
}
if err := json.NewEncoder(&b).Encode(signRequest{NodeKey: nodeKey, RotationPublic: rotationPublic}); err != nil {
return err
}
if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/sign", 200, &b); err != nil {
return fmt.Errorf("error: %w", err)
}
return nil
}
// NetworkLockAffectedSigs returns all signatures signed by the specified keyID.
func (lc *LocalClient) NetworkLockAffectedSigs(ctx context.Context, keyID tkatype.KeyID) ([]tkatype.MarshaledSignature, error) {
body, err := lc.send(ctx, "POST", "/localapi/v0/tka/affected-sigs", 200, bytes.NewReader(keyID))
body, err := lc.send(ctx, "POST", "/localapi/v0/tka/modify", 200, &b)
if err != nil {
return nil, fmt.Errorf("error: %w", err)
}
return decodeJSON[[]tkatype.MarshaledSignature](body)
}
// NetworkLockLog returns up to maxEntries number of changes to network-lock state.
func (lc *LocalClient) NetworkLockLog(ctx context.Context, maxEntries int) ([]ipnstate.NetworkLockUpdate, error) {
v := url.Values{}
v.Set("limit", fmt.Sprint(maxEntries))
body, err := lc.send(ctx, "GET", "/localapi/v0/tka/log?"+v.Encode(), 200, nil)
if err != nil {
return nil, fmt.Errorf("error %w: %s", err, body)
}
return decodeJSON[[]ipnstate.NetworkLockUpdate](body)
}
// NetworkLockForceLocalDisable forcibly shuts down network lock on this node.
func (lc *LocalClient) NetworkLockForceLocalDisable(ctx context.Context) error {
// This endpoint expects an empty JSON stanza as the payload.
var b bytes.Buffer
if err := json.NewEncoder(&b).Encode(struct{}{}); err != nil {
return err
}
if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/force-local-disable", 200, &b); err != nil {
return fmt.Errorf("error: %w", err)
}
return nil
}
// NetworkLockVerifySigningDeeplink verifies the network lock deeplink contained
// in url and returns information extracted from it.
func (lc *LocalClient) NetworkLockVerifySigningDeeplink(ctx context.Context, url string) (*tka.DeeplinkValidationResult, error) {
vr := struct {
URL string
}{url}
body, err := lc.send(ctx, "POST", "/localapi/v0/tka/verify-deeplink", 200, jsonBody(vr))
if err != nil {
return nil, fmt.Errorf("sending verify-deeplink: %w", err)
}
return decodeJSON[*tka.DeeplinkValidationResult](body)
}
// SetServeConfig sets or replaces the serving settings.
// If config is nil, settings are cleared and serving is disabled.
func (lc *LocalClient) SetServeConfig(ctx context.Context, config *ipn.ServeConfig) error {
_, err := lc.send(ctx, "POST", "/localapi/v0/serve-config", 200, jsonBody(config))
if err != nil {
return fmt.Errorf("sending serve config: %w", err)
}
return nil
}
// NetworkLockDisable shuts down network-lock across the tailnet.
func (lc *LocalClient) NetworkLockDisable(ctx context.Context, secret []byte) error {
if _, err := lc.send(ctx, "POST", "/localapi/v0/tka/disable", 200, bytes.NewReader(secret)); err != nil {
return fmt.Errorf("error: %w", err)
}
return nil
}
// GetServeConfig return the current serve config.
//
// If the serve config is empty, it returns (nil, nil).
func (lc *LocalClient) GetServeConfig(ctx context.Context) (*ipn.ServeConfig, error) {
body, err := lc.send(ctx, "GET", "/localapi/v0/serve-config", 200, nil)
if err != nil {
return nil, fmt.Errorf("getting serve config: %w", err)
}
return getServeConfigFromJSON(body)
}
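A short sketch combining the two serve-config methods above; per the SetServeConfig docs, passing nil clears settings and disables serving:

```go
package tailscaleexample

import (
	"context"

	"tailscale.com/client/tailscale"
)

// clearServe removes any existing serve configuration.
func clearServe(ctx context.Context, lc *tailscale.LocalClient) error {
	sc, err := lc.GetServeConfig(ctx)
	if err != nil {
		return err
	}
	if sc == nil {
		return nil // nothing configured
	}
	return lc.SetServeConfig(ctx, nil)
}
```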
func getServeConfigFromJSON(body []byte) (sc *ipn.ServeConfig, err error) {
if err := json.Unmarshal(body, &sc); err != nil {
pr := new(ipnstate.NetworkLockStatus)
if err := json.Unmarshal(body, pr); err != nil {
return nil, err
}
return sc, nil
return pr, nil
}
// tailscaledConnectHint gives a short hint about why tailscaled (or
@ -1026,172 +823,3 @@ func tailscaledConnectHint() string {
}
return "not running?"
}
type jsonReader struct {
b *bytes.Reader
err error // sticky JSON marshal error, if any
}
// jsonBody returns an io.Reader that marshals v as JSON and then reads it.
func jsonBody(v any) jsonReader {
b, err := json.Marshal(v)
if err != nil {
return jsonReader{err: err}
}
return jsonReader{b: bytes.NewReader(b)}
}
func (r jsonReader) Read(p []byte) (n int, err error) {
if r.err != nil {
return 0, r.err
}
return r.b.Read(p)
}
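The sticky error is what lets callers pass `jsonBody(v)` directly to `send`: the check at the top of `send` returns `jr.err` before issuing any HTTP request, so marshaling failures surface early instead of as confusing transport errors.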
// ProfileStatus returns the current profile and the list of all profiles.
func (lc *LocalClient) ProfileStatus(ctx context.Context) (current ipn.LoginProfile, all []ipn.LoginProfile, err error) {
body, err := lc.send(ctx, "GET", "/localapi/v0/profiles/current", 200, nil)
if err != nil {
return
}
current, err = decodeJSON[ipn.LoginProfile](body)
if err != nil {
return
}
body, err = lc.send(ctx, "GET", "/localapi/v0/profiles/", 200, nil)
if err != nil {
return
}
all, err = decodeJSON[[]ipn.LoginProfile](body)
return current, all, err
}
// SwitchToEmptyProfile creates and switches to a new unnamed profile. The new
// profile is not assigned an ID until it is persisted after a successful login.
// In order to log in to the new profile, the user must call StartLoginInteractive.
func (lc *LocalClient) SwitchToEmptyProfile(ctx context.Context) error {
_, err := lc.send(ctx, "PUT", "/localapi/v0/profiles/", http.StatusCreated, nil)
return err
}
// SwitchProfile switches to the given profile.
func (lc *LocalClient) SwitchProfile(ctx context.Context, profile ipn.ProfileID) error {
_, err := lc.send(ctx, "POST", "/localapi/v0/profiles/"+url.PathEscape(string(profile)), 204, nil)
return err
}
// DeleteProfile removes the profile with the given ID.
// If the profile is the current profile, an empty profile
// will be selected as if SwitchToEmptyProfile was called.
func (lc *LocalClient) DeleteProfile(ctx context.Context, profile ipn.ProfileID) error {
_, err := lc.send(ctx, "DELETE", "/localapi/v0/profiles"+url.PathEscape(string(profile)), http.StatusNoContent, nil)
return err
}
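A hedged sketch of the profile methods above, switching to the first profile that is not current; it assumes ipn.LoginProfile's ID field:

```go
package tailscaleexample

import (
	"context"

	"tailscale.com/client/tailscale"
)

// switchAway switches to the first non-current profile, if any.
func switchAway(ctx context.Context, lc *tailscale.LocalClient) error {
	current, all, err := lc.ProfileStatus(ctx)
	if err != nil {
		return err
	}
	for _, p := range all {
		if p.ID != current.ID {
			return lc.SwitchProfile(ctx, p.ID)
		}
	}
	return nil
}
```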
func (lc *LocalClient) DebugDERPRegion(ctx context.Context, regionIDOrCode string) (*ipnstate.DebugDERPRegionReport, error) {
v := url.Values{"region": {regionIDOrCode}}
body, err := lc.send(ctx, "POST", "/localapi/v0/debug-derp-region?"+v.Encode(), 200, nil)
if err != nil {
return nil, fmt.Errorf("error %w: %s", err, body)
}
return decodeJSON[*ipnstate.DebugDERPRegionReport](body)
}
// DebugSetExpireIn marks the current node key to expire in d.
//
// This is meant primarily for debugging and testing.
func (lc *LocalClient) DebugSetExpireIn(ctx context.Context, d time.Duration) error {
v := url.Values{"expiry": {fmt.Sprint(time.Now().Add(d).Unix())}}
_, err := lc.send(ctx, "POST", "/localapi/v0/set-expiry-sooner?"+v.Encode(), 200, nil)
return err
}
// StreamDebugCapture streams a pcap-formatted packet capture.
//
// The provided context does not determine the lifetime of the
// returned io.ReadCloser.
func (lc *LocalClient) StreamDebugCapture(ctx context.Context) (io.ReadCloser, error) {
req, err := http.NewRequestWithContext(ctx, "POST", "http://"+apitype.LocalAPIHost+"/localapi/v0/debug-capture", nil)
if err != nil {
return nil, err
}
res, err := lc.doLocalRequestNiceError(req)
if err != nil {
return nil, err
}
if res.StatusCode != 200 {
res.Body.Close()
return nil, errors.New(res.Status)
}
return res.Body, nil
}
// WatchIPNBus subscribes to the IPN notification bus. It returns a watcher
// once the bus is connected successfully.
//
// The context is used for the life of the watch, not just the call to
// WatchIPNBus.
//
// The returned IPNBusWatcher's Close method must be called when done to release
// resources.
//
// A default set of ipn.Notify messages is returned, but the set can be modified by mask.
func (lc *LocalClient) WatchIPNBus(ctx context.Context, mask ipn.NotifyWatchOpt) (*IPNBusWatcher, error) {
req, err := http.NewRequestWithContext(ctx, "GET",
"http://"+apitype.LocalAPIHost+"/localapi/v0/watch-ipn-bus?mask="+fmt.Sprint(mask),
nil)
if err != nil {
return nil, err
}
res, err := lc.doLocalRequestNiceError(req)
if err != nil {
return nil, err
}
if res.StatusCode != 200 {
res.Body.Close()
return nil, errors.New(res.Status)
}
dec := json.NewDecoder(res.Body)
return &IPNBusWatcher{
ctx: ctx,
httpRes: res,
dec: dec,
}, nil
}
// IPNBusWatcher is an active subscription (watch) of the local tailscaled IPN bus.
// It's returned by LocalClient.WatchIPNBus.
//
// It must be closed when done.
type IPNBusWatcher struct {
ctx context.Context // from original WatchIPNBus call
httpRes *http.Response
dec *json.Decoder
mu sync.Mutex
closed bool
}
// Close stops the watcher and releases its resources.
func (w *IPNBusWatcher) Close() error {
w.mu.Lock()
defer w.mu.Unlock()
if w.closed {
return nil
}
w.closed = true
return w.httpRes.Body.Close()
}
// Next returns the next ipn.Notify from the stream.
// If the context from LocalClient.WatchIPNBus is done, that error is returned.
func (w *IPNBusWatcher) Next() (ipn.Notify, error) {
var n ipn.Notify
if err := w.dec.Decode(&n); err != nil {
if cerr := w.ctx.Err(); cerr != nil {
err = cerr
}
return ipn.Notify{}, err
}
return n, nil
}
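A hedged usage sketch of WatchIPNBus and the watcher above; the assumption that a mask of 0 requests the default message set follows the doc comment:

```go
package tailscaleexample

import (
	"context"
	"log"

	"tailscale.com/client/tailscale"
)

// watchBus subscribes to the IPN bus and logs notifications until the
// context is canceled.
func watchBus(ctx context.Context, lc *tailscale.LocalClient) error {
	w, err := lc.WatchIPNBus(ctx, 0) // 0: default notification set
	if err != nil {
		return err
	}
	defer w.Close()
	for {
		n, err := w.Next()
		if err != nil {
			return err // ctx.Err() once the context is done
		}
		log.Printf("notify: %+v", n)
	}
}
```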

View File

@ -1,27 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build go1.19
package tailscale
import "testing"
func TestGetServeConfigFromJSON(t *testing.T) {
sc, err := getServeConfigFromJSON([]byte("null"))
if sc != nil {
t.Errorf("want nil for null")
}
if err != nil {
t.Errorf("reading null: %v", err)
}
sc, err = getServeConfigFromJSON([]byte(`{"TCP":{}}`))
if err != nil {
t.Errorf("reading object: %v", err)
} else if sc == nil {
t.Errorf("want non-nil for object")
} else if sc.TCP == nil {
t.Errorf("want non-nil TCP for object")
}
}

View File

@ -1,10 +1,12 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.20
//go:build !go1.19
// +build !go1.19
package tailscale
func init() {
you_need_Go_1_20_to_compile_Tailscale()
you_need_Go_1_19_to_compile_Tailscale()
}

View File

@ -1,7 +1,9 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.19
// +build go1.19
package tailscale

View File

@ -1,7 +1,9 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.19
// +build go1.19
package tailscale
@ -10,8 +12,6 @@ import (
"fmt"
"net/http"
"net/url"
"tailscale.com/util/httpm"
)
// TailnetDeleteRequest handles sending a DELETE request for a tailnet to control.
@ -23,7 +23,7 @@ func (c *Client) TailnetDeleteRequest(ctx context.Context, tailnetID string) (er
}()
path := fmt.Sprintf("%s/api/v2/tailnet/%s", c.baseURL(), url.PathEscape(string(tailnetID)))
req, err := http.NewRequestWithContext(ctx, httpm.DELETE, path, nil)
req, err := http.NewRequestWithContext(ctx, http.MethodDelete, path, nil)
if err != nil {
return err
}

View File

@ -1,9 +1,11 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.19
// +build go1.19
// Package tailscale contains Go clients for the Tailscale LocalAPI and
// Package tailscale contains Go clients for the Tailscale Local API and
// Tailscale control plane API.
//
// Warning: this package is in development and makes no API compatibility

View File

@ -1,5 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2021 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Program addlicense adds a license header to a file.
// It is intended for use with 'go generate',
@ -14,24 +15,26 @@ import (
)
var (
year = flag.Int("year", 0, "copyright year")
file = flag.String("file", "", "file to modify")
)
func usage() {
fmt.Fprintf(os.Stderr, `
usage: addlicense -file FILE <subcommand args...>
usage: addlicense -year YEAR -file FILE <subcommand args...>
`[1:])
flag.PrintDefaults()
fmt.Fprintf(os.Stderr, `
addlicense adds a Tailscale license to the beginning of file.
addlicense adds a Tailscale license to the beginning of file,
using year as the copyright year.
It is intended for use with 'go generate', so it also runs a subcommand,
which presumably creates the file.
Sample usage:
addlicense -file pull_strings.go stringer -type=pull
addlicense -year 2021 -file pull_strings.go stringer -type=pull
`[1:])
os.Exit(2)
}
@ -51,7 +54,7 @@ func main() {
check(err)
f, err := os.OpenFile(*file, os.O_TRUNC|os.O_WRONLY, 0644)
check(err)
_, err = fmt.Fprint(f, license)
_, err = fmt.Fprintf(f, license, *year)
check(err)
_, err = f.Write(b)
check(err)
@ -67,7 +70,8 @@ func check(err error) {
}
var license = `
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) %d Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
`[1:]

View File

@ -1,5 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Cloner is a tool to automate the creation of a Clone method.
//

View File

@ -1,97 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build linux
package main
import (
"context"
"fmt"
"log"
"net/http"
"os"
"tailscale.com/kube"
"tailscale.com/tailcfg"
)
// findKeyInKubeSecret inspects the kube secret secretName for a data
// field called "authkey", and returns its value if present.
func findKeyInKubeSecret(ctx context.Context, secretName string) (string, error) {
s, err := kc.GetSecret(ctx, secretName)
if err != nil {
return "", err
}
ak, ok := s.Data["authkey"]
if !ok {
return "", nil
}
return string(ak), nil
}
// storeDeviceInfo writes deviceID into the "device_id" data field of the kube
// secret secretName.
func storeDeviceInfo(ctx context.Context, secretName string, deviceID tailcfg.StableNodeID, fqdn string) error {
// First check if the secret exists at all. Even if running on
// kubernetes, we do not necessarily store state in a k8s secret.
if _, err := kc.GetSecret(ctx, secretName); err != nil {
if s, ok := err.(*kube.Status); ok {
if s.Code >= 400 && s.Code <= 499 {
// Assume the secret doesn't exist, or we don't have
// permission to access it.
return nil
}
}
return err
}
m := &kube.Secret{
Data: map[string][]byte{
"device_id": []byte(deviceID),
"device_fqdn": []byte(fqdn),
},
}
return kc.StrategicMergePatchSecret(ctx, secretName, m, "tailscale-container")
}
// deleteAuthKey deletes the 'authkey' field of the given kube
// secret. No-op if there is no authkey in the secret.
func deleteAuthKey(ctx context.Context, secretName string) error {
// m is a JSON Patch data structure, see https://jsonpatch.com/ or RFC 6902.
m := []kube.JSONPatch{
{
Op: "remove",
Path: "/data/authkey",
},
}
if err := kc.JSONPatchSecret(ctx, secretName, m); err != nil {
if s, ok := err.(*kube.Status); ok && s.Code == http.StatusUnprocessableEntity {
// This is kubernetes-ese for "the field you asked to
// delete already doesn't exist", aka no-op.
return nil
}
return err
}
return nil
}
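Serialized, the patch body deleteAuthKey sends is just `[{"op":"remove","path":"/data/authkey"}]`; a 422 (StatusUnprocessableEntity) from the API server means the field was already absent, which the code above deliberately treats as a no-op.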
var kc *kube.Client
func initKube(root string) {
if root != "/" {
// If we are running in a test, we need to set the root path to the fake
// service account directory.
kube.SetRootPathForTesting(root)
}
var err error
kc, err = kube.New()
if err != nil {
log.Fatalf("Error creating kube client: %v", err)
}
if root != "/" {
// If we are running in a test, we need to set the URL to the
// httptest server.
kc.SetURL(fmt.Sprintf("https://%s:%s", os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT_HTTPS")))
}
}

View File

@ -1,578 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build linux
// The containerboot binary is a wrapper for starting tailscaled in a container.
// It handles reading the desired mode of operation out of environment
// variables, bringing up and authenticating Tailscale, and any other
// kubernetes-specific side jobs.
//
// As with most container things, configuration is passed through environment
// variables. All configuration is optional.
//
// - TS_AUTHKEY: the authkey to use for login.
// - TS_HOSTNAME: the hostname to request for the node.
// - TS_ROUTES: subnet routes to advertise.
// - TS_DEST_IP: proxy all incoming Tailscale traffic to the given
// destination.
// - TS_TAILSCALED_EXTRA_ARGS: extra arguments to 'tailscaled'.
// - TS_EXTRA_ARGS: extra arguments to 'tailscale up'.
// - TS_USERSPACE: run with userspace networking (the default)
// instead of kernel networking.
// - TS_STATE_DIR: the directory in which to store tailscaled
// state. The data should persist across container
// restarts.
// - TS_ACCEPT_DNS: whether to use the tailnet's DNS configuration.
// - TS_KUBE_SECRET: the name of the Kubernetes secret in which to
// store tailscaled state.
// - TS_SOCKS5_SERVER: the address on which to listen for SOCKS5
// proxying into the tailnet.
// - TS_OUTBOUND_HTTP_PROXY_LISTEN: the address on which to listen
// for HTTP proxying into the tailnet.
// - TS_SOCKET: the path where the tailscaled LocalAPI socket should
// be created.
// - TS_AUTH_ONCE: if true, only attempt to log in if not already
// logged in. If false (the default, for backwards
// compatibility), forcibly log in every time the
// container starts.
//
// When running on Kubernetes, containerboot defaults to storing state in the
// "tailscale" kube secret. To store state on local disk instead, set
// TS_KUBE_SECRET="" and TS_STATE_DIR=/path/to/storage/dir. The state dir should
// be persistent storage.
//
// Additionally, if TS_AUTHKEY is not set and the TS_KUBE_SECRET contains an
// "authkey" field, that key is used as the tailscale authkey.
package main
import (
"context"
"errors"
"fmt"
"io/fs"
"log"
"net/netip"
"os"
"os/exec"
"os/signal"
"path/filepath"
"strconv"
"strings"
"syscall"
"time"
"golang.org/x/sys/unix"
"tailscale.com/client/tailscale"
"tailscale.com/ipn"
"tailscale.com/util/deephash"
)
func main() {
log.SetPrefix("boot: ")
tailscale.I_Acknowledge_This_API_Is_Unstable = true
cfg := &settings{
AuthKey: defaultEnvs([]string{"TS_AUTHKEY", "TS_AUTH_KEY"}, ""),
Hostname: defaultEnv("TS_HOSTNAME", ""),
Routes: defaultEnv("TS_ROUTES", ""),
ProxyTo: defaultEnv("TS_DEST_IP", ""),
DaemonExtraArgs: defaultEnv("TS_TAILSCALED_EXTRA_ARGS", ""),
ExtraArgs: defaultEnv("TS_EXTRA_ARGS", ""),
InKubernetes: os.Getenv("KUBERNETES_SERVICE_HOST") != "",
UserspaceMode: defaultBool("TS_USERSPACE", true),
StateDir: defaultEnv("TS_STATE_DIR", ""),
AcceptDNS: defaultBool("TS_ACCEPT_DNS", false),
KubeSecret: defaultEnv("TS_KUBE_SECRET", "tailscale"),
SOCKSProxyAddr: defaultEnv("TS_SOCKS5_SERVER", ""),
HTTPProxyAddr: defaultEnv("TS_OUTBOUND_HTTP_PROXY_LISTEN", ""),
Socket: defaultEnv("TS_SOCKET", "/tmp/tailscaled.sock"),
AuthOnce: defaultBool("TS_AUTH_ONCE", false),
Root: defaultEnv("TS_TEST_ONLY_ROOT", "/"),
}
if cfg.ProxyTo != "" && cfg.UserspaceMode {
log.Fatal("TS_DEST_IP is not supported with TS_USERSPACE")
}
if !cfg.UserspaceMode {
if err := ensureTunFile(cfg.Root); err != nil {
log.Fatalf("Unable to create tuntap device file: %v", err)
}
if cfg.ProxyTo != "" || cfg.Routes != "" {
if err := ensureIPForwarding(cfg.Root, cfg.ProxyTo, cfg.Routes); err != nil {
log.Printf("Failed to enable IP forwarding: %v", err)
log.Printf("To run tailscale as a proxy or router container, IP forwarding must be enabled.")
if cfg.InKubernetes {
log.Fatalf("You can either set the sysctls as a privileged initContainer, or run the tailscale container with privileged=true.")
} else {
log.Fatalf("You can fix this by running the container with privileged=true, or the equivalent in your container runtime that permits access to sysctls.")
}
}
}
}
if cfg.InKubernetes {
initKube(cfg.Root)
}
// Context is used for all setup stuff until we're in steady
// state, so that if something is hanging we eventually time out
// and crashloop the container.
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
if cfg.InKubernetes && cfg.KubeSecret != "" {
canPatch, err := kc.CheckSecretPermissions(ctx, cfg.KubeSecret)
if err != nil {
log.Fatalf("Some Kubernetes permissions are missing, please check your RBAC configuration: %v", err)
}
cfg.KubernetesCanPatch = canPatch
if cfg.AuthKey == "" {
key, err := findKeyInKubeSecret(ctx, cfg.KubeSecret)
if err != nil {
log.Fatalf("Getting authkey from kube secret: %v", err)
}
if key != "" {
// This behavior of pulling authkeys from kube secrets was added
// at the same time as the patch permission, so we can enforce
// that we must be able to patch out the authkey after
// authenticating if you want to use this feature. This avoids
// us having to deal with the case where we might leave behind
// an unnecessary reusable authkey in a secret, like a rake in
// the grass.
if !cfg.KubernetesCanPatch {
log.Fatalf("authkey found in TS_KUBE_SECRET, but the pod doesn't have patch permissions on the secret to manage the authkey.")
}
log.Print("Using authkey found in kube secret")
cfg.AuthKey = key
} else {
log.Print("No authkey found in kube secret and TS_AUTHKEY not provided, login will be interactive if needed.")
}
}
}
client, daemonPid, err := startTailscaled(ctx, cfg)
if err != nil {
log.Fatalf("failed to bring up tailscale: %v", err)
}
w, err := client.WatchIPNBus(ctx, ipn.NotifyInitialNetMap|ipn.NotifyInitialPrefs|ipn.NotifyInitialState)
if err != nil {
log.Fatalf("failed to watch tailscaled for updates: %v", err)
}
// Because we're still shelling out to `tailscale up` to get access to its
// flag parser, we have to stop watching the IPN bus so that we can block on
// the subcommand without stalling anything. Then once it's done, we resume
// watching the bus.
//
// Depending on the requested mode of operation, this auth step happens at
// different points in containerboot's lifecycle, hence the helper function.
didLogin := false
authTailscale := func() error {
if didLogin {
return nil
}
didLogin = true
w.Close()
if err := tailscaleUp(ctx, cfg); err != nil {
return fmt.Errorf("failed to auth tailscale: %v", err)
}
w, err = client.WatchIPNBus(ctx, ipn.NotifyInitialNetMap|ipn.NotifyInitialState)
if err != nil {
return fmt.Errorf("rewatching tailscaled for updates after auth: %v", err)
}
return nil
}
if !cfg.AuthOnce {
if err := authTailscale(); err != nil {
log.Fatalf("failed to auth tailscale: %v", err)
}
}
authLoop:
for {
n, err := w.Next()
if err != nil {
log.Fatalf("failed to read from tailscaled: %v", err)
}
if n.State != nil {
switch *n.State {
case ipn.NeedsLogin:
if err := authTailscale(); err != nil {
log.Fatalf("failed to auth tailscale: %v", err)
}
case ipn.NeedsMachineAuth:
log.Printf("machine authorization required, please visit the admin panel")
case ipn.Running:
// Technically, all we want is to keep monitoring the bus for
// netmap updates. However, in order to make the container crash
// if tailscale doesn't initially come up, the watch has a
// startup deadline on it. So, we have to break out of this
// watch loop, cancel the watch, and watch again with no
// deadline to continue monitoring for changes.
break authLoop
default:
log.Printf("tailscaled in state %q, waiting", *n.State)
}
}
}
w.Close()
if cfg.InKubernetes && cfg.KubeSecret != "" && cfg.KubernetesCanPatch && cfg.AuthOnce {
// We were told to only auth once, so any secret-bound
// authkey is no longer needed. We don't strictly need to
// wipe it, but it's good hygiene.
log.Printf("Deleting authkey from kube secret")
if err := deleteAuthKey(ctx, cfg.KubeSecret); err != nil {
log.Fatalf("deleting authkey from kube secret: %v", err)
}
}
w, err = client.WatchIPNBus(context.Background(), ipn.NotifyInitialNetMap|ipn.NotifyInitialState)
if err != nil {
log.Fatalf("rewatching tailscaled for updates after auth: %v", err)
}
var (
wantProxy = cfg.ProxyTo != ""
wantDeviceInfo = cfg.InKubernetes && cfg.KubeSecret != "" && cfg.KubernetesCanPatch
startupTasksDone = false
currentIPs deephash.Sum // tailscale IPs assigned to device
currentDeviceInfo deephash.Sum // device ID and fqdn
)
for {
n, err := w.Next()
if err != nil {
log.Fatalf("failed to read from tailscaled: %v", err)
}
if n.State != nil && *n.State != ipn.Running {
// Something's gone wrong and we've left the authenticated state.
// Our container image never recovered gracefully from this, and the
// control flow required to make it work now is hard. So, just crash
// the container and rely on the container runtime to restart us,
// whereupon we'll go through initial auth again.
log.Fatalf("tailscaled left running state (now in state %q), exiting", *n.State)
}
if n.NetMap != nil {
if cfg.ProxyTo != "" && len(n.NetMap.Addresses) > 0 && deephash.Update(&currentIPs, &n.NetMap.Addresses) {
if err := installIPTablesRule(ctx, cfg.ProxyTo, n.NetMap.Addresses); err != nil {
log.Fatalf("installing proxy rules: %v", err)
}
}
deviceInfo := []any{n.NetMap.SelfNode.StableID, n.NetMap.SelfNode.Name}
if cfg.InKubernetes && cfg.KubernetesCanPatch && cfg.KubeSecret != "" && deephash.Update(&currentDeviceInfo, &deviceInfo) {
if err := storeDeviceInfo(ctx, cfg.KubeSecret, n.NetMap.SelfNode.StableID, n.NetMap.SelfNode.Name); err != nil {
log.Fatalf("storing device ID in kube secret: %v", err)
}
}
}
if !startupTasksDone {
if (!wantProxy || currentIPs != deephash.Sum{}) && (!wantDeviceInfo || currentDeviceInfo != deephash.Sum{}) {
// This log message is used in tests to detect when all
// post-auth configuration is done.
log.Println("Startup complete, waiting for shutdown signal")
startupTasksDone = true
// Reap all processes, since we are PID1 and need to collect zombies. We can
// only start doing this once we've stopped shelling out to things like
// `tailscale up`; otherwise this goroutine can reap the CLI subprocesses
// and wedge bringup.
go func() {
for {
var status unix.WaitStatus
pid, err := unix.Wait4(-1, &status, 0, nil)
if errors.Is(err, unix.EINTR) {
continue
}
if err != nil {
log.Fatalf("Waiting for exited processes: %v", err)
}
if pid == daemonPid {
log.Printf("Tailscaled exited")
os.Exit(0)
}
}
}()
}
}
}
}
func startTailscaled(ctx context.Context, cfg *settings) (*tailscale.LocalClient, int, error) {
args := tailscaledArgs(cfg)
sigCh := make(chan os.Signal, 1)
signal.Notify(sigCh, unix.SIGTERM, unix.SIGINT)
// tailscaled runs without context, since it needs to persist
// beyond the startup timeout in ctx.
cmd := exec.Command("tailscaled", args...)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
cmd.SysProcAttr = &syscall.SysProcAttr{
Setpgid: true,
}
log.Printf("Starting tailscaled")
if err := cmd.Start(); err != nil {
return nil, 0, fmt.Errorf("starting tailscaled failed: %v", err)
}
go func() {
<-sigCh
log.Printf("Received SIGTERM from container runtime, shutting down tailscaled")
cmd.Process.Signal(unix.SIGTERM)
}()
// Wait for the socket file to appear, otherwise API ops will racily fail.
log.Printf("Waiting for tailscaled socket")
for {
if ctx.Err() != nil {
log.Fatalf("Timed out waiting for tailscaled socket")
}
_, err := os.Stat(cfg.Socket)
if errors.Is(err, fs.ErrNotExist) {
time.Sleep(100 * time.Millisecond)
continue
} else if err != nil {
log.Fatalf("Waiting for tailscaled socket: %v", err)
}
break
}
tsClient := &tailscale.LocalClient{
Socket: cfg.Socket,
UseSocketOnly: true,
}
return tsClient, cmd.Process.Pid, nil
}
// tailscaledArgs uses cfg to construct the argv for tailscaled.
func tailscaledArgs(cfg *settings) []string {
args := []string{"--socket=" + cfg.Socket}
switch {
case cfg.InKubernetes && cfg.KubeSecret != "":
args = append(args, "--state=kube:"+cfg.KubeSecret)
if cfg.StateDir == "" {
cfg.StateDir = "/tmp"
}
fallthrough
case cfg.StateDir != "":
args = append(args, "--statedir="+cfg.StateDir)
default:
args = append(args, "--state=mem:", "--statedir=/tmp")
}
if cfg.UserspaceMode {
args = append(args, "--tun=userspace-networking")
} else if err := ensureTunFile(cfg.Root); err != nil {
log.Fatalf("ensuring that /dev/net/tun exists: %v", err)
}
if cfg.SOCKSProxyAddr != "" {
args = append(args, "--socks5-server="+cfg.SOCKSProxyAddr)
}
if cfg.HTTPProxyAddr != "" {
args = append(args, "--outbound-http-proxy-listen="+cfg.HTTPProxyAddr)
}
if cfg.DaemonExtraArgs != "" {
args = append(args, strings.Fields(cfg.DaemonExtraArgs)...)
}
return args
}
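As a concrete example: on Kubernetes with the defaults above (TS_KUBE_SECRET=tailscale, no TS_STATE_DIR, userspace networking on), this produces the argv `tailscaled --socket=/tmp/tailscaled.sock --state=kube:tailscale --statedir=/tmp --tun=userspace-networking`.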
// tailscaleUp uses cfg to run 'tailscale up'.
func tailscaleUp(ctx context.Context, cfg *settings) error {
args := []string{"--socket=" + cfg.Socket, "up"}
if cfg.AcceptDNS {
args = append(args, "--accept-dns=true")
} else {
args = append(args, "--accept-dns=false")
}
if cfg.AuthKey != "" {
args = append(args, "--authkey="+cfg.AuthKey)
}
if cfg.Routes != "" {
args = append(args, "--advertise-routes="+cfg.Routes)
}
if cfg.Hostname != "" {
args = append(args, "--hostname="+cfg.Hostname)
}
if cfg.ExtraArgs != "" {
args = append(args, strings.Fields(cfg.ExtraArgs)...)
}
log.Printf("Running 'tailscale up'")
cmd := exec.CommandContext(ctx, "tailscale", args...)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
return fmt.Errorf("tailscale up failed: %v", err)
}
return nil
}
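So a typical key-based login shells out to something like `tailscale --socket=/tmp/tailscaled.sock up --accept-dns=false --authkey=tskey-PLACEHOLDER` before any TS_EXTRA_ARGS are appended (the key shown is a placeholder).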
// ensureTunFile checks that /dev/net/tun exists, creating it if
// missing.
func ensureTunFile(root string) error {
// Verify that /dev/net/tun exists, in some container envs it
// needs to be mknod-ed.
if _, err := os.Stat(filepath.Join(root, "dev/net")); errors.Is(err, fs.ErrNotExist) {
if err := os.MkdirAll(filepath.Join(root, "dev/net"), 0755); err != nil {
return err
}
}
if _, err := os.Stat(filepath.Join(root, "dev/net/tun")); errors.Is(err, fs.ErrNotExist) {
dev := unix.Mkdev(10, 200) // tuntap major and minor
if err := unix.Mknod(filepath.Join(root, "dev/net/tun"), 0600|unix.S_IFCHR, int(dev)); err != nil {
return err
}
}
return nil
}
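The Mknod call is the in-process equivalent of running `mknod -m 0600 /dev/net/tun c 10 200`, the usual manual fix in container environments that ship without the device node.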
// ensureIPForwarding enables IPv4/IPv6 forwarding for the container.
func ensureIPForwarding(root, proxyTo, routes string) error {
var (
v4Forwarding, v6Forwarding bool
)
if proxyTo != "" {
proxyIP, err := netip.ParseAddr(proxyTo)
if err != nil {
return fmt.Errorf("invalid proxy destination IP: %v", err)
}
if proxyIP.Is4() {
v4Forwarding = true
} else {
v6Forwarding = true
}
}
if routes != "" {
for _, route := range strings.Split(routes, ",") {
cidr, err := netip.ParsePrefix(route)
if err != nil {
return fmt.Errorf("invalid subnet route: %v", err)
}
if cidr.Addr().Is4() {
v4Forwarding = true
} else {
v6Forwarding = true
}
}
}
var paths []string
if v4Forwarding {
paths = append(paths, filepath.Join(root, "proc/sys/net/ipv4/ip_forward"))
}
if v6Forwarding {
paths = append(paths, filepath.Join(root, "proc/sys/net/ipv6/conf/all/forwarding"))
}
// In some common configurations (e.g. default docker,
// kubernetes), the container environment denies write access to
// most sysctls, including IP forwarding controls. Check the
// sysctl values before trying to change them, so that we
// gracefully do nothing if the container's already been set up
// properly by e.g. a k8s initContainer.
for _, path := range paths {
bs, err := os.ReadFile(path)
if err != nil {
return fmt.Errorf("reading %q: %w", path, err)
}
if v := strings.TrimSpace(string(bs)); v != "1" {
if err := os.WriteFile(path, []byte("1"), 0644); err != nil {
return fmt.Errorf("enabling %q: %w", path, err)
}
}
}
return nil
}
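Writing "1" to these files is the procfs equivalent of `sysctl -w net.ipv4.ip_forward=1` and `sysctl -w net.ipv6.conf.all.forwarding=1`, which is what a privileged initContainer would otherwise run.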
func installIPTablesRule(ctx context.Context, dstStr string, tsIPs []netip.Prefix) error {
dst, err := netip.ParseAddr(dstStr)
if err != nil {
return err
}
argv0 := "iptables"
if dst.Is6() {
argv0 = "ip6tables"
}
var local string
for _, pfx := range tsIPs {
if !pfx.IsSingleIP() {
continue
}
if pfx.Addr().Is4() != dst.Is4() {
continue
}
local = pfx.Addr().String()
break
}
if local == "" {
return fmt.Errorf("no tailscale IP matching family of %s found in %v", dstStr, tsIPs)
}
// Technically, if the control server ever changes the IPs assigned to this
// node, we'll slowly accumulate iptables rules. This shouldn't happen, so
// for now we'll live with it.
cmd := exec.CommandContext(ctx, argv0, "-t", "nat", "-I", "PREROUTING", "1", "-d", local, "-j", "DNAT", "--to-destination", dstStr)
cmd.Stdout = os.Stdout
cmd.Stderr = os.Stderr
if err := cmd.Run(); err != nil {
return fmt.Errorf("executing iptables failed: %w", err)
}
return nil
}
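For an IPv4 destination, the rule inserted is equivalent to running `iptables -t nat -I PREROUTING 1 -d <tailscale IPv4> -j DNAT --to-destination <TS_DEST_IP>` (or the ip6tables analogue when TS_DEST_IP is an IPv6 address).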
// settings is all the configuration for containerboot.
type settings struct {
AuthKey string
Hostname string
Routes string
ProxyTo string
DaemonExtraArgs string
ExtraArgs string
InKubernetes bool
UserspaceMode bool
StateDir string
AcceptDNS bool
KubeSecret string
SOCKSProxyAddr string
HTTPProxyAddr string
Socket string
AuthOnce bool
Root string
KubernetesCanPatch bool
}
// defaultEnv returns the value of the given envvar name, or defVal if
// unset.
func defaultEnv(name, defVal string) string {
if v, ok := os.LookupEnv(name); ok {
return v
}
return defVal
}
func defaultEnvs(names []string, defVal string) string {
for _, name := range names {
if v, ok := os.LookupEnv(name); ok {
return v
}
}
return defVal
}
// defaultBool returns the boolean value of the given envvar name, or
// defVal if unset or not a bool.
func defaultBool(name string, defVal bool) bool {
v := os.Getenv(name)
ret, err := strconv.ParseBool(v)
if err != nil {
return defVal
}
return ret
}
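A quick self-contained illustration of defaultBool's fallback behavior; this restates the helper above and follows strconv.ParseBool's accepted values:

    package main

    import (
        "fmt"
        "os"
        "strconv"
    )

    // defaultBool mirrors the helper above: the envvar's value if it
    // parses as a bool, defVal otherwise.
    func defaultBool(name string, defVal bool) bool {
        ret, err := strconv.ParseBool(os.Getenv(name))
        if err != nil {
            return defVal
        }
        return ret
    }

    func main() {
        os.Setenv("TS_USERSPACE", "0")
        fmt.Println(defaultBool("TS_USERSPACE", true)) // false: "0" parses as a bool
        os.Setenv("TS_USERSPACE", "bogus")
        fmt.Println(defaultBool("TS_USERSPACE", true)) // true: parse error, default wins
        os.Unsetenv("TS_USERSPACE")
        fmt.Println(defaultBool("TS_USERSPACE", true)) // true: unset, default wins
    }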

File diff suppressed because it is too large

View File

@ -1,8 +0,0 @@
#!/usr/bin/env bash
#
# This is a fake tailscale CLI (and also iptables and ip6tables) that
# records its arguments and exits successfully.
#
# It is used by main_test.go to test the behavior of containerboot.
echo $0 $@ >>$TS_TEST_RECORD_ARGS

View File

@ -1,37 +0,0 @@
#!/usr/bin/env bash
#
# This is a fake tailscale CLI that records its arguments, symlinks a
# fake LocalAPI socket into place, and does nothing until terminated.
#
# It is used by main_test.go to test the behavior of containerboot.
set -eu
echo $0 $@ >>$TS_TEST_RECORD_ARGS
socket=""
while [[ $# -gt 0 ]]; do
case $1 in
--socket=*)
socket="${1#--socket=}"
shift
;;
--socket)
shift
socket="$1"
shift
;;
*)
shift
;;
esac
done
if [[ -z "$socket" ]]; then
echo "didn't find socket path in args"
exit 1
fi
ln -s "$TS_TEST_SOCKET" "$socket"
while true; do sleep 1; done

View File

@ -1,5 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2021 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
@ -14,7 +15,6 @@ import (
"time"
"tailscale.com/syncs"
"tailscale.com/util/slicesx"
)
const refreshTimeout = time.Minute
@ -53,13 +53,6 @@ func refreshBootstrapDNS() {
ctx, cancel := context.WithTimeout(context.Background(), refreshTimeout)
defer cancel()
dnsEntries := resolveList(ctx, strings.Split(*bootstrapDNS, ","))
// Randomize the order of the IPs for each name to avoid the client
// biasing toward IPv6.
for k := range dnsEntries {
ips := dnsEntries[k]
slicesx.Shuffle(ips)
dnsEntries[k] = ips
}
j, err := json.MarshalIndent(dnsEntries, "", "\t")
if err != nil {
// leave the old values in place

View File

@ -1,5 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
@ -11,12 +12,14 @@ import (
"net/url"
"reflect"
"testing"
"tailscale.com/tstest"
)
func BenchmarkHandleBootstrapDNS(b *testing.B) {
tstest.Replace(b, bootstrapDNS, "log.tailscale.io,login.tailscale.com,controlplane.tailscale.com,login.us.tailscale.com")
prev := *bootstrapDNS
*bootstrapDNS = "log.tailscale.io,login.tailscale.com,controlplane.tailscale.com,login.us.tailscale.com"
defer func() {
*bootstrapDNS = prev
}()
refreshBootstrapDNS()
w := new(bitbucketResponseWriter)
req, _ := http.NewRequest("GET", "https://localhost/bootstrap-dns?q="+url.QueryEscape("log.tailscale.io"), nil)

View File

@ -1,5 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2021 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
@ -72,7 +73,7 @@ func NewManualCertManager(certdir, hostname string) (certProvider, error) {
return nil, fmt.Errorf("can not load cert: %w", err)
}
if err := x509Cert.VerifyHostname(hostname); err != nil {
// return nil, fmt.Errorf("cert invalid for hostname %q: %w", hostname, err)
return nil, fmt.Errorf("cert invalid for hostname %q: %w", hostname, err)
}
return &manualCertManager{cert: &cert, hostname: hostname}, nil
}
@ -81,7 +82,7 @@ func (m *manualCertManager) TLSConfig() *tls.Config {
return &tls.Config{
Certificates: nil,
NextProtos: []string{
"http/1.1",
"h2", "http/1.1", // enable HTTP/2
},
GetCertificate: m.getCertificate,
}
@ -89,7 +90,7 @@ func (m *manualCertManager) TLSConfig() *tls.Config {
func (m *manualCertManager) getCertificate(hi *tls.ClientHelloInfo) (*tls.Certificate, error) {
if hi.ServerName != m.hostname {
//return nil, fmt.Errorf("cert mismatch with hostname: %q", hi.ServerName)
return nil, fmt.Errorf("cert mismatch with hostname: %q", hi.ServerName)
}
// Return a shallow copy of the cert so the caller can append to its

View File

@ -2,95 +2,24 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
filippo.io/edwards25519 from github.com/hdevalence/ed25519consensus
filippo.io/edwards25519/field from filippo.io/edwards25519
W 💣 github.com/Microsoft/go-winio from tailscale.com/safesocket
W 💣 github.com/Microsoft/go-winio/internal/fs from github.com/Microsoft/go-winio
W 💣 github.com/Microsoft/go-winio/internal/socket from github.com/Microsoft/go-winio
W github.com/Microsoft/go-winio/internal/stringbuffer from github.com/Microsoft/go-winio/internal/fs
W github.com/Microsoft/go-winio/pkg/guid from github.com/Microsoft/go-winio+
W 💣 github.com/alexbrainman/sspi from github.com/alexbrainman/sspi/internal/common+
W github.com/alexbrainman/sspi/internal/common from github.com/alexbrainman/sspi/negotiate
W 💣 github.com/alexbrainman/sspi/negotiate from tailscale.com/net/tshttpproxy
github.com/beorn7/perks/quantile from github.com/prometheus/client_golang/prometheus
💣 github.com/cespare/xxhash/v2 from github.com/prometheus/client_golang/prometheus
L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw
github.com/fxamacker/cbor/v2 from tailscale.com/tka
github.com/golang/groupcache/lru from tailscale.com/net/dnscache
github.com/golang/protobuf/proto from github.com/matttproud/golang_protobuf_extensions/pbutil+
L github.com/google/nftables from tailscale.com/util/linuxfw
L 💣 github.com/google/nftables/alignedbuff from github.com/google/nftables/xt
L 💣 github.com/google/nftables/binaryutil from github.com/google/nftables+
L github.com/google/nftables/expr from github.com/google/nftables+
L github.com/google/nftables/internal/parseexprfunc from github.com/google/nftables+
L github.com/google/nftables/xt from github.com/google/nftables/expr+
github.com/hdevalence/ed25519consensus from tailscale.com/tka
L github.com/josharian/native from github.com/mdlayher/netlink+
L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/interfaces+
L 💣 github.com/jsimonetti/rtnetlink from tailscale.com/net/interfaces
L github.com/jsimonetti/rtnetlink/internal/unix from github.com/jsimonetti/rtnetlink
github.com/klauspost/compress/flate from nhooyr.io/websocket
github.com/matttproud/golang_protobuf_extensions/pbutil from github.com/prometheus/common/expfmt
L 💣 github.com/mdlayher/netlink from github.com/jsimonetti/rtnetlink+
L 💣 github.com/mdlayher/netlink/nlenc from github.com/jsimonetti/rtnetlink+
L github.com/mdlayher/netlink/nltest from github.com/google/nftables
L 💣 github.com/mdlayher/socket from github.com/mdlayher/netlink
💣 github.com/mitchellh/go-ps from tailscale.com/safesocket
💣 github.com/prometheus/client_golang/prometheus from tailscale.com/tsweb/promvarz
github.com/prometheus/client_golang/prometheus/internal from github.com/prometheus/client_golang/prometheus
github.com/prometheus/client_model/go from github.com/prometheus/client_golang/prometheus+
github.com/prometheus/common/expfmt from github.com/prometheus/client_golang/prometheus+
github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg from github.com/prometheus/common/expfmt
github.com/prometheus/common/model from github.com/prometheus/client_golang/prometheus+
LD github.com/prometheus/procfs from github.com/prometheus/client_golang/prometheus
LD github.com/prometheus/procfs/internal/fs from github.com/prometheus/procfs
LD github.com/prometheus/procfs/internal/util from github.com/prometheus/procfs
L 💣 github.com/tailscale/netlink from tailscale.com/util/linuxfw
L 💣 github.com/vishvananda/netlink/nl from github.com/tailscale/netlink
L github.com/vishvananda/netns from github.com/tailscale/netlink+
github.com/x448/float16 from github.com/fxamacker/cbor/v2
💣 go4.org/mem from tailscale.com/client/tailscale+
go4.org/netipx from tailscale.com/wgengine/filter
W 💣 golang.zx2c4.com/wireguard/windows/tunnel/winipcfg from tailscale.com/net/interfaces+
google.golang.org/protobuf/encoding/prototext from github.com/golang/protobuf/proto+
google.golang.org/protobuf/encoding/protowire from github.com/golang/protobuf/proto+
google.golang.org/protobuf/internal/descfmt from google.golang.org/protobuf/internal/filedesc
google.golang.org/protobuf/internal/descopts from google.golang.org/protobuf/internal/filedesc+
google.golang.org/protobuf/internal/detrand from google.golang.org/protobuf/internal/descfmt+
google.golang.org/protobuf/internal/encoding/defval from google.golang.org/protobuf/internal/encoding/tag+
google.golang.org/protobuf/internal/encoding/messageset from google.golang.org/protobuf/encoding/prototext+
google.golang.org/protobuf/internal/encoding/tag from google.golang.org/protobuf/internal/impl
google.golang.org/protobuf/internal/encoding/text from google.golang.org/protobuf/encoding/prototext+
google.golang.org/protobuf/internal/errors from google.golang.org/protobuf/encoding/prototext+
google.golang.org/protobuf/internal/filedesc from google.golang.org/protobuf/internal/encoding/tag+
google.golang.org/protobuf/internal/filetype from google.golang.org/protobuf/runtime/protoimpl
google.golang.org/protobuf/internal/flags from google.golang.org/protobuf/encoding/prototext+
google.golang.org/protobuf/internal/genid from google.golang.org/protobuf/encoding/prototext+
💣 google.golang.org/protobuf/internal/impl from google.golang.org/protobuf/internal/filetype+
google.golang.org/protobuf/internal/order from google.golang.org/protobuf/encoding/prototext+
google.golang.org/protobuf/internal/pragma from google.golang.org/protobuf/encoding/prototext+
google.golang.org/protobuf/internal/set from google.golang.org/protobuf/encoding/prototext
💣 google.golang.org/protobuf/internal/strs from google.golang.org/protobuf/encoding/prototext+
google.golang.org/protobuf/internal/version from google.golang.org/protobuf/runtime/protoimpl
google.golang.org/protobuf/proto from github.com/golang/protobuf/proto+
google.golang.org/protobuf/reflect/protodesc from github.com/golang/protobuf/proto
💣 google.golang.org/protobuf/reflect/protoreflect from github.com/golang/protobuf/proto+
google.golang.org/protobuf/reflect/protoregistry from github.com/golang/protobuf/proto+
google.golang.org/protobuf/runtime/protoiface from github.com/golang/protobuf/proto+
google.golang.org/protobuf/runtime/protoimpl from github.com/golang/protobuf/proto+
google.golang.org/protobuf/types/descriptorpb from google.golang.org/protobuf/reflect/protodesc
google.golang.org/protobuf/types/known/timestamppb from github.com/prometheus/client_golang/prometheus+
L gvisor.dev/gvisor/pkg/abi from gvisor.dev/gvisor/pkg/abi/linux
L 💣 gvisor.dev/gvisor/pkg/abi/linux from tailscale.com/util/linuxfw
L gvisor.dev/gvisor/pkg/bits from gvisor.dev/gvisor/pkg/abi/linux
L gvisor.dev/gvisor/pkg/context from gvisor.dev/gvisor/pkg/abi/linux
L 💣 gvisor.dev/gvisor/pkg/gohacks from gvisor.dev/gvisor/pkg/abi/linux+
L 💣 gvisor.dev/gvisor/pkg/hostarch from gvisor.dev/gvisor/pkg/abi/linux+
L gvisor.dev/gvisor/pkg/linewriter from gvisor.dev/gvisor/pkg/log
L gvisor.dev/gvisor/pkg/log from gvisor.dev/gvisor/pkg/context
L gvisor.dev/gvisor/pkg/marshal from gvisor.dev/gvisor/pkg/abi/linux+
L 💣 gvisor.dev/gvisor/pkg/marshal/primitive from gvisor.dev/gvisor/pkg/abi/linux
L 💣 gvisor.dev/gvisor/pkg/state from gvisor.dev/gvisor/pkg/abi/linux+
L gvisor.dev/gvisor/pkg/state/wire from gvisor.dev/gvisor/pkg/state
L 💣 gvisor.dev/gvisor/pkg/sync from gvisor.dev/gvisor/pkg/linewriter+
L gvisor.dev/gvisor/pkg/waiter from gvisor.dev/gvisor/pkg/context
nhooyr.io/websocket from tailscale.com/cmd/derper+
nhooyr.io/websocket/internal/errd from nhooyr.io/websocket
nhooyr.io/websocket/internal/xsync from nhooyr.io/websocket
@ -102,27 +31,22 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
tailscale.com/derp/derphttp from tailscale.com/cmd/derper
tailscale.com/disco from tailscale.com/derp
tailscale.com/envknob from tailscale.com/derp+
tailscale.com/health from tailscale.com/net/tlsdial
tailscale.com/hostinfo from tailscale.com/net/interfaces+
tailscale.com/ipn from tailscale.com/client/tailscale
tailscale.com/ipn/ipnstate from tailscale.com/client/tailscale+
tailscale.com/metrics from tailscale.com/cmd/derper+
💣 tailscale.com/metrics from tailscale.com/cmd/derper+
tailscale.com/net/dnscache from tailscale.com/derp/derphttp
tailscale.com/net/flowtrack from tailscale.com/net/packet+
💣 tailscale.com/net/interfaces from tailscale.com/net/netns+
tailscale.com/net/netaddr from tailscale.com/ipn+
tailscale.com/net/netknob from tailscale.com/net/netns
tailscale.com/net/netmon from tailscale.com/net/sockstats+
tailscale.com/net/netns from tailscale.com/derp/derphttp
tailscale.com/net/netutil from tailscale.com/client/tailscale
tailscale.com/net/packet from tailscale.com/wgengine/filter
tailscale.com/net/sockstats from tailscale.com/derp/derphttp
tailscale.com/net/stun from tailscale.com/cmd/derper
L tailscale.com/net/tcpinfo from tailscale.com/derp
tailscale.com/net/tlsdial from tailscale.com/derp/derphttp
tailscale.com/net/tsaddr from tailscale.com/ipn+
💣 tailscale.com/net/tshttpproxy from tailscale.com/derp/derphttp+
tailscale.com/net/wsconn from tailscale.com/cmd/derper+
tailscale.com/paths from tailscale.com/client/tailscale
tailscale.com/safesocket from tailscale.com/client/tailscale
tailscale.com/syncs from tailscale.com/cmd/derper+
@ -130,39 +54,28 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
tailscale.com/tka from tailscale.com/client/tailscale+
W tailscale.com/tsconst from tailscale.com/net/interfaces
💣 tailscale.com/tstime/mono from tailscale.com/tstime/rate
tailscale.com/tstime/rate from tailscale.com/wgengine/filter+
tailscale.com/tstime/rate from tailscale.com/wgengine/filter
tailscale.com/tsweb from tailscale.com/cmd/derper
tailscale.com/tsweb/promvarz from tailscale.com/tsweb
tailscale.com/tsweb/varz from tailscale.com/tsweb+
tailscale.com/types/dnstype from tailscale.com/tailcfg
tailscale.com/types/empty from tailscale.com/ipn
tailscale.com/types/ipproto from tailscale.com/net/flowtrack+
tailscale.com/types/key from tailscale.com/cmd/derper+
tailscale.com/types/lazy from tailscale.com/version+
tailscale.com/types/logger from tailscale.com/cmd/derper+
tailscale.com/types/netmap from tailscale.com/ipn
tailscale.com/types/opt from tailscale.com/client/tailscale+
tailscale.com/types/pad32 from tailscale.com/derp
tailscale.com/types/persist from tailscale.com/ipn
tailscale.com/types/preftype from tailscale.com/ipn
tailscale.com/types/ptr from tailscale.com/hostinfo+
tailscale.com/types/structs from tailscale.com/ipn+
tailscale.com/types/tkatype from tailscale.com/types/key+
tailscale.com/types/views from tailscale.com/ipn/ipnstate+
W tailscale.com/util/clientmetric from tailscale.com/net/tshttpproxy
tailscale.com/util/cloudenv from tailscale.com/hostinfo+
W tailscale.com/util/cmpver from tailscale.com/net/tshttpproxy
tailscale.com/util/cmpx from tailscale.com/cmd/derper+
L 💣 tailscale.com/util/dirwalk from tailscale.com/metrics
tailscale.com/util/dnsname from tailscale.com/hostinfo+
tailscale.com/util/httpm from tailscale.com/client/tailscale
W tailscale.com/util/endian from tailscale.com/net/netns
tailscale.com/util/lineread from tailscale.com/hostinfo+
L 💣 tailscale.com/util/linuxfw from tailscale.com/net/netns
tailscale.com/util/mak from tailscale.com/syncs+
tailscale.com/util/multierr from tailscale.com/health+
tailscale.com/util/set from tailscale.com/health+
tailscale.com/util/singleflight from tailscale.com/net/dnscache
tailscale.com/util/slicesx from tailscale.com/cmd/derper+
tailscale.com/util/vizerror from tailscale.com/tsweb
L tailscale.com/util/strs from tailscale.com/hostinfo
W 💣 tailscale.com/util/winutil from tailscale.com/hostinfo+
tailscale.com/version from tailscale.com/derp+
tailscale.com/version/distro from tailscale.com/hostinfo+
@ -176,18 +89,15 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
golang.org/x/crypto/chacha20poly1305 from crypto/tls
golang.org/x/crypto/cryptobyte from crypto/ecdsa+
golang.org/x/crypto/cryptobyte/asn1 from crypto/ecdsa+
golang.org/x/crypto/curve25519 from golang.org/x/crypto/nacl/box+
golang.org/x/crypto/curve25519 from crypto/tls+
golang.org/x/crypto/hkdf from crypto/tls
golang.org/x/crypto/nacl/box from tailscale.com/types/key
golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box
golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+
golang.org/x/exp/constraints from golang.org/x/exp/slices
golang.org/x/exp/maps from tailscale.com/types/views
golang.org/x/exp/slices from tailscale.com/net/tsaddr+
L golang.org/x/net/bpf from github.com/mdlayher/netlink+
golang.org/x/net/dns/dnsmessage from net+
golang.org/x/net/http/httpguts from net/http
golang.org/x/net/http/httpproxy from net/http+
golang.org/x/net/http/httpproxy from net/http
golang.org/x/net/http2/hpack from net/http
golang.org/x/net/idna from golang.org/x/crypto/acme/autocert+
golang.org/x/net/proxy from tailscale.com/net/netns
@ -208,7 +118,6 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
bytes from bufio+
compress/flate from compress/gzip+
compress/gzip from internal/profile+
L compress/zlib from debug/elf
container/list from crypto/tls+
context from crypto/tls+
crypto from crypto/ecdsa+
@ -216,7 +125,6 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
crypto/cipher from crypto/aes+
crypto/des from crypto/tls+
crypto/dsa from crypto/x509
crypto/ecdh from crypto/ecdsa+
crypto/ecdsa from crypto/tls+
crypto/ed25519 from crypto/tls+
crypto/elliptic from crypto/ecdsa+
@ -232,8 +140,6 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
crypto/tls from golang.org/x/crypto/acme+
crypto/x509 from crypto/tls+
crypto/x509/pkix from crypto/x509+
L debug/dwarf from debug/elf
L debug/elf from golang.org/x/sys/unix
embed from crypto/internal/nistec+
encoding from encoding/json+
encoding/asn1 from crypto/x509+
@ -247,18 +153,14 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
expvar from tailscale.com/cmd/derper+
flag from tailscale.com/cmd/derper
fmt from compress/flate+
go/token from google.golang.org/protobuf/internal/strs
hash from crypto+
L hash/adler32 from compress/zlib
hash/crc32 from compress/gzip+
hash/fnv from google.golang.org/protobuf/internal/detrand
hash/maphash from go4.org/mem
html from net/http/pprof+
io from bufio+
io/fs from crypto/x509+
io/ioutil from github.com/mitchellh/go-ps+
log from expvar+
log/internal from log
math from compress/flate+
math/big from crypto/dsa+
math/bits from compress/flate+
@ -270,20 +172,18 @@ tailscale.com/cmd/derper dependencies: (generated by github.com/tailscale/depawa
net/http from expvar+
net/http/httptrace from net/http+
net/http/internal from net/http
net/http/pprof from tailscale.com/tsweb+
net/http/pprof from tailscale.com/tsweb
net/netip from go4.org/netipx+
net/textproto from golang.org/x/net/http/httpguts+
net/url from crypto/x509+
os from crypto/rand+
os/exec from golang.zx2c4.com/wireguard/windows/tunnel/winipcfg+
W os/user from tailscale.com/util/winutil
path from golang.org/x/crypto/acme/autocert+
path/filepath from crypto/x509+
reflect from crypto/x509+
regexp from internal/profile+
regexp/syntax from regexp
runtime/debug from golang.org/x/crypto/acme+
runtime/metrics from github.com/prometheus/client_golang/prometheus+
runtime/pprof from net/http/pprof
runtime/trace from net/http/pprof
sort from compress/flate+

View File

@ -1,5 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// The derper binary is a simple DERP server.
package main // import "tailscale.com/cmd/derper"
@ -33,12 +34,11 @@ import (
"tailscale.com/net/stun"
"tailscale.com/tsweb"
"tailscale.com/types/key"
"tailscale.com/util/cmpx"
)
var (
dev = flag.Bool("dev", false, "run in localhost development mode (overrides -a)")
addr = flag.String("a", ":443", "server HTTP/HTTPS listen address, in form \":port\", \"ip:port\", or for IPv6 \"[ip]:port\". If the IP is omitted, it defaults to all interfaces. Serves HTTPS if the port is 443 and/or -certmode is manual, otherwise HTTP.")
dev = flag.Bool("dev", false, "run in localhost development mode")
addr = flag.String("a", ":443", "server HTTPS listen address, in form \":port\", \"ip:port\", or for IPv6 \"[ip]:port\". If the IP is omitted, it defaults to all interfaces.")
httpPort = flag.Int("http-port", 80, "The port on which to serve HTTP. Set to -1 to disable. The listener is bound to the same IP (if any) as specified in the -a flag.")
stunPort = flag.Int("stun-port", 3478, "The UDP port on which to serve STUN. The listener is bound to the same IP (if any) as specified in the -a flag.")
configPath = flag.String("c", "", "config file path")
@ -325,31 +325,11 @@ func main() {
}
}
const (
noContentChallengeHeader = "X-Tailscale-Challenge"
noContentResponseHeader = "X-Tailscale-Response"
)
// For captive portal detection
func serveNoContent(w http.ResponseWriter, r *http.Request) {
if challenge := r.Header.Get(noContentChallengeHeader); challenge != "" {
badChar := strings.IndexFunc(challenge, func(r rune) bool {
return !isChallengeChar(r)
}) != -1
if len(challenge) <= 64 && !badChar {
w.Header().Set(noContentResponseHeader, "response "+challenge)
}
}
w.WriteHeader(http.StatusNoContent)
}
func isChallengeChar(c rune) bool {
// Semi-randomly chosen as a limited set of valid characters
return ('a' <= c && c <= 'z') || ('A' <= c && c <= 'Z') ||
('0' <= c && c <= '9') ||
c == '.' || c == '-' || c == '_'
}
// probeHandler is the endpoint that js/wasm clients hit to measure
// DERP latency, since they can't do UDP STUN queries.
func probeHandler(w http.ResponseWriter, r *http.Request) {
@ -437,7 +417,11 @@ func defaultMeshPSKFile() string {
}
func rateLimitedListenAndServeTLS(srv *http.Server) error {
ln, err := net.Listen("tcp", cmpx.Or(srv.Addr, ":https"))
addr := srv.Addr
if addr == "" {
addr = ":https"
}
ln, err := net.Listen("tcp", addr)
if err != nil {
return err
}

View File

@ -1,14 +1,12 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
import (
"context"
"net"
"net/http"
"net/http/httptest"
"strings"
"testing"
"tailscale.com/net/stun"
@ -69,62 +67,3 @@ func BenchmarkServerSTUN(b *testing.B) {
}
}
func TestNoContent(t *testing.T) {
testCases := []struct {
name string
input string
want string
}{
{
name: "no challenge",
},
{
name: "valid challenge",
input: "input",
want: "response input",
},
{
name: "valid challenge hostname",
input: "ts_derp99b.tailscale.com",
want: "response ts_derp99b.tailscale.com",
},
{
name: "invalid challenge",
input: "foo\x00bar",
want: "",
},
{
name: "whitespace invalid challenge",
input: "foo bar",
want: "",
},
{
name: "long challenge",
input: strings.Repeat("x", 65),
want: "",
},
}
for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) {
req, _ := http.NewRequest("GET", "https://localhost/generate_204", nil)
if tt.input != "" {
req.Header.Set(noContentChallengeHeader, tt.input)
}
w := httptest.NewRecorder()
serveNoContent(w, req)
resp := w.Result()
if tt.want == "" {
if h, found := resp.Header[noContentResponseHeader]; found {
t.Errorf("got %+v; expected no response header", h)
}
return
}
if got := resp.Header.Get(noContentResponseHeader); got != tt.want {
t.Errorf("got %q; want %q", got, tt.want)
}
})
}
}

View File

@ -1,5 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
@ -49,7 +50,8 @@ func startMeshWithHost(s *derp.Server, host string) error {
}
var d net.Dialer
var r net.Resolver
if base, ok := strings.CutSuffix(host, ".tailscale.com"); ok && port == "443" {
if port == "443" && strings.HasSuffix(host, ".tailscale.com") {
base := strings.TrimSuffix(host, ".tailscale.com")
subCtx, cancel := context.WithTimeout(ctx, 2*time.Second)
defer cancel()
vpcHost := base + "-vpc.tailscale.com"

View File

@ -1,5 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2021 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main
@ -12,7 +13,6 @@ import (
"nhooyr.io/websocket"
"tailscale.com/derp"
"tailscale.com/net/wsconn"
)
var counterWebSocketAccepts = expvar.NewInt("derp_websocket_accepts")
@ -50,7 +50,7 @@ func addWebSocketSupport(s *derp.Server, base http.Handler) http.Handler {
return
}
counterWebSocketAccepts.Add(1)
wc := wsconn.NetConn(r.Context(), c, websocket.MessageBinary)
wc := websocket.NetConn(r.Context(), c, websocket.MessageBinary)
brw := bufio.NewReadWriter(bufio.NewReader(wc), bufio.NewWriter(wc))
s.Accept(r.Context(), wc, brw, r.RemoteAddr)
})

View File

@ -1,59 +1,77 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2021 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// The derpprobe binary probes derpers.
package main
package main // import "tailscale.com/cmd/derper/derpprobe"
import (
"bytes"
"context"
crand "crypto/rand"
"crypto/x509"
"encoding/json"
"errors"
"flag"
"fmt"
"html"
"io"
"log"
"net"
"net/http"
"os"
"sort"
"strings"
"sync"
"time"
"tailscale.com/prober"
"tailscale.com/tsweb"
"tailscale.com/derp"
"tailscale.com/derp/derphttp"
"tailscale.com/net/stun"
"tailscale.com/tailcfg"
"tailscale.com/types/key"
)
var (
derpMapURL = flag.String("derp-map", "https://login.tailscale.com/derpmap/default", "URL to DERP map (https:// or file://)")
listen = flag.String("listen", ":8030", "HTTP listen address")
probeOnce = flag.Bool("once", false, "probe once and print results, then exit; ignores the listen flag")
spread = flag.Bool("spread", true, "whether to spread probing over time")
interval = flag.Duration("interval", 15*time.Second, "probe interval")
)
// certReissueAfter is the time after which we expect all certs to be
// reissued, at minimum.
//
// This is currently set to the date of the LetsEncrypt ALPN revocation event of Jan 2022:
// https://community.letsencrypt.org/t/questions-about-renewing-before-tls-alpn-01-revocations/170449
//
// If there's another revocation event, bump this again.
var certReissueAfter = time.Unix(1643226768, 0)
var (
mu sync.Mutex
state = map[nodePair]pairStatus{}
lastDERPMap *tailcfg.DERPMap
lastDERPMapAt time.Time
certs = map[string]*x509.Certificate{}
)
func main() {
flag.Parse()
p := prober.New().WithSpread(*spread).WithOnce(*probeOnce).WithMetricNamespace("derpprobe")
dp, err := prober.DERP(p, *derpMapURL, *interval, *interval, *interval)
if err != nil {
log.Fatal(err)
}
p.Run("derpmap-probe", *interval, nil, dp.ProbeMap)
// proactively load the DERP map. Nothing terrible happens if this fails, so we ignore
// the error. The Slack bot will print a notification that the DERP map was empty.
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
_, _ = getDERPMap(ctx)
if *probeOnce {
log.Printf("Waiting for all probes (may take up to 1m)")
p.Wait()
go probeLoop()
go slackLoop()
log.Fatal(http.ListenAndServe(*listen, http.HandlerFunc(serve)))
}
st := getOverallStatus(p)
for _, s := range st.good {
log.Printf("good: %s", s)
}
for _, s := range st.bad {
log.Printf("bad: %s", s)
}
return
}
mux := http.NewServeMux()
tsweb.Debugger(mux)
mux.HandleFunc("/", http.HandlerFunc(serveFunc(p)))
log.Fatal(http.ListenAndServe(*listen, mux))
func setCert(name string, cert *x509.Certificate) {
mu.Lock()
defer mu.Unlock()
certs[name] = cert
}
type overallStatus struct {
@ -68,43 +86,471 @@ func (st *overallStatus) addGoodf(format string, a ...any) {
st.good = append(st.good, fmt.Sprintf(format, a...))
}
func getOverallStatus(p *prober.Prober) (o overallStatus) {
for p, i := range p.ProbeInfo() {
if i.End.IsZero() {
// Do not show probes that have not finished yet.
continue
}
if i.Result {
o.addGoodf("%s: %s", p, i.Latency)
} else {
o.addBadf("%s: %s", p, i.Error)
func getOverallStatus() (o overallStatus) {
mu.Lock()
defer mu.Unlock()
if lastDERPMap == nil {
o.addBadf("no DERP map")
return
}
now := time.Now()
if age := now.Sub(lastDERPMapAt); age > time.Minute {
o.addBadf("DERPMap hasn't been successfully refreshed in %v", age.Round(time.Second))
}
addPairMeta := func(pair nodePair) {
st, ok := state[pair]
age := now.Sub(st.at).Round(time.Second)
switch {
case !ok:
o.addBadf("no state for %v", pair)
case st.err != nil:
o.addBadf("%v: %v", pair, st.err)
case age > 90*time.Second:
o.addBadf("%v: update is %v old", pair, age)
default:
o.addGoodf("%v: %v, %v ago", pair, st.latency.Round(time.Millisecond), age)
}
}
sort.Strings(o.bad)
sort.Strings(o.good)
for _, reg := range sortedRegions(lastDERPMap) {
for _, from := range reg.Nodes {
addPairMeta(nodePair{"UDP", from.Name})
for _, to := range reg.Nodes {
addPairMeta(nodePair{from.Name, to.Name})
}
}
}
var subjs []string
for k := range certs {
subjs = append(subjs, k)
}
sort.Strings(subjs)
soon := time.Now().Add(14 * 24 * time.Hour) // in 2 weeks; autocert does 30 days by default
for _, s := range subjs {
cert := certs[s]
if cert.NotBefore.Before(certReissueAfter) {
o.addBadf("cert %q needs reissuing; NotBefore=%v", s, cert.NotBefore.Format(time.RFC3339))
continue
}
if cert.NotAfter.Before(soon) {
o.addBadf("cert %q expiring soon (%v); wasn't auto-refreshed", s, cert.NotAfter.Format(time.RFC3339))
continue
}
o.addGoodf("cert %q good %v - %v", s, cert.NotBefore.Format(time.RFC3339), cert.NotAfter.Format(time.RFC3339))
}
return
}
func serveFunc(p *prober.Prober) func(w http.ResponseWriter, r *http.Request) {
return func(w http.ResponseWriter, r *http.Request) {
st := getOverallStatus(p)
summary := "All good"
if (float64(len(st.bad)) / float64(len(st.bad)+len(st.good))) > 0.25 {
// Returning a 500 allows monitoring this server externally and configuring
// an alert on HTTP response code.
w.WriteHeader(500)
summary = fmt.Sprintf("%d problems", len(st.bad))
func serve(w http.ResponseWriter, r *http.Request) {
st := getOverallStatus()
summary := "All good"
if (float64(len(st.bad)) / float64(len(st.bad)+len(st.good))) > 0.25 {
// This will generate an alert and page a human.
// It also ends up in Slack, but as part of the alert handling pipeline not
// because we generated a Slack notification from here.
w.WriteHeader(500)
summary = fmt.Sprintf("%d problems", len(st.bad))
}
io.WriteString(w, "<html><head><style>.bad { font-weight: bold; color: #700; }</style></head>\n")
fmt.Fprintf(w, "<body><h1>derp probe</h1>\n%s:<ul>", summary)
for _, s := range st.bad {
fmt.Fprintf(w, "<li class=bad>%s</li>\n", html.EscapeString(s))
}
for _, s := range st.good {
fmt.Fprintf(w, "<li>%s</li>\n", html.EscapeString(s))
}
io.WriteString(w, "</ul></body></html>\n")
}
func notifySlack(text string) error {
type SlackRequestBody struct {
Text string `json:"text"`
}
slackBody, err := json.Marshal(SlackRequestBody{Text: text})
if err != nil {
return err
}
webhookUrl := os.Getenv("SLACK_WEBHOOK")
if webhookUrl == "" {
return errors.New("No SLACK_WEBHOOK configured")
}
req, err := http.NewRequest("POST", webhookUrl, bytes.NewReader(slackBody))
if err != nil {
return err
}
req.Header.Add("Content-Type", "application/json")
client := &http.Client{Timeout: 10 * time.Second}
resp, err := client.Do(req)
if err != nil {
return err
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
return errors.New(resp.Status)
}
body, _ := io.ReadAll(resp.Body)
if string(body) != "ok" {
return errors.New("Non-ok response returned from Slack")
}
return nil
}
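The wire format here is minimal: the request body is `{"text":"..."}` and Slack replies with the literal body `ok` on success, which is why the function checks the response body as well as the status code.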
// We only page a human if it looks like there is a significant outage across multiple regions.
// To Slack, we report all failures great and small.
func slackLoop() {
inBadState := false
for {
time.Sleep(time.Second * 30)
st := getOverallStatus()
if len(st.bad) > 0 && !inBadState {
err := notifySlack(strings.Join(st.bad, "\n"))
if err == nil {
inBadState = true
} else {
log.Printf("%d problems, notify Slack failed: %v", len(st.bad), err)
}
}
io.WriteString(w, "<html><head><style>.bad { font-weight: bold; color: #700; }</style></head>\n")
fmt.Fprintf(w, "<body><h1>derp probe</h1>\n%s:<ul>", summary)
for _, s := range st.bad {
fmt.Fprintf(w, "<li class=bad>%s</li>\n", html.EscapeString(s))
if len(st.bad) == 0 && inBadState {
err := notifySlack("All DERPs recovered.")
if err == nil {
inBadState = false
}
}
for _, s := range st.good {
fmt.Fprintf(w, "<li>%s</li>\n", html.EscapeString(s))
}
io.WriteString(w, "</ul></body></html>\n")
}
}
func sortedRegions(dm *tailcfg.DERPMap) []*tailcfg.DERPRegion {
ret := make([]*tailcfg.DERPRegion, 0, len(dm.Regions))
for _, r := range dm.Regions {
ret = append(ret, r)
}
sort.Slice(ret, func(i, j int) bool { return ret[i].RegionID < ret[j].RegionID })
return ret
}
type nodePair struct {
from string // DERPNode.Name, or "UDP" for a STUN query to 'to'
to string // DERPNode.Name
}
func (p nodePair) String() string { return fmt.Sprintf("(%s→%s)", p.from, p.to) }
type pairStatus struct {
err error
latency time.Duration
at time.Time
}
func setDERPMap(dm *tailcfg.DERPMap) {
mu.Lock()
defer mu.Unlock()
lastDERPMap = dm
lastDERPMapAt = time.Now()
}
func setState(p nodePair, latency time.Duration, err error) {
mu.Lock()
defer mu.Unlock()
st := pairStatus{
err: err,
latency: latency,
at: time.Now(),
}
state[p] = st
if err != nil {
log.Printf("%+v error: %v", p, err)
} else {
log.Printf("%+v: %v", p, latency.Round(time.Millisecond))
}
}
func probeLoop() {
ticker := time.NewTicker(15 * time.Second)
for {
err := probe()
if err != nil {
log.Printf("probe: %v", err)
}
<-ticker.C
}
}
func probe() error {
ctx, cancel := context.WithTimeout(context.Background(), 60*time.Second)
defer cancel()
dm, err := getDERPMap(ctx)
if err != nil {
return err
}
var wg sync.WaitGroup
wg.Add(len(dm.Regions))
for _, reg := range dm.Regions {
reg := reg
go func() {
defer wg.Done()
for _, from := range reg.Nodes {
latency, err := probeUDP(ctx, dm, from)
setState(nodePair{"UDP", from.Name}, latency, err)
for _, to := range reg.Nodes {
latency, err := probeNodePair(ctx, dm, from, to)
setState(nodePair{from.Name, to.Name}, latency, err)
}
}
}()
}
wg.Wait()
return ctx.Err()
}
func probeUDP(ctx context.Context, dm *tailcfg.DERPMap, n *tailcfg.DERPNode) (latency time.Duration, err error) {
pc, err := net.ListenPacket("udp", ":0")
if err != nil {
return 0, err
}
defer pc.Close()
uc := pc.(*net.UDPConn)
tx := stun.NewTxID()
req := stun.Request(tx)
for _, ipStr := range []string{n.IPv4, n.IPv6} {
if ipStr == "" {
continue
}
port := n.STUNPort
if port == -1 {
continue
}
if port == 0 {
port = 3478
}
for {
ip := net.ParseIP(ipStr)
_, err := uc.WriteToUDP(req, &net.UDPAddr{IP: ip, Port: port})
if err != nil {
return 0, err
}
buf := make([]byte, 1500)
uc.SetReadDeadline(time.Now().Add(2 * time.Second))
t0 := time.Now()
n, _, err := uc.ReadFromUDP(buf)
d := time.Since(t0)
if err != nil {
if ctx.Err() != nil {
return 0, fmt.Errorf("timeout reading from %v: %v", ip, err)
}
if d < time.Second {
return 0, fmt.Errorf("error reading from %v: %v", ip, err)
}
time.Sleep(100 * time.Millisecond)
continue
}
txBack, _, err := stun.ParseResponse(buf[:n])
if err != nil {
return 0, fmt.Errorf("parsing STUN response from %v: %v", ip, err)
}
if txBack != tx {
return 0, fmt.Errorf("read wrong tx back from %v", ip)
}
if latency == 0 || d < latency {
latency = d
}
break
}
}
return latency, nil
}
func probeNodePair(ctx context.Context, dm *tailcfg.DERPMap, from, to *tailcfg.DERPNode) (latency time.Duration, err error) {
// The passed-in context carries a one-minute deadline for the whole
// region. The idea is that each node pair in the region will be
// probed serially and regularly in the future, reusing connections
// (at least in the happy path). For now they don't reuse
// connections and probe at most once every 15 seconds. We bound
// the duration of a single node pair within a region so one bad
// pair can't starve the others.
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
defer cancel()
fromc, err := newConn(ctx, dm, from)
if err != nil {
return 0, err
}
defer fromc.Close()
toc, err := newConn(ctx, dm, to)
if err != nil {
return 0, err
}
defer toc.Close()
// Wait a bit for from's server to hear that the to client exists
// on the other node in the region, in the case where the two
// nodes are different.
if from.Name != to.Name {
time.Sleep(100 * time.Millisecond) // pretty arbitrary
}
// Make a random packet
pkt := make([]byte, 8)
crand.Read(pkt)
t0 := time.Now()
// Send the random packet.
sendc := make(chan error, 1)
go func() {
sendc <- fromc.Send(toc.SelfPublicKey(), pkt)
}()
select {
case <-ctx.Done():
return 0, fmt.Errorf("timeout sending via %q: %w", from.Name, ctx.Err())
case err := <-sendc:
if err != nil {
return 0, fmt.Errorf("error sending via %q: %w", from.Name, err)
}
}
// Receive the random packet.
recvc := make(chan any, 1) // either derp.ReceivedPacket or error
go func() {
for {
m, err := toc.Recv()
if err != nil {
recvc <- err
return
}
switch v := m.(type) {
case derp.ReceivedPacket:
recvc <- v
default:
log.Printf("%v: ignoring Recv frame type %T", to.Name, v)
// Loop.
}
}
}()
select {
case <-ctx.Done():
return 0, fmt.Errorf("timeout receiving from %q: %w", to.Name, ctx.Err())
case v := <-recvc:
if err, ok := v.(error); ok {
return 0, fmt.Errorf("error receiving from %q: %w", to.Name, err)
}
p := v.(derp.ReceivedPacket)
if p.Source != fromc.SelfPublicKey() {
return 0, fmt.Errorf("got data packet from unexpected source, %v", p.Source)
}
if !bytes.Equal(p.Data, pkt) {
return 0, fmt.Errorf("unexpected data packet %q", p.Data)
}
}
return time.Since(t0), nil
}
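// newConn dials a DERP client pinned to the single node n, verifies the
// TLS certificate matches the node's hostname, and waits for the initial
// ServerInfo frame before returning.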
func newConn(ctx context.Context, dm *tailcfg.DERPMap, n *tailcfg.DERPNode) (*derphttp.Client, error) {
priv := key.NewNode()
dc := derphttp.NewRegionClient(priv, log.Printf, func() *tailcfg.DERPRegion {
rid := n.RegionID
return &tailcfg.DERPRegion{
RegionID: rid,
RegionCode: fmt.Sprintf("%s-%s", dm.Regions[rid].RegionCode, n.Name),
RegionName: dm.Regions[rid].RegionName,
Nodes: []*tailcfg.DERPNode{n},
}
})
dc.IsProber = true
err := dc.Connect(ctx)
if err != nil {
return nil, err
}
cs, ok := dc.TLSConnectionState()
if !ok {
dc.Close()
return nil, errors.New("no TLS state")
}
if len(cs.PeerCertificates) == 0 {
dc.Close()
return nil, errors.New("no peer certificates")
}
if cs.ServerName != n.HostName {
dc.Close()
return nil, fmt.Errorf("TLS server name %q != derp hostname %q", cs.ServerName, n.HostName)
}
setCert(cs.ServerName, cs.PeerCertificates[0])
errc := make(chan error, 1)
go func() {
m, err := dc.Recv()
if err != nil {
errc <- err
return
}
switch m.(type) {
case derp.ServerInfoMessage:
errc <- nil
default:
errc <- fmt.Errorf("unexpected first message type %T", errc)
}
}()
select {
case err := <-errc:
if err != nil {
go dc.Close()
return nil, err
}
case <-ctx.Done():
go dc.Close()
return nil, fmt.Errorf("timeout waiting for ServerInfoMessage: %w", ctx.Err())
}
return dc, nil
}
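// httpOrFileClient is an HTTP client that also understands file:// URLs,
// so derpMapURL can point at a local file.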
var httpOrFileClient = &http.Client{Transport: httpOrFileTransport()}
func httpOrFileTransport() http.RoundTripper {
tr := http.DefaultTransport.(*http.Transport).Clone()
tr.RegisterProtocol("file", http.NewFileTransport(http.Dir("/")))
return tr
}
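// getDERPMap fetches and decodes the DERP map from derpMapURL. If the
// fetch fails but a map was fetched within the last 10 minutes, the cached
// map is returned instead, on the assumption that control is restarting.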
func getDERPMap(ctx context.Context) (*tailcfg.DERPMap, error) {
req, err := http.NewRequestWithContext(ctx, "GET", *derpMapURL, nil)
if err != nil {
return nil, err
}
res, err := httpOrFileClient.Do(req)
if err != nil {
mu.Lock()
defer mu.Unlock()
if lastDERPMap != nil && time.Since(lastDERPMapAt) < 10*time.Minute {
// Assume that control is restarting and use
// the same one for a bit.
return lastDERPMap, nil
}
return nil, err
}
defer res.Body.Close()
if res.StatusCode != 200 {
return nil, fmt.Errorf("fetching %s: %s", *derpMapURL, res.Status)
}
dm := new(tailcfg.DERPMap)
if err := json.NewDecoder(res.Body).Decode(dm); err != nil {
return nil, fmt.Errorf("decoding %s JSON: %v", *derpMapURL, err)
}
setDERPMap(dm)
return dm, nil
}

cmd/dist/dist.go

@ -1,51 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// The dist command builds Tailscale release packages for distribution.
package main
import (
"context"
"errors"
"flag"
"log"
"os"
"tailscale.com/release/dist"
"tailscale.com/release/dist/cli"
"tailscale.com/release/dist/synology"
"tailscale.com/release/dist/unixpkgs"
)
var synologyPackageCenter bool
func getTargets() ([]dist.Target, error) {
var ret []dist.Target
ret = append(ret, unixpkgs.Targets()...)
// Synology packages can be built either for sideloading, or for
// distribution by Synology in their package center. When
// distributed through the package center, apps can request
// additional permissions to use a tuntap interface and control
// the NAS's network stack, rather than be forced to run in
// userspace mode.
//
// Since only we can provide packages to Synology for
// distribution, we default to building the "sideload" variant of
// packages that we distribute on pkgs.tailscale.com.
ret = append(ret, synology.Targets(synologyPackageCenter)...)
return ret, nil
}
func main() {
cmd := cli.CLI(getTargets)
for _, subcmd := range cmd.Subcommands {
if subcmd.Name == "build" {
subcmd.FlagSet.BoolVar(&synologyPackageCenter, "synology-package-center", false, "build synology packages with extra metadata for the official package center")
}
}
if err := cmd.ParseAndRun(context.Background(), os.Args[1:]); err != nil && !errors.Is(err, flag.ErrHelp) {
log.Fatal(err)
}
}
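// Example invocation (a sketch; run from the repo root, and note that the
// full set of subcommands and flags comes from the release/dist/cli
// package):
//
//	go run ./cmd/dist build -synology-package-center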


@ -1 +0,0 @@
get-authkey


@ -1,74 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// get-authkey allocates an authkey using an OAuth API client
// https://tailscale.com/s/oauth-clients and prints it
// to stdout for scripts to capture and use.
package main
import (
"context"
"flag"
"fmt"
"log"
"os"
"strings"
"golang.org/x/oauth2/clientcredentials"
"tailscale.com/client/tailscale"
"tailscale.com/util/cmpx"
)
func main() {
// Required to use our client API. We're fine with the instability since the
// client lives in the same repo as this code.
tailscale.I_Acknowledge_This_API_Is_Unstable = true
reusable := flag.Bool("reusable", false, "allocate a reusable authkey")
ephemeral := flag.Bool("ephemeral", false, "allocate an ephemeral authkey")
preauth := flag.Bool("preauth", true, "set the authkey as pre-authorized")
tags := flag.String("tags", "", "comma-separated list of tags to apply to the authkey")
flag.Parse()
clientID := os.Getenv("TS_API_CLIENT_ID")
clientSecret := os.Getenv("TS_API_CLIENT_SECRET")
if clientID == "" || clientSecret == "" {
log.Fatal("TS_API_CLIENT_ID and TS_API_CLIENT_SECRET must be set")
}
if *tags == "" {
log.Fatal("at least one tag must be specified")
}
baseURL := cmpx.Or(os.Getenv("TS_BASE_URL"), "https://api.tailscale.com")
credentials := clientcredentials.Config{
ClientID: clientID,
ClientSecret: clientSecret,
TokenURL: baseURL + "/api/v2/oauth/token",
Scopes: []string{"device"},
}
ctx := context.Background()
tsClient := tailscale.NewClient("-", nil)
tsClient.HTTPClient = credentials.Client(ctx)
tsClient.BaseURL = baseURL
caps := tailscale.KeyCapabilities{
Devices: tailscale.KeyDeviceCapabilities{
Create: tailscale.KeyDeviceCreateCapabilities{
Reusable: *reusable,
Ephemeral: *ephemeral,
Preauthorized: *preauth,
Tags: strings.Split(*tags, ","),
},
},
}
authkey, _, err := tsClient.CreateKey(ctx, caps)
if err != nil {
log.Fatal(err.Error())
}
fmt.Println(authkey)
}
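// Example use (a sketch; the tag name and output redirection are
// illustrative, not prescribed by this tool):
//
//	TS_API_CLIENT_ID=... TS_API_CLIENT_SECRET=... \
//	    get-authkey -ephemeral -tags tag:ci > authkey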


@ -1,5 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package main


@ -1,5 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Command gitops-pusher allows users to use a GitOps flow for managing Tailscale ACLs.
//
@ -22,8 +23,6 @@ import (
"github.com/peterbourgon/ff/v3/ffcli"
"github.com/tailscale/hujson"
"golang.org/x/oauth2/clientcredentials"
"tailscale.com/util/httpm"
)
var (
@ -32,7 +31,6 @@ var (
cacheFname = rootFlagSet.String("cache-file", "./version-cache.json", "filename for the previous known version hash")
timeout = rootFlagSet.Duration("timeout", 5*time.Minute, "timeout for the entire CI run")
githubSyntax = rootFlagSet.Bool("github-syntax", true, "use GitHub Action error syntax (https://docs.github.com/en/actions/using-workflows/workflow-commands-for-github-actions#setting-an-error-message)")
apiServer = rootFlagSet.String("api-server", "api.tailscale.com", "API server to contact")
)
func modifiedExternallyError() {
@ -43,9 +41,9 @@ func modifiedExternallyError() {
}
}
func apply(cache *Cache, client *http.Client, tailnet, apiKey string) func(context.Context, []string) error {
func apply(cache *Cache, tailnet, apiKey string) func(context.Context, []string) error {
return func(ctx context.Context, args []string) error {
controlEtag, err := getACLETag(ctx, client, tailnet, apiKey)
controlEtag, err := getACLETag(ctx, tailnet, apiKey)
if err != nil {
return err
}
@ -74,7 +72,7 @@ func apply(cache *Cache, client *http.Client, tailnet, apiKey string) func(conte
return nil
}
if err := applyNewACL(ctx, client, tailnet, apiKey, *policyFname, controlEtag); err != nil {
if err := applyNewACL(ctx, tailnet, apiKey, *policyFname, controlEtag); err != nil {
return err
}
@ -84,9 +82,9 @@ func apply(cache *Cache, client *http.Client, tailnet, apiKey string) func(conte
}
}
func test(cache *Cache, client *http.Client, tailnet, apiKey string) func(context.Context, []string) error {
func test(cache *Cache, tailnet, apiKey string) func(context.Context, []string) error {
return func(ctx context.Context, args []string) error {
controlEtag, err := getACLETag(ctx, client, tailnet, apiKey)
controlEtag, err := getACLETag(ctx, tailnet, apiKey)
if err != nil {
return err
}
@ -114,16 +112,16 @@ func test(cache *Cache, client *http.Client, tailnet, apiKey string) func(contex
return nil
}
if err := testNewACLs(ctx, client, tailnet, apiKey, *policyFname); err != nil {
if err := testNewACLs(ctx, tailnet, apiKey, *policyFname); err != nil {
return err
}
return nil
}
}
func getChecksums(cache *Cache, client *http.Client, tailnet, apiKey string) func(context.Context, []string) error {
func getChecksums(cache *Cache, tailnet, apiKey string) func(context.Context, []string) error {
return func(ctx context.Context, args []string) error {
controlEtag, err := getACLETag(ctx, client, tailnet, apiKey)
controlEtag, err := getACLETag(ctx, tailnet, apiKey)
if err != nil {
return err
}
@ -152,24 +150,8 @@ func main() {
log.Fatal("set envvar TS_TAILNET to your tailnet's name")
}
apiKey, ok := os.LookupEnv("TS_API_KEY")
oauthId, oiok := os.LookupEnv("TS_OAUTH_ID")
oauthSecret, osok := os.LookupEnv("TS_OAUTH_SECRET")
if !ok && (!oiok || !osok) {
log.Fatal("set envvar TS_API_KEY to your Tailscale API key or TS_OAUTH_ID and TS_OAUTH_SECRET to your Tailscale OAuth ID and Secret")
}
if ok && (oiok || osok) {
log.Fatal("set either the envvar TS_API_KEY or TS_OAUTH_ID and TS_OAUTH_SECRET")
}
var client *http.Client
if oiok {
oauthConfig := &clientcredentials.Config{
ClientID: oauthId,
ClientSecret: oauthSecret,
TokenURL: fmt.Sprintf("https://%s/api/v2/oauth/token", *apiServer),
}
client = oauthConfig.Client(context.Background())
} else {
client = http.DefaultClient
if !ok {
log.Fatal("set envvar TS_API_KEY to your Tailscale API key")
}
cache, err := LoadCache(*cacheFname)
if err != nil {
@ -186,7 +168,7 @@ func main() {
ShortUsage: "gitops-pusher [options] apply",
ShortHelp: "Pushes changes to CONTROL",
LongHelp: `Pushes changes to CONTROL`,
Exec: apply(cache, client, tailnet, apiKey),
Exec: apply(cache, tailnet, apiKey),
}
testCmd := &ffcli.Command{
@ -194,7 +176,7 @@ func main() {
ShortUsage: "gitops-pusher [options] test",
ShortHelp: "Tests ACL changes",
LongHelp: "Tests ACL changes",
Exec: test(cache, client, tailnet, apiKey),
Exec: test(cache, tailnet, apiKey),
}
cksumCmd := &ffcli.Command{
@ -202,7 +184,7 @@ func main() {
ShortUsage: "Shows checksums of ACL files",
ShortHelp: "Fetch checksum of CONTROL's ACL and the local ACL for comparison",
LongHelp: "Fetch checksum of CONTROL's ACL and the local ACL for comparison",
Exec: getChecksums(cache, client, tailnet, apiKey),
Exec: getChecksums(cache, tailnet, apiKey),
}
root := &ffcli.Command{
@ -245,14 +227,14 @@ func sumFile(fname string) (string, error) {
return fmt.Sprintf("%x", h.Sum(nil)), nil
}
func applyNewACL(ctx context.Context, client *http.Client, tailnet, apiKey, policyFname, oldEtag string) error {
func applyNewACL(ctx context.Context, tailnet, apiKey, policyFname, oldEtag string) error {
fin, err := os.Open(policyFname)
if err != nil {
return err
}
defer fin.Close()
req, err := http.NewRequestWithContext(ctx, httpm.POST, fmt.Sprintf("https://%s/api/v2/tailnet/%s/acl", *apiServer, tailnet), fin)
req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("https://api.tailscale.com/api/v2/tailnet/%s/acl", tailnet), fin)
if err != nil {
return err
}
@ -261,7 +243,7 @@ func applyNewACL(ctx context.Context, client *http.Client, tailnet, apiKey, poli
req.Header.Set("Content-Type", "application/hujson")
req.Header.Set("If-Match", `"`+oldEtag+`"`)
resp, err := client.Do(req)
resp, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
@ -282,7 +264,7 @@ func applyNewACL(ctx context.Context, client *http.Client, tailnet, apiKey, poli
return nil
}
func testNewACLs(ctx context.Context, client *http.Client, tailnet, apiKey, policyFname string) error {
func testNewACLs(ctx context.Context, tailnet, apiKey, policyFname string) error {
data, err := os.ReadFile(policyFname)
if err != nil {
return err
@ -292,7 +274,7 @@ func testNewACLs(ctx context.Context, client *http.Client, tailnet, apiKey, poli
return err
}
req, err := http.NewRequestWithContext(ctx, httpm.POST, fmt.Sprintf("https://%s/api/v2/tailnet/%s/acl/validate", *apiServer, tailnet), bytes.NewBuffer(data))
req, err := http.NewRequestWithContext(ctx, http.MethodPost, fmt.Sprintf("https://api.tailscale.com/api/v2/tailnet/%s/acl/validate", tailnet), bytes.NewBuffer(data))
if err != nil {
return err
}
@ -300,7 +282,7 @@ func testNewACLs(ctx context.Context, client *http.Client, tailnet, apiKey, poli
req.SetBasicAuth(apiKey, "")
req.Header.Set("Content-Type", "application/hujson")
resp, err := client.Do(req)
resp, err := http.DefaultClient.Do(req)
if err != nil {
return err
}
@ -363,8 +345,8 @@ type ACLTestErrorDetail struct {
Errors []string `json:"errors"`
}
func getACLETag(ctx context.Context, client *http.Client, tailnet, apiKey string) (string, error) {
req, err := http.NewRequestWithContext(ctx, httpm.GET, fmt.Sprintf("https://%s/api/v2/tailnet/%s/acl", *apiServer, tailnet), nil)
func getACLETag(ctx context.Context, tailnet, apiKey string) (string, error) {
req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("https://api.tailscale.com/api/v2/tailnet/%s/acl", tailnet), nil)
if err != nil {
return "", err
}
@ -372,7 +354,7 @@ func getACLETag(ctx context.Context, client *http.Client, tailnet, apiKey string
req.SetBasicAuth(apiKey, "")
req.Header.Set("Accept", "application/hujson")
resp, err := client.Do(req)
resp, err := http.DefaultClient.Do(req)
if err != nil {
return "", err
}


@ -1,5 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2021 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// The hello binary runs hello.ts.net.
package main // import "tailscale.com/cmd/hello"


@ -1,24 +0,0 @@
# Copyright (c) Tailscale Inc & AUTHORS
# SPDX-License-Identifier: BSD-3-Clause
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: tailscale-auth-proxy
rules:
- apiGroups: [""]
resources: ["users", "groups"]
verbs: ["impersonate"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: tailscale-auth-proxy
subjects:
- kind: ServiceAccount
name: operator
namespace: tailscale
roleRef:
kind: ClusterRole
name: tailscale-auth-proxy
apiGroup: rbac.authorization.k8s.io


@ -1,156 +0,0 @@
# Copyright (c) Tailscale Inc & AUTHORS
# SPDX-License-Identifier: BSD-3-Clause
apiVersion: v1
kind: Namespace
metadata:
name: tailscale
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: proxies
namespace: tailscale
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: proxies
namespace: tailscale
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["*"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: proxies
namespace: tailscale
subjects:
- kind: ServiceAccount
name: proxies
namespace: tailscale
roleRef:
kind: Role
name: proxies
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: operator
namespace: tailscale
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: tailscale-operator
rules:
- apiGroups: [""]
resources: ["services", "services/status"]
verbs: ["*"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: tailscale-operator
subjects:
- kind: ServiceAccount
name: operator
namespace: tailscale
roleRef:
kind: ClusterRole
name: tailscale-operator
apiGroup: rbac.authorization.k8s.io
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: operator
namespace: tailscale
rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["*"]
- apiGroups: ["apps"]
resources: ["statefulsets"]
verbs: ["*"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: operator
namespace: tailscale
subjects:
- kind: ServiceAccount
name: operator
namespace: tailscale
roleRef:
kind: Role
name: operator
apiGroup: rbac.authorization.k8s.io
---
apiVersion: v1
kind: Secret
metadata:
name: operator-oauth
namespace: tailscale
stringData:
client_id: # SET CLIENT ID HERE
client_secret: # SET CLIENT SECRET HERE
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: operator
namespace: tailscale
spec:
replicas: 1
strategy:
type: Recreate
selector:
matchLabels:
app: operator
template:
metadata:
labels:
app: operator
spec:
serviceAccountName: operator
volumes:
- name: oauth
secret:
secretName: operator-oauth
containers:
- name: operator
image: tailscale/k8s-operator:unstable
resources:
requests:
cpu: 500m
memory: 100Mi
env:
- name: OPERATOR_HOSTNAME
value: tailscale-operator
- name: OPERATOR_SECRET
value: operator
- name: OPERATOR_LOGGING
value: info
- name: OPERATOR_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: CLIENT_ID_FILE
value: /oauth/client_id
- name: CLIENT_SECRET_FILE
value: /oauth/client_secret
- name: PROXY_IMAGE
value: tailscale/tailscale:unstable
- name: PROXY_TAGS
value: tag:k8s
- name: AUTH_PROXY
value: "false"
volumeMounts:
- name: oauth
mountPath: /oauth
readOnly: true


@ -1,37 +0,0 @@
# This file is not a complete manifest; it's a skeleton that the operator embeds
# at build time and then uses to construct Tailscale proxy pods.
apiVersion: apps/v1
kind: StatefulSet
metadata:
spec:
replicas: 1
template:
metadata:
deletionGracePeriodSeconds: 10
spec:
serviceAccountName: proxies
initContainers:
- name: sysctler
image: busybox
securityContext:
privileged: true
command: ["/bin/sh"]
args:
- -c
- sysctl -w net.ipv4.ip_forward=1 net.ipv6.conf.all.forwarding=1
resources:
requests:
cpu: 1m
memory: 1Mi
containers:
- name: tailscale
imagePullPolicy: Always
env:
- name: TS_USERSPACE
value: "false"
- name: TS_AUTH_ONCE
value: "true"
securityContext:
capabilities:
add:
- NET_ADMIN


@ -1,752 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// tailscale-operator provides a way to expose services running in a Kubernetes
// cluster to your Tailnet.
package main
import (
"context"
"crypto/tls"
_ "embed"
"fmt"
"net/http"
"os"
"strings"
"time"
"github.com/go-logr/zapr"
"go.uber.org/zap"
"go.uber.org/zap/zapcore"
"golang.org/x/exp/slices"
"golang.org/x/oauth2/clientcredentials"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/transport"
"sigs.k8s.io/controller-runtime/pkg/builder"
"sigs.k8s.io/controller-runtime/pkg/cache"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/config"
"sigs.k8s.io/controller-runtime/pkg/handler"
logf "sigs.k8s.io/controller-runtime/pkg/log"
kzap "sigs.k8s.io/controller-runtime/pkg/log/zap"
"sigs.k8s.io/controller-runtime/pkg/manager"
"sigs.k8s.io/controller-runtime/pkg/manager/signals"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"sigs.k8s.io/yaml"
"tailscale.com/client/tailscale"
"tailscale.com/hostinfo"
"tailscale.com/ipn"
"tailscale.com/ipn/store/kubestore"
"tailscale.com/tsnet"
"tailscale.com/types/logger"
"tailscale.com/types/opt"
"tailscale.com/util/dnsname"
"tailscale.com/version"
)
func main() {
// Required to use our client API. We're fine with the instability since the
// client lives in the same repo as this code.
tailscale.I_Acknowledge_This_API_Is_Unstable = true
var (
hostname = defaultEnv("OPERATOR_HOSTNAME", "tailscale-operator")
kubeSecret = defaultEnv("OPERATOR_SECRET", "")
operatorTags = defaultEnv("OPERATOR_INITIAL_TAGS", "tag:k8s-operator")
tsNamespace = defaultEnv("OPERATOR_NAMESPACE", "")
tslogging = defaultEnv("OPERATOR_LOGGING", "info")
clientIDPath = defaultEnv("CLIENT_ID_FILE", "")
clientSecretPath = defaultEnv("CLIENT_SECRET_FILE", "")
image = defaultEnv("PROXY_IMAGE", "tailscale/tailscale:latest")
priorityClassName = defaultEnv("PROXY_PRIORITY_CLASS_NAME", "")
tags = defaultEnv("PROXY_TAGS", "tag:k8s")
shouldRunAuthProxy = defaultBool("AUTH_PROXY", false)
)
var opts []kzap.Opts
switch tslogging {
case "info":
opts = append(opts, kzap.Level(zapcore.InfoLevel))
case "debug":
opts = append(opts, kzap.Level(zapcore.DebugLevel))
case "dev":
opts = append(opts, kzap.UseDevMode(true), kzap.Level(zapcore.DebugLevel))
}
zlog := kzap.NewRaw(opts...).Sugar()
logf.SetLogger(zapr.NewLogger(zlog.Desugar()))
startlog := zlog.Named("startup")
if clientIDPath == "" || clientSecretPath == "" {
startlog.Fatalf("CLIENT_ID_FILE and CLIENT_SECRET_FILE must be set")
}
clientID, err := os.ReadFile(clientIDPath)
if err != nil {
startlog.Fatalf("reading client ID %q: %v", clientIDPath, err)
}
clientSecret, err := os.ReadFile(clientSecretPath)
if err != nil {
startlog.Fatalf("reading client secret %q: %v", clientSecretPath, err)
}
credentials := clientcredentials.Config{
ClientID: string(clientID),
ClientSecret: string(clientSecret),
TokenURL: "https://login.tailscale.com/api/v2/oauth/token",
}
tsClient := tailscale.NewClient("-", nil)
tsClient.HTTPClient = credentials.Client(context.Background())
if shouldRunAuthProxy {
hostinfo.SetApp("k8s-operator-proxy")
} else {
hostinfo.SetApp("k8s-operator")
}
s := &tsnet.Server{
Hostname: hostname,
Logf: zlog.Named("tailscaled").Debugf,
}
if kubeSecret != "" {
st, err := kubestore.New(logger.Discard, kubeSecret)
if err != nil {
startlog.Fatalf("creating kube store: %v", err)
}
s.Store = st
}
if err := s.Start(); err != nil {
startlog.Fatalf("starting tailscale server: %v", err)
}
defer s.Close()
lc, err := s.LocalClient()
if err != nil {
startlog.Fatalf("getting local client: %v", err)
}
ctx := context.Background()
loginDone := false
machineAuthShown := false
waitOnline:
for {
startlog.Debugf("querying tailscaled status")
st, err := lc.StatusWithoutPeers(ctx)
if err != nil {
startlog.Fatalf("getting status: %v", err)
}
switch st.BackendState {
case "Running":
break waitOnline
case "NeedsLogin":
if loginDone {
break
}
caps := tailscale.KeyCapabilities{
Devices: tailscale.KeyDeviceCapabilities{
Create: tailscale.KeyDeviceCreateCapabilities{
Reusable: false,
Preauthorized: true,
Tags: strings.Split(operatorTags, ","),
},
},
}
authkey, _, err := tsClient.CreateKey(ctx, caps)
if err != nil {
startlog.Fatalf("creating operator authkey: %v", err)
}
if err := lc.Start(ctx, ipn.Options{
AuthKey: authkey,
}); err != nil {
startlog.Fatalf("starting tailscale: %v", err)
}
if err := lc.StartLoginInteractive(ctx); err != nil {
startlog.Fatalf("starting login: %v", err)
}
startlog.Debugf("requested login by authkey")
loginDone = true
case "NeedsMachineAuth":
if !machineAuthShown {
startlog.Infof("Machine approval required, please visit the admin panel to approve")
machineAuthShown = true
}
default:
startlog.Debugf("waiting for tailscale to start: %v", st.BackendState)
}
time.Sleep(time.Second)
}
// For secrets and statefulsets, we only get permission to touch the objects
// in the controller's own namespace. This cannot be expressed by
// .Watches(...) below; instead, you have to add a per-type field selector to
// the cache that sits a few layers below the builder machinery, which
// implicitly filters what parts of the world the builder code gets to see
// at all.
nsFilter := cache.ByObject{
Field: client.InNamespace(tsNamespace).AsSelector(),
}
restConfig := config.GetConfigOrDie()
mgr, err := manager.New(restConfig, manager.Options{
Cache: cache.Options{
ByObject: map[client.Object]cache.ByObject{
&corev1.Secret{}: nsFilter,
&appsv1.StatefulSet{}: nsFilter,
},
},
})
if err != nil {
startlog.Fatalf("could not create manager: %v", err)
}
sr := &ServiceReconciler{
Client: mgr.GetClient(),
tsClient: tsClient,
defaultTags: strings.Split(tags, ","),
operatorNamespace: tsNamespace,
proxyImage: image,
proxyPriorityClassName: priorityClassName,
logger: zlog.Named("service-reconciler"),
}
reconcileFilter := handler.EnqueueRequestsFromMapFunc(func(_ context.Context, o client.Object) []reconcile.Request {
ls := o.GetLabels()
if ls[LabelManaged] != "true" {
return nil
}
if ls[LabelParentType] != "svc" {
return nil
}
return []reconcile.Request{
{
NamespacedName: types.NamespacedName{
Namespace: ls[LabelParentNamespace],
Name: ls[LabelParentName],
},
},
}
})
err = builder.
ControllerManagedBy(mgr).
For(&corev1.Service{}).
Watches(&appsv1.StatefulSet{}, reconcileFilter).
Watches(&corev1.Secret{}, reconcileFilter).
Complete(sr)
if err != nil {
startlog.Fatalf("could not create controller: %v", err)
}
startlog.Infof("Startup complete, operator running, version: %s", version.Long())
if shouldRunAuthProxy {
cfg, err := restConfig.TransportConfig()
if err != nil {
startlog.Fatalf("could not get rest.TransportConfig(): %v", err)
}
// Kubernetes uses SPDY for exec and port-forward; however, SPDY is
// incompatible with HTTP/2, so we disable HTTP/2 in the proxy.
tr := http.DefaultTransport.(*http.Transport).Clone()
tr.TLSClientConfig, err = transport.TLSConfigFor(cfg)
if err != nil {
startlog.Fatalf("could not get transport.TLSConfigFor(): %v", err)
}
tr.TLSNextProto = make(map[string]func(authority string, c *tls.Conn) http.RoundTripper)
rt, err := transport.HTTPWrappersForConfig(cfg, tr)
if err != nil {
startlog.Fatalf("could not get rest.TransportConfig(): %v", err)
}
go runAuthProxy(s, rt, zlog.Named("auth-proxy").Infof)
}
if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
startlog.Fatalf("could not start manager: %v", err)
}
}
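// Names of the labels, annotations, and finalizer the operator uses: the
// labels mark and track managed child resources, while the annotations let
// users configure how a Service is exposed.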
const (
LabelManaged = "tailscale.com/managed"
LabelParentType = "tailscale.com/parent-resource-type"
LabelParentName = "tailscale.com/parent-resource"
LabelParentNamespace = "tailscale.com/parent-resource-ns"
FinalizerName = "tailscale.com/finalizer"
AnnotationExpose = "tailscale.com/expose"
AnnotationTags = "tailscale.com/tags"
AnnotationHostname = "tailscale.com/hostname"
)
// ServiceReconciler reconciles Kubernetes Services that should be exposed
// over Tailscale.
type ServiceReconciler struct {
client.Client
tsClient tsClient
defaultTags []string
operatorNamespace string
proxyImage string
proxyPriorityClassName string
logger *zap.SugaredLogger
}
type tsClient interface {
CreateKey(ctx context.Context, caps tailscale.KeyCapabilities) (string, *tailscale.Key, error)
DeleteDevice(ctx context.Context, id string) error
}
func childResourceLabels(parent *corev1.Service) map[string]string {
// You might wonder why we're using owner references, since they seem to be
// built for exactly this. Unfortunately, Kubernetes does not support
// cross-namespace ownership, by design. This means we cannot make the
// service being exposed the owner of the implementation details of the
// proxying. Instead, we have to do our own filtering and tracking with
// labels.
return map[string]string{
LabelManaged: "true",
LabelParentName: parent.GetName(),
LabelParentNamespace: parent.GetNamespace(),
LabelParentType: "svc",
}
}
func (a *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request) (_ reconcile.Result, err error) {
logger := a.logger.With("service-ns", req.Namespace, "service-name", req.Name)
logger.Debugf("starting reconcile")
defer logger.Debugf("reconcile finished")
svc := new(corev1.Service)
err = a.Get(ctx, req.NamespacedName, svc)
if apierrors.IsNotFound(err) {
// Request object not found, could have been deleted after reconcile request.
logger.Debugf("service not found, assuming it was deleted")
return reconcile.Result{}, nil
} else if err != nil {
return reconcile.Result{}, fmt.Errorf("failed to get svc: %w", err)
}
if !svc.DeletionTimestamp.IsZero() || !a.shouldExpose(svc) {
logger.Debugf("service is being deleted or should not be exposed, cleaning up")
return reconcile.Result{}, a.maybeCleanup(ctx, logger, svc)
}
return reconcile.Result{}, a.maybeProvision(ctx, logger, svc)
}
// maybeCleanup removes any existing resources related to serving svc over tailscale.
//
// This function is responsible for removing the finalizer from the service,
// once all associated resources are gone.
func (a *ServiceReconciler) maybeCleanup(ctx context.Context, logger *zap.SugaredLogger, svc *corev1.Service) error {
ix := slices.Index(svc.Finalizers, FinalizerName)
if ix < 0 {
logger.Debugf("no finalizer, nothing to do")
return nil
}
ml := childResourceLabels(svc)
// Need to delete the StatefulSet first, and delete it with foreground
// cascading deletion. That way, the pod that's writing to the Secret will
// stop running before we start looking at the Secret's contents, and
// assuming k8s ordering semantics don't mess with us, that should avoid
// tailscale device deletion races where we fail to notice a device that
// should be removed.
sts, err := getSingleObject[appsv1.StatefulSet](ctx, a.Client, a.operatorNamespace, ml)
if err != nil {
return fmt.Errorf("getting statefulset: %w", err)
}
if sts != nil {
if !sts.GetDeletionTimestamp().IsZero() {
// Deletion in progress, check again later. We'll get another
// notification when the deletion is complete.
logger.Debugf("waiting for statefulset %s/%s deletion", sts.GetNamespace(), sts.GetName())
return nil
}
err := a.DeleteAllOf(ctx, &appsv1.StatefulSet{}, client.InNamespace(a.operatorNamespace), client.MatchingLabels(ml), client.PropagationPolicy(metav1.DeletePropagationForeground))
if err != nil {
return fmt.Errorf("deleting statefulset: %w", err)
}
logger.Debugf("started deletion of statefulset %s/%s", sts.GetNamespace(), sts.GetName())
return nil
}
id, _, err := a.getDeviceInfo(ctx, svc)
if err != nil {
return fmt.Errorf("getting device info: %w", err)
}
if id != "" {
// TODO: handle case where the device is already deleted, but the secret
// is still around.
if err := a.tsClient.DeleteDevice(ctx, id); err != nil {
return fmt.Errorf("deleting device: %w", err)
}
}
types := []client.Object{
&corev1.Service{},
&corev1.Secret{},
}
for _, typ := range types {
if err := a.DeleteAllOf(ctx, typ, client.InNamespace(a.operatorNamespace), client.MatchingLabels(ml)); err != nil {
return err
}
}
svc.Finalizers = append(svc.Finalizers[:ix], svc.Finalizers[ix+1:]...)
if err := a.Update(ctx, svc); err != nil {
return fmt.Errorf("failed to remove finalizer: %w", err)
}
// Unlike most log entries in the reconcile loop, this will get printed
// exactly once at the very end of cleanup, because the final step of
// cleanup removes the tailscale finalizer, which will make all future
// reconciles exit early.
logger.Infof("unexposed service from tailnet")
return nil
}
// maybeProvision ensures that svc is exposed over tailscale, taking any actions
// necessary to reach that state.
//
// This function adds a finalizer to svc, ensuring that we can handle orderly
// deprovisioning later.
func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.SugaredLogger, svc *corev1.Service) error {
hostname, err := nameForService(svc)
if err != nil {
return err
}
if !slices.Contains(svc.Finalizers, FinalizerName) {
// This log line is printed exactly once during initial provisioning,
// because once the finalizer is in place this block gets skipped. So,
// this is a nice place to tell the operator that the high level,
// multi-reconcile operation is underway.
logger.Infof("exposing service over tailscale")
svc.Finalizers = append(svc.Finalizers, FinalizerName)
if err := a.Update(ctx, svc); err != nil {
return fmt.Errorf("failed to add finalizer: %w", err)
}
}
// Do full reconcile.
hsvc, err := a.reconcileHeadlessService(ctx, logger, svc)
if err != nil {
return fmt.Errorf("failed to reconcile headless service: %w", err)
}
tags := a.defaultTags
if tstr, ok := svc.Annotations[AnnotationTags]; ok {
tags = strings.Split(tstr, ",")
}
secretName, err := a.createOrGetSecret(ctx, logger, svc, hsvc, tags)
if err != nil {
return fmt.Errorf("failed to create or get API key secret: %w", err)
}
_, err = a.reconcileSTS(ctx, logger, svc, hsvc, secretName, hostname)
if err != nil {
return fmt.Errorf("failed to reconcile statefulset: %w", err)
}
if !a.hasLoadBalancerClass(svc) {
logger.Debugf("service is not a LoadBalancer, so not updating ingress")
return nil
}
_, tsHost, err := a.getDeviceInfo(ctx, svc)
if err != nil {
return fmt.Errorf("failed to get device ID: %w", err)
}
if tsHost == "" {
logger.Debugf("no Tailscale hostname known yet, waiting for proxy pod to finish auth")
// No hostname yet. Wait for the proxy pod to auth.
svc.Status.LoadBalancer.Ingress = nil
if err := a.Status().Update(ctx, svc); err != nil {
return fmt.Errorf("failed to update service status: %w", err)
}
return nil
}
logger.Debugf("setting ingress hostname to %q", tsHost)
svc.Status.LoadBalancer.Ingress = []corev1.LoadBalancerIngress{
{
Hostname: tsHost,
},
}
if err := a.Status().Update(ctx, svc); err != nil {
return fmt.Errorf("failed to update service status: %w", err)
}
return nil
}
func (a *ServiceReconciler) shouldExpose(svc *corev1.Service) bool {
// Headless services can't be exposed, since there is no ClusterIP to
// forward to.
if svc.Spec.ClusterIP == "" || svc.Spec.ClusterIP == "None" {
return false
}
return a.hasLoadBalancerClass(svc) || a.hasAnnotation(svc)
}
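// hasLoadBalancerClass reports whether svc is a LoadBalancer Service whose
// loadBalancerClass is "tailscale".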
func (a *ServiceReconciler) hasLoadBalancerClass(svc *corev1.Service) bool {
return svc != nil &&
svc.Spec.Type == corev1.ServiceTypeLoadBalancer &&
svc.Spec.LoadBalancerClass != nil &&
*svc.Spec.LoadBalancerClass == "tailscale"
}
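// hasAnnotation reports whether svc has the tailscale.com/expose
// annotation set to "true".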
func (a *ServiceReconciler) hasAnnotation(svc *corev1.Service) bool {
return svc != nil &&
svc.Annotations[AnnotationExpose] == "true"
}
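// reconcileHeadlessService ensures a headless Service exists in the
// operator's namespace, selecting the proxy pod for svc by the parent
// Service's UID.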
func (a *ServiceReconciler) reconcileHeadlessService(ctx context.Context, logger *zap.SugaredLogger, svc *corev1.Service) (*corev1.Service, error) {
hsvc := &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
GenerateName: "ts-" + svc.Name + "-",
Namespace: a.operatorNamespace,
Labels: childResourceLabels(svc),
},
Spec: corev1.ServiceSpec{
ClusterIP: "None",
Selector: map[string]string{
"app": string(svc.UID),
},
},
}
logger.Debugf("reconciling headless service for StatefulSet")
return createOrUpdate(ctx, a.Client, a.operatorNamespace, hsvc, func(svc *corev1.Service) { svc.Spec = hsvc.Spec })
}
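// createOrGetSecret returns the name of the proxy's state Secret, creating
// it with a freshly minted authkey if neither the Secret nor the
// corresponding StatefulSet exists yet.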
func (a *ServiceReconciler) createOrGetSecret(ctx context.Context, logger *zap.SugaredLogger, svc, hsvc *corev1.Service, tags []string) (string, error) {
secret := &corev1.Secret{
ObjectMeta: metav1.ObjectMeta{
// Hardcode a -0 suffix so that in future, if we support
// multiple StatefulSet replicas, we can provision -N for
// those.
Name: hsvc.Name + "-0",
Namespace: a.operatorNamespace,
Labels: childResourceLabels(svc),
},
}
if err := a.Get(ctx, client.ObjectKeyFromObject(secret), secret); err == nil {
logger.Debugf("secret %s/%s already exists", secret.GetNamespace(), secret.GetName())
return secret.Name, nil
} else if !apierrors.IsNotFound(err) {
return "", err
}
// Secret doesn't exist yet, create one. Initially it contains
// only the Tailscale authkey, but once Tailscale starts it'll
// also store the daemon state.
sts, err := getSingleObject[appsv1.StatefulSet](ctx, a.Client, a.operatorNamespace, childResourceLabels(svc))
if err != nil {
return "", err
}
if sts != nil {
// The StatefulSet exists, so we have already created the secret.
// If the secret is missing, the user should delete the StatefulSet.
logger.Errorf("Tailscale proxy secret doesn't exist, but the corresponding StatefulSet %s/%s already does. Something is wrong, please delete the StatefulSet.", sts.GetNamespace(), sts.GetName())
return "", nil
}
// Create API Key secret which is going to be used by the statefulset
// to authenticate with Tailscale.
logger.Debugf("creating authkey for new tailscale proxy")
authKey, err := a.newAuthKey(ctx, tags)
if err != nil {
return "", err
}
secret.StringData = map[string]string{
"authkey": authKey,
}
if err := a.Create(ctx, secret); err != nil {
return "", err
}
return secret.Name, nil
}
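// getDeviceInfo returns the Tailscale device ID and hostname the proxy
// wrote into its state Secret, or empty strings if they aren't known yet.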
func (a *ServiceReconciler) getDeviceInfo(ctx context.Context, svc *corev1.Service) (id, hostname string, err error) {
sec, err := getSingleObject[corev1.Secret](ctx, a.Client, a.operatorNamespace, childResourceLabels(svc))
if err != nil {
return "", "", err
}
if sec == nil {
return "", "", nil
}
id = string(sec.Data["device_id"])
if id == "" {
return "", "", nil
}
// Kubernetes chokes on well-formed FQDNs with the trailing dot, so we have
// to remove it.
hostname = strings.TrimSuffix(string(sec.Data["device_fqdn"]), ".")
if hostname == "" {
return "", "", nil
}
return id, hostname, nil
}
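// newAuthKey creates a single-use, pre-authorized authkey with the given
// tags.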
func (a *ServiceReconciler) newAuthKey(ctx context.Context, tags []string) (string, error) {
caps := tailscale.KeyCapabilities{
Devices: tailscale.KeyDeviceCapabilities{
Create: tailscale.KeyDeviceCreateCapabilities{
Reusable: false,
Preauthorized: true,
Tags: tags,
},
},
}
key, _, err := a.tsClient.CreateKey(ctx, caps)
if err != nil {
return "", err
}
return key, nil
}
//go:embed manifests/proxy.yaml
var proxyYaml []byte
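// reconcileSTS ensures the proxy StatefulSet for parentSvc exists, built
// from the embedded proxy.yaml skeleton plus per-service configuration
// (destination IP, state Secret name, and hostname).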
func (a *ServiceReconciler) reconcileSTS(ctx context.Context, logger *zap.SugaredLogger, parentSvc, headlessSvc *corev1.Service, authKeySecret, hostname string) (*appsv1.StatefulSet, error) {
var ss appsv1.StatefulSet
if err := yaml.Unmarshal(proxyYaml, &ss); err != nil {
return nil, fmt.Errorf("failed to unmarshal proxy spec: %w", err)
}
container := &ss.Spec.Template.Spec.Containers[0]
container.Image = a.proxyImage
container.Env = append(container.Env,
corev1.EnvVar{
Name: "TS_DEST_IP",
Value: parentSvc.Spec.ClusterIP,
},
corev1.EnvVar{
Name: "TS_KUBE_SECRET",
Value: authKeySecret,
},
corev1.EnvVar{
Name: "TS_HOSTNAME",
Value: hostname,
})
ss.ObjectMeta = metav1.ObjectMeta{
Name: headlessSvc.Name,
Namespace: a.operatorNamespace,
Labels: childResourceLabels(parentSvc),
}
ss.Spec.ServiceName = headlessSvc.Name
ss.Spec.Selector = &metav1.LabelSelector{
MatchLabels: map[string]string{
"app": string(parentSvc.UID),
},
}
ss.Spec.Template.ObjectMeta.Labels = map[string]string{
"app": string(parentSvc.UID),
}
ss.Spec.Template.Spec.PriorityClassName = a.proxyPriorityClassName
logger.Debugf("reconciling statefulset %s/%s", ss.GetNamespace(), ss.GetName())
return createOrUpdate(ctx, a.Client, a.operatorNamespace, &ss, func(s *appsv1.StatefulSet) { s.Spec = ss.Spec })
}
// ptrObject is a type constraint for pointer types that implement
// client.Object.
type ptrObject[T any] interface {
client.Object
*T
}
// createOrUpdate adds obj to the k8s cluster, unless the object already exists,
// in which case update is called to make changes to it. If update is nil, the
// existing object is returned unmodified.
//
// obj is looked up by its Name and Namespace if Name is set, otherwise it's
// looked up by labels.
func createOrUpdate[T any, O ptrObject[T]](ctx context.Context, c client.Client, ns string, obj O, update func(O)) (O, error) {
var (
existing O
err error
)
if obj.GetName() != "" {
existing = new(T)
existing.SetName(obj.GetName())
existing.SetNamespace(obj.GetNamespace())
err = c.Get(ctx, client.ObjectKeyFromObject(obj), existing)
} else {
existing, err = getSingleObject[T, O](ctx, c, ns, obj.GetLabels())
}
if err == nil && existing != nil {
if update != nil {
update(existing)
if err := c.Update(ctx, existing); err != nil {
return nil, err
}
}
return existing, nil
}
if err != nil && !apierrors.IsNotFound(err) {
return nil, fmt.Errorf("failed to get object: %w", err)
}
if err := c.Create(ctx, obj); err != nil {
return nil, err
}
return obj, nil
}
// getSingleObject searches for k8s objects of type T
// (e.g. corev1.Service) with the given labels, and returns
// it. Returns nil if no objects match the labels, and an error if
// more than one object matches.
func getSingleObject[T any, O ptrObject[T]](ctx context.Context, c client.Client, ns string, labels map[string]string) (O, error) {
ret := O(new(T))
kinds, _, err := c.Scheme().ObjectKinds(ret)
if err != nil {
return nil, err
}
if len(kinds) != 1 {
// TODO: the runtime package apparently has a "pick the best
// GVK" function somewhere that might be good enough?
return nil, fmt.Errorf("more than 1 GroupVersionKind for %T", ret)
}
gvk := kinds[0]
gvk.Kind += "List"
lst := unstructured.UnstructuredList{}
lst.SetGroupVersionKind(gvk)
if err := c.List(ctx, &lst, client.InNamespace(ns), client.MatchingLabels(labels)); err != nil {
return nil, err
}
if len(lst.Items) == 0 {
return nil, nil
}
if len(lst.Items) > 1 {
return nil, fmt.Errorf("found multiple matching %T objects", ret)
}
if err := c.Scheme().Convert(&lst.Items[0], ret, nil); err != nil {
return nil, err
}
return ret, nil
}
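// defaultBool returns the boolean value of the environment variable
// envName, or defVal if it is unset.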
func defaultBool(envName string, defVal bool) bool {
vs := os.Getenv(envName)
if vs == "" {
return defVal
}
v, _ := opt.Bool(vs).Get()
return v
}
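// defaultEnv returns the value of the environment variable envName, or
// defVal if it is empty or unset.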
func defaultEnv(envName, defVal string) string {
v := os.Getenv(envName)
if v == "" {
return defVal
}
return v
}
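// nameForService returns the Tailscale hostname to use for svc: the
// tailscale.com/hostname annotation if present and a valid DNS label,
// otherwise "namespace-name".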
func nameForService(svc *corev1.Service) (string, error) {
if h, ok := svc.Annotations[AnnotationHostname]; ok {
if err := dnsname.ValidLabel(h); err != nil {
return "", fmt.Errorf("invalid Tailscale hostname %q: %w", h, err)
}
return h, nil
}
return svc.Namespace + "-" + svc.Name, nil
}


@ -1,904 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package main
import (
"context"
"strings"
"sync"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"go.uber.org/zap"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/client/fake"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
"tailscale.com/client/tailscale"
"tailscale.com/types/ptr"
)
func TestLoadBalancerClass(t *testing.T) {
fc := fake.NewFakeClient()
ft := &fakeTSClient{}
zl, err := zap.NewDevelopment()
if err != nil {
t.Fatal(err)
}
sr := &ServiceReconciler{
Client: fc,
tsClient: ft,
defaultTags: []string{"tag:k8s"},
operatorNamespace: "operator-ns",
proxyImage: "tailscale/tailscale",
logger: zl.Sugar(),
}
// Create a service that we should manage, and check that the initial round
// of objects looks right.
mustCreate(t, fc, &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "default",
// The apiserver is supposed to set the UID, but the fake client
// doesn't. So, set it explicitly because other code later depends
// on it being set.
UID: types.UID("1234-UID"),
},
Spec: corev1.ServiceSpec{
ClusterIP: "10.20.30.40",
Type: corev1.ServiceTypeLoadBalancer,
LoadBalancerClass: ptr.To("tailscale"),
},
})
expectReconciled(t, sr, "default", "test")
fullName, shortName := findGenName(t, fc, "default", "test")
expectEqual(t, fc, expectedSecret(fullName))
expectEqual(t, fc, expectedHeadlessService(shortName))
expectEqual(t, fc, expectedSTS(shortName, fullName, "default-test", ""))
// Normally the Tailscale proxy pod would come up here and write its info
// into the secret. Simulate that, then verify reconcile again and verify
// that we get to the end.
mustUpdate(t, fc, "operator-ns", fullName, func(s *corev1.Secret) {
if s.Data == nil {
s.Data = map[string][]byte{}
}
s.Data["device_id"] = []byte("ts-id-1234")
s.Data["device_fqdn"] = []byte("tailscale.device.name.")
})
expectReconciled(t, sr, "default", "test")
want := &corev1.Service{
TypeMeta: metav1.TypeMeta{
Kind: "Service",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "default",
Finalizers: []string{"tailscale.com/finalizer"},
UID: types.UID("1234-UID"),
},
Spec: corev1.ServiceSpec{
ClusterIP: "10.20.30.40",
Type: corev1.ServiceTypeLoadBalancer,
LoadBalancerClass: ptr.To("tailscale"),
},
Status: corev1.ServiceStatus{
LoadBalancer: corev1.LoadBalancerStatus{
Ingress: []corev1.LoadBalancerIngress{
{
Hostname: "tailscale.device.name",
},
},
},
},
}
expectEqual(t, fc, want)
// Turn the service back into a ClusterIP service, which should make the
// operator clean up.
mustUpdate(t, fc, "default", "test", func(s *corev1.Service) {
s.Spec.Type = corev1.ServiceTypeClusterIP
s.Spec.LoadBalancerClass = nil
})
mustUpdateStatus(t, fc, "default", "test", func(s *corev1.Service) {
// Fake client doesn't automatically delete the LoadBalancer status when
// changing away from the LoadBalancer type, we have to do
// controller-manager's work by hand.
s.Status = corev1.ServiceStatus{}
})
// Synchronous StatefulSet deletion triggers a requeue. But the StatefulSet
// didn't create any child resources since this is all faked, so the
// deletion goes through immediately.
expectReconciled(t, sr, "default", "test")
expectMissing[appsv1.StatefulSet](t, fc, "operator-ns", shortName)
// The deletion triggers another reconcile, to finish the cleanup.
expectReconciled(t, sr, "default", "test")
expectMissing[appsv1.StatefulSet](t, fc, "operator-ns", shortName)
expectMissing[corev1.Service](t, fc, "operator-ns", shortName)
expectMissing[corev1.Secret](t, fc, "operator-ns", fullName)
want = &corev1.Service{
TypeMeta: metav1.TypeMeta{
Kind: "Service",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "default",
UID: types.UID("1234-UID"),
},
Spec: corev1.ServiceSpec{
ClusterIP: "10.20.30.40",
Type: corev1.ServiceTypeClusterIP,
},
}
expectEqual(t, fc, want)
}
func TestAnnotations(t *testing.T) {
fc := fake.NewFakeClient()
ft := &fakeTSClient{}
zl, err := zap.NewDevelopment()
if err != nil {
t.Fatal(err)
}
sr := &ServiceReconciler{
Client: fc,
tsClient: ft,
defaultTags: []string{"tag:k8s"},
operatorNamespace: "operator-ns",
proxyImage: "tailscale/tailscale",
logger: zl.Sugar(),
}
// Create a service that we should manage, and check that the initial round
// of objects looks right.
mustCreate(t, fc, &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "default",
// The apiserver is supposed to set the UID, but the fake client
// doesn't. So, set it explicitly because other code later depends
// on it being set.
UID: types.UID("1234-UID"),
Annotations: map[string]string{
"tailscale.com/expose": "true",
},
},
Spec: corev1.ServiceSpec{
ClusterIP: "10.20.30.40",
Type: corev1.ServiceTypeClusterIP,
},
})
expectReconciled(t, sr, "default", "test")
fullName, shortName := findGenName(t, fc, "default", "test")
expectEqual(t, fc, expectedSecret(fullName))
expectEqual(t, fc, expectedHeadlessService(shortName))
expectEqual(t, fc, expectedSTS(shortName, fullName, "default-test", ""))
want := &corev1.Service{
TypeMeta: metav1.TypeMeta{
Kind: "Service",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "default",
Finalizers: []string{"tailscale.com/finalizer"},
UID: types.UID("1234-UID"),
Annotations: map[string]string{
"tailscale.com/expose": "true",
},
},
Spec: corev1.ServiceSpec{
ClusterIP: "10.20.30.40",
Type: corev1.ServiceTypeClusterIP,
},
}
expectEqual(t, fc, want)
// Turn the service back into a ClusterIP service, which should make the
// operator clean up.
mustUpdate(t, fc, "default", "test", func(s *corev1.Service) {
delete(s.ObjectMeta.Annotations, "tailscale.com/expose")
})
// Synchronous StatefulSet deletion triggers a requeue. But the StatefulSet
// didn't create any child resources since this is all faked, so the
// deletion goes through immediately.
expectReconciled(t, sr, "default", "test")
expectMissing[appsv1.StatefulSet](t, fc, "operator-ns", shortName)
// Second time around, the rest of cleanup happens.
expectReconciled(t, sr, "default", "test")
expectMissing[appsv1.StatefulSet](t, fc, "operator-ns", shortName)
expectMissing[corev1.Service](t, fc, "operator-ns", shortName)
expectMissing[corev1.Secret](t, fc, "operator-ns", fullName)
want = &corev1.Service{
TypeMeta: metav1.TypeMeta{
Kind: "Service",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "default",
UID: types.UID("1234-UID"),
},
Spec: corev1.ServiceSpec{
ClusterIP: "10.20.30.40",
Type: corev1.ServiceTypeClusterIP,
},
}
expectEqual(t, fc, want)
}
func TestAnnotationIntoLB(t *testing.T) {
fc := fake.NewFakeClient()
ft := &fakeTSClient{}
zl, err := zap.NewDevelopment()
if err != nil {
t.Fatal(err)
}
sr := &ServiceReconciler{
Client: fc,
tsClient: ft,
defaultTags: []string{"tag:k8s"},
operatorNamespace: "operator-ns",
proxyImage: "tailscale/tailscale",
logger: zl.Sugar(),
}
// Create a service that we should manage, and check that the initial round
// of objects looks right.
mustCreate(t, fc, &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "default",
// The apiserver is supposed to set the UID, but the fake client
// doesn't. So, set it explicitly because other code later depends
// on it being set.
UID: types.UID("1234-UID"),
Annotations: map[string]string{
"tailscale.com/expose": "true",
},
},
Spec: corev1.ServiceSpec{
ClusterIP: "10.20.30.40",
Type: corev1.ServiceTypeClusterIP,
},
})
expectReconciled(t, sr, "default", "test")
fullName, shortName := findGenName(t, fc, "default", "test")
expectEqual(t, fc, expectedSecret(fullName))
expectEqual(t, fc, expectedHeadlessService(shortName))
expectEqual(t, fc, expectedSTS(shortName, fullName, "default-test", ""))
// Normally the Tailscale proxy pod would come up here and write its info
// into the secret. Simulate that, since it would have normally happened at
// this point and the LoadBalancer is going to expect this.
mustUpdate(t, fc, "operator-ns", fullName, func(s *corev1.Secret) {
if s.Data == nil {
s.Data = map[string][]byte{}
}
s.Data["device_id"] = []byte("ts-id-1234")
s.Data["device_fqdn"] = []byte("tailscale.device.name.")
})
expectReconciled(t, sr, "default", "test")
want := &corev1.Service{
TypeMeta: metav1.TypeMeta{
Kind: "Service",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "default",
Finalizers: []string{"tailscale.com/finalizer"},
UID: types.UID("1234-UID"),
Annotations: map[string]string{
"tailscale.com/expose": "true",
},
},
Spec: corev1.ServiceSpec{
ClusterIP: "10.20.30.40",
Type: corev1.ServiceTypeClusterIP,
},
}
expectEqual(t, fc, want)
// Remove Tailscale's annotation, and at the same time convert the service
// into a tailscale LoadBalancer.
mustUpdate(t, fc, "default", "test", func(s *corev1.Service) {
delete(s.ObjectMeta.Annotations, "tailscale.com/expose")
s.Spec.Type = corev1.ServiceTypeLoadBalancer
s.Spec.LoadBalancerClass = ptr.To("tailscale")
})
expectReconciled(t, sr, "default", "test")
// None of the proxy machinery should have changed...
expectEqual(t, fc, expectedHeadlessService(shortName))
expectEqual(t, fc, expectedSTS(shortName, fullName, "default-test", ""))
// ... but the service should have a LoadBalancer status.
want = &corev1.Service{
TypeMeta: metav1.TypeMeta{
Kind: "Service",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "default",
Finalizers: []string{"tailscale.com/finalizer"},
UID: types.UID("1234-UID"),
},
Spec: corev1.ServiceSpec{
ClusterIP: "10.20.30.40",
Type: corev1.ServiceTypeLoadBalancer,
LoadBalancerClass: ptr.To("tailscale"),
},
Status: corev1.ServiceStatus{
LoadBalancer: corev1.LoadBalancerStatus{
Ingress: []corev1.LoadBalancerIngress{
{
Hostname: "tailscale.device.name",
},
},
},
},
}
expectEqual(t, fc, want)
}
func TestLBIntoAnnotation(t *testing.T) {
fc := fake.NewFakeClient()
ft := &fakeTSClient{}
zl, err := zap.NewDevelopment()
if err != nil {
t.Fatal(err)
}
sr := &ServiceReconciler{
Client: fc,
tsClient: ft,
defaultTags: []string{"tag:k8s"},
operatorNamespace: "operator-ns",
proxyImage: "tailscale/tailscale",
logger: zl.Sugar(),
}
// Create a service that we should manage, and check that the initial round
// of objects looks right.
mustCreate(t, fc, &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "default",
// The apiserver is supposed to set the UID, but the fake client
// doesn't. So, set it explicitly because other code later depends
// on it being set.
UID: types.UID("1234-UID"),
},
Spec: corev1.ServiceSpec{
ClusterIP: "10.20.30.40",
Type: corev1.ServiceTypeLoadBalancer,
LoadBalancerClass: ptr.To("tailscale"),
},
})
expectReconciled(t, sr, "default", "test")
fullName, shortName := findGenName(t, fc, "default", "test")
expectEqual(t, fc, expectedSecret(fullName))
expectEqual(t, fc, expectedHeadlessService(shortName))
expectEqual(t, fc, expectedSTS(shortName, fullName, "default-test", ""))
// Normally the Tailscale proxy pod would come up here and write its info
// into the secret. Simulate that, then verify reconcile again and verify
// that we get to the end.
mustUpdate(t, fc, "operator-ns", fullName, func(s *corev1.Secret) {
if s.Data == nil {
s.Data = map[string][]byte{}
}
s.Data["device_id"] = []byte("ts-id-1234")
s.Data["device_fqdn"] = []byte("tailscale.device.name.")
})
expectReconciled(t, sr, "default", "test")
want := &corev1.Service{
TypeMeta: metav1.TypeMeta{
Kind: "Service",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "default",
Finalizers: []string{"tailscale.com/finalizer"},
UID: types.UID("1234-UID"),
},
Spec: corev1.ServiceSpec{
ClusterIP: "10.20.30.40",
Type: corev1.ServiceTypeLoadBalancer,
LoadBalancerClass: ptr.To("tailscale"),
},
Status: corev1.ServiceStatus{
LoadBalancer: corev1.LoadBalancerStatus{
Ingress: []corev1.LoadBalancerIngress{
{
Hostname: "tailscale.device.name",
},
},
},
},
}
expectEqual(t, fc, want)
// Turn the service back into a ClusterIP service, but also add the
// tailscale annotation.
mustUpdate(t, fc, "default", "test", func(s *corev1.Service) {
s.ObjectMeta.Annotations = map[string]string{
"tailscale.com/expose": "true",
}
s.Spec.Type = corev1.ServiceTypeClusterIP
s.Spec.LoadBalancerClass = nil
})
mustUpdateStatus(t, fc, "default", "test", func(s *corev1.Service) {
// Fake client doesn't automatically delete the LoadBalancer status when
// changing away from the LoadBalancer type, we have to do
// controller-manager's work by hand.
s.Status = corev1.ServiceStatus{}
})
expectReconciled(t, sr, "default", "test")
expectEqual(t, fc, expectedHeadlessService(shortName))
expectEqual(t, fc, expectedSTS(shortName, fullName, "default-test", ""))
want = &corev1.Service{
TypeMeta: metav1.TypeMeta{
Kind: "Service",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "default",
Finalizers: []string{"tailscale.com/finalizer"},
Annotations: map[string]string{
"tailscale.com/expose": "true",
},
UID: types.UID("1234-UID"),
},
Spec: corev1.ServiceSpec{
ClusterIP: "10.20.30.40",
Type: corev1.ServiceTypeClusterIP,
},
}
expectEqual(t, fc, want)
}
func TestCustomHostname(t *testing.T) {
fc := fake.NewFakeClient()
ft := &fakeTSClient{}
zl, err := zap.NewDevelopment()
if err != nil {
t.Fatal(err)
}
sr := &ServiceReconciler{
Client: fc,
tsClient: ft,
defaultTags: []string{"tag:k8s"},
operatorNamespace: "operator-ns",
proxyImage: "tailscale/tailscale",
logger: zl.Sugar(),
}
// Create a service that we should manage, and check that the initial round
// of objects looks right.
mustCreate(t, fc, &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "default",
// The apiserver is supposed to set the UID, but the fake client
// doesn't. So, set it explicitly because other code later depends
// on it being set.
UID: types.UID("1234-UID"),
Annotations: map[string]string{
"tailscale.com/expose": "true",
"tailscale.com/hostname": "reindeer-flotilla",
},
},
Spec: corev1.ServiceSpec{
ClusterIP: "10.20.30.40",
Type: corev1.ServiceTypeClusterIP,
},
})
expectReconciled(t, sr, "default", "test")
fullName, shortName := findGenName(t, fc, "default", "test")
expectEqual(t, fc, expectedSecret(fullName))
expectEqual(t, fc, expectedHeadlessService(shortName))
expectEqual(t, fc, expectedSTS(shortName, fullName, "reindeer-flotilla", ""))
want := &corev1.Service{
TypeMeta: metav1.TypeMeta{
Kind: "Service",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "default",
Finalizers: []string{"tailscale.com/finalizer"},
UID: types.UID("1234-UID"),
Annotations: map[string]string{
"tailscale.com/expose": "true",
"tailscale.com/hostname": "reindeer-flotilla",
},
},
Spec: corev1.ServiceSpec{
ClusterIP: "10.20.30.40",
Type: corev1.ServiceTypeClusterIP,
},
}
expectEqual(t, fc, want)
// Remove the tailscale expose annotation, which should make the operator
// clean up.
mustUpdate(t, fc, "default", "test", func(s *corev1.Service) {
delete(s.ObjectMeta.Annotations, "tailscale.com/expose")
})
// Synchronous StatefulSet deletion triggers a requeue, but the StatefulSet
// didn't create any child resources since this is all faked, so the
// deletion goes through immediately.
expectReconciled(t, sr, "default", "test")
expectMissing[appsv1.StatefulSet](t, fc, "operator-ns", shortName)
// Second time around, the rest of cleanup happens.
expectReconciled(t, sr, "default", "test")
expectMissing[appsv1.StatefulSet](t, fc, "operator-ns", shortName)
expectMissing[corev1.Service](t, fc, "operator-ns", shortName)
expectMissing[corev1.Secret](t, fc, "operator-ns", fullName)
want = &corev1.Service{
TypeMeta: metav1.TypeMeta{
Kind: "Service",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "default",
UID: types.UID("1234-UID"),
Annotations: map[string]string{
"tailscale.com/hostname": "reindeer-flotilla",
},
},
Spec: corev1.ServiceSpec{
ClusterIP: "10.20.30.40",
Type: corev1.ServiceTypeClusterIP,
},
}
expectEqual(t, fc, want)
}
func TestCustomPriorityClassName(t *testing.T) {
fc := fake.NewFakeClient()
ft := &fakeTSClient{}
zl, err := zap.NewDevelopment()
if err != nil {
t.Fatal(err)
}
sr := &ServiceReconciler{
Client: fc,
tsClient: ft,
defaultTags: []string{"tag:k8s"},
operatorNamespace: "operator-ns",
proxyImage: "tailscale/tailscale",
proxyPriorityClassName: "tailscale-critical",
logger: zl.Sugar(),
}
// Create a service that we should manage, and check that the initial round
// of objects looks right.
mustCreate(t, fc, &corev1.Service{
ObjectMeta: metav1.ObjectMeta{
Name: "test",
Namespace: "default",
// The apiserver is supposed to set the UID, but the fake client
// doesn't. So, set it explicitly because other code later depends
// on it being set.
UID: types.UID("1234-UID"),
Annotations: map[string]string{
"tailscale.com/expose": "true",
"tailscale.com/hostname": "custom-priority-class-name",
},
},
Spec: corev1.ServiceSpec{
ClusterIP: "10.20.30.40",
Type: corev1.ServiceTypeClusterIP,
},
})
expectReconciled(t, sr, "default", "test")
fullName, shortName := findGenName(t, fc, "default", "test")
expectEqual(t, fc, expectedSTS(shortName, fullName, "custom-priority-class-name", "tailscale-critical"))
}
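// expectedSecret returns the state Secret the operator should create for the
// proxy, pre-seeded with the auth key minted by the fake Tailscale client.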
func expectedSecret(name string) *corev1.Secret {
return &corev1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: "operator-ns",
Labels: map[string]string{
"tailscale.com/managed": "true",
"tailscale.com/parent-resource": "test",
"tailscale.com/parent-resource-ns": "default",
"tailscale.com/parent-resource-type": "svc",
},
},
StringData: map[string]string{
"authkey": "secret-authkey",
},
}
}
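// expectedHeadlessService returns the headless Service the operator should
// create to front the proxy StatefulSet.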
func expectedHeadlessService(name string) *corev1.Service {
return &corev1.Service{
TypeMeta: metav1.TypeMeta{
Kind: "Service",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
GenerateName: "ts-test-",
Namespace: "operator-ns",
Labels: map[string]string{
"tailscale.com/managed": "true",
"tailscale.com/parent-resource": "test",
"tailscale.com/parent-resource-ns": "default",
"tailscale.com/parent-resource-type": "svc",
},
},
Spec: corev1.ServiceSpec{
Selector: map[string]string{
"app": "1234-UID",
},
ClusterIP: "None",
},
}
}
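// expectedSTS returns the proxy StatefulSet the operator should create for
// the given StatefulSet and secret names, hostname, and priority class.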
func expectedSTS(stsName, secretName, hostname, priorityClassName string) *appsv1.StatefulSet {
return &appsv1.StatefulSet{
TypeMeta: metav1.TypeMeta{
Kind: "StatefulSet",
APIVersion: "apps/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: stsName,
Namespace: "operator-ns",
Labels: map[string]string{
"tailscale.com/managed": "true",
"tailscale.com/parent-resource": "test",
"tailscale.com/parent-resource-ns": "default",
"tailscale.com/parent-resource-type": "svc",
},
},
Spec: appsv1.StatefulSetSpec{
Replicas: ptr.To[int32](1),
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"app": "1234-UID"},
},
ServiceName: stsName,
Template: corev1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
DeletionGracePeriodSeconds: ptr.To[int64](10),
Labels: map[string]string{"app": "1234-UID"},
},
Spec: corev1.PodSpec{
ServiceAccountName: "proxies",
PriorityClassName: priorityClassName,
InitContainers: []corev1.Container{
{
Name: "sysctler",
Image: "busybox",
Command: []string{"/bin/sh"},
Args: []string{"-c", "sysctl -w net.ipv4.ip_forward=1 net.ipv6.conf.all.forwarding=1"},
SecurityContext: &corev1.SecurityContext{
Privileged: ptr.To(true),
},
},
},
Containers: []corev1.Container{
{
Name: "tailscale",
Image: "tailscale/tailscale",
Env: []corev1.EnvVar{
{Name: "TS_USERSPACE", Value: "false"},
{Name: "TS_AUTH_ONCE", Value: "true"},
{Name: "TS_DEST_IP", Value: "10.20.30.40"},
{Name: "TS_KUBE_SECRET", Value: secretName},
{Name: "TS_HOSTNAME", Value: hostname},
},
SecurityContext: &corev1.SecurityContext{
Capabilities: &corev1.Capabilities{
Add: []corev1.Capability{"NET_ADMIN"},
},
},
ImagePullPolicy: "Always",
},
},
},
},
},
}
}
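// findGenName returns the generated name of the proxy's state Secret, plus
// the same name with any trailing "-0" trimmed (the base name shared by the
// headless Service and StatefulSet).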
func findGenName(t *testing.T, client client.Client, ns, name string) (full, noSuffix string) {
t.Helper()
labels := map[string]string{
LabelManaged: "true",
LabelParentName: name,
LabelParentNamespace: ns,
LabelParentType: "svc",
}
s, err := getSingleObject[corev1.Secret](context.Background(), client, "operator-ns", labels)
if err != nil {
t.Fatalf("finding secret for %q: %v", name, err)
}
return s.GetName(), strings.TrimSuffix(s.GetName(), "-0")
}
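// mustCreate creates obj in the fake client, failing the test on error.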
func mustCreate(t *testing.T, client client.Client, obj client.Object) {
t.Helper()
if err := client.Create(context.Background(), obj); err != nil {
t.Fatalf("creating %q: %v", obj.GetName(), err)
}
}
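// mustUpdate fetches the named object, applies update to it, and writes it
// back, failing the test on any error.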
func mustUpdate[T any, O ptrObject[T]](t *testing.T, client client.Client, ns, name string, update func(O)) {
t.Helper()
obj := O(new(T))
if err := client.Get(context.Background(), types.NamespacedName{
Name: name,
Namespace: ns,
}, obj); err != nil {
t.Fatalf("getting %q: %v", name, err)
}
update(obj)
if err := client.Update(context.Background(), obj); err != nil {
t.Fatalf("updating %q: %v", name, err)
}
}
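// mustUpdateStatus is like mustUpdate, but persists the change through the
// status subresource client.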
func mustUpdateStatus[T any, O ptrObject[T]](t *testing.T, client client.Client, ns, name string, update func(O)) {
t.Helper()
obj := O(new(T))
if err := client.Get(context.Background(), types.NamespacedName{
Name: name,
Namespace: ns,
}, obj); err != nil {
t.Fatalf("getting %q: %v", name, err)
}
update(obj)
if err := client.Status().Update(context.Background(), obj); err != nil {
t.Fatalf("updating %q: %v", name, err)
}
}
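// expectEqual fails the test unless the stored object matches want,
// ignoring the resource version.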
func expectEqual[T any, O ptrObject[T]](t *testing.T, client client.Client, want O) {
t.Helper()
got := O(new(T))
if err := client.Get(context.Background(), types.NamespacedName{
Name: want.GetName(),
Namespace: want.GetNamespace(),
}, got); err != nil {
t.Fatalf("getting %q: %v", want.GetName(), err)
}
// The resource version changes eagerly whenever the operator does even a
// no-op update. Asserting a specific value leads to overly brittle tests,
// so just remove it from both got and want.
got.SetResourceVersion("")
want.SetResourceVersion("")
if diff := cmp.Diff(got, want); diff != "" {
t.Fatalf("unexpected object (-got +want):\n%s", diff)
}
}
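// expectMissing fails the test unless the named object is absent.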
func expectMissing[T any, O ptrObject[T]](t *testing.T, client client.Client, ns, name string) {
t.Helper()
obj := O(new(T))
if err := client.Get(context.Background(), types.NamespacedName{
Name: name,
Namespace: ns,
}, obj); !apierrors.IsNotFound(err) {
t.Fatalf("object %s/%s unexpectedly present, wanted missing", ns, name)
}
}
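// expectReconciled runs a single reconcile for ns/name and fails the test if
// it returns an error or requests any requeue.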
func expectReconciled(t *testing.T, sr *ServiceReconciler, ns, name string) {
t.Helper()
req := reconcile.Request{
NamespacedName: types.NamespacedName{
Name: name,
Namespace: ns,
},
}
res, err := sr.Reconcile(context.Background(), req)
if err != nil {
t.Fatalf("Reconcile: unexpected error: %v", err)
}
if res.Requeue {
t.Fatalf("unexpected immediate requeue")
}
if res.RequeueAfter != 0 {
t.Fatalf("unexpected timed requeue (%v)", res.RequeueAfter)
}
}
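// expectRequeue runs a single reconcile for ns/name and fails the test
// unless it requests a timed requeue.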
func expectRequeue(t *testing.T, sr *ServiceReconciler, ns, name string) {
t.Helper()
req := reconcile.Request{
NamespacedName: types.NamespacedName{
Name: name,
Namespace: ns,
},
}
res, err := sr.Reconcile(context.Background(), req)
if err != nil {
t.Fatalf("Reconcile: unexpected error: %v", err)
}
if res.Requeue {
t.Fatalf("unexpected immediate requeue")
}
if res.RequeueAfter == 0 {
t.Fatalf("expected timed requeue, got success")
}
}
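// fakeTSClient is a fake Tailscale API client that records the key requests
// and device deletions it receives.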
type fakeTSClient struct {
sync.Mutex
keyRequests []tailscale.KeyCapabilities
deleted []string
}
func (c *fakeTSClient) CreateKey(ctx context.Context, caps tailscale.KeyCapabilities) (string, *tailscale.Key, error) {
c.Lock()
defer c.Unlock()
c.keyRequests = append(c.keyRequests, caps)
k := &tailscale.Key{
ID: "key",
Created: time.Now(),
Capabilities: caps,
}
return "secret-authkey", k, nil
}
func (c *fakeTSClient) DeleteDevice(ctx context.Context, deviceID string) error {
c.Lock()
defer c.Unlock()
c.deleted = append(c.deleted, deviceID)
return nil
}
func (c *fakeTSClient) KeyRequests() []tailscale.KeyCapabilities {
c.Lock()
defer c.Unlock()
return c.keyRequests
}
func (c *fakeTSClient) Deleted() []string {
c.Lock()
defer c.Unlock()
return c.deleted
}

View File

@ -1,120 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
package main
import (
"context"
"crypto/tls"
"fmt"
"log"
"net/http"
"net/http/httputil"
"net/url"
"os"
"strings"
"tailscale.com/client/tailscale"
"tailscale.com/client/tailscale/apitype"
"tailscale.com/tsnet"
"tailscale.com/types/logger"
)
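// whoIsKey is the context key under which an authenticated request's WhoIs
// result is stored.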
type whoIsKey struct{}
// authProxy is an http.Handler that authenticates requests using the Tailscale
// LocalAPI and then proxies them to the Kubernetes API.
type authProxy struct {
logf logger.Logf
lc *tailscale.LocalClient
rp *httputil.ReverseProxy
}
func (h *authProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) {
who, err := h.lc.WhoIs(r.Context(), r.RemoteAddr)
if err != nil {
h.logf("failed to authenticate caller: %v", err)
http.Error(w, "failed to authenticate caller", http.StatusInternalServerError)
return
}
r = r.WithContext(context.WithValue(r.Context(), whoIsKey{}, who))
h.rp.ServeHTTP(w, r)
}
// runAuthProxy runs an HTTP server that authenticates requests using the
// Tailscale LocalAPI and then proxies them to the Kubernetes API.
// It listens on :443 and uses the Tailscale HTTPS certificate.
// s will be started if it is not already running.
// rt is used to proxy requests to the Kubernetes API.
//
// It never returns.
func runAuthProxy(s *tsnet.Server, rt http.RoundTripper, logf logger.Logf) {
ln, err := s.Listen("tcp", ":443")
if err != nil {
log.Fatalf("could not listen on :443: %v", err)
}
u, err := url.Parse(fmt.Sprintf("https://%s:%s", os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT_HTTPS")))
if err != nil {
log.Fatalf("runAuthProxy: failed to parse URL %v", err)
}
lc, err := s.LocalClient()
if err != nil {
log.Fatalf("could not get local client: %v", err)
}
ap := &authProxy{
logf: logf,
lc: lc,
rp: &httputil.ReverseProxy{
Director: func(r *http.Request) {
// We want to proxy to the Kubernetes API, but we want to use
// the caller's identity to do so. We do this by impersonating
// the caller using the Kubernetes User Impersonation feature:
// https://kubernetes.io/docs/reference/access-authn-authz/authentication/#user-impersonation
// Out of paranoia, remove all authentication headers that might
// have been set by the client.
r.Header.Del("Authorization")
r.Header.Del("Impersonate-Group")
r.Header.Del("Impersonate-User")
r.Header.Del("Impersonate-Uid")
for k := range r.Header {
if strings.HasPrefix(k, "Impersonate-Extra-") {
r.Header.Del(k)
}
}
// Now add the impersonation headers that we want.
who := r.Context().Value(whoIsKey{}).(*apitype.WhoIsResponse)
if who.Node.IsTagged() {
// Use the node's FQDN as the username, and the node's tags as the groups.
// "Impersonate-Group" requires "Impersonate-User" to be set.
r.Header.Set("Impersonate-User", strings.TrimSuffix(who.Node.Name, "."))
for _, tag := range who.Node.Tags {
r.Header.Add("Impersonate-Group", tag)
}
} else {
r.Header.Set("Impersonate-User", who.UserProfile.LoginName)
}
// Replace the URL with the Kubernetes APIServer.
r.URL.Scheme = u.Scheme
r.URL.Host = u.Host
},
Transport: rt,
},
}
hs := &http.Server{
// Kubernetes uses SPDY for exec and port-forward, however SPDY is
// incompatible with HTTP/2; so disable HTTP/2 in the proxy.
TLSConfig: &tls.Config{
GetCertificate: lc.GetCertificate,
NextProtos: []string{"http/1.1"},
},
TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)),
Handler: ap,
}
if err := hs.ServeTLS(ln, "", ""); err != nil {
log.Fatalf("runAuthProxy: failed to serve %v", err)
}
}

View File

@ -1,51 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// The mkmanifest command is a simple helper utility to create a '.syso' file
// that contains a Windows manifest file.
package main
import (
"log"
"os"
"github.com/tc-hib/winres"
)
func main() {
if len(os.Args) != 4 {
log.Fatalf("usage: %s arch manifest.xml output.syso", os.Args[0])
}
arch := winres.Arch(os.Args[1])
switch arch {
case winres.ArchAMD64, winres.ArchARM64, winres.ArchI386:
default:
log.Fatalf("unsupported arch: %s", arch)
}
manifest, err := os.ReadFile(os.Args[2])
if err != nil {
log.Fatalf("error reading manifest file %q: %v", os.Args[2], err)
}
out := os.Args[3]
// Start by creating an empty resource set
rs := winres.ResourceSet{}
// Add resources
rs.Set(winres.RT_MANIFEST, winres.ID(1), 0, manifest)
// Compile to a COFF object file
f, err := os.Create(out)
if err != nil {
log.Fatalf("error creating output file %q: %v", out, err)
}
if err := rs.WriteObject(f, arch); err != nil {
log.Fatalf("error writing object: %v", err)
}
if err := f.Close(); err != nil {
log.Fatalf("error writing output file %q: %v", out, err)
}
}

View File

@ -1,5 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// mkpkg builds the Tailscale rpm and deb packages.
package main
@ -57,7 +58,6 @@ func main() {
postrm := flag.String("postrm", "", "debian postrm script path")
replaces := flag.String("replaces", "", "package which this package replaces, if any")
depends := flag.String("depends", "", "comma-separated list of packages this package depends on")
recommends := flag.String("recommends", "", "comma-separated list of packages this package recommends")
flag.Parse()
filesMap, err := parseFiles(*files)
@ -93,9 +93,6 @@ func main() {
if len(*depends) != 0 {
info.Overridables.Depends = strings.Split(*depends, ",")
}
if len(*recommends) != 0 {
info.Overridables.Recommends = strings.Split(*recommends, ",")
}
if *replaces != "" {
info.Overridables.Replaces = []string{*replaces}
info.Overridables.Conflicts = []string{*replaces}

View File

@ -1,44 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// mkversion gets version info from git and outputs a bunch of shell variables
// that get used elsewhere in the build system to embed version numbers into
// binaries.
package main
import (
"bufio"
"bytes"
"fmt"
"io"
"os"
"time"
"tailscale.com/tailcfg"
"tailscale.com/version/mkversion"
)
func main() {
prefix := ""
if len(os.Args) > 1 {
if os.Args[1] == "--export" {
prefix = "export "
} else {
fmt.Println("usage: mkversion [--export|-h|--help]")
os.Exit(1)
}
}
var b bytes.Buffer
io.WriteString(&b, mkversion.Info().String())
// Copyright and the client capability are not part of the version
// information, but they are similarly embedded into metadata by Xcode
// builds, so generate them now too.
copyright := fmt.Sprintf("Copyright © %d Tailscale Inc. All Rights Reserved.", time.Now().Year())
fmt.Fprintf(&b, "VERSION_COPYRIGHT=%q\n", copyright)
fmt.Fprintf(&b, "VERSION_CAPABILITY=%d\n", tailcfg.CurrentCapabilityVersion)
s := bufio.NewScanner(&b)
for s.Scan() {
fmt.Println(prefix + s.Text())
}
}

View File

@ -1,7 +0,0 @@
# nardump
nardump is like nix-store --dump, but in Go, writing a NAR file (tar-like,
but focused on being reproducible) to stdout or to a hash with the --sri flag.
It lets us calculate the Nix sha256 in shell.nix without the person running
git-pull-oss.sh having Nix available.

View File

@ -1,184 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// nardump is like nix-store --dump, but in Go, writing a NAR
// file (tar-like, but focused on being reproducible) to stdout
// or to a hash with the --sri flag.
//
// It lets us calculate a Nix sha256 without the person running
// git-pull-oss.sh having Nix available.
package main
// For the NAR format, see:
// https://gist.github.com/jbeda/5c79d2b1434f0018d693
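//
// Every token in a NAR is encoded as an 8-byte little-endian length
// followed by the token's bytes, zero-padded to a multiple of 8 bytes.
// An archive is the token "nix-archive-1" followed by a parenthesized
// node description, e.g. "(" "type" "directory" ... ")".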
import (
"bufio"
"crypto/sha256"
"encoding/base64"
"encoding/binary"
"flag"
"fmt"
"io"
"io/fs"
"log"
"os"
"path"
"sort"
)
var sri = flag.Bool("sri", false, "print SRI")
func main() {
flag.Parse()
if flag.NArg() != 1 {
log.Fatal("usage: nardump <dir>")
}
arg := flag.Arg(0)
if err := os.Chdir(arg); err != nil {
log.Fatal(err)
}
if *sri {
hash := sha256.New()
if err := writeNAR(hash, os.DirFS(".")); err != nil {
log.Fatal(err)
}
fmt.Printf("sha256-%s\n", base64.StdEncoding.EncodeToString(hash.Sum(nil)))
return
}
bw := bufio.NewWriter(os.Stdout)
if err := writeNAR(bw, os.DirFS(".")); err != nil {
log.Fatal(err)
}
bw.Flush()
}
// writeNARError is a sentinel panic type that's recovered by writeNAR
// and converted into the wrapped error.
type writeNARError struct{ err error }
// narWriter writes NAR files.
type narWriter struct {
w io.Writer
fs fs.FS
}
// writeNAR writes a NAR file to w from the root of fs.
func writeNAR(w io.Writer, fs fs.FS) (err error) {
defer func() {
if e := recover(); e != nil {
if we, ok := e.(writeNARError); ok {
err = we.err
return
}
panic(e)
}
}()
nw := &narWriter{w: w, fs: fs}
nw.str("nix-archive-1")
return nw.writeDir(".")
}
func (nw *narWriter) writeDir(dirPath string) error {
ents, err := fs.ReadDir(nw.fs, dirPath)
if err != nil {
return err
}
sort.Slice(ents, func(i, j int) bool {
return ents[i].Name() < ents[j].Name()
})
nw.str("(")
nw.str("type")
nw.str("directory")
for _, ent := range ents {
nw.str("entry")
nw.str("(")
nw.str("name")
nw.str(ent.Name())
nw.str("node")
mode := ent.Type()
sub := path.Join(dirPath, ent.Name())
var err error
switch {
case mode.IsRegular():
err = nw.writeRegular(sub)
case mode.IsDir():
err = nw.writeDir(sub)
default:
// TODO(bradfitz): symlink, but requires fighting io/fs a bit
// to get at Readlink or the osFS via fs. But for now
// we don't need symlinks because they're not in Go's archive.
return fmt.Errorf("unsupported file type %v at %q", sub, mode)
}
if err != nil {
return err
}
nw.str(")")
}
nw.str(")")
return nil
}
func (nw *narWriter) writeRegular(path string) error {
nw.str("(")
nw.str("type")
nw.str("regular")
fi, err := fs.Stat(nw.fs, path)
if err != nil {
return err
}
if fi.Mode()&0111 != 0 {
nw.str("executable")
nw.str("")
}
contents, err := fs.ReadFile(nw.fs, path)
if err != nil {
return err
}
nw.str("contents")
if err := writeBytes(nw.w, contents); err != nil {
return err
}
nw.str(")")
return nil
}
func (nw *narWriter) str(s string) {
if err := writeString(nw.w, s); err != nil {
panic(writeNARError{err})
}
}
func writeString(w io.Writer, s string) error {
var buf [8]byte
binary.LittleEndian.PutUint64(buf[:], uint64(len(s)))
if _, err := w.Write(buf[:]); err != nil {
return err
}
if _, err := io.WriteString(w, s); err != nil {
return err
}
return writePad(w, len(s))
}
func writeBytes(w io.Writer, b []byte) error {
var buf [8]byte
binary.LittleEndian.PutUint64(buf[:], uint64(len(b)))
if _, err := w.Write(buf[:]); err != nil {
return err
}
if _, err := w.Write(b); err != nil {
return err
}
return writePad(w, len(b))
}
func writePad(w io.Writer, n int) error {
pad := n % 8
if pad == 0 {
return nil
}
var zeroes [8]byte
_, err := w.Write(zeroes[:8-pad])
return err
}

View File

@ -1,386 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// netlogfmt parses a stream of JSON log messages from stdin and
// formats the network traffic logs produced by "tailscale.com/wgengine/netlog"
// according to the schema in "tailscale.com/types/netlogtype.Message"
// in a more human-readable format.
//
// Example usage:
//
// $ cat netlog.json | go run tailscale.com/cmd/netlogfmt
// =========================================================================================
// NodeID: n123456CNTRL
// Logged: 2022-10-13T20:23:10.165Z
// Window: 2022-10-13T20:23:09.644Z (5s)
// --------------------------------------------------- Tx[P/s] Tx[B/s] Rx[P/s] Rx[B/s]
// VirtualTraffic: 16.80 1.64Ki 11.20 1.03Ki
// TCP: 100.109.51.95:22 -> 100.85.80.41:42912 16.00 1.59Ki 10.40 1008.84
// TCP: 100.109.51.95:21291 -> 100.107.177.2:53133 0.40 27.60 0.40 24.20
// TCP: 100.109.51.95:21291 -> 100.107.177.2:53134 0.40 23.40 0.40 24.20
// PhysicalTraffic: 16.80 2.32Ki 11.20 1.48Ki
// 100.85.80.41 -> 192.168.0.101:41641 16.00 2.23Ki 10.40 1.40Ki
// 100.107.177.2 -> 192.168.0.100:41641 0.80 83.20 0.80 83.20
// =========================================================================================
package main
import (
"encoding/base64"
"encoding/json"
"flag"
"fmt"
"io"
"log"
"math"
"net/http"
"net/netip"
"os"
"strconv"
"strings"
"time"
"github.com/dsnet/try"
jsonv2 "github.com/go-json-experiment/json"
"golang.org/x/exp/maps"
"golang.org/x/exp/slices"
"tailscale.com/types/logid"
"tailscale.com/types/netlogtype"
"tailscale.com/util/must"
)
var (
resolveNames = flag.Bool("resolve-names", false, "convert tailscale IP addresses to hostnames; must also specify --api-key and --tailnet-name")
apiKey = flag.String("api-key", "", "API key to query the Tailscale API with; see https://login.tailscale.com/admin/settings/keys")
tailnetName = flag.String("tailnet-name", "", "tailnet domain name to lookup devices in; see https://login.tailscale.com/admin/settings/general")
)
var namesByAddr map[netip.Addr]string
func main() {
flag.Parse()
if *resolveNames {
namesByAddr = mustMakeNamesByAddr()
}
// The logic handles a stream of arbitrary JSON.
// So long as a JSON object seems like a network log message,
// then this will unmarshal and print it.
if err := processStream(os.Stdin); err != nil {
if err == io.EOF {
return
}
log.Fatalf("processStream: %v", err)
}
}
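// processStream decodes a stream of top-level JSON values from r until EOF
// (or any other error), which is returned.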
func processStream(r io.Reader) (err error) {
defer try.Handle(&err)
dec := jsonv2.NewDecoder(r)
for {
processValue(dec)
}
}
func processValue(dec *jsonv2.Decoder) {
switch dec.PeekKind() {
case '[':
processArray(dec)
case '{':
processObject(dec)
default:
try.E(dec.SkipValue())
}
}
func processArray(dec *jsonv2.Decoder) {
try.E1(dec.ReadToken()) // parse '['
for dec.PeekKind() != ']' {
processValue(dec)
}
try.E1(dec.ReadToken()) // parse ']'
}
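// processObject scans one JSON object, buffering any members that could
// belong to a network log message; if traffic members were present, the
// reassembled message is unmarshaled and printed.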
func processObject(dec *jsonv2.Decoder) {
var hasTraffic bool
var rawMsg []byte
try.E1(dec.ReadToken()) // parse '{'
for dec.PeekKind() != '}' {
// Capture any members that could belong to a network log message.
switch name := try.E1(dec.ReadToken()); name.String() {
case "virtualTraffic", "subnetTraffic", "exitTraffic", "physicalTraffic":
hasTraffic = true
fallthrough
case "logtail", "nodeId", "logged", "start", "end":
if len(rawMsg) == 0 {
rawMsg = append(rawMsg, '{')
} else {
rawMsg = append(rawMsg[:len(rawMsg)-1], ',')
}
rawMsg = append(append(append(rawMsg, '"'), name.String()...), '"')
rawMsg = append(rawMsg, ':')
rawMsg = append(rawMsg, try.E1(dec.ReadValue())...)
rawMsg = append(rawMsg, '}')
default:
processValue(dec)
}
}
try.E1(dec.ReadToken()) // parse '}'
// If this appears to be a network log message, then unmarshal and print it.
if hasTraffic {
var msg message
try.E(jsonv2.Unmarshal(rawMsg, &msg))
printMessage(msg)
}
}
type message struct {
Logtail struct {
ID logid.PublicID `json:"id"`
Logged time.Time `json:"server_time"`
} `json:"logtail"`
Logged time.Time `json:"logged"`
netlogtype.Message
}
func printMessage(msg message) {
// Construct a table of network traffic per connection.
rows := [][7]string{{3: "Tx[P/s]", 4: "Tx[B/s]", 5: "Rx[P/s]", 6: "Rx[B/s]"}}
duration := msg.End.Sub(msg.Start)
addRows := func(heading string, traffic []netlogtype.ConnectionCounts) {
if len(traffic) == 0 {
return
}
slices.SortFunc(traffic, func(x, y netlogtype.ConnectionCounts) bool {
nx := x.TxPackets + x.TxBytes + x.RxPackets + x.RxBytes
ny := y.TxPackets + y.TxBytes + y.RxPackets + y.RxBytes
return nx > ny
})
var sum netlogtype.Counts
for _, cc := range traffic {
sum = sum.Add(cc.Counts)
}
rows = append(rows, [7]string{
0: heading + ":",
3: formatSI(float64(sum.TxPackets) / duration.Seconds()),
4: formatIEC(float64(sum.TxBytes) / duration.Seconds()),
5: formatSI(float64(sum.RxPackets) / duration.Seconds()),
6: formatIEC(float64(sum.RxBytes) / duration.Seconds()),
})
if len(traffic) == 1 && traffic[0].Connection.IsZero() {
return // this is already a summary count
}
formatAddrPort := func(a netip.AddrPort) string {
if !a.IsValid() {
return ""
}
if name, ok := namesByAddr[a.Addr()]; ok {
if a.Port() == 0 {
return name
}
return name + ":" + strconv.Itoa(int(a.Port()))
}
if a.Port() == 0 {
return a.Addr().String()
}
return a.String()
}
for _, cc := range traffic {
row := [7]string{
0: " ",
1: formatAddrPort(cc.Src),
2: formatAddrPort(cc.Dst),
3: formatSI(float64(cc.TxPackets) / duration.Seconds()),
4: formatIEC(float64(cc.TxBytes) / duration.Seconds()),
5: formatSI(float64(cc.RxPackets) / duration.Seconds()),
6: formatIEC(float64(cc.RxBytes) / duration.Seconds()),
}
if cc.Proto > 0 {
row[0] += cc.Proto.String() + ":"
}
rows = append(rows, row)
}
}
addRows("VirtualTraffic", msg.VirtualTraffic)
addRows("SubnetTraffic", msg.SubnetTraffic)
addRows("ExitTraffic", msg.ExitTraffic)
addRows("PhysicalTraffic", msg.PhysicalTraffic)
// Compute the maximum width of each field.
var maxWidths [7]int
for _, row := range rows {
for i, col := range row {
if maxWidths[i] < len(col) && !(i == 0 && !strings.HasPrefix(col, " ")) {
maxWidths[i] = len(col)
}
}
}
var maxSum int
for _, n := range maxWidths {
maxSum += n
}
// Output a table of network traffic per connection.
line := make([]byte, 0, maxSum+len(" ")+len(" -> ")+4*len(" "))
line = appendRepeatByte(line, '=', cap(line))
fmt.Println(string(line))
if !msg.Logtail.ID.IsZero() {
fmt.Printf("LogID: %s\n", msg.Logtail.ID)
}
if msg.NodeID != "" {
fmt.Printf("NodeID: %s\n", msg.NodeID)
}
formatTime := func(t time.Time) string {
return t.In(time.Local).Format("2006-01-02 15:04:05.000")
}
switch {
case !msg.Logged.IsZero():
fmt.Printf("Logged: %s\n", formatTime(msg.Logged))
case !msg.Logtail.Logged.IsZero():
fmt.Printf("Logged: %s\n", formatTime(msg.Logtail.Logged))
}
fmt.Printf("Window: %s (%0.3fs)\n", formatTime(msg.Start), duration.Seconds())
for i, row := range rows {
line = line[:0]
isHeading := !strings.HasPrefix(row[0], " ")
for j, col := range row {
if isHeading && j == 0 {
col = "" // headings will be printed later
}
switch j {
case 0, 2: // left justified
line = append(line, col...)
line = appendRepeatByte(line, ' ', maxWidths[j]-len(col))
case 1, 3, 4, 5, 6: // right justified
line = appendRepeatByte(line, ' ', maxWidths[j]-len(col))
line = append(line, col...)
}
switch j {
case 0:
line = append(line, " "...)
case 1:
if row[1] == "" && row[2] == "" {
line = append(line, " "...)
} else {
line = append(line, " -> "...)
}
case 2, 3, 4, 5:
line = append(line, " "...)
}
}
switch {
case i == 0: // print dashed-line table heading
line = appendRepeatByte(line[:0], '-', maxWidths[0]+len(" ")+maxWidths[1]+len(" -> ")+maxWidths[2])[:cap(line)]
case isHeading:
copy(line[:], row[0])
}
fmt.Println(string(line))
}
}
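// mustMakeNamesByAddr queries the Tailscale API for the devices in the
// tailnet and maps each Tailscale address to the shortest dot-separated
// name prefix that is unique across the tailnet. It exits on any failure.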
func mustMakeNamesByAddr() map[netip.Addr]string {
switch {
case *apiKey == "":
log.Fatalf("--api-key must be specified with --resolve-names")
case *tailnetName == "":
log.Fatalf("--tailnet must be specified with --resolve-names")
}
// Query the Tailscale API for a list of devices in the tailnet.
const apiURL = "https://api.tailscale.com/api/v2"
req := must.Get(http.NewRequest("GET", apiURL+"/tailnet/"+*tailnetName+"/devices", nil))
req.Header.Add("Authorization", "Basic "+base64.StdEncoding.EncodeToString([]byte(*apiKey+":")))
resp := must.Get(http.DefaultClient.Do(req))
defer resp.Body.Close()
b := must.Get(io.ReadAll(resp.Body))
if resp.StatusCode != 200 {
log.Fatalf("http: %v: %s", http.StatusText(resp.StatusCode), b)
}
// Unmarshal the API response.
var m struct {
Devices []struct {
Name string `json:"name"`
Addrs []netip.Addr `json:"addresses"`
} `json:"devices"`
}
must.Do(json.Unmarshal(b, &m))
// Construct a unique mapping of Tailscale IP addresses to hostnames.
// For brevity, we start with the first segment of the name and
// use more segments until we find the shortest prefix that is unique
// for all names in the tailnet.
seen := make(map[string]bool)
namesByAddr := make(map[netip.Addr]string)
retry:
for i := 0; i < 10; i++ {
maps.Clear(seen)
maps.Clear(namesByAddr)
for _, d := range m.Devices {
name := fieldPrefix(d.Name, i)
if seen[name] {
continue retry
}
seen[name] = true
for _, a := range d.Addrs {
namesByAddr[a] = name
}
}
return namesByAddr
}
panic("unable to produce unique mapping of address to names")
}
// fieldPrefix returns the first n number of dot-separated segments.
//
// Example:
//
// fieldPrefix("foo.bar.baz", 0) returns ""
// fieldPrefix("foo.bar.baz", 1) returns "foo"
// fieldPrefix("foo.bar.baz", 2) returns "foo.bar"
// fieldPrefix("foo.bar.baz", 3) returns "foo.bar.baz"
// fieldPrefix("foo.bar.baz", 4) returns "foo.bar.baz"
func fieldPrefix(s string, n int) string {
s0 := s
for i := 0; i < n && len(s) > 0; i++ {
if j := strings.IndexByte(s, '.'); j >= 0 {
s = s[j+1:]
} else {
s = ""
}
}
return strings.TrimSuffix(s0[:len(s0)-len(s)], ".")
}
func appendRepeatByte(b []byte, c byte, n int) []byte {
for i := 0; i < n; i++ {
b = append(b, c)
}
return b
}
func formatSI(n float64) string {
switch n := math.Abs(n); {
case n < 1e3:
return fmt.Sprintf("%0.2f ", n/(1e0))
case n < 1e6:
return fmt.Sprintf("%0.2fk", n/(1e3))
case n < 1e9:
return fmt.Sprintf("%0.2fM", n/(1e6))
default:
return fmt.Sprintf("%0.2fG", n/(1e9))
}
}
func formatIEC(n float64) string {
switch n := math.Abs(n); {
case n < 1<<10:
return fmt.Sprintf("%0.2f ", n/(1<<0))
case n < 1<<20:
return fmt.Sprintf("%0.2fKi", n/(1<<10))
case n < 1<<30:
return fmt.Sprintf("%0.2fMi", n/(1<<20))
default:
return fmt.Sprintf("%0.2fGi", n/(1<<30))
}
}

View File

@ -1,7 +1,5 @@
# nginx-auth
[![status: experimental](https://img.shields.io/badge/status-experimental-blue)](https://tailscale.com/kb/1167/release-stages/#experimental)
This is a tool that allows users to use Tailscale Whois authentication with
NGINX as a reverse proxy. This allows users that already have a bunch of
services hosted on an internal NGINX server to point those domains to the
@ -129,7 +127,7 @@ the `Expected-Tailnet` header to your auth request:
```nginx
location /auth {
# ...
proxy_set_header Expected-Tailnet "tailnet012345.ts.net";
proxy_set_header Expected-Tailnet "tailscale.com";
}
```
@ -146,8 +144,6 @@ generic "forbidden" error page:
</html>
```
You can get the tailnet name from [the admin panel](https://login.tailscale.com/admin/dns).
## Building
Install `cmd/mkpkg`:

View File

@ -2,31 +2,30 @@
set -e
VERSION=0.1.3
for ARCH in amd64 arm64; do
CGO_ENABLED=0 GOARCH=${ARCH} GOOS=linux go build -o tailscale.nginx-auth .
CGO_ENABLED=0 GOARCH=amd64 GOOS=linux go build -o tailscale.nginx-auth .
mkpkg \
--out=tailscale-nginx-auth-${VERSION}-${ARCH}.deb \
--name=tailscale-nginx-auth \
--version=${VERSION} \
--type=deb \
--arch=${ARCH} \
--postinst=deb/postinst.sh \
--postrm=deb/postrm.sh \
--prerm=deb/prerm.sh \
--description="Tailscale NGINX authentication protocol handler" \
--files=./tailscale.nginx-auth:/usr/sbin/tailscale.nginx-auth,./tailscale.nginx-auth.socket:/lib/systemd/system/tailscale.nginx-auth.socket,./tailscale.nginx-auth.service:/lib/systemd/system/tailscale.nginx-auth.service,./README.md:/usr/share/tailscale/nginx-auth/README.md
VERSION=0.1.1
mkpkg \
--out=tailscale-nginx-auth-${VERSION}-${ARCH}.rpm \
--name=tailscale-nginx-auth \
--version=${VERSION} \
--type=rpm \
--arch=${ARCH} \
--postinst=rpm/postinst.sh \
--postrm=rpm/postrm.sh \
--prerm=rpm/prerm.sh \
--description="Tailscale NGINX authentication protocol handler" \
--files=./tailscale.nginx-auth:/usr/sbin/tailscale.nginx-auth,./tailscale.nginx-auth.socket:/lib/systemd/system/tailscale.nginx-auth.socket,./tailscale.nginx-auth.service:/lib/systemd/system/tailscale.nginx-auth.service,./README.md:/usr/share/tailscale/nginx-auth/README.md
done
mkpkg \
--out=tailscale-nginx-auth-${VERSION}-amd64.deb \
--name=tailscale-nginx-auth \
--version=${VERSION} \
--type=deb \
--arch=amd64 \
--postinst=deb/postinst.sh \
--postrm=deb/postrm.sh \
--prerm=deb/prerm.sh \
--description="Tailscale NGINX authentication protocol handler" \
--files=./tailscale.nginx-auth:/usr/sbin/tailscale.nginx-auth,./tailscale.nginx-auth.socket:/lib/systemd/system/tailscale.nginx-auth.socket,./tailscale.nginx-auth.service:/lib/systemd/system/tailscale.nginx-auth.service,./README.md:/usr/share/tailscale/nginx-auth/README.md
mkpkg \
--out=tailscale-nginx-auth-${VERSION}-amd64.rpm \
--name=tailscale-nginx-auth \
--version=${VERSION} \
--type=rpm \
--arch=amd64 \
--postinst=rpm/postinst.sh \
--postrm=rpm/postrm.sh \
--prerm=rpm/prerm.sh \
--description="Tailscale NGINX authentication protocol handler" \
--files=./tailscale.nginx-auth:/usr/sbin/tailscale.nginx-auth,./tailscale.nginx-auth.socket:/lib/systemd/system/tailscale.nginx-auth.socket,./tailscale.nginx-auth.service:/lib/systemd/system/tailscale.nginx-auth.service,./README.md:/usr/share/tailscale/nginx-auth/README.md

View File

@ -1,5 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build linux
@ -56,7 +57,7 @@ func main() {
return
}
if info.Node.IsTagged() {
if len(info.Node.Tags) != 0 {
w.WriteHeader(http.StatusForbidden)
log.Printf("node %s is tagged", info.Node.Hostinfo.Hostname())
return

View File

@ -1,5 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// The pgproxy server is a proxy for the Postgres wire protocol.
package main
@ -272,7 +273,7 @@ func (p *proxy) serve(sessionID int64, c net.Conn) error {
}
if buf[0] != 'S' {
p.errors.Add("upstream-bad-protocol", 1)
return fmt.Errorf("upstream didn't acknowledge start-ssl, said %q", buf[0])
return fmt.Errorf("upstream didn't acknowldge start-ssl, said %q", buf[0])
}
tlsConf := &tls.Config{
ServerName: p.upstreamHost,

View File

@ -1,5 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2021 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// The printdep command is a build system tool for printing out information
// about dependencies.
@ -31,11 +32,20 @@ func main() {
fmt.Println(strings.TrimSpace(ts.GoToolchainRev))
}
if *goToolchainURL {
var suffix string
switch runtime.GOARCH {
case "amd64":
// None
case "arm64":
suffix = "-" + runtime.GOARCH
default:
log.Fatalf("unsupported GOARCH %q", runtime.GOARCH)
}
switch runtime.GOOS {
case "linux", "darwin":
default:
log.Fatalf("unsupported GOOS %q", runtime.GOOS)
}
fmt.Printf("https://github.com/tailscale/go/releases/download/build-%s/%s-%s.tar.gz\n", strings.TrimSpace(ts.GoToolchainRev), runtime.GOOS, runtime.GOARCH)
fmt.Printf("https://github.com/tailscale/go/releases/download/build-%s/%s%s.tar.gz\n", strings.TrimSpace(ts.GoToolchainRev), runtime.GOOS, suffix)
}
}

View File

@ -1,5 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// proxy-to-grafana is a reverse proxy which identifies users based on their
// originating Tailscale identity and maps them to corresponding Grafana
@ -147,7 +148,7 @@ func getTailscaleUser(ctx context.Context, localClient *tailscale.LocalClient, i
if err != nil {
return nil, fmt.Errorf("failed to identify remote host: %w", err)
}
if whois.Node.IsTagged() {
if len(whois.Node.Tags) != 0 {
return nil, fmt.Errorf("tagged nodes are not users")
}
if whois.UserProfile == nil || whois.UserProfile.LoginName == "" {

View File

@ -1,224 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// The sniproxy is an outbound SNI proxy. It receives TLS connections over
// Tailscale on one or more TCP ports and sends them out to the same SNI
// hostname & port on the internet. It only does TCP.
package main
import (
"context"
"flag"
"log"
"net"
"net/http"
"strings"
"time"
"golang.org/x/net/dns/dnsmessage"
"inet.af/tcpproxy"
"tailscale.com/client/tailscale"
"tailscale.com/hostinfo"
"tailscale.com/net/netutil"
"tailscale.com/tsnet"
"tailscale.com/types/nettype"
)
var (
ports = flag.String("ports", "443", "comma-separated list of ports to proxy")
wgPort = flag.Int("wg-listen-port", 0, "UDP port to listen on for WireGuard and peer-to-peer traffic; 0 means automatically select")
promoteHTTPS = flag.Bool("promote-https", true, "promote HTTP to HTTPS")
)
var tsMBox = dnsmessage.MustNewName("support.tailscale.com.")
func main() {
flag.Parse()
if *ports == "" {
log.Fatal("no ports")
}
hostinfo.SetApp("sniproxy")
var s server
s.ts.Port = uint16(*wgPort)
defer s.ts.Close()
lc, err := s.ts.LocalClient()
if err != nil {
log.Fatal(err)
}
s.lc = lc
for _, portStr := range strings.Split(*ports, ",") {
ln, err := s.ts.Listen("tcp", ":"+portStr)
if err != nil {
log.Fatal(err)
}
log.Printf("Serving on port %v ...", portStr)
go s.serve(ln)
}
ln, err := s.ts.Listen("udp", ":53")
if err != nil {
log.Fatal(err)
}
go s.serveDNS(ln)
if *promoteHTTPS {
ln, err := s.ts.Listen("tcp", ":80")
if err != nil {
log.Fatal(err)
}
log.Printf("Promoting HTTP to HTTPS ...")
go s.promoteHTTPS(ln)
}
select {}
}
type server struct {
ts tsnet.Server
lc *tailscale.LocalClient
}
func (s *server) serve(ln net.Listener) {
for {
c, err := ln.Accept()
if err != nil {
log.Fatal(err)
}
go s.serveConn(c)
}
}
func (s *server) serveDNS(ln net.Listener) {
for {
c, err := ln.Accept()
if err != nil {
log.Fatal(err)
}
go s.serveDNSConn(c.(nettype.ConnPacketConn))
}
}
func (s *server) serveDNSConn(c nettype.ConnPacketConn) {
defer c.Close()
c.SetReadDeadline(time.Now().Add(5 * time.Second))
buf := make([]byte, 1500)
n, err := c.Read(buf)
if err != nil {
log.Printf("c.Read failed: %v\n ", err)
return
}
var msg dnsmessage.Message
err = msg.Unpack(buf[:n])
if err != nil {
log.Printf("dnsmessage unpack failed: %v\n ", err)
return
}
buf, err = s.dnsResponse(&msg)
if err != nil {
log.Printf("s.dnsResponse failed: %v\n", err)
return
}
_, err = c.Write(buf)
if err != nil {
log.Printf("c.Write failed: %v\n", err)
return
}
}
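// serveConn handles one accepted TLS connection: it wraps the connection in
// a one-connection listener so tcpproxy can sniff the SNI hostname, then
// dials that same hostname on the connection's local port.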
func (s *server) serveConn(c net.Conn) {
addrPortStr := c.LocalAddr().String()
_, port, err := net.SplitHostPort(addrPortStr)
if err != nil {
log.Printf("bogus addrPort %q", addrPortStr)
c.Close()
return
}
var dialer net.Dialer
dialer.Timeout = 5 * time.Second
var p tcpproxy.Proxy
p.ListenFunc = func(net, laddr string) (net.Listener, error) {
return netutil.NewOneConnListener(c, nil), nil
}
p.AddSNIRouteFunc(addrPortStr, func(ctx context.Context, sniName string) (t tcpproxy.Target, ok bool) {
return &tcpproxy.DialProxy{
Addr: net.JoinHostPort(sniName, port),
DialContext: dialer.DialContext,
}, true
})
p.Start()
}
func (s *server) dnsResponse(req *dnsmessage.Message) (buf []byte, err error) {
resp := dnsmessage.NewBuilder(buf,
dnsmessage.Header{
ID: req.Header.ID,
Response: true,
Authoritative: true,
})
resp.EnableCompression()
if len(req.Questions) == 0 {
buf, _ = resp.Finish()
return
}
q := req.Questions[0]
err = resp.StartQuestions()
if err != nil {
return
}
resp.Question(q)
ip4, ip6 := s.ts.TailscaleIPs()
err = resp.StartAnswers()
if err != nil {
return
}
switch q.Type {
case dnsmessage.TypeAAAA:
err = resp.AAAAResource(
dnsmessage.ResourceHeader{Name: q.Name, Class: q.Class, TTL: 120},
dnsmessage.AAAAResource{AAAA: ip6.As16()},
)
case dnsmessage.TypeA:
err = resp.AResource(
dnsmessage.ResourceHeader{Name: q.Name, Class: q.Class, TTL: 120},
dnsmessage.AResource{A: ip4.As4()},
)
case dnsmessage.TypeSOA:
err = resp.SOAResource(
dnsmessage.ResourceHeader{Name: q.Name, Class: q.Class, TTL: 120},
dnsmessage.SOAResource{NS: q.Name, MBox: tsMBox, Serial: 2023030600,
Refresh: 120, Retry: 120, Expire: 120, MinTTL: 60},
)
case dnsmessage.TypeNS:
err = resp.NSResource(
dnsmessage.ResourceHeader{Name: q.Name, Class: q.Class, TTL: 120},
dnsmessage.NSResource{NS: tsMBox},
)
}
if err != nil {
return
}
return resp.Finish()
}
func (s *server) promoteHTTPS(ln net.Listener) {
err := http.Serve(ln, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
http.Redirect(w, r, "https://"+r.Host+r.RequestURI, http.StatusFound)
}))
log.Fatalf("promoteHTTPS http.Serve: %v", err)
}

View File

@ -1,5 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2021 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Program speedtest provides the speedtest command. The reason to keep it separate from
// the normal tailscale cli is because it is not yet ready to go in the tailscale binary.

View File

@ -1,5 +1,6 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// ssh-auth-none-demo is a demo SSH server that's meant to run on the
// public internet (at 188.166.70.128 port 2222) and
@ -64,10 +65,8 @@ func main() {
ServerConfigCallback: func(ctx ssh.Context) *gossh.ServerConfig {
start := time.Now()
return &gossh.ServerConfig{
NextAuthMethodCallback: func(conn gossh.ConnMetadata, prevErrors []error) []string {
return []string{"tailscale"}
},
NoClientAuth: true, // required for the NoClientAuthCallback to run
ImplicitAuthMethod: "tailscale",
NoClientAuth: true, // required for the NoClientAuthCallback to run
NoClientAuthCallback: func(cm gossh.ConnMetadata) (*gossh.Permissions, error) {
cm.SendAuthBanner(fmt.Sprintf("# Banner: doing none auth at %v\r\n", time.Since(start)))

View File

@ -1,57 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// Command stunc makes a STUN request to a STUN server and prints the result.
package main
import (
"log"
"net"
"os"
"tailscale.com/net/stun"
)
func main() {
log.SetFlags(0)
if len(os.Args) != 2 {
log.Fatalf("usage: %s <hostname>", os.Args[0])
}
host := os.Args[1]
uaddr, err := net.ResolveUDPAddr("udp", host+":3478")
if err != nil {
log.Fatal(err)
}
c, err := net.ListenUDP("udp", nil)
if err != nil {
log.Fatal(err)
}
txID := stun.NewTxID()
req := stun.Request(txID)
_, err = c.WriteToUDP(req, uaddr)
if err != nil {
log.Fatal(err)
}
var buf [1024]byte
n, raddr, err := c.ReadFromUDPAddrPort(buf[:])
if err != nil {
log.Fatal(err)
}
tid, saddr, err := stun.ParseResponse(buf[:n])
if err != nil {
log.Fatal(err)
}
if tid != txID {
log.Fatalf("txid mismatch: got %v, want %v", tid, txID)
}
log.Printf("sent addr: %v", uaddr)
log.Printf("from addr: %v", raddr)
log.Printf("stun addr: %v", saddr)
}

View File

@ -1,212 +0,0 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
// The sync-containers command synchronizes container image tags from one
// registry to another.
//
// It is intended as a workaround for ghcr.io's lack of good push credentials:
// you can either authorize "classic" Personal Access Tokens in your org (which
// are a common vector of very bad compromise), or you can get a short-lived
// credential in a Github action.
//
// Since we publish to both Docker Hub and ghcr.io, we use this program in a
// Github action to effectively rsync from docker hub into ghcr.io, so that we
// can continue to forbid dangerous Personal Access Tokens in the tailscale org.
package main
import (
"context"
"flag"
"fmt"
"log"
"sort"
"strings"
"github.com/google/go-containerregistry/pkg/authn"
"github.com/google/go-containerregistry/pkg/authn/github"
"github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/remote"
"github.com/google/go-containerregistry/pkg/v1/types"
)
var (
src = flag.String("src", "", "Source image")
dst = flag.String("dst", "", "Destination image")
max = flag.Int("max", 0, "Maximum number of tags to sync (0 for all tags)")
dryRun = flag.Bool("dry-run", true, "Don't actually sync anything")
)
func main() {
flag.Parse()
if *src == "" {
log.Fatalf("--src is required")
}
if *dst == "" {
log.Fatalf("--dst is required")
}
keychain := authn.NewMultiKeychain(authn.DefaultKeychain, github.Keychain)
opts := []remote.Option{
remote.WithAuthFromKeychain(keychain),
remote.WithContext(context.Background()),
}
stags, err := listTags(*src, opts...)
if err != nil {
log.Fatalf("listing source tags: %v", err)
}
dtags, err := listTags(*dst, opts...)
if err != nil {
log.Fatalf("listing destination tags: %v", err)
}
add, remove := diffTags(stags, dtags)
if l := len(add); l > 0 {
log.Printf("%d tags to push: %s", len(add), strings.Join(add, ", "))
if *max > 0 && l > *max {
log.Printf("Limiting sync to %d tags", *max)
add = add[:*max]
}
}
for _, tag := range add {
if !*dryRun {
log.Printf("Syncing tag %q", tag)
if err := copyTag(*src, *dst, tag, opts...); err != nil {
log.Printf("Syncing tag %q: progress error: %v", tag, err)
}
} else {
log.Printf("Dry run: would sync tag %q", tag)
}
}
if len(remove) > 0 {
log.Printf("%d tags to remove: %s\n", len(remove), strings.Join(remove, ", "))
log.Printf("Not removing any tags for safety.\n")
}
var wellKnown = [...]string{"latest", "stable"}
for _, tag := range wellKnown {
if needsUpdate(*src, *dst, tag) {
if err := copyTag(*src, *dst, tag, opts...); err != nil {
log.Printf("Updating tag %q: progress error: %v", tag, err)
}
}
}
}
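// copyTag copies a single tag (either a plain image or a multi-arch index)
// from srcStr to dstStr, logging transfer progress along the way.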
func copyTag(srcStr, dstStr, tag string, opts ...remote.Option) error {
src, err := name.ParseReference(fmt.Sprintf("%s:%s", srcStr, tag))
if err != nil {
return err
}
dst, err := name.ParseReference(fmt.Sprintf("%s:%s", dstStr, tag))
if err != nil {
return err
}
desc, err := remote.Get(src)
if err != nil {
return err
}
ch := make(chan v1.Update, 10)
opts = append(opts, remote.WithProgress(ch))
progressDone := make(chan struct{})
go func() {
defer close(progressDone)
for p := range ch {
fmt.Printf("Syncing tag %q: %d%% (%d/%d)\n", tag, int(float64(p.Complete)/float64(p.Total)*100), p.Complete, p.Total)
if p.Error != nil {
fmt.Printf("error: %v\n", p.Error)
}
}
}()
switch desc.MediaType {
case types.OCIManifestSchema1, types.DockerManifestSchema2:
img, err := desc.Image()
if err != nil {
return err
}
if err := remote.Write(dst, img, opts...); err != nil {
return err
}
case types.OCIImageIndex, types.DockerManifestList:
idx, err := desc.ImageIndex()
if err != nil {
return err
}
if err := remote.WriteIndex(dst, idx, opts...); err != nil {
return err
}
}
<-progressDone
return nil
}
func listTags(repoStr string, opts ...remote.Option) ([]string, error) {
repo, err := name.NewRepository(repoStr)
if err != nil {
return nil, err
}
tags, err := remote.List(repo, opts...)
if err != nil {
return nil, err
}
sort.Strings(tags)
return tags, nil
}
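// diffTags returns the tags present only in src (add) and only in dst
// (remove), each sorted.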
func diffTags(src, dst []string) (add, remove []string) {
srcd := make(map[string]bool)
for _, tag := range src {
srcd[tag] = true
}
dstd := make(map[string]bool)
for _, tag := range dst {
dstd[tag] = true
}
for _, tag := range src {
if !dstd[tag] {
add = append(add, tag)
}
}
for _, tag := range dst {
if !srcd[tag] {
remove = append(remove, tag)
}
}
sort.Strings(add)
sort.Strings(remove)
return add, remove
}
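// needsUpdate reports whether tag should be re-copied: true if dst is
// missing the tag or its digest differs from src's.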
func needsUpdate(srcStr, dstStr, tag string) bool {
src, err := name.ParseReference(fmt.Sprintf("%s:%s", srcStr, tag))
if err != nil {
return false
}
dst, err := name.ParseReference(fmt.Sprintf("%s:%s", dstStr, tag))
if err != nil {
return false
}
srcDesc, err := remote.Get(src)
if err != nil {
return false
}
dstDesc, err := remote.Get(dst)
if err != nil {
return true
}
return srcDesc.Digest != dstDesc.Digest
}

Some files were not shown because too many files have changed in this diff.