Compare commits

...

34 Commits

Author SHA1 Message Date
Rhea Ghosh 043a34500d VERSION.txt: this is v1.38.4
Signed-off-by: Rhea Ghosh <rhea@tailscale.com>
2023-04-05 12:02:42 -05:00
shayne 214217dd10
cmd/tailscale/cli: [serve] add support for proxy paths (#7800)
(cherry picked from commit 81fd00a6b7)
2023-04-05 12:34:02 -04:00
Maisem Ali 00205f0ab6
ssh/tailssh: handle output matching better in tests (#7799) 2023-04-05 11:36:46 -04:00
shayne 61f36aa1cd
cmd/tailscale/cli: do not allow turning Funnel on while shields-up (#7770) 2023-04-05 09:57:26 -04:00
Mihai Parparita 296d6820b5
cmd/tailscale/cli: fix inconsistency between serve text and example command
Use the same local port number in both, and be more precise about what
is being forwarded

Signed-off-by: Mihai Parparita <mihai@tailscale.com>
2023-04-05 09:57:21 -04:00
shayne 383b7c747a
cmd/tailscale/cli: make serve and funnel visible in list (#7737) 2023-04-05 09:57:12 -04:00
David Anderson c3301abc5e go.toolchain.rev: update for go 1.20.3
Signed-off-by: David Anderson <danderson@tailscale.com>
(cherry picked from commit 45138fcfba)
2023-04-04 12:07:49 -07:00
Maisem Ali 49e305f862
ssh/tailssh: fix race in errors returned when starting recorder
There were two code paths that could fail depending on how fast
the recorder responds. This fixes that by returning the correct
error from both paths.

Fixes #7707

Signed-off-by: Maisem Ali <maisem@tailscale.com>
(cherry picked from commit e04acabfde)
2023-03-31 17:01:01 -07:00
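
The pattern behind this fix, as a hedged standalone sketch (the session and recorder names in the real code differ): cancel the shared context with a cause, so that whichever code path observes the failure can recover the same underlying error via the standard library's context.Cause (Go 1.20+).
```
package main

import (
	"context"
	"errors"
	"fmt"
)

func main() {
	// One shared cancellable context stands in for the SSH session context.
	ctx, cancel := context.WithCancelCause(context.Background())

	// Path 1: the recording upload fails quickly and cancels the session,
	// recording the real reason as the cancellation cause.
	cancel(errors.New("recording: server responded with 403 Forbidden"))

	// Path 2: code that only observes a canceled context can still surface
	// the original error instead of a bare "context canceled".
	if errors.Is(ctx.Err(), context.Canceled) {
		fmt.Println(context.Cause(ctx))
	}
}
```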
Maisem Ali 71a5f2a989
ssh/tailssh: add tests for recording failure
Updates tailscale/corp#9967

Signed-off-by: Maisem Ali <maisem@tailscale.com>
(cherry picked from commit 5ba57e4661)
2023-03-31 17:00:54 -07:00
Maisem Ali 1b1ac05d95
ssh/tailssh: add session recording test for non-pty sessions
Updates tailscale/corp#9967

Signed-off-by: Maisem Ali <maisem@tailscale.com>
(cherry picked from commit 09d0b632d4)
2023-03-31 17:00:48 -07:00
Maisem Ali e6b81f983e
ssh/tailssh: handle session recording when running in userspace mode
Previously it would dial out using http.DefaultClient, but that doesn't work
when tailscaled is running in userspace mode (e.g. when testing).

Updates tailscale/corp#9967

Signed-off-by: Maisem Ali <maisem@tailscale.com>
(cherry picked from commit 583e86b7df)
2023-03-31 17:00:37 -07:00
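
The core of the change, extracted as a self-contained sketch: build the upload's HTTP client around a caller-supplied dialer instead of http.DefaultClient. `userDial` here is a stand-in for tailscaled's own dialer (`tsdial.Dialer.UserDial` in the diff further below); the rest is standard library.
```
package example

import (
	"context"
	"net"
	"net/http"
	"time"
)

// newRecordingClient returns an http.Client whose connections are opened by
// userDial rather than the default dialer, so uploads still work when
// tailscaled itself provides the network (userspace mode).
func newRecordingClient(userDial func(ctx context.Context, network, addr string) (net.Conn, error)) *http.Client {
	tr := http.DefaultTransport.(*http.Transport).Clone()
	tr.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
		ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
		defer cancel()
		return userDial(ctx, network, addr)
	}
	return &http.Client{Transport: tr}
}
```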
Maisem Ali 8414c591e5
ssh/tailssh: enable recording of non-pty sessions
Updates tailscale/corp#9967

Signed-off-by: Maisem Ali <maisem@tailscale.com>
(cherry picked from commit 8a246487c2)
2023-03-31 17:00:28 -07:00
Maisem Ali 0651c1a069
ssh/tailssh: add docs to CastHeader fields
Updates tailscale/corp#9967

Signed-off-by: Maisem Ali <maisem@tailscale.com>
(cherry picked from commit 8765568373)
2023-03-31 17:00:19 -07:00
Maisem Ali 2474bd2754
ssh/tailssh: use background context for uploading recordings
Otherwise we see errors like
```
ssh-session(sess-20230322T005655-5562985593): recording: error sending recording to <addr>:80: Post "http://<addr>:80/record": context canceled
```

The ss.ctx is closed when the session closes, but we don't want to break the upload at that time. Instead we want to wait for the session to
close the writer when it finishes, which it is already doing.

Updates tailscale/corp#9967

Signed-off-by: Maisem Ali <maisem@tailscale.com>
(cherry picked from commit c350cd1f06)
2023-03-31 17:00:10 -07:00
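
A minimal sketch of the idea (names are placeholders, not the real tailssh code): bind the upload request to context.Background() and end it by closing the pipe writer, so a closing session can no longer cancel an in-flight recording upload.
```
package example

import (
	"context"
	"io"
	"net/http"
)

// startUpload begins a streaming POST of the recording and returns the
// writer the session should feed; closing the writer finishes the upload.
func startUpload(hc *http.Client, url string) (io.WriteCloser, error) {
	pr, pw := io.Pipe()
	// Deliberately not the session context: the session may end while the
	// last bytes of the recording are still being flushed to the recorder.
	req, err := http.NewRequestWithContext(context.Background(), "POST", url, pr)
	if err != nil {
		pr.Close()
		pw.Close()
		return nil, err
	}
	go func() {
		resp, err := hc.Do(req) // returns once pw is closed, or on error
		if err == nil {
			resp.Body.Close()
		}
	}()
	return pw, nil
}
```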
Maisem Ali 40091d0261
ssh/tailssh: allow recorders to be configured on the first or final action
Currently we only send down recorders in the first action; allow the final
action to replace them, but not to drop them.

Updates tailscale/corp#9967

Signed-off-by: Maisem Ali <maisem@tailscale.com>
(cherry picked from commit d92047cc30)
2023-03-31 17:00:01 -07:00
Maisem Ali d216363bc5
ssh/tailssh: add more metadata to recording header
Updates tailscale/corp#9967

Signed-off-by: Maisem Ali <maisem@tailscale.com>
(cherry picked from commit 7a97e64ef0)
2023-03-31 16:59:52 -07:00
Maisem Ali dbbc465bfd
ssh/tailssh: stream SSH recordings to configured recorders
Updates tailscale/corp#9967

Signed-off-by: Maisem Ali <maisem@tailscale.com>
(cherry picked from commit 916aa782af)
2023-03-31 16:59:44 -07:00
Charlotte Brandhorst-Satzkorn 598b24d85c
tailcfg: move recorders field from SSHRule to SSHAction
Signed-off-by: Charlotte Brandhorst-Satzkorn <charlotte@tailscale.com>
(cherry picked from commit 1b78dc1f33)
2023-03-31 16:59:31 -07:00
Charlotte Brandhorst-Satzkorn 17c6d5c7c5
tailcfg: add recorders field to SSHRule struct
This change introduces the Recorders field to the SSHRule struct. The
field is used to store and define the addresses where the SSH recorder
is located.

Signed-off-by: Charlotte Brandhorst-Satzkorn <charlotte@tailscale.com>
(cherry picked from commit 3efd83555f)
2023-03-31 16:59:22 -07:00
Shayne Sweeney 47ebe6f956
VERSION.txt: this is v1.38.3
Signed-off-by: Shayne Sweeney <shayne@tailscale.com>
2023-03-29 13:41:59 -04:00
shayne c750186830
ipn/ipnlocal: [serve] Trim mountPoint prefix from proxy path (#7334)
This change trims the mountPoint from the request URL path before
sending the request to the reverse proxy.

Today, if you mount a proxy at `/foo` and request `/foo/bar/baz`, we leak
the `mountPoint` `/foo` as part of the request URL's path.

This fix removes the `mountPoint` prefix from the path so proxied
services receive requests as if they were running at the root (`/`)
path.

This could be an issue if the app generates URLs (in HTML or otherwise)
and assumes `/path`. In this case, those URLs will 404.

That said, I still think we should trim by default and not leak the
`mountPoint` (specific to Tailscale) into whatever app is hosted.
If it causes an issue with URL generation, I'd suggest looking at configuring
an app-specific path prefix or running Caddy as a more advanced
solution.

Fixes: #6571

Signed-off-by: Shayne Sweeney <shayne@tailscale.com>
2023-03-28 19:23:50 -04:00
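
A runnable illustration of the trimming described above, using only the standard library; the backend handler and paths are hypothetical. With the mount point stripped, a proxy mounted at `/foo` receives `/bar/baz` instead of `/foo/bar/baz`.
```
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

func main() {
	backend := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "backend saw path %q", r.URL.Path)
	})

	mux := http.NewServeMux()
	// Without StripPrefix the backend would see "/foo/bar/baz"; with the
	// mount point trimmed it sees "/bar/baz", as if served at "/".
	mux.Handle("/foo/", http.StripPrefix("/foo", backend))

	srv := httptest.NewServer(mux)
	defer srv.Close()

	resp, err := http.Get(srv.URL + "/foo/bar/baz")
	if err != nil {
		panic(err)
	}
	body, _ := io.ReadAll(resp.Body)
	resp.Body.Close()
	fmt.Println(string(body)) // backend saw path "/bar/baz"
}
```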
shayne d7bbd4fe03
ipn/ipnlocal: [serve/funnel] use actual SrcAddr as X-Forwarded-For (#7600)
The reverse proxy was sending the ingressd IPv6 down as the
X-Forwarded-For. This update uses the actual remote addr.

Updates tailscale/corp#9914

Signed-off-by: Shayne Sweeney <shayne@tailscale.com>
2023-03-28 19:23:43 -04:00
shayne ac0c0b081d
funnel: change references from alpha to beta (#7613)
Updates CLI and docs to reference Funnel as beta

Signed-off-by: Shayne Sweeney <shayne@tailscale.com>
2023-03-28 19:23:37 -04:00
Maisem Ali 068ed7dbfa
ipn/ipnlocal: use atomicfile.WriteFile in certFileStore
Signed-off-by: Maisem Ali <maisem@tailscale.com>
(cherry picked from commit 9e81db50f6)
2023-03-26 16:29:57 -07:00
Maisem Ali 26bf7c4dbe
ipn/ipnlocal: fix cert storage in Kubernetes
We were checking against the wrong directory; instead, if we have a
custom store configured, just use that.

Fixes #7588
Fixes #7665

Signed-off-by: Maisem Ali <maisem@tailscale.com>
(cherry picked from commit 8a11f76a0d)
2023-03-26 16:26:55 -07:00
Maisem Ali d47b74e461
ipn/ipnlocal: also store ACME keys in the certStore
We were not storing the ACME keys in the state store; they would always
be stored on disk.

Updates #7588

Signed-off-by: Maisem Ali <maisem@tailscale.com>
(cherry picked from commit ec90522a53)
2023-03-26 16:26:39 -07:00
Denton Gentry 3db61d07ca
VERSION.txt: this is v1.38.2
Signed-off-by: Denton Gentry <dgentry@tailscale.com>
2023-03-22 10:04:44 -07:00
Mihai Parparita 817aa282c2
net/sockstats: export cellular-only clientmetrics
Followup to #7518 to also export client metrics when the active interface
is cellular.

Updates tailscale/corp#9230
Updates #3363

Signed-off-by: Mihai Parparita <mihai@tailscale.com>
(cherry picked from commit d2dec13392)
2023-03-22 09:14:14 -07:00
Andrew Dunham d00c046b72
ssh/tailssh: fix privilege dropping on FreeBSD; add tests
On FreeBSD and Darwin, changing a process's supplementary groups with
setgroups(2) will also change the egid of the process, setting it to the
first entry in the provided list. This is distinct from the behaviour on
other platforms (and possibly a violation of the POSIX standard).

Because of this, on FreeBSD with no TTY, our incubator code would
previously not change the process's gid, because it would read the
newly-changed egid, compare it against the expected egid, and since they
matched, not change the gid. Because we didn't use the 'login' program
on FreeBSD without a TTY, this would propagate to a child process.

This could be observed by running "id -p" in two contexts. The expected
output, and the output returned when running from an SSH shell, is:

    andrew@freebsd:~ $ id -p
    uid         andrew
    groups      andrew

However, when run via "ssh andrew@freebsd id -p", the output would be:

    $ ssh andrew@freebsd id -p
    login       root
    uid         andrew
    rgid        wheel
    groups      andrew

(this could also be observed via "id -g -r" to print just the gid)

We fix this by pulling the details of privilege dropping out into their
own function and prepending the expected gid to the start of the list on
Darwin and FreeBSD.

Finally, we add some tests that run a child process, drop privileges,
and assert that the final UID/GID/additional groups are what we expect.

More information can be found in the following article:
    https://www.usenix.org/system/files/login/articles/325-tsafrir.pdf

Updates #7616
Alternative to #7609

Signed-off-by: Andrew Dunham <andrew@du.nham.ca>
Change-Id: I0e6513c31b121108b50fe561c89e5816d84a45b9
(cherry picked from commit ccace1f7df)
2023-03-21 16:47:53 -07:00
Tom DNetto aad01c81b1
cmd/tailscale/cli: move tskey-wrap functionality under lock sign
Signed-off-by: Tom DNetto <tom@tailscale.com>
(cherry picked from commit 60cd4ac08d)
2023-03-21 14:30:15 -07:00
Denton Gentry fd558e2e68
net/interfaces: also allow link-local for AzureAppServices.
In May 2021, Azure App Services used 172.16.x.x addresses:
```
10: eth0@if11: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue state UP
    link/ether 02:42:ac:10:01:03 brd ff:ff:ff:ff:ff:ff
    inet 172.16.1.3/24 brd 172.16.1.255 scope global eth0
       valid_lft forever preferred_lft forever
```

Now it uses link-local:
```
2: eth0@if6: <BROADCAST,MULTICAST,UP,LOWER_UP,M-DOWN> mtu 1500 qdisc noqueue state UP
    link/ether 8a:30:1f:50:1d:23 brd ff:ff:ff:ff:ff:ff
    inet 169.254.129.3/24 brd 169.254.129.255 scope global eth0
       valid_lft forever preferred_lft forever
```

This is reasonable for them to choose to do; it just broke the handling in net/interfaces.

This PR proposes to:
1. Always allow link-local in LocalAddresses() if we have no better
   address available.
2. Continue to make isUsableV4() conditional on an environment we know
   requires it.

I don't love the idea of having to discover these environments one by
one, but I don't understand the consequences of making isUsableV4()
return true unconditionally; that would make isUsableV4() essentially
always return true and perform no function.

Fixes https://github.com/tailscale/tailscale/issues/7603

Signed-off-by: Denton Gentry <dgentry@tailscale.com>
(cherry picked from commit ebc630c6c0)
2023-03-20 14:52:27 -07:00
Denton Gentry 3eeff9e7f7
VERSION.txt: this is v1.38.1
Signed-off-by: Denton Gentry <dgentry@tailscale.com>
2023-03-14 14:39:43 -07:00
David Anderson 6c0e6a5f4e version/mkversion: don't break on tagged go.mod entries
I thought our versioning scheme would make go.mod include a commit hash
even on stable builds. I was wrong. Fortunately, the rest of this code
wants anything that 'git rev-parse' understands (to convert it into a full
git hash), and tags qualify.

Signed-off-by: David Anderson <danderson@tailscale.com>
(cherry picked from commit 9ebab961c9)
2023-03-14 14:32:10 -07:00
Denton Gentry 10d462d321
VERSION.txt: this is v1.38.0
Signed-off-by: Denton Gentry <dgentry@tailscale.com>
2023-03-14 13:17:16 -07:00
22 changed files with 1111 additions and 175 deletions

View File

@ -1 +1 @@
1.37.0
1.38.4

View File

@ -120,6 +120,8 @@ change in the future.
pingCmd,
ncCmd,
sshCmd,
funnelCmd,
serveCmd,
versionCmd,
webCmd,
fileCmd,
@ -147,10 +149,6 @@ change in the future.
switch {
case slices.Contains(args, "debug"):
rootCmd.Subcommands = append(rootCmd.Subcommands, debugCmd)
case slices.Contains(args, "funnel"):
rootCmd.Subcommands = append(rootCmd.Subcommands, funnelCmd)
case slices.Contains(args, "serve"):
rootCmd.Subcommands = append(rootCmd.Subcommands, serveCmd)
case slices.Contains(args, "update"):
rootCmd.Subcommands = append(rootCmd.Subcommands, updateCmd)
}

View File

@ -29,7 +29,7 @@ var funnelCmd = newFunnelCommand(&serveEnv{lc: &localClient})
func newFunnelCommand(e *serveEnv) *ffcli.Command {
return &ffcli.Command{
Name: "funnel",
ShortHelp: "[ALPHA] turn Tailscale Funnel on or off",
ShortHelp: "Turn on/off Funnel service",
ShortUsage: strings.TrimSpace(`
funnel <serve-port> {on|off}
funnel status [--json]

View File

@ -40,9 +40,17 @@ var netlockCmd = &ffcli.Command{
nlDisablementKDFCmd,
nlLogCmd,
nlLocalDisableCmd,
nlTskeyWrapCmd,
},
Exec: runNetworkLockStatus,
Exec: runNetworkLockNoSubcommand,
}
func runNetworkLockNoSubcommand(ctx context.Context, args []string) error {
// Detect & handle the deprecated command 'lock tskey-wrap'.
if len(args) >= 2 && args[0] == "tskey-wrap" {
return runTskeyWrapCmd(ctx, args[1:])
}
return runNetworkLockStatus(ctx, args)
}
var nlInitArgs struct {
@ -427,13 +435,19 @@ func runNetworkLockModify(ctx context.Context, addArgs, removeArgs []string) err
var nlSignCmd = &ffcli.Command{
Name: "sign",
ShortUsage: "sign <node-key> [<rotation-key>]",
ShortHelp: "Signs a node key and transmits the signature to the coordination server",
LongHelp: "Signs a node key and transmits the signature to the coordination server",
Exec: runNetworkLockSign,
ShortUsage: "sign <node-key> [<rotation-key>] or sign <auth-key>",
ShortHelp: "Signs a node or pre-approved auth key",
LongHelp: `Either:
- signs a node key and transmits the signature to the coordination server, or
- signs a pre-approved auth key, printing it in a form that can be used to bring up nodes under tailnet lock`,
Exec: runNetworkLockSign,
}
func runNetworkLockSign(ctx context.Context, args []string) error {
if len(args) > 0 && strings.HasPrefix(args[0], "tskey-auth-") {
return runTskeyWrapCmd(ctx, args)
}
var (
nodeKey key.NodePublic
rotationKey key.NLPublic
@ -636,14 +650,6 @@ func runNetworkLockLog(ctx context.Context, args []string) error {
return nil
}
var nlTskeyWrapCmd = &ffcli.Command{
Name: "tskey-wrap",
ShortUsage: "tskey-wrap <tailscale pre-auth key>",
ShortHelp: "Modifies a pre-auth key from the admin panel to work with tailnet lock",
LongHelp: "Modifies a pre-auth key from the admin panel to work with tailnet lock",
Exec: runTskeyWrapCmd,
}
func runTskeyWrapCmd(ctx context.Context, args []string) error {
if len(args) != 1 {
return errors.New("usage: lock tskey-wrap <tailscale pre-auth key>")
@ -657,21 +663,25 @@ func runTskeyWrapCmd(ctx context.Context, args []string) error {
return fixTailscaledConnectError(err)
}
return wrapAuthKey(ctx, args[0], st)
}
func wrapAuthKey(ctx context.Context, keyStr string, status *ipnstate.Status) error {
// Generate a separate tailnet-lock key just for the credential signature.
// We use the free-form meta strings to mark a little bit of metadata about this
// key.
priv := key.NewNLPrivate()
m := map[string]string{
"purpose": "pre-auth key",
"wrapper_stableid": string(st.Self.ID),
"wrapper_stableid": string(status.Self.ID),
"wrapper_createtime": fmt.Sprint(time.Now().Unix()),
}
if strings.HasPrefix(args[0], "tskey-auth-") && strings.Index(args[0][len("tskey-auth-"):], "-") > 0 {
if strings.HasPrefix(keyStr, "tskey-auth-") && strings.Index(keyStr[len("tskey-auth-"):], "-") > 0 {
// We don't want to accidentally embed the nonce part of the authkey in
// the event the format changes. As such, we make sure its in the format we
// expect (tskey-auth-<stableID, inc CNTRL suffix>-nonce) before we parse
// out and embed the stableID.
s := strings.TrimPrefix(args[0], "tskey-auth-")
s := strings.TrimPrefix(keyStr, "tskey-auth-")
m["authkey_stableid"] = s[:strings.Index(s, "-")]
}
k := tka.Key{
@ -681,7 +691,7 @@ func runTskeyWrapCmd(ctx context.Context, args []string) error {
Meta: m,
}
wrapped, err := localClient.NetworkLockWrapPreauthKey(ctx, args[0], priv)
wrapped, err := localClient.NetworkLockWrapPreauthKey(ctx, keyStr, priv)
if err != nil {
return fmt.Errorf("wrapping failed: %w", err)
}
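
What the format check above guards: the stable ID can be pulled out of a `tskey-auth-<stableID>-<nonce>` key without ever touching the nonce. A small sketch with a made-up key value:
```
package main

import (
	"fmt"
	"strings"
)

// authKeyStableID extracts the stable ID from a pre-auth key of the form
// "tskey-auth-<stableID>-<nonce>", reporting false for anything else.
func authKeyStableID(keyStr string) (string, bool) {
	const prefix = "tskey-auth-"
	if !strings.HasPrefix(keyStr, prefix) {
		return "", false
	}
	rest := strings.TrimPrefix(keyStr, prefix)
	i := strings.Index(rest, "-")
	if i <= 0 {
		return "", false
	}
	return rest[:i], true
}

func main() {
	// The key below is a made-up example, not a real Tailscale key.
	id, ok := authKeyStableID("tskey-auth-kExample123CNTRL-abcdef0123456789")
	fmt.Println(id, ok) // kExample123CNTRL true
}
```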

View File

@ -33,7 +33,7 @@ var serveCmd = newServeCommand(&serveEnv{lc: &localClient})
func newServeCommand(e *serveEnv) *ffcli.Command {
return &ffcli.Command{
Name: "serve",
ShortHelp: "[ALPHA] Serve from your Tailscale node",
ShortHelp: "Serve content and local servers",
ShortUsage: strings.TrimSpace(`
serve https:<port> <mount-point> <source> [off]
serve tcp:<port> tcp://localhost:<local-port> [off]
@ -41,11 +41,11 @@ serve https:<port> <mount-point> <source> [off]
serve status [--json]
`),
LongHelp: strings.TrimSpace(`
*** ALPHA; all of this is subject to change ***
*** BETA; all of this is subject to change ***
The 'tailscale serve' set of commands allows you to serve
content and local servers from your Tailscale node to
your tailnet.
You can also choose to enable the Tailscale Funnel with:
'tailscale funnel on'. Funnel allows you to publish
@ -66,10 +66,12 @@ EXAMPLES
- To serve simple static text:
$ tailscale serve https:8080 / text:"Hello, world!"
- To forward raw TCP packets to a local TCP server on port 5432:
- To forward incoming TCP connections on port 2222 to a local TCP server on
port 22 (e.g. to run OpenSSH in parallel with Tailscale SSH):
$ tailscale serve tcp:2222 tcp://localhost:22
- To forward raw, TLS-terminated TCP packets to a local TCP server on port 80:
- To accept TCP TLS connections (terminated within tailscaled) proxied to a
local plaintext server on port 80:
$ tailscale serve tls-terminated-tcp:443 tcp://localhost:80
`),
Exec: e.runServe,
@ -451,6 +453,7 @@ func expandProxyTarget(source string) (string, error) {
if u.Port() != "" {
url += ":" + u.Port()
}
url += u.Path
return url, nil
}
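
What the added `url += u.Path` line changes, reduced to a few standard-library lines (the rest of expandProxyTarget is not reproduced here): the target's path now survives expansion instead of being dropped.
```
package main

import (
	"fmt"
	"net/url"
)

func main() {
	u, err := url.Parse("http://127.0.0.1:3000/foo/bar")
	if err != nil {
		panic(err)
	}
	target := u.Scheme + "://" + u.Hostname()
	if u.Port() != "" {
		target += ":" + u.Port()
	}
	target += u.Path // previously omitted, so "/foo/bar" was lost
	fmt.Println(target) // http://127.0.0.1:3000/foo/bar
}
```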

View File

@ -262,6 +262,18 @@ func TestServeConfigMutations(t *testing.T) {
},
},
})
add(step{reset: true})
add(step{ // support path in proxy
command: cmd("https / http://127.0.0.1:3000/foo/bar"),
want: &ipn.ServeConfig{
TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}},
Web: map[ipn.HostPort]*ipn.WebServerConfig{
"foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{
"/": {Proxy: "http://127.0.0.1:3000/foo/bar"},
}},
},
},
})
// tcp
add(step{reset: true})

View File

@ -212,7 +212,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
tailscale.com/ipn/ipnstate from tailscale.com/control/controlclient+
tailscale.com/ipn/localapi from tailscale.com/ipn/ipnserver
tailscale.com/ipn/policy from tailscale.com/ipn/ipnlocal
tailscale.com/ipn/store from tailscale.com/cmd/tailscaled
tailscale.com/ipn/store from tailscale.com/cmd/tailscaled+
L tailscale.com/ipn/store/awsstore from tailscale.com/ipn/store
L tailscale.com/ipn/store/kubestore from tailscale.com/ipn/store
tailscale.com/ipn/store/mem from tailscale.com/ipn/store+

View File

@ -1 +1 @@
db4dc9046c93dde2c0e534ca7d529bd690ad09c9
ddff070c02790cb571006e820e58cce9627569cf

View File

@ -31,10 +31,13 @@ import (
"time"
"golang.org/x/crypto/acme"
"tailscale.com/atomicfile"
"tailscale.com/envknob"
"tailscale.com/hostinfo"
"tailscale.com/ipn"
"tailscale.com/ipn/ipnstate"
"tailscale.com/ipn/store"
"tailscale.com/ipn/store/mem"
"tailscale.com/types/logger"
"tailscale.com/version"
"tailscale.com/version/distro"
@ -82,11 +85,6 @@ func (b *LocalBackend) GetCertPEM(ctx context.Context, domain string) (*TLSCertK
return nil, errors.New("invalid domain")
}
logf := logger.WithPrefix(b.logf, fmt.Sprintf("cert(%q): ", domain))
dir, err := b.certDir()
if err != nil {
logf("failed to get certDir: %v", err)
return nil, err
}
now := time.Now()
traceACME := func(v any) {
if !acmeDebug() {
@ -96,17 +94,22 @@ func (b *LocalBackend) GetCertPEM(ctx context.Context, domain string) (*TLSCertK
log.Printf("acme %T: %s", v, j)
}
if pair, err := b.getCertPEMCached(dir, domain, now); err == nil {
cs, err := b.getCertStore()
if err != nil {
return nil, err
}
if pair, err := getCertPEMCached(cs, domain, now); err == nil {
future := now.AddDate(0, 0, 14)
if b.shouldStartDomainRenewal(dir, domain, future) {
if b.shouldStartDomainRenewal(cs, domain, future) {
logf("starting async renewal")
// Start renewal in the background.
go b.getCertPEM(context.Background(), logf, traceACME, dir, domain, future)
go b.getCertPEM(context.Background(), cs, logf, traceACME, domain, future)
}
return pair, nil
}
pair, err := b.getCertPEM(ctx, logf, traceACME, dir, domain, now)
pair, err := b.getCertPEM(ctx, cs, logf, traceACME, domain, now)
if err != nil {
logf("getCertPEM: %v", err)
return nil, err
@ -114,7 +117,7 @@ func (b *LocalBackend) GetCertPEM(ctx context.Context, domain string) (*TLSCertK
return pair, nil
}
func (b *LocalBackend) shouldStartDomainRenewal(dir, domain string, future time.Time) bool {
func (b *LocalBackend) shouldStartDomainRenewal(cs certStore, domain string, future time.Time) bool {
renewMu.Lock()
defer renewMu.Unlock()
now := time.Now()
@ -124,7 +127,7 @@ func (b *LocalBackend) shouldStartDomainRenewal(dir, domain string, future time.
return false
}
lastRenewCheck[domain] = now
_, err := b.getCertPEMCached(dir, domain, future)
_, err := getCertPEMCached(cs, domain, future)
return errors.Is(err, errCertExpired)
}
@ -140,15 +143,32 @@ type certStore interface {
WriteCert(domain string, cert []byte) error
// WriteKey writes the key for domain.
WriteKey(domain string, key []byte) error
// ACMEKey returns the value previously stored via WriteACMEKey.
// It is a PEM encoded ECDSA key.
ACMEKey() ([]byte, error)
// WriteACMEKey stores the provided PEM encoded ECDSA key.
WriteACMEKey([]byte) error
}
var errCertExpired = errors.New("cert expired")
func (b *LocalBackend) getCertStore(dir string) certStore {
if hostinfo.GetEnvType() == hostinfo.Kubernetes && dir == "/tmp" {
return certStateStore{StateStore: b.store}
func (b *LocalBackend) getCertStore() (certStore, error) {
switch b.store.(type) {
case *store.FileStore:
case *mem.Store:
default:
if hostinfo.GetEnvType() == hostinfo.Kubernetes {
// We're running in Kubernetes with a custom StateStore,
// use that instead of the cert directory.
// TODO(maisem): expand this to other environments?
return certStateStore{StateStore: b.store}, nil
}
}
return certFileStore{dir: dir}
dir, err := b.certDir()
if err != nil {
return nil, err
}
return certFileStore{dir: dir}, nil
}
// certFileStore implements certStore by storing the cert & key files in the named directory.
@ -160,6 +180,25 @@ type certFileStore struct {
testRoots *x509.CertPool
}
const acmePEMName = "acme-account.key.pem"
func (f certFileStore) ACMEKey() ([]byte, error) {
pemName := filepath.Join(f.dir, acmePEMName)
v, err := os.ReadFile(pemName)
if err != nil {
if os.IsNotExist(err) {
return nil, ipn.ErrStateNotExist
}
return nil, err
}
return v, nil
}
func (f certFileStore) WriteACMEKey(b []byte) error {
pemName := filepath.Join(f.dir, acmePEMName)
return atomicfile.WriteFile(pemName, b, 0600)
}
func (f certFileStore) Read(domain string, now time.Time) (*TLSCertKeyPair, error) {
certPEM, err := os.ReadFile(certFile(f.dir, domain))
if err != nil {
@ -182,11 +221,11 @@ func (f certFileStore) Read(domain string, now time.Time) (*TLSCertKeyPair, erro
}
func (f certFileStore) WriteCert(domain string, cert []byte) error {
return os.WriteFile(certFile(f.dir, domain), cert, 0644)
return atomicfile.WriteFile(certFile(f.dir, domain), cert, 0644)
}
func (f certFileStore) WriteKey(domain string, key []byte) error {
return os.WriteFile(keyFile(f.dir, domain), key, 0600)
return atomicfile.WriteFile(keyFile(f.dir, domain), key, 0600)
}
// certStateStore implements certStore by storing the cert & key files in an ipn.StateStore.
@ -221,6 +260,14 @@ func (s certStateStore) WriteKey(domain string, key []byte) error {
return s.WriteState(ipn.StateKey(domain+".key"), key)
}
func (s certStateStore) ACMEKey() ([]byte, error) {
return s.ReadState(ipn.StateKey(acmePEMName))
}
func (s certStateStore) WriteACMEKey(key []byte) error {
return s.WriteState(ipn.StateKey(acmePEMName), key)
}
// TLSCertKeyPair is a TLS public and private key, and whether they were obtained
// from cache or freshly obtained.
type TLSCertKeyPair struct {
@ -236,26 +283,26 @@ func certFile(dir, domain string) string { return filepath.Join(dir, domain+".cr
// domain exists on disk in dir that is valid at the provided now time.
// If the keypair is expired, it returns errCertExpired.
// If the keypair doesn't exist, it returns ipn.ErrStateNotExist.
func (b *LocalBackend) getCertPEMCached(dir, domain string, now time.Time) (p *TLSCertKeyPair, err error) {
func getCertPEMCached(cs certStore, domain string, now time.Time) (p *TLSCertKeyPair, err error) {
if !validLookingCertDomain(domain) {
// Before we read files from disk using it, validate it's halfway
// reasonable looking.
return nil, fmt.Errorf("invalid domain %q", domain)
}
return b.getCertStore(dir).Read(domain, now)
return cs.Read(domain, now)
}
func (b *LocalBackend) getCertPEM(ctx context.Context, logf logger.Logf, traceACME func(any), dir, domain string, now time.Time) (*TLSCertKeyPair, error) {
func (b *LocalBackend) getCertPEM(ctx context.Context, cs certStore, logf logger.Logf, traceACME func(any), domain string, now time.Time) (*TLSCertKeyPair, error) {
acmeMu.Lock()
defer acmeMu.Unlock()
if p, err := b.getCertPEMCached(dir, domain, now); err == nil {
if p, err := getCertPEMCached(cs, domain, now); err == nil {
return p, nil
} else if !errors.Is(err, ipn.ErrStateNotExist) && !errors.Is(err, errCertExpired) {
return nil, err
}
key, err := acmeKey(dir)
key, err := acmeKey(cs)
if err != nil {
return nil, fmt.Errorf("acmeKey: %w", err)
}
@ -366,8 +413,7 @@ func (b *LocalBackend) getCertPEM(ctx context.Context, logf logger.Logf, traceAC
if err := encodeECDSAKey(&privPEM, certPrivKey); err != nil {
return nil, err
}
certStore := b.getCertStore(dir)
if err := certStore.WriteKey(domain, privPEM.Bytes()); err != nil {
if err := cs.WriteKey(domain, privPEM.Bytes()); err != nil {
return nil, err
}
@ -390,7 +436,7 @@ func (b *LocalBackend) getCertPEM(ctx context.Context, logf logger.Logf, traceAC
return nil, err
}
}
if err := certStore.WriteCert(domain, certPEM.Bytes()); err != nil {
if err := cs.WriteCert(domain, certPEM.Bytes()); err != nil {
return nil, err
}
@ -444,14 +490,15 @@ func parsePrivateKey(der []byte) (crypto.Signer, error) {
return nil, errors.New("acme/autocert: failed to parse private key")
}
func acmeKey(dir string) (crypto.Signer, error) {
pemName := filepath.Join(dir, "acme-account.key.pem")
if v, err := os.ReadFile(pemName); err == nil {
func acmeKey(cs certStore) (crypto.Signer, error) {
if v, err := cs.ACMEKey(); err == nil {
priv, _ := pem.Decode(v)
if priv == nil || !strings.Contains(priv.Type, "PRIVATE") {
return nil, errors.New("acme/autocert: invalid account key found in cache")
}
return parsePrivateKey(priv.Bytes)
} else if err != nil && !errors.Is(err, ipn.ErrStateNotExist) {
return nil, err
}
privKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
@ -462,7 +509,7 @@ func acmeKey(dir string) (crypto.Signer, error) {
if err := encodeECDSAKey(&pemBuf, privKey); err != nil {
return nil, err
}
if err := os.WriteFile(pemName, pemBuf.Bytes(), 0600); err != nil {
if err := cs.WriteACMEKey(pemBuf.Bytes()); err != nil {
return nil, err
}
return privKey, nil
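
encodeECDSAKey is referenced in this hunk but not shown. A minimal sketch of what a helper with that shape typically looks like, using only the standard library; this is an assumed reconstruction, not the actual implementation.
```
package example

import (
	"bytes"
	"crypto/ecdsa"
	"crypto/x509"
	"encoding/pem"
)

// encodeECDSAKey PEM-encodes an ECDSA private key into buf as an
// "EC PRIVATE KEY" block (hypothetical reconstruction).
func encodeECDSAKey(buf *bytes.Buffer, priv *ecdsa.PrivateKey) error {
	der, err := x509.MarshalECPrivateKey(priv)
	if err != nil {
		return err
	}
	return pem.Encode(buf, &pem.Block{Type: "EC PRIVATE KEY", Bytes: der})
}
```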

View File

@ -298,7 +298,7 @@ func NewLocalBackend(logf logger.Logf, logid string, store ipn.StateStore, diale
statsLogf: logger.LogOnChange(logf, 5*time.Minute, time.Now),
e: e,
pm: pm,
store: pm.Store(),
store: store,
dialer: dialer,
backendLogID: logid,
state: ipn.NoState,
@ -2530,6 +2530,9 @@ func (b *LocalBackend) checkPrefsLocked(p *ipn.Prefs) error {
if err := b.checkExitNodePrefsLocked(p); err != nil {
errs = append(errs, err)
}
if err := b.checkFunnelEnabledLocked(p); err != nil {
errs = append(errs, err)
}
return multierr.New(errs...)
}
@ -2614,6 +2617,13 @@ func (b *LocalBackend) checkExitNodePrefsLocked(p *ipn.Prefs) error {
return nil
}
func (b *LocalBackend) checkFunnelEnabledLocked(p *ipn.Prefs) error {
if p.ShieldsUp && b.serveConfig.IsFunnelOn() {
return errors.New("Cannot enable shields-up when Funnel is enabled.")
}
return nil
}
func (b *LocalBackend) EditPrefs(mp *ipn.MaskedPrefs) (ipn.PrefsView, error) {
b.mu.Lock()
if mp.EggSet {

View File

@ -218,6 +218,11 @@ func (b *LocalBackend) SetServeConfig(config *ipn.ServeConfig) error {
b.mu.Lock()
defer b.mu.Unlock()
prefs := b.pm.CurrentPrefs()
if config.IsFunnelOn() && prefs.ShieldsUp() {
return errors.New("Unable to turn on Funnel while shields-up is enabled")
}
nm := b.netMap
if nm == nil {
return errors.New("netMap is nil")
@ -439,18 +444,26 @@ func (b *LocalBackend) proxyHandlerForBackend(backend string) (*httputil.Reverse
if err != nil {
return nil, fmt.Errorf("invalid url %s: %w", targetURL, err)
}
rp := httputil.NewSingleHostReverseProxy(u)
rp.Transport = &http.Transport{
DialContext: b.dialer.SystemDial,
TLSClientConfig: &tls.Config{
InsecureSkipVerify: insecure,
rp := &httputil.ReverseProxy{
Rewrite: func(r *httputil.ProxyRequest) {
r.SetURL(u)
r.Out.Host = r.In.Host
if c, ok := r.Out.Context().Value(serveHTTPContextKey{}).(*serveHTTPContext); ok {
r.Out.Header.Set("X-Forwarded-For", c.SrcAddr.Addr().String())
}
},
Transport: &http.Transport{
DialContext: b.dialer.SystemDial,
TLSClientConfig: &tls.Config{
InsecureSkipVerify: insecure,
},
// Values for the following parameters have been copied from http.DefaultTransport.
ForceAttemptHTTP2: true,
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
},
// Values for the following parameters have been copied from http.DefaultTransport.
ForceAttemptHTTP2: true,
MaxIdleConns: 100,
IdleConnTimeout: 90 * time.Second,
TLSHandshakeTimeout: 10 * time.Second,
ExpectContinueTimeout: 1 * time.Second,
}
return rp, nil
}
@ -476,7 +489,12 @@ func (b *LocalBackend) serveWebHandler(w http.ResponseWriter, r *http.Request) {
http.Error(w, "unknown proxy destination", http.StatusInternalServerError)
return
}
p.(http.Handler).ServeHTTP(w, r)
h := p.(http.Handler)
// Trim the mount point from the URL path before proxying. (#6571)
if r.URL.Path != "/" {
h = http.StripPrefix(strings.TrimSuffix(mountPoint, "/"), h)
}
h.ServeHTTP(w, r)
return
}
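
The Rewrite-based proxy above depends on serveHTTPContext values stored on the request earlier in the handler chain, which this hunk doesn't show. A self-contained sketch of the same shape, with the context key and source address as stand-ins; the httputil API is the standard library's (Go 1.20+).
```
package example

import (
	"net/http/httputil"
	"net/netip"
	"net/url"
)

type serveHTTPContextKey struct{}

type serveHTTPContext struct {
	SrcAddr netip.AddrPort // the peer's real address, recorded at accept time
}

// newProxy builds a reverse proxy that preserves the client's Host header
// and reports the true source address in X-Forwarded-For.
func newProxy(target *url.URL) *httputil.ReverseProxy {
	return &httputil.ReverseProxy{
		Rewrite: func(r *httputil.ProxyRequest) {
			r.SetURL(target)
			r.Out.Host = r.In.Host
			if c, ok := r.Out.Context().Value(serveHTTPContextKey{}).(*serveHTTPContext); ok {
				r.Out.Header.Set("X-Forwarded-For", c.SrcAddr.Addr().String())
			}
		},
	}
}
```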

View File

@ -163,6 +163,12 @@ func (sc *ServeConfig) IsServingWeb(port uint16) bool {
return sc.TCP[port].HTTPS
}
// IsFunnelOn checks if ServeConfig is currently allowing
// funnel traffic for any host:port.
//
// View version of ServeConfig.IsFunnelOn.
func (v ServeConfigView) IsFunnelOn() bool { return v.ж.IsFunnelOn() }
// IsFunnelOn checks if ServeConfig is currently allowing
// funnel traffic for any host:port.
func (sc *ServeConfig) IsFunnelOn() bool {
@ -180,7 +186,7 @@ func (sc *ServeConfig) IsFunnelOn() bool {
// CheckFunnelAccess checks whether Funnel access is allowed for the given node
// and port.
// It checks:
// 1. an invite was used to join the Funnel alpha
// 1. Funnel is enabled on the Tailnet
// 2. HTTPS is enabled on the Tailnet
// 3. the node has the "funnel" nodeAttr
// 4. the port is allowed for Funnel
@ -190,7 +196,7 @@ func (sc *ServeConfig) IsFunnelOn() bool {
// Funnel.
func CheckFunnelAccess(port uint16, nodeAttrs []string) error {
if slices.Contains(nodeAttrs, tailcfg.CapabilityWarnFunnelNoInvite) {
return errors.New("Funnel not available; an invite is required to join the alpha. See https://tailscale.com/s/no-funnel.")
return errors.New("Funnel not enabled; See https://tailscale.com/s/no-funnel.")
}
if slices.Contains(nodeAttrs, tailcfg.CapabilityWarnFunnelNoHTTPS) {
return errors.New("Funnel not available; HTTPS must be enabled. See https://tailscale.com/s/https.")

View File

@ -153,11 +153,9 @@ func LocalAddresses() (regular, loopback []netip.Addr, err error) {
if len(regular4) == 0 && len(regular6) == 0 {
// if we have no usable IP addresses then be willing to accept
// addresses we otherwise wouldn't, like:
// + 169.254.x.x (AWS Lambda uses NAT with these)
// + 169.254.x.x (AWS Lambda and Azure App Services use NAT with these)
// + IPv6 ULA (Google Cloud Run uses these with address translation)
if hostinfo.GetEnvType() == hostinfo.AWSLambda {
regular4 = linklocal4
}
regular4 = linklocal4
regular6 = ula6
}
regular = append(regular4, regular6...)
@ -645,7 +643,14 @@ func isUsableV4(ip netip.Addr) bool {
return false
}
if ip.IsLinkLocalUnicast() {
return hostinfo.GetEnvType() == hostinfo.AWSLambda
switch hostinfo.GetEnvType() {
case hostinfo.AWSLambda:
return true
case hostinfo.AzureAppService:
return true
default:
return false
}
}
return true
}
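
A quick demonstration of the address test driving this logic, using net/netip only; the environment detection (hostinfo.GetEnvType) is Tailscale-specific and not reproduced here.
```
package main

import (
	"fmt"
	"net/netip"
)

func main() {
	for _, s := range []string{"172.16.1.3", "169.254.129.3"} {
		ip := netip.MustParseAddr(s)
		// 169.254.0.0/16 is link-local and is only treated as usable in
		// environments known to NAT through it (AWS Lambda, Azure App Service).
		fmt.Printf("%s link-local=%v\n", ip, ip.IsLinkLocalUnicast())
	}
}
```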

View File

@ -25,7 +25,7 @@ type sockStatCounters struct {
txBytes, rxBytes atomic.Uint64
rxBytesByInterface, txBytesByInterface map[int]*atomic.Uint64
txBytesMetric, rxBytesMetric *clientmetric.Metric
txBytesMetric, rxBytesMetric, txBytesCellularMetric, rxBytesCellularMetric *clientmetric.Metric
// Validate counts for TCP sockets by using the TCP_CONNECTION_INFO
// getsockopt. We get current counts, as well as save final values when
@ -65,10 +65,12 @@ func withSockStats(ctx context.Context, label Label) context.Context {
counters, ok := sockStats.countersByLabel[label]
if !ok {
counters = &sockStatCounters{
rxBytesByInterface: make(map[int]*atomic.Uint64),
txBytesByInterface: make(map[int]*atomic.Uint64),
txBytesMetric: clientmetric.NewCounter(fmt.Sprintf("sockstats_tx_bytes_%s", label)),
rxBytesMetric: clientmetric.NewCounter(fmt.Sprintf("sockstats_rx_bytes_%s", label)),
rxBytesByInterface: make(map[int]*atomic.Uint64),
txBytesByInterface: make(map[int]*atomic.Uint64),
txBytesMetric: clientmetric.NewCounter(fmt.Sprintf("sockstats_tx_bytes_%s", label)),
rxBytesMetric: clientmetric.NewCounter(fmt.Sprintf("sockstats_rx_bytes_%s", label)),
txBytesCellularMetric: clientmetric.NewCounter(fmt.Sprintf("sockstats_tx_bytes_cellular_%s", label)),
rxBytesCellularMetric: clientmetric.NewCounter(fmt.Sprintf("sockstats_rx_bytes_cellular_%s", label)),
}
// We might be called before setLinkMonitor has been called (and we've
@ -119,6 +121,7 @@ func withSockStats(ctx context.Context, label Label) context.Context {
}
if sockStats.currentInterfaceCellular.Load() {
sockStats.rxBytesCellularMetric.Add(int64(n))
counters.rxBytesCellularMetric.Add(int64(n))
}
}
didWrite := func(n int) {
@ -132,6 +135,7 @@ func withSockStats(ctx context.Context, label Label) context.Context {
}
if sockStats.currentInterfaceCellular.Load() {
sockStats.txBytesCellularMetric.Add(int64(n))
counters.txBytesCellularMetric.Add(int64(n))
}
}
willOverwrite := func(trace *net.SockTrace) {

View File

@ -235,6 +235,7 @@ func beIncubator(args []string) error {
if err == nil && sessionCloser != nil {
defer sessionCloser()
}
var groupIDs []int
for _, g := range strings.Split(ia.groups, ",") {
gid, err := strconv.ParseInt(g, 10, 32)
@ -244,22 +245,10 @@ func beIncubator(args []string) error {
groupIDs = append(groupIDs, int(gid))
}
if err := setGroups(groupIDs); err != nil {
if err := dropPrivileges(logf, int(ia.uid), ia.gid, groupIDs); err != nil {
return err
}
if egid := os.Getegid(); egid != ia.gid {
if err := syscall.Setgid(int(ia.gid)); err != nil {
logf(err.Error())
os.Exit(1)
}
}
if euid != ia.uid {
// Switch users if required before starting the desired process.
if err := syscall.Setuid(int(ia.uid)); err != nil {
logf(err.Error())
os.Exit(1)
}
}
if ia.isSFTP {
logf("handling sftp")
@ -304,6 +293,108 @@ func beIncubator(args []string) error {
return err
}
// TODO(andrew-d): verify that this works in more configurations before
// enabling by default.
const assertDropPrivileges = false
// dropPrivileges contains all the logic for dropping privileges to a different
// UID, GID, and set of supplementary groups. This function is
// security-sensitive and ordering-dependent; please be very cautious if/when
// refactoring.
//
// WARNING: if you change this function, you *MUST* run the TestDropPrivileges
// test in this package as root on at least Linux, FreeBSD and Darwin. This can
// be done by running:
//
// go test -c ./ssh/tailssh/ && sudo ./tailssh.test -test.v -test.run TestDropPrivileges
func dropPrivileges(logf logger.Logf, wantUid, wantGid int, supplementaryGroups []int) error {
fatalf := func(format string, args ...any) {
logf(format, args...)
os.Exit(1)
}
euid := os.Geteuid()
egid := os.Getegid()
if runtime.GOOS == "darwin" || runtime.GOOS == "freebsd" {
// On FreeBSD and Darwin, the first entry returned from the
// getgroups(2) syscall is the egid, and changing it with
// setgroups(2) changes the egid of the process. This is
// technically a violation of the POSIX standard; see the
// following article for more detail:
// https://www.usenix.org/system/files/login/articles/325-tsafrir.pdf
//
// In this case, we add an entry at the beginning of the
// groupIDs list containing the expected gid if it's not
// already there, which modifies the egid and additional groups
// as one unit.
if len(supplementaryGroups) == 0 || supplementaryGroups[0] != wantGid {
supplementaryGroups = append([]int{wantGid}, supplementaryGroups...)
}
}
if err := setGroups(supplementaryGroups); err != nil {
return err
}
if egid != wantGid {
// On FreeBSD and Darwin, we may have already called the
// equivalent of setegid(wantGid) via the call to setGroups,
// above. However, per the manpage, setgid(getegid()) is an
// allowed operation regardless of privilege level.
//
// FreeBSD:
// The setgid() system call is permitted if the specified ID
// is equal to the real group ID or the effective group ID
// of the process, or if the effective user ID is that of
// the super user.
//
// Darwin:
// The setgid() function is permitted if the effective
// user ID is that of the super user, or if the specified
// group ID is the same as the effective group ID. If
// not, but the specified group ID is the same as the real
// group ID, setgid() will set the effective group ID to
// the real group ID.
if err := syscall.Setgid(wantGid); err != nil {
fatalf("Setgid(%d): %v", wantGid, err)
}
}
if euid != wantUid {
// Switch users if required before starting the desired process.
if err := syscall.Setuid(wantUid); err != nil {
fatalf("Setuid(%d): %v", wantUid, err)
}
}
// If we changed either the UID or GID, defensively assert that we
// cannot reset them back to our original values, and that the
// current egid/euid are the expected values after we change
// everything; if not, we exit the process.
if assertDropPrivileges {
if egid != wantGid {
if err := syscall.Setegid(egid); err == nil {
fatalf("unexpectedly able to set egid back to %d", egid)
}
}
if euid != wantUid {
if err := syscall.Seteuid(euid); err == nil {
fatalf("unexpectedly able to set euid back to %d", euid)
}
}
if got := os.Getegid(); got != wantGid {
fatalf("got egid=%d, want %d", got, wantGid)
}
if got := os.Geteuid(); got != wantUid {
fatalf("got euid=%d, want %d", got, wantUid)
}
// TODO(andrew-d): assert that our supplementary groups are correct
}
return nil
}
// launchProcess launches an incubator process for the provided session.
// It is responsible for configuring the process execution environment.
// The caller can wait for the process to exit by calling cmd.Wait().

View File

@ -0,0 +1,295 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build linux || darwin || freebsd || openbsd || netbsd || dragonfly
package tailssh
import (
"encoding/json"
"errors"
"os"
"os/exec"
"os/user"
"path/filepath"
"reflect"
"regexp"
"runtime"
"strconv"
"syscall"
"testing"
"golang.org/x/exp/slices"
"tailscale.com/types/logger"
)
func TestDropPrivileges(t *testing.T) {
type SubprocInput struct {
UID int
GID int
AdditionalGroups []int
}
type SubprocOutput struct {
UID int
GID int
EUID int
EGID int
AdditionalGroups []int
}
if v := os.Getenv("TS_TEST_DROP_PRIVILEGES_CHILD"); v != "" {
t.Logf("in child process")
var input SubprocInput
if err := json.Unmarshal([]byte(v), &input); err != nil {
t.Fatal(err)
}
// Get a handle to our provided JSON file before dropping privs.
f := os.NewFile(3, "out.json")
// We're in our subprocess; actually drop privileges now.
dropPrivileges(t.Logf, input.UID, input.GID, input.AdditionalGroups)
additional, _ := syscall.Getgroups()
// Print our IDs
json.NewEncoder(f).Encode(SubprocOutput{
UID: os.Getuid(),
GID: os.Getgid(),
EUID: os.Geteuid(),
EGID: os.Getegid(),
AdditionalGroups: additional,
})
// Close output file to ensure that it's flushed to disk before we exit
f.Close()
// Always exit the process now that we have a different
// UID/GID/etc.; we don't want the Go test framework to try and
// clean anything up, since it might no longer have access.
os.Exit(0)
}
if os.Getuid() != 0 {
t.Skip("test only works when run as root")
}
rerunSelf := func(t *testing.T, input SubprocInput) []byte {
fpath := filepath.Join(t.TempDir(), "out.json")
outf, err := os.Create(fpath)
if err != nil {
t.Fatal(err)
}
inputb, err := json.Marshal(input)
if err != nil {
t.Fatal(err)
}
cmd := exec.Command(os.Args[0], "-test.v", "-test.run", "^"+regexp.QuoteMeta(t.Name())+"$")
cmd.Env = append(os.Environ(), "TS_TEST_DROP_PRIVILEGES_CHILD="+string(inputb))
cmd.ExtraFiles = []*os.File{outf}
cmd.Stdout = logger.FuncWriter(logger.WithPrefix(t.Logf, "child: "))
cmd.Stderr = logger.FuncWriter(logger.WithPrefix(t.Logf, "child: "))
if err := cmd.Run(); err != nil {
t.Fatal(err)
}
outf.Close()
jj, err := os.ReadFile(fpath)
if err != nil {
t.Fatal(err)
}
return jj
}
// We want to ensure we're not colliding with existing users; find some
// unused UIDs and GIDs for the tests we run.
uid1 := findUnusedUID(t)
gid1 := findUnusedGID(t)
gid2 := findUnusedGID(t, gid1)
gid3 := findUnusedGID(t, gid1, gid2)
// For some tests, we want a UID/GID pair with the same numerical
// value; this finds one.
uidgid1 := findUnusedUIDGID(t, uid1, gid1, gid2, gid3)
t.Logf("uid1=%d gid1=%d gid2=%d gid3=%d uidgid1=%d",
uid1, gid1, gid2, gid3, uidgid1)
testCases := []struct {
name string
uid int
gid int
additionalGroups []int
}{
{
name: "all_different_values",
uid: uid1,
gid: gid1,
additionalGroups: []int{gid2, gid3},
},
{
name: "no_additional_groups",
uid: uid1,
gid: gid1,
additionalGroups: []int{},
},
// This is a regression test for the following bug, triggered
// on Darwin & FreeBSD:
// https://github.com/tailscale/tailscale/issues/7616
{
name: "same_values",
uid: uidgid1,
gid: uidgid1,
additionalGroups: []int{uidgid1},
},
}
for _, tt := range testCases {
t.Run(tt.name, func(t *testing.T) {
subprocOut := rerunSelf(t, SubprocInput{
UID: tt.uid,
GID: tt.gid,
AdditionalGroups: tt.additionalGroups,
})
var out SubprocOutput
if err := json.Unmarshal(subprocOut, &out); err != nil {
t.Logf("%s", subprocOut)
t.Fatal(err)
}
t.Logf("output: %+v", out)
if out.UID != tt.uid {
t.Errorf("got uid %d; want %d", out.UID, tt.uid)
}
if out.GID != tt.gid {
t.Errorf("got gid %d; want %d", out.GID, tt.gid)
}
if out.EUID != tt.uid {
t.Errorf("got euid %d; want %d", out.EUID, tt.uid)
}
if out.EGID != tt.gid {
t.Errorf("got egid %d; want %d", out.EGID, tt.gid)
}
// On FreeBSD and Darwin, the set of additional groups
// is prefixed with the egid; handle that case by
// modifying our expected set.
wantGroups := make(map[int]bool)
for _, id := range tt.additionalGroups {
wantGroups[id] = true
}
if runtime.GOOS == "darwin" || runtime.GOOS == "freebsd" {
wantGroups[tt.gid] = true
}
gotGroups := make(map[int]bool)
for _, id := range out.AdditionalGroups {
gotGroups[id] = true
}
if !reflect.DeepEqual(gotGroups, wantGroups) {
t.Errorf("got additional groups %+v; want %+v", gotGroups, wantGroups)
}
})
}
}
func findUnusedUID(t *testing.T, not ...int) int {
for i := 1000; i < 65535; i++ {
// Skip UIDs that might be valid
if maybeValidUID(i) {
continue
}
// Skip UIDs that we're avoiding
if slices.Contains(not, i) {
continue
}
// Not a valid UID, not one we're avoiding... all good!
return i
}
t.Fatalf("unable to find an unused UID")
return -1
}
func findUnusedGID(t *testing.T, not ...int) int {
for i := 1000; i < 65535; i++ {
if maybeValidGID(i) {
continue
}
// Skip GIDs that we're avoiding
if slices.Contains(not, i) {
continue
}
// Not a valid GID, not one we're avoiding... all good!
return i
}
t.Fatalf("unable to find an unused GID")
return -1
}
func findUnusedUIDGID(t *testing.T, not ...int) int {
for i := 1000; i < 65535; i++ {
if maybeValidUID(i) || maybeValidGID(i) {
continue
}
// Skip IDs that we're avoiding
if slices.Contains(not, i) {
continue
}
// Not a valid ID, not one we're avoiding... all good!
return i
}
t.Fatalf("unable to find an unused UID/GID pair")
return -1
}
func maybeValidUID(id int) bool {
_, err := user.LookupId(strconv.Itoa(id))
if err == nil {
return true
}
var u1 user.UnknownUserIdError
if errors.As(err, &u1) {
return false
}
var u2 user.UnknownUserError
if errors.As(err, &u2) {
return false
}
// Some other error; might be valid
return true
}
func maybeValidGID(id int) bool {
_, err := user.LookupGroupId(strconv.Itoa(id))
if err == nil {
return true
}
var u1 user.UnknownGroupIdError
if errors.As(err, &u1) {
return false
}
var u2 user.UnknownGroupError
if errors.As(err, &u2) {
return false
}
// Some other error; might be valid
return true
}

View File

@ -35,6 +35,7 @@ import (
"tailscale.com/ipn/ipnlocal"
"tailscale.com/logtail/backoff"
"tailscale.com/net/tsaddr"
"tailscale.com/net/tsdial"
"tailscale.com/tailcfg"
"tailscale.com/tempfork/gliderlabs/ssh"
"tailscale.com/types/logger"
@ -62,7 +63,7 @@ type ipnLocalBackend interface {
NetMap() *netmap.NetworkMap
WhoIs(ipp netip.AddrPort) (n *tailcfg.Node, u tailcfg.UserProfile, ok bool)
DoNoiseRequest(req *http.Request) (*http.Response, error)
TailscaleVarRoot() string
Dialer() *tsdial.Dialer
}
type server struct {
@ -77,11 +78,33 @@ type server struct {
// mu protects the following
mu sync.Mutex
httpc *http.Client // for calling out to peers.
activeConns map[*conn]bool // set; value is always true
fetchPublicKeysCache map[string]pubKeyCacheEntry // by https URL
shutdownCalled bool
}
// sessionRecordingClient returns an http.Client that uses srv.lb.Dialer() to
// dial connections. This is used to make requests to the session recording
// server to upload session recordings.
func (srv *server) sessionRecordingClient() *http.Client {
srv.mu.Lock()
defer srv.mu.Unlock()
if srv.httpc != nil {
return srv.httpc
}
tr := http.DefaultTransport.(*http.Transport).Clone()
tr.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
defer cancel()
return srv.lb.Dialer().UserDial(ctx, network, addr)
}
srv.httpc = &http.Client{
Transport: tr,
}
return srv.httpc
}
func (srv *server) now() time.Time {
if srv != nil && srv.timeNow != nil {
return srv.timeNow()
@ -987,12 +1010,6 @@ func (ss *sshSession) handleSSHAgentForwarding(s ssh.Session, lu *user.User) err
return nil
}
// recordSSH is a temporary dev knob to test the SSH recording
// functionality and support off-node streaming.
//
// TODO(bradfitz,maisem): move this to SSHPolicy.
var recordSSH = envknob.RegisterBool("TS_DEBUG_LOG_SSH")
// run is the entrypoint for a newly accepted SSH session.
//
// It handles ss once it's been accepted and determined
@ -1048,7 +1065,12 @@ func (ss *sshSession) run() {
var err error
rec, err = ss.startNewRecording()
if err != nil {
fmt.Fprintf(ss, "can't start new recording\r\n")
var uve userVisibleError
if errors.As(err, &uve) {
fmt.Fprintf(ss, "%s\r\n", uve)
} else {
fmt.Fprintf(ss, "can't start new recording\r\n")
}
ss.logf("startNewRecording: %v", err)
ss.Exit(1)
return
@ -1060,6 +1082,13 @@ func (ss *sshSession) run() {
err := ss.launchProcess()
if err != nil {
logf("start failed: %v", err.Error())
if errors.Is(err, context.Canceled) {
err := context.Cause(ss.ctx)
var uve userVisibleError
if errors.As(err, &uve) {
fmt.Fprintf(ss, "%s\r\n", uve)
}
}
ss.Exit(1)
return
}
@ -1125,12 +1154,19 @@ func (ss *sshSession) run() {
return
}
// recorders returns the list of recorders to use for this session.
// If the final action has a non-empty list of recorders, that list is
// returned. Otherwise, the list of recorders from the initial action
// is returned.
func (ss *sshSession) recorders() []netip.AddrPort {
if len(ss.conn.finalAction.Recorders) > 0 {
return ss.conn.finalAction.Recorders
}
return ss.conn.action0.Recorders
}
func (ss *sshSession) shouldRecord() bool {
// for now only record pty sessions
// TODO(bradfitz,maisem): make configurable on SSHPolicy and
// support recording non-pty stuff too.
_, _, isPtyReq := ss.Pty()
return recordSSH() && isPtyReq
return len(ss.recorders()) > 0
}
type sshConnInfo struct {
@ -1312,11 +1348,67 @@ func randBytes(n int) []byte {
return b
}
// CastHeader is the header of an asciinema file.
type CastHeader struct {
// Version is the asciinema file format version.
Version int `json:"version"`
// Width is the terminal width in characters.
// It is non-zero for Pty sessions.
Width int `json:"width"`
// Height is the terminal height in characters.
// It is non-zero for Pty sessions.
Height int `json:"height"`
// Timestamp is the unix timestamp of when the recording started.
Timestamp int64 `json:"timestamp"`
// Env is the environment variables of the session.
// Only "TERM" is set (2023-03-22).
Env map[string]string `json:"env"`
// Command is the command that was executed.
// Typically empty for shell sessions.
Command string `json:"command,omitempty"`
// Tailscale-specific fields:
// SrcNode is the FQDN of the node originating the connection.
// It is also the MagicDNS name for the node.
// It does not have a trailing dot.
// e.g. "host.tail-scale.ts.net"
SrcNode string `json:"srcNode"`
// SrcNodeID is the node ID of the node originating the connection.
SrcNodeID tailcfg.StableNodeID `json:"srcNodeID"`
// SrcNodeTags is the list of tags on the node originating the connection (if any).
SrcNodeTags []string `json:"srcNodeTags,omitempty"`
// SrcNodeUserID is the user ID of the node originating the connection (if not tagged).
SrcNodeUserID tailcfg.UserID `json:"srcNodeUserID,omitempty"` // if not tagged
// SrcNodeUser is the LoginName of the node originating the connection (if not tagged).
SrcNodeUser string `json:"srcNodeUser,omitempty"`
// SSHUser is the username as presented by the client.
SSHUser string `json:"sshUser"` // as presented by the client
// LocalUser is the effective username on the server.
LocalUser string `json:"localUser"`
}
// startNewRecording starts a new SSH session recording.
//
// It writes an asciinema file to
// $TAILSCALE_VAR_ROOT/ssh-sessions/ssh-session-<unixtime>-*.cast.
func (ss *sshSession) startNewRecording() (_ *recording, err error) {
recorders := ss.recorders()
if len(recorders) == 0 {
return nil, errors.New("no recorders configured")
}
recorder := recorders[0]
if len(recorders) > 1 {
ss.logf("warning: multiple recorders configured, using first one: %v", recorder)
}
var w ssh.Window
if ptyReq, _, isPtyReq := ss.Pty(); isPtyReq {
w = ptyReq.Window
@ -1332,39 +1424,59 @@ func (ss *sshSession) startNewRecording() (_ *recording, err error) {
ss: ss,
start: now,
}
varRoot := ss.conn.srv.lb.TailscaleVarRoot()
if varRoot == "" {
return nil, errors.New("no var root for recording storage")
}
dir := filepath.Join(varRoot, "ssh-sessions")
if err := os.MkdirAll(dir, 0700); err != nil {
pr, pw := io.Pipe()
// We want to use a background context for uploading and not ss.ctx.
// ss.ctx is closed when the session closes, but we don't want to break the upload at that time.
// Instead we want to wait for the session to close the writer when it finishes.
ctx := context.Background()
req, err := http.NewRequestWithContext(ctx, "POST", fmt.Sprintf("http://%s:%d/record", recorder.Addr(), recorder.Port()), pr)
if err != nil {
pr.Close()
pw.Close()
return nil, err
}
defer func() {
// We want to wait for the server to respond with 100 Continue to notify us
// that it's ready to receive data. We do this to block the session from
// starting until the server is ready to receive data.
// It also allows the server to reject the request before we start sending
// data.
req.Header.Set("Expect", "100-continue")
go func() {
defer pw.Close()
ss.logf("starting asciinema recording to %s", recorder)
hc := ss.conn.srv.sessionRecordingClient()
resp, err := hc.Do(req)
if err != nil {
rec.Close()
err := fmt.Errorf("recording: error sending recording: %w", err)
ss.logf("%v", err)
ss.cancelCtx(userVisibleError{
msg: "recording: error sending recording",
error: err,
})
return
}
defer resp.Body.Close()
defer ss.cancelCtx(errors.New("recording: done"))
if resp.StatusCode != http.StatusOK {
err := fmt.Errorf("recording: server responded with %s", resp.Status)
ss.logf("%v", err)
ss.cancelCtx(userVisibleError{
msg: "recording server responded with: " + resp.Status,
error: err,
})
}
}()
f, err := os.CreateTemp(dir, fmt.Sprintf("ssh-session-%v-*.cast", now.UnixNano()))
if err != nil {
return nil, err
}
rec.out = f
rec.out = pw
// {"version": 2, "width": 221, "height": 84, "timestamp": 1647146075, "env": {"SHELL": "/bin/bash", "TERM": "screen"}}
type CastHeader struct {
Version int `json:"version"`
Width int `json:"width"`
Height int `json:"height"`
Timestamp int64 `json:"timestamp"`
Env map[string]string `json:"env"`
}
j, err := json.Marshal(CastHeader{
ch := CastHeader{
Version: 2,
Width: w.Width,
Height: w.Height,
Timestamp: now.Unix(),
Command: strings.Join(ss.Command(), " "),
Env: map[string]string{
"TERM": term,
// TODO(bradfitz): anything else important?
@ -1376,15 +1488,29 @@ func (ss *sshSession) startNewRecording() (_ *recording, err error) {
// it. Then we can (1) make the cmd, (2) start the
// recording, (3) start the process.
},
})
SSHUser: ss.conn.info.sshUser,
LocalUser: ss.conn.localUser.Username,
SrcNode: strings.TrimSuffix(ss.conn.info.node.Name, "."),
SrcNodeID: ss.conn.info.node.StableID,
}
if !ss.conn.info.node.IsTagged() {
ch.SrcNodeUser = ss.conn.info.uprof.LoginName
ch.SrcNodeUserID = ss.conn.info.node.User
} else {
ch.SrcNodeTags = ss.conn.info.node.Tags
}
j, err := json.Marshal(ch)
if err != nil {
f.Close()
return nil, err
}
ss.logf("starting asciinema recording to %s", f.Name())
j = append(j, '\n')
if _, err := f.Write(j); err != nil {
f.Close()
if _, err := pw.Write(j); err != nil {
if errors.Is(err, io.ErrClosedPipe) && ss.ctx.Err() != nil {
// If we got an io.ErrClosedPipe, it's likely because
// the recording server closed the connection on us. Return
// the original context error instead.
return nil, context.Cause(ss.ctx)
}
return nil, err
}
return rec, nil
@ -1396,7 +1522,7 @@ type recording struct {
start time.Time
mu sync.Mutex // guards writes to, close of out
out *os.File // nil if closed
out io.WriteCloser
}
func (r *recording) Close() error {
@ -1415,10 +1541,17 @@ func (r *recording) Close() error {
// The dir should be "i" for input or "o" for output.
//
// If r is nil, it returns w unchanged.
//
// Currently (2023-03-21) we only record output, not input.
func (r *recording) writer(dir string, w io.Writer) io.Writer {
if r == nil {
return w
}
if dir == "i" {
// TODO: record input? Maybe not, since it might contain
// passwords.
return w
}
return &loggingWriter{r, dir, w}
}
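
For reference, the asciinema v2 framing used here is a single JSON header line followed by JSON-array event lines. A minimal sketch of writing such a header with encoding/json; the values are placeholders, and only the upstream asciinema fields are included (the Tailscale-specific CastHeader fields are documented in the struct above).
```
package example

import (
	"encoding/json"
	"io"
	"time"
)

// writeCastHeader writes the first line of an asciinema v2 recording: one
// JSON object terminated by a newline. Event records follow, one JSON array
// per line.
func writeCastHeader(w io.Writer, width, height int) error {
	hdr := map[string]any{
		"version":   2,
		"width":     width,
		"height":    height,
		"timestamp": time.Now().Unix(),
		"env":       map[string]string{"TERM": "xterm-256color"}, // placeholder value
	}
	j, err := json.Marshal(hdr)
	if err != nil {
		return err
	}
	j = append(j, '\n')
	_, err = w.Write(j)
	return err
}
```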

View File

@ -7,6 +7,7 @@ package tailssh
import (
"bytes"
"context"
"crypto/ed25519"
"crypto/rand"
"crypto/sha256"
@ -14,6 +15,7 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"net"
"net/http"
"net/http/httptest"
@ -236,6 +238,10 @@ var (
testSignerOnce sync.Once
)
func (ts *localState) Dialer() *tsdial.Dialer {
return nil
}
func (ts *localState) GetSSH_HostKeys() ([]gossh.Signer, error) {
testSignerOnce.Do(func() {
_, priv, err := ed25519.GenerateKey(rand.Reader)
@ -319,9 +325,213 @@ func newSSHRule(action *tailcfg.SSHAction) *tailcfg.SSHRule {
}
}
func TestSSHRecordingCancelsSessionsOnUploadFailure(t *testing.T) {
if runtime.GOOS != "linux" && runtime.GOOS != "darwin" {
t.Skipf("skipping on %q; only runs on linux and darwin", runtime.GOOS)
}
var handler http.HandlerFunc
recordingServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
handler(w, r)
}))
defer recordingServer.Close()
s := &server{
logf: t.Logf,
httpc: recordingServer.Client(),
lb: &localState{
sshEnabled: true,
matchingRule: newSSHRule(
&tailcfg.SSHAction{
Accept: true,
Recorders: []netip.AddrPort{
netip.MustParseAddrPort(recordingServer.Listener.Addr().String()),
},
},
),
},
}
defer s.Shutdown()
const sshUser = "alice"
cfg := &gossh.ClientConfig{
User: sshUser,
HostKeyCallback: gossh.InsecureIgnoreHostKey(),
}
tests := []struct {
name string
handler func(w http.ResponseWriter, r *http.Request)
sshCommand string
wantClientOutput string
clientOutputMustNotContain []string
}{
{
name: "upload-denied",
handler: func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusForbidden)
},
sshCommand: "echo hello",
wantClientOutput: "recording: server responded with 403 Forbidden\r\n",
clientOutputMustNotContain: []string{"hello"},
},
{
name: "upload-fails-after-starting",
handler: func(w http.ResponseWriter, r *http.Request) {
r.Body.Read(make([]byte, 1))
time.Sleep(100 * time.Millisecond)
w.WriteHeader(http.StatusInternalServerError)
},
sshCommand: "echo hello && sleep 1 && echo world",
wantClientOutput: "\r\n\r\nrecording server responded with: 500 Internal Server Error\r\n\r\n",
clientOutputMustNotContain: []string{"world"},
},
}
src, dst := must.Get(netip.ParseAddrPort("100.100.100.101:2231")), must.Get(netip.ParseAddrPort("100.100.100.102:22"))
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
tstest.Replace(t, &handler, tt.handler)
sc, dc := memnet.NewTCPConn(src, dst, 1024)
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
c, chans, reqs, err := gossh.NewClientConn(sc, sc.RemoteAddr().String(), cfg)
if err != nil {
t.Errorf("client: %v", err)
return
}
client := gossh.NewClient(c, chans, reqs)
defer client.Close()
session, err := client.NewSession()
if err != nil {
t.Errorf("client: %v", err)
return
}
defer session.Close()
t.Logf("client established session")
got, err := session.CombinedOutput(tt.sshCommand)
if err != nil {
t.Logf("client got: %q: %v", got, err)
} else {
t.Errorf("client did not get kicked out: %q", got)
}
gotStr := string(got)
if !strings.HasSuffix(gotStr, tt.wantClientOutput) {
t.Errorf("client got %q, want %q", got, tt.wantClientOutput)
}
for _, x := range tt.clientOutputMustNotContain {
if strings.Contains(gotStr, x) {
t.Errorf("client output must not contain %q", x)
}
}
}()
if err := s.HandleSSHConn(dc); err != nil {
t.Errorf("unexpected error: %v", err)
}
wg.Wait()
})
}
}
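
The test above switches the recording server's behavior per subtest by reassigning a shared handler variable behind a single httptest.Server. A self-contained sketch of that pattern, outside the tailssh test harness:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/http/httptest"
)

func main() {
	var handler http.HandlerFunc
	srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		handler(w, r) // delegate to whatever the current subtest installed
	}))
	defer srv.Close()

	for _, code := range []int{http.StatusForbidden, http.StatusInternalServerError} {
		code := code
		handler = func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(code) }
		resp, err := srv.Client().Get(srv.URL)
		if err != nil {
			panic(err)
		}
		io.Copy(io.Discard, resp.Body)
		resp.Body.Close()
		fmt.Println("got status:", resp.Status)
	}
}
```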
// TestSSHRecordingNonInteractive tests that the SSH server records the SSH session
// when the client is not interactive (i.e. no PTY).
// It starts a local SSH server and a recording server. The recording server
// records the SSH session and returns it to the test.
// The test then verifies that the recording has a valid CastHeader; it does not
// validate the contents of the recording.
func TestSSHRecordingNonInteractive(t *testing.T) {
if runtime.GOOS != "linux" && runtime.GOOS != "darwin" {
t.Skipf("skipping on %q; only runs on linux and darwin", runtime.GOOS)
}
var recording []byte
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
recordingServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
defer cancel()
var err error
recording, err = ioutil.ReadAll(r.Body)
if err != nil {
t.Error(err)
return
}
}))
defer recordingServer.Close()
s := &server{
logf: logger.Discard,
httpc: recordingServer.Client(),
lb: &localState{
sshEnabled: true,
matchingRule: newSSHRule(
&tailcfg.SSHAction{
Accept: true,
Recorders: []netip.AddrPort{
must.Get(netip.ParseAddrPort(recordingServer.Listener.Addr().String())),
},
},
),
},
}
defer s.Shutdown()
src, dst := must.Get(netip.ParseAddrPort("100.100.100.101:2231")), must.Get(netip.ParseAddrPort("100.100.100.102:22"))
sc, dc := memnet.NewTCPConn(src, dst, 1024)
const sshUser = "alice"
cfg := &gossh.ClientConfig{
User: sshUser,
HostKeyCallback: gossh.InsecureIgnoreHostKey(),
}
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
c, chans, reqs, err := gossh.NewClientConn(sc, sc.RemoteAddr().String(), cfg)
if err != nil {
t.Errorf("client: %v", err)
return
}
client := gossh.NewClient(c, chans, reqs)
defer client.Close()
session, err := client.NewSession()
if err != nil {
t.Errorf("client: %v", err)
return
}
defer session.Close()
t.Logf("client established session")
_, err = session.CombinedOutput("echo Ran echo!")
if err != nil {
t.Errorf("client: %v", err)
}
}()
if err := s.HandleSSHConn(dc); err != nil {
t.Errorf("unexpected error: %v", err)
}
wg.Wait()
<-ctx.Done() // wait for recording to finish
var ch CastHeader
if err := json.NewDecoder(bytes.NewReader(recording)).Decode(&ch); err != nil {
t.Fatal(err)
}
if ch.SSHUser != sshUser {
t.Errorf("SSHUser = %q; want %q", ch.SSHUser, sshUser)
}
if ch.Command != "echo Ran echo!" {
t.Errorf("Command = %q; want %q", ch.Command, "echo Ran echo!")
}
}
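
For reference, an asciinema v2 cast begins with one JSON header object followed by event lines, which is why the test above can decode the CastHeader straight from the recorded body. A small sketch using only the header fields whose JSON tags appear earlier in this diff; the sample body below is fabricated for illustration:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	recording := `{"version":2,"width":80,"height":24,"timestamp":1680000000,"env":{"TERM":"xterm"}}
[0.1,"o","Ran echo!\r\n"]`

	var hdr struct {
		Version   int               `json:"version"`
		Width     int               `json:"width"`
		Height    int               `json:"height"`
		Timestamp int64             `json:"timestamp"`
		Env       map[string]string `json:"env"`
	}
	// json.Decoder stops after the first complete JSON value, so the event
	// lines that follow the header are simply left unread.
	if err := json.NewDecoder(strings.NewReader(recording)).Decode(&hdr); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", hdr)
}
```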
func TestSSHAuthFlow(t *testing.T) {
if runtime.GOOS != "linux" {
t.Skip("Not running on Linux, skipping")
if runtime.GOOS != "linux" && runtime.GOOS != "darwin" {
t.Skipf("skipping on %q; only runs on linux and darwin", runtime.GOOS)
}
acceptRule := newSSHRule(&tailcfg.SSHAction{
Accept: true,
@ -539,7 +749,8 @@ func TestSSH(t *testing.T) {
node: &tailcfg.Node{},
uprof: tailcfg.UserProfile{},
}
sc.finalAction = &tailcfg.SSHAction{Accept: true}
sc.action0 = &tailcfg.SSHAction{Accept: true}
sc.finalAction = sc.action0
sc.Handler = func(s ssh.Session) {
sc.newSSHSession(s).run()


@ -3,7 +3,7 @@
package tailcfg
//go:generate go run tailscale.com/cmd/viewer --type=User,Node,Hostinfo,NetInfo,Login,DNSConfig,RegisterResponse,DERPRegion,DERPMap,DERPNode,SSHRule,SSHPrincipal,ControlDialPlan --clonefunc
//go:generate go run tailscale.com/cmd/viewer --type=User,Node,Hostinfo,NetInfo,Login,DNSConfig,RegisterResponse,DERPRegion,DERPMap,DERPNode,SSHRule,SSHAction,SSHPrincipal,ControlDialPlan --clonefunc
import (
"bytes"
@ -1822,7 +1822,8 @@ const (
// Funnel warning capabilities used for reporting errors to the user.
// CapabilityWarnFunnelNoInvite indicates an invite has not been accepted for the Funnel alpha.
// CapabilityWarnFunnelNoInvite indicates whether Funnel is enabled for the tailnet.
// NOTE: In transition from Alpha to Beta, this capability is being reused as the enablement.
CapabilityWarnFunnelNoInvite = "https://tailscale.com/cap/warn-funnel-no-invite"
// CapabilityWarnFunnelNoHTTPS indicates HTTPS has not been enabled for the tailnet.
@ -2021,9 +2022,9 @@ type SSHAction struct {
// to use local port forwarding if requested.
AllowLocalPortForwarding bool `json:"allowLocalPortForwarding,omitempty"`
// SessionHaulTargetNode, if non-empty, is the Stable ID of a peer to
// stream this SSH session's logs to.
SessionHaulTargetNode StableNodeID `json:"sessionHaulTargetNode,omitempty"`
// Recorders defines the destinations of the SSH session recorders.
// The recording will be uploaded to http://addr:port/record.
Recorders []netip.AddrPort `json:"recorders"`
}
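
Per the comment on Recorders, each destination receives the recording as an HTTP POST to /record. A minimal sketch of a compatible receiving endpoint; the listen address, file naming, and error handling are assumptions for illustration, not part of this change:

```go
package main

import (
	"io"
	"log"
	"net/http"
	"os"
)

func main() {
	http.HandleFunc("/record", func(w http.ResponseWriter, r *http.Request) {
		f, err := os.CreateTemp("", "session-*.cast")
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		defer f.Close()
		// Stream the cast to disk as it arrives; the upload stays open for
		// the lifetime of the SSH session.
		n, err := io.Copy(f, r.Body)
		if err != nil {
			log.Printf("recording aborted after %d bytes: %v", n, err)
			return
		}
		log.Printf("stored %d bytes in %s", n, f.Name())
	})
	log.Fatal(http.ListenAndServe(":8080", nil)) // port is illustrative
}
```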
// OverTLSPublicKeyResponse is the JSON response to /key?v=<n>


@ -371,10 +371,7 @@ func (src *SSHRule) Clone() *SSHRule {
dst.SSHUsers[k] = v
}
}
if dst.Action != nil {
dst.Action = new(SSHAction)
*dst.Action = *src.Action
}
dst.Action = src.Action.Clone()
return dst
}
@ -386,6 +383,30 @@ var _SSHRuleCloneNeedsRegeneration = SSHRule(struct {
Action *SSHAction
}{})
// Clone makes a deep copy of SSHAction.
// The result aliases no memory with the original.
func (src *SSHAction) Clone() *SSHAction {
if src == nil {
return nil
}
dst := new(SSHAction)
*dst = *src
dst.Recorders = append(src.Recorders[:0:0], src.Recorders...)
return dst
}
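
The generated Clone above uses the append(s[:0:0], s...) idiom: a three-index slice with zero length and zero capacity forces append to allocate fresh backing storage, so the copy shares no memory with the original. A quick illustration, not from the generated file:

```go
package main

import "fmt"

func main() {
	src := []int{1, 2, 3}
	dst := append(src[:0:0], src...) // fresh allocation: no shared backing array
	dst[0] = 99
	fmt.Println(src) // [1 2 3] — unchanged
	fmt.Println(dst) // [99 2 3]
}
```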
// A compilation failure here means this code must be regenerated, with the command at the top of this file.
var _SSHActionCloneNeedsRegeneration = SSHAction(struct {
Message string
Reject bool
Accept bool
SessionDuration time.Duration
AllowAgentForwarding bool
HoldAndDelegate string
AllowLocalPortForwarding bool
Recorders []netip.AddrPort
}{})
// Clone makes a deep copy of SSHPrincipal.
// The result aliases no memory with the original.
func (src *SSHPrincipal) Clone() *SSHPrincipal {
@ -426,7 +447,7 @@ var _ControlDialPlanCloneNeedsRegeneration = ControlDialPlan(struct {
// Clone duplicates src into dst and reports whether it succeeded.
// To succeed, <src, dst> must be of types <*T, *T> or <*T, **T>,
// where T is one of User,Node,Hostinfo,NetInfo,Login,DNSConfig,RegisterResponse,DERPRegion,DERPMap,DERPNode,SSHRule,SSHPrincipal,ControlDialPlan.
// where T is one of User,Node,Hostinfo,NetInfo,Login,DNSConfig,RegisterResponse,DERPRegion,DERPMap,DERPNode,SSHRule,SSHAction,SSHPrincipal,ControlDialPlan.
func Clone(dst, src any) bool {
switch src := src.(type) {
case *User:
@ -528,6 +549,15 @@ func Clone(dst, src any) bool {
*dst = src.Clone()
return true
}
case *SSHAction:
switch dst := dst.(type) {
case *SSHAction:
*dst = *src.Clone()
return true
case **SSHAction:
*dst = src.Clone()
return true
}
case *SSHPrincipal:
switch dst := dst.(type) {
case *SSHPrincipal:


@ -20,7 +20,7 @@ import (
"tailscale.com/types/views"
)
//go:generate go run tailscale.com/cmd/cloner -clonefunc=true -type=User,Node,Hostinfo,NetInfo,Login,DNSConfig,RegisterResponse,DERPRegion,DERPMap,DERPNode,SSHRule,SSHPrincipal,ControlDialPlan
//go:generate go run tailscale.com/cmd/cloner -clonefunc=true -type=User,Node,Hostinfo,NetInfo,Login,DNSConfig,RegisterResponse,DERPRegion,DERPMap,DERPNode,SSHRule,SSHAction,SSHPrincipal,ControlDialPlan
// View returns a readonly view of User.
func (p *User) View() UserView {
@ -865,13 +865,7 @@ func (v SSHRuleView) Principals() views.SliceView[*SSHPrincipal, SSHPrincipalVie
}
func (v SSHRuleView) SSHUsers() views.Map[string, string] { return views.MapOf(v.ж.SSHUsers) }
func (v SSHRuleView) Action() *SSHAction {
if v.ж.Action == nil {
return nil
}
x := *v.ж.Action
return &x
}
func (v SSHRuleView) Action() SSHActionView { return v.ж.Action.View() }
// A compilation failure here means this code must be regenerated, with the command at the top of this file.
var _SSHRuleViewNeedsRegeneration = SSHRule(struct {
@ -881,6 +875,72 @@ var _SSHRuleViewNeedsRegeneration = SSHRule(struct {
Action *SSHAction
}{})
// View returns a readonly view of SSHAction.
func (p *SSHAction) View() SSHActionView {
return SSHActionView{ж: p}
}
// SSHActionView provides a read-only view over SSHAction.
//
// Its methods should only be called if `Valid()` returns true.
type SSHActionView struct {
// ж is the underlying mutable value, named with a hard-to-type
// character that looks pointy like a pointer.
// It is named distinctively to make you think of how dangerous it is to escape
// to callers. You must not let callers be able to mutate it.
ж *SSHAction
}
// Valid reports whether underlying value is non-nil.
func (v SSHActionView) Valid() bool { return v.ж != nil }
// AsStruct returns a clone of the underlying value which aliases no memory with
// the original.
func (v SSHActionView) AsStruct() *SSHAction {
if v.ж == nil {
return nil
}
return v.ж.Clone()
}
func (v SSHActionView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) }
func (v *SSHActionView) UnmarshalJSON(b []byte) error {
if v.ж != nil {
return errors.New("already initialized")
}
if len(b) == 0 {
return nil
}
var x SSHAction
if err := json.Unmarshal(b, &x); err != nil {
return err
}
v.ж = &x
return nil
}
func (v SSHActionView) Message() string { return v.ж.Message }
func (v SSHActionView) Reject() bool { return v.ж.Reject }
func (v SSHActionView) Accept() bool { return v.ж.Accept }
func (v SSHActionView) SessionDuration() time.Duration { return v.ж.SessionDuration }
func (v SSHActionView) AllowAgentForwarding() bool { return v.ж.AllowAgentForwarding }
func (v SSHActionView) HoldAndDelegate() string { return v.ж.HoldAndDelegate }
func (v SSHActionView) AllowLocalPortForwarding() bool { return v.ж.AllowLocalPortForwarding }
func (v SSHActionView) Recorders() views.Slice[netip.AddrPort] { return views.SliceOf(v.ж.Recorders) }
// A compilation failure here means this code must be regenerated, with the command at the top of this file.
var _SSHActionViewNeedsRegeneration = SSHAction(struct {
Message string
Reject bool
Accept bool
SessionDuration time.Duration
AllowAgentForwarding bool
HoldAndDelegate string
AllowLocalPortForwarding bool
Recorders []netip.AddrPort
}{})
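
Illustrative caller-side usage of the generated view: read-only access to an SSHAction through SSHActionView. This sketch assumes the accessors generated above and the Len/At methods on views.Slice; it is not code from this change:

```go
package main

import (
	"fmt"
	"net/netip"

	"tailscale.com/tailcfg"
)

func main() {
	a := &tailcfg.SSHAction{
		Accept:    true,
		Recorders: []netip.AddrPort{netip.MustParseAddrPort("100.100.100.50:80")},
	}
	v := a.View() // read-only; callers cannot mutate the underlying SSHAction
	if v.Valid() && v.Accept() {
		rs := v.Recorders()
		for i := 0; i < rs.Len(); i++ {
			fmt.Println("recorder:", rs.At(i))
		}
	}
}
```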
// View returns a readonly view of SSHPrincipal.
func (p *SSHPrincipal) View() SSHPrincipalView {
return SSHPrincipalView{ж: p}


@ -150,14 +150,14 @@ func InfoFrom(dir string) (VersionInfo, error) {
}
// Note, this mechanism doesn't correctly support go.mod replacements,
// or go workdirs. We only parse out the commit hash from go.mod's
// or go workdirs. We only parse out the commit ref from go.mod's
// "require" line, nothing else.
tailscaleHash, err := tailscaleModuleHash(modBs)
tailscaleRef, err := tailscaleModuleRef(modBs)
if err != nil {
return VersionInfo{}, err
}
v, err := infoFromCache(tailscaleHash, runner)
v, err := infoFromCache(tailscaleRef, runner)
if err != nil {
return VersionInfo{}, err
}
@ -171,9 +171,10 @@ func InfoFrom(dir string) (VersionInfo, error) {
return mkOutput(v)
}
// tailscaleModuleHash returns the git hash of the 'require tailscale.com' line
// in the given go.mod bytes.
func tailscaleModuleHash(modBs []byte) (string, error) {
// tailscaleModuleRef returns the git ref of the 'require tailscale.com' line
// in the given go.mod bytes. The ref is either a short commit hash, or a git
// tag.
func tailscaleModuleRef(modBs []byte) (string, error) {
mod, err := modfile.Parse("go.mod", modBs, nil)
if err != nil {
return "", err
@ -187,7 +188,8 @@ func tailscaleModuleHash(modBs []byte) (string, error) {
if i := strings.LastIndexByte(req.Mod.Version, '-'); i != -1 {
return req.Mod.Version[i+1:], nil
}
return "", fmt.Errorf("couldn't parse git hash from tailscale.com version %q", req.Mod.Version)
// If there are no dashes, the version is a tag.
return req.Mod.Version, nil
}
return "", fmt.Errorf("no require tailscale.com line in go.mod")
}
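
The rule encoded above: for a pseudo-version, the text after the last '-' is the short commit hash, while a version with no dash is a plain tag. A standalone sketch of just that extraction step, with made-up version strings:

```go
package main

import (
	"fmt"
	"strings"
)

// refFromVersion mirrors the ref-extraction rule in tailscaleModuleRef,
// minus the go.mod parsing.
func refFromVersion(v string) string {
	if i := strings.LastIndexByte(v, '-'); i != -1 {
		return v[i+1:] // pseudo-version: trailing short commit hash
	}
	return v // no dashes: the version is a tag
}

func main() {
	fmt.Println(refFromVersion("v0.0.0-20230101000000-abcdef123456")) // abcdef123456
	fmt.Println(refFromVersion("v1.2.3"))                             // v1.2.3
}
```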
@ -310,7 +312,7 @@ type verInfo struct {
// sentinel patch number.
const unknownPatchVersion = 9999999
func infoFromCache(shortHash string, runner dirRunner) (verInfo, error) {
func infoFromCache(ref string, runner dirRunner) (verInfo, error) {
cacheDir, err := os.UserCacheDir()
if err != nil {
return verInfo{}, fmt.Errorf("Getting user cache dir: %w", err)
@ -324,16 +326,16 @@ func infoFromCache(shortHash string, runner dirRunner) (verInfo, error) {
}
}
if !r.ok("git", "cat-file", "-e", shortHash) {
if !r.ok("git", "cat-file", "-e", ref) {
if !r.ok("git", "fetch", "origin") {
return verInfo{}, fmt.Errorf("updating OSS repo failed")
}
}
hash, err := r.output("git", "rev-parse", shortHash)
hash, err := r.output("git", "rev-parse", ref)
if err != nil {
return verInfo{}, err
}
date, err := r.output("git", "log", "-n1", "--format=%ct", shortHash)
date, err := r.output("git", "log", "-n1", "--format=%ct", ref)
if err != nil {
return verInfo{}, err
}
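
The cache lookup above shells out to git to resolve the ref and its commit date. A rough equivalent of those two invocations using os/exec directly, without the dirRunner abstraction; the ref value and error handling here are illustrative:

```go
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func gitOutput(args ...string) (string, error) {
	out, err := exec.Command("git", args...).Output()
	return strings.TrimSpace(string(out)), err
}

func main() {
	ref := "HEAD" // stand-in for the ref parsed from go.mod
	hash, err := gitOutput("rev-parse", ref)
	if err != nil {
		panic(err)
	}
	date, err := gitOutput("log", "-n1", "--format=%ct", ref)
	if err != nil {
		panic(err)
	}
	fmt.Println("commit:", hash, "committed at (unix):", date)
}
```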