Compare commits
34 Commits
main...release-br
| SHA1 |
|---|
| 043a34500d |
| 214217dd10 |
| 00205f0ab6 |
| 61f36aa1cd |
| 296d6820b5 |
| 383b7c747a |
| c3301abc5e |
| 49e305f862 |
| 71a5f2a989 |
| 1b1ac05d95 |
| e6b81f983e |
| 8414c591e5 |
| 0651c1a069 |
| 2474bd2754 |
| 40091d0261 |
| d216363bc5 |
| dbbc465bfd |
| 598b24d85c |
| 17c6d5c7c5 |
| 47ebe6f956 |
| c750186830 |
| d7bbd4fe03 |
| ac0c0b081d |
| 068ed7dbfa |
| 26bf7c4dbe |
| d47b74e461 |
| 3db61d07ca |
| 817aa282c2 |
| d00c046b72 |
| aad01c81b1 |
| fd558e2e68 |
| 3eeff9e7f7 |
| 6c0e6a5f4e |
| 10d462d321 |
@@ -1 +1 @@
-1.37.0
+1.38.4
@@ -120,6 +120,8 @@ change in the future.
         pingCmd,
         ncCmd,
         sshCmd,
+        funnelCmd,
+        serveCmd,
         versionCmd,
         webCmd,
         fileCmd,
@@ -147,10 +149,6 @@ change in the future.
     switch {
     case slices.Contains(args, "debug"):
         rootCmd.Subcommands = append(rootCmd.Subcommands, debugCmd)
-    case slices.Contains(args, "funnel"):
-        rootCmd.Subcommands = append(rootCmd.Subcommands, funnelCmd)
-    case slices.Contains(args, "serve"):
-        rootCmd.Subcommands = append(rootCmd.Subcommands, serveCmd)
     case slices.Contains(args, "update"):
         rootCmd.Subcommands = append(rootCmd.Subcommands, updateCmd)
     }
@@ -29,7 +29,7 @@ var funnelCmd = newFunnelCommand(&serveEnv{lc: &localClient})
 func newFunnelCommand(e *serveEnv) *ffcli.Command {
     return &ffcli.Command{
         Name: "funnel",
-        ShortHelp: "[ALPHA] turn Tailscale Funnel on or off",
+        ShortHelp: "Turn on/off Funnel service",
         ShortUsage: strings.TrimSpace(`
 funnel <serve-port> {on|off}
 funnel status [--json]
@@ -40,9 +40,17 @@ var netlockCmd = &ffcli.Command{
         nlDisablementKDFCmd,
         nlLogCmd,
         nlLocalDisableCmd,
-        nlTskeyWrapCmd,
     },
-    Exec: runNetworkLockStatus,
+    Exec: runNetworkLockNoSubcommand,
+}
+
+func runNetworkLockNoSubcommand(ctx context.Context, args []string) error {
+    // Detect & handle the deprecated command 'lock tskey-wrap'.
+    if len(args) >= 2 && args[0] == "tskey-wrap" {
+        return runTskeyWrapCmd(ctx, args[1:])
+    }
+
+    return runNetworkLockStatus(ctx, args)
 }

 var nlInitArgs struct {
@@ -427,13 +435,19 @@ func runNetworkLockModify(ctx context.Context, addArgs, removeArgs []string) err

 var nlSignCmd = &ffcli.Command{
     Name: "sign",
-    ShortUsage: "sign <node-key> [<rotation-key>]",
-    ShortHelp: "Signs a node key and transmits the signature to the coordination server",
-    LongHelp: "Signs a node key and transmits the signature to the coordination server",
+    ShortUsage: "sign <node-key> [<rotation-key>] or sign <auth-key>",
+    ShortHelp: "Signs a node or pre-approved auth key",
+    LongHelp: `Either:
+  - signs a node key and transmits the signature to the coordination server, or
+  - signs a pre-approved auth key, printing it in a form that can be used to bring up nodes under tailnet lock`,
     Exec: runNetworkLockSign,
 }

 func runNetworkLockSign(ctx context.Context, args []string) error {
+    if len(args) > 0 && strings.HasPrefix(args[0], "tskey-auth-") {
+        return runTskeyWrapCmd(ctx, args)
+    }
+
     var (
         nodeKey key.NodePublic
         rotationKey key.NLPublic
@@ -636,14 +650,6 @@ func runNetworkLockLog(ctx context.Context, args []string) error {
     return nil
 }

-var nlTskeyWrapCmd = &ffcli.Command{
-    Name: "tskey-wrap",
-    ShortUsage: "tskey-wrap <tailscale pre-auth key>",
-    ShortHelp: "Modifies a pre-auth key from the admin panel to work with tailnet lock",
-    LongHelp: "Modifies a pre-auth key from the admin panel to work with tailnet lock",
-    Exec: runTskeyWrapCmd,
-}
-
 func runTskeyWrapCmd(ctx context.Context, args []string) error {
     if len(args) != 1 {
         return errors.New("usage: lock tskey-wrap <tailscale pre-auth key>")
@@ -657,21 +663,25 @@ func runTskeyWrapCmd(ctx context.Context, args []string) error {
         return fixTailscaledConnectError(err)
     }

+    return wrapAuthKey(ctx, args[0], st)
+}
+
+func wrapAuthKey(ctx context.Context, keyStr string, status *ipnstate.Status) error {
     // Generate a separate tailnet-lock key just for the credential signature.
     // We use the free-form meta strings to mark a little bit of metadata about this
     // key.
     priv := key.NewNLPrivate()
     m := map[string]string{
         "purpose": "pre-auth key",
-        "wrapper_stableid": string(st.Self.ID),
+        "wrapper_stableid": string(status.Self.ID),
         "wrapper_createtime": fmt.Sprint(time.Now().Unix()),
     }
-    if strings.HasPrefix(args[0], "tskey-auth-") && strings.Index(args[0][len("tskey-auth-"):], "-") > 0 {
+    if strings.HasPrefix(keyStr, "tskey-auth-") && strings.Index(keyStr[len("tskey-auth-"):], "-") > 0 {
         // We don't want to accidentally embed the nonce part of the authkey in
         // the event the format changes. As such, we make sure its in the format we
         // expect (tskey-auth-<stableID, inc CNTRL suffix>-nonce) before we parse
         // out and embed the stableID.
-        s := strings.TrimPrefix(args[0], "tskey-auth-")
+        s := strings.TrimPrefix(keyStr, "tskey-auth-")
         m["authkey_stableid"] = s[:strings.Index(s, "-")]
     }
     k := tka.Key{
@@ -681,7 +691,7 @@ func runTskeyWrapCmd(ctx context.Context, args []string) error {
         Meta: m,
     }

-    wrapped, err := localClient.NetworkLockWrapPreauthKey(ctx, args[0], priv)
+    wrapped, err := localClient.NetworkLockWrapPreauthKey(ctx, keyStr, priv)
     if err != nil {
         return fmt.Errorf("wrapping failed: %w", err)
     }
@@ -33,7 +33,7 @@ var serveCmd = newServeCommand(&serveEnv{lc: &localClient})
 func newServeCommand(e *serveEnv) *ffcli.Command {
     return &ffcli.Command{
         Name: "serve",
-        ShortHelp: "[ALPHA] Serve from your Tailscale node",
+        ShortHelp: "Serve content and local servers",
         ShortUsage: strings.TrimSpace(`
 serve https:<port> <mount-point> <source> [off]
 serve tcp:<port> tcp://localhost:<local-port> [off]
@@ -41,7 +41,7 @@ serve https:<port> <mount-point> <source> [off]
 serve status [--json]
 `),
         LongHelp: strings.TrimSpace(`
-*** ALPHA; all of this is subject to change ***
+*** BETA; all of this is subject to change ***

 The 'tailscale serve' set of commands allows you to serve
 content and local servers from your Tailscale node to
@@ -66,10 +66,12 @@ EXAMPLES
   - To serve simple static text:
     $ tailscale serve https:8080 / text:"Hello, world!"

-  - To forward raw TCP packets to a local TCP server on port 5432:
+  - To forward incoming TCP connections on port 2222 to a local TCP server on
+    port 22 (e.g. to run OpenSSH in parallel with Tailscale SSH):
     $ tailscale serve tcp:2222 tcp://localhost:22

-  - To forward raw, TLS-terminated TCP packets to a local TCP server on port 80:
+  - To accept TCP TLS connections (terminated within tailscaled) proxied to a
+    local plaintext server on port 80:
     $ tailscale serve tls-terminated-tcp:443 tcp://localhost:80
 `),
     Exec: e.runServe,
@@ -451,6 +453,7 @@ func expandProxyTarget(source string) (string, error) {
     if u.Port() != "" {
         url += ":" + u.Port()
     }
+    url += u.Path
     return url, nil
 }

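The hunk above appends `u.Path` so that a proxy target such as `http://127.0.0.1:3000/foo/bar` keeps its path component. As an illustration only (standalone code, not from this repository), the same rebuild can be reproduced with nothing but `net/url`:

```go
// Illustrative sketch: shows how a proxy target's path survives parsing with
// net/url, which is what the "url += u.Path" line above appears to rely on.
package main

import (
	"fmt"
	"net/url"
)

func main() {
	u, err := url.Parse("http://127.0.0.1:3000/foo/bar")
	if err != nil {
		panic(err)
	}
	// Rebuild the target the same way the diff does: scheme://host[:port] + path.
	target := u.Scheme + "://" + u.Hostname()
	if u.Port() != "" {
		target += ":" + u.Port()
	}
	target += u.Path
	fmt.Println(target) // prints: http://127.0.0.1:3000/foo/bar
}
```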
@@ -262,6 +262,18 @@ func TestServeConfigMutations(t *testing.T) {
             },
         },
     })
+    add(step{reset: true})
+    add(step{ // support path in proxy
+        command: cmd("https / http://127.0.0.1:3000/foo/bar"),
+        want: &ipn.ServeConfig{
+            TCP: map[uint16]*ipn.TCPPortHandler{443: {HTTPS: true}},
+            Web: map[ipn.HostPort]*ipn.WebServerConfig{
+                "foo.test.ts.net:443": {Handlers: map[string]*ipn.HTTPHandler{
+                    "/": {Proxy: "http://127.0.0.1:3000/foo/bar"},
+                }},
+            },
+        },
+    })

     // tcp
     add(step{reset: true})
@@ -212,7 +212,7 @@ tailscale.com/cmd/tailscaled dependencies: (generated by github.com/tailscale/de
      tailscale.com/ipn/ipnstate from tailscale.com/control/controlclient+
      tailscale.com/ipn/localapi from tailscale.com/ipn/ipnserver
      tailscale.com/ipn/policy from tailscale.com/ipn/ipnlocal
-     tailscale.com/ipn/store from tailscale.com/cmd/tailscaled
+     tailscale.com/ipn/store from tailscale.com/cmd/tailscaled+
 L    tailscale.com/ipn/store/awsstore from tailscale.com/ipn/store
 L    tailscale.com/ipn/store/kubestore from tailscale.com/ipn/store
      tailscale.com/ipn/store/mem from tailscale.com/ipn/store+
@@ -1 +1 @@
-db4dc9046c93dde2c0e534ca7d529bd690ad09c9
+ddff070c02790cb571006e820e58cce9627569cf
@@ -31,10 +31,13 @@ import (
     "time"

     "golang.org/x/crypto/acme"
+    "tailscale.com/atomicfile"
     "tailscale.com/envknob"
     "tailscale.com/hostinfo"
     "tailscale.com/ipn"
     "tailscale.com/ipn/ipnstate"
+    "tailscale.com/ipn/store"
+    "tailscale.com/ipn/store/mem"
     "tailscale.com/types/logger"
     "tailscale.com/version"
     "tailscale.com/version/distro"
@@ -82,11 +85,6 @@ func (b *LocalBackend) GetCertPEM(ctx context.Context, domain string) (*TLSCertK
         return nil, errors.New("invalid domain")
     }
     logf := logger.WithPrefix(b.logf, fmt.Sprintf("cert(%q): ", domain))
-    dir, err := b.certDir()
-    if err != nil {
-        logf("failed to get certDir: %v", err)
-        return nil, err
-    }
     now := time.Now()
     traceACME := func(v any) {
         if !acmeDebug() {
|
@ -96,17 +94,22 @@ func (b *LocalBackend) GetCertPEM(ctx context.Context, domain string) (*TLSCertK
|
||||||
log.Printf("acme %T: %s", v, j)
|
log.Printf("acme %T: %s", v, j)
|
||||||
}
|
}
|
||||||
|
|
||||||
if pair, err := b.getCertPEMCached(dir, domain, now); err == nil {
|
cs, err := b.getCertStore()
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
if pair, err := getCertPEMCached(cs, domain, now); err == nil {
|
||||||
future := now.AddDate(0, 0, 14)
|
future := now.AddDate(0, 0, 14)
|
||||||
if b.shouldStartDomainRenewal(dir, domain, future) {
|
if b.shouldStartDomainRenewal(cs, domain, future) {
|
||||||
logf("starting async renewal")
|
logf("starting async renewal")
|
||||||
// Start renewal in the background.
|
// Start renewal in the background.
|
||||||
go b.getCertPEM(context.Background(), logf, traceACME, dir, domain, future)
|
go b.getCertPEM(context.Background(), cs, logf, traceACME, domain, future)
|
||||||
}
|
}
|
||||||
return pair, nil
|
return pair, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
pair, err := b.getCertPEM(ctx, logf, traceACME, dir, domain, now)
|
pair, err := b.getCertPEM(ctx, cs, logf, traceACME, domain, now)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logf("getCertPEM: %v", err)
|
logf("getCertPEM: %v", err)
|
||||||
return nil, err
|
return nil, err
|
||||||
|
@@ -114,7 +117,7 @@ func (b *LocalBackend) GetCertPEM(ctx context.Context, domain string) (*TLSCertK
     return pair, nil
 }

-func (b *LocalBackend) shouldStartDomainRenewal(dir, domain string, future time.Time) bool {
+func (b *LocalBackend) shouldStartDomainRenewal(cs certStore, domain string, future time.Time) bool {
     renewMu.Lock()
     defer renewMu.Unlock()
     now := time.Now()
@@ -124,7 +127,7 @@ func (b *LocalBackend) shouldStartDomainRenewal(dir, domain string, future time.
         return false
     }
     lastRenewCheck[domain] = now
-    _, err := b.getCertPEMCached(dir, domain, future)
+    _, err := getCertPEMCached(cs, domain, future)
     return errors.Is(err, errCertExpired)
 }

@@ -140,15 +143,32 @@ type certStore interface {
     WriteCert(domain string, cert []byte) error
     // WriteKey writes the key for domain.
     WriteKey(domain string, key []byte) error
+    // ACMEKey returns the value previously stored via WriteACMEKey.
+    // It is a PEM encoded ECDSA key.
+    ACMEKey() ([]byte, error)
+    // WriteACMEKey stores the provided PEM encoded ECDSA key.
+    WriteACMEKey([]byte) error
 }

 var errCertExpired = errors.New("cert expired")

-func (b *LocalBackend) getCertStore(dir string) certStore {
-    if hostinfo.GetEnvType() == hostinfo.Kubernetes && dir == "/tmp" {
-        return certStateStore{StateStore: b.store}
+func (b *LocalBackend) getCertStore() (certStore, error) {
+    switch b.store.(type) {
+    case *store.FileStore:
+    case *mem.Store:
+    default:
+        if hostinfo.GetEnvType() == hostinfo.Kubernetes {
+            // We're running in Kubernetes with a custom StateStore,
+            // use that instead of the cert directory.
+            // TODO(maisem): expand this to other environments?
+            return certStateStore{StateStore: b.store}, nil
+        }
     }
-    return certFileStore{dir: dir}
+    dir, err := b.certDir()
+    if err != nil {
+        return nil, err
+    }
+    return certFileStore{dir: dir}, nil
 }

 // certFileStore implements certStore by storing the cert & key files in the named directory.
@@ -160,6 +180,25 @@ type certFileStore struct {
     testRoots *x509.CertPool
 }

+const acmePEMName = "acme-account.key.pem"
+
+func (f certFileStore) ACMEKey() ([]byte, error) {
+    pemName := filepath.Join(f.dir, acmePEMName)
+    v, err := os.ReadFile(pemName)
+    if err != nil {
+        if os.IsNotExist(err) {
+            return nil, ipn.ErrStateNotExist
+        }
+        return nil, err
+    }
+    return v, nil
+}
+
+func (f certFileStore) WriteACMEKey(b []byte) error {
+    pemName := filepath.Join(f.dir, acmePEMName)
+    return atomicfile.WriteFile(pemName, b, 0600)
+}
+
 func (f certFileStore) Read(domain string, now time.Time) (*TLSCertKeyPair, error) {
     certPEM, err := os.ReadFile(certFile(f.dir, domain))
     if err != nil {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f certFileStore) WriteCert(domain string, cert []byte) error {
|
func (f certFileStore) WriteCert(domain string, cert []byte) error {
|
||||||
return os.WriteFile(certFile(f.dir, domain), cert, 0644)
|
return atomicfile.WriteFile(certFile(f.dir, domain), cert, 0644)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (f certFileStore) WriteKey(domain string, key []byte) error {
|
func (f certFileStore) WriteKey(domain string, key []byte) error {
|
||||||
return os.WriteFile(keyFile(f.dir, domain), key, 0600)
|
return atomicfile.WriteFile(keyFile(f.dir, domain), key, 0600)
|
||||||
}
|
}
|
||||||
|
|
||||||
// certStateStore implements certStore by storing the cert & key files in an ipn.StateStore.
|
// certStateStore implements certStore by storing the cert & key files in an ipn.StateStore.
|
||||||
|
@@ -221,6 +260,14 @@ func (s certStateStore) WriteKey(domain string, key []byte) error {
     return s.WriteState(ipn.StateKey(domain+".key"), key)
 }

+func (s certStateStore) ACMEKey() ([]byte, error) {
+    return s.ReadState(ipn.StateKey(acmePEMName))
+}
+
+func (s certStateStore) WriteACMEKey(key []byte) error {
+    return s.WriteState(ipn.StateKey(acmePEMName), key)
+}
+
 // TLSCertKeyPair is a TLS public and private key, and whether they were obtained
 // from cache or freshly obtained.
 type TLSCertKeyPair struct {
@@ -236,26 +283,26 @@ func certFile(dir, domain string) string { return filepath.Join(dir, domain+".cr
 // domain exists on disk in dir that is valid at the provided now time.
 // If the keypair is expired, it returns errCertExpired.
 // If the keypair doesn't exist, it returns ipn.ErrStateNotExist.
-func (b *LocalBackend) getCertPEMCached(dir, domain string, now time.Time) (p *TLSCertKeyPair, err error) {
+func getCertPEMCached(cs certStore, domain string, now time.Time) (p *TLSCertKeyPair, err error) {
     if !validLookingCertDomain(domain) {
         // Before we read files from disk using it, validate it's halfway
         // reasonable looking.
         return nil, fmt.Errorf("invalid domain %q", domain)
     }
-    return b.getCertStore(dir).Read(domain, now)
+    return cs.Read(domain, now)
 }

-func (b *LocalBackend) getCertPEM(ctx context.Context, logf logger.Logf, traceACME func(any), dir, domain string, now time.Time) (*TLSCertKeyPair, error) {
+func (b *LocalBackend) getCertPEM(ctx context.Context, cs certStore, logf logger.Logf, traceACME func(any), domain string, now time.Time) (*TLSCertKeyPair, error) {
     acmeMu.Lock()
     defer acmeMu.Unlock()

-    if p, err := b.getCertPEMCached(dir, domain, now); err == nil {
+    if p, err := getCertPEMCached(cs, domain, now); err == nil {
         return p, nil
     } else if !errors.Is(err, ipn.ErrStateNotExist) && !errors.Is(err, errCertExpired) {
         return nil, err
     }

-    key, err := acmeKey(dir)
+    key, err := acmeKey(cs)
     if err != nil {
         return nil, fmt.Errorf("acmeKey: %w", err)
     }
@@ -366,8 +413,7 @@ func (b *LocalBackend) getCertPEM(ctx context.Context, logf logger.Logf, traceAC
     if err := encodeECDSAKey(&privPEM, certPrivKey); err != nil {
         return nil, err
     }
-    certStore := b.getCertStore(dir)
-    if err := certStore.WriteKey(domain, privPEM.Bytes()); err != nil {
+    if err := cs.WriteKey(domain, privPEM.Bytes()); err != nil {
         return nil, err
     }

@@ -390,7 +436,7 @@ func (b *LocalBackend) getCertPEM(ctx context.Context, logf logger.Logf, traceAC
             return nil, err
         }
     }
-    if err := certStore.WriteCert(domain, certPEM.Bytes()); err != nil {
+    if err := cs.WriteCert(domain, certPEM.Bytes()); err != nil {
         return nil, err
     }

@@ -444,14 +490,15 @@ func parsePrivateKey(der []byte) (crypto.Signer, error) {
     return nil, errors.New("acme/autocert: failed to parse private key")
 }

-func acmeKey(dir string) (crypto.Signer, error) {
-    pemName := filepath.Join(dir, "acme-account.key.pem")
-    if v, err := os.ReadFile(pemName); err == nil {
+func acmeKey(cs certStore) (crypto.Signer, error) {
+    if v, err := cs.ACMEKey(); err == nil {
         priv, _ := pem.Decode(v)
         if priv == nil || !strings.Contains(priv.Type, "PRIVATE") {
             return nil, errors.New("acme/autocert: invalid account key found in cache")
         }
         return parsePrivateKey(priv.Bytes)
+    } else if err != nil && !errors.Is(err, ipn.ErrStateNotExist) {
+        return nil, err
     }

     privKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
@@ -462,7 +509,7 @@ func acmeKey(dir string) (crypto.Signer, error) {
     if err := encodeECDSAKey(&pemBuf, privKey); err != nil {
         return nil, err
     }
-    if err := os.WriteFile(pemName, pemBuf.Bytes(), 0600); err != nil {
+    if err := cs.WriteACMEKey(pemBuf.Bytes()); err != nil {
        return nil, err
     }
     return privKey, nil
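The hunks above move the ACME account key behind the certStore methods ACMEKey/WriteACMEKey. Below is a self-contained sketch of that load-or-generate pattern using only the standard library; the `keyStore` interface here is hypothetical and only mirrors the two method names visible in the diff, and error handling is simplified (a read failure simply falls through to generating a fresh key):

```go
// Sketch only, not the repository's code: reuse a cached PEM-encoded ECDSA
// account key if the store has one, otherwise generate and persist a new one.
package acmesketch

import (
	"crypto"
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/x509"
	"encoding/pem"
	"errors"
)

// keyStore is a hypothetical stand-in for the certStore methods in the diff.
type keyStore interface {
	ACMEKey() ([]byte, error)         // returns a PEM-encoded ECDSA key
	WriteACMEKey(pemBytes []byte) error
}

func acmeAccountKey(s keyStore) (crypto.Signer, error) {
	if v, err := s.ACMEKey(); err == nil {
		block, _ := pem.Decode(v)
		if block == nil {
			return nil, errors.New("invalid cached account key")
		}
		return x509.ParseECPrivateKey(block.Bytes)
	}
	// No cached key: generate a P-256 key and persist it for next time.
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		return nil, err
	}
	der, err := x509.MarshalECPrivateKey(priv)
	if err != nil {
		return nil, err
	}
	pemBytes := pem.EncodeToMemory(&pem.Block{Type: "EC PRIVATE KEY", Bytes: der})
	if err := s.WriteACMEKey(pemBytes); err != nil {
		return nil, err
	}
	return priv, nil
}
```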
@@ -298,7 +298,7 @@ func NewLocalBackend(logf logger.Logf, logid string, store ipn.StateStore, diale
         statsLogf: logger.LogOnChange(logf, 5*time.Minute, time.Now),
         e: e,
         pm: pm,
-        store: pm.Store(),
+        store: store,
         dialer: dialer,
         backendLogID: logid,
         state: ipn.NoState,
@@ -2530,6 +2530,9 @@ func (b *LocalBackend) checkPrefsLocked(p *ipn.Prefs) error {
     if err := b.checkExitNodePrefsLocked(p); err != nil {
         errs = append(errs, err)
     }
+    if err := b.checkFunnelEnabledLocked(p); err != nil {
+        errs = append(errs, err)
+    }
     return multierr.New(errs...)
 }

@@ -2614,6 +2617,13 @@ func (b *LocalBackend) checkExitNodePrefsLocked(p *ipn.Prefs) error {
     return nil
 }

+func (b *LocalBackend) checkFunnelEnabledLocked(p *ipn.Prefs) error {
+    if p.ShieldsUp && b.serveConfig.IsFunnelOn() {
+        return errors.New("Cannot enable shields-up when Funnel is enabled.")
+    }
+    return nil
+}
+
 func (b *LocalBackend) EditPrefs(mp *ipn.MaskedPrefs) (ipn.PrefsView, error) {
     b.mu.Lock()
     if mp.EggSet {
@@ -218,6 +218,11 @@ func (b *LocalBackend) SetServeConfig(config *ipn.ServeConfig) error {
     b.mu.Lock()
     defer b.mu.Unlock()

+    prefs := b.pm.CurrentPrefs()
+    if config.IsFunnelOn() && prefs.ShieldsUp() {
+        return errors.New("Unable to turn on Funnel while shields-up is enabled")
+    }
+
     nm := b.netMap
     if nm == nil {
         return errors.New("netMap is nil")
@@ -439,8 +444,15 @@ func (b *LocalBackend) proxyHandlerForBackend(backend string) (*httputil.Reverse
     if err != nil {
         return nil, fmt.Errorf("invalid url %s: %w", targetURL, err)
     }
-    rp := httputil.NewSingleHostReverseProxy(u)
-    rp.Transport = &http.Transport{
+    rp := &httputil.ReverseProxy{
+        Rewrite: func(r *httputil.ProxyRequest) {
+            r.SetURL(u)
+            r.Out.Host = r.In.Host
+            if c, ok := r.Out.Context().Value(serveHTTPContextKey{}).(*serveHTTPContext); ok {
+                r.Out.Header.Set("X-Forwarded-For", c.SrcAddr.Addr().String())
+            }
+        },
+        Transport: &http.Transport{
             DialContext: b.dialer.SystemDial,
             TLSClientConfig: &tls.Config{
                 InsecureSkipVerify: insecure,
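This hunk replaces `httputil.NewSingleHostReverseProxy` with a `ReverseProxy` that uses the `Rewrite` hook added in Go 1.20. A minimal standalone sketch of that pattern follows; the backend address is a placeholder, and `SetXForwarded` stands in for the custom `X-Forwarded-For` handling shown in the diff:

```go
// Minimal sketch of the httputil.ReverseProxy Rewrite pattern (Go 1.20+).
// Placeholder addresses; not code from this repository.
package main

import (
	"log"
	"net/http"
	"net/http/httputil"
	"net/url"
)

func main() {
	backend, err := url.Parse("http://127.0.0.1:3000")
	if err != nil {
		log.Fatal(err)
	}
	rp := &httputil.ReverseProxy{
		Rewrite: func(r *httputil.ProxyRequest) {
			r.SetURL(backend)      // route the outbound request to the backend
			r.Out.Host = r.In.Host // keep the client's original Host header
			r.SetXForwarded()      // populate X-Forwarded-For/-Host/-Proto
		},
	}
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", rp))
}
```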
@@ -451,6 +463,7 @@ func (b *LocalBackend) proxyHandlerForBackend(backend string) (*httputil.Reverse
             IdleConnTimeout: 90 * time.Second,
             TLSHandshakeTimeout: 10 * time.Second,
             ExpectContinueTimeout: 1 * time.Second,
+        },
     }
     return rp, nil
 }
@@ -476,7 +489,12 @@ func (b *LocalBackend) serveWebHandler(w http.ResponseWriter, r *http.Request) {
         http.Error(w, "unknown proxy destination", http.StatusInternalServerError)
         return
     }
-    p.(http.Handler).ServeHTTP(w, r)
+    h := p.(http.Handler)
+    // Trim the mount point from the URL path before proxying. (#6571)
+    if r.URL.Path != "/" {
+        h = http.StripPrefix(strings.TrimSuffix(mountPoint, "/"), h)
+    }
+    h.ServeHTTP(w, r)
     return
 }

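The hunk above wraps the proxy handler in `http.StripPrefix` so the mount point is removed from the request path before proxying. A standalone sketch of the same technique, with placeholder paths rather than repository values:

```go
// Sketch of the http.StripPrefix technique from the hunk above: requests
// mounted under /app/ reach the inner handler with the mount point removed.
package main

import (
	"fmt"
	"log"
	"net/http"
	"strings"
)

func main() {
	inner := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintf(w, "inner handler saw path %q\n", r.URL.Path)
	})

	mountPoint := "/app/" // placeholder mount point
	var h http.Handler = inner
	if mountPoint != "/" {
		// Same shape as the diff: trim a trailing "/" so "/app/foo" becomes "/foo".
		h = http.StripPrefix(strings.TrimSuffix(mountPoint, "/"), inner)
	}
	http.Handle(mountPoint, h)
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}
```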
ipn/serve.go (10 changes)
@@ -163,6 +163,12 @@ func (sc *ServeConfig) IsServingWeb(port uint16) bool {
     return sc.TCP[port].HTTPS
 }

+// IsFunnelOn checks if ServeConfig is currently allowing
+// funnel traffic for any host:port.
+//
+// View version of ServeConfig.IsFunnelOn.
+func (v ServeConfigView) IsFunnelOn() bool { return v.ж.IsFunnelOn() }
+
 // IsFunnelOn checks if ServeConfig is currently allowing
 // funnel traffic for any host:port.
 func (sc *ServeConfig) IsFunnelOn() bool {
@@ -180,7 +186,7 @@ func (sc *ServeConfig) IsFunnelOn() bool {
 // CheckFunnelAccess checks whether Funnel access is allowed for the given node
 // and port.
 // It checks:
-// 1. an invite was used to join the Funnel alpha
+// 1. Funnel is enabled on the Tailnet
 // 2. HTTPS is enabled on the Tailnet
 // 3. the node has the "funnel" nodeAttr
 // 4. the port is allowed for Funnel
@@ -190,7 +196,7 @@ func (sc *ServeConfig) IsFunnelOn() bool {
 // Funnel.
 func CheckFunnelAccess(port uint16, nodeAttrs []string) error {
     if slices.Contains(nodeAttrs, tailcfg.CapabilityWarnFunnelNoInvite) {
-        return errors.New("Funnel not available; an invite is required to join the alpha. See https://tailscale.com/s/no-funnel.")
+        return errors.New("Funnel not enabled; See https://tailscale.com/s/no-funnel.")
     }
     if slices.Contains(nodeAttrs, tailcfg.CapabilityWarnFunnelNoHTTPS) {
         return errors.New("Funnel not available; HTTPS must be enabled. See https://tailscale.com/s/https.")
@@ -153,11 +153,9 @@ func LocalAddresses() (regular, loopback []netip.Addr, err error) {
     if len(regular4) == 0 && len(regular6) == 0 {
         // if we have no usable IP addresses then be willing to accept
         // addresses we otherwise wouldn't, like:
-        //   + 169.254.x.x (AWS Lambda uses NAT with these)
+        //   + 169.254.x.x (AWS Lambda and Azure App Services use NAT with these)
         //   + IPv6 ULA (Google Cloud Run uses these with address translation)
-        if hostinfo.GetEnvType() == hostinfo.AWSLambda {
             regular4 = linklocal4
-        }
         regular6 = ula6
     }
     regular = append(regular4, regular6...)
@@ -645,7 +643,14 @@ func isUsableV4(ip netip.Addr) bool {
         return false
     }
     if ip.IsLinkLocalUnicast() {
-        return hostinfo.GetEnvType() == hostinfo.AWSLambda
+        switch hostinfo.GetEnvType() {
+        case hostinfo.AWSLambda:
+            return true
+        case hostinfo.AzureAppService:
+            return true
+        default:
+            return false
+        }
     }
     return true
 }
@@ -25,7 +25,7 @@ type sockStatCounters struct {
     txBytes, rxBytes atomic.Uint64
     rxBytesByInterface, txBytesByInterface map[int]*atomic.Uint64

-    txBytesMetric, rxBytesMetric *clientmetric.Metric
+    txBytesMetric, rxBytesMetric, txBytesCellularMetric, rxBytesCellularMetric *clientmetric.Metric

     // Validate counts for TCP sockets by using the TCP_CONNECTION_INFO
     // getsockopt. We get current counts, as well as save final values when
@@ -69,6 +69,8 @@ func withSockStats(ctx context.Context, label Label) context.Context {
         txBytesByInterface: make(map[int]*atomic.Uint64),
         txBytesMetric: clientmetric.NewCounter(fmt.Sprintf("sockstats_tx_bytes_%s", label)),
         rxBytesMetric: clientmetric.NewCounter(fmt.Sprintf("sockstats_rx_bytes_%s", label)),
+        txBytesCellularMetric: clientmetric.NewCounter(fmt.Sprintf("sockstats_tx_bytes_cellular_%s", label)),
+        rxBytesCellularMetric: clientmetric.NewCounter(fmt.Sprintf("sockstats_rx_bytes_cellular_%s", label)),
     }

     // We might be called before setLinkMonitor has been called (and we've
|
||||||
}
|
}
|
||||||
if sockStats.currentInterfaceCellular.Load() {
|
if sockStats.currentInterfaceCellular.Load() {
|
||||||
sockStats.rxBytesCellularMetric.Add(int64(n))
|
sockStats.rxBytesCellularMetric.Add(int64(n))
|
||||||
|
counters.rxBytesCellularMetric.Add(int64(n))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
didWrite := func(n int) {
|
didWrite := func(n int) {
|
||||||
|
@@ -132,6 +135,7 @@ func withSockStats(ctx context.Context, label Label) context.Context {
         }
         if sockStats.currentInterfaceCellular.Load() {
             sockStats.txBytesCellularMetric.Add(int64(n))
+            counters.txBytesCellularMetric.Add(int64(n))
         }
     }
     willOverwrite := func(trace *net.SockTrace) {
@@ -235,6 +235,7 @@ func beIncubator(args []string) error {
     if err == nil && sessionCloser != nil {
         defer sessionCloser()
     }
+
     var groupIDs []int
     for _, g := range strings.Split(ia.groups, ",") {
         gid, err := strconv.ParseInt(g, 10, 32)
@@ -244,22 +245,10 @@ func beIncubator(args []string) error {
         groupIDs = append(groupIDs, int(gid))
     }

-    if err := setGroups(groupIDs); err != nil {
+    if err := dropPrivileges(logf, int(ia.uid), ia.gid, groupIDs); err != nil {
         return err
     }
-    if egid := os.Getegid(); egid != ia.gid {
-        if err := syscall.Setgid(int(ia.gid)); err != nil {
-            logf(err.Error())
-            os.Exit(1)
-        }
-    }
-    if euid != ia.uid {
-        // Switch users if required before starting the desired process.
-        if err := syscall.Setuid(int(ia.uid)); err != nil {
-            logf(err.Error())
-            os.Exit(1)
-        }
-    }
     if ia.isSFTP {
         logf("handling sftp")

@@ -304,6 +293,108 @@ func beIncubator(args []string) error {
         return err
     }

+// TODO(andrew-d): verify that this works in more configurations before
+// enabling by default.
+const assertDropPrivileges = false
+
+// dropPrivileges contains all the logic for dropping privileges to a different
+// UID, GID, and set of supplementary groups. This function is
+// security-sensitive and ordering-dependent; please be very cautious if/when
+// refactoring.
+//
+// WARNING: if you change this function, you *MUST* run the TestDropPrivileges
+// test in this package as root on at least Linux, FreeBSD and Darwin. This can
+// be done by running:
+//
+//    go test -c ./ssh/tailssh/ && sudo ./tailssh.test -test.v -test.run TestDropPrivileges
+func dropPrivileges(logf logger.Logf, wantUid, wantGid int, supplementaryGroups []int) error {
+    fatalf := func(format string, args ...any) {
+        logf(format, args...)
+        os.Exit(1)
+    }
+
+    euid := os.Geteuid()
+    egid := os.Getegid()
+
+    if runtime.GOOS == "darwin" || runtime.GOOS == "freebsd" {
+        // On FreeBSD and Darwin, the first entry returned from the
+        // getgroups(2) syscall is the egid, and changing it with
+        // setgroups(2) changes the egid of the process. This is
+        // technically a violation of the POSIX standard; see the
+        // following article for more detail:
+        // https://www.usenix.org/system/files/login/articles/325-tsafrir.pdf
+        //
+        // In this case, we add an entry at the beginning of the
+        // groupIDs list containing the expected gid if it's not
+        // already there, which modifies the egid and additional groups
+        // as one unit.
+        if len(supplementaryGroups) == 0 || supplementaryGroups[0] != wantGid {
+            supplementaryGroups = append([]int{wantGid}, supplementaryGroups...)
+        }
+    }
+
+    if err := setGroups(supplementaryGroups); err != nil {
+        return err
+    }
+    if egid != wantGid {
+        // On FreeBSD and Darwin, we may have already called the
+        // equivalent of setegid(wantGid) via the call to setGroups,
+        // above. However, per the manpage, setgid(getegid()) is an
+        // allowed operation regardless of privilege level.
+        //
+        // FreeBSD:
+        //    The setgid() system call is permitted if the specified ID
+        //    is equal to the real group ID or the effective group ID
+        //    of the process, or if the effective user ID is that of
+        //    the super user.
+        //
+        // Darwin:
+        //    The setgid() function is permitted if the effective
+        //    user ID is that of the super user, or if the specified
+        //    group ID is the same as the effective group ID. If
+        //    not, but the specified group ID is the same as the real
+        //    group ID, setgid() will set the effective group ID to
+        //    the real group ID.
+        if err := syscall.Setgid(wantGid); err != nil {
+            fatalf("Setgid(%d): %v", wantGid, err)
+        }
+    }
+    if euid != wantUid {
+        // Switch users if required before starting the desired process.
+        if err := syscall.Setuid(wantUid); err != nil {
+            fatalf("Setuid(%d): %v", wantUid, err)
+        }
+    }
+
+    // If we changed either the UID or GID, defensively assert that we
+    // cannot reset the it back to our original values, and that the
+    // current egid/euid are the expected values after we change
+    // everything; if not, we exit the process.
+    if assertDropPrivileges {
+        if egid != wantGid {
+            if err := syscall.Setegid(egid); err == nil {
+                fatalf("unexpectedly able to set egid back to %d", egid)
+            }
+        }
+        if euid != wantUid {
+            if err := syscall.Seteuid(euid); err == nil {
+                fatalf("unexpectedly able to set euid back to %d", euid)
+            }
+        }
+
+        if got := os.Getegid(); got != wantGid {
+            fatalf("got egid=%d, want %d", got, wantGid)
+        }
+        if got := os.Geteuid(); got != wantUid {
+            fatalf("got euid=%d, want %d", got, wantUid)
+        }
+
+        // TODO(andrew-d): assert that our supplementary groups are correct
+    }
+
+    return nil
+}
+
 // launchProcess launches an incubator process for the provided session.
 // It is responsible for configuring the process execution environment.
 // The caller can wait for the process to exit by calling cmd.Wait().
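The new dropPrivileges helper centralizes the ordering that matters here: supplementary groups first, then the GID, then the UID. Below is a minimal Linux-only sketch of that ordering; it is not the repository's implementation and omits the Darwin/FreeBSD egid handling and the defensive re-assertion shown in the diff:

```go
// Minimal sketch of the privilege-drop ordering, assuming Linux semantics.
//go:build linux

package main

import (
	"fmt"
	"syscall"
)

func dropTo(uid, gid int, groups []int) error {
	// Supplementary groups must be set while we still have privileges.
	if err := syscall.Setgroups(groups); err != nil {
		return fmt.Errorf("Setgroups: %w", err)
	}
	if err := syscall.Setgid(gid); err != nil {
		return fmt.Errorf("Setgid(%d): %w", gid, err)
	}
	// Setuid comes last: once the UID is dropped, the process may no longer
	// be allowed to change its group IDs.
	if err := syscall.Setuid(uid); err != nil {
		return fmt.Errorf("Setuid(%d): %w", uid, err)
	}
	return nil
}

func main() {
	// Placeholder IDs; a real caller would pass the target account's IDs.
	if err := dropTo(1000, 1000, []int{1000}); err != nil {
		panic(err)
	}
	fmt.Println("privileges dropped")
}
```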
@@ -0,0 +1,295 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+//go:build linux || darwin || freebsd || openbsd || netbsd || dragonfly
+
+package tailssh
+
+import (
+    "encoding/json"
+    "errors"
+    "os"
+    "os/exec"
+    "os/user"
+    "path/filepath"
+    "reflect"
+    "regexp"
+    "runtime"
+    "strconv"
+    "syscall"
+    "testing"
+
+    "golang.org/x/exp/slices"
+    "tailscale.com/types/logger"
+)
+
+func TestDropPrivileges(t *testing.T) {
+    type SubprocInput struct {
+        UID int
+        GID int
+        AdditionalGroups []int
+    }
+    type SubprocOutput struct {
+        UID int
+        GID int
+        EUID int
+        EGID int
+        AdditionalGroups []int
+    }
+
+    if v := os.Getenv("TS_TEST_DROP_PRIVILEGES_CHILD"); v != "" {
+        t.Logf("in child process")
+
+        var input SubprocInput
+        if err := json.Unmarshal([]byte(v), &input); err != nil {
+            t.Fatal(err)
+        }
+
+        // Get a handle to our provided JSON file before dropping privs.
+        f := os.NewFile(3, "out.json")
+
+        // We're in our subprocess; actually drop privileges now.
+        dropPrivileges(t.Logf, input.UID, input.GID, input.AdditionalGroups)
+
+        additional, _ := syscall.Getgroups()
+
+        // Print our IDs
+        json.NewEncoder(f).Encode(SubprocOutput{
+            UID: os.Getuid(),
+            GID: os.Getgid(),
+            EUID: os.Geteuid(),
+            EGID: os.Getegid(),
+            AdditionalGroups: additional,
+        })
+
+        // Close output file to ensure that it's flushed to disk before we exit
+        f.Close()
+
+        // Always exit the process now that we have a different
+        // UID/GID/etc.; we don't want the Go test framework to try and
+        // clean anything up, since it might no longer have access.
+        os.Exit(0)
+    }
+
+    if os.Getuid() != 0 {
+        t.Skip("test only works when run as root")
+    }
+
+    rerunSelf := func(t *testing.T, input SubprocInput) []byte {
+        fpath := filepath.Join(t.TempDir(), "out.json")
+        outf, err := os.Create(fpath)
+        if err != nil {
+            t.Fatal(err)
+        }
+
+        inputb, err := json.Marshal(input)
+        if err != nil {
+            t.Fatal(err)
+        }
+
+        cmd := exec.Command(os.Args[0], "-test.v", "-test.run", "^"+regexp.QuoteMeta(t.Name())+"$")
+        cmd.Env = append(os.Environ(), "TS_TEST_DROP_PRIVILEGES_CHILD="+string(inputb))
+        cmd.ExtraFiles = []*os.File{outf}
+        cmd.Stdout = logger.FuncWriter(logger.WithPrefix(t.Logf, "child: "))
+        cmd.Stderr = logger.FuncWriter(logger.WithPrefix(t.Logf, "child: "))
+        if err := cmd.Run(); err != nil {
+            t.Fatal(err)
+        }
+        outf.Close()
+
+        jj, err := os.ReadFile(fpath)
+        if err != nil {
+            t.Fatal(err)
+        }
+        return jj
+    }
+
+    // We want to ensure we're not colliding with existing users; find some
+    // unused UIDs and GIDs for the tests we run.
+    uid1 := findUnusedUID(t)
+    gid1 := findUnusedGID(t)
+    gid2 := findUnusedGID(t, gid1)
+    gid3 := findUnusedGID(t, gid1, gid2)
+
+    // For some tests, we want a UID/GID pair with the same numerical
+    // value; this finds one.
+    uidgid1 := findUnusedUIDGID(t, uid1, gid1, gid2, gid3)
+
+    t.Logf("uid1=%d gid1=%d gid2=%d gid3=%d uidgid1=%d",
+        uid1, gid1, gid2, gid3, uidgid1)
+
+    testCases := []struct {
+        name string
+        uid int
+        gid int
+        additionalGroups []int
+    }{
+        {
+            name: "all_different_values",
+            uid: uid1,
+            gid: gid1,
+            additionalGroups: []int{gid2, gid3},
+        },
+        {
+            name: "no_additional_groups",
+            uid: uid1,
+            gid: gid1,
+            additionalGroups: []int{},
+        },
+        // This is a regression test for the following bug, triggered
+        // on Darwin & FreeBSD:
+        // https://github.com/tailscale/tailscale/issues/7616
+        {
+            name: "same_values",
+            uid: uidgid1,
+            gid: uidgid1,
+            additionalGroups: []int{uidgid1},
+        },
+    }
+
+    for _, tt := range testCases {
+        t.Run(tt.name, func(t *testing.T) {
+            subprocOut := rerunSelf(t, SubprocInput{
+                UID: tt.uid,
+                GID: tt.gid,
+                AdditionalGroups: tt.additionalGroups,
+            })
+
+            var out SubprocOutput
+            if err := json.Unmarshal(subprocOut, &out); err != nil {
+                t.Logf("%s", subprocOut)
+                t.Fatal(err)
+            }
+            t.Logf("output: %+v", out)
+
+            if out.UID != tt.uid {
+                t.Errorf("got uid %d; want %d", out.UID, tt.uid)
+            }
+            if out.GID != tt.gid {
+                t.Errorf("got gid %d; want %d", out.GID, tt.gid)
+            }
+            if out.EUID != tt.uid {
+                t.Errorf("got euid %d; want %d", out.EUID, tt.uid)
+            }
+            if out.EGID != tt.gid {
+                t.Errorf("got egid %d; want %d", out.EGID, tt.gid)
+            }
+
+            // On FreeBSD and Darwin, the set of additional groups
+            // is prefixed with the egid; handle that case by
+            // modifying our expected set.
+            wantGroups := make(map[int]bool)
+            for _, id := range tt.additionalGroups {
+                wantGroups[id] = true
+            }
+            if runtime.GOOS == "darwin" || runtime.GOOS == "freebsd" {
+                wantGroups[tt.gid] = true
+            }
+
+            gotGroups := make(map[int]bool)
+            for _, id := range out.AdditionalGroups {
+                gotGroups[id] = true
+            }
+
+            if !reflect.DeepEqual(gotGroups, wantGroups) {
+                t.Errorf("got additional groups %+v; want %+v", gotGroups, wantGroups)
+            }
+        })
+    }
+}
+
+func findUnusedUID(t *testing.T, not ...int) int {
+    for i := 1000; i < 65535; i++ {
+        // Skip UIDs that might be valid
+        if maybeValidUID(i) {
+            continue
+        }
+
+        // Skip UIDs that we're avoiding
+        if slices.Contains(not, i) {
+            continue
+        }
+
+        // Not a valid UID, not one we're avoiding... all good!
+        return i
+    }
+
+    t.Fatalf("unable to find an unused UID")
+    return -1
+}
+
+func findUnusedGID(t *testing.T, not ...int) int {
+    for i := 1000; i < 65535; i++ {
+        if maybeValidGID(i) {
+            continue
+        }
+
+        // Skip GIDs that we're avoiding
+        if slices.Contains(not, i) {
+            continue
+        }
+
+        // Not a valid GID, not one we're avoiding... all good!
+        return i
+    }
+
+    t.Fatalf("unable to find an unused GID")
+    return -1
+}
+
+func findUnusedUIDGID(t *testing.T, not ...int) int {
+    for i := 1000; i < 65535; i++ {
+        if maybeValidUID(i) || maybeValidGID(i) {
+            continue
+        }
+
+        // Skip IDs that we're avoiding
+        if slices.Contains(not, i) {
+            continue
+        }
+
+        // Not a valid ID, not one we're avoiding... all good!
+        return i
+    }
+
+    t.Fatalf("unable to find an unused UID/GID pair")
+    return -1
+}
+
+func maybeValidUID(id int) bool {
+    _, err := user.LookupId(strconv.Itoa(id))
+    if err == nil {
+        return true
+    }
+
+    var u1 user.UnknownUserIdError
+    if errors.As(err, &u1) {
+        return false
+    }
+    var u2 user.UnknownUserError
+    if errors.As(err, &u2) {
+        return false
+    }
+
+    // Some other error; might be valid
+    return true
+}
+
+func maybeValidGID(id int) bool {
+    _, err := user.LookupGroupId(strconv.Itoa(id))
+    if err == nil {
+        return true
+    }
+
+    var u1 user.UnknownGroupIdError
+    if errors.As(err, &u1) {
+        return false
+    }
+    var u2 user.UnknownGroupError
+    if errors.As(err, &u2) {
+        return false
+    }
+
+    // Some other error; might be valid
+    return true
+}
@@ -35,6 +35,7 @@ import (
     "tailscale.com/ipn/ipnlocal"
     "tailscale.com/logtail/backoff"
     "tailscale.com/net/tsaddr"
+    "tailscale.com/net/tsdial"
     "tailscale.com/tailcfg"
     "tailscale.com/tempfork/gliderlabs/ssh"
     "tailscale.com/types/logger"
@@ -62,7 +63,7 @@ type ipnLocalBackend interface {
     NetMap() *netmap.NetworkMap
     WhoIs(ipp netip.AddrPort) (n *tailcfg.Node, u tailcfg.UserProfile, ok bool)
     DoNoiseRequest(req *http.Request) (*http.Response, error)
-    TailscaleVarRoot() string
+    Dialer() *tsdial.Dialer
 }

 type server struct {
@@ -77,11 +78,33 @@ type server struct {

     // mu protects the following
     mu sync.Mutex
+    httpc *http.Client // for calling out to peers.
     activeConns map[*conn]bool // set; value is always true
     fetchPublicKeysCache map[string]pubKeyCacheEntry // by https URL
     shutdownCalled bool
 }

+// sessionRecordingClient returns an http.Client that uses srv.lb.Dialer() to
+// dial connections. This is used to make requests to the session recording
+// server to upload session recordings.
+func (srv *server) sessionRecordingClient() *http.Client {
+    srv.mu.Lock()
+    defer srv.mu.Unlock()
+    if srv.httpc != nil {
+        return srv.httpc
+    }
+    tr := http.DefaultTransport.(*http.Transport).Clone()
+    tr.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
+        ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
+        defer cancel()
+        return srv.lb.Dialer().UserDial(ctx, network, addr)
+    }
+    srv.httpc = &http.Client{
+        Transport: tr,
+    }
+    return srv.httpc
+}
+
 func (srv *server) now() time.Time {
     if srv != nil && srv.timeNow != nil {
         return srv.timeNow()
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// recordSSH is a temporary dev knob to test the SSH recording
|
|
||||||
// functionality and support off-node streaming.
|
|
||||||
//
|
|
||||||
// TODO(bradfitz,maisem): move this to SSHPolicy.
|
|
||||||
var recordSSH = envknob.RegisterBool("TS_DEBUG_LOG_SSH")
|
|
||||||
|
|
||||||
// run is the entrypoint for a newly accepted SSH session.
|
// run is the entrypoint for a newly accepted SSH session.
|
||||||
//
|
//
|
||||||
// It handles ss once it's been accepted and determined
|
// It handles ss once it's been accepted and determined
|
||||||
|
@@ -1048,7 +1065,12 @@ func (ss *sshSession) run() {
         var err error
         rec, err = ss.startNewRecording()
         if err != nil {
+            var uve userVisibleError
+            if errors.As(err, &uve) {
+                fmt.Fprintf(ss, "%s\r\n", uve)
+            } else {
                 fmt.Fprintf(ss, "can't start new recording\r\n")
+            }
             ss.logf("startNewRecording: %v", err)
             ss.Exit(1)
             return
@ -1060,6 +1082,13 @@ func (ss *sshSession) run() {
|
||||||
err := ss.launchProcess()
|
err := ss.launchProcess()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logf("start failed: %v", err.Error())
|
logf("start failed: %v", err.Error())
|
||||||
|
if errors.Is(err, context.Canceled) {
|
||||||
|
err := context.Cause(ss.ctx)
|
||||||
|
var uve userVisibleError
|
||||||
|
if errors.As(err, &uve) {
|
||||||
|
fmt.Fprintf(ss, "%s\r\n", uve)
|
||||||
|
}
|
||||||
|
}
|
||||||
ss.Exit(1)
|
ss.Exit(1)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
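The error paths in the two run() hunks above lean on a cancel-cause pattern: the upload goroutine cancels the session context with a userVisibleError, and run() surfaces that message to the SSH client when the launched process dies with context.Canceled. A rough, self-contained sketch of that plumbing, assuming approximate shapes for userVisibleError and the cancel function (the real definitions live elsewhere in this package):

package sketch

import (
	"context"
	"errors"
	"fmt"
	"io"
)

// userVisibleError pairs an internal error with a short message that is safe
// to print on the user's terminal. (Approximation of the type used above.)
type userVisibleError struct {
	msg string
	error
}

func (e userVisibleError) Error() string { return e.msg + ": " + e.error.Error() }

// failUpload sketches the producer side: the upload goroutine cancels the
// session context with a userVisibleError as the cause.
func failUpload(cancel context.CancelCauseFunc, err error) {
	cancel(userVisibleError{msg: "recording: error sending recording", error: err})
}

// reportLaunchFailure sketches the consumer side in run(): if the session
// context was canceled and its cause is user-visible, print that message to
// the client before exiting.
func reportLaunchFailure(ctx context.Context, term io.Writer, launchErr error) {
	if errors.Is(launchErr, context.Canceled) {
		var uve userVisibleError
		if errors.As(context.Cause(ctx), &uve) {
			fmt.Fprintf(term, "%s\r\n", uve)
		}
	}
}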
|
@ -1125,12 +1154,19 @@ func (ss *sshSession) run() {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// recorders returns the list of recorders to use for this session.
|
||||||
|
// If the final action has a non-empty list of recorders, that list is
|
||||||
|
// returned. Otherwise, the list of recorders from the initial action
|
||||||
|
// is returned.
|
||||||
|
func (ss *sshSession) recorders() []netip.AddrPort {
|
||||||
|
if len(ss.conn.finalAction.Recorders) > 0 {
|
||||||
|
return ss.conn.finalAction.Recorders
|
||||||
|
}
|
||||||
|
return ss.conn.action0.Recorders
|
||||||
|
}
|
||||||
|
|
||||||
func (ss *sshSession) shouldRecord() bool {
|
func (ss *sshSession) shouldRecord() bool {
|
||||||
// for now only record pty sessions
|
return len(ss.recorders()) > 0
|
||||||
// TODO(bradfitz,maisem): make configurable on SSHPolicy and
|
|
||||||
// support recording non-pty stuff too.
|
|
||||||
_, _, isPtyReq := ss.Pty()
|
|
||||||
return recordSSH() && isPtyReq
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type sshConnInfo struct {
|
type sshConnInfo struct {
|
||||||
|
@ -1312,11 +1348,67 @@ func randBytes(n int) []byte {
|
||||||
return b
|
return b
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// CastHeader is the header of an asciinema file.
|
||||||
|
type CastHeader struct {
|
||||||
|
// Version is the asciinema file format version.
|
||||||
|
Version int `json:"version"`
|
||||||
|
|
||||||
|
// Width is the terminal width in characters.
|
||||||
|
// It is non-zero for Pty sessions.
|
||||||
|
Width int `json:"width"`
|
||||||
|
|
||||||
|
// Height is the terminal height in characters.
|
||||||
|
// It is non-zero for Pty sessions.
|
||||||
|
Height int `json:"height"`
|
||||||
|
|
||||||
|
// Timestamp is the unix timestamp of when the recording started.
|
||||||
|
Timestamp int64 `json:"timestamp"`
|
||||||
|
|
||||||
|
// Env is the environment variables of the session.
|
||||||
|
// Only "TERM" is set (2023-03-22).
|
||||||
|
Env map[string]string `json:"env"`
|
||||||
|
|
||||||
|
// Command is the command that was executed.
|
||||||
|
// Typically empty for shell sessions.
|
||||||
|
Command string `json:"command,omitempty"`
|
||||||
|
|
||||||
|
// Tailscale-specific fields:
|
||||||
|
// SrcNode is the FQDN of the node originating the connection.
|
||||||
|
// It is also the MagicDNS name for the node.
|
||||||
|
// It does not have a trailing dot.
|
||||||
|
// e.g. "host.tail-scale.ts.net"
|
||||||
|
SrcNode string `json:"srcNode"`
|
||||||
|
|
||||||
|
// SrcNodeID is the node ID of the node originating the connection.
|
||||||
|
SrcNodeID tailcfg.StableNodeID `json:"srcNodeID"`
|
||||||
|
|
||||||
|
// SrcNodeTags is the list of tags on the node originating the connection (if any).
|
||||||
|
SrcNodeTags []string `json:"srcNodeTags,omitempty"`
|
||||||
|
|
||||||
|
// SrcNodeUserID is the user ID of the node originating the connection (if not tagged).
|
||||||
|
SrcNodeUserID tailcfg.UserID `json:"srcNodeUserID,omitempty"` // if not tagged
|
||||||
|
|
||||||
|
// SrcNodeUser is the LoginName of the node originating the connection (if not tagged).
|
||||||
|
SrcNodeUser string `json:"srcNodeUser,omitempty"`
|
||||||
|
|
||||||
|
// SSHUser is the username as presented by the client.
|
||||||
|
SSHUser string `json:"sshUser"` // as presented by the client
|
||||||
|
|
||||||
|
// LocalUser is the effective username on the server.
|
||||||
|
LocalUser string `json:"localUser"`
|
||||||
|
}
|
||||||
|
|
||||||
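Since CastHeader is what lands on the first line of the uploaded .cast stream, here is a hedged, self-contained sketch of what that JSON line looks like once marshaled; the field values (node name, user, terminal size) are illustrative only, and the local type is a trimmed copy of the struct above.

package main

import (
	"encoding/json"
	"fmt"
)

// castHeader is a trimmed copy of the CastHeader fields used in this sketch.
type castHeader struct {
	Version   int               `json:"version"`
	Width     int               `json:"width"`
	Height    int               `json:"height"`
	Timestamp int64             `json:"timestamp"`
	Env       map[string]string `json:"env"`
	Command   string            `json:"command,omitempty"`
	SrcNode   string            `json:"srcNode"`
	SSHUser   string            `json:"sshUser"`
	LocalUser string            `json:"localUser"`
}

func main() {
	hdr := castHeader{
		Version:   2,
		Width:     80,
		Height:    24,
		Timestamp: 1679500000,
		Env:       map[string]string{"TERM": "xterm-256color"},
		SrcNode:   "host.tail-scale.ts.net", // example MagicDNS name
		SSHUser:   "alice",
		LocalUser: "alice",
	}
	j, err := json.Marshal(hdr)
	if err != nil {
		panic(err)
	}
	// Printed as the first line of the asciinema v2 stream, e.g.:
	// {"version":2,"width":80,"height":24,"timestamp":1679500000,...}
	fmt.Println(string(j))
}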
// startNewRecording starts a new SSH session recording.
|
// startNewRecording starts a new SSH session recording.
|
||||||
//
|
|
||||||
// It writes an asciinema file to
|
|
||||||
// $TAILSCALE_VAR_ROOT/ssh-sessions/ssh-session-<unixtime>-*.cast.
|
|
||||||
func (ss *sshSession) startNewRecording() (_ *recording, err error) {
|
func (ss *sshSession) startNewRecording() (_ *recording, err error) {
|
||||||
|
recorders := ss.recorders()
|
||||||
|
if len(recorders) == 0 {
|
||||||
|
return nil, errors.New("no recorders configured")
|
||||||
|
}
|
||||||
|
recorder := recorders[0]
|
||||||
|
if len(recorders) > 1 {
|
||||||
|
ss.logf("warning: multiple recorders configured, using first one: %v", recorder)
|
||||||
|
}
|
||||||
|
|
||||||
var w ssh.Window
|
var w ssh.Window
|
||||||
if ptyReq, _, isPtyReq := ss.Pty(); isPtyReq {
|
if ptyReq, _, isPtyReq := ss.Pty(); isPtyReq {
|
||||||
w = ptyReq.Window
|
w = ptyReq.Window
|
||||||
|
@ -1332,39 +1424,59 @@ func (ss *sshSession) startNewRecording() (_ *recording, err error) {
|
||||||
ss: ss,
|
ss: ss,
|
||||||
start: now,
|
start: now,
|
||||||
}
|
}
|
||||||
varRoot := ss.conn.srv.lb.TailscaleVarRoot()
|
|
||||||
if varRoot == "" {
|
pr, pw := io.Pipe()
|
||||||
return nil, errors.New("no var root for recording storage")
|
|
||||||
}
|
// We want to use a background context for uploading and not ss.ctx.
|
||||||
dir := filepath.Join(varRoot, "ssh-sessions")
|
// ss.ctx is closed when the session closes, but we don't want to break the upload at that time.
|
||||||
if err := os.MkdirAll(dir, 0700); err != nil {
|
// Instead we want to wait for the session to close the writer when it finishes.
|
||||||
|
ctx := context.Background()
|
||||||
|
req, err := http.NewRequestWithContext(ctx, "POST", fmt.Sprintf("http://%s:%d/record", recorder.Addr(), recorder.Port()), pr)
|
||||||
|
if err != nil {
|
||||||
|
pr.Close()
|
||||||
|
pw.Close()
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
defer func() {
|
// We want to wait for the server to respond with 100 Continue to notify us
|
||||||
|
// that it's ready to receive data. We do this to block the session from
|
||||||
|
// starting until the server is ready to receive data.
|
||||||
|
// It also allows the server to reject the request before we start sending
|
||||||
|
// data.
|
||||||
|
req.Header.Set("Expect", "100-continue")
|
||||||
|
go func() {
|
||||||
|
defer pw.Close()
|
||||||
|
ss.logf("starting asciinema recording to %s", recorder)
|
||||||
|
hc := ss.conn.srv.sessionRecordingClient()
|
||||||
|
resp, err := hc.Do(req)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
rec.Close()
|
err := fmt.Errorf("recording: error sending recording: %w", err)
|
||||||
|
ss.logf("%v", err)
|
||||||
|
ss.cancelCtx(userVisibleError{
|
||||||
|
msg: "recording: error sending recording",
|
||||||
|
error: err,
|
||||||
|
})
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer resp.Body.Close()
|
||||||
|
defer ss.cancelCtx(errors.New("recording: done"))
|
||||||
|
if resp.StatusCode != http.StatusOK {
|
||||||
|
err := fmt.Errorf("recording: server responded with %s", resp.Status)
|
||||||
|
ss.logf("%v", err)
|
||||||
|
ss.cancelCtx(userVisibleError{
|
||||||
|
msg: "recording server responded with: " + resp.Status,
|
||||||
|
error: err,
|
||||||
|
})
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
f, err := os.CreateTemp(dir, fmt.Sprintf("ssh-session-%v-*.cast", now.UnixNano()))
|
rec.out = pw
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
rec.out = f
|
|
||||||
|
|
||||||
// {"version": 2, "width": 221, "height": 84, "timestamp": 1647146075, "env": {"SHELL": "/bin/bash", "TERM": "screen"}}
|
ch := CastHeader{
|
||||||
type CastHeader struct {
|
|
||||||
Version int `json:"version"`
|
|
||||||
Width int `json:"width"`
|
|
||||||
Height int `json:"height"`
|
|
||||||
Timestamp int64 `json:"timestamp"`
|
|
||||||
Env map[string]string `json:"env"`
|
|
||||||
}
|
|
||||||
j, err := json.Marshal(CastHeader{
|
|
||||||
Version: 2,
|
Version: 2,
|
||||||
Width: w.Width,
|
Width: w.Width,
|
||||||
Height: w.Height,
|
Height: w.Height,
|
||||||
Timestamp: now.Unix(),
|
Timestamp: now.Unix(),
|
||||||
|
Command: strings.Join(ss.Command(), " "),
|
||||||
Env: map[string]string{
|
Env: map[string]string{
|
||||||
"TERM": term,
|
"TERM": term,
|
||||||
// TODO(bradfitz): anything else important?
|
// TODO(bradfitz): anything else important?
|
||||||
|
@ -1376,15 +1488,29 @@ func (ss *sshSession) startNewRecording() (_ *recording, err error) {
|
||||||
// it. Then we can (1) make the cmd, (2) start the
|
// it. Then we can (1) make the cmd, (2) start the
|
||||||
// recording, (3) start the process.
|
// recording, (3) start the process.
|
||||||
},
|
},
|
||||||
})
|
SSHUser: ss.conn.info.sshUser,
|
||||||
|
LocalUser: ss.conn.localUser.Username,
|
||||||
|
SrcNode: strings.TrimSuffix(ss.conn.info.node.Name, "."),
|
||||||
|
SrcNodeID: ss.conn.info.node.StableID,
|
||||||
|
}
|
||||||
|
if !ss.conn.info.node.IsTagged() {
|
||||||
|
ch.SrcNodeUser = ss.conn.info.uprof.LoginName
|
||||||
|
ch.SrcNodeUserID = ss.conn.info.node.User
|
||||||
|
} else {
|
||||||
|
ch.SrcNodeTags = ss.conn.info.node.Tags
|
||||||
|
}
|
||||||
|
j, err := json.Marshal(ch)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
f.Close()
|
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
ss.logf("starting asciinema recording to %s", f.Name())
|
|
||||||
j = append(j, '\n')
|
j = append(j, '\n')
|
||||||
if _, err := f.Write(j); err != nil {
|
if _, err := pw.Write(j); err != nil {
|
||||||
f.Close()
|
if errors.Is(err, io.ErrClosedPipe) && ss.ctx.Err() != nil {
|
||||||
|
// If we got an io.ErrClosedPipe, it's likely because
|
||||||
|
// the recording server closed the connection on us. Return
|
||||||
|
// the original context error instead.
|
||||||
|
return nil, context.Cause(ss.ctx)
|
||||||
|
}
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return rec, nil
|
return rec, nil
|
||||||
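The recorder address in SSHAction.Recorders is expected to serve POST /record and consume the streamed asciinema data. The receiving side is not part of this change; as a purely hypothetical sketch, a minimal recording server that writes each upload to a local .cast file could look like this (the listen address and file naming are assumptions):

package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
	"os"
	"time"
)

func main() {
	http.HandleFunc("/record", func(w http.ResponseWriter, r *http.Request) {
		if r.Method != http.MethodPost {
			http.Error(w, "POST only", http.StatusMethodNotAllowed)
			return
		}
		f, err := os.Create(fmt.Sprintf("ssh-session-%d.cast", time.Now().UnixNano()))
		if err != nil {
			http.Error(w, err.Error(), http.StatusInternalServerError)
			return
		}
		defer f.Close()
		// Reading r.Body is what makes net/http send "100 Continue" to a
		// client that set the Expect header, which unblocks the SSH session
		// on the sending side.
		if _, err := io.Copy(f, r.Body); err != nil {
			log.Printf("upload aborted: %v", err)
		}
	})
	log.Fatal(http.ListenAndServe(":8080", nil))
}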
|
@ -1396,7 +1522,7 @@ type recording struct {
|
||||||
start time.Time
|
start time.Time
|
||||||
|
|
||||||
mu sync.Mutex // guards writes to, close of out
|
mu sync.Mutex // guards writes to, close of out
|
||||||
out *os.File // nil if closed
|
out io.WriteCloser
|
||||||
}
|
}
|
||||||
|
|
||||||
func (r *recording) Close() error {
|
func (r *recording) Close() error {
|
||||||
|
@ -1415,10 +1541,17 @@ func (r *recording) Close() error {
|
||||||
// The dir should be "i" for input or "o" for output.
|
// The dir should be "i" for input or "o" for output.
|
||||||
//
|
//
|
||||||
// If r is nil, it returns w unchanged.
|
// If r is nil, it returns w unchanged.
|
||||||
|
//
|
||||||
|
// Currently (2023-03-21) we only record output, not input.
|
||||||
func (r *recording) writer(dir string, w io.Writer) io.Writer {
|
func (r *recording) writer(dir string, w io.Writer) io.Writer {
|
||||||
if r == nil {
|
if r == nil {
|
||||||
return w
|
return w
|
||||||
}
|
}
|
||||||
|
if dir == "i" {
|
||||||
|
// TODO: record input? Maybe not, since it might contain
|
||||||
|
// passwords.
|
||||||
|
return w
|
||||||
|
}
|
||||||
return &loggingWriter{r, dir, w}
|
return &loggingWriter{r, dir, w}
|
||||||
}
|
}
|
||||||
|
|
||||||
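loggingWriter itself is outside this hunk; for context, each write it records becomes an asciinema v2 event line, a JSON array of [elapsed-seconds, "o", data]. A hypothetical, self-contained sketch of such an output-event writer (the names and exact behavior are assumptions, not the real loggingWriter):

package sketch

import (
	"encoding/json"
	"io"
	"sync"
	"time"
)

// castEventWriter wraps the session's output stream and mirrors every write
// into the recording as an asciinema "o" event line.
type castEventWriter struct {
	mu    sync.Mutex
	start time.Time // recording start, used for the elapsed-seconds field
	cast  io.Writer // the recording stream (rec.out above)
	dst   io.Writer // the real destination, i.e. the SSH client
}

func (w *castEventWriter) Write(p []byte) (int, error) {
	ev, err := json.Marshal([]any{time.Since(w.start).Seconds(), "o", string(p)})
	if err != nil {
		return 0, err
	}
	w.mu.Lock()
	_, werr := w.cast.Write(append(ev, '\n'))
	w.mu.Unlock()
	if werr != nil {
		return 0, werr
	}
	return w.dst.Write(p) // pass the output through to the client unchanged
}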
|
|
|
@ -7,6 +7,7 @@ package tailssh
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
"context"
|
||||||
"crypto/ed25519"
|
"crypto/ed25519"
|
||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
"crypto/sha256"
|
"crypto/sha256"
|
||||||
|
@ -14,6 +15,7 @@ import (
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
"io/ioutil"
|
||||||
"net"
|
"net"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/http/httptest"
|
"net/http/httptest"
|
||||||
|
@ -236,6 +238,10 @@ var (
|
||||||
testSignerOnce sync.Once
|
testSignerOnce sync.Once
|
||||||
)
|
)
|
||||||
|
|
||||||
|
func (ts *localState) Dialer() *tsdial.Dialer {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func (ts *localState) GetSSH_HostKeys() ([]gossh.Signer, error) {
|
func (ts *localState) GetSSH_HostKeys() ([]gossh.Signer, error) {
|
||||||
testSignerOnce.Do(func() {
|
testSignerOnce.Do(func() {
|
||||||
_, priv, err := ed25519.GenerateKey(rand.Reader)
|
_, priv, err := ed25519.GenerateKey(rand.Reader)
|
||||||
|
@ -319,9 +325,213 @@ func newSSHRule(action *tailcfg.SSHAction) *tailcfg.SSHRule {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func TestSSHRecordingCancelsSessionsOnUploadFailure(t *testing.T) {
|
||||||
|
if runtime.GOOS != "linux" && runtime.GOOS != "darwin" {
|
||||||
|
t.Skipf("skipping on %q; only runs on linux and darwin", runtime.GOOS)
|
||||||
|
}
|
||||||
|
|
||||||
|
var handler http.HandlerFunc
|
||||||
|
recordingServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
handler(w, r)
|
||||||
|
}))
|
||||||
|
defer recordingServer.Close()
|
||||||
|
|
||||||
|
s := &server{
|
||||||
|
logf: t.Logf,
|
||||||
|
httpc: recordingServer.Client(),
|
||||||
|
lb: &localState{
|
||||||
|
sshEnabled: true,
|
||||||
|
matchingRule: newSSHRule(
|
||||||
|
&tailcfg.SSHAction{
|
||||||
|
Accept: true,
|
||||||
|
Recorders: []netip.AddrPort{
|
||||||
|
netip.MustParseAddrPort(recordingServer.Listener.Addr().String()),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
defer s.Shutdown()
|
||||||
|
|
||||||
|
const sshUser = "alice"
|
||||||
|
cfg := &gossh.ClientConfig{
|
||||||
|
User: sshUser,
|
||||||
|
HostKeyCallback: gossh.InsecureIgnoreHostKey(),
|
||||||
|
}
|
||||||
|
|
||||||
|
tests := []struct {
|
||||||
|
name string
|
||||||
|
handler func(w http.ResponseWriter, r *http.Request)
|
||||||
|
sshCommand string
|
||||||
|
wantClientOutput string
|
||||||
|
|
||||||
|
clientOutputMustNotContain []string
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
name: "upload-denied",
|
||||||
|
handler: func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
w.WriteHeader(http.StatusForbidden)
|
||||||
|
},
|
||||||
|
sshCommand: "echo hello",
|
||||||
|
wantClientOutput: "recording: server responded with 403 Forbidden\r\n",
|
||||||
|
|
||||||
|
clientOutputMustNotContain: []string{"hello"},
|
||||||
|
},
|
||||||
|
{
|
||||||
|
name: "upload-fails-after-starting",
|
||||||
|
handler: func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
r.Body.Read(make([]byte, 1))
|
||||||
|
time.Sleep(100 * time.Millisecond)
|
||||||
|
w.WriteHeader(http.StatusInternalServerError)
|
||||||
|
},
|
||||||
|
sshCommand: "echo hello && sleep 1 && echo world",
|
||||||
|
wantClientOutput: "\r\n\r\nrecording server responded with: 500 Internal Server Error\r\n\r\n",
|
||||||
|
|
||||||
|
clientOutputMustNotContain: []string{"world"},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
src, dst := must.Get(netip.ParseAddrPort("100.100.100.101:2231")), must.Get(netip.ParseAddrPort("100.100.100.102:22"))
|
||||||
|
|
||||||
|
for _, tt := range tests {
|
||||||
|
t.Run(tt.name, func(t *testing.T) {
|
||||||
|
tstest.Replace(t, &handler, tt.handler)
|
||||||
|
sc, dc := memnet.NewTCPConn(src, dst, 1024)
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
|
c, chans, reqs, err := gossh.NewClientConn(sc, sc.RemoteAddr().String(), cfg)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("client: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
client := gossh.NewClient(c, chans, reqs)
|
||||||
|
defer client.Close()
|
||||||
|
session, err := client.NewSession()
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("client: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer session.Close()
|
||||||
|
t.Logf("client established session")
|
||||||
|
got, err := session.CombinedOutput(tt.sshCommand)
|
||||||
|
if err != nil {
|
||||||
|
t.Logf("client got: %q: %v", got, err)
|
||||||
|
} else {
|
||||||
|
t.Errorf("client did not get kicked out: %q", got)
|
||||||
|
}
|
||||||
|
gotStr := string(got)
|
||||||
|
if !strings.HasSuffix(gotStr, tt.wantClientOutput) {
|
||||||
|
t.Errorf("client got %q, want %q", got, tt.wantClientOutput)
|
||||||
|
}
|
||||||
|
for _, x := range tt.clientOutputMustNotContain {
|
||||||
|
if strings.Contains(gotStr, x) {
|
||||||
|
t.Errorf("client output must not contain %q", x)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
if err := s.HandleSSHConn(dc); err != nil {
|
||||||
|
t.Errorf("unexpected error: %v", err)
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// TestSSHRecordingNonInteractive tests that the SSH server records the SSH session
|
||||||
|
// when the client is not interactive (i.e. no PTY).
|
||||||
|
// It starts a local SSH server and a recording server. The recording server
|
||||||
|
// records the SSH session and returns it to the test.
|
||||||
|
// The test then verifies that the recording has a valid CastHeader; it does not
|
||||||
|
// validate the contents of the recording.
|
||||||
|
func TestSSHRecordingNonInteractive(t *testing.T) {
|
||||||
|
if runtime.GOOS != "linux" && runtime.GOOS != "darwin" {
|
||||||
|
t.Skipf("skipping on %q; only runs on linux and darwin", runtime.GOOS)
|
||||||
|
}
|
||||||
|
var recording []byte
|
||||||
|
ctx, cancel := context.WithTimeout(context.Background(), time.Second)
|
||||||
|
recordingServer := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
|
defer cancel()
|
||||||
|
var err error
|
||||||
|
recording, err = ioutil.ReadAll(r.Body)
|
||||||
|
if err != nil {
|
||||||
|
t.Error(err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}))
|
||||||
|
defer recordingServer.Close()
|
||||||
|
|
||||||
|
s := &server{
|
||||||
|
logf: logger.Discard,
|
||||||
|
httpc: recordingServer.Client(),
|
||||||
|
lb: &localState{
|
||||||
|
sshEnabled: true,
|
||||||
|
matchingRule: newSSHRule(
|
||||||
|
&tailcfg.SSHAction{
|
||||||
|
Accept: true,
|
||||||
|
Recorders: []netip.AddrPort{
|
||||||
|
must.Get(netip.ParseAddrPort(recordingServer.Listener.Addr().String())),
|
||||||
|
},
|
||||||
|
},
|
||||||
|
),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
defer s.Shutdown()
|
||||||
|
|
||||||
|
src, dst := must.Get(netip.ParseAddrPort("100.100.100.101:2231")), must.Get(netip.ParseAddrPort("100.100.100.102:22"))
|
||||||
|
sc, dc := memnet.NewTCPConn(src, dst, 1024)
|
||||||
|
|
||||||
|
const sshUser = "alice"
|
||||||
|
cfg := &gossh.ClientConfig{
|
||||||
|
User: sshUser,
|
||||||
|
HostKeyCallback: gossh.InsecureIgnoreHostKey(),
|
||||||
|
}
|
||||||
|
|
||||||
|
var wg sync.WaitGroup
|
||||||
|
wg.Add(1)
|
||||||
|
go func() {
|
||||||
|
defer wg.Done()
|
||||||
|
c, chans, reqs, err := gossh.NewClientConn(sc, sc.RemoteAddr().String(), cfg)
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("client: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
client := gossh.NewClient(c, chans, reqs)
|
||||||
|
defer client.Close()
|
||||||
|
session, err := client.NewSession()
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("client: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
defer session.Close()
|
||||||
|
t.Logf("client established session")
|
||||||
|
_, err = session.CombinedOutput("echo Ran echo!")
|
||||||
|
if err != nil {
|
||||||
|
t.Errorf("client: %v", err)
|
||||||
|
}
|
||||||
|
}()
|
||||||
|
if err := s.HandleSSHConn(dc); err != nil {
|
||||||
|
t.Errorf("unexpected error: %v", err)
|
||||||
|
}
|
||||||
|
wg.Wait()
|
||||||
|
|
||||||
|
<-ctx.Done() // wait for recording to finish
|
||||||
|
var ch CastHeader
|
||||||
|
if err := json.NewDecoder(bytes.NewReader(recording)).Decode(&ch); err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
if ch.SSHUser != sshUser {
|
||||||
|
t.Errorf("SSHUser = %q; want %q", ch.SSHUser, sshUser)
|
||||||
|
}
|
||||||
|
if ch.Command != "echo Ran echo!" {
|
||||||
|
t.Errorf("Command = %q; want %q", ch.Command, "echo Ran echo!")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
func TestSSHAuthFlow(t *testing.T) {
|
func TestSSHAuthFlow(t *testing.T) {
|
||||||
if runtime.GOOS != "linux" {
|
if runtime.GOOS != "linux" && runtime.GOOS != "darwin" {
|
||||||
t.Skip("Not running on Linux, skipping")
|
t.Skipf("skipping on %q; only runs on linux and darwin", runtime.GOOS)
|
||||||
}
|
}
|
||||||
acceptRule := newSSHRule(&tailcfg.SSHAction{
|
acceptRule := newSSHRule(&tailcfg.SSHAction{
|
||||||
Accept: true,
|
Accept: true,
|
||||||
|
@ -539,7 +749,8 @@ func TestSSH(t *testing.T) {
|
||||||
node: &tailcfg.Node{},
|
node: &tailcfg.Node{},
|
||||||
uprof: tailcfg.UserProfile{},
|
uprof: tailcfg.UserProfile{},
|
||||||
}
|
}
|
||||||
sc.finalAction = &tailcfg.SSHAction{Accept: true}
|
sc.action0 = &tailcfg.SSHAction{Accept: true}
|
||||||
|
sc.finalAction = sc.action0
|
||||||
|
|
||||||
sc.Handler = func(s ssh.Session) {
|
sc.Handler = func(s ssh.Session) {
|
||||||
sc.newSSHSession(s).run()
|
sc.newSSHSession(s).run()
|
||||||
|
|
|
@ -3,7 +3,7 @@
|
||||||
|
|
||||||
package tailcfg
|
package tailcfg
|
||||||
|
|
||||||
//go:generate go run tailscale.com/cmd/viewer --type=User,Node,Hostinfo,NetInfo,Login,DNSConfig,RegisterResponse,DERPRegion,DERPMap,DERPNode,SSHRule,SSHPrincipal,ControlDialPlan --clonefunc
|
//go:generate go run tailscale.com/cmd/viewer --type=User,Node,Hostinfo,NetInfo,Login,DNSConfig,RegisterResponse,DERPRegion,DERPMap,DERPNode,SSHRule,SSHAction,SSHPrincipal,ControlDialPlan --clonefunc
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"bytes"
|
||||||
|
@ -1822,7 +1822,8 @@ const (
|
||||||
|
|
||||||
// Funnel warning capabilities used for reporting errors to the user.
|
// Funnel warning capabilities used for reporting errors to the user.
|
||||||
|
|
||||||
// CapabilityWarnFunnelNoInvite indicates an invite has not been accepted for the Funnel alpha.
|
// CapabilityWarnFunnelNoInvite indicates whether Funnel is enabled for the tailnet.
|
||||||
|
// NOTE: In the transition from Alpha to Beta, this capability is being reused as the enablement signal.
|
||||||
CapabilityWarnFunnelNoInvite = "https://tailscale.com/cap/warn-funnel-no-invite"
|
CapabilityWarnFunnelNoInvite = "https://tailscale.com/cap/warn-funnel-no-invite"
|
||||||
|
|
||||||
// CapabilityWarnFunnelNoHTTPS indicates HTTPS has not been enabled for the tailnet.
|
// CapabilityWarnFunnelNoHTTPS indicates HTTPS has not been enabled for the tailnet.
|
||||||
|
@ -2021,9 +2022,9 @@ type SSHAction struct {
|
||||||
// to use local port forwarding if requested.
|
// to use local port forwarding if requested.
|
||||||
AllowLocalPortForwarding bool `json:"allowLocalPortForwarding,omitempty"`
|
AllowLocalPortForwarding bool `json:"allowLocalPortForwarding,omitempty"`
|
||||||
|
|
||||||
// SessionHaulTargetNode, if non-empty, is the Stable ID of a peer to
|
// Recorders defines the destinations of the SSH session recorders.
|
||||||
// stream this SSH session's logs to.
|
// The recording will be uploaded to http://addr:port/record.
|
||||||
SessionHaulTargetNode StableNodeID `json:"sessionHaulTargetNode,omitempty"`
|
Recorders []netip.AddrPort `json:"recorders"`
|
||||||
}
|
}
|
||||||
|
|
||||||
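For tailnet policy or control-plane code constructing actions directly, a hedged sketch of an accept action that points recording at a single destination; the recorder address 100.100.100.50:80 is invented for illustration and implies an upload to http://100.100.100.50:80/record.

package sketch

import (
	"net/netip"

	"tailscale.com/tailcfg"
)

// exampleAcceptWithRecording builds an SSHAction that accepts the session and
// streams its recording to a made-up recorder address.
func exampleAcceptWithRecording() *tailcfg.SSHAction {
	return &tailcfg.SSHAction{
		Accept:                   true,
		AllowAgentForwarding:     true,
		AllowLocalPortForwarding: true,
		Recorders: []netip.AddrPort{
			netip.MustParseAddrPort("100.100.100.50:80"),
		},
	}
}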
// OverTLSPublicKeyResponse is the JSON response to /key?v=<n>
|
// OverTLSPublicKeyResponse is the JSON response to /key?v=<n>
|
||||||
|
|
|
@ -371,10 +371,7 @@ func (src *SSHRule) Clone() *SSHRule {
|
||||||
dst.SSHUsers[k] = v
|
dst.SSHUsers[k] = v
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if dst.Action != nil {
|
dst.Action = src.Action.Clone()
|
||||||
dst.Action = new(SSHAction)
|
|
||||||
*dst.Action = *src.Action
|
|
||||||
}
|
|
||||||
return dst
|
return dst
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -386,6 +383,30 @@ var _SSHRuleCloneNeedsRegeneration = SSHRule(struct {
|
||||||
Action *SSHAction
|
Action *SSHAction
|
||||||
}{})
|
}{})
|
||||||
|
|
||||||
|
// Clone makes a deep copy of SSHAction.
|
||||||
|
// The result aliases no memory with the original.
|
||||||
|
func (src *SSHAction) Clone() *SSHAction {
|
||||||
|
if src == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
dst := new(SSHAction)
|
||||||
|
*dst = *src
|
||||||
|
dst.Recorders = append(src.Recorders[:0:0], src.Recorders...)
|
||||||
|
return dst
|
||||||
|
}
|
||||||
|
|
||||||
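The append(src.Recorders[:0:0], src.Recorders...) line is the generated cloner's idiom for copying a slice without aliasing the original backing array: the three-index slice expression yields a zero-length, zero-capacity slice, so append is forced to allocate fresh storage. A tiny standalone illustration:

package main

import (
	"fmt"
	"net/netip"
)

func main() {
	src := []netip.AddrPort{netip.MustParseAddrPort("100.100.100.50:80")}
	dst := append(src[:0:0], src...) // zero capacity forces a new allocation

	dst[0] = netip.MustParseAddrPort("100.100.100.51:80")
	fmt.Println(src[0]) // still 100.100.100.50:80; the clone does not alias src
	fmt.Println(dst[0]) // 100.100.100.51:80
}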
|
// A compilation failure here means this code must be regenerated, with the command at the top of this file.
|
||||||
|
var _SSHActionCloneNeedsRegeneration = SSHAction(struct {
|
||||||
|
Message string
|
||||||
|
Reject bool
|
||||||
|
Accept bool
|
||||||
|
SessionDuration time.Duration
|
||||||
|
AllowAgentForwarding bool
|
||||||
|
HoldAndDelegate string
|
||||||
|
AllowLocalPortForwarding bool
|
||||||
|
Recorders []netip.AddrPort
|
||||||
|
}{})
|
||||||
|
|
||||||
// Clone makes a deep copy of SSHPrincipal.
|
// Clone makes a deep copy of SSHPrincipal.
|
||||||
// The result aliases no memory with the original.
|
// The result aliases no memory with the original.
|
||||||
func (src *SSHPrincipal) Clone() *SSHPrincipal {
|
func (src *SSHPrincipal) Clone() *SSHPrincipal {
|
||||||
|
@ -426,7 +447,7 @@ var _ControlDialPlanCloneNeedsRegeneration = ControlDialPlan(struct {
|
||||||
|
|
||||||
// Clone duplicates src into dst and reports whether it succeeded.
|
// Clone duplicates src into dst and reports whether it succeeded.
|
||||||
// To succeed, <src, dst> must be of types <*T, *T> or <*T, **T>,
|
// To succeed, <src, dst> must be of types <*T, *T> or <*T, **T>,
|
||||||
// where T is one of User,Node,Hostinfo,NetInfo,Login,DNSConfig,RegisterResponse,DERPRegion,DERPMap,DERPNode,SSHRule,SSHPrincipal,ControlDialPlan.
|
// where T is one of User,Node,Hostinfo,NetInfo,Login,DNSConfig,RegisterResponse,DERPRegion,DERPMap,DERPNode,SSHRule,SSHAction,SSHPrincipal,ControlDialPlan.
|
||||||
func Clone(dst, src any) bool {
|
func Clone(dst, src any) bool {
|
||||||
switch src := src.(type) {
|
switch src := src.(type) {
|
||||||
case *User:
|
case *User:
|
||||||
|
@ -528,6 +549,15 @@ func Clone(dst, src any) bool {
|
||||||
*dst = src.Clone()
|
*dst = src.Clone()
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
case *SSHAction:
|
||||||
|
switch dst := dst.(type) {
|
||||||
|
case *SSHAction:
|
||||||
|
*dst = *src.Clone()
|
||||||
|
return true
|
||||||
|
case **SSHAction:
|
||||||
|
*dst = src.Clone()
|
||||||
|
return true
|
||||||
|
}
|
||||||
case *SSHPrincipal:
|
case *SSHPrincipal:
|
||||||
switch dst := dst.(type) {
|
switch dst := dst.(type) {
|
||||||
case *SSHPrincipal:
|
case *SSHPrincipal:
|
||||||
|
|
|
@ -20,7 +20,7 @@ import (
|
||||||
"tailscale.com/types/views"
|
"tailscale.com/types/views"
|
||||||
)
|
)
|
||||||
|
|
||||||
//go:generate go run tailscale.com/cmd/cloner -clonefunc=true -type=User,Node,Hostinfo,NetInfo,Login,DNSConfig,RegisterResponse,DERPRegion,DERPMap,DERPNode,SSHRule,SSHPrincipal,ControlDialPlan
|
//go:generate go run tailscale.com/cmd/cloner -clonefunc=true -type=User,Node,Hostinfo,NetInfo,Login,DNSConfig,RegisterResponse,DERPRegion,DERPMap,DERPNode,SSHRule,SSHAction,SSHPrincipal,ControlDialPlan
|
||||||
|
|
||||||
// View returns a readonly view of User.
|
// View returns a readonly view of User.
|
||||||
func (p *User) View() UserView {
|
func (p *User) View() UserView {
|
||||||
|
@ -865,13 +865,7 @@ func (v SSHRuleView) Principals() views.SliceView[*SSHPrincipal, SSHPrincipalVie
|
||||||
}
|
}
|
||||||
|
|
||||||
func (v SSHRuleView) SSHUsers() views.Map[string, string] { return views.MapOf(v.ж.SSHUsers) }
|
func (v SSHRuleView) SSHUsers() views.Map[string, string] { return views.MapOf(v.ж.SSHUsers) }
|
||||||
func (v SSHRuleView) Action() *SSHAction {
|
func (v SSHRuleView) Action() SSHActionView { return v.ж.Action.View() }
|
||||||
if v.ж.Action == nil {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
x := *v.ж.Action
|
|
||||||
return &x
|
|
||||||
}
|
|
||||||
|
|
||||||
// A compilation failure here means this code must be regenerated, with the command at the top of this file.
|
// A compilation failure here means this code must be regenerated, with the command at the top of this file.
|
||||||
var _SSHRuleViewNeedsRegeneration = SSHRule(struct {
|
var _SSHRuleViewNeedsRegeneration = SSHRule(struct {
|
||||||
|
@ -881,6 +875,72 @@ var _SSHRuleViewNeedsRegeneration = SSHRule(struct {
|
||||||
Action *SSHAction
|
Action *SSHAction
|
||||||
}{})
|
}{})
|
||||||
|
|
||||||
|
// View returns a readonly view of SSHAction.
|
||||||
|
func (p *SSHAction) View() SSHActionView {
|
||||||
|
return SSHActionView{ж: p}
|
||||||
|
}
|
||||||
|
|
||||||
|
// SSHActionView provides a read-only view over SSHAction.
|
||||||
|
//
|
||||||
|
// Its methods should only be called if `Valid()` returns true.
|
||||||
|
type SSHActionView struct {
|
||||||
|
// ж is the underlying mutable value, named with a hard-to-type
|
||||||
|
// character that looks pointy like a pointer.
|
||||||
|
// It is named distinctively to make you think of how dangerous it is to escape
|
||||||
|
// to callers. You must not let callers be able to mutate it.
|
||||||
|
ж *SSHAction
|
||||||
|
}
|
||||||
|
|
||||||
|
// Valid reports whether underlying value is non-nil.
|
||||||
|
func (v SSHActionView) Valid() bool { return v.ж != nil }
|
||||||
|
|
||||||
|
// AsStruct returns a clone of the underlying value which aliases no memory with
|
||||||
|
// the original.
|
||||||
|
func (v SSHActionView) AsStruct() *SSHAction {
|
||||||
|
if v.ж == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return v.ж.Clone()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (v SSHActionView) MarshalJSON() ([]byte, error) { return json.Marshal(v.ж) }
|
||||||
|
|
||||||
|
func (v *SSHActionView) UnmarshalJSON(b []byte) error {
|
||||||
|
if v.ж != nil {
|
||||||
|
return errors.New("already initialized")
|
||||||
|
}
|
||||||
|
if len(b) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
var x SSHAction
|
||||||
|
if err := json.Unmarshal(b, &x); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
v.ж = &x
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (v SSHActionView) Message() string { return v.ж.Message }
|
||||||
|
func (v SSHActionView) Reject() bool { return v.ж.Reject }
|
||||||
|
func (v SSHActionView) Accept() bool { return v.ж.Accept }
|
||||||
|
func (v SSHActionView) SessionDuration() time.Duration { return v.ж.SessionDuration }
|
||||||
|
func (v SSHActionView) AllowAgentForwarding() bool { return v.ж.AllowAgentForwarding }
|
||||||
|
func (v SSHActionView) HoldAndDelegate() string { return v.ж.HoldAndDelegate }
|
||||||
|
func (v SSHActionView) AllowLocalPortForwarding() bool { return v.ж.AllowLocalPortForwarding }
|
||||||
|
func (v SSHActionView) Recorders() views.Slice[netip.AddrPort] { return views.SliceOf(v.ж.Recorders) }
|
||||||
|
|
||||||
|
// A compilation failure here means this code must be regenerated, with the command at the top of this file.
|
||||||
|
var _SSHActionViewNeedsRegeneration = SSHAction(struct {
|
||||||
|
Message string
|
||||||
|
Reject bool
|
||||||
|
Accept bool
|
||||||
|
SessionDuration time.Duration
|
||||||
|
AllowAgentForwarding bool
|
||||||
|
HoldAndDelegate string
|
||||||
|
AllowLocalPortForwarding bool
|
||||||
|
Recorders []netip.AddrPort
|
||||||
|
}{})
|
||||||
|
|
||||||
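A short, hypothetical usage sketch of the generated view API: callers hold an SSHActionView, check Valid before reading, use the typed accessors for read-only access, and call AsStruct only when a mutable deep copy is genuinely needed. The describeAction function and its rv parameter are invented for illustration.

package sketch

import (
	"fmt"

	"tailscale.com/tailcfg"
)

// describeAction reads an SSH rule's action through the read-only view types.
func describeAction(rv tailcfg.SSHRuleView) {
	av := rv.Action()
	if !av.Valid() {
		fmt.Println("rule has no action")
		return
	}
	fmt.Println("accept:", av.Accept(), "recorders:", av.Recorders().Len())

	mutable := av.AsStruct() // deep copy; safe to modify without affecting the original
	_ = mutable
}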
// View returns a readonly view of SSHPrincipal.
|
// View returns a readonly view of SSHPrincipal.
|
||||||
func (p *SSHPrincipal) View() SSHPrincipalView {
|
func (p *SSHPrincipal) View() SSHPrincipalView {
|
||||||
return SSHPrincipalView{ж: p}
|
return SSHPrincipalView{ж: p}
|
||||||
|
|
|
@ -150,14 +150,14 @@ func InfoFrom(dir string) (VersionInfo, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
// Note, this mechanism doesn't correctly support go.mod replacements,
|
// Note, this mechanism doesn't correctly support go.mod replacements,
|
||||||
// or go workdirs. We only parse out the commit hash from go.mod's
|
// or go workdirs. We only parse out the commit ref from go.mod's
|
||||||
// "require" line, nothing else.
|
// "require" line, nothing else.
|
||||||
tailscaleHash, err := tailscaleModuleHash(modBs)
|
tailscaleRef, err := tailscaleModuleRef(modBs)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return VersionInfo{}, err
|
return VersionInfo{}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
v, err := infoFromCache(tailscaleHash, runner)
|
v, err := infoFromCache(tailscaleRef, runner)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return VersionInfo{}, err
|
return VersionInfo{}, err
|
||||||
}
|
}
|
||||||
|
@ -171,9 +171,10 @@ func InfoFrom(dir string) (VersionInfo, error) {
|
||||||
return mkOutput(v)
|
return mkOutput(v)
|
||||||
}
|
}
|
||||||
|
|
||||||
// tailscaleModuleHash returns the git hash of the 'require tailscale.com' line
|
// tailscaleModuleRef returns the git ref of the 'require tailscale.com' line
|
||||||
// in the given go.mod bytes.
|
// in the given go.mod bytes. The ref is either a short commit hash, or a git
|
||||||
func tailscaleModuleHash(modBs []byte) (string, error) {
|
// tag.
|
||||||
|
func tailscaleModuleRef(modBs []byte) (string, error) {
|
||||||
mod, err := modfile.Parse("go.mod", modBs, nil)
|
mod, err := modfile.Parse("go.mod", modBs, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
|
@ -187,7 +188,8 @@ func tailscaleModuleHash(modBs []byte) (string, error) {
|
||||||
if i := strings.LastIndexByte(req.Mod.Version, '-'); i != -1 {
|
if i := strings.LastIndexByte(req.Mod.Version, '-'); i != -1 {
|
||||||
return req.Mod.Version[i+1:], nil
|
return req.Mod.Version[i+1:], nil
|
||||||
}
|
}
|
||||||
return "", fmt.Errorf("couldn't parse git hash from tailscale.com version %q", req.Mod.Version)
|
// If there are no dashes, the version is a tag.
|
||||||
|
return req.Mod.Version, nil
|
||||||
}
|
}
|
||||||
return "", fmt.Errorf("no require tailscale.com line in go.mod")
|
return "", fmt.Errorf("no require tailscale.com line in go.mod")
|
||||||
}
|
}
|
||||||
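Concretely, under the parsing rule above: a pseudo-version such as v1.1.1-0.20230322183912-43a34500dabc ends in a dash-separated short hash, so 43a34500dabc is returned, while a plain release tag such as v1.38.4 contains no dash and is returned unchanged. A minimal standalone sketch of the same extraction (the example versions are invented):

package main

import (
	"fmt"
	"strings"
)

// refFromModuleVersion mirrors tailscaleModuleRef's rule: a pseudo-version
// ends in "-<short hash>"; a release tag has no dash and is used directly.
func refFromModuleVersion(v string) string {
	if i := strings.LastIndexByte(v, '-'); i != -1 {
		return v[i+1:]
	}
	return v
}

func main() {
	fmt.Println(refFromModuleVersion("v1.1.1-0.20230322183912-43a34500dabc")) // 43a34500dabc
	fmt.Println(refFromModuleVersion("v1.38.4"))                              // v1.38.4
}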
|
@ -310,7 +312,7 @@ type verInfo struct {
|
||||||
// sentinel patch number.
|
// sentinel patch number.
|
||||||
const unknownPatchVersion = 9999999
|
const unknownPatchVersion = 9999999
|
||||||
|
|
||||||
func infoFromCache(shortHash string, runner dirRunner) (verInfo, error) {
|
func infoFromCache(ref string, runner dirRunner) (verInfo, error) {
|
||||||
cacheDir, err := os.UserCacheDir()
|
cacheDir, err := os.UserCacheDir()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return verInfo{}, fmt.Errorf("Getting user cache dir: %w", err)
|
return verInfo{}, fmt.Errorf("Getting user cache dir: %w", err)
|
||||||
|
@ -324,16 +326,16 @@ func infoFromCache(shortHash string, runner dirRunner) (verInfo, error) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
if !r.ok("git", "cat-file", "-e", shortHash) {
|
if !r.ok("git", "cat-file", "-e", ref) {
|
||||||
if !r.ok("git", "fetch", "origin") {
|
if !r.ok("git", "fetch", "origin") {
|
||||||
return verInfo{}, fmt.Errorf("updating OSS repo failed")
|
return verInfo{}, fmt.Errorf("updating OSS repo failed")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
hash, err := r.output("git", "rev-parse", shortHash)
|
hash, err := r.output("git", "rev-parse", ref)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return verInfo{}, err
|
return verInfo{}, err
|
||||||
}
|
}
|
||||||
date, err := r.output("git", "log", "-n1", "--format=%ct", shortHash)
|
date, err := r.output("git", "log", "-n1", "--format=%ct", ref)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return verInfo{}, err
|
return verInfo{}, err
|
||||||
}
|
}
|
||||||
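The cache-refresh logic above reduces to three git invocations against the local checkout: check that the ref exists (git cat-file -e), fetch origin if it does not, then resolve the full hash (git rev-parse) and the commit date (git log -n1 --format=%ct). A hedged sketch of the same sequence with plain os/exec, bypassing the dirRunner abstraction; repoDir and the use of HEAD in main are placeholders.

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// resolveRef resolves a short hash or tag to a full commit hash and commit
// date (unix seconds), fetching from origin first if the ref is unknown.
func resolveRef(repoDir, ref string) (hash, date string, err error) {
	run := func(args ...string) (string, error) {
		cmd := exec.Command("git", args...)
		cmd.Dir = repoDir
		out, err := cmd.Output()
		return strings.TrimSpace(string(out)), err
	}
	if _, err := run("cat-file", "-e", ref); err != nil {
		if _, err := run("fetch", "origin"); err != nil {
			return "", "", fmt.Errorf("updating repo: %w", err)
		}
	}
	if hash, err = run("rev-parse", ref); err != nil {
		return "", "", err
	}
	if date, err = run("log", "-n1", "--format=%ct", ref); err != nil {
		return "", "", err
	}
	return hash, date, nil
}

func main() {
	hash, date, err := resolveRef(".", "HEAD")
	fmt.Println(hash, date, err)
}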
|
|