Compare commits

...

9 Commits

Author SHA1 Message Date
Rhea Ghosh d2684863c2 VERSION.txt: this is v1.40.1
Signed-off-by: Rhea Ghosh <rhea@tailscale.com>
2023-05-10 15:39:26 -05:00
Will Norris ba3ff98da1 net/sockstats: wait before reporting battery usage
Wait 2 minutes before we start reporting battery usage. There is always
radio activity on initial startup, which gets reported as 100% high-power
usage. Let that settle before we report usage data.

Updates tailscale/corp#9230

Signed-off-by: Will Norris <will@tailscale.com>
(cherry picked from commit ea84fc9ad2)
2023-05-10 15:12:20 -05:00
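
A minimal sketch of the warm-up gate this change introduces, using the initStallPeriod name that appears in the net/sockstats diff further down (illustration only, not the exact implementation):

package main

import "fmt"

// initStallPeriod is the minimum number of seconds of data to collect
// before reporting, so startup radio chatter doesn't read as 100% usage.
var initStallPeriod int64 = 120 // 2 minutes

// radioHighPercent reports what percentage of the observed period the
// radio was in high-power state, or 0 while still inside the warm-up.
func radioHighPercent(periodLength, activeSeconds int64) int64 {
	if periodLength < initStallPeriod {
		return 0 // not enough history yet; report nothing
	}
	return activeSeconds * 100 / periodLength
}

func main() {
	fmt.Println(radioHighPercent(60, 60))   // 0: inside the warm-up window
	fmt.Println(radioHighPercent(600, 300)) // 50
}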
Brad Fitzpatrick 2e44616dd8 ssh/tailssh: support LDAP users for Tailscale SSH
Fixes #4945

Change-Id: Ie013cb47684cb87928a44f92c66352310bfe53f1
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
(cherry picked from commit 58ab66ec51)
2023-05-10 15:11:14 -05:00
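
The mechanism behind the fix lands in the new ssh/tailssh/user.go at the bottom of this compare: user lookups go through getent(1), which consults NSS (and therefore LDAP/SSSD), instead of the standard library's cgo-free path that only reads /etc/passwd. A hedged standalone sketch of that idea ("alice" is a placeholder user):

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	// getent resolves via NSS, so LDAP-provisioned users work even in
	// static, cgo-free builds where pure-Go os/user cannot see them.
	out, err := exec.Command("getent", "passwd", "alice").Output()
	if err != nil {
		return // user unknown (or not on Linux)
	}
	// passwd(5) format: name:passwd:uid:gid:gecos:home:shell
	f := strings.SplitN(strings.TrimSpace(string(out)), ":", 7)
	if len(f) == 7 {
		fmt.Printf("home=%s shell=%s\n", f[5], f[6])
	}
}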
Brad Fitzpatrick 176939fa53 ipn/ipnlocal: bound how long cert fetcher checks for existing DNS records
It was supposed to be best effort, but in some cases (macsys at least,
per @marwan-at-work) it hangs and exhausts the whole context.Context
deadline, so we fail to make the SetDNS call to the server.

Updates #8067
Updates #3273 etc

Change-Id: Ie1f04abe9689951484748aecdeae312afbafdb0f
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
(cherry picked from commit 8864112a0c)
2023-05-10 15:05:55 -05:00
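
The shape of the fix, as a hedged sketch (hasTXT is a hypothetical standalone helper; the real change is in the ipn/ipnlocal hunk below): give the best-effort lookup its own short deadline so it cannot consume the parent context's budget before the SetDNS call.

package dnscheck

import (
	"context"
	"net"
	"time"

	"golang.org/x/exp/slices"
)

// hasTXT reports whether key already has a TXT record equal to rec,
// giving up after 500ms so a slow resolver can't exhaust ctx's deadline.
func hasTXT(ctx context.Context, key, rec string) bool {
	lookupCtx, cancel := context.WithTimeout(ctx, 500*time.Millisecond)
	defer cancel()
	var r net.Resolver
	txts, _ := r.LookupTXT(lookupCtx, key)
	return slices.Contains(txts, rec)
}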
Maisem Ali c6ebbddfed
ssh/tailssh: restore support for recording locally
We removed it earlier in 916aa782af, but we still want to support it for a while longer.

Updates tailscale/corp#9967

Signed-off-by: Maisem Ali <maisem@tailscale.com>
(cherry picked from commit be190e990f)
2023-05-09 09:36:34 -07:00
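
The restored path hangs off a debug environment knob (see the tailssh hunks below). A simplified sketch of that gating, with envBool standing in for envknob.RegisterBool (the real envknob caches the value and accepts more spellings):

package main

import (
	"fmt"
	"os"
)

// envBool returns a func reporting whether the named env var is truthy.
func envBool(name string) func() bool {
	return func() bool {
		v := os.Getenv(name)
		return v == "1" || v == "true"
	}
}

var recordSSHToLocalDisk = envBool("TS_DEBUG_LOG_SSH")

// shouldRecord mirrors the patched logic: record if control configured
// recorders, or fall back to local disk when the debug knob is set.
func shouldRecord(numRecorders int) bool {
	return numRecorders > 0 || recordSSHToLocalDisk()
}

func main() {
	os.Setenv("TS_DEBUG_LOG_SSH", "1")
	fmt.Println(shouldRecord(0)) // true: local-disk fallback enabled
}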
salman 42e993e863
release/dist: add deb/rpm arch mappings for mipses
According to https://wiki.debian.org/SupportedArchitectures Debian does
not support big-endian mips64, so that one remains disabled.

According to https://fedoraproject.org/wiki/Architectures Fedora only
supports little-endian mips, so we leave both big-endian ones out too.

Updates #8005.

Signed-off-by: salman <salman@tailscale.com>
(cherry picked from commit 1ce08256c0)
2023-05-09 09:34:29 -07:00
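
The new mappings, condensed into a runnable sketch (mirrors the release/dist hunks below; big-endian mips64 deliberately gets no Debian mapping):

package main

import "fmt"

// debArch maps a GOARCH value to Debian's architecture name; Debian
// spells little-endian MIPS "mipsel" and "mips64el".
func debArch(goarch string) string {
	switch goarch {
	case "mipsle":
		return "mipsel"
	case "mips64le":
		return "mips64el"
	default:
		return goarch // mips (big-endian) and others pass through
	}
}

func main() {
	fmt.Println(debArch("mipsle"), debArch("mips64le"), debArch("mips"))
	// Output: mipsel mips64el mips
}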
Craig Rodrigues 05493383ef
cmd/k8s-operator: print version in startup logs
Fixes: #7813

Signed-off-by: Craig Rodrigues <rodrigc@crodrigues.org>
(cherry picked from commit 827abbeeaa)
2023-05-09 09:34:10 -07:00
Maisem Ali de26c1c3fa
net/tstun: handle exit nodes in NAT configs
In the case where the exit node requires SNAT, we would SNAT all traffic,
not just the traffic meant to go through the exit node. This was a result
of the default route being added to the routing table, which would match
basically everything.

In this case, we need to account for all peers in the routing table, not
just the ones that require NAT.

Fix and add a test.

Updates tailscale/corp#8020

Signed-off-by: Maisem Ali <maisem@tailscale.com>
(cherry picked from commit 3ae7140690)
2023-05-06 10:31:39 -07:00
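
The detection step, restated as a self-contained sketch (the diff below does the same check via tsaddr.AllIPv4/AllIPv6): a peer counts as an exit node when its allowed IPs include a default route, and if that peer also sets a masquerade address, every peer needs a routing-table entry so non-exit traffic escapes the default route.

package main

import (
	"fmt"
	"net/netip"
)

// isExitNode reports whether a peer's allowed IPs contain a default route.
func isExitNode(allowedIPs []netip.Prefix) bool {
	v4, v6 := netip.MustParsePrefix("0.0.0.0/0"), netip.MustParsePrefix("::/0")
	for _, p := range allowedIPs {
		if p == v4 || p == v6 {
			return true
		}
	}
	return false
}

func main() {
	exit := []netip.Prefix{netip.MustParsePrefix("0.0.0.0/0")}
	peer := []netip.Prefix{netip.MustParsePrefix("100.64.0.2/32")}
	fmt.Println(isExitNode(exit), isExitNode(peer)) // true false
}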
Denton Gentry 9bdaece3d7
VERSION.txt: this is v1.40.0
Signed-off-by: Denton Gentry <dgentry@tailscale.com>
2023-04-26 15:20:38 -07:00
13 changed files with 341 additions and 131 deletions

View File

@ -1 +1 @@
1.39.0
1.40.1

View File

@ -48,6 +48,7 @@ import (
"tailscale.com/types/logger"
"tailscale.com/types/opt"
"tailscale.com/util/dnsname"
"tailscale.com/version"
)
func main() {
@ -235,7 +236,7 @@ waitOnline:
startlog.Fatalf("could not create controller: %v", err)
}
startlog.Infof("Startup complete, operator running")
startlog.Infof("Startup complete, operator running, version: %s", version.Long())
if shouldRunAuthProxy {
cfg, err := restConfig.TransportConfig()
if err != nil {

View File

@ -31,6 +31,7 @@ import (
"time"
"golang.org/x/crypto/acme"
"golang.org/x/exp/slices"
"tailscale.com/atomicfile"
"tailscale.com/envknob"
"tailscale.com/hostinfo"
@ -361,17 +362,16 @@ func (b *LocalBackend) getCertPEM(ctx context.Context, cs certStore, logf logger
}
key := "_acme-challenge." + domain
// Do a best-effort lookup to see if we've already created this DNS name
// in a previous attempt. Don't burn too much time on it, though. Worst
// case we ask the server to create something that already exists.
var resolver net.Resolver
var ok bool
txts, _ := resolver.LookupTXT(ctx, key)
for _, txt := range txts {
if txt == rec {
ok = true
logf("TXT record already existed")
break
}
}
if !ok {
lookupCtx, lookupCancel := context.WithTimeout(ctx, 500*time.Millisecond)
txts, _ := resolver.LookupTXT(lookupCtx, key)
lookupCancel()
if slices.Contains(txts, rec) {
logf("TXT record already existed")
} else {
logf("starting SetDNS call...")
err = b.SetDNS(ctx, key, rec)
if err != nil {

View File

@ -325,6 +325,10 @@ type radioMonitor struct {
// Usage is measured once per second, so this is the number of seconds of history to track.
const radioSampleSize = 3600 // 1 hour
// initStallPeriod is the minimum amount of time in seconds to collect data before reporting.
// Otherwise, all clients will report 100% radio usage on startup.
var initStallPeriod int64 = 120 // 2 minutes
var radio = &radioMonitor{
now: time.Now,
startTime: time.Now().Unix(),
@ -375,7 +379,7 @@ func (rm *radioMonitor) radioHighPercent() int64 {
}
})
if periodLength == 0 {
if periodLength < initStallPeriod {
return 0
}
@ -386,7 +390,7 @@ func (rm *radioMonitor) radioHighPercent() int64 {
}
// forEachSample calls f for each sample in the past hour (or less if less time
// has passed -- the evaluated period is returned)
// has passed -- the evaluated period is returned, measured in seconds)
func (rm *radioMonitor) forEachSample(f func(c int, isActive bool)) (periodLength int64) {
now := rm.now().Unix()
periodLength = radioSampleSize

View File

@ -33,6 +33,14 @@ func TestRadioMonitor(t *testing.T) {
func(_ *testTime, _ *radioMonitor) {},
0,
},
{
"active less than init stall period",
func(tt *testTime, rm *radioMonitor) {
rm.active()
tt.Add(1 * time.Second)
},
0, // radio on, but not long enough to report data
},
{
"active, 10 sec idle",
func(tt *testTime, rm *radioMonitor) {
@ -42,13 +50,13 @@ func TestRadioMonitor(t *testing.T) {
50, // radio on 5 seconds of 10 seconds
},
{
"active, spanning two seconds",
"active, spanning three seconds",
func(tt *testTime, rm *radioMonitor) {
rm.active()
tt.Add(1100 * time.Millisecond)
tt.Add(2100 * time.Millisecond)
rm.active()
},
100, // radio on for 2 seconds
100, // radio on for 3 seconds
},
{
"400 iterations: 2 sec active, 1 min idle",
@ -66,13 +74,17 @@ func TestRadioMonitor(t *testing.T) {
{
"activity at end of time window",
func(tt *testTime, rm *radioMonitor) {
tt.Add(1 * time.Second)
tt.Add(3 * time.Second)
rm.active()
},
50,
25,
},
}
oldStallPeriod := initStallPeriod
initStallPeriod = 3
t.Cleanup(func() { initStallPeriod = oldStallPeriod })
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
tm := &testTime{time.Date(2021, 1, 1, 0, 0, 0, 0, time.UTC)}
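
The oldStallPeriod/initStallPeriod override shown above is the usual way to lower a package-level knob for tests; a sketch under assumed names (setStallPeriod is hypothetical):

package sockstats

import "testing"

// initStallPeriod stands in for the package-level knob being overridden.
var initStallPeriod int64 = 120

// setStallPeriod temporarily lowers the knob for a test and restores the
// old value on cleanup, same save/override/restore shape as the hunk above.
func setStallPeriod(t *testing.T, v int64) {
	old := initStallPeriod
	initStallPeriod = v
	t.Cleanup(func() { initStallPeriod = old })
}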

View File

@ -20,6 +20,7 @@ import (
"github.com/tailscale/wireguard-go/device"
"github.com/tailscale/wireguard-go/tun"
"go4.org/mem"
"golang.org/x/exp/slices"
"gvisor.dev/gvisor/pkg/tcpip/stack"
"tailscale.com/disco"
"tailscale.com/net/connstats"
@ -590,16 +591,33 @@ func natConfigFromWGConfig(wcfg *wgcfg.Config) *natV4Config {
dstMasqAddrs map[key.NodePublic]netip.Addr
listenAddrs map[netip.Addr]struct{}
)
// When using an exit node that requires masquerading, we need to
// fill out the routing table with all peers not just the ones that
// require masquerading.
exitNodeRequiresMasq := false // true if using an exit node and it requires masquerading
for _, p := range wcfg.Peers {
isExitNode := slices.Contains(p.AllowedIPs, tsaddr.AllIPv4()) || slices.Contains(p.AllowedIPs, tsaddr.AllIPv6())
if isExitNode && p.V4MasqAddr != nil && p.V4MasqAddr.IsValid() {
exitNodeRequiresMasq = true
break
}
}
for i := range wcfg.Peers {
p := &wcfg.Peers[i]
if p.V4MasqAddr == nil || !p.V4MasqAddr.IsValid() {
var addrToUse netip.Addr
if p.V4MasqAddr != nil && p.V4MasqAddr.IsValid() {
addrToUse = *p.V4MasqAddr
mak.Set(&listenAddrs, addrToUse, struct{}{})
} else if exitNodeRequiresMasq {
addrToUse = nativeAddr
} else {
continue
}
rt.InsertOrReplace(p.PublicKey, p.AllowedIPs...)
mak.Set(&dstMasqAddrs, p.PublicKey, *p.V4MasqAddr)
mak.Set(&listenAddrs, *p.V4MasqAddr, struct{}{})
mak.Set(&dstMasqAddrs, p.PublicKey, addrToUse)
}
if len(listenAddrs) == 0 || len(dstMasqAddrs) == 0 {
if len(listenAddrs) == 0 && len(dstMasqAddrs) == 0 {
return nil
}
return &natV4Config{
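
Condensing the per-peer choice from the hunk above into a standalone sketch (masqAddrForPeer is a hypothetical name; the real code inlines this logic in natConfigFromWGConfig):

package main

import (
	"fmt"
	"net/netip"
)

// masqAddrForPeer picks the SNAT source for one peer: the peer's own
// masquerade address if set; else our native address when the exit node
// requires masquerading; else no NAT entry at all for this peer.
func masqAddrForPeer(peerMasq *netip.Addr, exitNodeRequiresMasq bool, nativeAddr netip.Addr) (netip.Addr, bool) {
	switch {
	case peerMasq != nil && peerMasq.IsValid():
		return *peerMasq, true
	case exitNodeRequiresMasq:
		return nativeAddr, true
	default:
		return netip.Addr{}, false
	}
}

func main() {
	native := netip.MustParseAddr("100.64.0.1")
	addr, ok := masqAddrForPeer(nil, true, native)
	fmt.Println(addr, ok) // 100.64.0.1 true: SNAT through the exit node
}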

View File

@ -602,13 +602,13 @@ func TestFilterDiscoLoop(t *testing.T) {
}
func TestNATCfg(t *testing.T) {
node := func(ip, eip netip.Addr, otherAllowedIPs ...netip.Prefix) wgcfg.Peer {
node := func(ip, masqIP netip.Addr, otherAllowedIPs ...netip.Prefix) wgcfg.Peer {
p := wgcfg.Peer{
PublicKey: key.NewNode().Public(),
AllowedIPs: []netip.Prefix{
netip.PrefixFrom(ip, ip.BitLen()),
},
V4MasqAddr: ptr.To(eip),
V4MasqAddr: ptr.To(masqIP),
}
p.AllowedIPs = append(p.AllowedIPs, otherAllowedIPs...)
return p
@ -619,13 +619,16 @@ func TestNATCfg(t *testing.T) {
selfNativeIP = netip.MustParseAddr("100.64.0.1")
selfEIP1 = netip.MustParseAddr("100.64.1.1")
selfEIP2 = netip.MustParseAddr("100.64.1.2")
selfAddrs = []netip.Prefix{netip.PrefixFrom(selfNativeIP, selfNativeIP.BitLen())}
peer1IP = netip.MustParseAddr("100.64.0.2")
peer2IP = netip.MustParseAddr("100.64.0.3")
subnet = netip.MustParseAddr("192.168.0.1")
subnet = netip.MustParsePrefix("192.168.0.0/24")
subnetIP = netip.MustParseAddr("192.168.0.1")
selfAddrs = []netip.Prefix{netip.PrefixFrom(selfNativeIP, selfNativeIP.BitLen())}
exitRoute = netip.MustParsePrefix("0.0.0.0/0")
publicIP = netip.MustParseAddr("8.8.8.8")
)
tests := []struct {
@ -638,9 +641,9 @@ func TestNATCfg(t *testing.T) {
name: "no-cfg",
wcfg: nil,
snatMap: map[netip.Addr]netip.Addr{
peer1IP: selfNativeIP,
peer2IP: selfNativeIP,
subnet: selfNativeIP,
peer1IP: selfNativeIP,
peer2IP: selfNativeIP,
subnetIP: selfNativeIP,
},
dnatMap: map[netip.Addr]netip.Addr{
selfNativeIP: selfNativeIP,
@ -658,15 +661,15 @@ func TestNATCfg(t *testing.T) {
},
},
snatMap: map[netip.Addr]netip.Addr{
peer1IP: selfNativeIP,
peer2IP: selfEIP1,
subnet: selfNativeIP,
peer1IP: selfNativeIP,
peer2IP: selfEIP1,
subnetIP: selfNativeIP,
},
dnatMap: map[netip.Addr]netip.Addr{
selfNativeIP: selfNativeIP,
selfEIP1: selfNativeIP,
selfEIP2: selfEIP2,
subnet: subnet,
subnetIP: subnetIP,
},
},
{
@ -679,15 +682,15 @@ func TestNATCfg(t *testing.T) {
},
},
snatMap: map[netip.Addr]netip.Addr{
peer1IP: selfEIP1,
peer2IP: selfEIP2,
subnet: selfNativeIP,
peer1IP: selfEIP1,
peer2IP: selfEIP2,
subnetIP: selfNativeIP,
},
dnatMap: map[netip.Addr]netip.Addr{
selfNativeIP: selfNativeIP,
selfEIP1: selfNativeIP,
selfEIP2: selfNativeIP,
subnet: subnet,
subnetIP: subnetIP,
},
},
{
@ -696,19 +699,19 @@ func TestNATCfg(t *testing.T) {
Addresses: selfAddrs,
Peers: []wgcfg.Peer{
node(peer1IP, selfEIP1),
node(peer2IP, selfEIP2, netip.MustParsePrefix("192.168.0.0/24")),
node(peer2IP, selfEIP2, subnet),
},
},
snatMap: map[netip.Addr]netip.Addr{
peer1IP: selfEIP1,
peer2IP: selfEIP2,
subnet: selfEIP2,
peer1IP: selfEIP1,
peer2IP: selfEIP2,
subnetIP: selfEIP2,
},
dnatMap: map[netip.Addr]netip.Addr{
selfNativeIP: selfNativeIP,
selfEIP1: selfNativeIP,
selfEIP2: selfNativeIP,
subnet: subnet,
subnetIP: subnetIP,
},
},
{
@ -717,19 +720,19 @@ func TestNATCfg(t *testing.T) {
Addresses: selfAddrs,
Peers: []wgcfg.Peer{
node(peer1IP, selfEIP1),
node(peer2IP, selfEIP2, netip.MustParsePrefix("0.0.0.0/0")),
node(peer2IP, selfEIP2, exitRoute),
},
},
snatMap: map[netip.Addr]netip.Addr{
peer1IP: selfEIP1,
peer2IP: selfEIP2,
netip.MustParseAddr("8.8.8.8"): selfEIP2,
peer1IP: selfEIP1,
peer2IP: selfEIP2,
publicIP: selfEIP2,
},
dnatMap: map[netip.Addr]netip.Addr{
selfNativeIP: selfNativeIP,
selfEIP1: selfNativeIP,
selfEIP2: selfNativeIP,
subnet: subnet,
subnetIP: subnetIP,
},
},
{
@ -742,15 +745,35 @@ func TestNATCfg(t *testing.T) {
},
},
snatMap: map[netip.Addr]netip.Addr{
peer1IP: selfNativeIP,
peer2IP: selfNativeIP,
subnet: selfNativeIP,
peer1IP: selfNativeIP,
peer2IP: selfNativeIP,
subnetIP: selfNativeIP,
},
dnatMap: map[netip.Addr]netip.Addr{
selfNativeIP: selfNativeIP,
selfEIP1: selfEIP1,
selfEIP2: selfEIP2,
subnet: subnet,
subnetIP: subnetIP,
},
},
{
name: "exit-node-require-nat-peer-doesnt",
wcfg: &wgcfg.Config{
Addresses: selfAddrs,
Peers: []wgcfg.Peer{
node(peer1IP, noIP),
node(peer2IP, selfEIP2, exitRoute),
},
},
snatMap: map[netip.Addr]netip.Addr{
peer1IP: selfNativeIP,
peer2IP: selfEIP2,
publicIP: selfEIP2,
},
dnatMap: map[netip.Addr]netip.Addr{
selfNativeIP: selfNativeIP,
selfEIP2: selfNativeIP,
subnetIP: subnetIP,
},
},
}

View File

@ -354,6 +354,10 @@ func debArch(arch string) string {
// can ship more than 1 ARM deb, so for now match redo's behavior of
// shipping armv5 binaries in an armv7 trenchcoat.
return "armhf"
case "mipsle":
return "mipsel"
case "mips64le":
return "mips64el"
default:
return arch
}
@ -372,6 +376,10 @@ func rpmArch(arch string) string {
return "armv7hl"
case "arm64":
return "aarch64"
case "mipsle":
return "mipsel"
case "mips64le":
return "mips64el"
default:
return arch
}

View File

@ -82,31 +82,31 @@ var (
}
debs = map[string]bool{
"linux/386": true,
"linux/amd64": true,
"linux/arm": true,
"linux/arm64": true,
"linux/riscv64": true,
// TODO: maybe mipses, we accidentally started building them at some
// point even though they probably don't work right.
// "linux/mips": true,
// "linux/mipsle": true,
"linux/386": true,
"linux/amd64": true,
"linux/arm": true,
"linux/arm64": true,
"linux/riscv64": true,
"linux/mipsle": true,
"linux/mips64le": true,
"linux/mips": true,
// Debian does not support big endian mips64. Leave that out until we know
// we need it.
// "linux/mips64": true,
// "linux/mips64le": true,
}
rpms = map[string]bool{
"linux/386": true,
"linux/amd64": true,
"linux/arm": true,
"linux/arm64": true,
"linux/riscv64": true,
// TODO: maybe mipses, we accidentally started building them at some
// point even though they probably don't work right.
"linux/386": true,
"linux/amd64": true,
"linux/arm": true,
"linux/arm64": true,
"linux/riscv64": true,
"linux/mipsle": true,
"linux/mips64le": true,
// Fedora only supports little endian mipses. Maybe some other distribution
// supports big-endian? Leave them out for now.
// "linux/mips": true,
// "linux/mipsle": true,
// "linux/mips64": true,
// "linux/mips64le": true,
}
)

View File

@ -688,18 +688,14 @@ func (ss *sshSession) startWithStdPipes() (err error) {
return nil
}
func loginShell(u *user.User) string {
func loginShell(u *userMeta) string {
if u.LoginShell != "" {
// This field should be populated on Linux, at least, because
// func userLookup on Linux uses "getent" to look up the user
// and that populates it.
return u.LoginShell
}
switch runtime.GOOS {
case "linux":
if distro.Get() == distro.Gokrazy {
return "/tmp/serial-busybox/ash"
}
out, _ := exec.Command("getent", "passwd", u.Uid).Output()
// out is "root:x:0:0:root:/root:/bin/bash"
f := strings.SplitN(string(out), ":", 10)
if len(f) > 6 {
return strings.TrimSpace(f[6]) // shell
}
case "darwin":
// Note: /Users/username is key, and not the same as u.HomeDir.
out, _ := exec.Command("dscl", ".", "-read", filepath.Join("/Users", u.Username), "UserShell").Output()
@ -715,12 +711,12 @@ func loginShell(u *user.User) string {
return "/bin/sh"
}
func envForUser(u *user.User) []string {
func envForUser(u *userMeta) []string {
return []string{
fmt.Sprintf("SHELL=" + loginShell(u)),
fmt.Sprintf("USER=" + u.Username),
fmt.Sprintf("HOME=" + u.HomeDir),
fmt.Sprintf("PATH=" + defaultPathForUser(u)),
fmt.Sprintf("PATH=" + defaultPathForUser(&u.User)),
}
}

View File

@ -22,7 +22,6 @@ import (
"net/url"
"os"
"os/exec"
"os/user"
"path/filepath"
"runtime"
"strconv"
@ -44,7 +43,6 @@ import (
"tailscale.com/util/clientmetric"
"tailscale.com/util/mak"
"tailscale.com/util/multierr"
"tailscale.com/version/distro"
)
var (
@ -67,6 +65,7 @@ type ipnLocalBackend interface {
WhoIs(ipp netip.AddrPort) (n *tailcfg.Node, u tailcfg.UserProfile, ok bool)
DoNoiseRequest(req *http.Request) (*http.Response, error)
Dialer() *tsdial.Dialer
TailscaleVarRoot() string
}
type server struct {
@ -218,7 +217,7 @@ type conn struct {
finalActionErr error // set by doPolicyAuth or resolveNextAction
info *sshConnInfo // set by setInfo
localUser *user.User // set by doPolicyAuth
localUser *userMeta // set by doPolicyAuth
userGroupIDs []string // set by doPolicyAuth
pubKey gossh.PublicKey // set by doPolicyAuth
@ -369,16 +368,7 @@ func (c *conn) doPolicyAuth(ctx ssh.Context, pubKey ssh.PublicKey) error {
if a.Accept {
c.finalAction = a
}
if runtime.GOOS == "linux" && distro.Get() == distro.Gokrazy {
// Gokrazy is a single-user appliance with ~no userspace.
// There aren't users to look up (no /etc/passwd, etc)
// so rather than fail below, just hardcode root.
// TODO(bradfitz): fix os/user upstream instead?
c.userGroupIDs = []string{"0"}
c.localUser = &user.User{Uid: "0", Gid: "0", Username: "root"}
return nil
}
lu, err := user.Lookup(localUser)
lu, err := userLookup(localUser)
if err != nil {
c.logf("failed to look up %v: %v", localUser, err)
ctx.SendAuthBanner(fmt.Sprintf("failed to look up %v\r\n", localUser))
@ -959,7 +949,7 @@ var errSessionDone = errors.New("session is done")
// handleSSHAgentForwarding starts a Unix socket listener and in the background
// forwards agent connections between the listener and the ssh.Session.
// On success, it assigns ss.agentListener.
func (ss *sshSession) handleSSHAgentForwarding(s ssh.Session, lu *user.User) error {
func (ss *sshSession) handleSSHAgentForwarding(s ssh.Session, lu *userMeta) error {
if !ssh.AgentRequested(ss) || !ss.conn.finalAction.AllowAgentForwarding {
return nil
}
@ -1147,6 +1137,11 @@ func (ss *sshSession) run() {
return
}
// recordSSHToLocalDisk is a deprecated dev knob to allow recording SSH sessions
// to local storage. It is only used if there is no recording configured by the
// coordination server. This will be removed in the future.
var recordSSHToLocalDisk = envknob.RegisterBool("TS_DEBUG_LOG_SSH")
// recorders returns the list of recorders to use for this session.
// If the final action has a non-empty list of recorders, that list is
// returned. Otherwise, the list of recorders from the initial action
@ -1160,7 +1155,7 @@ func (ss *sshSession) recorders() ([]netip.AddrPort, *tailcfg.SSHRecorderFailure
func (ss *sshSession) shouldRecord() bool {
recs, _ := ss.recorders()
return len(recs) > 0
return len(recs) > 0 || recordSSHToLocalDisk()
}
type sshConnInfo struct {
@ -1499,12 +1494,33 @@ func (ss *sshSession) connectToRecorder(ctx context.Context, recs []netip.AddrPo
return nil, nil, multierr.New(errs...)
}
func (ss *sshSession) openFileForRecording(now time.Time) (_ io.WriteCloser, err error) {
varRoot := ss.conn.srv.lb.TailscaleVarRoot()
if varRoot == "" {
return nil, errors.New("no var root for recording storage")
}
dir := filepath.Join(varRoot, "ssh-sessions")
if err := os.MkdirAll(dir, 0700); err != nil {
return nil, err
}
f, err := os.CreateTemp(dir, fmt.Sprintf("ssh-session-%v-*.cast", now.UnixNano()))
if err != nil {
return nil, err
}
return f, nil
}
// startNewRecording starts a new SSH session recording.
// It may return a nil recording if recording is not available.
func (ss *sshSession) startNewRecording() (_ *recording, err error) {
recorders, onFailure := ss.recorders()
var localRecording bool
if len(recorders) == 0 {
return nil, errors.New("no recorders configured")
if recordSSHToLocalDisk() {
localRecording = true
} else {
return nil, errors.New("no recorders configured")
}
}
var w ssh.Window
@ -1528,40 +1544,45 @@ func (ss *sshSession) startNewRecording() (_ *recording, err error) {
// ss.ctx is closed when the session closes, but we don't want to break the upload at that time.
// Instead we want to wait for the session to close the writer when it finishes.
ctx := context.Background()
wc, errChan, err := ss.connectToRecorder(ctx, recorders)
if err != nil {
// TODO(catzkorn): notify control here.
if onFailure != nil && onFailure.RejectSessionWithMessage != "" {
ss.logf("recording: error starting recording (rejecting session): %v", err)
return nil, userVisibleError{
error: err,
msg: onFailure.RejectSessionWithMessage,
if localRecording {
rec.out, err = ss.openFileForRecording(now)
if err != nil {
return nil, err
}
} else {
var errChan <-chan error
rec.out, errChan, err = ss.connectToRecorder(ctx, recorders)
if err != nil {
// TODO(catzkorn): notify control here.
if onFailure != nil && onFailure.RejectSessionWithMessage != "" {
ss.logf("recording: error starting recording (rejecting session): %v", err)
return nil, userVisibleError{
error: err,
msg: onFailure.RejectSessionWithMessage,
}
}
ss.logf("recording: error starting recording (failing open): %v", err)
return nil, nil
}
ss.logf("recording: error starting recording (failing open): %v", err)
return nil, nil
go func() {
err := <-errChan
if err == nil {
// Success.
return
}
// TODO(catzkorn): notify control here.
if onFailure != nil && onFailure.TerminateSessionWithMessage != "" {
ss.logf("recording: error uploading recording (closing session): %v", err)
ss.cancelCtx(userVisibleError{
error: err,
msg: onFailure.TerminateSessionWithMessage,
})
return
}
ss.logf("recording: error uploading recording (failing open): %v", err)
}()
}
go func() {
err := <-errChan
if err == nil {
// Success.
return
}
// TODO(catzkorn): notify control here.
if onFailure != nil && onFailure.TerminateSessionWithMessage != "" {
ss.logf("recording: error uploading recording (closing session): %v", err)
ss.cancelCtx(userVisibleError{
error: err,
msg: onFailure.TerminateSessionWithMessage,
})
return
}
ss.logf("recording: error uploading recording (failing open): %v", err)
}()
rec.out = wc
ch := CastHeader{
Version: 2,
Width: w.Width,

View File

@ -845,7 +845,11 @@ func TestSSH(t *testing.T) {
if err != nil {
t.Fatal(err)
}
sc.localUser = u
um, err := userLookup(u.Uid)
if err != nil {
t.Fatal(err)
}
sc.localUser = um
sc.info = &sshConnInfo{
sshUser: "test",
src: netip.MustParseAddrPort("1.2.3.4:32342"),
@ -1129,3 +1133,10 @@ func TestPathFromPAMEnvLineOnNixOS(t *testing.T) {
}
t.Logf("success; got=%q", got)
}
func TestStdOsUserUserAssumptions(t *testing.T) {
v := reflect.TypeOf(user.User{})
if got, want := v.NumField(), 5; got != want {
t.Errorf("os/user.User has %v fields; this package assumes %v", got, want)
}
}

ssh/tailssh/user.go 100644 (new file, 116 additions)
View File

@ -0,0 +1,116 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause
//go:build linux || (darwin && !ios) || freebsd || openbsd
package tailssh
import (
"context"
"errors"
"log"
"os/exec"
"os/user"
"runtime"
"strings"
"time"
"unicode/utf8"
"tailscale.com/version/distro"
)
// userMeta is a wrapper around *user.User with extra fields.
type userMeta struct {
user.User
// LoginShell is the user's login shell.
LoginShell string
}
// GroupIds returns the list of group IDs that the user is a member of.
func (u *userMeta) GroupIds() ([]string, error) {
if runtime.GOOS == "linux" && distro.Get() == distro.Gokrazy {
// Gokrazy is a single-user appliance with ~no userspace.
// There aren't users to look up (no /etc/passwd, etc)
// so rather than fail below, just hardcode root.
// TODO(bradfitz): fix os/user upstream instead?
return []string{"0"}, nil
}
return u.User.GroupIds()
}
// userLookup is like os/user.LookupId but it returns a *userMeta wrapper
// around a *user.User with extra fields.
func userLookup(uid string) (*userMeta, error) {
if runtime.GOOS != "linux" {
return userLookupStd(uid)
}
// No getent on Gokrazy. So hard-code the login shell.
if distro.Get() == distro.Gokrazy {
um, err := userLookupStd(uid)
if err == nil {
um.LoginShell = "/tmp/serial-busybox/ash"
}
return um, err
}
// On Linux, default to using "getent" to look up users so that
// even with static tailscaled binaries without cgo (as we distribute),
// we can still look up PAM/NSS users which the standard library's
// os/user without cgo won't get (because of no libc hooks).
// But if "getent" fails, userLookupGetent falls back to the standard
// library anyway.
return userLookupGetent(uid)
}
func validUsername(uid string) bool {
if len(uid) > 32 || len(uid) == 0 {
return false
}
for _, r := range uid {
if r < ' ' || r == 0x7f || r == utf8.RuneError { // TODO(bradfitz): more?
return false
}
}
return true
}
func userLookupGetent(uid string) (*userMeta, error) {
// Do some basic validation before passing this string to "getent", even though
// getent should do its own validation.
if !validUsername(uid) {
return nil, errors.New("invalid username")
}
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
out, err := exec.CommandContext(ctx, "getent", "passwd", uid).Output()
if err != nil {
log.Printf("error calling getent for user %q: %v", uid, err)
return userLookupStd(uid)
}
// output is "alice:x:1001:1001:Alice Smith,,,:/home/alice:/bin/bash"
f := strings.SplitN(strings.TrimSpace(string(out)), ":", 10)
for len(f) < 7 {
f = append(f, "")
}
um := &userMeta{
User: user.User{
Username: f[0],
Uid: f[2],
Gid: f[3],
Name: f[4],
HomeDir: f[5],
},
LoginShell: f[6],
}
return um, nil
}
func userLookupStd(uid string) (*userMeta, error) {
u, err := user.LookupId(uid)
if err != nil {
return nil, err
}
return &userMeta{User: *u}, nil
}
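
A hypothetical call site showing how the new pieces fit together (resolveUserEnv is sketched in this package's terms, not an excerpt from the patch):

// resolveUserEnv resolves a user via the getent-backed path (NSS/LDAP
// aware on Linux, std os/user elsewhere) and builds the session env.
func resolveUserEnv(localUser string) ([]string, error) {
	lu, err := userLookup(localUser)
	if err != nil {
		return nil, err
	}
	// envForUser derives SHELL from loginShell(lu), which prefers
	// lu.LoginShell when getent populated it.
	return envForUser(lu), nil
}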