Compare commits: main...mihaip/fas (2 commits)

Commits: bc361df175, 42464392ba
@@ -635,7 +635,6 @@ func runUp(ctx context.Context, args []string) (retErr error) {
        return err
    }
    opts := ipn.Options{
        StateKey:    ipn.GlobalDaemonStateKey,
        AuthKey:     authKey,
        UpdatePrefs: prefs,
    }
@@ -648,9 +647,6 @@ func runUp(ctx context.Context, args []string) (retErr error) {
    // StateKey based on the connection identity. So for now, just
    // do as the Windows GUI's always done:
    if effectiveGOOS() == "windows" {
        // The Windows service will set this as needed based
        // on our connection's identity.
        opts.StateKey = ""
        opts.Prefs = prefs
    }
@@ -496,9 +496,7 @@ func tailscaleUp(ctx context.Context, prefs *ipn.Prefs, forceReauth bool) (authU

    bc.SetPrefs(prefs)

    bc.Start(ipn.Options{
        StateKey: ipn.GlobalDaemonStateKey,
    })
    bc.Start(ipn.Options{})
    if forceReauth {
        bc.StartLoginInteractive()
    }
@@ -33,7 +33,7 @@ import (
    "tailscale.com/cmd/tailscaled/childproc"
    "tailscale.com/control/controlclient"
    "tailscale.com/envknob"
    "tailscale.com/ipn"
    "tailscale.com/ipn/ipnlocal"
    "tailscale.com/ipn/ipnserver"
    "tailscale.com/ipn/store"
    "tailscale.com/logpolicy"
@@ -306,7 +306,6 @@ func ipnServerOpts() (o ipnserver.Options) {
        fallthrough
    default:
        o.SurviveDisconnects = true
        o.AutostartStateKey = ipn.GlobalDaemonStateKey
    case "windows":
        // Not those.
    }
@@ -452,7 +451,11 @@ func run() error {
    if err != nil {
        return fmt.Errorf("store.New: %w", err)
    }
    srv, err := ipnserver.New(logf, pol.PublicID.String(), store, e, dialer, nil, opts)
    pm, err := ipnlocal.NewProfileManager(store, logf, "")
    if err != nil {
        return fmt.Errorf("ipnlocal.NewProfileManager: %w", err)
    }
    srv, err := ipnserver.New(logf, pol.PublicID.String(), pm, e, dialer, opts)
    if err != nil {
        return fmt.Errorf("ipnserver.New: %w", err)
    }
@@ -124,7 +124,11 @@ func newIPN(jsConfig js.Value) map[string]any {
        return ns.DialContextTCP(ctx, dst)
    }

    srv, err := ipnserver.New(logf, lpc.PublicID.String(), store, eng, dialer, nil, ipnserver.Options{
    pm, err := ipnlocal.NewProfileManager(store, logf, "wasm")
    if err != nil {
        log.Fatalf("ipnlocal.NewProfileManager: %v", err)
    }
    srv, err := ipnserver.New(logf, lpc.PublicID.String(), pm, eng, dialer, ipnserver.Options{
        SurviveDisconnects: true,
        LoginFlags:         controlclient.LoginEphemeral,
    })
@@ -284,7 +288,6 @@ func (i *jsIPN) run(jsCallbacks js.Value) {

    go func() {
        err := i.lb.Start(ipn.Options{
            StateKey: "wasm",
            UpdatePrefs: &ipn.Prefs{
                ControlURL: i.controlURL,
                RouteAll:   false,
@@ -590,7 +590,7 @@ func (c *Auto) sendStatus(who string, err error, url string, nm *netmap.NetworkM
    }
    if nm != nil && loggedIn && synced {
        pp := c.direct.GetPersist()
        p = &pp
        p = pp.AsStruct()
    } else {
        // don't send netmap status, as it's misleading when we're
        // not logged in.
@@ -708,7 +708,7 @@ func (c *Auto) Shutdown() {
// used exclusively in tests.
func (c *Auto) TestOnlyNodePublicKey() key.NodePublic {
    priv := c.direct.GetPersist()
    return priv.PrivateNodeKey.Public()
    return priv.PrivateNodeKey().Public()
}

func (c *Auto) TestOnlySetAuthKey(authkey string) {
@@ -333,10 +333,10 @@ func (c *Direct) SetTKAHead(tkaHead string) bool {
    return true
}

func (c *Direct) GetPersist() persist.Persist {
func (c *Direct) GetPersist() persist.PersistView {
    c.mu.Lock()
    defer c.mu.Unlock()
    return c.persist
    return c.persist.View()
}

func (c *Direct) TryLogout(ctx context.Context) error {
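Aside: GetPersist now returns a read-only persist.PersistView instead of a persist.Persist value, so callers go through accessor methods rather than struct fields. A minimal sketch of how a caller adapts, using only the accessors that appear elsewhere in this diff (PrivateNodeKey().Public()); the helper name and package are hypothetical:

```go
package example

import (
	"tailscale.com/control/controlclient"
	"tailscale.com/types/key"
)

// nodePublicKey is a hypothetical helper: it reads the node key through the
// new read-only PersistView rather than a struct field.
func nodePublicKey(c *controlclient.Direct) key.NodePublic {
	pv := c.GetPersist()                // persist.PersistView (read-only)
	return pv.PrivateNodeKey().Public() // accessor method instead of field access
}
```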
@@ -178,21 +178,11 @@ type StateKey string
type Options struct {
    // FrontendLogID is the public logtail id used by the frontend.
    FrontendLogID string
    // StateKey and Prefs together define the state the backend should
    // use:
    // - StateKey=="" && Prefs!=nil: use Prefs for internal state,
    //   don't persist changes in the backend, except for the machine key
    //   for migration purposes.
    // - StateKey!="" && Prefs==nil: load the given backend-side
    //   state and use/update that.
    // - StateKey!="" && Prefs!=nil: like the previous case, but do
    //   an initial overwrite of backend state with Prefs.
    //
    // NOTE(apenwarr): The above means that this Prefs field does not do
    // what you probably think it does. It will overwrite your encryption
    // keys. Do not use unless you know what you're doing.
    StateKey StateKey
    Prefs *Prefs
    // Prefs is the initial preferences to use. If nil, the current
    // profile's preferences are loaded from the store.
    // If non-nil, the Prefs are used as-is, and the state store is
    // updated to match.
    Prefs *Prefs
    // UpdatePrefs, if provided, overrides Options.Prefs *and* the Prefs
    // already stored in the backend state, *except* for the Persist
    // Persist member. If you just want to provide prefs, this is
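The comment change above is the crux of the API change: StateKey is gone, a nil Prefs means "use the current profile's stored prefs", and UpdatePrefs overlays everything except Persist. A short sketch of the two common call patterns, assuming the LocalBackend.Start method shown later in this diff; the helper name and package are hypothetical:

```go
package example

import (
	"tailscale.com/ipn"
	"tailscale.com/ipn/ipnlocal"
)

// startBackend is a hypothetical helper showing the two common cases under
// the new Options semantics.
func startBackend(lb *ipnlocal.LocalBackend, update *ipn.Prefs) error {
	if update == nil {
		// No Prefs, no UpdatePrefs: Start loads the current profile's
		// preferences from the state store.
		return lb.Start(ipn.Options{})
	}
	// UpdatePrefs overrides Options.Prefs and the stored prefs, except for
	// the Persist member, which the backend carries over.
	return lb.Start(ipn.Options{UpdatePrefs: update})
}
```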
@@ -125,6 +125,7 @@ type LocalBackend struct {
    keyLogf      logger.Logf // for printing list of peers on change
    statsLogf    logger.Logf // for printing peers stats on change
    e            wgengine.Engine
    pm           *ProfileManager
    store        ipn.StateStore
    dialer       *tsdial.Dialer // non-nil
    backendLogID string
@@ -139,6 +140,10 @@ type LocalBackend struct {
    sshAtomicBool  atomic.Bool
    shutdownCalled bool // if Shutdown has been called

    // lastProfileID tracks the last profile we've seen from the ProfileManager.
    // It's used to detect when the user has changed their profile.
    lastProfileID string

    filterAtomic                 atomic.Pointer[filter.Filter]
    containsViaIPFuncAtomic      syncs.AtomicValue[func(netip.Addr) bool]
    shouldInterceptTCPPortAtomic syncs.AtomicValue[func(uint16) bool]
@@ -152,9 +157,6 @@ type LocalBackend struct {
    notify         func(ipn.Notify)
    cc             controlclient.Client
    ccAuto         *controlclient.Auto // if cc is of type *controlclient.Auto
    stateKey       ipn.StateKey        // computed in part from user-provided value
    userID         string              // current controlling user ID (for Windows, primarily)
    prefs          ipn.PrefsView       // may not be Valid.
    inServerMode   bool
    machinePrivKey key.MachinePrivate
    nlPrivKey      key.NLPrivate
@@ -227,7 +229,7 @@ type clientGen func(controlclient.Options) (controlclient.Client, error)
// but is not actually running.
//
// If dialer is nil, a new one is made.
func NewLocalBackend(logf logger.Logf, logid string, store ipn.StateStore, dialer *tsdial.Dialer, e wgengine.Engine, loginFlags controlclient.LoginFlags) (*LocalBackend, error) {
func NewLocalBackend(logf logger.Logf, logid string, pm *ProfileManager, dialer *tsdial.Dialer, e wgengine.Engine, loginFlags controlclient.LoginFlags) (*LocalBackend, error) {
    if e == nil {
        panic("ipn.NewLocalBackend: engine must not be nil")
    }
@@ -254,7 +256,8 @@ func NewLocalBackend(logf logger.Logf, logid string, store ipn.StateStore, diale
        keyLogf:      logger.LogOnChange(logf, 5*time.Minute, time.Now),
        statsLogf:    logger.LogOnChange(logf, 5*time.Minute, time.Now),
        e:            e,
        store:        store,
        pm:           pm,
        store:        pm.Store(),
        dialer:       dialer,
        backendLogID: logid,
        state:        ipn.NoState,
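With the constructor change above, callers build a ProfileManager first and hand it to NewLocalBackend, which takes its store from pm.Store(). A minimal sketch of the new wiring, modeled on the test changes later in this diff; the in-memory store and fake engine are test stand-ins, and the helper name is hypothetical:

```go
package example

import (
	"log"

	"tailscale.com/ipn/ipnlocal"
	"tailscale.com/ipn/store/mem"
	"tailscale.com/types/logger"
	"tailscale.com/wgengine"
)

func newBackend() (*ipnlocal.LocalBackend, error) {
	var logf logger.Logf = log.Printf
	store := new(mem.Store)
	eng, err := wgengine.NewFakeUserspaceEngine(logf, 0)
	if err != nil {
		return nil, err
	}
	// The ProfileManager owns the state store; LocalBackend reaches it via
	// pm.Store() instead of taking the store directly.
	pm, err := ipnlocal.NewProfileManager(store, logf, "")
	if err != nil {
		return nil, err
	}
	return ipnlocal.NewLocalBackend(logf, "logid", pm, nil, eng, 0)
}
```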
@ -293,7 +296,7 @@ func NewLocalBackend(logf logger.Logf, logid string, store ipn.StateStore, diale
|
|||
|
||||
for _, component := range debuggableComponents {
|
||||
key := componentStateKey(component)
|
||||
if ut, err := ipn.ReadStoreInt(store, key); err == nil {
|
||||
if ut, err := ipn.ReadStoreInt(pm.Store(), key); err == nil {
|
||||
if until := time.Unix(ut, 0); until.After(time.Now()) {
|
||||
// conditional to avoid log spam at start when off
|
||||
b.SetComponentDebugLogging(component, until)
|
||||
|
@ -451,7 +454,7 @@ func (b *LocalBackend) linkChange(major bool, ifst *interfaces.State) {
|
|||
|
||||
// If the local network configuration has changed, our filter may
|
||||
// need updating to tweak default routes.
|
||||
b.updateFilterLocked(b.netMap, b.prefs)
|
||||
b.updateFilterLocked(b.netMap, b.pm.CurrentPrefs())
|
||||
|
||||
if peerAPIListenAsync && b.netMap != nil && b.state == ipn.Running {
|
||||
want := len(b.netMap.Addresses)
|
||||
|
@ -521,7 +524,7 @@ func stripKeysFromPrefs(p ipn.PrefsView) ipn.PrefsView {
|
|||
func (b *LocalBackend) Prefs() ipn.PrefsView {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
return stripKeysFromPrefs(b.prefs)
|
||||
return stripKeysFromPrefs(b.pm.CurrentPrefs())
|
||||
}
|
||||
|
||||
// Status returns the latest status of the backend and its
|
||||
|
@ -578,14 +581,14 @@ func (b *LocalBackend) updateStatus(sb *ipnstate.StatusBuilder, extraLocked func
|
|||
s.CurrentTailnet.MagicDNSSuffix = b.netMap.MagicDNSSuffix()
|
||||
s.CurrentTailnet.MagicDNSEnabled = b.netMap.DNS.Proxied
|
||||
s.CurrentTailnet.Name = b.netMap.Domain
|
||||
if b.prefs.Valid() && !b.prefs.ExitNodeID().IsZero() {
|
||||
if exitPeer, ok := b.netMap.PeerWithStableID(b.prefs.ExitNodeID()); ok {
|
||||
if prefs := b.pm.CurrentPrefs(); prefs.Valid() && !prefs.ExitNodeID().IsZero() {
|
||||
if exitPeer, ok := b.netMap.PeerWithStableID(prefs.ExitNodeID()); ok {
|
||||
var online = false
|
||||
if exitPeer.Online != nil {
|
||||
online = *exitPeer.Online
|
||||
}
|
||||
s.ExitNodeStatus = &ipnstate.ExitNodeStatus{
|
||||
ID: b.prefs.ExitNodeID(),
|
||||
ID: prefs.ExitNodeID(),
|
||||
Online: online,
|
||||
TailscaleIPs: exitPeer.Addresses,
|
||||
}
|
||||
|
@ -629,6 +632,7 @@ func (b *LocalBackend) populatePeerStatusLocked(sb *ipnstate.StatusBuilder) {
|
|||
for id, up := range b.netMap.UserProfiles {
|
||||
sb.AddUser(id, up)
|
||||
}
|
||||
exitNodeID := b.pm.CurrentPrefs().ExitNodeID()
|
||||
for _, p := range b.netMap.Peers {
|
||||
var lastSeen time.Time
|
||||
if p.LastSeen != nil {
|
||||
|
@ -651,7 +655,7 @@ func (b *LocalBackend) populatePeerStatusLocked(sb *ipnstate.StatusBuilder) {
|
|||
LastSeen: lastSeen,
|
||||
Online: p.Online != nil && *p.Online,
|
||||
ShareeNode: p.Hostinfo.ShareeNode(),
|
||||
ExitNode: p.StableID != "" && p.StableID == b.prefs.ExitNodeID(),
|
||||
ExitNode: p.StableID != "" && p.StableID == exitNodeID,
|
||||
SSH_HostKeys: p.Hostinfo.SSH_HostKeys().AsSlice(),
|
||||
}
|
||||
peerStatusFromNode(ps, p)
|
||||
|
@ -797,8 +801,7 @@ func (b *LocalBackend) setClientStatus(st controlclient.Status) {
|
|||
b.e.SetNetworkMap(new(netmap.NetworkMap))
|
||||
}
|
||||
|
||||
prefs := b.prefs.AsStruct()
|
||||
stateKey := b.stateKey
|
||||
prefs := b.pm.CurrentPrefs().AsStruct()
|
||||
netMap := b.netMap
|
||||
interact := b.interact
|
||||
|
||||
|
@ -835,9 +838,6 @@ func (b *LocalBackend) setClientStatus(st controlclient.Status) {
|
|||
prefsChanged = true
|
||||
}
|
||||
// Prefs will be written out; this is not safe unless locked or cloned.
|
||||
if prefsChanged {
|
||||
b.prefs = prefs.View()
|
||||
}
|
||||
if st.NetMap != nil {
|
||||
b.mu.Unlock() // respect locking rules for tkaSyncIfNeeded
|
||||
if err := b.tkaSyncIfNeeded(st.NetMap); err != nil {
|
||||
|
@ -859,18 +859,16 @@ func (b *LocalBackend) setClientStatus(st controlclient.Status) {
|
|||
b.tkaFilterNetmapLocked(st.NetMap)
|
||||
}
|
||||
b.setNetMapLocked(st.NetMap)
|
||||
b.updateFilterLocked(st.NetMap, b.prefs)
|
||||
b.updateFilterLocked(st.NetMap, prefs.View())
|
||||
}
|
||||
b.mu.Unlock()
|
||||
|
||||
// Now complete the lock-free parts of what we started while locked.
|
||||
if prefsChanged {
|
||||
if stateKey != "" {
|
||||
if err := b.store.WriteState(stateKey, prefs.ToBytes()); err != nil {
|
||||
b.logf("Failed to save new controlclient state: %v", err)
|
||||
}
|
||||
}
|
||||
p := prefs.View()
|
||||
if err := b.pm.SetPrefs(p); err != nil {
|
||||
b.logf("Failed to save new controlclient state: %v", err)
|
||||
}
|
||||
b.send(ipn.Notify{Prefs: &p})
|
||||
}
|
||||
if st.NetMap != nil {
|
||||
|
@ -1062,7 +1060,6 @@ func (b *LocalBackend) startIsNoopLocked(opts ipn.Options) bool {
|
|||
return b.state == ipn.Running &&
|
||||
b.hostinfo != nil &&
|
||||
b.hostinfo.FrontendLogID == opts.FrontendLogID &&
|
||||
b.stateKey == opts.StateKey &&
|
||||
opts.Prefs == nil &&
|
||||
opts.UpdatePrefs == nil &&
|
||||
opts.AuthKey == ""
|
||||
|
@ -1079,8 +1076,8 @@ func (b *LocalBackend) startIsNoopLocked(opts ipn.Options) bool {
|
|||
// actually a supported operation (it should be, but it's very unclear
|
||||
// from the following whether or not that is a safe transition).
|
||||
func (b *LocalBackend) Start(opts ipn.Options) error {
|
||||
if opts.Prefs == nil && opts.StateKey == "" {
|
||||
return errors.New("no state key or prefs provided")
|
||||
if opts.Prefs == nil && !b.pm.CurrentPrefs().Valid() {
|
||||
return errors.New("no prefs provided")
|
||||
}
|
||||
|
||||
if opts.Prefs != nil {
|
||||
|
@ -1090,18 +1087,19 @@ func (b *LocalBackend) Start(opts ipn.Options) error {
|
|||
}
|
||||
|
||||
b.mu.Lock()
|
||||
_, profileID := b.pm.CurrentProfile()
|
||||
|
||||
// The iOS client sends a "Start" whenever its UI screen comes
|
||||
// up, just because it wants a netmap. That should be fixed,
|
||||
// but meanwhile we can make Start cheaper here for such a
|
||||
// case and not restart the world (which takes a few seconds).
|
||||
// Instead, just send a notify with the state that iOS needs.
|
||||
if b.startIsNoopLocked(opts) {
|
||||
if b.startIsNoopLocked(opts) && profileID == b.lastProfileID {
|
||||
b.logf("Start: already running; sending notify")
|
||||
nm := b.netMap
|
||||
state := b.state
|
||||
b.mu.Unlock()
|
||||
p := b.prefs
|
||||
p := b.pm.CurrentPrefs()
|
||||
b.send(ipn.Notify{
|
||||
State: &state,
|
||||
NetMap: nm,
|
||||
|
@ -1137,26 +1135,24 @@ func (b *LocalBackend) Start(opts ipn.Options) error {
|
|||
b.hostinfo = hostinfo
|
||||
b.state = ipn.NoState
|
||||
|
||||
if err := b.loadStateLocked(opts.StateKey, opts.Prefs); err != nil {
|
||||
if err := b.loadStateLocked(opts.Prefs); err != nil {
|
||||
b.mu.Unlock()
|
||||
return fmt.Errorf("loading requested state: %v", err)
|
||||
}
|
||||
|
||||
if opts.UpdatePrefs != nil {
|
||||
newPrefs := opts.UpdatePrefs
|
||||
newPrefs.Persist = b.prefs.Persist()
|
||||
b.prefs = newPrefs.View()
|
||||
|
||||
if opts.StateKey != "" {
|
||||
if err := b.store.WriteState(opts.StateKey, b.prefs.ToBytes()); err != nil {
|
||||
b.logf("failed to save UpdatePrefs state: %v", err)
|
||||
}
|
||||
oldPrefs := b.pm.CurrentPrefs()
|
||||
newPrefs := opts.UpdatePrefs.Clone()
|
||||
newPrefs.Persist = oldPrefs.Persist()
|
||||
pv := newPrefs.View()
|
||||
if err := b.pm.SetPrefs(pv); err != nil {
|
||||
b.logf("failed to save UpdatePrefs state: %v", err)
|
||||
}
|
||||
b.setAtomicValuesFromPrefs(b.prefs)
|
||||
b.setTCPPortsInterceptedFromNetmapAndPrefsLocked()
|
||||
b.setAtomicValuesFromPrefs(pv)
|
||||
}
|
||||
|
||||
wantRunning := b.prefs.WantRunning()
|
||||
prefs := b.pm.CurrentPrefs()
|
||||
wantRunning := prefs.WantRunning()
|
||||
if wantRunning {
|
||||
if err := b.initMachineKeyLocked(); err != nil {
|
||||
return fmt.Errorf("initMachineKeyLocked: %w", err)
|
||||
|
@ -1166,17 +1162,20 @@ func (b *LocalBackend) Start(opts ipn.Options) error {
|
|||
return fmt.Errorf("initNLKeyLocked: %w", err)
|
||||
}
|
||||
|
||||
loggedOut := b.prefs.LoggedOut()
|
||||
loggedOut := prefs.LoggedOut()
|
||||
|
||||
b.inServerMode = b.prefs.ForceDaemon()
|
||||
b.serverURL = b.prefs.ControlURLOrDefault()
|
||||
b.inServerMode = prefs.ForceDaemon()
|
||||
b.serverURL = prefs.ControlURLOrDefault()
|
||||
if b.inServerMode || runtime.GOOS == "windows" {
|
||||
b.logf("Start: serverMode=%v", b.inServerMode)
|
||||
}
|
||||
b.applyPrefsToHostinfo(hostinfo, b.prefs)
|
||||
b.applyPrefsToHostinfo(hostinfo, prefs)
|
||||
|
||||
b.setNetMapLocked(nil)
|
||||
persistv := b.prefs.Persist()
|
||||
persistv := prefs.Persist()
|
||||
if persistv == nil {
|
||||
persistv = new(persist.Persist)
|
||||
}
|
||||
b.updateFilterLocked(nil, ipn.PrefsView{})
|
||||
b.mu.Unlock()
|
||||
|
||||
|
@ -1204,10 +1203,6 @@ func (b *LocalBackend) Start(opts ipn.Options) error {
|
|||
discoPublic := b.e.DiscoPublicKey()
|
||||
|
||||
var err error
|
||||
if persistv == nil {
|
||||
// let controlclient initialize it
|
||||
persistv = &persist.Persist{}
|
||||
}
|
||||
|
||||
isNetstack := wgengine.IsNetstackRouter(b.e)
|
||||
debugFlags := controlDebugFlags
|
||||
|
@ -1270,10 +1265,6 @@ func (b *LocalBackend) Start(opts ipn.Options) error {
|
|||
|
||||
b.e.SetNetInfoCallback(b.setNetInfo)
|
||||
|
||||
b.mu.Lock()
|
||||
prefs := b.prefs
|
||||
b.mu.Unlock()
|
||||
|
||||
blid := b.backendLogID
|
||||
b.logf("Backend: logs: be:%v fe:%v", blid, opts.FrontendLogID)
|
||||
b.send(ipn.Notify{BackendLogID: &blid})
|
||||
|
@ -1777,8 +1768,8 @@ func (b *LocalBackend) initMachineKeyLocked() (err error) {
|
|||
}
|
||||
|
||||
var legacyMachineKey key.MachinePrivate
|
||||
if b.prefs.Persist() != nil {
|
||||
legacyMachineKey = b.prefs.Persist().LegacyFrontendPrivateMachineKey
|
||||
if p := b.pm.CurrentPrefs().Persist(); p != nil {
|
||||
legacyMachineKey = p.LegacyFrontendPrivateMachineKey
|
||||
}
|
||||
|
||||
keyText, err := b.store.ReadState(ipn.MachineKeyStateKey)
|
||||
|
@ -1802,11 +1793,6 @@ func (b *LocalBackend) initMachineKeyLocked() (err error) {
|
|||
// have a legacy machine key, use that. Otherwise generate a
|
||||
// new one.
|
||||
if !legacyMachineKey.IsZero() {
|
||||
if b.stateKey == "" {
|
||||
b.logf("using frontend-provided legacy machine key")
|
||||
} else {
|
||||
b.logf("using legacy machine key from state key %q", b.stateKey)
|
||||
}
|
||||
b.machinePrivKey = legacyMachineKey
|
||||
} else {
|
||||
b.logf("generating new machine key")
|
||||
|
@ -1863,110 +1849,23 @@ func (b *LocalBackend) initNLKeyLocked() (err error) {
|
|||
return nil
|
||||
}
|
||||
|
||||
// writeServerModeStartState stores the ServerModeStartKey value based on the current
|
||||
// user and prefs. If userID is blank or prefs is blank, no work is done.
|
||||
//
|
||||
// b.mu may either be held or not.
|
||||
func (b *LocalBackend) writeServerModeStartState(userID string, prefs ipn.PrefsView) {
|
||||
if userID == "" || !prefs.Valid() {
|
||||
return
|
||||
}
|
||||
|
||||
if prefs.ForceDaemon() {
|
||||
stateKey := ipn.StateKey("user-" + userID)
|
||||
if err := b.store.WriteState(ipn.ServerModeStartKey, []byte(stateKey)); err != nil {
|
||||
b.logf("WriteState error: %v", err)
|
||||
}
|
||||
// It's important we do this here too, even if it looks
|
||||
// redundant with the one in the 'if stateKey != ""'
|
||||
// check block above. That one won't fire in the case
|
||||
// where the Windows client started up in client mode.
|
||||
// This happens when we transition into server mode:
|
||||
if err := b.store.WriteState(stateKey, prefs.ToBytes()); err != nil {
|
||||
b.logf("WriteState error: %v", err)
|
||||
}
|
||||
} else {
|
||||
if err := b.store.WriteState(ipn.ServerModeStartKey, nil); err != nil {
|
||||
b.logf("WriteState error: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// loadStateLocked sets b.prefs and b.stateKey based on a complex
|
||||
// combination of key, prefs, and legacyPath. b.mu must be held when
|
||||
// calling.
|
||||
func (b *LocalBackend) loadStateLocked(key ipn.StateKey, prefs *ipn.Prefs) (err error) {
|
||||
if prefs == nil && key == "" {
|
||||
panic("state key and prefs are both unset")
|
||||
func (b *LocalBackend) loadStateLocked(prefs *ipn.Prefs) (err error) {
|
||||
if prefs == nil && !b.pm.CurrentPrefs().Valid() {
|
||||
return fmt.Errorf("no prefs provided and no current profile")
|
||||
}
|
||||
|
||||
// Optimistically set stateKey (for initMachineKeyLocked's
|
||||
// logging), but revert it if we return an error so a later SetPrefs
|
||||
// call can't pick it up if it's bogus.
|
||||
b.stateKey = key
|
||||
defer func() {
|
||||
if err != nil {
|
||||
b.stateKey = ""
|
||||
}
|
||||
}()
|
||||
|
||||
if key == "" {
|
||||
// Frontend owns the state, we just need to obey it.
|
||||
//
|
||||
// If the frontend (e.g. on Windows) supplied the
|
||||
// optional/legacy machine key then it's used as the
|
||||
// value instead of making up a new one.
|
||||
b.logf("using frontend prefs: %s", prefs.Pretty())
|
||||
b.prefs = prefs.Clone().View()
|
||||
b.setTCPPortsInterceptedFromNetmapAndPrefsLocked()
|
||||
b.writeServerModeStartState(b.userID, b.prefs)
|
||||
return nil
|
||||
}
|
||||
|
||||
if prefs != nil {
|
||||
// Backend owns the state, but frontend is trying to migrate
|
||||
// state into the backend.
|
||||
b.logf("importing frontend prefs into backend store; frontend prefs: %s", prefs.Pretty())
|
||||
if err := b.store.WriteState(key, prefs.ToBytes()); err != nil {
|
||||
if err := b.pm.SetPrefs(prefs.View()); err != nil {
|
||||
return fmt.Errorf("store.WriteState: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
bs, err := b.store.ReadState(key)
|
||||
switch {
|
||||
case errors.Is(err, ipn.ErrStateNotExist):
|
||||
prefs := ipn.NewPrefs()
|
||||
prefs.WantRunning = false
|
||||
b.logf("using backend prefs; created empty state for %q: %s", key, prefs.Pretty())
|
||||
b.prefs = prefs.View()
|
||||
b.setTCPPortsInterceptedFromNetmapAndPrefsLocked()
|
||||
return nil
|
||||
case err != nil:
|
||||
return fmt.Errorf("backend prefs: store.ReadState(%q): %v", key, err)
|
||||
}
|
||||
prefs, err = ipn.PrefsFromBytes(bs)
|
||||
if err != nil {
|
||||
b.logf("using backend prefs for %q", key)
|
||||
return fmt.Errorf("PrefsFromBytes: %v", err)
|
||||
}
|
||||
|
||||
// Ignore any old stored preferences for https://login.tailscale.com
|
||||
// as the control server that would override the new default of
|
||||
// controlplane.tailscale.com.
|
||||
// This makes sure that mobile clients go through the new
|
||||
// frontends where we're (2021-10-02) doing battery
|
||||
// optimization work ahead of turning down the old backends.
|
||||
if prefs != nil && prefs.ControlURL != "" &&
|
||||
prefs.ControlURL != ipn.DefaultControlURL &&
|
||||
ipn.IsLoginServerSynonym(prefs.ControlURL) {
|
||||
prefs.ControlURL = ""
|
||||
}
|
||||
|
||||
b.logf("using backend prefs for %q: %s", key, prefs.Pretty())
|
||||
b.prefs = prefs.View()
|
||||
|
||||
b.setAtomicValuesFromPrefs(b.prefs)
|
||||
b.setTCPPortsInterceptedFromNetmapAndPrefsLocked()
|
||||
b.setAtomicValuesFromPrefs(b.pm.CurrentPrefs())
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@ -2012,6 +1911,7 @@ func (b *LocalBackend) setTCPPortsIntercepted(ports []uint16) {
|
|||
// setAtomicValuesFromPrefs populates sshAtomicBool and containsViaIPFuncAtomic
|
||||
// from the prefs p, which may be nil.
|
||||
func (b *LocalBackend) setAtomicValuesFromPrefs(p ipn.PrefsView) {
|
||||
b.setTCPPortsInterceptedFromNetmapAndPrefsLocked(p)
|
||||
b.sshAtomicBool.Store(p.Valid() && p.RunSSH() && envknob.CanSSHD())
|
||||
|
||||
if !p.Valid() {
|
||||
|
@ -2174,15 +2074,16 @@ func (b *LocalBackend) shouldUploadServices() bool {
|
|||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
if !b.prefs.Valid() || b.netMap == nil {
|
||||
p := b.pm.CurrentPrefs()
|
||||
if !p.Valid() || b.netMap == nil {
|
||||
return false // default to safest setting
|
||||
}
|
||||
return !b.prefs.ShieldsUp() && b.netMap.CollectServices
|
||||
return !p.ShieldsUp() && b.netMap.CollectServices
|
||||
}
|
||||
|
||||
func (b *LocalBackend) SetCurrentUserID(uid string) {
|
||||
b.mu.Lock()
|
||||
b.userID = uid
|
||||
b.pm.SetCurrentUser(uid)
|
||||
b.mu.Unlock()
|
||||
}
|
||||
|
||||
|
@ -2246,7 +2147,7 @@ func (b *LocalBackend) checkSSHPrefsLocked(p *ipn.Prefs) error {
|
|||
}
|
||||
|
||||
func (b *LocalBackend) sshOnButUnusableHealthCheckMessageLocked() (healthMessage string) {
|
||||
if !b.prefs.Valid() || !b.prefs.RunSSH() {
|
||||
if p := b.pm.CurrentPrefs(); !p.Valid() || !p.RunSSH() {
|
||||
return ""
|
||||
}
|
||||
if envknob.SSHIgnoreTailnetPolicy() || envknob.SSHPolicyFile() != "" {
|
||||
|
@ -2272,10 +2173,11 @@ func (b *LocalBackend) sshOnButUnusableHealthCheckMessageLocked() (healthMessage
|
|||
}
|
||||
|
||||
func (b *LocalBackend) isDefaultServerLocked() bool {
|
||||
if !b.prefs.Valid() {
|
||||
prefs := b.pm.CurrentPrefs()
|
||||
if !prefs.Valid() {
|
||||
return true // assume true until set otherwise
|
||||
}
|
||||
return b.prefs.ControlURLOrDefault() == ipn.DefaultControlURL
|
||||
return prefs.ControlURLOrDefault() == ipn.DefaultControlURL
|
||||
}
|
||||
|
||||
func (b *LocalBackend) EditPrefs(mp *ipn.MaskedPrefs) (ipn.PrefsView, error) {
|
||||
|
@ -2285,8 +2187,8 @@ func (b *LocalBackend) EditPrefs(mp *ipn.MaskedPrefs) (ipn.PrefsView, error) {
|
|||
b.egg = true
|
||||
go b.doSetHostinfoFilterServices(b.hostinfo.Clone())
|
||||
}
|
||||
p0 := b.prefs
|
||||
p1 := b.prefs.AsStruct()
|
||||
p0 := b.pm.CurrentPrefs()
|
||||
p1 := b.pm.CurrentPrefs().AsStruct()
|
||||
p1.ApplyEdits(mp)
|
||||
if err := b.checkPrefsLocked(p1); err != nil {
|
||||
b.mu.Unlock()
|
||||
|
@ -2328,66 +2230,57 @@ func (b *LocalBackend) SetPrefs(newp *ipn.Prefs) {
|
|||
// It returns a readonly copy of the new prefs.
|
||||
func (b *LocalBackend) setPrefsLockedOnEntry(caller string, newp *ipn.Prefs) ipn.PrefsView {
|
||||
netMap := b.netMap
|
||||
stateKey := b.stateKey
|
||||
oldp := b.prefs
|
||||
newp.Persist = oldp.Persist() // caller isn't allowed to override this
|
||||
b.setAtomicValuesFromPrefs(newp.View())
|
||||
|
||||
oldp := b.pm.CurrentPrefs()
|
||||
if oldp.Valid() {
|
||||
newp.Persist = oldp.Persist().Clone() // caller isn't allowed to override this
|
||||
}
|
||||
// findExitNodeIDLocked returns whether it updated b.prefs, but
|
||||
// everything in this function treats b.prefs as completely new
|
||||
// anyway. No-op if no exit node resolution is needed.
|
||||
findExitNodeIDLocked(newp, netMap)
|
||||
b.prefs = newp.View()
|
||||
b.setAtomicValuesFromPrefs(b.prefs)
|
||||
b.setTCPPortsInterceptedFromNetmapAndPrefsLocked()
|
||||
b.inServerMode = b.prefs.ForceDaemon()
|
||||
// We do this to avoid holding the lock while doing everything else.
|
||||
b.inServerMode = newp.ForceDaemon
|
||||
|
||||
oldHi := b.hostinfo
|
||||
newHi := oldHi.Clone()
|
||||
b.applyPrefsToHostinfo(newHi, b.prefs)
|
||||
b.applyPrefsToHostinfo(newHi, newp.View())
|
||||
b.hostinfo = newHi
|
||||
hostInfoChanged := !oldHi.Equal(newHi)
|
||||
userID := b.userID
|
||||
cc := b.cc
|
||||
|
||||
// [GRINDER STATS LINE] - please don't remove (used for log parsing)
|
||||
if caller == "SetPrefs" {
|
||||
b.logf("SetPrefs: %v", b.prefs.Pretty())
|
||||
b.logf("SetPrefs: %v", newp.Pretty())
|
||||
}
|
||||
b.updateFilterLocked(netMap, b.prefs)
|
||||
b.updateFilterLocked(netMap, newp.View())
|
||||
|
||||
if oldp.ShouldSSHBeRunning() && !b.prefs.ShouldSSHBeRunning() {
|
||||
if oldp.ShouldSSHBeRunning() && !newp.ShouldSSHBeRunning() {
|
||||
if b.sshServer != nil {
|
||||
go b.sshServer.Shutdown()
|
||||
b.sshServer = nil
|
||||
}
|
||||
}
|
||||
prefs := b.prefs // We can grab the view before unlocking. It can't be mutated.
|
||||
b.mu.Unlock()
|
||||
|
||||
if stateKey != "" {
|
||||
if err := b.store.WriteState(stateKey, prefs.ToBytes()); err != nil {
|
||||
b.logf("failed to save new controlclient state: %v", err)
|
||||
}
|
||||
}
|
||||
b.writeServerModeStartState(userID, prefs)
|
||||
|
||||
if netMap != nil {
|
||||
if login := netMap.UserProfiles[netMap.User].LoginName; login != "" {
|
||||
if prefs.Persist() == nil {
|
||||
if newp.Persist == nil {
|
||||
b.logf("active login: %s", login)
|
||||
} else if prefs.Persist().LoginName != login {
|
||||
// Corp issue 461: sometimes the wrong prefs are
|
||||
// logged; the frontend isn't always getting
|
||||
// notified (to update its prefs/persist) on
|
||||
// account switch. Log this while we figure it
|
||||
// out.
|
||||
b.logf("active login: %q ([unexpected] corp#461, not %q)", prefs.Persist().LoginName, login)
|
||||
} else if newp.Persist.LoginName != login {
|
||||
b.logf("active login: %q (changed from %q)", login, newp.Persist.LoginName)
|
||||
newp.Persist.LoginName = login
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if oldp.ShieldsUp() != prefs.ShieldsUp() || hostInfoChanged {
|
||||
prefs := newp.View()
|
||||
if err := b.pm.SetPrefs(prefs); err != nil {
|
||||
b.logf("failed to save new controlclient state: %v", err)
|
||||
}
|
||||
_, b.lastProfileID = b.pm.CurrentProfile()
|
||||
b.mu.Unlock()
|
||||
|
||||
if oldp.ShieldsUp() != newp.ShieldsUp || hostInfoChanged {
|
||||
b.doSetHostinfoFilterServices(newHi)
|
||||
}
|
||||
|
||||
|
@ -2395,12 +2288,12 @@ func (b *LocalBackend) setPrefsLockedOnEntry(caller string, newp *ipn.Prefs) ipn
|
|||
b.e.SetDERPMap(netMap.DERPMap)
|
||||
}
|
||||
|
||||
if !oldp.WantRunning() && prefs.WantRunning() {
|
||||
if !oldp.WantRunning() && newp.WantRunning {
|
||||
b.logf("transitioning to running; doing Login...")
|
||||
cc.Login(nil, controlclient.LoginDefault)
|
||||
}
|
||||
|
||||
if oldp.WantRunning() != prefs.WantRunning() {
|
||||
if oldp.WantRunning() != newp.WantRunning {
|
||||
b.stateMachine()
|
||||
} else {
|
||||
b.authReconfig()
|
||||
|
@ -2534,7 +2427,7 @@ func (b *LocalBackend) blockEngineUpdates(block bool) {
|
|||
func (b *LocalBackend) authReconfig() {
|
||||
b.mu.Lock()
|
||||
blocked := b.blocked
|
||||
prefs := b.prefs
|
||||
prefs := b.pm.CurrentPrefs()
|
||||
nm := b.netMap
|
||||
hasPAC := b.prevIfState.HasPAC()
|
||||
disableSubnetsIfPAC := nm != nil && nm.Debug != nil && nm.Debug.DisableSubnetsIfPAC.EqualBool(true)
|
||||
|
@ -3139,7 +3032,7 @@ func (b *LocalBackend) enterState(newState ipn.State) {
|
|||
b.mu.Lock()
|
||||
oldState := b.state
|
||||
b.state = newState
|
||||
prefs := b.prefs
|
||||
prefs := b.pm.CurrentPrefs()
|
||||
netMap := b.netMap
|
||||
activeLogin := b.activeLogin
|
||||
authURL := b.authURL
|
||||
|
@ -3155,12 +3048,12 @@ func (b *LocalBackend) enterState(newState ipn.State) {
|
|||
|
||||
// prefs may change irrespective of state; WantRunning should be explicitly
|
||||
// set before potential early return even if the state is unchanged.
|
||||
health.SetIPNState(newState.String(), prefs.WantRunning())
|
||||
health.SetIPNState(newState.String(), prefs.Valid() && prefs.WantRunning())
|
||||
if oldState == newState {
|
||||
return
|
||||
}
|
||||
b.logf("Switching ipn state %v -> %v (WantRunning=%v, nm=%v)",
|
||||
oldState, newState, prefs.WantRunning, netMap != nil)
|
||||
oldState, newState, prefs.WantRunning(), netMap != nil)
|
||||
b.send(ipn.Notify{State: &newState})
|
||||
|
||||
switch newState {
|
||||
|
@ -3197,8 +3090,8 @@ func (b *LocalBackend) hasNodeKey() bool {
|
|||
// we can't use b.Prefs(), because it strips the keys, oops!
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
|
||||
return b.prefs.Valid() && b.prefs.Persist() != nil && !b.prefs.Persist().PrivateNodeKey.IsZero()
|
||||
p := b.pm.CurrentPrefs()
|
||||
return p.Valid() && p.Persist() != nil && !p.Persist().PrivateNodeKey.IsZero()
|
||||
}
|
||||
|
||||
// nextState returns the state the backend seems to be in, based on
|
||||
|
@ -3207,15 +3100,20 @@ func (b *LocalBackend) nextState() ipn.State {
|
|||
b.mu.Lock()
|
||||
b.assertClientLocked()
|
||||
var (
|
||||
cc = b.cc
|
||||
netMap = b.netMap
|
||||
state = b.state
|
||||
blocked = b.blocked
|
||||
wantRunning = b.prefs.WantRunning()
|
||||
loggedOut = b.prefs.LoggedOut()
|
||||
st = b.engineStatus
|
||||
keyExpired = b.keyExpired
|
||||
cc = b.cc
|
||||
netMap = b.netMap
|
||||
state = b.state
|
||||
blocked = b.blocked
|
||||
st = b.engineStatus
|
||||
keyExpired = b.keyExpired
|
||||
|
||||
wantRunning = false
|
||||
loggedOut = false
|
||||
)
|
||||
if p := b.pm.CurrentPrefs(); p.Valid() {
|
||||
wantRunning = p.WantRunning()
|
||||
loggedOut = p.LoggedOut()
|
||||
}
|
||||
b.mu.Unlock()
|
||||
|
||||
switch {
|
||||
|
@ -3326,15 +3224,13 @@ func (b *LocalBackend) ResetForClientDisconnect() {
|
|||
go b.cc.Shutdown()
|
||||
b.cc = nil
|
||||
}
|
||||
b.stateKey = ""
|
||||
b.userID = ""
|
||||
b.setNetMapLocked(nil)
|
||||
b.prefs = new(ipn.Prefs).View()
|
||||
b.pm.Reset()
|
||||
b.keyExpired = false
|
||||
b.authURL = ""
|
||||
b.authURLSticky = ""
|
||||
b.activeLogin = ""
|
||||
b.setAtomicValuesFromPrefs(b.prefs)
|
||||
b.setAtomicValuesFromPrefs(ipn.PrefsView{})
|
||||
b.setTCPPortsIntercepted(nil)
|
||||
}
|
||||
|
||||
|
@ -3457,7 +3353,7 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) {
|
|||
}
|
||||
b.capFileSharing = fs
|
||||
|
||||
b.setTCPPortsInterceptedFromNetmapAndPrefsLocked()
|
||||
b.setTCPPortsInterceptedFromNetmapAndPrefsLocked(b.pm.CurrentPrefs())
|
||||
if nm == nil {
|
||||
b.nodeByAddr = nil
|
||||
return
|
||||
|
@ -3496,10 +3392,9 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) {
|
|||
// the ports that tailscaled should handle as a function of b.netMap and b.prefs.
|
||||
//
|
||||
// b.mu must be held.
|
||||
func (b *LocalBackend) setTCPPortsInterceptedFromNetmapAndPrefsLocked() {
|
||||
func (b *LocalBackend) setTCPPortsInterceptedFromNetmapAndPrefsLocked(prefs ipn.PrefsView) {
|
||||
handlePorts := make([]uint16, 0, 4)
|
||||
|
||||
prefs := b.prefs
|
||||
if prefs.Valid() && prefs.RunSSH() && envknob.CanSSHD() {
|
||||
handlePorts = append(handlePorts, 22)
|
||||
}
|
||||
|
@ -3532,10 +3427,11 @@ func (b *LocalBackend) setTCPPortsInterceptedFromNetmapAndPrefsLocked() {
|
|||
func (b *LocalBackend) operatorUserName() string {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
if !b.prefs.Valid() {
|
||||
prefs := b.pm.CurrentPrefs()
|
||||
if !prefs.Valid() {
|
||||
return ""
|
||||
}
|
||||
return b.prefs.OperatorUser()
|
||||
return prefs.OperatorUser()
|
||||
}
|
||||
|
||||
// OperatorUserID returns the current pref's OperatorUser's ID (in
|
||||
|
@ -3558,8 +3454,8 @@ func (b *LocalBackend) OperatorUserID() string {
|
|||
// in the test harness.
|
||||
func (b *LocalBackend) TestOnlyPublicKeys() (machineKey key.MachinePublic, nodeKey key.NodePublic) {
|
||||
b.mu.Lock()
|
||||
prefs := b.prefs
|
||||
machinePrivKey := b.machinePrivKey
|
||||
prefs := b.pm.CurrentPrefs()
|
||||
b.mu.Unlock()
|
||||
|
||||
if !prefs.Valid() || machinePrivKey.IsZero() {
|
||||
|
@ -3676,8 +3572,8 @@ func (b *LocalBackend) SetDNS(ctx context.Context, name, value string) error {
|
|||
|
||||
b.mu.Lock()
|
||||
cc := b.ccAuto
|
||||
if b.prefs.Valid() {
|
||||
req.NodeKey = b.prefs.Persist().PublicNodeKey()
|
||||
if prefs := b.pm.CurrentPrefs(); prefs.Valid() {
|
||||
req.NodeKey = prefs.Persist().PrivateNodeKey.Public()
|
||||
}
|
||||
b.mu.Unlock()
|
||||
if cc == nil {
|
||||
|
@ -3789,11 +3685,11 @@ func (b *LocalBackend) DERPMap() *tailcfg.DERPMap {
|
|||
func (b *LocalBackend) OfferingExitNode() bool {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
if !b.prefs.Valid() {
|
||||
if !b.pm.CurrentPrefs().Valid() {
|
||||
return false
|
||||
}
|
||||
var def4, def6 bool
|
||||
ar := b.prefs.AdvertiseRoutes()
|
||||
ar := b.pm.CurrentPrefs().AdvertiseRoutes()
|
||||
for i := 0; i < ar.Len(); i++ {
|
||||
r := ar.At(i)
|
||||
if r.Bits() != 0 {
|
||||
|
@ -3980,7 +3876,8 @@ func (b *LocalBackend) DoNoiseRequest(req *http.Request) (*http.Response, error)
|
|||
func (b *LocalBackend) tailscaleSSHEnabled() bool {
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
return b.prefs.Valid() && b.prefs.RunSSH()
|
||||
p := b.pm.CurrentPrefs()
|
||||
return p.Valid() && p.RunSSH()
|
||||
}
|
||||
|
||||
func (b *LocalBackend) sshServerOrInit() (_ SSHServer, err error) {
|
||||
|
@ -4074,7 +3971,7 @@ func (b *LocalBackend) SetDevStateStore(key, value string) error {
|
|||
|
||||
b.mu.Lock()
|
||||
defer b.mu.Unlock()
|
||||
b.setTCPPortsInterceptedFromNetmapAndPrefsLocked()
|
||||
b.setTCPPortsInterceptedFromNetmapAndPrefsLocked(b.pm.CurrentPrefs())
|
||||
|
||||
return nil
|
||||
}
|
||||
|
@ -4085,3 +3982,27 @@ func (b *LocalBackend) SetDevStateStore(key, value string) error {
|
|||
func (b *LocalBackend) ShouldInterceptTCPPort(port uint16) bool {
|
||||
return b.shouldInterceptTCPPortAtomic.Load()(port)
|
||||
}
|
||||
|
||||
func (b *LocalBackend) SwitchProfile(profile string) error {
|
||||
if err := b.pm.SwitchProfile(profile); err != nil {
|
||||
return nil
|
||||
}
|
||||
return b.Start(ipn.Options{})
|
||||
}
|
||||
|
||||
func (b *LocalBackend) DeleteProfile(p string) error {
|
||||
return b.pm.DeleteProfile(p)
|
||||
}
|
||||
|
||||
func (b *LocalBackend) CurrentProfile() string {
|
||||
p, _ := b.pm.CurrentProfile()
|
||||
return p
|
||||
}
|
||||
|
||||
func (b *LocalBackend) NewProfile() {
|
||||
b.pm.NewProfile()
|
||||
}
|
||||
|
||||
func (b *LocalBackend) ListProfiles() []string {
|
||||
return b.pm.Profiles()
|
||||
}
|
||||
|
|
|
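The new LocalBackend methods above (SwitchProfile, DeleteProfile, CurrentProfile, NewProfile, ListProfiles) are thin wrappers over the ProfileManager. A hypothetical example of driving them from a client; the function name and package are illustrative only:

```go
package example

import (
	"fmt"

	"tailscale.com/ipn/ipnlocal"
)

// switchTo is a hypothetical example: list known profiles, then switch to
// one by name. SwitchProfile loads that profile's prefs and restarts the
// backend via Start(ipn.Options{}).
func switchTo(b *ipnlocal.LocalBackend, target string) error {
	for _, name := range b.ListProfiles() {
		fmt.Printf("profile %q (current=%v)\n", name, name == b.CurrentProfile())
	}
	return b.SwitchProfile(target)
}
```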
@ -21,6 +21,7 @@ import (
|
|||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/types/logger"
|
||||
"tailscale.com/types/netmap"
|
||||
"tailscale.com/util/must"
|
||||
"tailscale.com/wgengine"
|
||||
"tailscale.com/wgengine/filter"
|
||||
"tailscale.com/wgengine/wgcfg"
|
||||
|
@ -489,7 +490,8 @@ func TestLazyMachineKeyGeneration(t *testing.T) {
|
|||
t.Fatalf("NewFakeUserspaceEngine: %v", err)
|
||||
}
|
||||
t.Cleanup(eng.Close)
|
||||
lb, err := NewLocalBackend(logf, "logid", store, nil, eng, 0)
|
||||
pm := must.Get(NewProfileManager(store, logf, "default"))
|
||||
lb, err := NewLocalBackend(logf, "logid", pm, nil, eng, 0)
|
||||
if err != nil {
|
||||
t.Fatalf("NewLocalBackend: %v", err)
|
||||
}
|
||||
|
@ -498,9 +500,7 @@ func TestLazyMachineKeyGeneration(t *testing.T) {
|
|||
Transport: panicOnUseTransport{}, // validate we don't send HTTP requests
|
||||
})
|
||||
|
||||
if err := lb.Start(ipn.Options{
|
||||
StateKey: ipn.GlobalDaemonStateKey,
|
||||
}); err != nil {
|
||||
if err := lb.Start(ipn.Options{}); err != nil {
|
||||
t.Fatalf("Start: %v", err)
|
||||
}
|
||||
|
||||
|
|
|
@ -18,6 +18,7 @@ import (
|
|||
"tailscale.com/types/key"
|
||||
"tailscale.com/types/logger"
|
||||
"tailscale.com/types/persist"
|
||||
"tailscale.com/util/must"
|
||||
"tailscale.com/wgengine"
|
||||
)
|
||||
|
||||
|
@ -55,14 +56,13 @@ func TestLocalLogLines(t *testing.T) {
|
|||
}
|
||||
t.Cleanup(e.Close)
|
||||
|
||||
lb, err := NewLocalBackend(logf, idA.String(), store, nil, e, 0)
|
||||
pm := must.Get(NewProfileManager(store, logf, ""))
|
||||
lb, err := NewLocalBackend(logf, idA.String(), pm, nil, e, 0)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer lb.Shutdown()
|
||||
|
||||
// custom adjustments for required non-nil fields
|
||||
lb.prefs = ipn.NewPrefs().View()
|
||||
lb.hostinfo = &tailcfg.Hostinfo{}
|
||||
// hacky manual override of the usual log-on-change behaviour of keylogf
|
||||
lb.keyLogf = logListen.Logf
|
||||
|
|
|
@ -109,7 +109,7 @@ func (b *LocalBackend) tkaSyncIfNeeded(nm *netmap.NetworkMap) error {
|
|||
b.mu.Lock() // take mu to protect access to synchronized fields.
|
||||
defer b.mu.Unlock()
|
||||
|
||||
ourNodeKey := b.prefs.Persist().PublicNodeKey()
|
||||
ourNodeKey := b.pm.CurrentPrefs().Persist().PublicNodeKey()
|
||||
|
||||
isEnabled := b.tka != nil
|
||||
wantEnabled := nm.TKAEnabled
|
||||
|
@ -362,8 +362,8 @@ func (b *LocalBackend) NetworkLockInit(keys []tka.Key, disablementValues [][]byt
|
|||
|
||||
var ourNodeKey key.NodePublic
|
||||
b.mu.Lock()
|
||||
if b.prefs.Valid() {
|
||||
ourNodeKey = b.prefs.Persist().PublicNodeKey()
|
||||
if p := b.pm.CurrentPrefs(); p.Valid() {
|
||||
ourNodeKey = p.Persist().PublicNodeKey()
|
||||
}
|
||||
b.mu.Unlock()
|
||||
if ourNodeKey.IsZero() {
|
||||
|
@ -465,7 +465,8 @@ func (b *LocalBackend) NetworkLockSign(nodeKey key.NodePublic, rotationPublic []
|
|||
if err != nil {
|
||||
return key.NodePublic{}, tka.NodeKeySignature{}, fmt.Errorf("signature failed: %w", err)
|
||||
}
|
||||
return b.prefs.Persist().PublicNodeKey(), sig, nil
|
||||
|
||||
return b.pm.CurrentPrefs().Persist().PublicNodeKey(), sig, nil
|
||||
}(nodeKey, rotationPublic)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -518,7 +519,7 @@ func (b *LocalBackend) NetworkLockModify(addKeys, removeKeys []tka.Key) (err err
|
|||
return nil
|
||||
}
|
||||
|
||||
ourNodeKey := b.prefs.Persist().PublicNodeKey()
|
||||
ourNodeKey := b.pm.CurrentPrefs().Persist().PublicNodeKey()
|
||||
head := b.tka.authority.Head()
|
||||
b.mu.Unlock()
|
||||
resp, err := b.tkaDoSyncSend(ourNodeKey, head, aums, true)
|
||||
|
@ -553,8 +554,8 @@ func (b *LocalBackend) NetworkLockDisable(secret []byte) error {
|
|||
)
|
||||
|
||||
b.mu.Lock()
|
||||
if b.prefs.Valid() {
|
||||
ourNodeKey = b.prefs.Persist().PublicNodeKey()
|
||||
if p := b.pm.CurrentPrefs(); p.Valid() {
|
||||
ourNodeKey = p.Persist().PublicNodeKey()
|
||||
}
|
||||
if b.tka == nil {
|
||||
err = errNetworkLockNotActive
|
||||
|
|
|
@ -20,12 +20,14 @@ import (
|
|||
"tailscale.com/envknob"
|
||||
"tailscale.com/hostinfo"
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/ipn/store/mem"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/tka"
|
||||
"tailscale.com/types/key"
|
||||
"tailscale.com/types/netmap"
|
||||
"tailscale.com/types/persist"
|
||||
"tailscale.com/types/tkatype"
|
||||
"tailscale.com/util/must"
|
||||
)
|
||||
|
||||
func fakeControlClient(t *testing.T, c *http.Client) *controlclient.Auto {
|
||||
|
@ -117,14 +119,17 @@ func TestTKAEnablementFlow(t *testing.T) {
|
|||
temp := t.TempDir()
|
||||
|
||||
cc := fakeControlClient(t, client)
|
||||
pm := must.Get(NewProfileManager(new(mem.Store), t.Logf, ""))
|
||||
must.Do(pm.SetPrefs((&ipn.Prefs{
|
||||
Persist: &persist.Persist{PrivateNodeKey: nodePriv},
|
||||
}).View()))
|
||||
b := LocalBackend{
|
||||
varRoot: temp,
|
||||
cc: cc,
|
||||
ccAuto: cc,
|
||||
logf: t.Logf,
|
||||
prefs: (&ipn.Prefs{
|
||||
Persist: &persist.Persist{PrivateNodeKey: nodePriv},
|
||||
}).View(),
|
||||
pm: pm,
|
||||
store: pm.Store(),
|
||||
}
|
||||
|
||||
err = b.tkaSyncIfNeeded(&netmap.NetworkMap{
|
||||
|
@ -210,6 +215,10 @@ func TestTKADisablementFlow(t *testing.T) {
|
|||
defer ts.Close()
|
||||
|
||||
cc := fakeControlClient(t, client)
|
||||
pm := must.Get(NewProfileManager(new(mem.Store), t.Logf, ""))
|
||||
must.Do(pm.SetPrefs((&ipn.Prefs{
|
||||
Persist: &persist.Persist{PrivateNodeKey: nodePriv},
|
||||
}).View()))
|
||||
b := LocalBackend{
|
||||
varRoot: temp,
|
||||
cc: cc,
|
||||
|
@ -219,9 +228,8 @@ func TestTKADisablementFlow(t *testing.T) {
|
|||
authority: authority,
|
||||
storage: chonk,
|
||||
},
|
||||
prefs: (&ipn.Prefs{
|
||||
Persist: &persist.Persist{PrivateNodeKey: nodePriv},
|
||||
}).View(),
|
||||
pm: pm,
|
||||
store: pm.Store(),
|
||||
}
|
||||
|
||||
// Test that the wrong disablement secret does not shut down the authority.
|
||||
|
@ -456,18 +464,21 @@ func TestTKASync(t *testing.T) {
|
|||
|
||||
// Setup the client.
|
||||
cc := fakeControlClient(t, client)
|
||||
pm := must.Get(NewProfileManager(new(mem.Store), t.Logf, ""))
|
||||
must.Do(pm.SetPrefs((&ipn.Prefs{
|
||||
Persist: &persist.Persist{PrivateNodeKey: nodePriv},
|
||||
}).View()))
|
||||
b := LocalBackend{
|
||||
varRoot: temp,
|
||||
cc: cc,
|
||||
ccAuto: cc,
|
||||
logf: t.Logf,
|
||||
pm: pm,
|
||||
store: pm.Store(),
|
||||
tka: &tkaState{
|
||||
authority: nodeAuthority,
|
||||
storage: nodeStorage,
|
||||
},
|
||||
prefs: (&ipn.Prefs{
|
||||
Persist: &persist.Persist{PrivateNodeKey: nodePriv},
|
||||
}).View(),
|
||||
}
|
||||
|
||||
// Finally, lets trigger a sync.
|
||||
|
@ -607,6 +618,11 @@ func TestTKADisable(t *testing.T) {
|
|||
defer ts.Close()
|
||||
|
||||
cc := fakeControlClient(t, client)
|
||||
pm := must.Get(NewProfileManager(new(mem.Store), t.Logf, ""))
|
||||
must.Do(pm.SetPrefs((&ipn.Prefs{
|
||||
Persist: &persist.Persist{PrivateNodeKey: nodePriv},
|
||||
}).View()))
|
||||
|
||||
b := LocalBackend{
|
||||
varRoot: temp,
|
||||
cc: cc,
|
||||
|
@ -616,9 +632,8 @@ func TestTKADisable(t *testing.T) {
|
|||
authority: authority,
|
||||
storage: chonk,
|
||||
},
|
||||
prefs: (&ipn.Prefs{
|
||||
Persist: &persist.Persist{PrivateNodeKey: nodePriv},
|
||||
}).View(),
|
||||
pm: pm,
|
||||
store: pm.Store(),
|
||||
}
|
||||
|
||||
// Test that we get an error for an incorrect disablement secret.
|
||||
|
@ -688,7 +703,10 @@ func TestTKASign(t *testing.T) {
|
|||
}
|
||||
}))
|
||||
defer ts.Close()
|
||||
|
||||
pm := must.Get(NewProfileManager(new(mem.Store), t.Logf, ""))
|
||||
must.Do(pm.SetPrefs((&ipn.Prefs{
|
||||
Persist: &persist.Persist{PrivateNodeKey: nodePriv},
|
||||
}).View()))
|
||||
cc := fakeControlClient(t, client)
|
||||
b := LocalBackend{
|
||||
varRoot: temp,
|
||||
|
@ -699,9 +717,8 @@ func TestTKASign(t *testing.T) {
|
|||
authority: authority,
|
||||
storage: chonk,
|
||||
},
|
||||
prefs: (&ipn.Prefs{
|
||||
Persist: &persist.Persist{PrivateNodeKey: nodePriv},
|
||||
}).View(),
|
||||
pm: pm,
|
||||
store: pm.Store(),
|
||||
nlPrivKey: nlPriv,
|
||||
}
|
||||
|
||||
|
|
|
@ -21,9 +21,11 @@ import (
|
|||
|
||||
"go4.org/netipx"
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/ipn/store/mem"
|
||||
"tailscale.com/tailcfg"
|
||||
"tailscale.com/tstest"
|
||||
"tailscale.com/types/logger"
|
||||
"tailscale.com/util/must"
|
||||
"tailscale.com/wgengine"
|
||||
"tailscale.com/wgengine/filter"
|
||||
)
|
||||
|
@ -585,20 +587,23 @@ func TestPeerAPIReplyToDNSQueries(t *testing.T) {
|
|||
h.remoteAddr = netip.MustParseAddrPort("100.150.151.152:12345")
|
||||
|
||||
eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0)
|
||||
pm := must.Get(NewProfileManager(new(mem.Store), t.Logf, ""))
|
||||
h.ps = &peerAPIServer{
|
||||
b: &LocalBackend{
|
||||
e: eng,
|
||||
e: eng,
|
||||
pm: pm,
|
||||
store: pm.Store(),
|
||||
},
|
||||
}
|
||||
if h.ps.b.OfferingExitNode() {
|
||||
t.Fatal("unexpectedly offering exit node")
|
||||
}
|
||||
h.ps.b.prefs = (&ipn.Prefs{
|
||||
h.ps.b.pm.SetPrefs((&ipn.Prefs{
|
||||
AdvertiseRoutes: []netip.Prefix{
|
||||
netip.MustParsePrefix("0.0.0.0/0"),
|
||||
netip.MustParsePrefix("::/0"),
|
||||
},
|
||||
}).View()
|
||||
}).View())
|
||||
if !h.ps.b.OfferingExitNode() {
|
||||
t.Fatal("unexpectedly not offering exit node")
|
||||
}
|
||||
|
|
|
@ -0,0 +1,435 @@
|
|||
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package ipnlocal
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"runtime"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"tailscale.com/ipn"
|
||||
"tailscale.com/types/logger"
|
||||
"tailscale.com/util/strs"
|
||||
)
|
||||
|
||||
type loginProfile struct {
|
||||
ID string
|
||||
Name string
|
||||
Key ipn.StateKey
|
||||
|
||||
// LocalUserID is the user ID of the user who created this profile.
|
||||
// It is only relevant on Windows where we have a multi-user system.
|
||||
LocalUserID string
|
||||
}
|
||||
|
||||
// ProfileManager is a wrapper around a StateStore that manages
|
||||
// multiple profiles and the current profile.
|
||||
type ProfileManager struct {
|
||||
store ipn.StateStore
|
||||
logf logger.Logf
|
||||
|
||||
// Lock order: LocalBackend.mu, then pm.mu.
|
||||
mu sync.Mutex // guards following
|
||||
currentUserID string // only used on Windows
|
||||
knownProfiles map[string]*loginProfile // key is profile name
|
||||
currentProfile *loginProfile
|
||||
prefs ipn.PrefsView
|
||||
isNewProfile bool
|
||||
}
|
||||
|
||||
// CurrentUser returns the current user ID. It is only non-empty on
|
||||
// Windows where we have a multi-user system.
|
||||
func (pm *ProfileManager) CurrentUser() string {
|
||||
pm.mu.Lock()
|
||||
defer pm.mu.Unlock()
|
||||
return pm.currentUserID
|
||||
}
|
||||
|
||||
// SetCurrentUser sets the current user ID. The uid is only non-empty
|
||||
// on Windows where we have a multi-user system.
|
||||
func (pm *ProfileManager) SetCurrentUser(uid string) error {
|
||||
pm.mu.Lock()
|
||||
defer pm.mu.Unlock()
|
||||
if pm.currentUserID == uid {
|
||||
return nil
|
||||
}
|
||||
cpk := ipn.CurrentProfileKey(uid)
|
||||
if b, err := pm.store.ReadState(cpk); err == nil {
|
||||
pk := ipn.StateKey(string(b))
|
||||
prefs, err := pm.loadSavedPrefs(pk)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pm.currentProfile = pm.findProfileByKey(pk)
|
||||
pm.prefs = prefs
|
||||
pm.isNewProfile = false
|
||||
} else if err == ipn.ErrStateNotExist {
|
||||
pm.prefs = emptyPrefs
|
||||
pm.isNewProfile = true
|
||||
} else {
|
||||
return err
|
||||
}
|
||||
pm.currentUserID = uid
|
||||
return nil
|
||||
}
|
||||
|
||||
func (pm *ProfileManager) findProfileByKey(key ipn.StateKey) *loginProfile {
|
||||
for _, p := range pm.knownProfiles {
|
||||
if p.Key == key {
|
||||
return p
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (pm *ProfileManager) setUnattendedModeAsConfigured() error {
|
||||
if pm.currentUserID == "" {
|
||||
return nil
|
||||
}
|
||||
|
||||
if pm.prefs.ForceDaemon() {
|
||||
return pm.store.WriteState(ipn.ServerModeStartKey, []byte(pm.currentProfile.Key))
|
||||
} else {
|
||||
return pm.store.WriteState(ipn.ServerModeStartKey, nil)
|
||||
}
|
||||
}
|
||||
|
||||
// Reset unloads the current profile, if any.
|
||||
func (pm *ProfileManager) Reset() {
|
||||
pm.mu.Lock()
|
||||
defer pm.mu.Unlock()
|
||||
pm.prefs = emptyPrefs
|
||||
pm.currentUserID = ""
|
||||
pm.currentProfile = nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
rand.Seed(time.Now().UnixNano())
|
||||
}
|
||||
|
||||
func randID() string {
|
||||
var b [4]byte
|
||||
rand.Read(b[:])
|
||||
return fmt.Sprintf("%x", b)
|
||||
}
|
||||
|
||||
// SetPrefs sets the current profile's prefs to the provided value.
|
||||
// It also saves the prefs to the StateStore. It stores a copy of the
|
||||
// provided prefs, which may be accessed via CurrentPrefs.
|
||||
func (pm *ProfileManager) SetPrefs(prefsIn ipn.PrefsView) error {
|
||||
prefs := prefsIn.AsStruct().View()
|
||||
pm.mu.Lock()
|
||||
defer pm.mu.Unlock()
|
||||
ps := prefs.Persist()
|
||||
if !pm.isNewProfile || ps == nil || ps.LoginName == "" {
|
||||
return pm.setPrefsLocked(prefs)
|
||||
}
|
||||
id, k := newUnusedKey(pm.knownProfiles)
|
||||
pm.currentProfile = &loginProfile{
|
||||
Name: ps.LoginName,
|
||||
Key: k,
|
||||
ID: id,
|
||||
LocalUserID: pm.currentUserID,
|
||||
}
|
||||
pm.knownProfiles[ps.LoginName] = pm.currentProfile
|
||||
if err := pm.writeKnownProfiles(); err != nil {
|
||||
delete(pm.knownProfiles, ps.LoginName)
|
||||
return err
|
||||
}
|
||||
if err := pm.setAsUserSelectedProfileLocked(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := pm.setPrefsLocked(prefs); err != nil {
|
||||
return err
|
||||
}
|
||||
pm.isNewProfile = false
|
||||
return nil
|
||||
}
|
||||
|
||||
func newUnusedKey(knownProfiles map[string]*loginProfile) (id string, key ipn.StateKey) {
|
||||
keyGenLoop:
|
||||
for {
|
||||
id := randID()
|
||||
for _, kp := range knownProfiles {
|
||||
if kp.ID == id {
|
||||
continue keyGenLoop
|
||||
}
|
||||
}
|
||||
return id, ipn.StateKey("profile-" + id)
|
||||
}
|
||||
}
|
||||
|
||||
func (pm *ProfileManager) setPrefsLocked(clonedPrefs ipn.PrefsView) error {
|
||||
pm.prefs = clonedPrefs
|
||||
if pm.currentProfile == nil {
|
||||
return nil
|
||||
}
|
||||
if err := pm.writePrefsToStore(pm.currentProfile.Key, pm.prefs); err != nil {
|
||||
return err
|
||||
}
|
||||
return pm.setUnattendedModeAsConfigured()
|
||||
}
|
||||
|
||||
func (pm *ProfileManager) writePrefsToStore(key ipn.StateKey, prefs ipn.PrefsView) error {
|
||||
if key == "" {
|
||||
return nil
|
||||
}
|
||||
if err := pm.store.WriteState(key, prefs.ToBytes()); err != nil {
|
||||
pm.logf("WriteState(%q): %v", key, err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Profiles returns the list of known profiles.
|
||||
func (pm *ProfileManager) Profiles() []string {
|
||||
pm.mu.Lock()
|
||||
defer pm.mu.Unlock()
|
||||
var profiles []string
|
||||
for _, p := range pm.knownProfiles {
|
||||
if p.LocalUserID == pm.currentUserID {
|
||||
profiles = append(profiles, p.Name)
|
||||
}
|
||||
}
|
||||
return profiles
|
||||
}
|
||||
|
||||
// SwitchProfile switches to the profile with the given name.
|
||||
func (pm *ProfileManager) SwitchProfile(profile string) error {
|
||||
pm.mu.Lock()
|
||||
defer pm.mu.Unlock()
|
||||
kp, ok := pm.knownProfiles[profile]
|
||||
if !ok {
|
||||
return fmt.Errorf("profile %q not found", profile)
|
||||
}
|
||||
|
||||
if pm.currentProfile != nil && kp.Key == pm.currentProfile.Key && pm.prefs.Valid() {
|
||||
return nil
|
||||
}
|
||||
if kp.LocalUserID != pm.currentUserID {
|
||||
return fmt.Errorf("profile %q is not owned by current user", profile)
|
||||
}
|
||||
prefs, err := pm.loadSavedPrefs(kp.Key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
pm.prefs = prefs
|
||||
pm.currentProfile = kp
|
||||
pm.isNewProfile = false
|
||||
return pm.setAsUserSelectedProfileLocked()
|
||||
}
|
||||
|
||||
func (pm *ProfileManager) setAsUserSelectedProfileLocked() error {
|
||||
k := ipn.CurrentProfileKey(pm.currentUserID)
|
||||
if pm.currentProfile == nil {
|
||||
return pm.store.WriteState(k, nil)
|
||||
}
|
||||
return pm.store.WriteState(k, []byte(pm.currentProfile.Key))
|
||||
}
|
||||
|
||||
func (pm *ProfileManager) loadSavedPrefs(key ipn.StateKey) (ipn.PrefsView, error) {
|
||||
bs, err := pm.store.ReadState(key)
|
||||
if err != nil {
|
||||
if err == ipn.ErrStateNotExist {
|
||||
return emptyPrefs, nil
|
||||
}
|
||||
return ipn.PrefsView{}, err
|
||||
}
|
||||
savedPrefs, err := ipn.PrefsFromBytes(bs)
|
||||
if err != nil {
|
||||
return ipn.PrefsView{}, fmt.Errorf("PrefsFromBytes: %v", err)
|
||||
}
|
||||
pm.logf("using backend prefs for %q: %v", key, savedPrefs.Pretty())
|
||||
|
||||
// Ignore any old stored preferences for https://login.tailscale.com
|
||||
// as the control server that would override the new default of
|
||||
// controlplane.tailscale.com.
|
||||
if savedPrefs.ControlURL != "" &&
|
||||
savedPrefs.ControlURL != ipn.DefaultControlURL &&
|
||||
ipn.IsLoginServerSynonym(savedPrefs.ControlURL) {
|
||||
savedPrefs.ControlURL = ""
|
||||
}
|
||||
return savedPrefs.View(), nil
|
||||
}
|
||||
|
||||
// CurrentProfile returns the name and ID of the current profile, or "" if the profile
|
||||
// is not named.
|
||||
func (pm *ProfileManager) CurrentProfile() (name string, id string) {
|
||||
pm.mu.Lock()
|
||||
defer pm.mu.Unlock()
|
||||
if pm.currentProfile == nil {
|
||||
return "", ""
|
||||
}
|
||||
return pm.currentProfile.Name, pm.currentProfile.ID
|
||||
}
|
||||
|
||||
// DeleteProfile removes the profile with the given name. It is a no-op if the
|
||||
// profile does not exist.
|
||||
func (pm *ProfileManager) DeleteProfile(profile string) error {
|
||||
pm.mu.Lock()
|
||||
defer pm.mu.Unlock()
|
||||
kp, ok := pm.knownProfiles[profile]
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
if kp.Key == pm.currentProfile.Key {
|
||||
return fmt.Errorf("cannot remove current profile")
|
||||
}
|
||||
if err := pm.store.WriteState(kp.Key, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
delete(pm.knownProfiles, profile)
|
||||
return pm.writeKnownProfiles()
|
||||
}
|
||||
|
||||
func (pm *ProfileManager) writeKnownProfiles() error {
|
||||
b, err := json.Marshal(pm.knownProfiles)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return pm.store.WriteState(ipn.KnownProfilesStateKey, b)
|
||||
}
|
||||
|
||||
// NewProfile creates a new profile with the given name. It switches to the new
|
||||
// profile. The new profile is not persisted until SetPrefs is called.
|
||||
func (pm *ProfileManager) NewProfile() {
	pm.mu.Lock()
	defer pm.mu.Unlock()
	pm.prefs = emptyPrefs
	pm.currentProfile = nil
	pm.isNewProfile = true
}

// emptyPrefs is the default prefs for a new profile.
var emptyPrefs = func() ipn.PrefsView {
	prefs := ipn.NewPrefs()
	prefs.WantRunning = false
	return prefs.View()
}()

// Store returns the StateStore used by the ProfileManager.
func (pm *ProfileManager) Store() ipn.StateStore {
	return pm.store
}

// CurrentPrefs returns a read-only view of the current prefs.
func (pm *ProfileManager) CurrentPrefs() ipn.PrefsView {
	pm.mu.Lock()
	defer pm.mu.Unlock()
	return pm.prefs
}

// NewProfileManager creates a new ProfileManager using the provided StateStore.
// It also loads the list of known profiles from the StateStore.
// If a state key is provided, it will be used to load the current profile.
func NewProfileManager(store ipn.StateStore, logf logger.Logf, stateKey ipn.StateKey) (*ProfileManager, error) {
	return newProfileManagerWithGOOS(store, logf, stateKey, runtime.GOOS)
}

func readAutoStartKey(store ipn.StateStore, goos string) (ipn.StateKey, error) {
	startKey := ipn.CurrentProfileStateKey
	if goos == "windows" {
		// When tailscaled runs on Windows it is not typically run unattended.
		// So we can't use the profile mechanism to load the profile at startup.
		startKey = ipn.ServerModeStartKey
	}
	autoStartKey, err := store.ReadState(startKey)
	if err != nil && err != ipn.ErrStateNotExist {
		return "", fmt.Errorf("calling ReadState on state store: %w", err)
	}
	return ipn.StateKey(autoStartKey), nil
}

func readKnownProfiles(store ipn.StateStore) (map[string]*loginProfile, error) {
	var knownProfiles map[string]*loginProfile
	prfB, err := store.ReadState(ipn.KnownProfilesStateKey)
	switch err {
	case nil:
		if err := json.Unmarshal(prfB, &knownProfiles); err != nil {
			return nil, fmt.Errorf("unmarshaling known profiles: %w", err)
		}
	case ipn.ErrStateNotExist:
		knownProfiles = make(map[string]*loginProfile)
	default:
		return nil, fmt.Errorf("calling ReadState on state store: %w", err)
	}
	return knownProfiles, nil
}

func newProfileManagerWithGOOS(store ipn.StateStore, logf logger.Logf, stateKey ipn.StateKey, goos string) (*ProfileManager, error) {
	if stateKey == "" {
		var err error
		stateKey, err = readAutoStartKey(store, goos)
		if err != nil {
			return nil, err
		}
	}

	knownProfiles, err := readKnownProfiles(store)
	if err != nil {
		return nil, err
	}

	pm := &ProfileManager{
		store:         store,
		knownProfiles: knownProfiles,
		logf:          logf,
	}

	if stateKey != "" {
		for _, v := range knownProfiles {
			if v.Key == stateKey {
				pm.currentProfile = v
			}
		}
		if pm.currentProfile == nil {
			if suf, ok := strs.CutPrefix(string(stateKey), "user-"); ok {
				pm.currentUserID = suf
			}
			pm.isNewProfile = true
		} else {
			pm.currentUserID = pm.currentProfile.LocalUserID
		}
		prefs, err := pm.loadSavedPrefs(stateKey)
		if err != nil {
			return nil, err
		}
		if err := pm.setPrefsLocked(prefs); err != nil {
			return nil, err
		}
	} else if len(knownProfiles) == 0 && goos != "windows" {
		// No known profiles, try a migration.
		if err := pm.migrateFromLegacyPrefs(); err != nil {
			return nil, err
		}
	} else {
		pm.prefs = emptyPrefs
	}

	return pm, nil
}

func (pm *ProfileManager) migrateFromLegacyPrefs() error {
	pm.NewProfile()
	k := ipn.GlobalDaemonStateKey
	switch runtime.GOOS {
	case "ios", "darwin":
		k = "ipn-go-bridge"
	}
	prefs, err := pm.loadSavedPrefs(k)
	if err != nil {
		return fmt.Errorf("calling ReadState on state store: %w", err)
	}
	pm.logf("migrating %q profile to new format", k)
	if err := pm.SetPrefs(prefs); err != nil {
		return fmt.Errorf("migrating _daemon profile: %w", err)
	}
	// Do not delete the old state key, as we may be downgraded to an
	// older version that still relies on it.
	return nil
}
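
For orientation, here is a minimal standalone sketch of constructing the new ProfileManager and reading its state, mirroring the usage in the tests below. The mem.Store and log.Printf logger are stand-ins for the daemon's real store and logger; this is not how tailscaled itself wires it up.

package main

import (
	"fmt"
	"log"

	"tailscale.com/ipn/ipnlocal"
	"tailscale.com/ipn/store/mem"
)

func main() {
	// In-memory store as a stand-in for the daemon's on-disk StateStore.
	store := new(mem.Store)

	// An empty state key lets the manager pick the autostart profile
	// itself (or migrate legacy "_daemon" prefs on first run).
	pm, err := ipnlocal.NewProfileManager(store, log.Printf, "")
	if err != nil {
		log.Fatal(err)
	}

	// A fresh store starts on an unnamed profile with default prefs
	// (WantRunning=false, per emptyPrefs above).
	name, id := pm.CurrentProfile()
	fmt.Printf("profile=%q id=%q wantRunning=%v\n",
		name, id, pm.CurrentPrefs().WantRunning())
}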

@ -0,0 +1,232 @@
// Copyright (c) 2022 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package ipnlocal

import (
	"testing"

	"tailscale.com/ipn"
	"tailscale.com/ipn/store/mem"
	"tailscale.com/types/logger"
	"tailscale.com/types/persist"
)

// TestProfileManagement tests creating, loading, and switching profiles.
func TestProfileManagement(t *testing.T) {
	store := new(mem.Store)

	pm, err := newProfileManagerWithGOOS(store, logger.Discard, "", "linux")
	if err != nil {
		t.Fatal(err)
	}
	wantCurProfile := ""
	wantProfiles := map[string]ipn.PrefsView{
		"": emptyPrefs,
	}
	checkProfiles := func(t *testing.T) {
		t.Helper()
		prof, _ := pm.CurrentProfile()
		if prof != wantCurProfile {
			t.Fatalf("CurrentProfile = %q; want %q", prof, wantCurProfile)
		}
		profiles := pm.Profiles()
		wantLen := len(wantProfiles)
		if _, ok := wantProfiles[""]; ok {
			wantLen--
		}
		if len(profiles) != wantLen {
			t.Fatalf("Profiles = %v; want %v", profiles, wantProfiles)
		}
		p := pm.CurrentPrefs()
		if !p.Valid() {
			t.Fatalf("CurrentPrefs = %v; want valid", p)
		}
		if !p.Equals(wantProfiles[wantCurProfile]) {
			t.Fatalf("CurrentPrefs = %v; want %v", p.Hostname(), wantProfiles[wantCurProfile].Hostname())
		}
		for _, p := range profiles {
			if _, ok := wantProfiles[p]; !ok {
				t.Fatalf("Profiles = %v; want %v", profiles, wantProfiles)
			}
			kp := pm.knownProfiles[p]
			got, err := pm.loadSavedPrefs(kp.Key)
			if err != nil {
				t.Fatal(err)
			}
			// Use Hostname as a proxy for all prefs.
			if got.Hostname() != wantProfiles[p].Hostname() {
				t.Fatalf("Prefs for profile %q = %v; want %v", p, got.Hostname(), wantProfiles[p].Hostname())
			}
		}
	}
	setPrefs := func(t *testing.T, loginName string) ipn.PrefsView {
		p := pm.CurrentPrefs().AsStruct()
		p.Persist = &persist.Persist{
			LoginName: loginName,
		}
		if err := pm.SetPrefs(p.View()); err != nil {
			t.Fatal(err)
		}
		return p.View()
	}
	t.Logf("Check initial state from empty store")
	checkProfiles(t)

	{
		t.Logf("Set prefs for default profile")
		wantProfiles["user@1.example.com"] = setPrefs(t, "user@1.example.com")
		wantCurProfile = "user@1.example.com"
		delete(wantProfiles, "")
	}
	checkProfiles(t)

	t.Logf("Create new profile")
	pm.NewProfile()
	wantCurProfile = ""
	wantProfiles[""] = emptyPrefs
	checkProfiles(t)

	{
		t.Logf("Set prefs for test profile")
		wantProfiles["user@2.example.com"] = setPrefs(t, "user@2.example.com")
		wantCurProfile = "user@2.example.com"
		delete(wantProfiles, "")
	}
	checkProfiles(t)

	t.Logf("pm: %+v", pm)
	t.Logf("Recreate profile manager from store")
	// Recreate the profile manager to ensure that it can load the profiles
	// from the store at startup.
	pm, err = newProfileManagerWithGOOS(store, logger.Discard, "", "linux")
	if err != nil {
		t.Fatal(err)
	}
	checkProfiles(t)

	{
		t.Logf("Try to delete test profile while it is active")
		if err := pm.DeleteProfile("user@2.example.com"); err == nil {
			t.Fatal("expected error deleting active profile")
		}
	}

	t.Logf("Delete default profile")
	if err := pm.DeleteProfile("user@1.example.com"); err != nil {
		t.Fatal(err)
	}
	delete(wantProfiles, "user@1.example.com")
	checkProfiles(t)

	t.Logf("Recreate profile manager from store after deleting default profile")
	// Recreate the profile manager to ensure that it can load the profiles
	// from the store at startup.
	pm, err = newProfileManagerWithGOOS(store, logger.Discard, "", "linux")
	if err != nil {
		t.Fatal(err)
	}
	checkProfiles(t)
}

// TestProfileManagementWindows tests going into and out of Unattended mode on
// Windows.
func TestProfileManagementWindows(t *testing.T) {
	store := new(mem.Store)

	pm, err := newProfileManagerWithGOOS(store, logger.Discard, "", "windows")
	if err != nil {
		t.Fatal(err)
	}
	wantCurProfile := ""
	wantProfiles := map[string]ipn.PrefsView{
		"": emptyPrefs,
	}
	checkProfiles := func(t *testing.T) {
		t.Helper()
		prof, _ := pm.CurrentProfile()
		t.Logf("\tCurrentProfile = %q", prof)
		if prof != wantCurProfile {
			t.Fatalf("CurrentProfile = %q; want %q", prof, wantCurProfile)
		}
		if p := pm.CurrentPrefs(); !p.Equals(wantProfiles[wantCurProfile]) {
			t.Fatalf("CurrentPrefs = %+v; want %+v", p.Pretty(), wantProfiles[wantCurProfile].Pretty())
		}
	}
	setPrefs := func(t *testing.T, loginName string, forceDaemon bool) ipn.PrefsView {
		p := pm.CurrentPrefs().AsStruct()
		p.ForceDaemon = forceDaemon
		p.Persist = &persist.Persist{
			LoginName: loginName,
		}
		if err := pm.SetPrefs(p.View()); err != nil {
			t.Fatal(err)
		}
		return p.View()
	}
	t.Logf("Check initial state from empty store")
	checkProfiles(t)

	{
		t.Logf("Set user1 as logged in user")
		if err := pm.SetCurrentUser("user1"); err != nil {
			t.Fatal(err)
		}
		checkProfiles(t)
		t.Logf("Save prefs for user1")
		wantProfiles["default"] = setPrefs(t, "default", false)
		wantCurProfile = "default"
	}
	checkProfiles(t)

	{
		t.Logf("Create new profile")
		pm.NewProfile()
		wantCurProfile = ""
		wantProfiles[""] = emptyPrefs
		checkProfiles(t)

		t.Logf("Save as test profile")
		wantProfiles["test"] = setPrefs(t, "test", false)
		wantCurProfile = "test"
		checkProfiles(t)
	}

	t.Logf("Recreate profile manager from store, should reset prefs")
	// Recreate the profile manager to ensure that it can load the profiles
	// from the store at startup.
	pm, err = newProfileManagerWithGOOS(store, logger.Discard, "", "windows")
	if err != nil {
		t.Fatal(err)
	}
	wantCurProfile = ""
	wantProfiles[""] = emptyPrefs
	checkProfiles(t)

	{
		t.Logf("Set user1 as current user")
		if err := pm.SetCurrentUser("user1"); err != nil {
			t.Fatal(err)
		}
		wantCurProfile = "test"
	}
	checkProfiles(t)
	{
		t.Logf("set unattended mode")
		wantProfiles["test"] = setPrefs(t, "test", true)
	}
	if pm.CurrentUser() != "user1" {
		t.Fatalf("CurrentUserID = %q; want %q", pm.CurrentUser(), "user1")
	}

	// Recreate the profile manager to ensure that it starts with test profile.
	pm, err = newProfileManagerWithGOOS(store, logger.Discard, "", "windows")
	if err != nil {
		t.Fatal(err)
	}
	checkProfiles(t)
	if pm.CurrentUser() != "user1" {
		t.Fatalf("CurrentUserID = %q; want %q", pm.CurrentUser(), "user1")
	}
}

@ -11,6 +11,7 @@ import (
	"reflect"
	"testing"

	"tailscale.com/ipn/store/mem"
	"tailscale.com/tailcfg"
	"tailscale.com/util/must"
)

@ -49,7 +50,8 @@ type fakeSSHServer struct {
}

func TestGetSSHUsernames(t *testing.T) {
	b := new(LocalBackend)
	pm := must.Get(NewProfileManager(new(mem.Store), t.Logf, ""))
	b := &LocalBackend{pm: pm, store: pm.Store()}
	b.sshServer = fakeSSHServer{}
	res, err := b.getSSHUsernames(new(tailcfg.C2NSSHUsernamesRequest))
	if err != nil {
@ -23,6 +23,7 @@ import (
	"tailscale.com/types/logger"
	"tailscale.com/types/netmap"
	"tailscale.com/types/persist"
	"tailscale.com/util/must"
	"tailscale.com/wgengine"
)

@ -97,7 +98,7 @@ type mockControl struct {
	mu          sync.Mutex
	calls       []string
	authBlocked bool
	persist     persist.Persist
	persist     *persist.Persist
	machineKey  key.MachinePrivate
}

@ -125,7 +126,7 @@ func (cc *mockControl) populateKeys() (newKeys bool) {
		newKeys = true
	}

	if cc.persist.PrivateNodeKey.IsZero() {
	if cc.persist != nil && cc.persist.PrivateNodeKey.IsZero() {
		cc.logf("Generating a new nodekey.")
		cc.persist.OldPrivateNodeKey = cc.persist.PrivateNodeKey
		cc.persist.PrivateNodeKey = key.NewNode()

@ -142,7 +143,7 @@ func (cc *mockControl) send(err error, url string, loginFinished bool, nm *netma
	s := controlclient.Status{
		URL:     url,
		NetMap:  nm,
		Persist: &cc.persist,
		Persist: cc.persist,
		Err:     err,
	}
	if loginFinished {

@ -289,8 +290,9 @@ func TestStateMachine(t *testing.T) {
		t.Fatalf("NewFakeUserspaceEngine: %v", err)
	}
	t.Cleanup(e.Close)
	pm := must.Get(NewProfileManager(store, logf, "default"))

	b, err := NewLocalBackend(logf, "logid", store, nil, e, 0)
	b, err := NewLocalBackend(logf, "logid", pm, nil, e, 0)
	if err != nil {
		t.Fatalf("NewLocalBackend: %v", err)
	}

@ -303,7 +305,7 @@ func TestStateMachine(t *testing.T) {
	cc.opts = opts
	cc.logfActual = opts.Logf
	cc.authBlocked = true
	cc.persist = cc.opts.Persist
	cc.persist = &cc.opts.Persist
	cc.mu.Unlock()

	cc.logf("ccGen: new mockControl.")

@ -335,7 +337,7 @@ func TestStateMachine(t *testing.T) {
	// but not ask it to do anything yet.
	t.Logf("\n\nStart")
	notifies.expect(2)
	c.Assert(b.Start(ipn.Options{StateKey: ipn.GlobalDaemonStateKey}), qt.IsNil)
	c.Assert(b.Start(ipn.Options{}), qt.IsNil)
	{
		// BUG: strictly, it should pause, not unpause, here, since !WantRunning.
		cc.assertCalls("New", "unpause")

@ -360,7 +362,7 @@ func TestStateMachine(t *testing.T) {
	// events as the first time, so UIs always know what to expect.
	t.Logf("\n\nStart2")
	notifies.expect(2)
	c.Assert(b.Start(ipn.Options{StateKey: ipn.GlobalDaemonStateKey}), qt.IsNil)
	c.Assert(b.Start(ipn.Options{}), qt.IsNil)
	{
		// BUG: strictly, it should pause, not unpause, here, since !WantRunning.
		cc.assertCalls("Shutdown", "unpause", "New", "unpause")

@ -552,7 +554,7 @@ func TestStateMachine(t *testing.T) {
	t.Logf("\n\nFastpath Start()")
	notifies.expect(1)
	b.state = ipn.Running
	c.Assert(b.Start(ipn.Options{StateKey: ipn.GlobalDaemonStateKey}), qt.IsNil)
	c.Assert(b.Start(ipn.Options{}), qt.IsNil)
	{
		nn := notifies.drain(1)
		cc.assertCalls()

@ -662,7 +664,7 @@ func TestStateMachine(t *testing.T) {
	// The frontend restarts!
	t.Logf("\n\nStart3")
	notifies.expect(2)
	c.Assert(b.Start(ipn.Options{StateKey: ipn.GlobalDaemonStateKey}), qt.IsNil)
	c.Assert(b.Start(ipn.Options{}), qt.IsNil)
	{
		// BUG: We already called Shutdown(), no need to do it again.
		// BUG: don't unpause because we're not logged in.

@ -722,7 +724,7 @@ func TestStateMachine(t *testing.T) {
	// One more restart, this time with a valid key, but WantRunning=false.
	t.Logf("\n\nStart4")
	notifies.expect(2)
	c.Assert(b.Start(ipn.Options{StateKey: ipn.GlobalDaemonStateKey}), qt.IsNil)
	c.Assert(b.Start(ipn.Options{}), qt.IsNil)
	{
		// NOTE: cc.Shutdown() is correct here, since we didn't call
		// b.Shutdown() explicitly ourselves.

@ -844,7 +846,7 @@ func TestStateMachine(t *testing.T) {
	// logged in and WantRunning.
	t.Logf("\n\nStart5")
	notifies.expect(1)
	c.Assert(b.Start(ipn.Options{StateKey: ipn.GlobalDaemonStateKey}), qt.IsNil)
	c.Assert(b.Start(ipn.Options{}), qt.IsNil)
	{
		// NOTE: cc.Shutdown() is correct here, since we didn't call
		// b.Shutdown() ourselves.

@ -918,6 +920,7 @@ func TestStateMachine(t *testing.T) {
	}
}

/*
func TestEditPrefsHasNoKeys(t *testing.T) {
	logf := tstest.WhileTestRunningLogger(t)
	store := new(testStateStorage)

@ -970,7 +973,7 @@ func TestEditPrefsHasNoKeys(t *testing.T) {
	if !p.Persist().LegacyFrontendPrivateMachineKey.IsZero() {
		t.Errorf("LegacyFrontendPrivateMachineKey = %v; want zero", p.Persist().LegacyFrontendPrivateMachineKey)
	}
}
}*/

type testStateStorage struct {
	mem mem.Store

@ -1005,7 +1008,8 @@ func TestWGEngineStatusRace(t *testing.T) {
	eng, err := wgengine.NewFakeUserspaceEngine(logf, 0)
	c.Assert(err, qt.IsNil)
	t.Cleanup(eng.Close)
	b, err := NewLocalBackend(logf, "logid", new(mem.Store), nil, eng, 0)
	pm := must.Get(NewProfileManager(new(mem.Store), logf, ""))
	b, err := NewLocalBackend(logf, "logid", pm, nil, eng, 0)
	c.Assert(err, qt.IsNil)

	cc := newMockControl(t)

@ -1030,7 +1034,7 @@ func TestWGEngineStatusRace(t *testing.T) {
	wantState(ipn.NoState)

	// Start the backend.
	err = b.Start(ipn.Options{StateKey: ipn.GlobalDaemonStateKey})
	err = b.Start(ipn.Options{})
	c.Assert(err, qt.IsNil)
	wantState(ipn.NeedsLogin)

@ -94,6 +94,7 @@ type Options struct {
// TCP or unix socket connections talking to that backend.
type Server struct {
	b            *ipnlocal.LocalBackend
	pm           *ipnlocal.ProfileManager
	logf         logger.Logf
	backendLogID string
	// resetOnZero is whether to call bs.Reset on transition from

@ -101,8 +102,7 @@ type Server struct {
	// being run in "client mode" that requires an active GUI
	// connection (such as on Windows by default). Even if this
	// is true, the ForceDaemon pref can override this.
	resetOnZero       bool
	autostartStateKey ipn.StateKey
	resetOnZero bool

	bsMu sync.Mutex // lock order: bsMu, then mu
	bs   *ipn.BackendServer

@ -685,24 +685,9 @@ func Run(ctx context.Context, logf logger.Logf, ln net.Listener, store ipn.State
	}()
	logf("Listening on %v", ln.Addr())

	var serverModeUser *user.User
	if opts.AutostartStateKey == "" {
		autoStartKey, err := store.ReadState(ipn.ServerModeStartKey)
		if err != nil && err != ipn.ErrStateNotExist {
			return fmt.Errorf("calling ReadState on state store: %w", err)
		}
		key := string(autoStartKey)
		if strings.HasPrefix(key, "user-") {
			uid := strings.TrimPrefix(key, "user-")
			u, err := lookupUserFromID(logf, uid)
			if err != nil {
				logf("ipnserver: found server mode auto-start key %q; failed to load user: %v", key, err)
			} else {
				logf("ipnserver: found server mode auto-start key %q (user %s)", key, u.Username)
				serverModeUser = u
			}
			opts.AutostartStateKey = ipn.StateKey(key)
		}
	pm, err := ipnlocal.NewProfileManager(store, logf, opts.AutostartStateKey)
	if err != nil {
		return err
	}

	bo := backoff.NewBackoff("ipnserver", logf, 30*time.Second)

@ -745,7 +730,7 @@ func Run(ctx context.Context, logf logger.Logf, ln net.Listener, store ipn.State
		}
	}

	server, err := New(logf, logid, store, eng, dialer, serverModeUser, opts)
	server, err := New(logf, logid, pm, eng, dialer, opts)
	if err != nil {
		return err
	}

@ -761,8 +746,8 @@ func Run(ctx context.Context, logf logger.Logf, ln net.Listener, store ipn.State
// New returns a new Server.
//
// To start it, use the Server.Run method.
func New(logf logger.Logf, logid string, store ipn.StateStore, eng wgengine.Engine, dialer *tsdial.Dialer, serverModeUser *user.User, opts Options) (*Server, error) {
	b, err := ipnlocal.NewLocalBackend(logf, logid, store, dialer, eng, opts.LoginFlags)
func New(logf logger.Logf, logid string, pm *ipnlocal.ProfileManager, eng wgengine.Engine, dialer *tsdial.Dialer, opts Options) (*Server, error) {
	b, err := ipnlocal.NewLocalBackend(logf, logid, pm, dialer, eng, opts.LoginFlags)
	if err != nil {
		return nil, fmt.Errorf("NewLocalBackend: %v", err)
	}

@ -808,32 +793,24 @@ func New(logf logger.Logf, logid string, store ipn.StateStore, eng wgengine.Engi

	}

	if opts.AutostartStateKey == "" {
		autoStartKey, err := store.ReadState(ipn.ServerModeStartKey)
		if err != nil && err != ipn.ErrStateNotExist {
			return nil, fmt.Errorf("calling ReadState on store: %w", err)
		}
		key := string(autoStartKey)
		if strings.HasPrefix(key, "user-") {
			uid := strings.TrimPrefix(key, "user-")
			u, err := lookupUserFromID(logf, uid)
			if err != nil {
				logf("ipnserver: found server mode auto-start key %q; failed to load user: %v", key, err)
			} else {
				logf("ipnserver: found server mode auto-start key %q (user %s)", key, u.Username)
				serverModeUser = u
			}
			opts.AutostartStateKey = ipn.StateKey(key)
	var serverModeUser *user.User
	if uid := pm.CurrentUser(); uid != "" {
		u, err := lookupUserFromID(logf, uid)
		if err != nil {
			logf("ipnserver: found server mode auto-start key; failed to load user: %v", err)
		} else {
			logf("ipnserver: found server mode auto-start key (user %s)", u.Username)
			serverModeUser = u
		}
	}

	server := &Server{
		b:                 b,
		backendLogID:      logid,
		logf:              logf,
		resetOnZero:       !opts.SurviveDisconnects,
		serverModeUser:    serverModeUser,
		autostartStateKey: opts.AutostartStateKey,
		b:              b,
		pm:             pm,
		backendLogID:   logid,
		logf:           logf,
		resetOnZero:    !opts.SurviveDisconnects,
		serverModeUser: serverModeUser,
	}
	server.bs = ipn.NewBackendServer(logf, b, server.writeToClients)
	return server, nil

@ -859,11 +836,11 @@ func (s *Server) Run(ctx context.Context, ln net.Listener) error {
		ln.Close()
	}()

	if s.autostartStateKey != "" {
	if s.pm.CurrentPrefs().Valid() {
		s.bs.GotCommand(ctx, &ipn.Command{
			Version: version.Long,
			Start: &ipn.StartArgs{
				Opts: ipn.Options{StateKey: s.autostartStateKey},
				Opts: ipn.Options{},
			},
		})
	}

@ -71,16 +71,19 @@ var handler = map[string]localAPIHandler{
	"metrics": (*Handler).serveMetrics,
	"ping": (*Handler).servePing,
	"prefs": (*Handler).servePrefs,
	"profile": (*Handler).serveProfile,
	"set-dns": (*Handler).serveSetDNS,
	"set-expiry-sooner": (*Handler).serveSetExpirySooner,
	"status": (*Handler).serveStatus,
	"tka/init": (*Handler).serveTKAInit,
	"tka/modify": (*Handler).serveTKAModify,
	"tka/sign": (*Handler).serveTKASign,
	"tka/status": (*Handler).serveTKAStatus,
	"upload-client-metrics": (*Handler).serveUploadClientMetrics,
	"whois": (*Handler).serveWhoIs,
	// TODO: rename to debug-profile or pprof-profile to separate it from the
	// (user) profile functionality.
"profile": (*Handler).serveProfile,
|
||||
"set-dns": (*Handler).serveSetDNS,
|
||||
"set-expiry-sooner": (*Handler).serveSetExpirySooner,
|
||||
"status": (*Handler).serveStatus,
|
||||
"tka/init": (*Handler).serveTKAInit,
|
||||
"tka/modify": (*Handler).serveTKAModify,
|
||||
"tka/sign": (*Handler).serveTKASign,
|
||||
"tka/status": (*Handler).serveTKAStatus,
|
||||
"upload-client-metrics": (*Handler).serveUploadClientMetrics,
|
||||
"whois": (*Handler).serveWhoIs,
|
||||
"profiles": (*Handler).serveProfiles,
|
||||
}
|
||||
|
||||
func randHex(n int) string {
|
||||
|
@ -1037,6 +1040,64 @@ func (h *Handler) serveTKAModify(w http.ResponseWriter, r *http.Request) {
|
|||
w.Write(j)
|
||||
}
|
||||
|
||||
func (h *Handler) serveProfiles(w http.ResponseWriter, r *http.Request) {
|
||||
if !h.PermitRead {
|
||||
http.Error(w, "profiles access denied", http.StatusForbidden)
|
||||
return
|
||||
}
|
||||
switch r.Method {
|
||||
case "GET": // List profiles
|
||||
profiles := make([]tailcfg.UserProfile, 0)
|
||||
for i, name := range h.b.ListProfiles() {
|
||||
profiles = append(profiles, tailcfg.UserProfile{
|
||||
ID: tailcfg.UserID(i),
|
||||
LoginName: name,
|
||||
DisplayName: name,
|
||||
ProfilePicURL: "",
|
||||
})
|
||||
}
|
||||
w.Header().Set("Content-Type", "application/json")
|
||||
e := json.NewEncoder(w)
|
||||
e.SetIndent("", "\t")
|
||||
e.Encode(profiles)
|
||||
case "POST": // Switch profile
|
||||
// TODO: it would be more REST-y to get the profile name from the path
|
||||
profile := r.FormValue("profile")
|
||||
if profile == "" {
|
||||
http.Error(w, "missing profile", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
err := h.b.SwitchProfile(profile)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), 500)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "text/plain")
|
||||
io.WriteString(w, "done\n")
|
||||
case "PUT": // Add profile
|
||||
h.b.NewProfile()
|
||||
w.Header().Set("Content-Type", "text/plain")
|
||||
io.WriteString(w, "done\n")
|
||||
case "DELETE": // Delete profile
|
||||
// TODO: it would be more REST-y to get the profile name from the path
|
||||
profile := r.FormValue("profile")
|
||||
if profile == "" {
|
||||
http.Error(w, "missing profile", http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
err := h.b.DeleteProfile(profile)
|
||||
if err != nil {
|
||||
http.Error(w, err.Error(), 500)
|
||||
return
|
||||
}
|
||||
w.Header().Set("Content-Type", "text/plain")
|
||||
io.WriteString(w, "done\n")
|
||||
default:
|
||||
http.Error(w, "unsupported method", http.StatusMethodNotAllowed)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func defBool(a string, def bool) bool {
|
||||
if a == "" {
|
||||
return def
|
||||
|
|
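
As a rough, hedged sketch of how a client might drive the new profiles endpoint: the TCP base URL below is purely a placeholder (the real LocalAPI is reached over tailscaled's local socket with its usual access checks), but the methods and the "profile" form value follow serveProfiles above.

package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

// Placeholder address; the real LocalAPI is served on tailscaled's
// local socket, not a plain TCP port.
const base = "http://localhost:41112/localapi/v0/profiles"

func main() {
	// GET lists the known profiles as JSON.
	resp, err := http.Get(base)
	if err != nil {
		panic(err)
	}
	body, _ := io.ReadAll(resp.Body)
	resp.Body.Close()
	fmt.Println(string(body))

	// POST with a "profile" form value switches to that profile.
	http.PostForm(base, url.Values{"profile": {"user@2.example.com"}})

	// PUT creates a new, empty profile.
	req, _ := http.NewRequest("PUT", base, nil)
	http.DefaultClient.Do(req)
}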

ipn/store.go
@ -26,6 +26,13 @@ const (
	// particular), but right now Unix daemons run with a single
	// node-global state. To keep open the option of having per-user state
	// later, the global state key doesn't look like a username.
	//
	// As of 2022-10-21, it has been superseded by profiles and is no longer
	// written to disk. It is only read at startup when there are no profiles,
	// to migrate the state to the "default" profile.
	// The existing state is left on disk in case the user downgrades to an
	// older version of Tailscale that doesn't support profiles. We can
	// remove this in a future release.
	GlobalDaemonStateKey = StateKey("_daemon")

	// ServerModeStartKey's value, if non-empty, is the value of a

@ -40,8 +47,23 @@ const (
	// NLKeyStateKey is the key under which we store the node's
	// network-lock node key, in its key.NLPrivate.MarshalText representation.
	NLKeyStateKey = StateKey("_nl-node-key")

	// KnownProfilesStateKey is the key under which we store the list of
	// known profiles.
	KnownProfilesStateKey = StateKey("_profiles")

	// CurrentProfileStateKey is the key under which we store the current
	// profile.
	CurrentProfileStateKey = StateKey("_current-profile")
)

func CurrentProfileKey(userID string) StateKey {
	if userID == "" {
		return CurrentProfileStateKey
	}
	return StateKey("_current/" + userID)
}

// StateStore persists state, and produces it back on request.
type StateStore interface {
	// ReadState returns the bytes associated with ID. Returns (nil,

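A quick illustration of the key layout produced by the CurrentProfileKey helper above (the "user1" ID is just the example value used in the Windows tests earlier):

package main

import (
	"fmt"

	"tailscale.com/ipn"
)

func main() {
	// Node-global selection (no associated OS user) lives under the
	// fixed "_current-profile" key.
	fmt.Println(ipn.CurrentProfileKey("")) // _current-profile
	// Per-user selection gets its own key derived from the user ID.
	fmt.Println(ipn.CurrentProfileKey("user1")) // _current/user1
}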
@ -507,7 +507,7 @@ func TestSSH(t *testing.T) {
		t.Fatal(err)
	}
	lb, err := ipnlocal.NewLocalBackend(logf, "",
		new(mem.Store),
		must.Get(ipnlocal.NewProfileManager(new(mem.Store), logf, "")),
		new(tsdial.Dialer),
		eng, 0)
	if err != nil {

@ -324,7 +324,11 @@ func (s *Server) start() (reterr error) {
	if s.Ephemeral {
		loginFlags = controlclient.LoginEphemeral
	}
	lb, err := ipnlocal.NewLocalBackend(logf, logid, s.Store, s.dialer, eng, loginFlags)
	pm, err := ipnlocal.NewProfileManager(s.Store, logf, "")
	if err != nil {
		return err
	}
	lb, err := ipnlocal.NewLocalBackend(logf, logid, pm, s.dialer, eng, loginFlags)
	if err != nil {
		return fmt.Errorf("NewLocalBackend: %v", err)
	}

@ -340,7 +344,6 @@ func (s *Server) start() (reterr error) {
	prefs.WantRunning = true
	authKey := s.getAuthKey()
	err = lb.Start(ipn.Options{
		StateKey: ipn.GlobalDaemonStateKey,
		UpdatePrefs: prefs,
		AuthKey: authKey,
	})

@ -31,6 +31,7 @@ import (

	"go4.org/mem"
	"tailscale.com/ipn"
	"tailscale.com/ipn/ipnlocal"
	"tailscale.com/ipn/ipnstate"
	"tailscale.com/ipn/store"
	"tailscale.com/safesocket"

@ -39,6 +40,7 @@ import (
	"tailscale.com/tstest"
	"tailscale.com/tstest/integration/testcontrol"
	"tailscale.com/types/logger"
	"tailscale.com/util/must"
)

var (

@ -659,15 +661,8 @@ func (n *testNode) diskPrefs() *ipn.Prefs {
	if err != nil {
		t.Fatalf("reading prefs, NewFileStore: %v", err)
	}
	prefBytes, err := fs.ReadState(ipn.GlobalDaemonStateKey)
	if err != nil {
		t.Fatalf("reading prefs, ReadState: %v", err)
	}
	p := new(ipn.Prefs)
	if err := json.Unmarshal(prefBytes, p); err != nil {
		t.Fatalf("reading prefs, JSON unmarshal: %v", err)
	}
	return p
	pm := must.Get(ipnlocal.NewProfileManager(fs, t.Logf, ""))
	return pm.CurrentPrefs().AsStruct()
}

// AwaitResponding waits for n's tailscaled to be up enough to be

@ -17,6 +17,7 @@ import (
	_ "tailscale.com/derp/derphttp"
	_ "tailscale.com/envknob"
	_ "tailscale.com/ipn"
	_ "tailscale.com/ipn/ipnlocal"
	_ "tailscale.com/ipn/ipnserver"
	_ "tailscale.com/ipn/store"
	_ "tailscale.com/logpolicy"

@ -17,6 +17,7 @@ import (
	_ "tailscale.com/derp/derphttp"
	_ "tailscale.com/envknob"
	_ "tailscale.com/ipn"
	_ "tailscale.com/ipn/ipnlocal"
	_ "tailscale.com/ipn/ipnserver"
	_ "tailscale.com/ipn/store"
	_ "tailscale.com/logpolicy"

@ -17,6 +17,7 @@ import (
	_ "tailscale.com/derp/derphttp"
	_ "tailscale.com/envknob"
	_ "tailscale.com/ipn"
	_ "tailscale.com/ipn/ipnlocal"
	_ "tailscale.com/ipn/ipnserver"
	_ "tailscale.com/ipn/store"
	_ "tailscale.com/logpolicy"

@ -17,6 +17,7 @@ import (
	_ "tailscale.com/derp/derphttp"
	_ "tailscale.com/envknob"
	_ "tailscale.com/ipn"
	_ "tailscale.com/ipn/ipnlocal"
	_ "tailscale.com/ipn/ipnserver"
	_ "tailscale.com/ipn/store"
	_ "tailscale.com/logpolicy"

@ -21,6 +21,7 @@ import (
	_ "tailscale.com/derp/derphttp"
	_ "tailscale.com/envknob"
	_ "tailscale.com/ipn"
	_ "tailscale.com/ipn/ipnlocal"
	_ "tailscale.com/ipn/ipnserver"
	_ "tailscale.com/ipn/store"
	_ "tailscale.com/logpolicy"

@ -293,8 +293,7 @@ func TestShouldProcessInbound(t *testing.T) {
		netip.MustParsePrefix("fd7a:115c:a1e0:b1a:0:7:a01:100/120"),
	}
	i.lb.Start(ipn.Options{
		StateKey: ipn.GlobalDaemonStateKey,
		UpdatePrefs: prefs,
		Prefs: prefs,
	})
	i.atomicIsLocalIPFunc.Store(looksLikeATailscaleSelfAddress)

@ -326,8 +325,7 @@ func TestShouldProcessInbound(t *testing.T) {
		netip.MustParsePrefix("fd7a:115c:a1e0:b1a:0:7:a01:200/120"),
	}
	i.lb.Start(ipn.Options{
		StateKey: ipn.GlobalDaemonStateKey,
		UpdatePrefs: prefs,
		Prefs: prefs,
	})
	},
	want: false,

@ -345,8 +343,7 @@ func TestShouldProcessInbound(t *testing.T) {
	prefs := ipn.NewPrefs()
	prefs.RunSSH = true
	i.lb.Start(ipn.Options{
		StateKey: ipn.GlobalDaemonStateKey,
		UpdatePrefs: prefs,
		Prefs: prefs,
	})
	i.atomicIsLocalIPFunc.Store(func(addr netip.Addr) bool {
		return addr.String() == "100.101.102.104" // Dst, above

@ -367,8 +364,7 @@ func TestShouldProcessInbound(t *testing.T) {
	prefs := ipn.NewPrefs()
	prefs.RunSSH = false // default, but to be explicit
	i.lb.Start(ipn.Options{
		StateKey: ipn.GlobalDaemonStateKey,
		UpdatePrefs: prefs,
		Prefs: prefs,
	})
	i.atomicIsLocalIPFunc.Store(func(addr netip.Addr) bool {
		return addr.String() == "100.101.102.104" // Dst, above

@ -427,8 +423,7 @@ func TestShouldProcessInbound(t *testing.T) {
		netip.MustParsePrefix("10.0.0.1/24"),
	}
	i.lb.Start(ipn.Options{
		StateKey: ipn.GlobalDaemonStateKey,
		UpdatePrefs: prefs,
		Prefs: prefs,
	})

	// As if we were running on Linux where netstack isn't used.

@ -458,7 +453,11 @@ func TestShouldProcessInbound(t *testing.T) {
	}
	t.Cleanup(e.Close)

	lb, err := ipnlocal.NewLocalBackend(logf, "logid", new(mem.Store), new(tsdial.Dialer), e, 0)
	pm, err := ipnlocal.NewProfileManager(new(mem.Store), t.Logf, "")
	if err != nil {
		t.Fatalf("NewProfileManager: %v", err)
	}
	lb, err := ipnlocal.NewLocalBackend(logf, "logid", pm, new(tsdial.Dialer), e, 0)
	if err != nil {
		t.Fatalf("NewLocalBackend: %v", err)
	}
