types/netmap, all: make NetworkMap.SelfNode a tailcfg.NodeView

Updates #1909

Change-Id: I8c470cbc147129a652c1d58eac9b790691b87606
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
Brad Fitzpatrick 2023-08-21 10:53:57 -07:00 committed by Brad Fitzpatrick
parent 699f9699ca
commit 84b94b3146
26 changed files with 90 additions and 90 deletions
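The pattern applied throughout this commit: NetworkMap.SelfNode is no longer a *tailcfg.Node that callers compare against nil and dereference, but a tailcfg.NodeView whose presence is checked with Valid() and whose fields are read through accessor methods. A minimal standalone sketch of the caller-side change, using only identifiers that appear in this diff (not code taken from the commit itself):

package main

import (
	"fmt"

	"tailscale.com/tailcfg"
	"tailscale.com/types/netmap"
)

func main() {
	// Build the immutable view from a mutable *tailcfg.Node with View().
	nm := &netmap.NetworkMap{
		SelfNode: (&tailcfg.Node{
			StableID: tailcfg.StableNodeID("myID"),
			Name:     "test-node.test.ts.net",
		}).View(),
	}

	// Old style: if nm.SelfNode != nil { use nm.SelfNode.StableID, .Name }
	// New style: the nil check becomes Valid(), field reads become methods.
	if nm.SelfNode.Valid() {
		fmt.Println(nm.SelfNode.StableID(), nm.SelfNode.Name())
	}
}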


@ -266,9 +266,9 @@ authLoop:
log.Fatalf("installing proxy rules: %v", err)
}
}
deviceInfo := []any{n.NetMap.SelfNode.StableID, n.NetMap.SelfNode.Name}
deviceInfo := []any{n.NetMap.SelfNode.StableID(), n.NetMap.SelfNode.Name()}
if cfg.InKubernetes && cfg.KubernetesCanPatch && cfg.KubeSecret != "" && deephash.Update(&currentDeviceInfo, &deviceInfo) {
if err := storeDeviceInfo(ctx, cfg.KubeSecret, n.NetMap.SelfNode.StableID, n.NetMap.SelfNode.Name); err != nil {
if err := storeDeviceInfo(ctx, cfg.KubeSecret, n.NetMap.SelfNode.StableID(), n.NetMap.SelfNode.Name()); err != nil {
log.Fatalf("storing device ID in kube secret: %v", err)
}
}


@ -112,10 +112,10 @@ func TestContainerBoot(t *testing.T) {
runningNotify := &ipn.Notify{
State: ptr.To(ipn.Running),
NetMap: &netmap.NetworkMap{
SelfNode: &tailcfg.Node{
SelfNode: (&tailcfg.Node{
StableID: tailcfg.StableNodeID("myID"),
Name: "test-node.test.ts.net",
},
}).View(),
Addresses: []netip.Prefix{netip.MustParsePrefix("100.64.0.1/32")},
},
}
@ -482,10 +482,10 @@ func TestContainerBoot(t *testing.T) {
Notify: &ipn.Notify{
State: ptr.To(ipn.Running),
NetMap: &netmap.NetworkMap{
SelfNode: &tailcfg.Node{
SelfNode: (&tailcfg.Node{
StableID: tailcfg.StableNodeID("newID"),
Name: "new-name.test.ts.net",
},
}).View(),
Addresses: []netip.Prefix{netip.MustParsePrefix("100.64.0.1/32")},
},
},


@ -851,8 +851,8 @@ func (e *serveEnv) enableFeatureInteractive(ctx context.Context, feature string,
e.lc.IncrementCounter(ctx, fmt.Sprintf("%s_enablement_lost_connection", feature), 1)
return err
}
if nm := n.NetMap; nm != nil && nm.SelfNode != nil {
if hasRequiredCapabilities(nm.SelfNode.Capabilities) {
if nm := n.NetMap; nm != nil && nm.SelfNode.Valid() {
if hasRequiredCapabilities(nm.SelfNode.Capabilities().AsSlice()) {
e.lc.IncrementCounter(ctx, fmt.Sprintf("%s_enabled", feature), 1)
fmt.Fprintln(os.Stdout, "Success.")
return nil


@ -278,7 +278,7 @@ func (i *jsIPN) run(jsCallbacks js.Value) {
TailscaleSSHEnabled: p.Hostinfo().TailscaleSSHEnabled(),
}
}),
LockedOut: nm.TKAEnabled && len(nm.SelfNode.KeySignature) == 0,
LockedOut: nm.TKAEnabled && nm.SelfNode.KeySignature().Len() == 0,
}
if jsonNetMap, err := json.Marshal(jsNetMap); err == nil {
jsCallbacks.Call("notifyNetMap", string(jsonNetMap))


@ -984,7 +984,7 @@ func (c *Direct) sendMapRequest(ctx context.Context, isStreaming bool, nu Netmap
// change it if the Node info changed.
if persist == c.persist {
newPersist := persist.AsStruct()
newPersist.NodeID = nm.SelfNode.StableID
newPersist.NodeID = nm.SelfNode.StableID()
newPersist.UserProfile = nm.UserProfiles[nm.User()]
c.persist = newPersist.View()


@ -315,7 +315,7 @@ func (ms *mapSession) netmapForResponse(resp *tailcfg.MapResponse) *netmap.Netwo
ms.lastNode = resp.Node
}
if node := ms.lastNode.Clone(); node != nil {
nm.SelfNode = node
nm.SelfNode = node.View()
nm.Expiry = node.KeyExpiry
nm.Name = node.Name
nm.Addresses = filterSelfAddresses(node.Addresses)


@ -444,18 +444,18 @@ func TestNetmapForResponse(t *testing.T) {
someNode := &tailcfg.Node{
Name: "foo",
}
wantNode := &tailcfg.Node{
wantNode := (&tailcfg.Node{
Name: "foo",
ComputedName: "foo",
ComputedNameWithHost: "foo",
}
}).View()
ms := newTestMapSession(t, nil)
mapRes := &tailcfg.MapResponse{
Node: someNode,
}
initDisplayNames(mapRes.Node.View(), mapRes)
nm1 := ms.netmapForResponse(mapRes)
if nm1.SelfNode == nil {
if !nm1.SelfNode.Valid() {
t.Fatal("nil Node in 1st netmap")
}
if !reflect.DeepEqual(nm1.SelfNode, wantNode) {
@ -464,7 +464,7 @@ func TestNetmapForResponse(t *testing.T) {
}
nm2 := ms.netmapForResponse(&tailcfg.MapResponse{})
if nm2.SelfNode == nil {
if !nm2.SelfNode.Valid() {
t.Fatal("nil Node in 1st netmap")
}
if !reflect.DeepEqual(nm2.SelfNode, wantNode) {


@ -6,11 +6,11 @@
package logknob
import (
"slices"
"sync/atomic"
"tailscale.com/envknob"
"tailscale.com/types/logger"
"tailscale.com/types/views"
)
// TODO(andrew-d): should we have a package-global registry of logknobs? It
@ -58,7 +58,7 @@ func (lk *LogKnob) Set(v bool) {
// about; we use this rather than a concrete type to avoid a circular
// dependency.
type NetMap interface {
SelfCapabilities() []string
SelfCapabilities() views.Slice[string]
}
// UpdateFromNetMap will enable logging if the SelfNode in the provided NetMap
@ -68,7 +68,7 @@ func (lk *LogKnob) UpdateFromNetMap(nm NetMap) {
return
}
lk.cap.Store(slices.Contains(nm.SelfCapabilities(), lk.capName))
lk.cap.Store(views.SliceContains(nm.SelfCapabilities(), lk.capName))
}
// Do will call log with the provided format and arguments if any of the
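Since the NetMap interface here now returns a views.Slice[string] instead of a []string, the membership test moves from slices.Contains to views.SliceContains. A small compilable sketch of that check (not from the commit), using netmap.NetworkMap, which satisfies this package's NetMap interface after the types/netmap change later in this diff:

package main

import (
	"fmt"

	"tailscale.com/tailcfg"
	"tailscale.com/types/netmap"
	"tailscale.com/types/views"
)

func main() {
	nm := &netmap.NetworkMap{
		SelfNode: (&tailcfg.Node{
			Capabilities: []string{"https://tailscale.com/cap/testing"},
		}).View(),
	}

	// SelfCapabilities now yields a read-only views.Slice[string];
	// test membership with views.SliceContains rather than slices.Contains.
	caps := nm.SelfCapabilities()
	fmt.Println(views.SliceContains(caps, "https://tailscale.com/cap/testing")) // true
}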


@ -63,11 +63,11 @@ func TestLogKnob(t *testing.T) {
}
testKnob.UpdateFromNetMap(&netmap.NetworkMap{
SelfNode: &tailcfg.Node{
SelfNode: (&tailcfg.Node{
Capabilities: []string{
"https://tailscale.com/cap/testing",
},
},
}).View(),
})
if !testKnob.shouldLog() {
t.Errorf("expected shouldLog()=true")


@ -170,8 +170,8 @@ func (em *expiryManager) nextPeerExpiry(nm *netmap.NetworkMap, localNow time.Tim
}
// Ensure that we also fire this timer if our own node key expires.
if nm.SelfNode != nil {
selfExpiry := nm.SelfNode.KeyExpiry
if nm.SelfNode.Valid() {
selfExpiry := nm.SelfNode.KeyExpiry()
if selfExpiry.IsZero() {
// No expiry for self node


@ -151,7 +151,7 @@ func TestNextPeerExpiry(t *testing.T) {
n(1, "foo", noExpiry),
n(2, "bar", noExpiry),
}),
SelfNode: n(3, "self", noExpiry),
SelfNode: n(3, "self", noExpiry).View(),
},
want: noExpiry,
},
@ -162,7 +162,7 @@ func TestNextPeerExpiry(t *testing.T) {
n(1, "foo", noExpiry),
n(2, "bar", timeInFuture),
}),
SelfNode: n(3, "self", noExpiry),
SelfNode: n(3, "self", noExpiry).View(),
},
want: timeInFuture,
},
@ -173,7 +173,7 @@ func TestNextPeerExpiry(t *testing.T) {
n(1, "foo", noExpiry),
n(2, "bar", noExpiry),
}),
SelfNode: n(3, "self", timeInFuture),
SelfNode: n(3, "self", timeInFuture).View(),
},
want: timeInFuture,
},
@ -184,7 +184,7 @@ func TestNextPeerExpiry(t *testing.T) {
n(1, "foo", timeInFuture),
n(2, "bar", timeInMoreFuture),
}),
SelfNode: n(3, "self", noExpiry),
SelfNode: n(3, "self", noExpiry).View(),
},
want: timeInFuture,
},
@ -194,7 +194,7 @@ func TestNextPeerExpiry(t *testing.T) {
Peers: nodeViews([]*tailcfg.Node{
n(1, "foo", timeInMoreFuture),
}),
SelfNode: n(2, "self", timeInFuture),
SelfNode: n(2, "self", timeInFuture).View(),
},
want: timeInFuture,
},
@ -202,7 +202,7 @@ func TestNextPeerExpiry(t *testing.T) {
name: "only_self",
netmap: &netmap.NetworkMap{
Peers: nodeViews([]*tailcfg.Node{}),
SelfNode: n(1, "self", timeInFuture),
SelfNode: n(1, "self", timeInFuture).View(),
},
want: timeInFuture,
},
@ -212,7 +212,7 @@ func TestNextPeerExpiry(t *testing.T) {
Peers: nodeViews([]*tailcfg.Node{
n(1, "foo", timeInPast),
}),
SelfNode: n(2, "self", timeInFuture),
SelfNode: n(2, "self", timeInFuture).View(),
},
want: timeInFuture,
},
@ -222,7 +222,7 @@ func TestNextPeerExpiry(t *testing.T) {
Peers: nodeViews([]*tailcfg.Node{
n(1, "foo", timeInFuture),
}),
SelfNode: n(2, "self", timeInPast),
SelfNode: n(2, "self", timeInPast).View(),
},
want: timeInFuture,
},
@ -232,7 +232,7 @@ func TestNextPeerExpiry(t *testing.T) {
Peers: nodeViews([]*tailcfg.Node{
n(1, "foo", timeInPast),
}),
SelfNode: n(2, "self", timeInPast),
SelfNode: n(2, "self", timeInPast).View(),
},
want: noExpiry,
},


@ -704,10 +704,10 @@ func (b *LocalBackend) updateStatus(sb *ipnstate.StatusBuilder, extraLocked func
ss.HostName = b.netMap.Hostinfo.Hostname
ss.DNSName = b.netMap.Name
ss.UserID = b.netMap.User()
if sn := b.netMap.SelfNode; sn != nil {
peerStatusFromNode(ss, sn.View())
if c := sn.Capabilities; len(c) > 0 {
ss.Capabilities = append([]string(nil), c...)
if sn := b.netMap.SelfNode; sn.Valid() {
peerStatusFromNode(ss, sn)
if c := sn.Capabilities(); c.Len() > 0 {
ss.Capabilities = c.AsSlice()
}
}
} else {
@ -3375,11 +3375,11 @@ func (b *LocalBackend) initPeerAPIListener() {
b.closePeerAPIListenersLocked()
selfNode := b.netMap.SelfNode
if len(b.netMap.Addresses) == 0 || selfNode == nil {
if len(b.netMap.Addresses) == 0 || !selfNode.Valid() {
return
}
fileRoot := b.fileRootLocked(selfNode.User)
fileRoot := b.fileRootLocked(selfNode.User())
if fileRoot == "" {
b.logf("peerapi starting without Taildrop directory configured")
}
@ -3955,12 +3955,8 @@ func (b *LocalBackend) setNetInfo(ni *tailcfg.NetInfo) {
}
func hasCapability(nm *netmap.NetworkMap, cap string) bool {
if nm != nil && nm.SelfNode != nil {
for _, c := range nm.SelfNode.Capabilities {
if c == cap {
return true
}
}
if nm != nil && nm.SelfNode.Valid() {
return views.SliceContains(nm.SelfNode.Capabilities(), cap)
}
return false
}
@ -4021,8 +4017,8 @@ func (b *LocalBackend) setNetMapLocked(nm *netmap.NetworkMap) {
}
}
}
if nm.SelfNode != nil {
addNode(nm.SelfNode.View())
if nm.SelfNode.Valid() {
addNode(nm.SelfNode)
}
for _, p := range nm.Peers {
addNode(p)
@ -4048,7 +4044,7 @@ func (b *LocalBackend) setDebugLogsByCapabilityLocked(nm *netmap.NetworkMap) {
}
func (b *LocalBackend) reloadServeConfigLocked(prefs ipn.PrefsView) {
if b.netMap == nil || b.netMap.SelfNode == nil || !prefs.Valid() || b.pm.CurrentProfile().ID == "" {
if b.netMap == nil || !b.netMap.SelfNode.Valid() || !prefs.Valid() || b.pm.CurrentProfile().ID == "" {
// We're not logged in, so we don't have a profile.
// Don't try to load the serve config.
b.lastServeConfJSON = mem.B(nil)


@ -795,9 +795,9 @@ func TestStatusWithoutPeers(t *testing.T) {
cc.send(nil, "", false, &netmap.NetworkMap{
MachineStatus: tailcfg.MachineAuthorized,
Addresses: ipps("100.101.101.101"),
SelfNode: &tailcfg.Node{
SelfNode: (&tailcfg.Node{
Addresses: ipps("100.101.101.101"),
},
}).View(),
})
got := b.StatusWithoutPeers()
if got.TailscaleIPs == nil {


@ -116,7 +116,7 @@ func (b *LocalBackend) tkaFilterNetmapLocked(nm *netmap.NetworkMap) {
}
// Check that we ourselves are not locked out, report a health issue if so.
if nm.SelfNode != nil && b.tka.authority.NodeKeyAuthorized(nm.SelfNode.Key, nm.SelfNode.KeySignature) != nil {
if nm.SelfNode.Valid() && b.tka.authority.NodeKeyAuthorized(nm.SelfNode.Key(), nm.SelfNode.KeySignature().AsSlice()) != nil {
health.SetTKAHealth(errors.New(healthmsg.LockedOut))
} else {
health.SetTKAHealth(nil)
@ -425,7 +425,7 @@ func (b *LocalBackend) NetworkLockStatus() *ipnstate.NetworkLockStatus {
var selfAuthorized bool
if b.netMap != nil {
selfAuthorized = b.tka.authority.NodeKeyAuthorized(b.netMap.SelfNode.Key, b.netMap.SelfNode.KeySignature) == nil
selfAuthorized = b.tka.authority.NodeKeyAuthorized(b.netMap.SelfNode.Key(), b.netMap.SelfNode.KeySignature().AsSlice()) == nil
}
keys := b.tka.authority.Keys()


@ -47,6 +47,7 @@ import (
"tailscale.com/net/netutil"
"tailscale.com/net/sockstats"
"tailscale.com/tailcfg"
"tailscale.com/types/views"
"tailscale.com/util/clientmetric"
"tailscale.com/util/multierr"
"tailscale.com/version/distro"
@ -569,14 +570,14 @@ func (pln *peerAPIListener) ServeConn(src netip.AddrPort, c net.Conn) {
return
}
nm := pln.lb.NetMap()
if nm == nil || nm.SelfNode == nil {
if nm == nil || !nm.SelfNode.Valid() {
logf("peerapi: no netmap")
c.Close()
return
}
h := &peerAPIHandler{
ps: pln.ps,
isSelf: nm.SelfNode.User == peerNode.User(),
isSelf: nm.SelfNode.User() == peerNode.User(),
remoteAddr: src,
selfNode: nm.SelfNode,
peerNode: peerNode,
@ -596,7 +597,7 @@ type peerAPIHandler struct {
ps *peerAPIServer
remoteAddr netip.AddrPort
isSelf bool // whether peerNode is owned by same user as this node
selfNode *tailcfg.Node // this node; always non-nil
selfNode tailcfg.NodeView // this node; always non-nil
peerNode tailcfg.NodeView // peerNode is who's making the request
peerUser tailcfg.UserProfile // profile of peerNode
}
@ -612,7 +613,7 @@ func (h *peerAPIHandler) isAddressValid(addr netip.Addr) bool {
return *v == addr
}
pfx := netip.PrefixFrom(addr, addr.BitLen())
return slices.Contains(h.selfNode.Addresses, pfx)
return views.SliceContains(h.selfNode.Addresses(), pfx)
}
func (h *peerAPIHandler) validateHost(r *http.Request) error {
@ -1034,7 +1035,7 @@ func (h *peerAPIHandler) canPutFile() bool {
// canDebug reports whether h can debug this node (goroutines, metrics,
// magicsock internal state, etc).
func (h *peerAPIHandler) canDebug() bool {
if !slices.Contains(h.selfNode.Capabilities, tailcfg.CapabilityDebug) {
if !views.SliceContains(h.selfNode.Capabilities(), tailcfg.CapabilityDebug) {
// This node does not expose debug info.
return false
}
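With selfNode held as a tailcfg.NodeView, both the address check and the capability check above go through views.SliceContains on the view's accessor results. A standalone sketch of the address comparison (identifiers from this diff; the helper name hasAddress is mine, not the handler's method):

package main

import (
	"fmt"
	"net/netip"

	"tailscale.com/tailcfg"
	"tailscale.com/types/views"
)

// hasAddress reports whether addr, as a single-IP prefix, is one of the
// node's self addresses, mirroring the prefix comparison in isAddressValid.
func hasAddress(self tailcfg.NodeView, addr netip.Addr) bool {
	pfx := netip.PrefixFrom(addr, addr.BitLen())
	return views.SliceContains(self.Addresses(), pfx)
}

func main() {
	self := (&tailcfg.Node{
		Addresses: []netip.Prefix{netip.MustParsePrefix("100.100.100.101/32")},
	}).View()
	fmt.Println(hasAddress(self, netip.MustParseAddr("100.100.100.101"))) // true
}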


@ -456,12 +456,12 @@ func TestHandlePeerAPI(t *testing.T) {
lb := &LocalBackend{
logf: e.logBuf.Logf,
capFileSharing: tt.capSharing,
netMap: &netmap.NetworkMap{SelfNode: selfNode},
netMap: &netmap.NetworkMap{SelfNode: selfNode.View()},
clock: &tstest.Clock{},
}
e.ph = &peerAPIHandler{
isSelf: tt.isSelf,
selfNode: selfNode,
selfNode: selfNode.View(),
peerNode: (&tailcfg.Node{
ComputedName: "some-peer-name",
}).View(),
@ -516,9 +516,9 @@ func TestFileDeleteRace(t *testing.T) {
peerNode: (&tailcfg.Node{
ComputedName: "some-peer-name",
}).View(),
selfNode: &tailcfg.Node{
selfNode: (&tailcfg.Node{
Addresses: []netip.Prefix{netip.MustParsePrefix("100.100.100.101/32")},
},
}).View(),
ps: ps,
}
buf := make([]byte, 2<<20)


@ -193,7 +193,7 @@ func (b *LocalBackend) updateServeTCPPortNetMapAddrListenersLocked(ports []uint1
b.logf("netMap is nil")
return
}
if nm.SelfNode == nil {
if !nm.SelfNode.Valid() {
b.logf("netMap SelfNode is nil")
return
}
@ -227,7 +227,7 @@ func (b *LocalBackend) SetServeConfig(config *ipn.ServeConfig) error {
if nm == nil {
return errors.New("netMap is nil")
}
if nm.SelfNode == nil {
if !nm.SelfNode.Valid() {
return errors.New("netMap SelfNode is nil")
}
profileID := b.pm.CurrentProfile().ID


@ -190,9 +190,9 @@ func TestServeHTTPProxy(t *testing.T) {
b.pm = pm
b.netMap = &netmap.NetworkMap{
SelfNode: &tailcfg.Node{
SelfNode: (&tailcfg.Node{
Name: "example.ts.net",
},
}).View(),
UserProfiles: map[tailcfg.UserID]tailcfg.UserProfile{
tailcfg.UserID(1): {
LoginName: "someone@example.com",


@ -339,8 +339,8 @@ func (h *Handler) serveBugReport(w http.ResponseWriter, r *http.Request) {
// Information about the current node from the netmap
if nm := h.b.NetMap(); nm != nil {
if self := nm.SelfNode; self != nil {
h.logf("user bugreport node info: nodeid=%q stableid=%q expiry=%q", self.ID, self.StableID, self.KeyExpiry.Format(time.RFC3339))
if self := nm.SelfNode; self.Valid() {
h.logf("user bugreport node info: nodeid=%q stableid=%q expiry=%q", self.ID(), self.StableID(), self.KeyExpiry().Format(time.RFC3339))
}
h.logf("user bugreport public keys: machine=%q node=%q", nm.MachineKey, nm.NodeKey)
} else {


@ -787,7 +787,7 @@ func (c *conn) expandDelegateURLLocked(actionURL string) string {
lu := c.localUser
var dstNodeID string
if nm != nil {
dstNodeID = fmt.Sprint(int64(nm.SelfNode.ID))
dstNodeID = fmt.Sprint(int64(nm.SelfNode.ID()))
}
return strings.NewReplacer(
"$SRC_NODE_IP", url.QueryEscape(ci.src.Addr().String()),


@ -276,9 +276,9 @@ func (ts *localState) NetMap() *netmap.NetworkMap {
}
return &netmap.NetworkMap{
SelfNode: &tailcfg.Node{
SelfNode: (&tailcfg.Node{
ID: 1,
},
}).View(),
SSHPolicy: policy,
}
}


@ -25,7 +25,7 @@ import (
type NetworkMap struct {
// Core networking
SelfNode *tailcfg.Node
SelfNode tailcfg.NodeView
NodeKey key.NodePublic
PrivateKey key.NodePrivate
Expiry time.Time
@ -90,8 +90,8 @@ type NetworkMap struct {
// User returns nm.SelfNode.User if nm.SelfNode is non-nil, otherwise it returns
// 0.
func (nm *NetworkMap) User() tailcfg.UserID {
if nm.SelfNode != nil {
return nm.SelfNode.User
if nm.SelfNode.Valid() {
return nm.SelfNode.User()
}
return 0
}
@ -149,12 +149,13 @@ func (nm *NetworkMap) MagicDNSSuffix() string {
// SelfCapabilities returns SelfNode.Capabilities if nm and nm.SelfNode are
// non-nil. This is a method so we can use it in envknob/logknob without a
// circular dependency.
func (nm *NetworkMap) SelfCapabilities() []string {
if nm == nil || nm.SelfNode == nil {
return nil
func (nm *NetworkMap) SelfCapabilities() views.Slice[string] {
var zero views.Slice[string]
if nm == nil || !nm.SelfNode.Valid() {
return zero
}
return nm.SelfNode.Capabilities
return nm.SelfNode.Capabilities()
}
func (nm *NetworkMap) String() string {
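This is the core of the change: SelfNode is now declared as a tailcfg.NodeView. Its zero value reports Valid() == false, so the old nil checks translate directly, and the guarded helpers above (User, SelfCapabilities) keep returning zero values when no self node is present. A small sketch of that zero-value behavior, not taken from the commit:

package main

import (
	"fmt"

	"tailscale.com/types/netmap"
)

func main() {
	// A NetworkMap with no SelfNode set: the zero NodeView is simply invalid,
	// so there is no nil pointer to guard against.
	nm := &netmap.NetworkMap{}

	fmt.Println(nm.SelfNode.Valid())         // false
	fmt.Println(nm.User())                   // 0, via the guarded accessor above
	fmt.Println(nm.SelfCapabilities().Len()) // 0, a zero views.Slice[string]
}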


@ -14,7 +14,6 @@ import (
"net"
"net/netip"
"runtime"
"slices"
"strconv"
"strings"
"sync"
@ -50,6 +49,7 @@ import (
"tailscale.com/types/logger"
"tailscale.com/types/netmap"
"tailscale.com/types/nettype"
"tailscale.com/types/views"
"tailscale.com/util/clientmetric"
"tailscale.com/util/mak"
"tailscale.com/util/ringbuffer"
@ -2093,8 +2093,8 @@ func (c *Conn) shouldDoPeriodicReSTUNLocked() bool {
c.logf("magicsock: periodicReSTUN: idle for %v", idleFor.Round(time.Second))
}
if idleFor > sessionActiveTimeout {
if c.netMap != nil && c.netMap.SelfNode != nil &&
slices.Contains(c.netMap.SelfNode.Capabilities, tailcfg.NodeAttrDebugForceBackgroundSTUN) {
if c.netMap != nil && c.netMap.SelfNode.Valid() &&
views.SliceContains(c.netMap.SelfNode.Capabilities(), tailcfg.NodeAttrDebugForceBackgroundSTUN) {
// Overridden by control.
return true
}


@ -328,12 +328,14 @@ func (ns *Impl) updateIPs(nm *netmap.NetworkMap) {
newIPs := make(map[tcpip.AddressWithPrefix]bool)
isAddr := map[netip.Prefix]bool{}
if nm.SelfNode != nil {
for _, ipp := range nm.SelfNode.Addresses {
if nm.SelfNode.Valid() {
for i := range nm.SelfNode.Addresses().LenIter() {
ipp := nm.SelfNode.Addresses().At(i)
isAddr[ipp] = true
newIPs[ipPrefixToAddressWithPrefix(ipp)] = true
}
for _, ipp := range nm.SelfNode.AllowedIPs {
for i := range nm.SelfNode.AllowedIPs().LenIter() {
ipp := nm.SelfNode.AllowedIPs().At(i)
if !isAddr[ipp] && ns.ProcessSubnets {
newIPs[ipPrefixToAddressWithPrefix(ipp)] = true
}
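Because the view exposes Addresses and AllowedIPs as read-only views.Slice[netip.Prefix] values rather than plain slices, the range loops above become index loops over LenIter() with At(i). A standalone sketch of that iteration pattern (identifiers as they appear in this diff, not code from the commit):

package main

import (
	"fmt"
	"net/netip"

	"tailscale.com/tailcfg"
)

func main() {
	self := (&tailcfg.Node{
		Addresses: []netip.Prefix{
			netip.MustParsePrefix("100.64.0.1/32"),
			netip.MustParsePrefix("fd7a:115c:a1e0::1/128"),
		},
	}).View()

	// Old style: for _, ipp := range node.Addresses { ... }
	// New style: index over the view with LenIter() and read entries with At(i).
	for i := range self.Addresses().LenIter() {
		ipp := self.Addresses().At(i)
		fmt.Println(ipp)
	}
}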


@ -799,8 +799,8 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config,
}
isSubnetRouter := false
if e.birdClient != nil && nm != nil && nm.SelfNode != nil {
isSubnetRouter = hasOverlap(nm.SelfNode.PrimaryRoutes, nm.Hostinfo.RoutableIPs)
if e.birdClient != nil && nm != nil && nm.SelfNode.Valid() {
isSubnetRouter = hasOverlap(nm.SelfNode.PrimaryRoutes().AsSlice(), nm.Hostinfo.RoutableIPs)
e.logf("[v1] Reconfig: hasOverlap(%v, %v) = %v; isSubnetRouter=%v lastIsSubnetRouter=%v",
nm.SelfNode.PrimaryRoutes, nm.Hostinfo.RoutableIPs,
isSubnetRouter, isSubnetRouter, e.lastIsSubnetRouter)
@ -1445,7 +1445,7 @@ func (e *userspaceEngine) PeerForIP(ip netip.Addr) (ret PeerForIP, ok bool) {
}
for _, a := range nm.Addresses {
if a.Addr() == ip && a.IsSingleIP() && tsaddr.IsTailscaleIP(ip) {
return PeerForIP{Node: nm.SelfNode.View(), IsSelf: true, Route: a}, true
return PeerForIP{Node: nm.SelfNode, IsSelf: true, Route: a}, true
}
}


@ -8,7 +8,6 @@ import (
"bytes"
"fmt"
"net/netip"
"slices"
"strings"
"tailscale.com/net/tsaddr"
@ -16,6 +15,7 @@ import (
"tailscale.com/types/logger"
"tailscale.com/types/logid"
"tailscale.com/types/netmap"
"tailscale.com/types/views"
"tailscale.com/wgengine/wgcfg"
)
@ -61,11 +61,11 @@ func WGCfg(nm *netmap.NetworkMap, logf logger.Logf, flags netmap.WGConfigFlags,
}
// Setup log IDs for data plane audit logging.
if nm.SelfNode != nil {
cfg.NodeID = nm.SelfNode.StableID
canNetworkLog := slices.Contains(nm.SelfNode.Capabilities, tailcfg.CapabilityDataPlaneAuditLogs)
if canNetworkLog && nm.SelfNode.DataPlaneAuditLogID != "" && nm.DomainAuditLogID != "" {
nodeID, errNode := logid.ParsePrivateID(nm.SelfNode.DataPlaneAuditLogID)
if nm.SelfNode.Valid() {
cfg.NodeID = nm.SelfNode.StableID()
canNetworkLog := views.SliceContains(nm.SelfNode.Capabilities(), tailcfg.CapabilityDataPlaneAuditLogs)
if canNetworkLog && nm.SelfNode.DataPlaneAuditLogID() != "" && nm.DomainAuditLogID != "" {
nodeID, errNode := logid.ParsePrivateID(nm.SelfNode.DataPlaneAuditLogID())
if errNode != nil {
logf("[v1] wgcfg: unable to parse node audit log ID: %v", errNode)
}