control/controlclient: remove quadratic allocs in mapSession

The mapSession code was previously quadratic: with N clients in a
netmap, updates arrive at a rate proportional to N, and for each
update we then did N units of work. This removes most of that "N
units of work" per update. There's still a netmap-sized slice
allocation per update (that's #8963), but that's it.

Bit more efficient now, especially with larger netmaps:

                                 │     before     │                after                │
                                 │     sec/op     │   sec/op     vs base                │
    MapSessionDelta/size_10-8       47.935µ ±  3%   1.232µ ± 2%  -97.43% (p=0.000 n=10)
    MapSessionDelta/size_100-8      79.950µ ±  3%   1.642µ ± 2%  -97.95% (p=0.000 n=10)
    MapSessionDelta/size_1000-8    355.747µ ± 10%   4.400µ ± 1%  -98.76% (p=0.000 n=10)
    MapSessionDelta/size_10000-8   3079.71µ ±  3%   27.89µ ± 3%  -99.09% (p=0.000 n=10)
    geomean                          254.6µ         3.969µ       -98.44%

                                 │     before     │                after                 │
                                 │      B/op      │     B/op      vs base                │
    MapSessionDelta/size_10-8        9.651Ki ± 0%   2.395Ki ± 0%  -75.19% (p=0.000 n=10)
    MapSessionDelta/size_100-8      83.097Ki ± 0%   3.192Ki ± 0%  -96.16% (p=0.000 n=10)
    MapSessionDelta/size_1000-8     800.25Ki ± 0%   10.32Ki ± 0%  -98.71% (p=0.000 n=10)
    MapSessionDelta/size_10000-8   7896.04Ki ± 0%   82.32Ki ± 0%  -98.96% (p=0.000 n=10)
    geomean                          266.8Ki        8.977Ki       -96.64%

                                 │    before     │               after                │
                                 │   allocs/op   │ allocs/op   vs base                │
    MapSessionDelta/size_10-8         72.00 ± 0%   20.00 ± 0%  -72.22% (p=0.000 n=10)
    MapSessionDelta/size_100-8       523.00 ± 0%   20.00 ± 0%  -96.18% (p=0.000 n=10)
    MapSessionDelta/size_1000-8     5024.00 ± 0%   20.00 ± 0%  -99.60% (p=0.000 n=10)
    MapSessionDelta/size_10000-8   50024.00 ± 0%   20.00 ± 0%  -99.96% (p=0.000 n=10)
    geomean                          1.754k        20.00       -98.86%

Updates #1909

Change-Id: I41ee29358a5521ed762216a76d4cc5b0d16e46ac
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2 changed files with 274 additions and 202 deletions
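
For context, the benchmark harness that produces numbers like the ones
above is not shown in this excerpt. The sketch below is a rough
approximation of its likely shape, not the actual harness: it assumes
the newTestMapSession helper from map_test.go (shown in the second file
below), and the sizes, peer IDs, and per-iteration delta are
illustrative. The key point it demonstrates is that each iteration
applies a one-node delta, which the new code handles in O(1) map work
rather than re-merging the whole peer list:

    package controlclient

    import (
        "fmt"
        "testing"

        "tailscale.com/tailcfg"
    )

    func BenchmarkMapSessionDelta(b *testing.B) {
        for _, size := range []int{10, 100, 1000, 10000} {
            b.Run(fmt.Sprintf("size_%v", size), func(b *testing.B) {
                ms := newTestMapSession(b, nil)
                // Seed the session with a full (non-delta) peer list of N nodes.
                res := &tailcfg.MapResponse{}
                for i := 0; i < size; i++ {
                    res.Peers = append(res.Peers, &tailcfg.Node{ID: tailcfg.NodeID(i + 2)})
                }
                ms.updateStateFromResponse(res)
                b.ReportAllocs()
                b.ResetTimer()
                // Each iteration applies one tiny delta and rebuilds the
                // netmap. Before this change, even a one-node delta cost
                // O(N) work and allocations.
                for i := 0; i < b.N; i++ {
                    ms.updateStateFromResponse(&tailcfg.MapResponse{
                        OnlineChange: map[tailcfg.NodeID]bool{2: i%2 == 0},
                    })
                    _ = ms.netmap()
                }
            })
        }
    }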

control/controlclient/map.go

@@ -6,7 +6,6 @@ package controlclient
import (
"context"
"fmt"
"log"
"net/netip"
"sort"
@@ -16,6 +15,7 @@ import (
"tailscale.com/types/key"
"tailscale.com/types/logger"
"tailscale.com/types/netmap"
"tailscale.com/types/ptr"
"tailscale.com/types/views"
"tailscale.com/util/cmpx"
"tailscale.com/wgengine/filter"
@@ -33,6 +33,7 @@ type mapSession struct {
// Immutable fields.
nu NetmapUpdater // called on changes (in addition to the optional hooks below)
privateNodeKey key.NodePrivate
publicNodeKey key.NodePublic
logf logger.Logf
vlogf logger.Logf
machinePubKey key.MachinePublic
@@ -63,6 +64,8 @@ type mapSession struct {
// Fields storing state over the course of multiple MapResponses.
lastNode tailcfg.NodeView
peers map[tailcfg.NodeID]*tailcfg.NodeView // pointer to view (oddly). same pointers as sortedPeers.
sortedPeers []*tailcfg.NodeView // same pointers as peers, but sorted by Node.ID
lastDNSConfig *tailcfg.DNSConfig
lastDERPMap *tailcfg.DERPMap
lastUserProfile map[tailcfg.UserID]tailcfg.UserProfile
@@ -70,7 +73,6 @@ type mapSession struct {
lastParsedPacketFilter []filter.Match
lastSSHPolicy *tailcfg.SSHPolicy
collectServices bool
previousPeers []*tailcfg.Node // for delta-purposes
lastDomain string
lastDomainAuditLogID string
lastHealth []string
@@ -78,10 +80,6 @@ type mapSession struct {
stickyDebug tailcfg.Debug // accumulated opt.Bool values
lastTKAInfo *tailcfg.TKAInfo
lastNetmapSummary string // from NetworkMap.VeryConcise
// netMapBuilding is non-nil during a netmapForResponse call,
// containing the value to be returned, once fully populated.
netMapBuilding *netmap.NetworkMap
}
// newMapSession returns a mostly unconfigured new mapSession.
@@ -93,6 +91,7 @@ func newMapSession(privateNodeKey key.NodePrivate, nu NetmapUpdater) *mapSession
ms := &mapSession{
nu: nu,
privateNodeKey: privateNodeKey,
publicNodeKey: privateNodeKey.Public(),
lastDNSConfig: new(tailcfg.DNSConfig),
lastUserProfile: map[tailcfg.UserID]tailcfg.UserProfile{},
watchdogReset: make(chan struct{}),
@@ -184,7 +183,9 @@ func (ms *mapSession) HandleNonKeepAliveMapResponse(ctx context.Context, resp *tailcfg.MapResponse) error {
// Call Node.InitDisplayNames on any changed nodes.
initDisplayNames(cmpx.Or(resp.Node.View(), ms.lastNode), resp)
nm := ms.netmapForResponse(resp)
ms.updateStateFromResponse(resp)
nm := ms.netmap()
ms.lastNetmapSummary = nm.VeryConcise()
ms.onConciseNetMapSummary(ms.lastNetmapSummary)
@@ -198,30 +199,28 @@ func (ms *mapSession) HandleNonKeepAliveMapResponse(ctx context.Context, resp *tailcfg.MapResponse) error {
return nil
}
func (ms *mapSession) addUserProfile(userID tailcfg.UserID) {
if userID == 0 {
return
}
nm := ms.netMapBuilding
if _, dup := nm.UserProfiles[userID]; dup {
// Already populated it from a previous peer.
return
}
if up, ok := ms.lastUserProfile[userID]; ok {
nm.UserProfiles[userID] = up
}
// updateStats are some stats from updateStateFromResponse, primarily for
// testing. It's meant to be cheap enough to always compute, though. It doesn't
// allocate.
type updateStats struct {
allNew bool
added int
removed int
changed int
}
// netmapForResponse returns a fully populated NetworkMap from a full
// or incremental MapResponse within the session, filling in omitted
// information from prior MapResponse values.
func (ms *mapSession) netmapForResponse(resp *tailcfg.MapResponse) *netmap.NetworkMap {
undeltaPeers(resp, ms.previousPeers)
// updateStateFromResponse updates ms from resp. It takes ownership of resp.
func (ms *mapSession) updateStateFromResponse(resp *tailcfg.MapResponse) {
ms.updatePeersStateFromResponse(resp)
if resp.Node != nil {
ms.lastNode = resp.Node.View()
}
ms.previousPeers = cloneNodes(resp.Peers) // defensive/lazy clone, since this escapes to who knows where
for _, up := range resp.UserProfiles {
ms.lastUserProfile[up.ID] = up
}
// TODO(bradfitz): clean up old user profiles? maybe not worth it.
if dm := resp.DERPMap; dm != nil {
ms.vlogf("netmap: new map contains DERP map")
@@ -277,16 +276,169 @@ func (ms *mapSession) netmapForResponse(resp *tailcfg.MapResponse) *netmap.NetworkMap {
if resp.TKAInfo != nil {
ms.lastTKAInfo = resp.TKAInfo
}
}
// TODO(bradfitz): now that this is a view, remove some of the defensive
// cloning elsewhere in mapSession.
peerViews := make([]tailcfg.NodeView, len(resp.Peers))
for i, n := range resp.Peers {
peerViews[i] = n.View()
// updatePeersStateFromResponse updates ms.peers and ms.sortedPeers from resp. It takes ownership of resp.
func (ms *mapSession) updatePeersStateFromResponse(resp *tailcfg.MapResponse) (stats updateStats) {
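// The deferred rebuild below runs at most once per response, and only
// when peers were added or removed; in-place changes keep the existing
// sort order.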
defer func() {
if stats.removed > 0 || stats.added > 0 {
ms.rebuildSorted()
}
}()
if ms.peers == nil {
ms.peers = make(map[tailcfg.NodeID]*tailcfg.NodeView)
}
if len(resp.Peers) > 0 {
// Not delta encoded.
stats.allNew = true
keep := make(map[tailcfg.NodeID]bool, len(resp.Peers))
for _, n := range resp.Peers {
keep[n.ID] = true
if vp, ok := ms.peers[n.ID]; ok {
stats.changed++
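// Write the new view through the shared pointer, so ms.sortedPeers
// sees the update without a rebuild.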
*vp = n.View()
} else {
stats.added++
ms.peers[n.ID] = ptr.To(n.View())
}
}
for id := range ms.peers {
if !keep[id] {
stats.removed++
delete(ms.peers, id)
}
}
// A non-empty Peers field precludes all other delta operations, so just return.
return
}
for _, id := range resp.PeersRemoved {
if _, ok := ms.peers[id]; ok {
delete(ms.peers, id)
stats.removed++
}
}
for _, n := range resp.PeersChanged {
if vp, ok := ms.peers[n.ID]; ok {
stats.changed++
*vp = n.View()
} else {
stats.added++
ms.peers[n.ID] = ptr.To(n.View())
}
}
for nodeID, seen := range resp.PeerSeenChange {
if vp, ok := ms.peers[nodeID]; ok {
mut := vp.AsStruct()
if seen {
mut.LastSeen = ptr.To(clock.Now())
} else {
mut.LastSeen = nil
}
*vp = mut.View()
stats.changed++
}
}
for nodeID, online := range resp.OnlineChange {
if vp, ok := ms.peers[nodeID]; ok {
mut := vp.AsStruct()
mut.Online = ptr.To(online)
*vp = mut.View()
stats.changed++
}
}
for _, pc := range resp.PeersChangedPatch {
vp, ok := ms.peers[pc.NodeID]
if !ok {
continue
}
stats.changed++
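// Expand to a mutable struct, apply only the fields present in the
// patch, then publish the rebuilt view through the shared pointer.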
mut := vp.AsStruct()
if pc.DERPRegion != 0 {
mut.DERP = fmt.Sprintf("%s:%v", tailcfg.DerpMagicIP, pc.DERPRegion)
}
if pc.Cap != 0 {
mut.Cap = pc.Cap
}
if pc.Endpoints != nil {
mut.Endpoints = pc.Endpoints
}
if pc.Key != nil {
mut.Key = *pc.Key
}
if pc.DiscoKey != nil {
mut.DiscoKey = *pc.DiscoKey
}
if v := pc.Online; v != nil {
mut.Online = ptr.To(*v)
}
if v := pc.LastSeen; v != nil {
mut.LastSeen = ptr.To(*v)
}
if v := pc.KeyExpiry; v != nil {
mut.KeyExpiry = *v
}
if v := pc.Capabilities; v != nil {
mut.Capabilities = *v
}
if v := pc.KeySignature; v != nil {
mut.KeySignature = v
}
*vp = mut.View()
}
return
}
// rebuildSorted rebuilds ms.sortedPeers from ms.peers. It should be called
// after any additions or removals from peers.
func (ms *mapSession) rebuildSorted() {
if ms.sortedPeers == nil {
ms.sortedPeers = make([]*tailcfg.NodeView, 0, len(ms.peers))
} else {
if len(ms.sortedPeers) > len(ms.peers) {
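// Zero the unused tail so stale *NodeView pointers don't keep
// removed nodes reachable.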
clear(ms.sortedPeers[len(ms.peers):])
}
ms.sortedPeers = ms.sortedPeers[:0]
}
for _, p := range ms.peers {
ms.sortedPeers = append(ms.sortedPeers, p)
}
sort.Slice(ms.sortedPeers, func(i, j int) bool {
return ms.sortedPeers[i].ID() < ms.sortedPeers[j].ID()
})
}
func (ms *mapSession) addUserProfile(nm *netmap.NetworkMap, userID tailcfg.UserID) {
if userID == 0 {
return
}
if _, dup := nm.UserProfiles[userID]; dup {
// Already populated it from a previous peer.
return
}
if up, ok := ms.lastUserProfile[userID]; ok {
nm.UserProfiles[userID] = up
}
}
// netmap returns a fully populated NetworkMap from the last state seen from
// a call to updateStateFromResponse, filling in omitted
// information from prior MapResponse values.
func (ms *mapSession) netmap() *netmap.NetworkMap {
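// Note: this netmap-sized slice is the one remaining per-update
// allocation called out in the commit message (#8963).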
peerViews := make([]tailcfg.NodeView, len(ms.sortedPeers))
for i, vp := range ms.sortedPeers {
peerViews[i] = *vp
}
nm := &netmap.NetworkMap{
NodeKey: ms.privateNodeKey.Public(),
NodeKey: ms.publicNodeKey,
PrivateKey: ms.privateNodeKey,
MachineKey: ms.machinePubKey,
Peers: peerViews,
@@ -302,7 +454,6 @@ func (ms *mapSession) netmapForResponse(resp *tailcfg.MapResponse) *netmap.NetworkMap {
ControlHealth: ms.lastHealth,
TKAEnabled: ms.lastTKAInfo != nil && !ms.lastTKAInfo.Disabled,
}
ms.netMapBuilding = nm
if ms.lastTKAInfo != nil && ms.lastTKAInfo.Head != "" {
if err := nm.TKAHead.UnmarshalText([]byte(ms.lastTKAInfo.Head)); err != nil {
@@ -311,9 +462,6 @@ func (ms *mapSession) netmapForResponse(resp *tailcfg.MapResponse) *netmap.NetworkMap {
}
}
if resp.Node != nil {
ms.lastNode = resp.Node.View()
}
if node := ms.lastNode; node.Valid() {
nm.SelfNode = node
nm.Expiry = node.KeyExpiry()
@@ -329,156 +477,17 @@ func (ms *mapSession) netmapForResponse(resp *tailcfg.MapResponse) *netmap.NetworkMap {
}
}
ms.addUserProfile(nm.User())
for _, peer := range resp.Peers {
ms.addUserProfile(peer.Sharer)
ms.addUserProfile(peer.User)
ms.addUserProfile(nm, nm.User())
for _, peer := range peerViews {
ms.addUserProfile(nm, peer.Sharer())
ms.addUserProfile(nm, peer.User())
}
if DevKnob.ForceProxyDNS() {
nm.DNS.Proxied = true
}
ms.netMapBuilding = nil
return nm
}
// undeltaPeers updates mapRes.Peers to be complete based on the
// provided previous peer list and the PeersRemoved and PeersChanged
// fields in mapRes, as well as the PeerSeenChange and OnlineChange
// maps.
//
// It then also nils out the delta fields.
func undeltaPeers(mapRes *tailcfg.MapResponse, prev []*tailcfg.Node) {
if len(mapRes.Peers) > 0 {
// Not delta encoded.
if !nodesSorted(mapRes.Peers) {
log.Printf("netmap: undeltaPeers: MapResponse.Peers not sorted; sorting")
sortNodes(mapRes.Peers)
}
return
}
var removed map[tailcfg.NodeID]bool
if pr := mapRes.PeersRemoved; len(pr) > 0 {
removed = make(map[tailcfg.NodeID]bool, len(pr))
for _, id := range pr {
removed[id] = true
}
}
changed := mapRes.PeersChanged
if !nodesSorted(changed) {
log.Printf("netmap: undeltaPeers: MapResponse.PeersChanged not sorted; sorting")
sortNodes(changed)
}
if !nodesSorted(prev) {
// Internal error (unrelated to the network) if we get here.
log.Printf("netmap: undeltaPeers: [unexpected] prev not sorted; sorting")
sortNodes(prev)
}
newFull := prev
if len(removed) > 0 || len(changed) > 0 {
newFull = make([]*tailcfg.Node, 0, len(prev)-len(removed))
for len(prev) > 0 && len(changed) > 0 {
pID := prev[0].ID
cID := changed[0].ID
if removed[pID] {
prev = prev[1:]
continue
}
switch {
case pID < cID:
newFull = append(newFull, prev[0])
prev = prev[1:]
case pID == cID:
newFull = append(newFull, changed[0])
prev, changed = prev[1:], changed[1:]
case cID < pID:
newFull = append(newFull, changed[0])
changed = changed[1:]
}
}
newFull = append(newFull, changed...)
for _, n := range prev {
if !removed[n.ID] {
newFull = append(newFull, n)
}
}
sortNodes(newFull)
}
if len(mapRes.PeerSeenChange) != 0 || len(mapRes.OnlineChange) != 0 || len(mapRes.PeersChangedPatch) != 0 {
peerByID := make(map[tailcfg.NodeID]*tailcfg.Node, len(newFull))
for _, n := range newFull {
peerByID[n.ID] = n
}
now := clock.Now()
for nodeID, seen := range mapRes.PeerSeenChange {
if n, ok := peerByID[nodeID]; ok {
if seen {
n.LastSeen = &now
} else {
n.LastSeen = nil
}
}
}
for nodeID, online := range mapRes.OnlineChange {
if n, ok := peerByID[nodeID]; ok {
online := online
n.Online = &online
}
}
for _, ec := range mapRes.PeersChangedPatch {
if n, ok := peerByID[ec.NodeID]; ok {
if ec.DERPRegion != 0 {
n.DERP = fmt.Sprintf("%s:%v", tailcfg.DerpMagicIP, ec.DERPRegion)
}
if ec.Cap != 0 {
n.Cap = ec.Cap
}
if ec.Endpoints != nil {
n.Endpoints = ec.Endpoints
}
if ec.Key != nil {
n.Key = *ec.Key
}
if ec.DiscoKey != nil {
n.DiscoKey = *ec.DiscoKey
}
if v := ec.Online; v != nil {
n.Online = ptrCopy(v)
}
if v := ec.LastSeen; v != nil {
n.LastSeen = ptrCopy(v)
}
if v := ec.KeyExpiry; v != nil {
n.KeyExpiry = *v
}
if v := ec.Capabilities; v != nil {
n.Capabilities = *v
}
if v := ec.KeySignature; v != nil {
n.KeySignature = v
}
}
}
}
mapRes.Peers = newFull
mapRes.PeersChanged = nil
mapRes.PeersRemoved = nil
}
// ptrCopy returns a pointer to a newly allocated shallow copy of *v.
func ptrCopy[T any](v *T) *T {
if v == nil {
return nil
}
ret := new(T)
*ret = *v
return ret
}
func nodesSorted(v []*tailcfg.Node) bool {
for i, n := range v {
if i > 0 && n.ID <= v[i-1].ID {
return false
}
}
return true
}

control/controlclient/map_test.go

@@ -21,10 +21,11 @@ import (
"tailscale.com/types/key"
"tailscale.com/types/netmap"
"tailscale.com/types/ptr"
"tailscale.com/util/mak"
"tailscale.com/util/must"
)
func TestUndeltaPeers(t *testing.T) {
func TestUpdatePeersStateFromResponse(t *testing.T) {
var curTime time.Time
online := func(v bool) func(*tailcfg.Node) {
@@ -56,11 +57,12 @@ func TestUndeltaPeers(t *testing.T) {
}
peers := func(nv ...*tailcfg.Node) []*tailcfg.Node { return nv }
tests := []struct {
name string
mapRes *tailcfg.MapResponse
curTime time.Time
prev []*tailcfg.Node
want []*tailcfg.Node
name string
mapRes *tailcfg.MapResponse
curTime time.Time
prev []*tailcfg.Node
want []*tailcfg.Node
wantStats updateStats
}{
{
name: "full_peers",
@@ -68,6 +70,10 @@ func TestUndeltaPeers(t *testing.T) {
Peers: peers(n(1, "foo"), n(2, "bar")),
},
want: peers(n(1, "foo"), n(2, "bar")),
wantStats: updateStats{
allNew: true,
added: 2,
},
},
{
name: "full_peers_ignores_deltas",
@@ -76,6 +82,10 @@ func TestUndeltaPeers(t *testing.T) {
PeersRemoved: []tailcfg.NodeID{2},
},
want: peers(n(1, "foo"), n(2, "bar")),
wantStats: updateStats{
allNew: true,
added: 2,
},
},
{
name: "add_and_update",
@@ -84,14 +94,21 @@ func TestUndeltaPeers(t *testing.T) {
PeersChanged: peers(n(0, "zero"), n(2, "bar2"), n(3, "three")),
},
want: peers(n(0, "zero"), n(1, "foo"), n(2, "bar2"), n(3, "three")),
wantStats: updateStats{
added: 2, // added IDs 0 and 3
changed: 1, // changed ID 2
},
},
{
name: "remove",
prev: peers(n(1, "foo"), n(2, "bar")),
mapRes: &tailcfg.MapResponse{
PeersRemoved: []tailcfg.NodeID{1},
PeersRemoved: []tailcfg.NodeID{1, 3, 4},
},
want: peers(n(2, "bar")),
wantStats: updateStats{
removed: 1, // ID 1
},
},
{
name: "add_and_remove",
@@ -101,6 +118,10 @@ func TestUndeltaPeers(t *testing.T) {
PeersRemoved: []tailcfg.NodeID{2},
},
want: peers(n(1, "foo2")),
wantStats: updateStats{
changed: 1,
removed: 1,
},
},
{
name: "unchanged",
@@ -113,13 +134,15 @@ func TestUndeltaPeers(t *testing.T) {
prev: peers(n(1, "foo"), n(2, "bar")),
mapRes: &tailcfg.MapResponse{
OnlineChange: map[tailcfg.NodeID]bool{
1: true,
1: true,
404: true,
},
},
want: peers(
n(1, "foo", online(true)),
n(2, "bar"),
),
wantStats: updateStats{changed: 1},
},
{
name: "online_change_offline",
@@ -134,6 +157,7 @@ func TestUndeltaPeers(t *testing.T) {
n(1, "foo", online(false)),
n(2, "bar", online(true)),
),
wantStats: updateStats{changed: 2},
},
{
name: "peer_seen_at",
@@ -149,6 +173,7 @@ func TestUndeltaPeers(t *testing.T) {
n(1, "foo"),
n(2, "bar", seenAt(time.Unix(123, 0))),
),
wantStats: updateStats{changed: 2},
},
{
name: "ep_change_derp",
@@ -159,7 +184,8 @@ func TestUndeltaPeers(t *testing.T) {
DERPRegion: 4,
}},
},
want: peers(n(1, "foo", withDERP("127.3.3.40:4"))),
want: peers(n(1, "foo", withDERP("127.3.3.40:4"))),
wantStats: updateStats{changed: 1},
},
{
name: "ep_change_udp",
@@ -170,10 +196,11 @@ func TestUndeltaPeers(t *testing.T) {
Endpoints: []string{"1.2.3.4:56"},
}},
},
want: peers(n(1, "foo", withEP("1.2.3.4:56"))),
want: peers(n(1, "foo", withEP("1.2.3.4:56"))),
wantStats: updateStats{changed: 1},
},
{
name: "ep_change_udp",
name: "ep_change_udp_2",
prev: peers(n(1, "foo", withDERP("127.3.3.40:3"), withEP("1.2.3.4:111"))),
mapRes: &tailcfg.MapResponse{
PeersChangedPatch: []*tailcfg.PeerChange{{
@@ -181,7 +208,8 @@ func TestUndeltaPeers(t *testing.T) {
Endpoints: []string{"1.2.3.4:56"},
}},
},
want: peers(n(1, "foo", withDERP("127.3.3.40:3"), withEP("1.2.3.4:56"))),
want: peers(n(1, "foo", withDERP("127.3.3.40:3"), withEP("1.2.3.4:56"))),
wantStats: updateStats{changed: 1},
},
{
name: "ep_change_both",
@@ -193,7 +221,8 @@ func TestUndeltaPeers(t *testing.T) {
Endpoints: []string{"1.2.3.4:56"},
}},
},
want: peers(n(1, "foo", withDERP("127.3.3.40:2"), withEP("1.2.3.4:56"))),
want: peers(n(1, "foo", withDERP("127.3.3.40:2"), withEP("1.2.3.4:56"))),
wantStats: updateStats{changed: 1},
},
{
name: "change_key",
@@ -208,6 +237,7 @@ func TestUndeltaPeers(t *testing.T) {
Name: "foo",
Key: key.NodePublicFromRaw32(mem.B(append(make([]byte, 31), 'A'))),
}),
wantStats: updateStats{changed: 1},
},
{
name: "change_key_signature",
@@ -217,11 +247,13 @@ func TestUndeltaPeers(t *testing.T) {
NodeID: 1,
KeySignature: []byte{3, 4},
}},
}, want: peers(&tailcfg.Node{
},
want: peers(&tailcfg.Node{
ID: 1,
Name: "foo",
KeySignature: []byte{3, 4},
}),
wantStats: updateStats{changed: 1},
},
{
name: "change_disco_key",
@@ -231,11 +263,13 @@ func TestUndeltaPeers(t *testing.T) {
NodeID: 1,
DiscoKey: ptr.To(key.DiscoPublicFromRaw32(mem.B(append(make([]byte, 31), 'A')))),
}},
}, want: peers(&tailcfg.Node{
},
want: peers(&tailcfg.Node{
ID: 1,
Name: "foo",
DiscoKey: key.DiscoPublicFromRaw32(mem.B(append(make([]byte, 31), 'A'))),
}),
wantStats: updateStats{changed: 1},
},
{
name: "change_online",
@@ -245,11 +279,13 @@ func TestUndeltaPeers(t *testing.T) {
NodeID: 1,
Online: ptr.To(true),
}},
}, want: peers(&tailcfg.Node{
},
want: peers(&tailcfg.Node{
ID: 1,
Name: "foo",
Online: ptr.To(true),
}),
wantStats: updateStats{changed: 1},
},
{
name: "change_last_seen",
@@ -259,11 +295,13 @@ func TestUndeltaPeers(t *testing.T) {
NodeID: 1,
LastSeen: ptr.To(time.Unix(123, 0).UTC()),
}},
}, want: peers(&tailcfg.Node{
},
want: peers(&tailcfg.Node{
ID: 1,
Name: "foo",
LastSeen: ptr.To(time.Unix(123, 0).UTC()),
}),
wantStats: updateStats{changed: 1},
},
{
name: "change_key_expiry",
@@ -273,11 +311,13 @@ func TestUndeltaPeers(t *testing.T) {
NodeID: 1,
KeyExpiry: ptr.To(time.Unix(123, 0).UTC()),
}},
}, want: peers(&tailcfg.Node{
},
want: peers(&tailcfg.Node{
ID: 1,
Name: "foo",
KeyExpiry: time.Unix(123, 0).UTC(),
}),
wantStats: updateStats{changed: 1},
},
{
name: "change_capabilities",
@@ -287,11 +327,13 @@ func TestUndeltaPeers(t *testing.T) {
NodeID: 1,
Capabilities: ptr.To([]string{"foo"}),
}},
}, want: peers(&tailcfg.Node{
},
want: peers(&tailcfg.Node{
ID: 1,
Name: "foo",
Capabilities: []string{"foo"},
}),
wantStats: updateStats{changed: 1},
}}
for _, tt := range tests {
@@ -300,9 +342,23 @@ func TestUndeltaPeers(t *testing.T) {
curTime = tt.curTime
tstest.Replace(t, &clock, tstime.Clock(tstest.NewClock(tstest.ClockOpts{Start: curTime})))
}
undeltaPeers(tt.mapRes, tt.prev)
if !reflect.DeepEqual(tt.mapRes.Peers, tt.want) {
t.Errorf("wrong results\n got: %s\nwant: %s", formatNodes(tt.mapRes.Peers), formatNodes(tt.want))
ms := newTestMapSession(t, nil)
for _, n := range tt.prev {
mak.Set(&ms.peers, n.ID, ptr.To(n.View()))
}
ms.rebuildSorted()
gotStats := ms.updatePeersStateFromResponse(tt.mapRes)
got := make([]*tailcfg.Node, len(ms.sortedPeers))
for i, vp := range ms.sortedPeers {
got[i] = vp.AsStruct()
}
if gotStats != tt.wantStats {
t.Errorf("got stats = %+v; want %+v", gotStats, tt.wantStats)
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("wrong results\n got: %s\nwant: %s", formatNodes(got), formatNodes(tt.want))
}
})
}
@@ -340,6 +396,11 @@ func newTestMapSession(t testing.TB, nu NetmapUpdater) *mapSession {
return ms
}
func (ms *mapSession) netmapForResponse(res *tailcfg.MapResponse) *netmap.NetworkMap {
ms.updateStateFromResponse(res)
return ms.netmap()
}
func TestNetmapForResponse(t *testing.T) {
t.Run("implicit_packetfilter", func(t *testing.T) {
somePacketFilter := []tailcfg.FilterRule{
@@ -454,7 +515,8 @@ func TestNetmapForResponse(t *testing.T) {
Node: someNode,
}
initDisplayNames(mapRes.Node.View(), mapRes)
nm1 := ms.netmapForResponse(mapRes)
ms.updateStateFromResponse(mapRes)
nm1 := ms.netmap()
if !nm1.SelfNode.Valid() {
t.Fatal("nil Node in 1st netmap")
}
@@ -463,7 +525,8 @@ func TestNetmapForResponse(t *testing.T) {
t.Errorf("Node mismatch in 1st netmap; got: %s", j)
}
nm2 := ms.netmapForResponse(&tailcfg.MapResponse{})
ms.updateStateFromResponse(&tailcfg.MapResponse{})
nm2 := ms.netmap()
if !nm2.SelfNode.Valid() {
t.Fatal("nil Node in 1st netmap")
}