2020-03-18 04:28:47 +00:00
// Copyright (c) 2019 Tailscale Inc & AUTHORS All rights reserved.
2020-02-05 22:16:58 +00:00
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package magicsock implements a socket that can change its communication path while
// in use, actively searching for the best way to communicate.
package magicsock
import (
2020-05-31 23:29:04 +01:00
"bufio"
2020-02-05 22:16:58 +00:00
"context"
2020-07-01 20:56:17 +01:00
crand "crypto/rand"
2020-02-05 22:16:58 +00:00
"encoding/binary"
2020-02-18 21:32:04 +00:00
"errors"
2020-02-05 22:16:58 +00:00
"fmt"
2020-03-04 06:21:56 +00:00
"hash/fnv"
2020-07-03 19:06:33 +01:00
"math"
2020-03-04 06:21:56 +00:00
"math/rand"
2020-02-05 22:16:58 +00:00
"net"
2020-05-17 17:51:38 +01:00
"reflect"
2021-10-20 20:14:19 +01:00
"runtime"
2020-03-23 21:12:23 +00:00
"sort"
2020-02-21 22:01:51 +00:00
"strconv"
2020-02-05 22:16:58 +00:00
"strings"
"sync"
2020-02-18 16:57:11 +00:00
"sync/atomic"
2020-02-05 22:16:58 +00:00
"time"
2021-10-28 19:07:25 +01:00
"go4.org/mem"
2021-05-25 20:42:22 +01:00
"golang.zx2c4.com/wireguard/conn"
2020-04-17 21:51:52 +01:00
"inet.af/netaddr"
2020-06-25 19:04:52 +01:00
"tailscale.com/control/controlclient"
2020-02-21 03:10:54 +00:00
"tailscale.com/derp"
2020-02-05 22:16:58 +00:00
"tailscale.com/derp/derphttp"
2020-06-30 20:22:42 +01:00
"tailscale.com/disco"
2021-02-25 05:29:51 +00:00
"tailscale.com/health"
2020-03-26 05:57:46 +00:00
"tailscale.com/ipn/ipnstate"
2020-10-19 23:11:40 +01:00
"tailscale.com/logtail/backoff"
2020-03-05 18:29:19 +00:00
"tailscale.com/net/dnscache"
2020-03-10 18:02:30 +00:00
"tailscale.com/net/interfaces"
2020-05-25 17:15:50 +01:00
"tailscale.com/net/netcheck"
2021-12-30 19:11:50 +00:00
"tailscale.com/net/neterror"
2020-05-28 23:27:04 +01:00
"tailscale.com/net/netns"
2021-02-20 06:15:41 +00:00
"tailscale.com/net/portmapper"
2020-05-25 17:15:50 +01:00
"tailscale.com/net/stun"
2021-12-29 02:01:50 +00:00
"tailscale.com/net/tsaddr"
2020-03-12 18:16:54 +00:00
"tailscale.com/syncs"
2020-03-04 06:21:56 +00:00
"tailscale.com/tailcfg"
2021-01-20 17:52:24 +00:00
"tailscale.com/tstime"
2021-07-21 19:04:36 +01:00
"tailscale.com/tstime/mono"
2020-02-17 21:52:11 +00:00
"tailscale.com/types/key"
2020-03-04 06:21:56 +00:00
"tailscale.com/types/logger"
2021-02-05 23:44:46 +00:00
"tailscale.com/types/netmap"
2020-07-10 22:26:04 +01:00
"tailscale.com/types/nettype"
2020-12-14 02:51:24 +00:00
"tailscale.com/util/clientmetric"
2021-04-27 22:40:29 +01:00
"tailscale.com/util/uniq"
2020-03-02 20:37:52 +00:00
"tailscale.com/version"
2021-03-15 20:58:10 +00:00
"tailscale.com/wgengine/monitor"
2020-02-05 22:16:58 +00:00
)
2020-08-17 20:56:17 +01:00
// useDerpRoute reports whether magicsock should enable the DERP
// return path optimization (Issue 150).
//
// A debug environment override wins; otherwise the control-plane
// flag is consulted, defaulting to false when unset.
func useDerpRoute() bool {
	if debugUseDerpRouteEnv != "" {
		return debugUseDerpRoute
	}
	if v, ok := controlclient.DERPRouteFlag().Get(); ok {
		return v
	}
	return false
}
2021-08-26 03:39:20 +01:00
// peerInfo is all the information magicsock tracks about a particular
// peer.
type peerInfo struct {
	ep *endpoint // always non-nil.

	// ipPorts is an inverted version of peerMap.byIPPort (below), so
	// that when we're deleting this node, we can rapidly find out the
	// keys that need deleting from peerMap.byIPPort without having to
	// iterate over every IPPort known for any peer.
	ipPorts map[netaddr.IPPort]bool
}
2021-10-18 18:22:56 +01:00
func newPeerInfo ( ep * endpoint ) * peerInfo {
2021-08-26 03:39:20 +01:00
return & peerInfo {
2021-10-18 18:22:56 +01:00
ep : ep ,
2021-08-26 03:39:20 +01:00
ipPorts : map [ netaddr . IPPort ] bool { } ,
}
}
// peerMap is an index of peerInfos by node (WireGuard) key, disco
// key, and discovered ip:port endpoints.
//
// Doesn't do any locking, all access must be done with Conn.mu held.
type peerMap struct {
	// byNodeKey maps a peer's WireGuard node public key to its info.
	byNodeKey map[key.NodePublic]*peerInfo
	// byIPPort maps a discovered ip:port endpoint to the peer
	// believed to be reachable there.
	byIPPort map[netaddr.IPPort]*peerInfo

	// nodesOfDisco contains the set of nodes that are using a
	// DiscoKey. Usually those sets will be just one node.
	nodesOfDisco map[key.DiscoPublic]map[key.NodePublic]bool
}
func newPeerMap ( ) peerMap {
return peerMap {
2021-11-02 00:53:40 +00:00
byNodeKey : map [ key . NodePublic ] * peerInfo { } ,
2021-10-17 19:31:21 +01:00
byIPPort : map [ netaddr . IPPort ] * peerInfo { } ,
2021-11-02 00:53:40 +00:00
nodesOfDisco : map [ key . DiscoPublic ] map [ key . NodePublic ] bool { } ,
2021-08-26 03:39:20 +01:00
}
}
// nodeCount returns the number of nodes currently in m.
//
// Like all peerMap methods, it must be called with Conn.mu held.
func (m *peerMap) nodeCount() int {
	return len(m.byNodeKey)
}
2021-10-15 22:40:49 +01:00
// anyEndpointForDiscoKey reports whether there exists any
// peers in the netmap with dk as their DiscoKey.
2021-10-29 22:27:29 +01:00
func ( m * peerMap ) anyEndpointForDiscoKey ( dk key . DiscoPublic ) bool {
2021-10-18 23:31:08 +01:00
return len ( m . nodesOfDisco [ dk ] ) > 0
2021-10-15 22:40:49 +01:00
}
2021-08-26 06:20:31 +01:00
// endpointForNodeKey returns the endpoint for nk, or nil if
2021-08-26 03:39:20 +01:00
// nk is not known to us.
2021-11-02 00:53:40 +00:00
func ( m * peerMap ) endpointForNodeKey ( nk key . NodePublic ) ( ep * endpoint , ok bool ) {
2021-08-26 03:39:20 +01:00
if nk . IsZero ( ) {
return nil , false
}
2021-10-18 22:27:39 +01:00
if info , ok := m . byNodeKey [ nk ] ; ok {
2021-08-26 03:39:20 +01:00
return info . ep , true
}
return nil , false
}
2021-08-26 06:20:31 +01:00
// endpointForIPPort returns the endpoint for the peer we
2021-08-26 03:39:20 +01:00
// believe to be at ipp, or nil if we don't know of any such peer.
2021-08-26 06:20:31 +01:00
func ( m * peerMap ) endpointForIPPort ( ipp netaddr . IPPort ) ( ep * endpoint , ok bool ) {
2021-10-18 22:27:39 +01:00
if info , ok := m . byIPPort [ ipp ] ; ok {
2021-08-26 03:39:20 +01:00
return info . ep , true
}
return nil , false
}
2021-10-16 06:25:29 +01:00
// forEachEndpoint invokes f on every endpoint in m.
func ( m * peerMap ) forEachEndpoint ( f func ( ep * endpoint ) ) {
2021-08-26 03:39:20 +01:00
for _ , pi := range m . byNodeKey {
2021-10-18 22:27:39 +01:00
f ( pi . ep )
2021-08-26 03:39:20 +01:00
}
}
2021-10-16 05:44:52 +01:00
// forEachEndpointWithDiscoKey invokes f on every endpoint in m
// that has the provided DiscoKey.
2021-10-29 22:27:29 +01:00
func ( m * peerMap ) forEachEndpointWithDiscoKey ( dk key . DiscoPublic , f func ( ep * endpoint ) ) {
2021-10-18 22:38:03 +01:00
for nk := range m . nodesOfDisco [ dk ] {
pi , ok := m . byNodeKey [ nk ]
if ! ok {
// Unexpected. Data structures would have to
// be out of sync. But we don't have a logger
// here to log [unexpected], so just skip.
// Maybe log later once peerMap is merged back
// into Conn.
continue
2021-10-16 05:44:52 +01:00
}
2021-10-18 22:38:03 +01:00
f ( pi . ep )
2021-10-16 05:44:52 +01:00
}
}
2021-10-16 06:25:29 +01:00
// upsertEndpoint stores endpoint in the peerInfo for
2021-08-26 03:39:20 +01:00
// ep.publicKey, and updates indexes. m must already have a
// tailcfg.Node for ep.publicKey.
2021-11-10 22:03:47 +00:00
func ( m * peerMap ) upsertEndpoint ( ep * endpoint , oldDiscoKey key . DiscoPublic ) {
2021-11-11 02:20:41 +00:00
if m . byNodeKey [ ep . publicKey ] == nil {
m . byNodeKey [ ep . publicKey ] = newPeerInfo ( ep )
}
if oldDiscoKey != ep . discoKey {
delete ( m . nodesOfDisco [ oldDiscoKey ] , ep . publicKey )
2021-10-17 19:31:21 +01:00
}
if ! ep . discoKey . IsZero ( ) {
set := m . nodesOfDisco [ ep . discoKey ]
if set == nil {
2021-11-02 00:53:40 +00:00
set = map [ key . NodePublic ] bool { }
2021-10-17 19:31:21 +01:00
m . nodesOfDisco [ ep . discoKey ] = set
}
set [ ep . publicKey ] = true
2021-08-26 03:39:20 +01:00
}
}
2021-10-18 21:29:09 +01:00
// setNodeKeyForIPPort makes future peer lookups by ipp return the
// same endpoint as a lookup by nk.
//
// This should only be called with a fully verified mapping of ipp to
// nk, because calling this function defines the endpoint we hand to
// WireGuard for packets received from ipp.
2021-11-02 00:53:40 +00:00
func ( m * peerMap ) setNodeKeyForIPPort ( ipp netaddr . IPPort , nk key . NodePublic ) {
2021-08-26 03:39:20 +01:00
if pi := m . byIPPort [ ipp ] ; pi != nil {
delete ( pi . ipPorts , ipp )
delete ( m . byIPPort , ipp )
}
2021-10-18 21:29:09 +01:00
if pi , ok := m . byNodeKey [ nk ] ; ok {
2021-08-26 03:39:20 +01:00
pi . ipPorts [ ipp ] = true
m . byIPPort [ ipp ] = pi
}
}
2021-10-16 06:25:29 +01:00
// deleteEndpoint deletes the peerInfo associated with ep, and
2021-08-26 03:39:20 +01:00
// updates indexes.
2021-10-16 06:25:29 +01:00
func ( m * peerMap ) deleteEndpoint ( ep * endpoint ) {
2021-08-26 03:39:20 +01:00
if ep == nil {
return
}
ep . stopAndReset ( )
2021-09-08 16:12:29 +01:00
pi := m . byNodeKey [ ep . publicKey ]
2021-10-17 19:31:21 +01:00
delete ( m . nodesOfDisco [ ep . discoKey ] , ep . publicKey )
2021-08-26 03:39:20 +01:00
delete ( m . byNodeKey , ep . publicKey )
2021-09-08 16:12:29 +01:00
if pi == nil {
// Kneejerk paranoia from earlier issue 2801.
// Unexpected. But no logger plumbed here to log so.
return
}
2021-08-26 03:39:20 +01:00
for ip := range pi . ipPorts {
delete ( m . byIPPort , ip )
}
}
2020-02-05 22:16:58 +00:00
// A Conn routes UDP packets and actively manages a list of its endpoints.
// It implements wireguard/conn.Bind.
type Conn struct {
	// This block mirrors the contents and field order of the Options
	// struct. Initialized once at construction, then constant.
	logf                   logger.Logf
	epFunc                 func([]tailcfg.Endpoint)
	derpActiveFunc         func()
	idleFunc               func() time.Duration // nil means unknown
	testOnlyPacketListener nettype.PacketListener
	noteRecvActivity       func(key.NodePublic) // or nil, see Options.NoteRecvActivity
	linkMon                *monitor.Mon         // or nil

	// ================================================================
	// No locking required to access these fields, either because
	// they're static after construction, or are wholly owned by a
	// single goroutine.

	connCtx       context.Context // closed on Conn.Close
	connCtxCancel func()          // closes connCtx
	donec         <-chan struct{} // connCtx.Done()'s to avoid context.cancelCtx.Done()'s mutex per call

	// pconn4 and pconn6 are the underlying UDP sockets used to
	// send/receive packets for wireguard and other magicsock
	// protocols.
	pconn4 *RebindingUDPConn
	pconn6 *RebindingUDPConn

	// netChecker is the prober that discovers local network
	// conditions, including the closest DERP relay and NAT mappings.
	netChecker *netcheck.Client

	// portMapper is the NAT-PMP/PCP/UPnP prober/client, for requesting
	// port mappings from NAT devices.
	portMapper *portmapper.Client

	// stunReceiveFunc holds the current STUN packet processing func.
	// Its Loaded value is always non-nil.
	stunReceiveFunc atomic.Value // of func(p []byte, fromAddr *net.UDPAddr)

	// derpRecvCh is used by receiveDERP to read DERP messages.
	derpRecvCh chan derpReadResult

	// bind is the wireguard-go conn.Bind for Conn.
	bind *connBind

	// ippEndpoint4 and ippEndpoint6 are owned by receiveIPv4 and
	// receiveIPv6, respectively, to cache an IPPort->endpoint for
	// hot flows.
	ippEndpoint4, ippEndpoint6 ippEndpointCache

	// ============================================================
	// Fields that must be accessed via atomic load/stores.

	// noV4 and noV6 are whether IPv4 and IPv6 are known to be
	// missing. They're only used to suppress log spam. The name
	// is named negatively because in early start-up, we don't yet
	// necessarily have a netcheck.Report and don't want to skip
	// logging.
	noV4, noV6 syncs.AtomicBool

	// noV4Send is whether IPv4 UDP is known to be unable to transmit
	// at all. This could happen if the socket is in an invalid state
	// (as can happen on darwin after a network link status change).
	noV4Send syncs.AtomicBool

	// networkUp is whether the network is up (some interface is up
	// with IPv4 or IPv6). It's used to suppress log spam and prevent
	// new connection that'll fail.
	networkUp syncs.AtomicBool

	// havePrivateKey is whether privateKey is non-zero.
	havePrivateKey  syncs.AtomicBool
	publicKeyAtomic atomic.Value // of key.NodePublic (or NodeKey zero value if !havePrivateKey)

	// derpMapAtomic is the same as derpMap, but without requiring
	// sync.Mutex. For use with NewRegionClient's callback, to avoid
	// lock ordering deadlocks. See issue 3726 and mu field docs.
	derpMapAtomic atomic.Value // of *tailcfg.DERPMap

	// port is the preferred port from opts.Port; 0 means auto.
	port syncs.AtomicUint32

	// ============================================================
	// mu guards all following fields; see userspaceEngine lock
	// ordering rules against the engine. For derphttp, mu must
	// be held before derphttp.Client.mu.
	mu     sync.Mutex
	muCond *sync.Cond

	closed bool // Close was called

	// derpCleanupTimer is the timer that fires to occasionally clean
	// up idle DERP connections. It's only used when there is a non-home
	// DERP connection in use.
	derpCleanupTimer *time.Timer

	// derpCleanupTimerArmed is whether derpCleanupTimer is
	// scheduled to fire within derpCleanStaleInterval.
	derpCleanupTimerArmed bool

	// periodicReSTUNTimer, when non-nil, is an AfterFunc timer
	// that will call Conn.doPeriodicSTUN.
	periodicReSTUNTimer *time.Timer

	// endpointsUpdateActive indicates that updateEndpoints is
	// currently running. It's used to deduplicate concurrent endpoint
	// update requests.
	endpointsUpdateActive bool

	// wantEndpointsUpdate, if non-empty, means that a new endpoints
	// update should begin immediately after the currently-running one
	// completes. It can only be non-empty if
	// endpointsUpdateActive==true.
	wantEndpointsUpdate string // true if non-empty; string is reason

	// lastEndpoints records the endpoints found during the previous
	// endpoint discovery. It's used to avoid duplicate endpoint
	// change notifications.
	lastEndpoints []tailcfg.Endpoint

	// lastEndpointsTime is the last time the endpoints were updated,
	// even if there was no change.
	lastEndpointsTime time.Time

	// onEndpointRefreshed are funcs to run (in their own goroutines)
	// when endpoints are refreshed.
	onEndpointRefreshed map[*endpoint]func()

	// peerSet is the set of peers that are currently configured in
	// WireGuard. These are not used to filter inbound or outbound
	// traffic at all, but only to track what state can be cleaned up
	// in other maps below that are keyed by peer public key.
	peerSet map[key.NodePublic]struct{}

	// discoPrivate is the private naclbox key used for active
	// discovery traffic. It's created once near (but not during)
	// construction.
	discoPrivate key.DiscoPrivate
	discoPublic  key.DiscoPublic // public of discoPrivate
	discoShort   string          // ShortString of discoPublic (to save logging work later)

	// peerMap tracks the peerInfo for each peer, indexed by node
	// key, disco key, and discovered ip:port endpoints.
	peerMap peerMap

	// discoInfo is the state for an active DiscoKey.
	discoInfo map[key.DiscoPublic]*discoInfo

	// netInfoFunc is a callback that provides a tailcfg.NetInfo when
	// discovered network conditions change.
	//
	// TODO(danderson): why can't it be set at construction time?
	// There seem to be a few natural places in ipn/local.go to
	// swallow untimely invocations.
	netInfoFunc func(*tailcfg.NetInfo) // nil until set

	// netInfoLast is the NetInfo provided in the last call to
	// netInfoFunc. It's used to deduplicate calls to netInfoFunc.
	//
	// TODO(danderson): should all the deduping happen in
	// ipn/local.go? We seem to be doing dedupe at several layers, and
	// magicsock could do with any complexity reduction it can get.
	netInfoLast *tailcfg.NetInfo

	derpMap     *tailcfg.DERPMap // nil (or zero regions/nodes) means DERP is disabled
	netMap      *netmap.NetworkMap
	privateKey  key.NodePrivate    // WireGuard private key for this node
	everHadKey  bool               // whether we ever had a non-zero private key
	myDerp      int                // nearest DERP region ID; 0 means none/unknown
	derpStarted chan struct{}      // closed on first connection to DERP; for tests & cleaner Close
	activeDerp  map[int]activeDerp // DERP regionID -> connection to a node in that region
	prevDerp    map[int]*syncs.WaitGroupChan

	// derpRoute contains optional alternate routes to use as an
	// optimization instead of contacting a peer via their home
	// DERP connection. If they sent us a message on a different
	// DERP connection (which should really only be on our DERP
	// home connection, or what was once our home), then we
	// remember that route here to optimistically use instead of
	// creating a new DERP connection back to their home.
	derpRoute map[key.NodePublic]derpRoute

	// peerLastDerp tracks which DERP node we last used to speak with a
	// peer. It's only used to quiet logging, so we only log on change.
	peerLastDerp map[key.NodePublic]int
}
// derpRoute is a route entry for a public key, saying that a certain
// peer should be available at DERP node derpID, as long as the
// current connection for that derpID is dc. (but dc should not be
// used to write directly; it's owned by the read/write loops)
type derpRoute struct {
	derpID int
	dc     *derphttp.Client // don't use directly; see comment above
}
// removeDerpPeerRoute removes a DERP route entry previously added by addDerpPeerRoute.
2021-10-29 21:15:27 +01:00
func ( c * Conn ) removeDerpPeerRoute ( peer key . NodePublic , derpID int , dc * derphttp . Client ) {
2020-03-22 01:24:28 +00:00
c . mu . Lock ( )
defer c . mu . Unlock ( )
r2 := derpRoute { derpID , dc }
if r , ok := c . derpRoute [ peer ] ; ok && r == r2 {
delete ( c . derpRoute , peer )
}
}
// addDerpPeerRoute adds a DERP route entry, noting that peer was seen
// on DERP node derpID, at least on the connection identified by dc.
// See issue 150 for details.
2021-10-29 21:15:27 +01:00
func ( c * Conn ) addDerpPeerRoute ( peer key . NodePublic , derpID int , dc * derphttp . Client ) {
2020-03-22 01:24:28 +00:00
c . mu . Lock ( )
defer c . mu . Unlock ( )
if c . derpRoute == nil {
2021-10-29 21:15:27 +01:00
c . derpRoute = make ( map [ key . NodePublic ] derpRoute )
2020-03-22 01:24:28 +00:00
}
2020-03-23 21:12:23 +00:00
r := derpRoute { derpID , dc }
c . derpRoute [ peer ] = r
2020-02-05 22:16:58 +00:00
}
2020-03-09 22:20:33 +00:00
// DerpMagicIP is a fake WireGuard endpoint IP address that means
// to use DERP. When used, the port number of the WireGuard endpoint
// is the DERP server number to use.
//
// Mnemonic: 3.3.40 are numbers above the keys D, E, R, P.
const DerpMagicIP = "127.3.3.40"

// derpMagicIPAddr is DerpMagicIP in parsed (netaddr.IP) form.
var derpMagicIPAddr = netaddr.MustParseIP(DerpMagicIP)
2020-03-09 22:20:33 +00:00
2020-03-05 16:54:08 +00:00
// activeDerp contains fields for an active DERP connection.
type activeDerp struct {
	c       *derphttp.Client
	cancel  context.CancelFunc
	writeCh chan<- derpWriteRequest

	// lastWrite is the time of the last request for its write
	// channel (currently even if there was no write).
	// It is always non-nil and initialized to a non-zero Time.
	lastWrite  *time.Time
	createTime time.Time
}
2020-02-05 22:16:58 +00:00
// Options contains options for Listen.
type Options struct {
	// Logf provides a log function to use.
	// Must not be nil.
	Logf logger.Logf

	// Port is the port to listen on.
	// Zero means to pick one automatically.
	Port uint16

	// EndpointsFunc optionally provides a func to be called when
	// endpoints change. The called func does not own the slice.
	EndpointsFunc func([]tailcfg.Endpoint)

	// DERPActiveFunc optionally provides a func to be called when
	// a connection is made to a DERP server.
	DERPActiveFunc func()

	// IdleFunc optionally provides a func to return how long
	// it's been since a TUN packet was sent or received.
	IdleFunc func() time.Duration

	// TestOnlyPacketListener optionally specifies how to create PacketConns.
	// Only used by tests.
	TestOnlyPacketListener nettype.PacketListener

	// NoteRecvActivity, if provided, is a func for magicsock to call
	// whenever it receives a packet from a a peer if it's been more
	// than ~10 seconds since the last one. (10 seconds is somewhat
	// arbitrary; the sole user just doesn't need or want it called on
	// every packet, just every minute or two for Wireguard timeouts,
	// and 10 seconds seems like a good trade-off between often enough
	// and not too often.)
	// The provided func is likely to call back into
	// Conn.ParseEndpoint, which acquires Conn.mu. As such, you should
	// not hold Conn.mu while calling it.
	NoteRecvActivity func(key.NodePublic)

	// LinkMonitor is the link monitor to use.
	// With one, the portmapper won't be used.
	LinkMonitor *monitor.Mon
}
2020-03-19 16:39:00 +00:00
func ( o * Options ) logf ( ) logger . Logf {
Add tstest.PanicOnLog(), and fix various problems detected by this.
If a test calls log.Printf, 'go test' horrifyingly rearranges the
output to no longer be in chronological order, which makes debugging
virtually impossible. Let's stop that from happening by making
log.Printf panic if called from any module, no matter how deep, during
tests.
This required us to change the default error handler in at least one
http.Server, as well as plumbing a bunch of logf functions around,
especially in magicsock and wgengine, but also in logtail and backoff.
To add insult to injury, 'go test' also rearranges the output when a
parent test has multiple sub-tests (all the sub-test's t.Logf is always
printed after all the parent tests t.Logf), so we need to screw around
with a special Logf that can point at the "current" t (current_t.Logf)
in some places. Probably our entire way of using subtests is wrong,
since 'go test' would probably like to run them all in parallel if you
called t.Parallel(), but it definitely can't because the're all
manipulating the shared state created by the parent test. They should
probably all be separate toplevel tests instead, with common
setup/teardown logic. But that's a job for another time.
Signed-off-by: Avery Pennarun <apenwarr@tailscale.com>
2020-05-14 03:59:54 +01:00
if o . Logf == nil {
panic ( "must provide magicsock.Options.logf" )
2020-03-19 16:39:00 +00:00
}
Add tstest.PanicOnLog(), and fix various problems detected by this.
If a test calls log.Printf, 'go test' horrifyingly rearranges the
output to no longer be in chronological order, which makes debugging
virtually impossible. Let's stop that from happening by making
log.Printf panic if called from any module, no matter how deep, during
tests.
This required us to change the default error handler in at least one
http.Server, as well as plumbing a bunch of logf functions around,
especially in magicsock and wgengine, but also in logtail and backoff.
To add insult to injury, 'go test' also rearranges the output when a
parent test has multiple sub-tests (all the sub-test's t.Logf is always
printed after all the parent tests t.Logf), so we need to screw around
with a special Logf that can point at the "current" t (current_t.Logf)
in some places. Probably our entire way of using subtests is wrong,
since 'go test' would probably like to run them all in parallel if you
called t.Parallel(), but it definitely can't because the're all
manipulating the shared state created by the parent test. They should
probably all be separate toplevel tests instead, with common
setup/teardown logic. But that's a job for another time.
Signed-off-by: Avery Pennarun <apenwarr@tailscale.com>
2020-05-14 03:59:54 +01:00
return o . Logf
2020-03-19 16:39:00 +00:00
}
tailcfg: add Endpoint, EndpointType, MapRequest.EndpointType
Track endpoints internally with a new tailcfg.Endpoint type that
includes a typed netaddr.IPPort (instead of just a string) and
includes a type for how that endpoint was discovered (STUN, local,
etc).
Use []tailcfg.Endpoint instead of []string internally.
At the last second, send it to the control server as the existing
[]string for endpoints, but also include a new parallel
MapRequest.EndpointType []tailcfg.EndpointType, so the control server
can start filtering out less-important endpoint changes from
new-enough clients. Notably, STUN-discovered endpoints can be filtered
out from 1.6+ clients, as they can discover them amongst each other
via CallMeMaybe disco exchanges started over DERP. And STUN endpoints
change a lot, causing a lot of MapResposne updates. But portmapped
endpoints are worth keeping for now, as they they work right away
without requiring the firewall traversal extra RTT dance.
End result will be less control->client bandwidth. (despite negligible
increase in client->control bandwidth)
Updates tailscale/corp#1543
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2021-04-12 21:24:29 +01:00
func ( o * Options ) endpointsFunc ( ) func ( [ ] tailcfg . Endpoint ) {
2020-02-05 22:16:58 +00:00
if o == nil || o . EndpointsFunc == nil {
tailcfg: add Endpoint, EndpointType, MapRequest.EndpointType
Track endpoints internally with a new tailcfg.Endpoint type that
includes a typed netaddr.IPPort (instead of just a string) and
includes a type for how that endpoint was discovered (STUN, local,
etc).
Use []tailcfg.Endpoint instead of []string internally.
At the last second, send it to the control server as the existing
[]string for endpoints, but also include a new parallel
MapRequest.EndpointType []tailcfg.EndpointType, so the control server
can start filtering out less-important endpoint changes from
new-enough clients. Notably, STUN-discovered endpoints can be filtered
out from 1.6+ clients, as they can discover them amongst each other
via CallMeMaybe disco exchanges started over DERP. And STUN endpoints
change a lot, causing a lot of MapResposne updates. But portmapped
endpoints are worth keeping for now, as they they work right away
without requiring the firewall traversal extra RTT dance.
End result will be less control->client bandwidth. (despite negligible
increase in client->control bandwidth)
Updates tailscale/corp#1543
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2021-04-12 21:24:29 +01:00
return func ( [ ] tailcfg . Endpoint ) { }
2020-02-05 22:16:58 +00:00
}
return o . EndpointsFunc
}
2020-08-25 21:21:29 +01:00
func ( o * Options ) derpActiveFunc ( ) func ( ) {
if o == nil || o . DERPActiveFunc == nil {
return func ( ) { }
}
return o . DERPActiveFunc
}
2020-05-17 17:51:38 +01:00
// newConn is the error-free, network-listening-side-effect-free based
// of NewConn. Mostly for tests.
func newConn ( ) * Conn {
2020-02-05 22:16:58 +00:00
c := & Conn {
2021-10-16 04:45:33 +01:00
derpRecvCh : make ( chan derpReadResult ) ,
derpStarted : make ( chan struct { } ) ,
2021-10-29 21:15:27 +01:00
peerLastDerp : make ( map [ key . NodePublic ] int ) ,
2021-10-16 04:45:33 +01:00
peerMap : newPeerMap ( ) ,
2021-10-29 22:27:29 +01:00
discoInfo : make ( map [ key . DiscoPublic ] * discoInfo ) ,
2020-02-05 22:16:58 +00:00
}
2021-03-24 16:41:57 +00:00
c . bind = & connBind { Conn : c , closed : true }
2020-08-04 17:36:38 +01:00
c . muCond = sync . NewCond ( & c . mu )
2020-10-06 23:22:46 +01:00
c . networkUp . Set ( true ) // assume up until told otherwise
2020-05-17 17:51:38 +01:00
return c
}
// NewConn creates a magic Conn listening on opts.Port.
// As the set of possible endpoints for a Conn changes, the
// callback opts.EndpointsFunc is called.
//
// It doesn't start doing anything until Start is called.
func NewConn ( opts Options ) ( * Conn , error ) {
c := newConn ( )
2021-06-22 21:00:40 +01:00
c . port . Set ( uint32 ( opts . Port ) )
2020-05-17 17:51:38 +01:00
c . logf = opts . logf ( )
c . epFunc = opts . endpointsFunc ( )
2020-08-25 21:21:29 +01:00
c . derpActiveFunc = opts . derpActiveFunc ( )
2020-06-25 22:19:12 +01:00
c . idleFunc = opts . IdleFunc
2021-08-26 06:26:25 +01:00
c . testOnlyPacketListener = opts . TestOnlyPacketListener
2020-07-23 23:15:28 +01:00
c . noteRecvActivity = opts . NoteRecvActivity
2021-07-09 18:01:50 +01:00
c . portMapper = portmapper . NewClient ( logger . WithPrefix ( c . logf , "portmapper: " ) , c . onPortMapChanged )
2021-03-15 20:58:10 +00:00
if opts . LinkMonitor != nil {
c . portMapper . SetGatewayLookupFunc ( opts . LinkMonitor . GatewayAndSelfIP )
}
2021-12-29 02:01:50 +00:00
c . linkMon = opts . LinkMonitor
2020-03-19 16:39:00 +00:00
if err := c . initialBind ( ) ; err != nil {
return nil , err
}
c . connCtx , c . connCtxCancel = context . WithCancel ( context . Background ( ) )
2021-01-16 03:13:59 +00:00
c . donec = c . connCtx . Done ( )
2020-03-09 22:20:33 +00:00
c . netChecker = & netcheck . Client {
2020-10-28 15:23:12 +00:00
Logf : logger . WithPrefix ( c . logf , "netcheck: " ) ,
GetSTUNConn4 : func ( ) netcheck . STUNConn { return c . pconn4 } ,
SkipExternalNetwork : inTest ( ) ,
2021-02-20 06:15:41 +00:00
PortMapper : c . portMapper ,
2020-03-19 16:39:00 +00:00
}
2021-02-20 06:15:41 +00:00
2020-03-19 16:39:00 +00:00
if c . pconn6 != nil {
c . netChecker . GetSTUNConn6 = func ( ) netcheck . STUNConn { return c . pconn6 }
2020-03-09 22:20:33 +00:00
}
2020-02-18 16:57:11 +00:00
c . ignoreSTUNPackets ( )
2020-05-17 17:51:38 +01:00
return c , nil
}
2020-02-18 16:57:11 +00:00
// ignoreSTUNPackets sets a STUN packet processing func that does nothing.
func ( c * Conn ) ignoreSTUNPackets ( ) {
2020-06-30 21:25:13 +01:00
c . stunReceiveFunc . Store ( func ( [ ] byte , netaddr . IPPort ) { } )
2020-02-18 16:57:11 +00:00
}
2021-01-20 17:52:24 +00:00
// doPeriodicSTUN is called (in a new goroutine) by
// periodicReSTUNTimer when periodic STUNs are active.
func ( c * Conn ) doPeriodicSTUN ( ) { c . ReSTUN ( "periodic" ) }
// stopPeriodicReSTUNTimerLocked stops and clears the periodic
// re-STUN timer, if one is pending.
//
// c.mu must be held.
func (c *Conn) stopPeriodicReSTUNTimerLocked() {
	t := c.periodicReSTUNTimer
	if t == nil {
		return
	}
	t.Stop()
	c.periodicReSTUNTimer = nil
}
2020-03-13 15:55:38 +00:00
// c.mu must NOT be held.
func ( c * Conn ) updateEndpoints ( why string ) {
defer func ( ) {
c . mu . Lock ( )
defer c . mu . Unlock ( )
why := c . wantEndpointsUpdate
c . wantEndpointsUpdate = ""
2021-01-20 17:52:24 +00:00
if ! c . closed {
if why != "" {
go c . updateEndpoints ( why )
return
}
if c . shouldDoPeriodicReSTUNLocked ( ) {
// Pick a random duration between 20
// and 26 seconds (just under 30s, a
// common UDP NAT timeout on Linux,
// etc)
d := tstime . RandomDurationBetween ( 20 * time . Second , 26 * time . Second )
if t := c . periodicReSTUNTimer ; t != nil {
if debugReSTUNStopOnIdle {
c . logf ( "resetting existing periodicSTUN to run in %v" , d )
}
t . Reset ( d )
} else {
if debugReSTUNStopOnIdle {
c . logf ( "scheduling periodicSTUN to run in %v" , d )
}
c . periodicReSTUNTimer = time . AfterFunc ( d , c . doPeriodicSTUN )
}
} else {
if debugReSTUNStopOnIdle {
c . logf ( "periodic STUN idle" )
}
c . stopPeriodicReSTUNTimerLocked ( )
}
2020-02-05 22:16:58 +00:00
}
2021-01-20 17:52:24 +00:00
c . endpointsUpdateActive = false
c . muCond . Broadcast ( )
2020-03-13 15:55:38 +00:00
} ( )
2020-12-21 18:58:06 +00:00
c . logf ( "[v1] magicsock: starting endpoint update (%s)" , why )
2021-10-22 17:09:37 +01:00
if c . noV4Send . Get ( ) && runtime . GOOS != "js" {
2021-10-16 21:59:13 +01:00
c . mu . Lock ( )
closed := c . closed
c . mu . Unlock ( )
if ! closed {
c . logf ( "magicsock: last netcheck reported send error. Rebinding." )
c . Rebind ( )
}
2021-10-07 01:43:37 +01:00
}
2020-02-05 22:16:58 +00:00
tailcfg: add Endpoint, EndpointType, MapRequest.EndpointType
Track endpoints internally with a new tailcfg.Endpoint type that
includes a typed netaddr.IPPort (instead of just a string) and
includes a type for how that endpoint was discovered (STUN, local,
etc).
Use []tailcfg.Endpoint instead of []string internally.
At the last second, send it to the control server as the existing
[]string for endpoints, but also include a new parallel
MapRequest.EndpointType []tailcfg.EndpointType, so the control server
can start filtering out less-important endpoint changes from
new-enough clients. Notably, STUN-discovered endpoints can be filtered
out from 1.6+ clients, as they can discover them amongst each other
via CallMeMaybe disco exchanges started over DERP. And STUN endpoints
change a lot, causing a lot of MapResposne updates. But portmapped
endpoints are worth keeping for now, as they they work right away
without requiring the firewall traversal extra RTT dance.
End result will be less control->client bandwidth. (despite negligible
increase in client->control bandwidth)
Updates tailscale/corp#1543
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2021-04-12 21:24:29 +01:00
endpoints , err := c . determineEndpoints ( c . connCtx )
2020-03-13 15:55:38 +00:00
if err != nil {
2020-03-23 21:12:23 +00:00
c . logf ( "magicsock: endpoint update (%s) failed: %v" , why , err )
2020-03-13 15:55:38 +00:00
// TODO(crawshaw): are there any conditions under which
// we should trigger a retry based on the error here?
return
}
2020-03-04 06:21:56 +00:00
tailcfg: add Endpoint, EndpointType, MapRequest.EndpointType
Track endpoints internally with a new tailcfg.Endpoint type that
includes a typed netaddr.IPPort (instead of just a string) and
includes a type for how that endpoint was discovered (STUN, local,
etc).
Use []tailcfg.Endpoint instead of []string internally.
At the last second, send it to the control server as the existing
[]string for endpoints, but also include a new parallel
MapRequest.EndpointType []tailcfg.EndpointType, so the control server
can start filtering out less-important endpoint changes from
new-enough clients. Notably, STUN-discovered endpoints can be filtered
out from 1.6+ clients, as they can discover them amongst each other
via CallMeMaybe disco exchanges started over DERP. And STUN endpoints
change a lot, causing a lot of MapResposne updates. But portmapped
endpoints are worth keeping for now, as they they work right away
without requiring the firewall traversal extra RTT dance.
End result will be less control->client bandwidth. (despite negligible
increase in client->control bandwidth)
Updates tailscale/corp#1543
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2021-04-12 21:24:29 +01:00
if c . setEndpoints ( endpoints ) {
c . logEndpointChange ( endpoints )
2020-03-13 15:55:38 +00:00
c . epFunc ( endpoints )
}
}
2020-03-04 06:21:56 +00:00
2020-03-13 15:55:38 +00:00
// setEndpoints records the new endpoints, reporting whether they're changed.
// It takes ownership of the slice.
tailcfg: add Endpoint, EndpointType, MapRequest.EndpointType
Track endpoints internally with a new tailcfg.Endpoint type that
includes a typed netaddr.IPPort (instead of just a string) and
includes a type for how that endpoint was discovered (STUN, local,
etc).
Use []tailcfg.Endpoint instead of []string internally.
At the last second, send it to the control server as the existing
[]string for endpoints, but also include a new parallel
MapRequest.EndpointType []tailcfg.EndpointType, so the control server
can start filtering out less-important endpoint changes from
new-enough clients. Notably, STUN-discovered endpoints can be filtered
out from 1.6+ clients, as they can discover them amongst each other
via CallMeMaybe disco exchanges started over DERP. And STUN endpoints
change a lot, causing a lot of MapResposne updates. But portmapped
endpoints are worth keeping for now, as they they work right away
without requiring the firewall traversal extra RTT dance.
End result will be less control->client bandwidth. (despite negligible
increase in client->control bandwidth)
Updates tailscale/corp#1543
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2021-04-12 21:24:29 +01:00
func ( c * Conn ) setEndpoints ( endpoints [ ] tailcfg . Endpoint ) ( changed bool ) {
2020-10-14 18:44:54 +01:00
anySTUN := false
tailcfg: add Endpoint, EndpointType, MapRequest.EndpointType
Track endpoints internally with a new tailcfg.Endpoint type that
includes a typed netaddr.IPPort (instead of just a string) and
includes a type for how that endpoint was discovered (STUN, local,
etc).
Use []tailcfg.Endpoint instead of []string internally.
At the last second, send it to the control server as the existing
[]string for endpoints, but also include a new parallel
MapRequest.EndpointType []tailcfg.EndpointType, so the control server
can start filtering out less-important endpoint changes from
new-enough clients. Notably, STUN-discovered endpoints can be filtered
out from 1.6+ clients, as they can discover them amongst each other
via CallMeMaybe disco exchanges started over DERP. And STUN endpoints
change a lot, causing a lot of MapResposne updates. But portmapped
endpoints are worth keeping for now, as they they work right away
without requiring the firewall traversal extra RTT dance.
End result will be less control->client bandwidth. (despite negligible
increase in client->control bandwidth)
Updates tailscale/corp#1543
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2021-04-12 21:24:29 +01:00
for _ , ep := range endpoints {
if ep . Type == tailcfg . EndpointSTUN {
2020-10-14 18:44:54 +01:00
anySTUN = true
}
}
2020-03-13 15:55:38 +00:00
c . mu . Lock ( )
defer c . mu . Unlock ( )
2020-10-14 18:44:54 +01:00
if ! anySTUN && c . derpMap == nil && ! inTest ( ) {
// Don't bother storing or reporting this yet. We
// don't have a DERP map or any STUN entries, so we're
// just starting up. A DERP map should arrive shortly
// and then we'll have more interesting endpoints to
// report. This saves a map update.
// TODO(bradfitz): this optimization is currently
// skipped during the e2e tests because they depend
// too much on the exact sequence of updates. Fix the
// tests. But a protocol rewrite might happen first.
2020-12-21 18:58:06 +00:00
c . logf ( "[v1] magicsock: ignoring pre-DERP map, STUN-less endpoint update: %v" , endpoints )
2020-10-14 18:44:54 +01:00
return false
}
2021-01-20 17:52:24 +00:00
c . lastEndpointsTime = time . Now ( )
for de , fn := range c . onEndpointRefreshed {
go fn ( )
delete ( c . onEndpointRefreshed , de )
}
tailcfg: add Endpoint, EndpointType, MapRequest.EndpointType
Track endpoints internally with a new tailcfg.Endpoint type that
includes a typed netaddr.IPPort (instead of just a string) and
includes a type for how that endpoint was discovered (STUN, local,
etc).
Use []tailcfg.Endpoint instead of []string internally.
At the last second, send it to the control server as the existing
[]string for endpoints, but also include a new parallel
MapRequest.EndpointType []tailcfg.EndpointType, so the control server
can start filtering out less-important endpoint changes from
new-enough clients. Notably, STUN-discovered endpoints can be filtered
out from 1.6+ clients, as they can discover them amongst each other
via CallMeMaybe disco exchanges started over DERP. And STUN endpoints
change a lot, causing a lot of MapResposne updates. But portmapped
endpoints are worth keeping for now, as they they work right away
without requiring the firewall traversal extra RTT dance.
End result will be less control->client bandwidth. (despite negligible
increase in client->control bandwidth)
Updates tailscale/corp#1543
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2021-04-12 21:24:29 +01:00
if endpointSetsEqual ( endpoints , c . lastEndpoints ) {
2020-03-13 15:55:38 +00:00
return false
2020-02-05 22:16:58 +00:00
}
2020-03-13 15:55:38 +00:00
c . lastEndpoints = endpoints
return true
2020-02-05 22:16:58 +00:00
}
2021-03-09 23:09:10 +00:00
// setNetInfoHavePortMap updates NetInfo.HavePortMap to true.
func ( c * Conn ) setNetInfoHavePortMap ( ) {
c . mu . Lock ( )
defer c . mu . Unlock ( )
if c . netInfoLast == nil {
// No NetInfo yet. Nothing to update.
return
}
if c . netInfoLast . HavePortMap {
// No change.
return
}
ni := c . netInfoLast . Clone ( )
ni . HavePortMap = true
c . callNetInfoCallbackLocked ( ni )
}
2020-03-09 22:20:33 +00:00
func ( c * Conn ) updateNetInfo ( ctx context . Context ) ( * netcheck . Report , error ) {
2020-05-17 17:51:38 +01:00
c . mu . Lock ( )
dm := c . derpMap
c . mu . Unlock ( )
2020-10-07 04:24:10 +01:00
if dm == nil || c . networkDown ( ) {
2020-03-10 18:35:43 +00:00
return new ( netcheck . Report ) , nil
2020-03-06 15:47:54 +00:00
}
2020-03-09 22:20:33 +00:00
ctx , cancel := context . WithTimeout ( ctx , 2 * time . Second )
2020-03-04 06:21:56 +00:00
defer cancel ( )
2020-03-09 22:20:33 +00:00
c . stunReceiveFunc . Store ( c . netChecker . ReceiveSTUNPacket )
defer c . ignoreSTUNPackets ( )
2020-05-17 17:51:38 +01:00
report , err := c . netChecker . GetReport ( ctx , dm )
2020-03-04 06:21:56 +00:00
if err != nil {
2020-03-09 22:20:33 +00:00
return nil , err
2020-03-04 06:21:56 +00:00
}
2020-05-29 20:40:51 +01:00
c . noV4 . Set ( ! report . IPv4 )
c . noV6 . Set ( ! report . IPv6 )
2021-10-07 01:43:37 +01:00
c . noV4Send . Set ( ! report . IPv4CanSend )
2020-05-29 20:40:51 +01:00
2020-03-04 06:21:56 +00:00
ni := & tailcfg . NetInfo {
DERPLatency : map [ string ] float64 { } ,
MappingVariesByDestIP : report . MappingVariesByDestIP ,
HairPinning : report . HairPinning ,
2020-07-06 21:51:17 +01:00
UPnP : report . UPnP ,
PMP : report . PMP ,
PCP : report . PCP ,
2021-03-09 23:09:10 +00:00
HavePortMap : c . portMapper . HaveMapping ( ) ,
2020-03-04 06:21:56 +00:00
}
2020-05-17 17:51:38 +01:00
for rid , d := range report . RegionV4Latency {
ni . DERPLatency [ fmt . Sprintf ( "%d-v4" , rid ) ] = d . Seconds ( )
}
for rid , d := range report . RegionV6Latency {
ni . DERPLatency [ fmt . Sprintf ( "%d-v6" , rid ) ] = d . Seconds ( )
2020-03-04 06:21:56 +00:00
}
ni . WorkingIPv6 . Set ( report . IPv6 )
ni . WorkingUDP . Set ( report . UDP )
ni . PreferredDERP = report . PreferredDERP
if ni . PreferredDERP == 0 {
// Perhaps UDP is blocked. Pick a deterministic but arbitrary
// one.
ni . PreferredDERP = c . pickDERPFallback ( )
}
2020-03-04 20:21:40 +00:00
if ! c . setNearestDERP ( ni . PreferredDERP ) {
ni . PreferredDERP = 0
}
2020-03-04 06:21:56 +00:00
// TODO: set link type
c . callNetInfoCallback ( ni )
2020-03-09 22:20:33 +00:00
return report , nil
2020-03-04 06:21:56 +00:00
}
var processStartUnixNano = time . Now ( ) . UnixNano ( )
// pickDERPFallback returns a non-zero but deterministic DERP node to
// connect to. This is only used if netcheck couldn't find the
// nearest one (for instance, if UDP is blocked and thus STUN latency
// checks aren't working).
2020-03-13 15:55:38 +00:00
//
// c.mu must NOT be held.
2020-03-04 06:21:56 +00:00
func ( c * Conn ) pickDERPFallback ( ) int {
2020-03-13 15:55:38 +00:00
c . mu . Lock ( )
defer c . mu . Unlock ( )
2020-03-04 06:21:56 +00:00
2020-05-17 17:51:38 +01:00
if ! c . wantDerpLocked ( ) {
return 0
}
ids := c . derpMap . RegionIDs ( )
2020-03-09 22:20:33 +00:00
if len ( ids ) == 0 {
2020-05-17 17:51:38 +01:00
// No DERP regions in non-nil map.
2020-03-04 06:21:56 +00:00
return 0
}
2021-08-26 03:39:20 +01:00
// TODO: figure out which DERP region most of our peers are using,
// and use that region as our fallback.
//
// If we already had selected something in the past and it has any
// peers, we want to stay on it. If there are no peers at all,
// stay on whatever DERP we previously picked. If we need to pick
// one and have no peer info, pick a region randomly.
//
// We used to do the above for legacy clients, but never updated
// it for disco.
2020-03-25 18:14:29 +00:00
2021-08-26 03:39:20 +01:00
if c . myDerp != 0 {
2020-03-25 18:14:29 +00:00
return c . myDerp
}
2020-03-04 06:21:56 +00:00
h := fnv . New64 ( )
h . Write ( [ ] byte ( fmt . Sprintf ( "%p/%d" , c , processStartUnixNano ) ) ) // arbitrary
2020-03-09 22:20:33 +00:00
return ids [ rand . New ( rand . NewSource ( int64 ( h . Sum64 ( ) ) ) ) . Intn ( len ( ids ) ) ]
2020-03-04 06:21:56 +00:00
}
// callNetInfoCallback calls the NetInfo callback (if previously
// registered with SetNetInfoCallback) if ni has substantially changed
// since the last state.
//
// callNetInfoCallback takes ownership of ni.
2020-03-13 15:55:38 +00:00
//
// c.mu must NOT be held.
2020-03-04 06:21:56 +00:00
func ( c * Conn ) callNetInfoCallback ( ni * tailcfg . NetInfo ) {
2020-03-13 15:55:38 +00:00
c . mu . Lock ( )
defer c . mu . Unlock ( )
2020-03-04 06:21:56 +00:00
if ni . BasicallyEqual ( c . netInfoLast ) {
return
}
2021-03-09 23:09:10 +00:00
c . callNetInfoCallbackLocked ( ni )
}
func ( c * Conn ) callNetInfoCallbackLocked ( ni * tailcfg . NetInfo ) {
2020-03-04 06:21:56 +00:00
c . netInfoLast = ni
if c . netInfoFunc != nil {
2020-12-21 18:58:06 +00:00
c . logf ( "[v1] magicsock: netInfo update: %+v" , ni )
2020-03-04 06:21:56 +00:00
go c . netInfoFunc ( ni )
}
}
2021-01-15 20:50:33 +00:00
// addValidDiscoPathForTest makes addr a validated disco address for
// discoKey. It's used in tests to enable receiving of packets from
// addr without having to spin up the entire active discovery
// machinery.
2021-11-02 00:53:40 +00:00
func ( c * Conn ) addValidDiscoPathForTest ( nodeKey key . NodePublic , addr netaddr . IPPort ) {
2021-01-15 20:50:33 +00:00
c . mu . Lock ( )
defer c . mu . Unlock ( )
2021-10-18 21:29:09 +01:00
c . peerMap . setNodeKeyForIPPort ( addr , nodeKey )
2021-01-15 20:50:33 +00:00
}
2020-03-04 06:21:56 +00:00
func ( c * Conn ) SetNetInfoCallback ( fn func ( * tailcfg . NetInfo ) ) {
if fn == nil {
panic ( "nil NetInfoCallback" )
}
2020-03-13 15:55:38 +00:00
c . mu . Lock ( )
2020-03-04 06:21:56 +00:00
last := c . netInfoLast
c . netInfoFunc = fn
2020-03-13 15:55:38 +00:00
c . mu . Unlock ( )
2020-03-04 06:21:56 +00:00
if last != nil {
fn ( last )
}
}
2021-10-15 22:40:49 +01:00
// LastRecvActivityOfNodeKey describes the time we last got traffic from
2021-01-12 03:07:08 +00:00
// this endpoint (updated every ~10 seconds).
2021-11-02 00:53:40 +00:00
func ( c * Conn ) LastRecvActivityOfNodeKey ( nk key . NodePublic ) string {
2021-01-12 03:07:08 +00:00
c . mu . Lock ( )
defer c . mu . Unlock ( )
2021-10-15 22:40:49 +01:00
de , ok := c . peerMap . endpointForNodeKey ( nk )
2021-01-12 03:07:08 +00:00
if ! ok {
2021-07-21 19:04:36 +01:00
return "never"
2021-01-12 03:07:08 +00:00
}
2021-07-21 19:04:36 +01:00
saw := de . lastRecv . LoadAtomic ( )
if saw == 0 {
return "never"
2021-01-12 03:07:08 +00:00
}
2021-07-21 19:04:36 +01:00
return mono . Since ( saw ) . Round ( time . Second ) . String ( )
2020-08-09 22:49:42 +01:00
}
// Ping handles a "tailscale ping" CLI query.
2021-03-23 04:25:43 +00:00
func ( c * Conn ) Ping ( peer * tailcfg . Node , res * ipnstate . PingResult , cb func ( * ipnstate . PingResult ) ) {
2020-08-09 22:49:42 +01:00
c . mu . Lock ( )
defer c . mu . Unlock ( )
if c . privateKey . IsZero ( ) {
res . Err = "local tailscaled stopped"
cb ( res )
return
}
if len ( peer . Addresses ) > 0 {
2021-05-15 02:07:28 +01:00
res . NodeIP = peer . Addresses [ 0 ] . IP ( ) . String ( )
2020-08-09 22:49:42 +01:00
}
res . NodeName = peer . Name // prefer DNS name
if res . NodeName == "" {
res . NodeName = peer . Hostinfo . Hostname // else hostname
} else {
if i := strings . Index ( res . NodeName , "." ) ; i != - 1 {
res . NodeName = res . NodeName [ : i ]
}
}
2021-11-02 03:55:52 +00:00
ep , ok := c . peerMap . endpointForNodeKey ( peer . Key )
2020-08-09 22:49:42 +01:00
if ! ok {
2021-09-01 00:55:22 +01:00
res . Err = "unknown peer"
cb ( res )
return
2020-08-09 22:49:42 +01:00
}
2021-09-01 00:55:22 +01:00
ep . cliPing ( res , cb )
2020-08-09 22:49:42 +01:00
}
// c.mu must be held
func ( c * Conn ) populateCLIPingResponseLocked ( res * ipnstate . PingResult , latency time . Duration , ep netaddr . IPPort ) {
res . LatencySeconds = latency . Seconds ( )
2021-05-15 02:07:28 +01:00
if ep . IP ( ) != derpMagicIPAddr {
2020-08-09 22:49:42 +01:00
res . Endpoint = ep . String ( )
return
}
2021-05-15 02:07:28 +01:00
regionID := int ( ep . Port ( ) )
2020-08-09 22:49:42 +01:00
res . DERPRegionID = regionID
2021-08-23 18:59:07 +01:00
res . DERPRegionCode = c . derpRegionCodeLocked ( regionID )
}
func ( c * Conn ) derpRegionCodeLocked ( regionID int ) string {
if c . derpMap == nil {
return ""
}
if dr , ok := c . derpMap . Regions [ regionID ] ; ok {
return dr . RegionCode
2020-08-09 22:49:42 +01:00
}
2021-08-23 18:59:07 +01:00
return ""
2020-08-09 22:49:42 +01:00
}
2020-07-06 20:10:39 +01:00
// DiscoPublicKey returns the discovery public key.
2021-10-29 22:27:29 +01:00
func ( c * Conn ) DiscoPublicKey ( ) key . DiscoPublic {
2020-06-19 20:06:49 +01:00
c . mu . Lock ( )
defer c . mu . Unlock ( )
2020-07-06 20:10:39 +01:00
if c . discoPrivate . IsZero ( ) {
2021-10-29 22:27:29 +01:00
priv := key . NewDisco ( )
2020-07-06 20:10:39 +01:00
c . discoPrivate = priv
2021-10-29 22:27:29 +01:00
c . discoPublic = priv . Public ( )
2020-07-06 20:10:39 +01:00
c . discoShort = c . discoPublic . ShortString ( )
c . logf ( "magicsock: disco key = %v" , c . discoShort )
}
return c . discoPublic
2020-06-19 20:06:49 +01:00
}
2020-07-16 05:08:25 +01:00
// PeerHasDiscoKey reports whether peer k supports discovery keys (client version 0.100.0+).
2021-11-02 00:53:40 +00:00
func ( c * Conn ) PeerHasDiscoKey ( k key . NodePublic ) bool {
2020-07-16 05:08:25 +01:00
c . mu . Lock ( )
defer c . mu . Unlock ( )
2021-09-01 00:55:22 +01:00
if ep , ok := c . peerMap . endpointForNodeKey ( k ) ; ok {
return ep . discoKey . IsZero ( )
2021-08-26 03:39:20 +01:00
}
return false
2020-07-16 05:08:25 +01:00
}
2020-03-13 15:55:38 +00:00
// c.mu must NOT be held.
2020-03-04 20:21:40 +00:00
func ( c * Conn ) setNearestDERP ( derpNum int ) ( wantDERP bool ) {
2020-03-13 15:55:38 +00:00
c . mu . Lock ( )
defer c . mu . Unlock ( )
2020-05-17 17:51:38 +01:00
if ! c . wantDerpLocked ( ) {
2020-03-04 20:21:40 +00:00
c . myDerp = 0
2021-02-25 05:29:51 +00:00
health . SetMagicSockDERPHome ( 0 )
2020-03-04 20:21:40 +00:00
return false
}
2020-03-05 23:00:56 +00:00
if derpNum == c . myDerp {
// No change.
return true
}
2021-11-16 16:34:25 +00:00
if c . myDerp != 0 && derpNum != 0 {
metricDERPHomeChange . Add ( 1 )
}
2020-03-24 15:09:30 +00:00
c . myDerp = derpNum
2021-02-25 05:29:51 +00:00
health . SetMagicSockDERPHome ( derpNum )
2020-03-24 15:09:30 +00:00
if c . privateKey . IsZero ( ) {
// No private key yet, so DERP connections won't come up anyway.
// Return early rather than ultimately log a couple lines of noise.
return true
}
2020-03-19 06:32:31 +00:00
// On change, notify all currently connected DERP servers and
// start connecting to our home DERP if we are not already.
2020-05-28 08:42:03 +01:00
dr := c . derpMap . Regions [ derpNum ]
if dr == nil {
c . logf ( "[unexpected] magicsock: derpMap.Regions[%v] is nil" , derpNum )
} else {
c . logf ( "magicsock: home is now derp-%v (%v)" , derpNum , c . derpMap . Regions [ derpNum ] . RegionCode )
}
2020-03-05 23:00:56 +00:00
for i , ad := range c . activeDerp {
go ad . c . NotePreferred ( i == c . myDerp )
}
2020-04-09 22:21:36 +01:00
c . goDerpConnect ( derpNum )
2020-03-04 20:21:40 +00:00
return true
2020-03-04 01:46:03 +00:00
}
2021-02-10 18:04:42 +00:00
// startDerpHomeConnectLocked starts connecting to our DERP home, if any.
//
// c.mu must be held.
func ( c * Conn ) startDerpHomeConnectLocked ( ) {
c . goDerpConnect ( c . myDerp )
}
2020-04-09 22:21:36 +01:00
// goDerpConnect starts a goroutine to start connecting to the given
// DERP node.
//
// c.mu may be held, but does not need to be.
func ( c * Conn ) goDerpConnect ( node int ) {
if node == 0 {
return
}
2021-10-29 21:15:27 +01:00
go c . derpWriteChanOfAddr ( netaddr . IPPortFrom ( derpMagicIPAddr , uint16 ( node ) ) , key . NodePublic { } )
2020-04-09 22:21:36 +01:00
}
2020-02-18 16:57:11 +00:00
// determineEndpoints returns the machine's endpoint addresses. It
2020-03-13 15:55:38 +00:00
// does a STUN lookup (via netcheck) to determine its public address.
//
// c.mu must NOT be held.
tailcfg: add Endpoint, EndpointType, MapRequest.EndpointType
Track endpoints internally with a new tailcfg.Endpoint type that
includes a typed netaddr.IPPort (instead of just a string) and
includes a type for how that endpoint was discovered (STUN, local,
etc).
Use []tailcfg.Endpoint instead of []string internally.
At the last second, send it to the control server as the existing
[]string for endpoints, but also include a new parallel
MapRequest.EndpointType []tailcfg.EndpointType, so the control server
can start filtering out less-important endpoint changes from
new-enough clients. Notably, STUN-discovered endpoints can be filtered
out from 1.6+ clients, as they can discover them amongst each other
via CallMeMaybe disco exchanges started over DERP. And STUN endpoints
change a lot, causing a lot of MapResposne updates. But portmapped
endpoints are worth keeping for now, as they they work right away
without requiring the firewall traversal extra RTT dance.
End result will be less control->client bandwidth. (despite negligible
increase in client->control bandwidth)
Updates tailscale/corp#1543
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2021-04-12 21:24:29 +01:00
func ( c * Conn ) determineEndpoints ( ctx context . Context ) ( [ ] tailcfg . Endpoint , error ) {
2021-10-22 17:09:37 +01:00
var havePortmap bool
var portmapExt netaddr . IPPort
if runtime . GOOS != "js" {
portmapExt , havePortmap = c . portMapper . GetCachedMappingOrStartCreatingOne ( )
2021-10-20 20:57:10 +01:00
}
2021-07-09 18:01:50 +01:00
2020-03-13 15:55:38 +00:00
nr , err := c . updateNetInfo ( ctx )
if err != nil {
c . logf ( "magicsock.Conn.determineEndpoints: updateNetInfo: %v" , err )
tailcfg: add Endpoint, EndpointType, MapRequest.EndpointType
Track endpoints internally with a new tailcfg.Endpoint type that
includes a typed netaddr.IPPort (instead of just a string) and
includes a type for how that endpoint was discovered (STUN, local,
etc).
Use []tailcfg.Endpoint instead of []string internally.
At the last second, send it to the control server as the existing
[]string for endpoints, but also include a new parallel
MapRequest.EndpointType []tailcfg.EndpointType, so the control server
can start filtering out less-important endpoint changes from
new-enough clients. Notably, STUN-discovered endpoints can be filtered
out from 1.6+ clients, as they can discover them amongst each other
via CallMeMaybe disco exchanges started over DERP. And STUN endpoints
change a lot, causing a lot of MapResposne updates. But portmapped
endpoints are worth keeping for now, as they they work right away
without requiring the firewall traversal extra RTT dance.
End result will be less control->client bandwidth. (despite negligible
increase in client->control bandwidth)
Updates tailscale/corp#1543
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2021-04-12 21:24:29 +01:00
return nil , err
2020-03-13 15:55:38 +00:00
}
2021-10-22 17:09:37 +01:00
if runtime . GOOS == "js" {
// TODO(bradfitz): why does control require an
// endpoint? Otherwise it doesn't stream map responses
// back.
return [ ] tailcfg . Endpoint {
{
Addr : netaddr . MustParseIPPort ( "[fe80:123:456:789::1]:12345" ) ,
Type : tailcfg . EndpointLocal ,
} ,
} , nil
}
tailcfg: add Endpoint, EndpointType, MapRequest.EndpointType
Track endpoints internally with a new tailcfg.Endpoint type that
includes a typed netaddr.IPPort (instead of just a string) and
includes a type for how that endpoint was discovered (STUN, local,
etc).
Use []tailcfg.Endpoint instead of []string internally.
At the last second, send it to the control server as the existing
[]string for endpoints, but also include a new parallel
MapRequest.EndpointType []tailcfg.EndpointType, so the control server
can start filtering out less-important endpoint changes from
new-enough clients. Notably, STUN-discovered endpoints can be filtered
out from 1.6+ clients, as they can discover them amongst each other
via CallMeMaybe disco exchanges started over DERP. And STUN endpoints
change a lot, causing a lot of MapResposne updates. But portmapped
endpoints are worth keeping for now, as they they work right away
without requiring the firewall traversal extra RTT dance.
End result will be less control->client bandwidth. (despite negligible
increase in client->control bandwidth)
Updates tailscale/corp#1543
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2021-04-12 21:24:29 +01:00
already := make ( map [ netaddr . IPPort ] tailcfg . EndpointType ) // endpoint -> how it was found
var eps [ ] tailcfg . Endpoint // unique endpoints
2020-02-05 22:16:58 +00:00
tailcfg: add Endpoint, EndpointType, MapRequest.EndpointType
Track endpoints internally with a new tailcfg.Endpoint type that
includes a typed netaddr.IPPort (instead of just a string) and
includes a type for how that endpoint was discovered (STUN, local,
etc).
Use []tailcfg.Endpoint instead of []string internally.
At the last second, send it to the control server as the existing
[]string for endpoints, but also include a new parallel
MapRequest.EndpointType []tailcfg.EndpointType, so the control server
can start filtering out less-important endpoint changes from
new-enough clients. Notably, STUN-discovered endpoints can be filtered
out from 1.6+ clients, as they can discover them amongst each other
via CallMeMaybe disco exchanges started over DERP. And STUN endpoints
change a lot, causing a lot of MapResposne updates. But portmapped
endpoints are worth keeping for now, as they they work right away
without requiring the firewall traversal extra RTT dance.
End result will be less control->client bandwidth. (despite negligible
increase in client->control bandwidth)
Updates tailscale/corp#1543
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2021-04-12 21:24:29 +01:00
ipp := func ( s string ) ( ipp netaddr . IPPort ) {
ipp , _ = netaddr . ParseIPPort ( s )
return
}
addAddr := func ( ipp netaddr . IPPort , et tailcfg . EndpointType ) {
if ipp . IsZero ( ) || ( debugOmitLocalAddresses && et == tailcfg . EndpointLocal ) {
2020-07-02 17:53:10 +01:00
return
}
tailcfg: add Endpoint, EndpointType, MapRequest.EndpointType
Track endpoints internally with a new tailcfg.Endpoint type that
includes a typed netaddr.IPPort (instead of just a string) and
includes a type for how that endpoint was discovered (STUN, local,
etc).
Use []tailcfg.Endpoint instead of []string internally.
At the last second, send it to the control server as the existing
[]string for endpoints, but also include a new parallel
MapRequest.EndpointType []tailcfg.EndpointType, so the control server
can start filtering out less-important endpoint changes from
new-enough clients. Notably, STUN-discovered endpoints can be filtered
out from 1.6+ clients, as they can discover them amongst each other
via CallMeMaybe disco exchanges started over DERP. And STUN endpoints
change a lot, causing a lot of MapResposne updates. But portmapped
endpoints are worth keeping for now, as they they work right away
without requiring the firewall traversal extra RTT dance.
End result will be less control->client bandwidth. (despite negligible
increase in client->control bandwidth)
Updates tailscale/corp#1543
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2021-04-12 21:24:29 +01:00
if _ , ok := already [ ipp ] ; ! ok {
already [ ipp ] = et
eps = append ( eps , tailcfg . Endpoint { Addr : ipp , Type : et } )
2020-02-05 22:16:58 +00:00
}
}
2021-07-09 18:01:50 +01:00
// If we didn't have a portmap earlier, maybe it's done by now.
if ! havePortmap {
portmapExt , havePortmap = c . portMapper . GetCachedMappingOrStartCreatingOne ( )
}
if havePortmap {
addAddr ( portmapExt , tailcfg . EndpointPortmapped )
2021-03-09 23:09:10 +00:00
c . setNetInfoHavePortMap ( )
2021-02-20 06:15:41 +00:00
}
2020-03-09 22:20:33 +00:00
if nr . GlobalV4 != "" {
tailcfg: add Endpoint, EndpointType, MapRequest.EndpointType
Track endpoints internally with a new tailcfg.Endpoint type that
includes a typed netaddr.IPPort (instead of just a string) and
includes a type for how that endpoint was discovered (STUN, local,
etc).
Use []tailcfg.Endpoint instead of []string internally.
At the last second, send it to the control server as the existing
[]string for endpoints, but also include a new parallel
MapRequest.EndpointType []tailcfg.EndpointType, so the control server
can start filtering out less-important endpoint changes from
new-enough clients. Notably, STUN-discovered endpoints can be filtered
out from 1.6+ clients, as they can discover them amongst each other
via CallMeMaybe disco exchanges started over DERP. And STUN endpoints
change a lot, causing a lot of MapResposne updates. But portmapped
endpoints are worth keeping for now, as they they work right away
without requiring the firewall traversal extra RTT dance.
End result will be less control->client bandwidth. (despite negligible
increase in client->control bandwidth)
Updates tailscale/corp#1543
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2021-04-12 21:24:29 +01:00
addAddr ( ipp ( nr . GlobalV4 ) , tailcfg . EndpointSTUN )
2020-08-04 17:48:34 +01:00
// If they're behind a hard NAT and are using a fixed
// port locally, assume they might've added a static
// port mapping on their router to the same explicit
// port that tailscaled is running with. Worst case
// it's an invalid candidate mapping.
2021-06-22 21:00:40 +01:00
if port := c . port . Get ( ) ; nr . MappingVariesByDestIP . EqualBool ( true ) && port != 0 {
2020-08-04 17:48:34 +01:00
if ip , _ , err := net . SplitHostPort ( nr . GlobalV4 ) ; err == nil {
2021-06-22 21:00:40 +01:00
addAddr ( ipp ( net . JoinHostPort ( ip , strconv . Itoa ( int ( port ) ) ) ) , tailcfg . EndpointSTUN4LocalPort )
2020-08-04 17:48:34 +01:00
}
}
2020-02-05 22:16:58 +00:00
}
2020-03-19 16:39:00 +00:00
if nr . GlobalV6 != "" {
tailcfg: add Endpoint, EndpointType, MapRequest.EndpointType
Track endpoints internally with a new tailcfg.Endpoint type that
includes a typed netaddr.IPPort (instead of just a string) and
includes a type for how that endpoint was discovered (STUN, local,
etc).
Use []tailcfg.Endpoint instead of []string internally.
At the last second, send it to the control server as the existing
[]string for endpoints, but also include a new parallel
MapRequest.EndpointType []tailcfg.EndpointType, so the control server
can start filtering out less-important endpoint changes from
new-enough clients. Notably, STUN-discovered endpoints can be filtered
out from 1.6+ clients, as they can discover them amongst each other
via CallMeMaybe disco exchanges started over DERP. And STUN endpoints
change a lot, causing a lot of MapResposne updates. But portmapped
endpoints are worth keeping for now, as they they work right away
without requiring the firewall traversal extra RTT dance.
End result will be less control->client bandwidth. (despite negligible
increase in client->control bandwidth)
Updates tailscale/corp#1543
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2021-04-12 21:24:29 +01:00
addAddr ( ipp ( nr . GlobalV6 ) , tailcfg . EndpointSTUN )
2020-02-05 22:16:58 +00:00
}
2020-02-18 16:57:11 +00:00
c . ignoreSTUNPackets ( )
2020-02-05 22:16:58 +00:00
2020-03-19 15:49:30 +00:00
if localAddr := c . pconn4 . LocalAddr ( ) ; localAddr . IP . IsUnspecified ( ) {
2020-03-02 18:38:44 +00:00
ips , loopback , err := interfaces . LocalAddresses ( )
2020-02-05 22:16:58 +00:00
if err != nil {
tailcfg: add Endpoint, EndpointType, MapRequest.EndpointType
Track endpoints internally with a new tailcfg.Endpoint type that
includes a typed netaddr.IPPort (instead of just a string) and
includes a type for how that endpoint was discovered (STUN, local,
etc).
Use []tailcfg.Endpoint instead of []string internally.
At the last second, send it to the control server as the existing
[]string for endpoints, but also include a new parallel
MapRequest.EndpointType []tailcfg.EndpointType, so the control server
can start filtering out less-important endpoint changes from
new-enough clients. Notably, STUN-discovered endpoints can be filtered
out from 1.6+ clients, as they can discover them amongst each other
via CallMeMaybe disco exchanges started over DERP. And STUN endpoints
change a lot, causing a lot of MapResposne updates. But portmapped
endpoints are worth keeping for now, as they they work right away
without requiring the firewall traversal extra RTT dance.
End result will be less control->client bandwidth. (despite negligible
increase in client->control bandwidth)
Updates tailscale/corp#1543
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2021-04-12 21:24:29 +01:00
return nil , err
2020-02-05 22:16:58 +00:00
}
2020-05-28 22:16:23 +01:00
if len ( ips ) == 0 && len ( eps ) == 0 {
2020-02-05 22:16:58 +00:00
// Only include loopback addresses if we have no
2020-05-28 22:16:23 +01:00
// interfaces at all to use as endpoints and don't
// have a public IPv4 or IPv6 address. This allows
2020-02-05 22:16:58 +00:00
// for localhost testing when you're on a plane and
// offline, for example.
2020-03-02 18:38:44 +00:00
ips = loopback
}
2021-03-04 06:02:45 +00:00
for _ , ip := range ips {
2021-05-15 02:07:28 +01:00
addAddr ( netaddr . IPPortFrom ( ip , uint16 ( localAddr . Port ) ) , tailcfg . EndpointLocal )
2020-02-05 22:16:58 +00:00
}
} else {
// Our local endpoint is bound to a particular address.
// Do not offer addresses on other local interfaces.
tailcfg: add Endpoint, EndpointType, MapRequest.EndpointType
Track endpoints internally with a new tailcfg.Endpoint type that
includes a typed netaddr.IPPort (instead of just a string) and
includes a type for how that endpoint was discovered (STUN, local,
etc).
Use []tailcfg.Endpoint instead of []string internally.
At the last second, send it to the control server as the existing
[]string for endpoints, but also include a new parallel
MapRequest.EndpointType []tailcfg.EndpointType, so the control server
can start filtering out less-important endpoint changes from
new-enough clients. Notably, STUN-discovered endpoints can be filtered
out from 1.6+ clients, as they can discover them amongst each other
via CallMeMaybe disco exchanges started over DERP. And STUN endpoints
change a lot, causing a lot of MapResposne updates. But portmapped
endpoints are worth keeping for now, as they they work right away
without requiring the firewall traversal extra RTT dance.
End result will be less control->client bandwidth. (despite negligible
increase in client->control bandwidth)
Updates tailscale/corp#1543
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2021-04-12 21:24:29 +01:00
addAddr ( ipp ( localAddr . String ( ) ) , tailcfg . EndpointLocal )
2020-02-05 22:16:58 +00:00
}
// Note: the endpoints are intentionally returned in priority order,
// from "farthest but most reliable" to "closest but least
// reliable." Addresses returned from STUN should be globally
// addressable, but might go farther on the network than necessary.
// Local interface addresses might have lower latency, but not be
// globally addressable.
//
// The STUN address(es) are always first so that legacy wireguard
// can use eps[0] as its only known endpoint address (although that's
// obviously non-ideal).
tailcfg: add Endpoint, EndpointType, MapRequest.EndpointType
Track endpoints internally with a new tailcfg.Endpoint type that
includes a typed netaddr.IPPort (instead of just a string) and
includes a type for how that endpoint was discovered (STUN, local,
etc).
Use []tailcfg.Endpoint instead of []string internally.
At the last second, send it to the control server as the existing
[]string for endpoints, but also include a new parallel
MapRequest.EndpointType []tailcfg.EndpointType, so the control server
can start filtering out less-important endpoint changes from
new-enough clients. Notably, STUN-discovered endpoints can be filtered
out from 1.6+ clients, as they can discover them amongst each other
via CallMeMaybe disco exchanges started over DERP. And STUN endpoints
change a lot, causing a lot of MapResposne updates. But portmapped
endpoints are worth keeping for now, as they they work right away
without requiring the firewall traversal extra RTT dance.
End result will be less control->client bandwidth. (despite negligible
increase in client->control bandwidth)
Updates tailscale/corp#1543
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2021-04-12 21:24:29 +01:00
//
// Despite this sorting, though, clients since 0.100 haven't relied
// on the sorting order for any decisions.
return eps , nil
2020-02-05 22:16:58 +00:00
}
tailcfg: add Endpoint, EndpointType, MapRequest.EndpointType
Track endpoints internally with a new tailcfg.Endpoint type that
includes a typed netaddr.IPPort (instead of just a string) and
includes a type for how that endpoint was discovered (STUN, local,
etc).
Use []tailcfg.Endpoint instead of []string internally.
At the last second, send it to the control server as the existing
[]string for endpoints, but also include a new parallel
MapRequest.EndpointType []tailcfg.EndpointType, so the control server
can start filtering out less-important endpoint changes from
new-enough clients. Notably, STUN-discovered endpoints can be filtered
out from 1.6+ clients, as they can discover them amongst each other
via CallMeMaybe disco exchanges started over DERP. And STUN endpoints
change a lot, causing a lot of MapResposne updates. But portmapped
endpoints are worth keeping for now, as they they work right away
without requiring the firewall traversal extra RTT dance.
End result will be less control->client bandwidth. (despite negligible
increase in client->control bandwidth)
Updates tailscale/corp#1543
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2021-04-12 21:24:29 +01:00
// endpointSetsEqual reports whether x and y represent the same set of
// endpoints. The order doesn't matter.
2021-03-22 17:23:26 +00:00
//
// It does not mutate the slices.
tailcfg: add Endpoint, EndpointType, MapRequest.EndpointType
Track endpoints internally with a new tailcfg.Endpoint type that
includes a typed netaddr.IPPort (instead of just a string) and
includes a type for how that endpoint was discovered (STUN, local,
etc).
Use []tailcfg.Endpoint instead of []string internally.
At the last second, send it to the control server as the existing
[]string for endpoints, but also include a new parallel
MapRequest.EndpointType []tailcfg.EndpointType, so the control server
can start filtering out less-important endpoint changes from
new-enough clients. Notably, STUN-discovered endpoints can be filtered
out from 1.6+ clients, as they can discover them amongst each other
via CallMeMaybe disco exchanges started over DERP. And STUN endpoints
change a lot, causing a lot of MapResposne updates. But portmapped
endpoints are worth keeping for now, as they they work right away
without requiring the firewall traversal extra RTT dance.
End result will be less control->client bandwidth. (despite negligible
increase in client->control bandwidth)
Updates tailscale/corp#1543
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2021-04-12 21:24:29 +01:00
func endpointSetsEqual ( x , y [ ] tailcfg . Endpoint ) bool {
2021-03-22 17:23:26 +00:00
if len ( x ) == len ( y ) {
orderMatches := true
for i := range x {
if x [ i ] != y [ i ] {
orderMatches = false
break
}
}
if orderMatches {
return true
}
}
tailcfg: add Endpoint, EndpointType, MapRequest.EndpointType
Track endpoints internally with a new tailcfg.Endpoint type that
includes a typed netaddr.IPPort (instead of just a string) and
includes a type for how that endpoint was discovered (STUN, local,
etc).
Use []tailcfg.Endpoint instead of []string internally.
At the last second, send it to the control server as the existing
[]string for endpoints, but also include a new parallel
MapRequest.EndpointType []tailcfg.EndpointType, so the control server
can start filtering out less-important endpoint changes from
new-enough clients. Notably, STUN-discovered endpoints can be filtered
out from 1.6+ clients, as they can discover them amongst each other
via CallMeMaybe disco exchanges started over DERP. And STUN endpoints
change a lot, causing a lot of MapResposne updates. But portmapped
endpoints are worth keeping for now, as they they work right away
without requiring the firewall traversal extra RTT dance.
End result will be less control->client bandwidth. (despite negligible
increase in client->control bandwidth)
Updates tailscale/corp#1543
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2021-04-12 21:24:29 +01:00
m := map [ tailcfg . Endpoint ] int { }
2021-03-22 17:23:26 +00:00
for _ , v := range x {
m [ v ] |= 1
}
for _ , v := range y {
m [ v ] |= 2
2020-02-05 22:16:58 +00:00
}
2021-03-22 17:23:26 +00:00
for _ , n := range m {
if n != 3 {
2020-02-05 22:16:58 +00:00
return false
}
}
return true
}
2021-02-20 06:15:41 +00:00
// LocalPort returns the current IPv4 listener's port number.
2020-02-05 22:16:58 +00:00
func ( c * Conn ) LocalPort ( ) uint16 {
2021-10-20 20:57:10 +01:00
if runtime . GOOS == "js" {
return 12345
}
2020-03-19 15:49:30 +00:00
laddr := c . pconn4 . LocalAddr ( )
2020-02-05 22:16:58 +00:00
return uint16 ( laddr . Port )
}
2020-10-06 23:22:46 +01:00
// errNetworkDown is returned when a send can't proceed because the
// network is currently reported as down.
var errNetworkDown = errors.New("magicsock: network down")

// networkDown reports whether the network is currently considered down,
// per c.networkUp.
func (c *Conn) networkDown() bool {
	return !c.networkUp.Get()
}
2020-02-18 21:32:04 +00:00
2020-02-24 12:27:48 +00:00
func ( c * Conn ) Send ( b [ ] byte , ep conn . Endpoint ) error {
2020-12-14 02:51:24 +00:00
metricSendData . Add ( 1 )
2020-10-06 23:22:46 +01:00
if c . networkDown ( ) {
2020-12-14 02:51:24 +00:00
metricSendDataNetworkDown . Add ( 1 )
2020-10-06 23:22:46 +01:00
return errNetworkDown
}
2021-08-26 06:20:31 +01:00
return ep . ( * endpoint ) . send ( b )
2020-02-18 21:32:04 +00:00
}
2020-02-05 22:16:58 +00:00
2020-02-18 21:32:04 +00:00
var (
	errConnClosed     = errors.New("Conn closed")
	errDropDerpPacket = errors.New("too many DERP packets queued; dropping")
	errNoUDP          = errors.New("no UDP available on platform")
)

// udpAddrPool recycles *net.UDPAddr values to reduce allocations on
// the UDP send path.
var udpAddrPool = &sync.Pool{
	New: func() interface{} { return new(net.UDPAddr) },
}
2020-06-30 20:22:42 +01:00
// sendUDP sends UDP packet b to ipp.
2020-07-01 22:39:21 +01:00
// See sendAddr's docs on the return value meanings.
func ( c * Conn ) sendUDP ( ipp netaddr . IPPort , b [ ] byte ) ( sent bool , err error ) {
2021-10-22 17:09:37 +01:00
if runtime . GOOS == "js" {
return false , errNoUDP
}
2020-12-31 01:34:35 +00:00
ua := udpAddrPool . Get ( ) . ( * net . UDPAddr )
2021-10-18 01:11:27 +01:00
sent , err = c . sendUDPStd ( ipp . UDPAddrAt ( ua ) , b )
2020-12-14 02:51:24 +00:00
if err != nil {
metricSendUDPError . Add ( 1 )
} else {
2021-10-18 01:11:27 +01:00
// Only return it to the pool on success; Issue 3122.
udpAddrPool . Put ( ua )
2020-12-14 02:51:24 +00:00
if sent {
metricSendUDP . Add ( 1 )
}
2021-10-18 01:11:27 +01:00
}
return
2020-06-30 20:22:42 +01:00
}
2020-07-01 22:39:21 +01:00
// sendUDP sends UDP packet b to addr.
// See sendAddr's docs on the return value meanings.
func ( c * Conn ) sendUDPStd ( addr * net . UDPAddr , b [ ] byte ) ( sent bool , err error ) {
2020-06-30 20:22:42 +01:00
switch {
case addr . IP . To4 ( ) != nil :
_ , err = c . pconn4 . WriteTo ( b , addr )
2021-12-30 19:11:50 +00:00
if err != nil && ( c . noV4 . Get ( ) || neterror . TreatAsLostUDP ( err ) ) {
2020-07-01 22:39:21 +01:00
return false , nil
2020-05-29 20:40:51 +01:00
}
2020-06-30 20:22:42 +01:00
case len ( addr . IP ) == net . IPv6len :
if c . pconn6 == nil {
// ignore IPv6 dest if we don't have an IPv6 address.
2020-07-01 22:39:21 +01:00
return false , nil
2020-06-30 20:22:42 +01:00
}
_ , err = c . pconn6 . WriteTo ( b , addr )
2021-12-30 19:11:50 +00:00
if err != nil && ( c . noV6 . Get ( ) || neterror . TreatAsLostUDP ( err ) ) {
2020-07-01 22:39:21 +01:00
return false , nil
2020-05-29 20:40:51 +01:00
}
2020-06-30 20:22:42 +01:00
default :
2020-07-01 22:39:21 +01:00
panic ( "bogus sendUDPStd addr type" )
2020-03-20 20:38:21 +00:00
}
2020-07-01 22:39:21 +01:00
return err == nil , err
2020-03-20 20:38:21 +00:00
}
2020-02-18 21:32:04 +00:00
// sendAddr sends packet b to addr, which is either a real UDP address
// or a fake UDP address representing a DERP server (see derpmap.go).
// The provided public key identifies the recipient.
2020-07-01 22:39:21 +01:00
//
// The returned err is whether there was an error writing when it
// should've worked.
// The returned sent is whether a packet went out at all.
// An example of when they might be different: sending to an
// IPv6 address when the local machine doesn't have IPv6 support
// returns (false, nil); it's not an error, but nothing was sent.
2021-10-29 21:15:27 +01:00
func ( c * Conn ) sendAddr ( addr netaddr . IPPort , pubKey key . NodePublic , b [ ] byte ) ( sent bool , err error ) {
2021-05-15 02:07:28 +01:00
if addr . IP ( ) != derpMagicIPAddr {
2020-03-20 20:38:21 +00:00
return c . sendUDP ( addr , b )
2020-03-04 20:21:40 +00:00
}
2020-03-22 01:24:28 +00:00
ch := c . derpWriteChanOfAddr ( addr , pubKey )
2020-03-04 20:21:40 +00:00
if ch == nil {
2020-12-14 02:51:24 +00:00
metricSendDERPErrorChan . Add ( 1 )
2020-07-01 22:39:21 +01:00
return false , nil
2020-03-04 20:21:40 +00:00
}
2020-03-12 19:05:32 +00:00
// TODO(bradfitz): this makes garbage for now; we could use a
// buffer pool later. Previously we passed ownership of this
// to derpWriteRequest and waited for derphttp.Client.Send to
// complete, but that's too slow while holding wireguard-go
// internal locks.
pkt := make ( [ ] byte , len ( b ) )
copy ( pkt , b )
2020-03-04 20:21:40 +00:00
select {
2021-01-16 03:13:59 +00:00
case <- c . donec :
2020-12-14 02:51:24 +00:00
metricSendDERPErrorClosed . Add ( 1 )
2020-07-01 22:39:21 +01:00
return false , errConnClosed
2020-03-12 19:05:32 +00:00
case ch <- derpWriteRequest { addr , pubKey , pkt } :
2020-12-14 02:51:24 +00:00
metricSendDERPQueued . Add ( 1 )
2020-07-01 22:39:21 +01:00
return true , nil
2020-03-04 20:21:40 +00:00
default :
2020-12-14 02:51:24 +00:00
metricSendDERPErrorQueue . Add ( 1 )
2020-03-04 20:21:40 +00:00
// Too many writes queued. Drop packet.
2020-07-01 22:39:21 +01:00
return false , errDropDerpPacket
2020-02-18 21:32:04 +00:00
}
}
2020-02-05 22:16:58 +00:00
2020-02-18 21:32:04 +00:00
// bufferedDerpWritesBeforeDrop is how many packet writes can be
// queued up for the DERP client to write on the wire before we start
// dropping.
//
// TODO: this is currently arbitrary. Figure out something better?
const bufferedDerpWritesBeforeDrop = 32
2020-02-18 21:32:04 +00:00
// derpWriteChanOfAddr returns a DERP client for fake UDP addresses that
// represent DERP servers, creating them as necessary. For real UDP
// addresses, it returns nil.
2020-03-22 01:24:28 +00:00
//
// If peer is non-zero, it can be used to find an active reverse
// path, without using addr.
2021-10-29 21:15:27 +01:00
func ( c * Conn ) derpWriteChanOfAddr ( addr netaddr . IPPort , peer key . NodePublic ) chan <- derpWriteRequest {
2021-05-15 02:07:28 +01:00
if addr . IP ( ) != derpMagicIPAddr {
2020-02-18 21:32:04 +00:00
return nil
}
2021-05-15 02:07:28 +01:00
regionID := int ( addr . Port ( ) )
2020-03-22 21:08:59 +00:00
2020-10-06 23:22:46 +01:00
if c . networkDown ( ) {
return nil
}
2020-03-13 15:55:38 +00:00
c . mu . Lock ( )
defer c . mu . Unlock ( )
2020-05-17 17:51:38 +01:00
if ! c . wantDerpLocked ( ) || c . closed {
2020-03-04 20:21:40 +00:00
return nil
}
2020-02-28 19:13:28 +00:00
if c . privateKey . IsZero ( ) {
2020-03-24 05:11:49 +00:00
c . logf ( "magicsock: DERP lookup of %v with no private key; ignoring" , addr )
2020-02-28 19:13:28 +00:00
return nil
}
2020-03-09 22:20:33 +00:00
2020-03-22 01:24:28 +00:00
// See if we have a connection open to that DERP node ID
// first. If so, might as well use it. (It's a little
// arbitrary whether we use this one vs. the reverse route
// below when we have both.)
2020-05-17 17:51:38 +01:00
ad , ok := c . activeDerp [ regionID ]
2020-03-22 21:08:59 +00:00
if ok {
* ad . lastWrite = time . Now ( )
2020-05-17 17:51:38 +01:00
c . setPeerLastDerpLocked ( peer , regionID , regionID )
2020-03-22 21:08:59 +00:00
return ad . writeCh
}
2020-03-12 18:16:54 +00:00
2020-03-22 01:24:28 +00:00
// If we don't have an open connection to the peer's home DERP
// node, see if we have an open connection to a DERP node
// where we'd heard from that peer already. For instance,
// perhaps peer's home is Frankfurt, but they dialed our home DERP
// node in SF to reach us, so we can reply to them using our
// SF connection rather than dialing Frankfurt. (Issue 150)
2020-08-17 20:56:17 +01:00
if ! peer . IsZero ( ) && useDerpRoute ( ) {
2020-03-22 01:24:28 +00:00
if r , ok := c . derpRoute [ peer ] ; ok {
if ad , ok := c . activeDerp [ r . derpID ] ; ok && ad . c == r . dc {
2020-05-17 17:51:38 +01:00
c . setPeerLastDerpLocked ( peer , r . derpID , regionID )
2020-03-22 01:24:28 +00:00
* ad . lastWrite = time . Now ( )
return ad . writeCh
}
}
}
2020-03-24 15:09:30 +00:00
why := "home-keep-alive"
if ! peer . IsZero ( ) {
2021-10-29 21:15:27 +01:00
why = peer . ShortString ( )
2020-03-24 15:09:30 +00:00
}
2020-05-17 17:51:38 +01:00
c . logf ( "magicsock: adding connection to derp-%v for %v" , regionID , why )
2020-03-23 21:12:23 +00:00
2020-05-14 01:54:27 +01:00
firstDerp := false
2020-03-22 21:08:59 +00:00
if c . activeDerp == nil {
2020-05-14 01:54:27 +01:00
firstDerp = true
2020-03-22 21:08:59 +00:00
c . activeDerp = make ( map [ int ] activeDerp )
c . prevDerp = make ( map [ int ] * syncs . WaitGroupChan )
}
2020-05-17 17:51:38 +01:00
if c . derpMap == nil || c . derpMap . Regions [ regionID ] == nil {
2020-03-22 21:08:59 +00:00
return nil
2020-02-05 22:16:58 +00:00
}
2020-03-22 21:08:59 +00:00
2021-01-10 14:50:35 +00:00
// Note that derphttp.NewRegionClient does not dial the server
2022-01-13 21:37:26 +00:00
// (it doesn't block) so it is safe to do under the c.mu lock.
2021-10-29 21:15:27 +01:00
dc := derphttp . NewRegionClient ( c . privateKey , c . logf , func ( ) * tailcfg . DERPRegion {
2022-01-13 21:37:26 +00:00
// Warning: it is not legal to acquire
// magicsock.Conn.mu from this callback.
// It's run from derphttp.Client.connect (via Send, etc)
// and the lock ordering rules are that magicsock.Conn.mu
// must be acquired before derphttp.Client.mu.
// See https://github.com/tailscale/tailscale/issues/3726
2020-07-27 20:23:14 +01:00
if c . connCtx . Err ( ) != nil {
2022-01-13 21:37:26 +00:00
// We're closing anyway; return nil to stop dialing.
2020-07-27 20:23:14 +01:00
return nil
}
2022-01-13 21:37:26 +00:00
derpMap , _ := c . derpMapAtomic . Load ( ) . ( * tailcfg . DERPMap )
if derpMap == nil {
2020-05-17 17:51:38 +01:00
return nil
}
2022-01-13 21:37:26 +00:00
return derpMap . Regions [ regionID ]
2020-05-17 17:51:38 +01:00
} )
2020-03-22 01:24:28 +00:00
2021-03-12 17:45:37 +00:00
dc . SetCanAckPings ( true )
2020-05-17 17:51:38 +01:00
dc . NotePreferred ( c . myDerp == regionID )
2020-03-22 21:08:59 +00:00
dc . DNSCache = dnscache . Get ( )
ctx , cancel := context . WithCancel ( c . connCtx )
ch := make ( chan derpWriteRequest , bufferedDerpWritesBeforeDrop )
ad . c = dc
ad . writeCh = ch
ad . cancel = cancel
ad . lastWrite = new ( time . Time )
2020-03-23 21:12:23 +00:00
* ad . lastWrite = time . Now ( )
ad . createTime = time . Now ( )
2020-05-17 17:51:38 +01:00
c . activeDerp [ regionID ] = ad
2020-12-14 02:51:24 +00:00
metricNumDERPConns . Set ( int64 ( len ( c . activeDerp ) ) )
2020-03-23 21:12:23 +00:00
c . logActiveDerpLocked ( )
2020-05-17 17:51:38 +01:00
c . setPeerLastDerpLocked ( peer , regionID , regionID )
2021-01-19 23:29:50 +00:00
c . scheduleCleanStaleDerpLocked ( )
2020-03-22 21:08:59 +00:00
// Build a startGate for the derp reader+writer
// goroutines, so they don't start running until any
// previous generation is closed.
startGate := syncs . ClosedChan ( )
2020-05-17 17:51:38 +01:00
if prev := c . prevDerp [ regionID ] ; prev != nil {
2020-03-22 21:08:59 +00:00
startGate = prev . DoneChan ( )
}
// And register a WaitGroup(Chan) for this generation.
wg := syncs . NewWaitGroupChan ( )
wg . Add ( 2 )
2020-05-17 17:51:38 +01:00
c . prevDerp [ regionID ] = wg
2020-03-22 21:08:59 +00:00
2020-05-14 18:01:48 +01:00
if firstDerp {
startGate = c . derpStarted
go func ( ) {
dc . Connect ( ctx )
2020-05-14 01:54:27 +01:00
close ( c . derpStarted )
2020-08-04 17:36:38 +01:00
c . muCond . Broadcast ( )
2020-05-14 18:01:48 +01:00
} ( )
}
go c . runDerpReader ( ctx , addr , dc , wg , startGate )
2020-06-30 20:22:42 +01:00
go c . runDerpWriter ( ctx , dc , ch , wg , startGate )
2020-08-25 21:21:29 +01:00
go c . derpActiveFunc ( )
2020-03-22 21:08:59 +00:00
2020-03-05 16:54:08 +00:00
return ad . writeCh
2020-02-18 21:32:04 +00:00
}
2020-02-05 22:16:58 +00:00
2020-03-24 15:09:30 +00:00
// setPeerLastDerpLocked notes that peer is now being written to via
2020-05-17 17:51:38 +01:00
// the provided DERP regionID, and that the peer advertises a DERP
// home region ID of homeID.
2020-03-24 15:09:30 +00:00
//
// If there's any change, it logs.
//
2020-03-23 21:12:23 +00:00
// c.mu must be held.
2021-10-29 21:15:27 +01:00
func ( c * Conn ) setPeerLastDerpLocked ( peer key . NodePublic , regionID , homeID int ) {
2020-03-23 21:12:23 +00:00
if peer . IsZero ( ) {
return
}
old := c . peerLastDerp [ peer ]
2020-05-17 17:51:38 +01:00
if old == regionID {
2020-03-23 21:12:23 +00:00
return
}
2020-05-17 17:51:38 +01:00
c . peerLastDerp [ peer ] = regionID
2020-03-23 21:12:23 +00:00
2020-03-24 15:09:30 +00:00
var newDesc string
switch {
2020-05-17 17:51:38 +01:00
case regionID == homeID && regionID == c . myDerp :
2020-03-24 15:09:30 +00:00
newDesc = "shared home"
2020-05-17 17:51:38 +01:00
case regionID == homeID :
2020-03-24 15:09:30 +00:00
newDesc = "their home"
2020-05-17 17:51:38 +01:00
case regionID == c . myDerp :
2020-03-24 15:09:30 +00:00
newDesc = "our home"
2020-05-17 17:51:38 +01:00
case regionID != homeID :
2020-03-24 15:09:30 +00:00
newDesc = "alt"
}
if old == 0 {
2021-10-29 21:15:27 +01:00
c . logf ( "[v1] magicsock: derp route for %s set to derp-%d (%s)" , peer . ShortString ( ) , regionID , newDesc )
2020-03-24 15:09:30 +00:00
} else {
2021-10-29 21:15:27 +01:00
c . logf ( "[v1] magicsock: derp route for %s changed from derp-%d => derp-%d (%s)" , peer . ShortString ( ) , old , regionID , newDesc )
2020-03-24 15:09:30 +00:00
}
2020-03-23 21:12:23 +00:00
}
2020-02-18 21:32:04 +00:00
// derpReadResult is the type sent by runDerpClient to ReceiveIPv4
// when a DERP packet is available.
2020-03-04 17:35:32 +00:00
//
// Notably, it doesn't include the derp.ReceivedPacket because we
// don't want to give the receiver access to the aliased []byte. To
// get at the packet contents they need to call copyBuf to copy it
// out, which also releases the buffer.
2020-02-18 21:32:04 +00:00
type derpReadResult struct {
2020-06-30 20:22:42 +01:00
regionID int
2021-10-16 00:42:24 +01:00
n int // length of data received
2021-10-29 21:15:27 +01:00
src key . NodePublic
2020-02-18 21:32:04 +00:00
// copyBuf is called to copy the data to dst. It returns how
// much data was copied, which will be n if dst is large
2020-03-04 17:35:32 +00:00
// enough. copyBuf can only be called once.
2021-02-07 06:39:58 +00:00
// If copyBuf is nil, that's a signal from the sender to ignore
// this message.
2020-02-18 21:32:04 +00:00
copyBuf func ( dst [ ] byte ) int
}
// runDerpReader runs in a goroutine for the life of a DERP
// connection, handling received packets.
2020-06-30 20:22:42 +01:00
func ( c * Conn ) runDerpReader ( ctx context . Context , derpFakeAddr netaddr . IPPort , dc * derphttp . Client , wg * syncs . WaitGroupChan , startGate <- chan struct { } ) {
2020-03-12 18:16:54 +00:00
defer wg . Decr ( )
defer dc . Close ( )
select {
case <- startGate :
case <- ctx . Done ( ) :
return
}
2020-02-18 21:32:04 +00:00
didCopy := make ( chan struct { } , 1 )
2021-05-15 02:07:28 +01:00
regionID := int ( derpFakeAddr . Port ( ) )
2020-06-30 20:22:42 +01:00
res := derpReadResult { regionID : regionID }
2020-03-04 17:35:32 +00:00
var pkt derp . ReceivedPacket
res . copyBuf = func ( dst [ ] byte ) int {
n := copy ( dst , pkt . Data )
2020-02-18 21:32:04 +00:00
didCopy <- struct { } { }
return n
}
2021-02-25 05:29:51 +00:00
defer health . SetDERPRegionConnectedState ( regionID , false )
2021-09-02 03:27:22 +01:00
defer health . SetDERPRegionHealth ( regionID , "" )
2021-02-25 05:29:51 +00:00
2020-03-22 01:24:28 +00:00
// peerPresent is the set of senders we know are present on this
// connection, based on messages we've received from the server.
2021-10-29 21:15:27 +01:00
peerPresent := map [ key . NodePublic ] bool { }
2020-10-19 23:11:40 +01:00
bo := backoff . NewBackoff ( fmt . Sprintf ( "derp-%d" , regionID ) , c . logf , 5 * time . Second )
2021-02-25 05:29:51 +00:00
var lastPacketTime time . Time
2020-02-18 21:32:04 +00:00
for {
2021-03-04 17:19:45 +00:00
msg , connGen , err := dc . RecvDetail ( )
2020-02-18 21:32:04 +00:00
if err != nil {
2021-02-25 05:29:51 +00:00
health . SetDERPRegionConnectedState ( regionID , false )
2020-03-22 01:24:28 +00:00
// Forget that all these peers have routes.
for peer := range peerPresent {
delete ( peerPresent , peer )
2020-06-30 20:22:42 +01:00
c . removeDerpPeerRoute ( peer , regionID , dc )
2020-03-22 01:24:28 +00:00
}
2020-10-06 23:22:46 +01:00
if err == derphttp . ErrClientClosed {
return
}
if c . networkDown ( ) {
2020-12-21 18:58:06 +00:00
c . logf ( "[v1] magicsock: derp.Recv(derp-%d): network down, closing" , regionID )
2020-10-06 23:22:46 +01:00
return
}
2020-02-18 21:32:04 +00:00
select {
2020-02-28 19:13:28 +00:00
case <- ctx . Done ( ) :
return
2020-02-18 21:32:04 +00:00
default :
}
2020-10-19 23:11:40 +01:00
2020-06-30 20:22:42 +01:00
c . logf ( "magicsock: [%p] derp.Recv(derp-%d): %v" , dc , regionID , err )
2020-07-16 16:21:34 +01:00
2020-10-19 23:11:40 +01:00
// If our DERP connection broke, it might be because our network
// conditions changed. Start that check.
c . ReSTUN ( "derp-recv-error" )
// Back off a bit before reconnecting.
bo . BackOff ( ctx , err )
2020-07-16 15:44:57 +01:00
select {
case <- ctx . Done ( ) :
return
2020-10-19 23:11:40 +01:00
default :
2020-07-16 15:44:57 +01:00
}
2020-02-18 21:32:04 +00:00
continue
}
2020-10-19 23:11:40 +01:00
bo . BackOff ( ctx , nil ) // reset
2021-02-25 05:29:51 +00:00
now := time . Now ( )
if lastPacketTime . IsZero ( ) || now . Sub ( lastPacketTime ) > 5 * time . Second {
health . NoteDERPRegionReceivedFrame ( regionID )
lastPacketTime = now
}
2020-02-21 03:10:54 +00:00
switch m := msg . ( type ) {
2021-03-04 17:19:45 +00:00
case derp . ServerInfoMessage :
2021-02-25 05:29:51 +00:00
health . SetDERPRegionConnectedState ( regionID , true )
2021-09-02 03:27:22 +01:00
health . SetDERPRegionHealth ( regionID , "" ) // until declared otherwise
2021-03-04 17:19:45 +00:00
c . logf ( "magicsock: derp-%d connected; connGen=%v" , regionID , connGen )
continue
2020-02-21 03:10:54 +00:00
case derp . ReceivedPacket :
2020-03-04 17:35:32 +00:00
pkt = m
res . n = len ( m . Data )
2021-10-29 21:15:27 +01:00
res . src = m . Source
2020-03-04 17:35:32 +00:00
if logDerpVerbose {
2020-06-30 20:22:42 +01:00
c . logf ( "magicsock: got derp-%v packet: %q" , regionID , m . Data )
2020-03-04 17:35:32 +00:00
}
2020-03-22 01:24:28 +00:00
// If this is a new sender we hadn't seen before, remember it and
// register a route for this peer.
2021-10-28 23:42:50 +01:00
if _ , ok := peerPresent [ res . src ] ; ! ok {
peerPresent [ res . src ] = true
c . addDerpPeerRoute ( res . src , regionID , dc )
2020-03-22 01:24:28 +00:00
}
2021-03-09 20:53:02 +00:00
case derp . PingMessage :
// Best effort reply to the ping.
pingData := [ 8 ] byte ( m )
go func ( ) {
if err := dc . SendPong ( pingData ) ; err != nil {
c . logf ( "magicsock: derp-%d SendPong error: %v" , regionID , err )
}
} ( )
continue
2021-09-02 03:27:22 +01:00
case derp . HealthMessage :
health . SetDERPRegionHealth ( regionID , m . Problem )
2020-02-21 03:10:54 +00:00
default :
// Ignore.
continue
}
2021-01-18 16:39:52 +00:00
2021-03-24 16:41:57 +00:00
select {
case <- ctx . Done ( ) :
2021-02-07 05:27:02 +00:00
return
2021-03-24 16:41:57 +00:00
case c . derpRecvCh <- res :
2021-02-07 05:27:02 +00:00
}
2021-03-24 16:41:57 +00:00
2020-02-18 21:32:04 +00:00
select {
2020-03-12 18:16:54 +00:00
case <- ctx . Done ( ) :
2020-02-18 21:32:04 +00:00
return
2021-02-07 05:27:02 +00:00
case <- didCopy :
continue
2020-02-18 21:32:04 +00:00
}
}
}
type derpWriteRequest struct {
2020-06-30 20:22:42 +01:00
addr netaddr . IPPort
2021-10-29 21:15:27 +01:00
pubKey key . NodePublic
2020-03-12 19:05:32 +00:00
b [ ] byte // copied; ownership passed to receiver
2020-02-18 21:32:04 +00:00
}
// runDerpWriter runs in a goroutine for the life of a DERP
// connection, handling received packets.
2020-06-30 20:22:42 +01:00
func ( c * Conn ) runDerpWriter ( ctx context . Context , dc * derphttp . Client , ch <- chan derpWriteRequest , wg * syncs . WaitGroupChan , startGate <- chan struct { } ) {
2020-03-12 18:16:54 +00:00
defer wg . Decr ( )
select {
case <- startGate :
case <- ctx . Done ( ) :
return
}
2020-02-18 21:32:04 +00:00
for {
select {
2020-02-28 19:13:28 +00:00
case <- ctx . Done ( ) :
return
2020-02-18 21:32:04 +00:00
case wr := <- ch :
2021-10-29 21:15:27 +01:00
err := dc . Send ( wr . pubKey , wr . b )
2020-02-18 21:32:04 +00:00
if err != nil {
2020-03-07 21:11:52 +00:00
c . logf ( "magicsock: derp.Send(%v): %v" , wr . addr , err )
2021-11-16 16:34:25 +00:00
metricSendDERPError . Add ( 1 )
} else {
metricSendDERP . Add ( 1 )
2020-02-18 21:32:04 +00:00
}
}
}
2020-02-05 22:16:58 +00:00
}
2021-03-24 16:41:57 +00:00
// receiveIPv6 receives a UDP IPv6 packet. It is called by wireguard-go.
func ( c * Conn ) receiveIPv6 ( b [ ] byte ) ( int , conn . Endpoint , error ) {
2021-04-28 18:43:51 +01:00
health . ReceiveIPv6 . Enter ( )
defer health . ReceiveIPv6 . Exit ( )
2021-01-18 16:39:52 +00:00
for {
2021-02-11 21:35:06 +00:00
n , ipp , err := c . pconn6 . ReadFromNetaddr ( b )
2021-01-18 16:39:52 +00:00
if err != nil {
2021-01-15 02:06:08 +00:00
return 0 , nil , err
2020-02-18 21:32:04 +00:00
}
2021-02-11 20:39:56 +00:00
if ep , ok := c . receiveIP ( b [ : n ] , ipp , & c . ippEndpoint6 ) ; ok {
2020-12-14 02:51:24 +00:00
metricRecvDataIPv6 . Add ( 1 )
2021-01-18 16:39:52 +00:00
return n , ep , nil
2020-06-30 20:22:42 +01:00
}
2021-01-18 16:39:52 +00:00
}
}
2020-06-30 20:22:42 +01:00
2021-03-24 16:41:57 +00:00
// receiveIPv4 receives a UDP IPv4 packet. It is called by wireguard-go.
func ( c * Conn ) receiveIPv4 ( b [ ] byte ) ( n int , ep conn . Endpoint , err error ) {
2021-04-27 01:08:05 +01:00
health . ReceiveIPv4 . Enter ( )
defer health . ReceiveIPv4 . Exit ( )
2021-01-18 16:39:52 +00:00
for {
2021-03-24 16:41:57 +00:00
n , ipp , err := c . pconn4 . ReadFromNetaddr ( b )
2021-01-18 16:39:52 +00:00
if err != nil {
2021-01-15 02:06:08 +00:00
return 0 , nil , err
2020-02-18 21:32:04 +00:00
}
2021-02-11 20:39:56 +00:00
if ep , ok := c . receiveIP ( b [ : n ] , ipp , & c . ippEndpoint4 ) ; ok {
2020-12-14 02:51:24 +00:00
metricRecvDataIPv4 . Add ( 1 )
2021-01-18 16:39:52 +00:00
return n , ep , nil
2021-01-12 23:28:33 +00:00
}
2021-01-18 16:39:52 +00:00
}
}
2020-03-07 01:50:36 +00:00
2021-01-18 16:39:52 +00:00
// receiveIP is the shared bits of ReceiveIPv4 and ReceiveIPv6.
2021-02-04 02:15:01 +00:00
//
// ok is whether this read should be reported up to wireguard-go (our
// caller).
2021-09-01 03:06:04 +01:00
func ( c * Conn ) receiveIP ( b [ ] byte , ipp netaddr . IPPort , cache * ippEndpointCache ) ( ep * endpoint , ok bool ) {
2021-01-18 16:39:52 +00:00
if stun . Is ( b ) {
c . stunReceiveFunc . Load ( ) . ( func ( [ ] byte , netaddr . IPPort ) ) ( b , ipp )
2021-01-19 18:57:30 +00:00
return nil , false
2021-01-18 16:39:52 +00:00
}
2021-11-02 00:53:40 +00:00
if c . handleDiscoMessage ( b , ipp , key . NodePublic { } ) {
2021-01-19 18:57:30 +00:00
return nil , false
2021-01-18 16:39:52 +00:00
}
2021-02-04 02:15:01 +00:00
if ! c . havePrivateKey . Get ( ) {
// If we have no private key, we're logged out or
2021-03-24 16:41:57 +00:00
// stopped. Don't try to pass these wireguard packets
// up to wireguard-go; it'll just complain (issue 1167).
2021-02-04 02:15:01 +00:00
return nil , false
}
2021-01-18 23:27:44 +00:00
if cache . ipp == ipp && cache . de != nil && cache . gen == cache . de . numStopAndReset ( ) {
ep = cache . de
} else {
2021-08-26 06:15:48 +01:00
c . mu . Lock ( )
2021-08-26 06:20:31 +01:00
de , ok := c . peerMap . endpointForIPPort ( ipp )
2021-08-26 06:15:48 +01:00
c . mu . Unlock ( )
2021-08-26 03:39:20 +01:00
if ! ok {
2021-01-19 18:57:30 +00:00
return nil , false
2021-01-18 23:27:44 +00:00
}
2021-08-26 03:39:20 +01:00
cache . ipp = ipp
cache . de = de
cache . gen = de . numStopAndReset ( )
ep = de
2021-01-18 16:39:52 +00:00
}
2021-09-01 03:06:04 +01:00
ep . noteRecvActivity ( )
2021-01-18 16:39:52 +00:00
return ep , true
}
2021-03-24 16:41:57 +00:00
// receiveDERP reads a packet from c.derpRecvCh into b and returns the associated endpoint.
// It is called by wireguard-go.
2021-01-18 16:39:52 +00:00
//
// If the packet was a disco message or the peer endpoint wasn't
// found, the returned error is errLoopAgain.
2021-03-24 16:41:57 +00:00
func ( c * connBind ) receiveDERP ( b [ ] byte ) ( n int , ep conn . Endpoint , err error ) {
2021-04-27 01:08:05 +01:00
health . ReceiveDERP . Enter ( )
defer health . ReceiveDERP . Exit ( )
2021-03-24 16:41:57 +00:00
for dm := range c . derpRecvCh {
if c . Closed ( ) {
break
}
n , ep := c . processDERPReadResult ( dm , b )
if n == 0 {
// No data read occurred. Wait for another packet.
continue
}
2020-12-14 02:51:24 +00:00
metricRecvDataDERP . Add ( 1 )
2021-03-24 16:41:57 +00:00
return n , ep , nil
2021-02-07 06:39:58 +00:00
}
2021-03-24 16:41:57 +00:00
return 0 , nil , net . ErrClosed
}
2021-08-26 06:20:31 +01:00
func ( c * Conn ) processDERPReadResult ( dm derpReadResult , b [ ] byte ) ( n int , ep * endpoint ) {
2021-02-07 06:39:58 +00:00
if dm . copyBuf == nil {
2021-03-24 16:41:57 +00:00
return 0 , nil
2021-02-07 06:39:58 +00:00
}
2021-01-18 16:39:52 +00:00
var regionID int
n , regionID = dm . n , dm . regionID
ncopy := dm . copyBuf ( b )
if ncopy != n {
2021-03-24 16:41:57 +00:00
err := fmt . Errorf ( "received DERP packet of length %d that's too big for WireGuard buf size %d" , n , ncopy )
2021-01-18 16:39:52 +00:00
c . logf ( "magicsock: %v" , err )
2021-03-24 16:41:57 +00:00
return 0 , nil
2020-03-19 16:39:00 +00:00
}
2021-01-18 16:39:52 +00:00
2021-05-15 02:07:28 +01:00
ipp := netaddr . IPPortFrom ( derpMagicIPAddr , uint16 ( regionID ) )
2021-11-02 00:53:40 +00:00
if c . handleDiscoMessage ( b [ : n ] , ipp , dm . src ) {
2021-03-24 16:41:57 +00:00
return 0 , nil
2021-01-18 16:39:52 +00:00
}
2021-08-26 03:39:20 +01:00
var ok bool
2021-09-01 00:55:22 +01:00
c . mu . Lock ( )
2021-11-02 00:53:40 +00:00
ep , ok = c . peerMap . endpointForNodeKey ( dm . src )
2021-09-01 00:55:22 +01:00
c . mu . Unlock ( )
2021-08-26 03:39:20 +01:00
if ! ok {
2021-09-01 00:55:22 +01:00
// We don't know anything about this node key, nothing to
// record or process.
return 0 , nil
2021-01-18 16:39:52 +00:00
}
2021-09-01 03:06:04 +01:00
ep . noteRecvActivity ( )
2021-03-24 16:41:57 +00:00
return n , ep
2020-02-05 22:16:58 +00:00
}
2020-07-18 21:50:08 +01:00
// discoLogLevel controls the verbosity of discovery log messages.
type discoLogLevel int

const (
	// discoLog means that a message should be logged.
	discoLog discoLogLevel = iota

	// discoVerboseLog means that a message should only be logged
	// in TS_DEBUG_DISCO mode.
	discoVerboseLog
)
2021-10-16 05:44:52 +01:00
// sendDiscoMessage sends discovery message m to dstDisco at dst.
//
// If dst is a DERP IP:port, then dstKey must be non-zero.
//
// The dstKey should only be non-zero if the dstDisco key
// unambiguously maps to exactly one peer.
2021-11-02 00:53:40 +00:00
func ( c * Conn ) sendDiscoMessage ( dst netaddr . IPPort , dstKey key . NodePublic , dstDisco key . DiscoPublic , m disco . Message , logLevel discoLogLevel ) ( sent bool , err error ) {
2020-07-01 20:56:17 +01:00
c . mu . Lock ( )
2020-07-09 00:50:31 +01:00
if c . closed {
c . mu . Unlock ( )
2020-07-30 21:59:23 +01:00
return false , errConnClosed
2020-07-09 00:50:31 +01:00
}
2020-07-01 20:56:17 +01:00
var nonce [ disco . NonceLen ] byte
if _ , err := crand . Read ( nonce [ : ] ) ; err != nil {
panic ( err ) // worth dying for
}
pkt := make ( [ ] byte , 0 , 512 ) // TODO: size it correctly? pool? if it matters.
pkt = append ( pkt , disco . Magic ... )
2021-10-29 22:27:29 +01:00
pkt = c . discoPublic . AppendTo ( pkt )
2021-10-16 04:45:33 +01:00
di := c . discoInfoLocked ( dstDisco )
2020-07-01 20:56:17 +01:00
c . mu . Unlock ( )
2020-12-14 02:51:24 +00:00
isDERP := dst . IP ( ) == derpMagicIPAddr
if isDERP {
metricSendDiscoDERP . Add ( 1 )
} else {
metricSendDiscoUDP . Add ( 1 )
}
2021-10-29 22:27:29 +01:00
box := di . sharedKey . Seal ( m . AppendMarshal ( nil ) )
pkt = append ( pkt , box ... )
2021-11-02 00:53:40 +00:00
sent , err = c . sendAddr ( dst , dstKey , pkt )
2020-07-01 22:39:21 +01:00
if sent {
2020-07-18 21:50:08 +01:00
if logLevel == discoLog || ( logLevel == discoVerboseLog && debugDisco ) {
2021-10-16 05:44:52 +01:00
node := "?"
if ! dstKey . IsZero ( ) {
node = dstKey . ShortString ( )
}
c . logf ( "[v1] magicsock: disco: %v->%v (%v, %v) sent %v" , c . discoShort , dstDisco . ShortString ( ) , node , derpStr ( dst . String ( ) ) , disco . MessageSummary ( m ) )
2020-07-18 21:50:08 +01:00
}
2020-12-14 02:51:24 +00:00
if isDERP {
metricSentDiscoDERP . Add ( 1 )
} else {
metricSentDiscoUDP . Add ( 1 )
}
2021-12-20 17:29:31 +00:00
switch m . ( type ) {
case * disco . Ping :
metricSentDiscoPing . Add ( 1 )
case * disco . Pong :
metricSentDiscoPong . Add ( 1 )
case * disco . CallMeMaybe :
metricSentDiscoCallMeMaybe . Add ( 1 )
}
2020-07-01 22:39:21 +01:00
} else if err == nil {
2020-07-02 18:48:13 +01:00
// Can't send. (e.g. no IPv6 locally)
2020-07-01 22:39:21 +01:00
} else {
2020-10-06 23:22:46 +01:00
if ! c . networkDown ( ) {
c . logf ( "magicsock: disco: failed to send %T to %v: %v" , m , dst , err )
}
2020-07-01 22:39:21 +01:00
}
return sent , err
2020-07-01 20:56:17 +01:00
}
2021-02-04 02:15:01 +00:00
// handleDiscoMessage handles a discovery message and reports whether
// msg was a Tailscale inter-node discovery message.
2020-06-26 22:38:53 +01:00
//
// A discovery message has the form:
//
// * magic [6]byte
// * senderDiscoPubKey [32]byte
// * nonce [24]byte
2020-06-30 20:22:42 +01:00
// * naclbox of payload (see tailscale.com/disco package for inner payload format)
//
2021-10-16 00:42:24 +01:00
// For messages received over DERP, the src.IP() will be derpMagicIP (with
// src.Port() being the region ID) and the derpNodeSrc will be the node key
// it was received from at the DERP layer. derpNodeSrc is zero when received
// over UDP.
2021-11-02 00:53:40 +00:00
func ( c * Conn ) handleDiscoMessage ( msg [ ] byte , src netaddr . IPPort , derpNodeSrc key . NodePublic ) ( isDiscoMsg bool ) {
2021-10-30 01:35:51 +01:00
const headerLen = len ( disco . Magic ) + key . DiscoPublicRawLen
2020-07-01 20:56:17 +01:00
if len ( msg ) < headerLen || string ( msg [ : len ( disco . Magic ) ] ) != disco . Magic {
2020-06-26 22:38:53 +01:00
return false
}
2021-02-04 02:15:01 +00:00
// If the first four parts are the prefix of disco.Magic
// (0x5453f09f) then it's definitely not a valid Wireguard
// packet (which starts with little-endian uint32 1, 2, 3, 4).
// Use naked returns for all following paths.
isDiscoMsg = true
2021-10-30 01:35:51 +01:00
sender := key . DiscoPublicFromRaw32 ( mem . B ( msg [ len ( disco . Magic ) : headerLen ] ) )
2020-06-26 22:38:53 +01:00
c . mu . Lock ( )
defer c . mu . Unlock ( )
2020-07-09 00:50:31 +01:00
if c . closed {
2021-02-04 02:15:01 +00:00
return
2020-07-09 00:50:31 +01:00
}
2020-07-18 21:50:08 +01:00
if debugDisco {
2020-07-02 18:48:13 +01:00
c . logf ( "magicsock: disco: got disco-looking frame from %v" , sender . ShortString ( ) )
2020-07-01 20:56:17 +01:00
}
2020-07-30 21:48:32 +01:00
if c . privateKey . IsZero ( ) {
// Ignore disco messages when we're stopped.
2021-02-04 02:15:01 +00:00
// Still return true, to not pass it down to wireguard.
return
2020-07-30 21:48:32 +01:00
}
2020-06-26 22:38:53 +01:00
if c . discoPrivate . IsZero ( ) {
2020-07-18 21:50:08 +01:00
if debugDisco {
2020-07-01 20:56:17 +01:00
c . logf ( "magicsock: disco: ignoring disco-looking frame, no local key" )
}
2021-02-04 02:15:01 +00:00
return
2020-06-26 22:38:53 +01:00
}
2021-10-16 03:22:30 +01:00
if ! c . peerMap . anyEndpointForDiscoKey ( sender ) {
2020-12-14 02:51:24 +00:00
metricRecvDiscoBadPeer . Add ( 1 )
2020-07-18 21:50:08 +01:00
if debugDisco {
2021-09-01 00:55:22 +01:00
c . logf ( "magicsock: disco: ignoring disco-looking frame, don't know endpoint for %v" , sender . ShortString ( ) )
2020-07-01 20:56:17 +01:00
}
2021-02-04 02:15:01 +00:00
return
2020-06-26 22:38:53 +01:00
}
2021-08-26 03:39:20 +01:00
// We're now reasonably sure we're expecting communication from
// this peer, do the heavy crypto lifting to see what they want.
//
// From here on, peerNode and de are non-nil.
2020-06-26 22:38:53 +01:00
2021-10-16 04:45:33 +01:00
di := c . discoInfoLocked ( sender )
2020-06-26 22:38:53 +01:00
sealedBox := msg [ headerLen : ]
2021-10-29 22:27:29 +01:00
payload , ok := di . sharedKey . Open ( sealedBox )
2020-06-26 22:38:53 +01:00
if ! ok {
2020-06-30 21:14:41 +01:00
// This might be have been intended for a previous
// disco key. When we restart we get a new disco key
// and old packets might've still been in flight (or
// scheduled). This is particularly the case for LANs
// or non-NATed endpoints.
2020-07-01 20:56:17 +01:00
// Don't log in normal case. Pass on to wireguard, in case
2021-08-26 03:39:20 +01:00
// it's actually a wireguard packet (super unlikely,
2020-06-30 21:14:41 +01:00
// but).
2020-07-18 21:50:08 +01:00
if debugDisco {
2020-07-01 20:56:17 +01:00
c . logf ( "magicsock: disco: failed to open naclbox from %v (wrong rcpt?)" , sender )
}
2020-12-14 02:51:24 +00:00
metricRecvDiscoBadKey . Add ( 1 )
2021-02-04 02:15:01 +00:00
return
2020-06-26 22:38:53 +01:00
}
2020-06-30 20:22:42 +01:00
dm , err := disco . Parse ( payload )
2020-07-18 21:50:08 +01:00
if debugDisco {
2020-07-01 20:56:17 +01:00
c . logf ( "magicsock: disco: disco.Parse = %T, %v" , dm , err )
}
2020-06-30 20:22:42 +01:00
if err != nil {
// Couldn't parse it, but it was inside a correctly
// signed box, so just ignore it, assuming it's from a
// newer version of Tailscale that we don't
// understand. Not even worth logging about, lest it
// be too spammy for old clients.
2020-12-14 02:51:24 +00:00
metricRecvDiscoBadParse . Add ( 1 )
2021-02-04 02:15:01 +00:00
return
2020-06-30 20:22:42 +01:00
}
2020-12-14 02:51:24 +00:00
isDERP := src . IP ( ) == derpMagicIPAddr
if isDERP {
metricRecvDiscoDERP . Add ( 1 )
} else {
metricRecvDiscoUDP . Add ( 1 )
}
2020-06-30 20:22:42 +01:00
switch dm := dm . ( type ) {
case * disco . Ping :
2020-12-14 02:51:24 +00:00
metricRecvDiscoPing . Add ( 1 )
2021-10-16 05:44:52 +01:00
c . handlePingLocked ( dm , src , di , derpNodeSrc )
2020-06-30 20:22:42 +01:00
case * disco . Pong :
2020-12-14 02:51:24 +00:00
metricRecvDiscoPong . Add ( 1 )
2021-10-16 05:55:59 +01:00
// There might be multiple nodes for the sender's DiscoKey.
// Ask each to handle it, stopping once one reports that
// the Pong's TxID was theirs.
handled := false
c . peerMap . forEachEndpointWithDiscoKey ( sender , func ( ep * endpoint ) {
2021-10-17 19:31:21 +01:00
if ! handled && ep . handlePongConnLocked ( dm , di , src ) {
2021-10-16 05:55:59 +01:00
handled = true
}
} )
2021-01-20 19:39:42 +00:00
case * disco . CallMeMaybe :
2020-12-14 02:51:24 +00:00
metricRecvDiscoCallMeMaybe . Add ( 1 )
if ! isDERP || derpNodeSrc . IsZero ( ) {
2020-07-18 21:57:26 +01:00
// CallMeMaybe messages should only come via DERP.
2020-06-30 21:14:41 +01:00
c . logf ( "[unexpected] CallMeMaybe packets should only come via DERP" )
2021-02-04 02:15:01 +00:00
return
2020-06-30 20:22:42 +01:00
}
2021-11-02 00:53:40 +00:00
nodeKey := derpNodeSrc
2021-10-17 19:31:21 +01:00
ep , ok := c . peerMap . endpointForNodeKey ( nodeKey )
2021-10-16 00:42:24 +01:00
if ! ok {
2020-12-14 02:51:24 +00:00
metricRecvDiscoCallMeMaybeBadNode . Add ( 1 )
2021-10-16 00:42:24 +01:00
c . logf ( "magicsock: disco: ignoring CallMeMaybe from %v; %v is unknown" , sender . ShortString ( ) , derpNodeSrc . ShortString ( ) )
return
}
if ! ep . canP2P ( ) {
return
}
2021-10-17 19:31:21 +01:00
if ep . discoKey != di . discoKey {
2020-12-14 02:51:24 +00:00
metricRecvDiscoCallMeMaybeBadDisco . Add ( 1 )
2021-10-17 19:31:21 +01:00
c . logf ( "[unexpected] CallMeMaybe from peer via DERP whose netmap discokey != disco source" )
return
}
di . setNodeKey ( nodeKey )
2021-08-26 03:39:20 +01:00
c . logf ( "[v1] magicsock: disco: %v<-%v (%v, %v) got call-me-maybe, %d endpoints" ,
2021-09-01 00:55:22 +01:00
c . discoShort , ep . discoShort ,
ep . publicKey . ShortString ( ) , derpStr ( src . String ( ) ) ,
2021-08-26 03:39:20 +01:00
len ( dm . MyNumber ) )
2021-09-01 00:55:22 +01:00
go ep . handleCallMeMaybe ( dm )
2020-06-30 20:22:42 +01:00
}
2021-02-04 02:15:01 +00:00
return
2020-06-26 22:38:53 +01:00
}
2021-10-17 19:31:21 +01:00
// unambiguousNodeKeyOfPingLocked attempts to look up an unambiguous mapping
// from a DiscoKey dk (which sent ping dm) to a NodeKey. ok is true
// if there's the NodeKey is known unambiguously.
//
// derpNodeSrc is non-zero if the disco ping arrived via DERP.
//
// c.mu must be held.
2021-11-02 00:53:40 +00:00
func ( c * Conn ) unambiguousNodeKeyOfPingLocked ( dm * disco . Ping , dk key . DiscoPublic , derpNodeSrc key . NodePublic ) ( nk key . NodePublic , ok bool ) {
2021-10-17 19:31:21 +01:00
if ! derpNodeSrc . IsZero ( ) {
if ep , ok := c . peerMap . endpointForNodeKey ( derpNodeSrc ) ; ok && ep . discoKey == dk {
return derpNodeSrc , true
}
}
// Pings after 1.16.0 contains its node source. See if it maps back.
if ! dm . NodeKey . IsZero ( ) {
2021-11-02 00:53:40 +00:00
if ep , ok := c . peerMap . endpointForNodeKey ( dm . NodeKey ) ; ok && ep . discoKey == dk {
return dm . NodeKey , true
2021-10-17 19:31:21 +01:00
}
}
// If there's exactly 1 node in our netmap with DiscoKey dk,
// then it's not ambiguous which node key dm was from.
if set := c . peerMap . nodesOfDisco [ dk ] ; len ( set ) == 1 {
for nk = range set {
return nk , true
}
}
return nk , false
}
2021-10-16 04:45:33 +01:00
// di is the discoInfo of the source of the ping.
// derpNodeSrc is non-zero if the ping arrived via DERP.
2021-11-02 00:53:40 +00:00
func ( c * Conn ) handlePingLocked ( dm * disco . Ping , src netaddr . IPPort , di * discoInfo , derpNodeSrc key . NodePublic ) {
2021-10-16 04:45:33 +01:00
likelyHeartBeat := src == di . lastPingFrom && time . Since ( di . lastPingTime ) < 5 * time . Second
di . lastPingFrom = src
di . lastPingTime = time . Now ( )
2021-10-18 21:29:09 +01:00
isDerp := src . IP ( ) == derpMagicIPAddr
// If we can figure out with certainty which node key this disco
// message is for, eagerly update our IP<>node and disco<>node
// mappings to make p2p path discovery faster in simple
// cases. Without this, disco would still work, but would be
// reliant on DERP call-me-maybe to establish the disco<>node
// mapping, and on subsequent disco handlePongLocked to establish
// the IP<>disco mapping.
2021-10-17 19:31:21 +01:00
if nk , ok := c . unambiguousNodeKeyOfPingLocked ( dm , di . discoKey , derpNodeSrc ) ; ok {
di . setNodeKey ( nk )
2021-10-18 21:29:09 +01:00
if ! isDerp {
c . peerMap . setNodeKeyForIPPort ( src , nk )
}
2021-10-17 19:31:21 +01:00
}
2021-10-16 15:43:48 +01:00
2021-10-16 05:44:52 +01:00
// If we got a ping over DERP, then derpNodeSrc is non-zero and we reply
// over DERP (in which case ipDst is also a DERP address).
// But if the ping was over UDP (ipDst is not a DERP address), then dstKey
// will be zero here, but that's fine: sendDiscoMessage only requires
// a dstKey if the dst ip:port is DERP.
dstKey := derpNodeSrc
2020-07-02 16:37:46 +01:00
2020-07-03 20:43:39 +01:00
// Remember this route if not present.
2021-10-16 05:44:52 +01:00
var numNodes int
2021-10-16 15:43:48 +01:00
if isDerp {
2021-10-16 05:44:52 +01:00
if ep , ok := c . peerMap . endpointForNodeKey ( derpNodeSrc ) ; ok {
ep . addCandidateEndpoint ( src )
numNodes = 1
}
} else {
c . peerMap . forEachEndpointWithDiscoKey ( di . discoKey , func ( ep * endpoint ) {
ep . addCandidateEndpoint ( src )
numNodes ++
if numNodes == 1 && dstKey . IsZero ( ) {
dstKey = ep . publicKey
}
} )
if numNodes > 1 {
// Zero it out if it's ambiguous, so sendDiscoMessage logging
// isn't confusing.
2021-11-02 00:53:40 +00:00
dstKey = key . NodePublic { }
2021-10-16 05:44:52 +01:00
}
}
if numNodes == 0 {
c . logf ( "[unexpected] got disco ping from %v/%v for node not in peers" , src , derpNodeSrc )
return
}
if ! likelyHeartBeat || debugDisco {
pingNodeSrcStr := dstKey . ShortString ( )
if numNodes > 1 {
pingNodeSrcStr = "[one-of-multi]"
}
c . logf ( "[v1] magicsock: disco: %v<-%v (%v, %v) got ping tx=%x" , c . discoShort , di . discoShort , pingNodeSrcStr , src , dm . TxID [ : 6 ] )
}
2020-07-02 16:37:46 +01:00
2020-07-23 23:15:28 +01:00
ipDst := src
2021-10-16 04:45:33 +01:00
discoDest := di . discoKey
2021-10-16 05:44:52 +01:00
go c . sendDiscoMessage ( ipDst , dstKey , discoDest , & disco . Pong {
2020-07-02 16:37:46 +01:00
TxID : dm . TxID ,
Src : src ,
2020-07-18 21:50:08 +01:00
} , discoVerboseLog )
2020-07-02 16:37:46 +01:00
}
2021-01-20 20:41:25 +00:00
// enqueueCallMeMaybe schedules a send of disco.CallMeMaybe to de via derpAddr
// once we know that our STUN endpoint is fresh.
//
// derpAddr is de.derpAddr at the time of send. It's assumed the peer won't be
// flipping primary DERPs in the 0-30ms it takes to confirm our STUN endpoint.
// If they do, traffic will just go over DERP for a bit longer until the next
// discovery round.
2021-08-26 06:20:31 +01:00
func ( c * Conn ) enqueueCallMeMaybe ( derpAddr netaddr . IPPort , de * endpoint ) {
2021-01-20 20:41:25 +00:00
c . mu . Lock ( )
defer c . mu . Unlock ( )
2021-01-20 17:52:24 +00:00
if ! c . lastEndpointsTime . After ( time . Now ( ) . Add ( - endpointsFreshEnoughDuration ) ) {
c . logf ( "magicsock: want call-me-maybe but endpoints stale; restunning" )
if c . onEndpointRefreshed == nil {
2021-08-26 06:20:31 +01:00
c . onEndpointRefreshed = map [ * endpoint ] func ( ) { }
2021-01-20 17:52:24 +00:00
}
c . onEndpointRefreshed [ de ] = func ( ) {
c . logf ( "magicsock: STUN done; sending call-me-maybe to %v %v" , de . discoShort , de . publicKey . ShortString ( ) )
c . enqueueCallMeMaybe ( derpAddr , de )
}
// TODO(bradfitz): make a new 'reSTUNQuickly' method
// that passes down a do-a-lite-netcheck flag down to
// netcheck that does 1 (or 2 max) STUN queries
// (UDP-only, not HTTPs) to find our port mapping to
// our home DERP and maybe one other. For now we do a
// "full" ReSTUN which may or may not be a full one
// (depending on age) and may do HTTPS timing queries
// (if UDP is blocked). Good enough for now.
go c . ReSTUN ( "refresh-for-peering" )
return
}
2021-01-20 20:41:25 +00:00
eps := make ( [ ] netaddr . IPPort , 0 , len ( c . lastEndpoints ) )
for _ , ep := range c . lastEndpoints {
tailcfg: add Endpoint, EndpointType, MapRequest.EndpointType
Track endpoints internally with a new tailcfg.Endpoint type that
includes a typed netaddr.IPPort (instead of just a string) and
includes a type for how that endpoint was discovered (STUN, local,
etc).
Use []tailcfg.Endpoint instead of []string internally.
At the last second, send it to the control server as the existing
[]string for endpoints, but also include a new parallel
MapRequest.EndpointType []tailcfg.EndpointType, so the control server
can start filtering out less-important endpoint changes from
new-enough clients. Notably, STUN-discovered endpoints can be filtered
out from 1.6+ clients, as they can discover them amongst each other
via CallMeMaybe disco exchanges started over DERP. And STUN endpoints
change a lot, causing a lot of MapResposne updates. But portmapped
endpoints are worth keeping for now, as they they work right away
without requiring the firewall traversal extra RTT dance.
End result will be less control->client bandwidth. (despite negligible
increase in client->control bandwidth)
Updates tailscale/corp#1543
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2021-04-12 21:24:29 +01:00
eps = append ( eps , ep . Addr )
2021-01-20 20:41:25 +00:00
}
2021-11-18 01:30:27 +00:00
go de . c . sendDiscoMessage ( derpAddr , de . publicKey , de . discoKey , & disco . CallMeMaybe { MyNumber : eps } , discoLog )
2021-01-20 20:41:25 +00:00
}
2021-10-16 04:45:33 +01:00
// discoInfoLocked returns the previous or new discoInfo for k.
//
// c.mu must be held.
2021-10-29 22:27:29 +01:00
func ( c * Conn ) discoInfoLocked ( k key . DiscoPublic ) * discoInfo {
2021-10-16 04:45:33 +01:00
di , ok := c . discoInfo [ k ]
if ! ok {
di = & discoInfo {
discoKey : k ,
discoShort : k . ShortString ( ) ,
2021-10-29 22:27:29 +01:00
sharedKey : c . discoPrivate . Shared ( k ) ,
2021-10-16 04:45:33 +01:00
}
c . discoInfo [ k ] = di
2020-06-29 22:26:25 +01:00
}
2021-10-16 04:45:33 +01:00
return di
2020-06-29 22:26:25 +01:00
}
2020-10-06 23:22:46 +01:00
func ( c * Conn ) SetNetworkUp ( up bool ) {
c . mu . Lock ( )
defer c . mu . Unlock ( )
if c . networkUp . Get ( ) == up {
return
}
c . logf ( "magicsock: SetNetworkUp(%v)" , up )
c . networkUp . Set ( up )
2021-02-10 18:04:42 +00:00
if up {
c . startDerpHomeConnectLocked ( )
} else {
2021-02-20 06:15:41 +00:00
c . portMapper . NoteNetworkDown ( )
2020-10-06 23:22:46 +01:00
c . closeAllDerpLocked ( "network-down" )
}
}
2021-06-22 21:00:40 +01:00
// SetPreferredPort sets the connection's preferred local port.
//
// If the port changed, the sockets are rebound to the new port and
// endpoint state is reset so peers re-discover our new address.
func (c *Conn) SetPreferredPort(port uint16) {
	if uint16(c.port.Get()) == port {
		return
	}
	c.port.Set(uint32(port))

	if err := c.rebind(dropCurrentPort); err != nil {
		// %v, not %w: logf is not fmt.Errorf, so %w would render
		// as %!w(...) rather than the error text.
		c.logf("%v", err)
		return
	}
	c.resetEndpointStates()
}
2020-03-02 17:31:25 +00:00
// SetPrivateKey sets the connection's private key.
//
// This is only used to be able prove our identity when connecting to
// DERP servers.
//
// If the private key changes, any DERP connections are torn down &
// recreated when needed.
2021-10-28 19:07:25 +01:00
func ( c * Conn ) SetPrivateKey ( privateKey key . NodePrivate ) error {
2020-03-13 15:55:38 +00:00
c . mu . Lock ( )
defer c . mu . Unlock ( )
2020-02-28 19:13:28 +00:00
2021-10-29 21:15:27 +01:00
oldKey , newKey := c . privateKey , privateKey
if newKey . Equal ( oldKey ) {
2020-02-28 19:13:28 +00:00
return nil
}
c . privateKey = newKey
2021-02-04 02:15:01 +00:00
c . havePrivateKey . Set ( ! newKey . IsZero ( ) )
2020-02-28 19:13:28 +00:00
2021-10-16 22:55:26 +01:00
if newKey . IsZero ( ) {
2021-11-02 00:53:40 +00:00
c . publicKeyAtomic . Store ( key . NodePublic { } )
2021-10-16 22:55:26 +01:00
} else {
2021-11-02 00:53:40 +00:00
c . publicKeyAtomic . Store ( newKey . Public ( ) )
2021-10-16 22:55:26 +01:00
}
2020-03-24 15:09:30 +00:00
if oldKey . IsZero ( ) {
2020-07-28 00:26:33 +01:00
c . everHadKey = true
2020-03-24 15:09:30 +00:00
c . logf ( "magicsock: SetPrivateKey called (init)" )
2021-09-01 02:09:52 +01:00
go c . ReSTUN ( "set-private-key" )
2020-07-27 18:19:05 +01:00
} else if newKey . IsZero ( ) {
c . logf ( "magicsock: SetPrivateKey called (zeroed)" )
c . closeAllDerpLocked ( "zero-private-key" )
2021-01-20 17:52:24 +00:00
c . stopPeriodicReSTUNTimerLocked ( )
c . onEndpointRefreshed = nil
2020-03-24 15:09:30 +00:00
} else {
2020-07-27 18:19:05 +01:00
c . logf ( "magicsock: SetPrivateKey called (changed)" )
c . closeAllDerpLocked ( "new-private-key" )
2020-03-24 15:09:30 +00:00
}
// Key changed. Close existing DERP connections and reconnect to home.
2020-07-27 18:19:05 +01:00
if c . myDerp != 0 && ! newKey . IsZero ( ) {
2020-04-09 22:21:36 +01:00
c . logf ( "magicsock: private key changed, reconnecting to home derp-%d" , c . myDerp )
2021-02-10 18:04:42 +00:00
c . startDerpHomeConnectLocked ( )
2020-03-24 15:09:30 +00:00
}
2020-03-02 17:31:25 +00:00
2020-07-30 21:48:32 +01:00
if newKey . IsZero ( ) {
2021-10-16 06:25:29 +01:00
c . peerMap . forEachEndpoint ( func ( ep * endpoint ) {
2021-08-26 03:39:20 +01:00
ep . stopAndReset ( )
} )
2020-07-30 21:48:32 +01:00
}
2020-03-02 17:31:25 +00:00
return nil
}
2020-04-18 16:48:01 +01:00
// UpdatePeers is called when the set of WireGuard peers changes. It
// then removes any state for old peers.
//
// The caller passes ownership of newPeers map to UpdatePeers.
2021-10-29 21:15:27 +01:00
func ( c * Conn ) UpdatePeers ( newPeers map [ key . NodePublic ] struct { } ) {
2020-04-18 16:48:01 +01:00
c . mu . Lock ( )
defer c . mu . Unlock ( )
oldPeers := c . peerSet
c . peerSet = newPeers
2021-10-29 21:15:27 +01:00
// Clean up any key.NodePublic-keyed maps for peers that no longer
2020-04-18 16:48:01 +01:00
// exist.
for peer := range oldPeers {
if _ , ok := newPeers [ peer ] ; ! ok {
delete ( c . derpRoute , peer )
delete ( c . peerLastDerp , peer )
}
}
2020-04-28 21:41:18 +01:00
if len ( oldPeers ) == 0 && len ( newPeers ) > 0 {
go c . ReSTUN ( "non-zero-peers" )
}
2020-04-18 16:48:01 +01:00
}
2020-05-17 17:51:38 +01:00
// SetDERPMap controls which (if any) DERP servers are used.
// A nil value means to disable DERP; it's disabled by default.
func ( c * Conn ) SetDERPMap ( dm * tailcfg . DERPMap ) {
2020-03-13 15:55:38 +00:00
c . mu . Lock ( )
defer c . mu . Unlock ( )
2020-03-04 20:21:40 +00:00
2020-05-17 17:51:38 +01:00
if reflect . DeepEqual ( dm , c . derpMap ) {
return
}
2022-01-13 21:37:26 +00:00
c . derpMapAtomic . Store ( dm )
2022-01-04 22:59:11 +00:00
old := c . derpMap
2020-05-17 17:51:38 +01:00
c . derpMap = dm
if dm == nil {
2020-03-23 21:12:23 +00:00
c . closeAllDerpLocked ( "derp-disabled" )
2020-05-17 17:51:38 +01:00
return
2020-03-04 20:21:40 +00:00
}
2020-05-17 17:51:38 +01:00
2022-01-04 22:59:11 +00:00
// Reconnect any DERP region that changed definitions.
if old != nil {
changes := false
for rid , oldDef := range old . Regions {
if reflect . DeepEqual ( oldDef , dm . Regions [ rid ] ) {
continue
}
changes = true
if rid == c . myDerp {
c . myDerp = 0
}
c . closeDerpLocked ( rid , "derp-region-redefined" )
}
if changes {
c . logActiveDerpLocked ( )
}
}
2021-09-01 02:09:52 +01:00
go c . ReSTUN ( "derp-map-update" )
2020-03-04 20:21:40 +00:00
}
2020-07-26 03:37:08 +01:00
// nodesEqual reports whether x and y contain equal nodes in the same
// order.
func nodesEqual(x, y []*tailcfg.Node) bool {
	if len(x) != len(y) {
		return false
	}
	for i, xn := range x {
		if !xn.Equal(y[i]) {
			return false
		}
	}
	return true
}
2020-06-25 19:04:52 +01:00
// SetNetworkMap is called when the control client gets a new network
// map from the control server. It must always be non-nil.
//
// It should not use the DERPMap field of NetworkMap; that's
// conditionally sent to SetDERPMap instead.
func (c *Conn) SetNetworkMap(nm *netmap.NetworkMap) {
	c.mu.Lock()
	defer c.mu.Unlock()

	if c.closed {
		return
	}

	// Skip all the work below if the peer set is unchanged.
	if c.netMap != nil && nodesEqual(c.netMap.Peers, nm.Peers) {
		return
	}

	// Count peers lacking a disco key; they can only be reached via DERP.
	numNoDisco := 0
	for _, n := range nm.Peers {
		if n.DiscoKey.IsZero() {
			numNoDisco++
		}
	}

	metricNumPeers.Set(int64(len(nm.Peers)))

	c.logf("[v1] magicsock: got updated network map; %d peers", len(nm.Peers))
	if numNoDisco != 0 {
		c.logf("[v1] magicsock: %d DERP-only peers (no discokey)", numNoDisco)
	}
	c.netMap = nm

	// Try a pass of just upserting nodes and creating missing
	// endpoints. If the set of nodes is the same, this is an
	// efficient alloc-free update. If the set of nodes is different,
	// we'll fall through to the next pass, which allocates but can
	// handle full set updates.
	for _, n := range nm.Peers {
		if ep, ok := c.peerMap.endpointForNodeKey(n.Key); ok {
			oldDiscoKey := ep.discoKey
			ep.updateFromNode(n)
			c.peerMap.upsertEndpoint(ep, oldDiscoKey) // maybe update discokey mappings in peerMap
			continue
		}

		// New peer: build an endpoint from scratch.
		ep := &endpoint{
			c:             c,
			publicKey:     n.Key,
			sentPing:      map[stun.TxID]sentPing{},
			endpointState: map[netaddr.IPPort]*endpointState{},
		}
		if !n.DiscoKey.IsZero() {
			ep.discoKey = n.DiscoKey
			ep.discoShort = n.DiscoKey.ShortString()
		}
		ep.wgEndpoint = n.Key.UntypedHexString()
		ep.initFakeUDPAddr()
		if debugDisco { // rather than making a new knob
			c.logf("magicsock: created endpoint key=%s: disco=%s; %v", n.Key.ShortString(), n.DiscoKey.ShortString(), logger.ArgWriter(func(w *bufio.Writer) {
				const derpPrefix = "127.3.3.40:"
				if strings.HasPrefix(n.DERP, derpPrefix) {
					ipp, _ := netaddr.ParseIPPort(n.DERP)
					regionID := int(ipp.Port())
					code := c.derpRegionCodeLocked(regionID)
					if code != "" {
						code = "(" + code + ")"
					}
					fmt.Fprintf(w, "derp=%v%s ", regionID, code)
				}

				for _, a := range n.AllowedIPs {
					if a.IsSingleIP() {
						fmt.Fprintf(w, "aip=%v ", a.IP())
					} else {
						fmt.Fprintf(w, "aip=%v ", a)
					}
				}
				for _, ep := range n.Endpoints {
					fmt.Fprintf(w, "ep=%v ", ep)
				}
			}))
		}
		ep.updateFromNode(n)
		c.peerMap.upsertEndpoint(ep, key.DiscoPublic{}) // new endpoint: no old disco key to migrate
	}

	// If the set of nodes changed since the last SetNetworkMap, the
	// upsert loop just above made c.peerMap contain the union of the
	// old and new peers - which will be larger than the set from the
	// current netmap. If that happens, go through the allocful
	// deletion path to clean up moribund nodes.
	if c.peerMap.nodeCount() != len(nm.Peers) {
		keep := make(map[key.NodePublic]bool, len(nm.Peers))
		for _, n := range nm.Peers {
			keep[n.Key] = true
		}
		c.peerMap.forEachEndpoint(func(ep *endpoint) {
			if !keep[ep.publicKey] {
				c.peerMap.deleteEndpoint(ep)
			}
		})
	}

	// discokeys might have changed in the above. Discard unused info.
	for dk := range c.discoInfo {
		if !c.peerMap.anyEndpointForDiscoKey(dk) {
			delete(c.discoInfo, dk)
		}
	}
}
2020-05-17 17:51:38 +01:00
func ( c * Conn ) wantDerpLocked ( ) bool { return c . derpMap != nil }
2020-03-13 15:55:38 +00:00
// closeAllDerpLocked closes all active DERP connections, logging why
// for each, then logs the (now empty) active set.
//
// c.mu must be held.
func (c *Conn) closeAllDerpLocked(why string) {
	if len(c.activeDerp) == 0 {
		return // without the useless log statement
	}
	for i := range c.activeDerp {
		c.closeDerpLocked(i, why)
	}
	c.logActiveDerpLocked()
}
2021-12-29 02:01:50 +00:00
// maybeCloseDERPsOnRebind, in response to a rebind, closes all
// DERP connections that don't have a local address in okayLocalIPs
// and pings all those that do.
func (c *Conn) maybeCloseDERPsOnRebind(okayLocalIPs []netaddr.IPPrefix) {
	c.mu.Lock()
	defer c.mu.Unlock()
	for regionID, ad := range c.activeDerp {
		la, err := ad.c.LocalAddr()
		if err != nil {
			c.closeOrReconectDERPLocked(regionID, "rebind-no-localaddr")
			continue
		}
		if !tsaddr.PrefixesContainsIP(okayLocalIPs, la.IP()) {
			c.closeOrReconectDERPLocked(regionID, "rebind-default-route-change")
			continue
		}
		// Connection still looks usable; health-check it asynchronously
		// so we don't hold c.mu across network I/O.
		regionID := regionID // shadow for the goroutine below
		dc := ad.c
		go func() {
			ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
			defer cancel()
			if err := dc.Ping(ctx); err != nil {
				// Ping failed: retake the lock and close (and maybe
				// reconnect) this region.
				c.mu.Lock()
				defer c.mu.Unlock()
				c.closeOrReconectDERPLocked(regionID, "rebind-ping-fail")
				return
			}
			c.logf("post-rebind ping of DERP region %d okay", regionID)
		}()
	}
	c.logActiveDerpLocked()
}
// closeOrReconectDERPLocked closes the DERP connection to the
// provided regionID and starts reconnecting it if it's our current
// home DERP.
// (The "Reconect" spelling is a long-standing typo, kept to avoid churn.)
//
// why is a reason for logging.
//
// c.mu must be held.
func (c *Conn) closeOrReconectDERPLocked(regionID int, why string) {
	c.closeDerpLocked(regionID, why)
	// Only reconnect when we're running (have a key) and it was our home.
	if !c.privateKey.IsZero() && c.myDerp == regionID {
		c.startDerpHomeConnectLocked()
	}
}
2020-03-13 15:55:38 +00:00
// closeDerpLocked closes the DERP connection to regionID, if any,
// logging why and the connection's age.
//
// c.mu must be held.
// It is the responsibility of the caller to call logActiveDerpLocked after any set of closes.
func (c *Conn) closeDerpLocked(regionID int, why string) {
	if ad, ok := c.activeDerp[regionID]; ok {
		c.logf("magicsock: closing connection to derp-%v (%v), age %v", regionID, why, time.Since(ad.createTime).Round(time.Second))
		go ad.c.Close() // Close may block on I/O; run it off the lock
		ad.cancel()
		delete(c.activeDerp, regionID)
		metricNumDERPConns.Set(int64(len(c.activeDerp)))
	}
}
2020-03-23 21:12:23 +00:00
// logActiveDerpLocked logs the number of active DERP connections and,
// if any, a per-region summary of connection age and time since last write.
//
// c.mu must be held.
func (c *Conn) logActiveDerpLocked() {
	now := time.Now()
	c.logf("magicsock: %v active derp conns%s", len(c.activeDerp), logger.ArgWriter(func(buf *bufio.Writer) {
		if len(c.activeDerp) == 0 {
			return
		}
		buf.WriteString(":")
		c.foreachActiveDerpSortedLocked(func(node int, ad activeDerp) {
			// cr = time since created, wr = time since last write.
			fmt.Fprintf(buf, " derp-%d=cr%v,wr%v", node, simpleDur(now.Sub(ad.createTime)), simpleDur(now.Sub(*ad.lastWrite)))
		})
	}))
}
tailcfg: add Endpoint, EndpointType, MapRequest.EndpointType
Track endpoints internally with a new tailcfg.Endpoint type that
includes a typed netaddr.IPPort (instead of just a string) and
includes a type for how that endpoint was discovered (STUN, local,
etc).
Use []tailcfg.Endpoint instead of []string internally.
At the last second, send it to the control server as the existing
[]string for endpoints, but also include a new parallel
MapRequest.EndpointType []tailcfg.EndpointType, so the control server
can start filtering out less-important endpoint changes from
new-enough clients. Notably, STUN-discovered endpoints can be filtered
out from 1.6+ clients, as they can discover them amongst each other
via CallMeMaybe disco exchanges started over DERP. And STUN endpoints
change a lot, causing a lot of MapResponse updates. But portmapped
endpoints are worth keeping for now, as they work right away
without requiring the firewall traversal extra RTT dance.
End result will be less control->client bandwidth. (despite negligible
increase in client->control bandwidth)
Updates tailscale/corp#1543
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2021-04-12 21:24:29 +01:00
func ( c * Conn ) logEndpointChange ( endpoints [ ] tailcfg . Endpoint ) {
2020-05-31 23:29:04 +01:00
c . logf ( "magicsock: endpoints changed: %s" , logger . ArgWriter ( func ( buf * bufio . Writer ) {
for i , ep := range endpoints {
if i > 0 {
buf . WriteString ( ", " )
}
tailcfg: add Endpoint, EndpointType, MapRequest.EndpointType
Track endpoints internally with a new tailcfg.Endpoint type that
includes a typed netaddr.IPPort (instead of just a string) and
includes a type for how that endpoint was discovered (STUN, local,
etc).
Use []tailcfg.Endpoint instead of []string internally.
At the last second, send it to the control server as the existing
[]string for endpoints, but also include a new parallel
MapRequest.EndpointType []tailcfg.EndpointType, so the control server
can start filtering out less-important endpoint changes from
new-enough clients. Notably, STUN-discovered endpoints can be filtered
out from 1.6+ clients, as they can discover them amongst each other
via CallMeMaybe disco exchanges started over DERP. And STUN endpoints
change a lot, causing a lot of MapResposne updates. But portmapped
endpoints are worth keeping for now, as they they work right away
without requiring the firewall traversal extra RTT dance.
End result will be less control->client bandwidth. (despite negligible
increase in client->control bandwidth)
Updates tailscale/corp#1543
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>
2021-04-12 21:24:29 +01:00
fmt . Fprintf ( buf , "%s (%s)" , ep . Addr , ep . Type )
2020-03-24 15:09:30 +00:00
}
2020-05-31 23:29:04 +01:00
} ) )
2020-03-24 15:09:30 +00:00
}
2020-03-23 21:12:23 +00:00
// foreachActiveDerpSortedLocked invokes fn on every active DERP
// connection in ascending region-ID order.
//
// c.mu must be held.
func (c *Conn) foreachActiveDerpSortedLocked(fn func(regionID int, ad activeDerp)) {
	if len(c.activeDerp) < 2 {
		// Zero or one entry: map iteration order is already "sorted".
		for id, ad := range c.activeDerp {
			fn(id, ad)
		}
		return
	}
	ids := make([]int, 0, len(c.activeDerp))
	for id := range c.activeDerp {
		ids = append(ids, id)
	}
	sort.Ints(ids)
	for _, id := range ids {
		fn(id, c.activeDerp[id])
	}
}
2020-03-05 20:47:54 +00:00
// cleanStaleDerp closes non-home DERP connections that have been idle
// too long and, if any non-home connection remains open, re-arms the
// cleanup timer. It runs from derpCleanupTimer.
func (c *Conn) cleanStaleDerp() {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.closed {
		return
	}
	c.derpCleanupTimerArmed = false

	tooOld := time.Now().Add(-derpInactiveCleanupTime)
	dirty := false
	someNonHomeOpen := false
	for i, ad := range c.activeDerp {
		if i == c.myDerp {
			// Never close our home DERP connection.
			continue
		}
		if ad.lastWrite.Before(tooOld) {
			c.closeDerpLocked(i, "idle")
			dirty = true
		} else {
			someNonHomeOpen = true
		}
	}
	if dirty {
		c.logActiveDerpLocked()
	}
	if someNonHomeOpen {
		c.scheduleCleanStaleDerpLocked()
	}
}
// scheduleCleanStaleDerpLocked arms (or re-arms) the timer that runs
// cleanStaleDerp after derpCleanStaleInterval.
//
// c.mu must be held.
func (c *Conn) scheduleCleanStaleDerpLocked() {
	if c.derpCleanupTimerArmed {
		// Already going to fire soon. Let the existing one
		// fire lest it get infinitely delayed by repeated
		// calls to scheduleCleanStaleDerpLocked.
		return
	}
	c.derpCleanupTimerArmed = true
	if c.derpCleanupTimer != nil {
		c.derpCleanupTimer.Reset(derpCleanStaleInterval)
	} else {
		c.derpCleanupTimer = time.AfterFunc(derpCleanStaleInterval, c.cleanStaleDerp)
	}
}
2020-03-19 06:55:14 +00:00
// DERPs reports the number of active DERP connections.
func (c *Conn) DERPs() int {
	c.mu.Lock()
	defer c.mu.Unlock()

	return len(c.activeDerp)
}
2021-03-24 16:41:57 +00:00
// Bind returns the wireguard-go conn.Bind for c.
func (c *Conn) Bind() conn.Bind {
	return c.bind
}
// connBind is a wireguard-go conn.Bind for a Conn.
// It bridges the behavior of wireguard-go and a Conn.
// wireguard-go calls Close then Open on device.Up.
// That won't work well for a Conn, which is only closed on shutdown.
// The subsequent Close is a real close.
type connBind struct {
	*Conn // provides the receive funcs and sockets

	mu     sync.Mutex // guards closed
	closed bool       // whether wireguard-go considers the bind closed
}
// Open is called by WireGuard to create a UDP binding.
// The ignoredPort comes from wireguard-go, via the wgcfg config.
// We ignore that port value here, since we have the local port available easily.
func (c *connBind) Open(ignoredPort uint16) ([]conn.ReceiveFunc, uint16, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if !c.closed {
		return nil, 0, errors.New("magicsock: connBind already open")
	}
	c.closed = false
	fns := []conn.ReceiveFunc{c.receiveIPv4, c.receiveIPv6, c.receiveDERP}
	if runtime.GOOS == "js" {
		// No native UDP under js/wasm; DERP is the only receive path.
		fns = []conn.ReceiveFunc{c.receiveDERP}
	}
	// TODO: Combine receiveIPv4 and receiveIPv6 and receiveIP into a single
	// closure that closes over a *RebindingUDPConn?
	return fns, c.LocalPort(), nil
}
// SetMark is used by wireguard-go to set a mark bit for packets to avoid routing loops.
// We handle that ourselves elsewhere.
func (c *connBind) SetMark(value uint32) error {
	return nil
}
2021-04-03 02:36:24 +01:00
// Close closes the connBind, unless it is already closed.
func (c *connBind) Close() error {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.closed {
		return nil
	}
	c.closed = true
	// Unblock all outstanding receives.
	if c.pconn4 != nil {
		c.pconn4.Close()
	}
	if c.pconn6 != nil {
		c.pconn6.Close()
	}
	// Send an empty read result to unblock receiveDERP,
	// which will then check connBind.Closed.
	c.derpRecvCh <- derpReadResult{}
	return nil
}
// Closed reports whether c is closed.
func (c *connBind) Closed() bool {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.closed
}
2020-02-05 22:16:58 +00:00
2020-03-13 15:55:38 +00:00
// Close closes the connection.
//
// Only the first close does anything. Any later closes return nil.
2020-02-05 22:16:58 +00:00
func ( c * Conn ) Close ( ) error {
2020-03-13 15:55:38 +00:00
c . mu . Lock ( )
2021-01-15 00:51:17 +00:00
defer c . mu . Unlock ( )
2020-03-13 15:55:38 +00:00
if c . closed {
2020-02-18 21:32:04 +00:00
return nil
}
2021-01-19 23:29:50 +00:00
if c . derpCleanupTimerArmed {
c . derpCleanupTimer . Stop ( )
}
2021-01-20 17:52:24 +00:00
c . stopPeriodicReSTUNTimerLocked ( )
2021-02-20 06:15:41 +00:00
c . portMapper . Close ( )
2020-03-02 17:31:25 +00:00
2021-10-16 06:25:29 +01:00
c . peerMap . forEachEndpoint ( func ( ep * endpoint ) {
2020-07-30 21:48:32 +01:00
ep . stopAndReset ( )
2021-08-26 03:39:20 +01:00
} )
2020-07-09 00:50:31 +01:00
2020-03-13 15:55:38 +00:00
c . closed = true
c . connCtxCancel ( )
2020-03-23 21:12:23 +00:00
c . closeAllDerpLocked ( "conn-close" )
2021-04-03 02:36:24 +01:00
// Ignore errors from c.pconnN.Close.
// They will frequently have been closed already by a call to connBind.Close.
2020-03-19 16:39:00 +00:00
if c . pconn6 != nil {
c . pconn6 . Close ( )
}
2021-10-20 20:14:19 +01:00
if c . pconn4 != nil {
c . pconn4 . Close ( )
}
2020-08-04 17:36:38 +01:00
// Wait on goroutines updating right at the end, once everything is
// already closed. We want everything else in the Conn to be
// consistently in the closed state before we release mu to wait
// on the endpoint updater & derphttp.Connect.
for c . goroutinesRunningLocked ( ) {
c . muCond . Wait ( )
}
2021-04-03 02:36:24 +01:00
return nil
2020-08-04 17:36:38 +01:00
}
// goroutinesRunningLocked reports whether any background goroutines
// that Conn.Close must wait for are still running.
//
// c.mu must be held.
func (c *Conn) goroutinesRunningLocked() bool {
	if c.endpointsUpdateActive {
		return true
	}
	// The goroutine running dc.Connect in derpWriteChanOfAddr may linger
	// and appear to leak, as observed in https://github.com/tailscale/tailscale/issues/554.
	// This is despite the underlying context being cancelled by connCtxCancel above.
	// To avoid this condition, we must wait on derpStarted here
	// to ensure that this goroutine has exited by the time Close returns.
	// We only do this if derpWriteChanOfAddr has executed at least once:
	// on the first run, it sets firstDerp := true and spawns the aforementioned goroutine.
	// To detect this, we check activeDerp, which is initialized to non-nil on the first run.
	if c.activeDerp != nil {
		select {
		case <-c.derpStarted:
			break
		default:
			return true
		}
	}
	return false
}
2020-06-25 22:19:12 +01:00
// maxIdleBeforeSTUNShutdown returns how long the client may be idle
// before periodic background STUN stops.
func maxIdleBeforeSTUNShutdown() time.Duration {
	if debugReSTUNStopOnIdle {
		return 45 * time.Second // shortened for debugging
	}
	return sessionActiveTimeout
}
2021-01-20 17:52:24 +00:00
// shouldDoPeriodicReSTUNLocked reports whether the periodic
// background STUN timer should keep firing.
//
// c.mu must be held.
func (c *Conn) shouldDoPeriodicReSTUNLocked() bool {
	if c.networkDown() {
		return false
	}
	if len(c.peerSet) == 0 || c.privateKey.IsZero() {
		// If no peers, not worth doing.
		// Also don't if there's no key (not running).
		return false
	}
	if f := c.idleFunc; f != nil {
		idleFor := f()
		if debugReSTUNStopOnIdle {
			c.logf("magicsock: periodicReSTUN: idle for %v", idleFor.Round(time.Second))
		}
		if idleFor > maxIdleBeforeSTUNShutdown() {
			if c.netMap != nil && c.netMap.Debug != nil && c.netMap.Debug.ForceBackgroundSTUN {
				// Overridden by control.
				return true
			}
			return false
		}
	}
	return true
}
2021-07-09 18:01:50 +01:00
func ( c * Conn ) onPortMapChanged ( ) { c . ReSTUN ( "portmap-changed" ) }
2020-03-13 03:10:11 +00:00
// ReSTUN triggers an address discovery.
// The provided why string is for debug logging only.
func (c *Conn) ReSTUN(why string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.closed {
		// raced with a shutdown.
		return
	}

	// If the user stopped the app, stop doing work. (When the
	// user stops Tailscale via the GUI apps, ipn/local.go
	// reconfigures the engine with a zero private key.)
	//
	// This used to just check c.privateKey.IsZero, but that broke
	// some end-to-end tests that didn't ever set a private
	// key somehow. So for now, only stop doing work if we ever
	// had a key, which helps real users, but appeases tests for
	// now. TODO: rewrite those tests to be less brittle or more
	// realistic.
	if c.privateKey.IsZero() && c.everHadKey {
		c.logf("magicsock: ReSTUN(%q) ignored; stopped, no private key", why)
		return
	}

	if c.endpointsUpdateActive {
		// An update is in flight; remember that another is wanted.
		if c.wantEndpointsUpdate != why {
			c.logf("[v1] magicsock: ReSTUN: endpoint update active, need another later (%q)", why)
			c.wantEndpointsUpdate = why
		}
	} else {
		c.endpointsUpdateActive = true
		go c.updateEndpoints(why)
	}
}
2020-03-19 16:39:00 +00:00
// initialBind binds the IPv4 and IPv6 sockets for the first time.
// An IPv4 bind failure is fatal; an IPv6 failure is only logged.
func (c *Conn) initialBind() error {
	if runtime.GOOS == "js" {
		// No native UDP under js/wasm.
		return nil
	}
	if err := c.bindSocket(&c.pconn4, "udp4", keepCurrentPort); err != nil {
		return fmt.Errorf("magicsock: initialBind IPv4 failed: %w", err)
	}
	c.portMapper.SetLocalPort(c.LocalPort())
	if err := c.bindSocket(&c.pconn6, "udp6", keepCurrentPort); err != nil {
		c.logf("magicsock: ignoring IPv6 bind failure: %v", err)
	}
	return nil
}
2021-04-28 18:28:44 +01:00
// listenPacket opens a packet listener.
// The network must be "udp4" or "udp6".
func (c *Conn) listenPacket(network string, port uint16) (net.PacketConn, error) {
	ctx := context.Background() // unused without DNS name to resolve
	addr := net.JoinHostPort("", fmt.Sprint(port))
	if c.testOnlyPacketListener != nil {
		// Tests can inject a fake listener.
		return c.testOnlyPacketListener.ListenPacket(ctx, network, addr)
	}
	return netns.Listener(c.logf).ListenPacket(ctx, network, addr)
}
2021-04-27 22:40:29 +01:00
// bindSocket initializes rucPtr if necessary and binds a UDP socket to it.
// Network indicates the UDP socket type; it must be "udp4" or "udp6".
// If rucPtr had an existing UDP socket bound, it closes that socket.
// The caller is responsible for informing the portMapper of any changes.
// If curPortFate is set to dropCurrentPort, no attempt is made to reuse
// the current port.
func (c *Conn) bindSocket(rucPtr **RebindingUDPConn, network string, curPortFate currentPortFate) error {
	if *rucPtr == nil {
		*rucPtr = new(RebindingUDPConn)
	}
	ruc := *rucPtr

	// Hold the ruc lock the entire time, so that the close+bind is atomic
	// from the perspective of ruc receive functions.
	ruc.mu.Lock()
	defer ruc.mu.Unlock()

	if debugAlwaysDERP {
		c.logf("disabled %v per TS_DEBUG_ALWAYS_USE_DERP", network)
		ruc.pconn = newBlockForeverConn()
		return nil
	}

	// Build a list of preferred ports.
	// Best is the port that the user requested.
	// Second best is the port that is currently in use.
	// If those fail, fall back to 0.
	var ports []uint16
	if port := uint16(c.port.Get()); port != 0 {
		ports = append(ports, port)
	}
	if ruc.pconn != nil && curPortFate == keepCurrentPort {
		curPort := uint16(ruc.localAddrLocked().Port)
		ports = append(ports, curPort)
	}
	ports = append(ports, 0)
	// Remove duplicates. (All duplicates are consecutive.)
	uniq.ModifySlice(&ports, func(i, j int) bool { return ports[i] == ports[j] })

	var pconn net.PacketConn
	for _, port := range ports {
		// Close the existing conn, in case it is sitting on the port we want.
		err := ruc.closeLocked()
		if err != nil && !errors.Is(err, net.ErrClosed) && !errors.Is(err, errNilPConn) {
			c.logf("magicsock: bindSocket %v close failed: %v", network, err)
		}
		// Open a new one with the desired port.
		pconn, err = c.listenPacket(network, port)
		if err != nil {
			c.logf("magicsock: unable to bind %v port %d: %v", network, port, err)
			continue
		}
		// Success.
		ruc.pconn = pconn
		if network == "udp4" {
			health.SetUDP4Unbound(false)
		}
		return nil
	}

	// Failed to bind, including on port 0 (!).
	// Set pconn to a dummy conn whose reads block until closed.
	// This keeps the receive funcs alive for a future in which
	// we get a link change and we can try binding again.
	ruc.pconn = newBlockForeverConn()
	if network == "udp4" {
		health.SetUDP4Unbound(true)
	}
	return fmt.Errorf("failed to bind any ports (tried %v)", ports)
}
2021-06-22 21:00:40 +01:00
// currentPortFate says whether a rebind should try to keep the
// currently bound local port.
type currentPortFate uint8

const (
	keepCurrentPort currentPortFate = iota // try to rebind to the port in use
	dropCurrentPort                        // do not attempt to reuse the current port
)
// rebind closes and re-binds the UDP sockets.
// We consider it successful if we manage to bind the IPv4 socket.
func (c *Conn) rebind(curPortFate currentPortFate) error {
	if runtime.GOOS == "js" {
		// No native UDP under js/wasm; nothing to rebind.
		return nil
	}
	if err := c.bindSocket(&c.pconn4, "udp4", curPortFate); err != nil {
		return fmt.Errorf("magicsock: Rebind IPv4 failed: %w", err)
	}
	c.portMapper.SetLocalPort(c.LocalPort())
	if err := c.bindSocket(&c.pconn6, "udp6", curPortFate); err != nil {
		// IPv6 is optional; log and continue.
		c.logf("magicsock: Rebind ignoring IPv6 bind failure: %v", err)
	}
	return nil
}
// Rebind closes and re-binds the UDP sockets and resets the DERP connection.
// It should be followed by a call to ReSTUN.
func ( c * Conn ) Rebind ( ) {
if err := c . rebind ( keepCurrentPort ) ; err != nil {
c . logf ( "%w" , err )
return
}
2020-04-09 22:21:36 +01:00
2021-12-29 02:01:50 +00:00
var ifIPs [ ] netaddr . IPPrefix
if c . linkMon != nil {
st := c . linkMon . InterfaceState ( )
defIf := st . DefaultRouteInterface
ifIPs = st . InterfaceIPs [ defIf ]
c . logf ( "Rebind; defIf=%q, ips=%v" , defIf , ifIPs )
2021-02-10 18:04:42 +00:00
}
2020-07-27 18:19:05 +01:00
2021-12-29 02:01:50 +00:00
c . maybeCloseDERPsOnRebind ( ifIPs )
2020-12-18 08:31:48 +00:00
c . resetEndpointStates ( )
2020-04-10 06:25:31 +01:00
}
2021-08-26 03:39:20 +01:00
// resetEndpointStates resets the preferred address for all peers.
// This is called when connectivity changes enough that we no longer
// trust the old routes.
func (c *Conn) resetEndpointStates() {
	c.mu.Lock()
	defer c.mu.Unlock()

	c.peerMap.forEachEndpoint(func(ep *endpoint) {
		ep.noteConnectivityChange()
	})
}
2020-06-30 20:22:42 +01:00
// packIPPort packs an IPPort into the form wanted by WireGuard.
func packIPPort ( ua netaddr . IPPort ) [ ] byte {
2021-05-15 02:07:28 +01:00
ip := ua . IP ( ) . Unmap ( )
2020-06-30 20:22:42 +01:00
a := ip . As16 ( )
ipb := a [ : ]
if ip . Is4 ( ) {
ipb = ipb [ 12 : ]
}
b := make ( [ ] byte , 0 , len ( ipb ) + 2 )
b = append ( b , ipb ... )
2021-05-15 02:07:28 +01:00
b = append ( b , byte ( ua . Port ( ) ) )
b = append ( b , byte ( ua . Port ( ) >> 8 ) )
2020-06-30 20:22:42 +01:00
return b
}
2021-03-24 16:41:57 +00:00
// ParseEndpoint is called by WireGuard to connect to an endpoint.
// nodeKeyStr is the node public key in untyped hex form.
func (c *Conn) ParseEndpoint(nodeKeyStr string) (conn.Endpoint, error) {
	k, err := key.ParseNodePublicUntyped(mem.S(nodeKeyStr))
	if err != nil {
		return nil, fmt.Errorf("magicsock: ParseEndpoint: parse failed on %q: %w", nodeKeyStr, err)
	}

	c.mu.Lock()
	defer c.mu.Unlock()
	if c.closed {
		return nil, errConnClosed
	}
	ep, ok := c.peerMap.endpointForNodeKey(k)
	if !ok {
		// We should never be telling WireGuard about a new peer
		// before magicsock knows about it.
		c.logf("[unexpected] magicsock: ParseEndpoint: unknown node key=%s", k.ShortString())
		return nil, fmt.Errorf("magicsock: ParseEndpoint: unknown peer %q", k.ShortString())
	}

	return ep, nil
}
// RebindingUDPConn is a UDP socket that can be re-bound.
// Unix has no notion of re-binding a socket, so we swap it out for a new one.
type RebindingUDPConn struct {
	mu    sync.Mutex     // guards pconn
	pconn net.PacketConn // current underlying socket
}
2021-04-20 00:18:56 +01:00
// currentConn returns c's current pconn.
func (c *RebindingUDPConn) currentConn() net.PacketConn {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.pconn
}
2021-04-20 00:18:56 +01:00
// ReadFrom reads a packet from c into b.
// It returns the number of bytes copied and the source address.
func (c *RebindingUDPConn) ReadFrom(b []byte) (int, net.Addr, error) {
	for {
		pconn := c.currentConn()
		n, addr, err := pconn.ReadFrom(b)
		// If the socket was swapped out (rebind) while we were reading,
		// retry on the new socket rather than surfacing the error from
		// the old, now-closed one.
		if err != nil && pconn != c.currentConn() {
			continue
		}
		return n, addr, err
	}
}
2021-02-11 21:35:06 +00:00
// ReadFromNetaddr reads a packet from c into b.
// It returns the number of bytes copied and the return address.
// It is identical to c.ReadFrom, except that it returns a netaddr.IPPort instead of a net.Addr.
// ReadFromNetaddr is designed to work with specific underlying connection types.
// If c's underlying connection returns a non-*net.UDPAddr return address, ReadFromNetaddr will return an error.
// ReadFromNetaddr exists because it removes an allocation per read,
// when c's underlying connection is a net.UDPConn.
func (c *RebindingUDPConn) ReadFromNetaddr(b []byte) (n int, ipp netaddr.IPPort, err error) {
	for {
		pconn := c.currentConn()

		// Optimization: Treat *net.UDPConn specially.
		// ReadFromUDP gets partially inlined, avoiding allocating a *net.UDPAddr,
		// as long as pAddr itself doesn't escape.
		// The non-*net.UDPConn case works, but it allocates.
		var pAddr *net.UDPAddr
		if udpConn, ok := pconn.(*net.UDPConn); ok {
			n, pAddr, err = udpConn.ReadFromUDP(b)
		} else {
			var addr net.Addr
			n, addr, err = pconn.ReadFrom(b)
			if addr != nil {
				pAddr, ok = addr.(*net.UDPAddr)
				if !ok {
					return 0, netaddr.IPPort{}, fmt.Errorf("RebindingUDPConn.ReadFromNetaddr: underlying connection returned address of type %T, want *netaddr.UDPAddr", addr)
				}
			}
		}

		if err != nil {
			// If the socket was rebound mid-read, retry on the new one.
			if pconn != c.currentConn() {
				continue
			}
		} else {
			// Convert pAddr to a netaddr.IPPort.
			// This prevents pAddr from escaping.
			var ok bool
			ipp, ok = netaddr.FromStdAddr(pAddr.IP, pAddr.Port, pAddr.Zone)
			if !ok {
				return 0, netaddr.IPPort{}, errors.New("netaddr.FromStdAddr failed")
			}
		}
		return n, ipp, err
	}
}
2020-02-05 22:16:58 +00:00
// LocalAddr returns the local address of the current underlying socket.
func (c *RebindingUDPConn) LocalAddr() *net.UDPAddr {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.localAddrLocked()
}

// localAddrLocked returns the local address of c.pconn.
//
// c.mu must be held.
func (c *RebindingUDPConn) localAddrLocked() *net.UDPAddr {
	return c.pconn.LocalAddr().(*net.UDPAddr)
}
2021-04-27 22:40:29 +01:00
// errNilPConn is returned by RebindingUDPConn.Close when there is no current pconn.
// It is for internal use only and should not be returned to users.
var errNilPConn = errors.New("nil pconn")
2020-02-05 22:16:58 +00:00
func ( c * RebindingUDPConn ) Close ( ) error {
c . mu . Lock ( )
defer c . mu . Unlock ( )
2021-04-27 22:40:29 +01:00
return c . closeLocked ( )
}
func ( c * RebindingUDPConn ) closeLocked ( ) error {
if c . pconn == nil {
return errNilPConn
}
2020-02-05 22:16:58 +00:00
return c . pconn . Close ( )
}
// WriteTo writes b to addr via the currently bound PacketConn.
// If the write fails because the connection was swapped out by a
// rebind mid-write, it retries once with the new connection.
//
// Uses c.currentConn() for consistency with ReadFromNetaddr, rather
// than duplicating the lock/read/unlock dance inline.
func (c *RebindingUDPConn) WriteTo(b []byte, addr net.Addr) (int, error) {
	for {
		pconn := c.currentConn()
		n, err := pconn.WriteTo(b, addr)
		if err != nil && pconn != c.currentConn() {
			// The connection changed underfoot; retry on the new one.
			continue
		}
		return n, err
	}
}
2020-03-23 21:12:23 +00:00
2021-04-27 22:40:29 +01:00
// newBlockForeverConn returns a ready-to-use blockForeverConn with its
// condition variable bound to its mutex.
func newBlockForeverConn() *blockForeverConn {
	c := &blockForeverConn{}
	c.cond = sync.NewCond(&c.mu)
	return c
}
// blockForeverConn is a net.PacketConn whose reads block until it is closed.
type blockForeverConn struct {
	mu     sync.Mutex
	cond   *sync.Cond // broadcast on close to wake blocked readers; bound to mu
	closed bool       // set once by Close; guarded by mu
}
// ReadFrom blocks until the conn is closed, then reports net.ErrClosed.
func (c *blockForeverConn) ReadFrom(p []byte) (n int, addr net.Addr, err error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	for !c.closed {
		c.cond.Wait()
	}
	return 0, nil, net.ErrClosed
}
// WriteTo silently drops all writes, reporting success.
func (c *blockForeverConn) WriteTo(p []byte, addr net.Addr) (n int, err error) {
	// Silently drop writes.
	return len(p), nil
}
// LocalAddr returns a zero-valued *net.UDPAddr.
func (c *blockForeverConn) LocalAddr() net.Addr {
	// Return a *net.UDPAddr because lots of code assumes that it will.
	return new(net.UDPAddr)
}
func ( c * blockForeverConn ) Close ( ) error {
c . mu . Lock ( )
defer c . mu . Unlock ( )
if c . closed {
return net . ErrClosed
}
c . closed = true
2022-01-06 21:19:09 +00:00
c . cond . Broadcast ( )
2021-04-27 22:40:29 +01:00
return nil
}
// Deadlines are not supported on blockForeverConn; all three setters
// return an "unimplemented" error.
func (c *blockForeverConn) SetDeadline(t time.Time) error      { return errors.New("unimplemented") }
func (c *blockForeverConn) SetReadDeadline(t time.Time) error  { return errors.New("unimplemented") }
func (c *blockForeverConn) SetWriteDeadline(t time.Time) error { return errors.New("unimplemented") }
2020-03-23 21:12:23 +00:00
// simpleDur rounds d such that it stringifies to something short.
func simpleDur ( d time . Duration ) time . Duration {
if d < time . Second {
return d . Round ( time . Millisecond )
}
if d < time . Minute {
return d . Round ( time . Second )
}
return d . Round ( time . Minute )
}
2020-03-24 15:09:30 +00:00
2021-02-11 20:39:56 +00:00
func sbPrintAddr ( sb * strings . Builder , a netaddr . IPPort ) {
2021-05-15 02:07:28 +01:00
is6 := a . IP ( ) . Is6 ( )
2020-03-24 20:40:43 +00:00
if is6 {
sb . WriteByte ( '[' )
}
2021-05-15 02:07:28 +01:00
fmt . Fprintf ( sb , "%s" , a . IP ( ) )
2020-03-24 20:40:43 +00:00
if is6 {
sb . WriteByte ( ']' )
}
2021-05-15 02:07:28 +01:00
fmt . Fprintf ( sb , ":%d" , a . Port ( ) )
2020-03-24 20:40:43 +00:00
}
2020-03-26 05:57:46 +00:00
2020-07-03 21:44:22 +01:00
// derpRegionCodeOfAddrLocked returns the region code of the DERP
// address ipPort ("ip:port"), whose port encodes the DERP region ID.
// It returns "" if ipPort doesn't parse or the region is unknown.
//
// c.mu must be held.
func (c *Conn) derpRegionCodeOfAddrLocked(ipPort string) string {
	_, portStr, err := net.SplitHostPort(ipPort)
	if err != nil {
		return ""
	}
	if regionID, err := strconv.Atoi(portStr); err == nil {
		return c.derpRegionCodeOfIDLocked(regionID)
	}
	return ""
}
// derpRegionCodeOfIDLocked returns the region code for regionID from
// the current DERP map, or "" if there's no map or no such region.
//
// c.mu must be held.
func (c *Conn) derpRegionCodeOfIDLocked(regionID int) string {
	if c.derpMap != nil {
		if r, ok := c.derpMap.Regions[regionID]; ok {
			return r.RegionCode
		}
	}
	return ""
}
2020-03-26 05:57:46 +00:00
// UpdateStatus fills in the magicsock-specific portions of sb: the
// self node's Tailscale IPs, endpoints, OS, relay region, and each
// known peer's status.
func (c *Conn) UpdateStatus(sb *ipnstate.StatusBuilder) {
	c.mu.Lock()
	defer c.mu.Unlock()

	// Collect the single-IP addresses from the netmap (if any) both
	// into the builder and into a slice reused for the self status below.
	var tailscaleIPs []netaddr.IP
	if c.netMap != nil {
		tailscaleIPs = make([]netaddr.IP, 0, len(c.netMap.Addresses))
		for _, addr := range c.netMap.Addresses {
			if !addr.IsSingleIP() {
				continue
			}
			sb.AddTailscaleIP(addr.IP())
			tailscaleIPs = append(tailscaleIPs, addr.IP())
		}
	}

	sb.MutateSelfStatus(func(ss *ipnstate.PeerStatus) {
		// Public key is derived from the private key; zero if we have none.
		if !c.privateKey.IsZero() {
			ss.PublicKey = c.privateKey.Public()
		} else {
			ss.PublicKey = key.NodePublic{}
		}
		ss.Addrs = make([]string, 0, len(c.lastEndpoints))
		for _, ep := range c.lastEndpoints {
			ss.Addrs = append(ss.Addrs, ep.Addr.String())
		}
		ss.OS = version.OS()
		// Report our home DERP region code as the relay, if known.
		if c.derpMap != nil {
			derpRegion, ok := c.derpMap.Regions[c.myDerp]
			if ok {
				ss.Relay = derpRegion.RegionCode
			}
		}
		ss.TailscaleIPs = tailscaleIPs
	})

	// Add one PeerStatus per known endpoint (peer).
	c.peerMap.forEachEndpoint(func(ep *endpoint) {
		ps := &ipnstate.PeerStatus{InMagicSock: true}
		//ps.Addrs = append(ps.Addrs, n.Endpoints...)
		ep.populatePeerStatus(ps)
		sb.AddPeer(ep.publicKey, ps)
	})

	c.foreachActiveDerpSortedLocked(func(node int, ad activeDerp) {
		// TODO(bradfitz): add to ipnstate.StatusBuilder
		//f("<li><b>derp-%v</b>: cr%v,wr%v</li>", node, simpleDur(now.Sub(ad.createTime)), simpleDur(now.Sub(*ad.lastWrite)))
	})
}
2021-02-11 20:39:56 +00:00
func ippDebugString ( ua netaddr . IPPort ) string {
2021-05-15 02:07:28 +01:00
if ua . IP ( ) == derpMagicIPAddr {
return fmt . Sprintf ( "derp-%d" , ua . Port ( ) )
2020-03-26 05:57:46 +00:00
}
return ua . String ( )
}
2020-06-28 19:53:37 +01:00
2021-08-26 03:39:20 +01:00
// endpoint is a wireguard/conn.Endpoint that picks the best
// available path to communicate with a peer, based on network
// conditions and what the peer supports.
type endpoint struct {
	// atomically accessed; declared first for alignment reasons
	lastRecv              mono.Time
	numStopAndResetAtomic int64

	// These fields are initialized once and never modified.
	c          *Conn
	publicKey  key.NodePublic // peer public key (for WireGuard + DERP)
	fakeWGAddr netaddr.IPPort // the UDP address we tell wireguard-go we're using
	wgEndpoint string         // string from ParseEndpoint, holds a JSON-serialized wgcfg.Endpoints

	// mu protects all following fields.
	mu sync.Mutex // Lock ordering: Conn.mu, then endpoint.mu

	discoKey   key.DiscoPublic // for discovery messages. IsZero() if peer can't disco.
	discoShort string          // ShortString of discoKey. Empty if peer can't disco.

	heartBeatTimer *time.Timer    // nil when idle
	lastSend       mono.Time      // last time there was outgoing packets sent to this peer (from wireguard-go)
	lastFullPing   mono.Time      // last time we pinged all endpoints
	derpAddr       netaddr.IPPort // fallback/bootstrap path, if non-zero (non-zero for well-behaved clients)

	bestAddr           addrLatency // best non-DERP path; zero if none
	bestAddrAt         mono.Time   // time best address re-confirmed
	trustBestAddrUntil mono.Time   // time when bestAddr expires

	sentPing        map[stun.TxID]sentPing
	endpointState   map[netaddr.IPPort]*endpointState
	isCallMeMaybeEP map[netaddr.IPPort]bool

	pendingCLIPings []pendingCLIPing // any outstanding "tailscale ping" commands running
}

// pendingCLIPing is a "tailscale ping" operation awaiting completion.
type pendingCLIPing struct {
	res *ipnstate.PingResult       // result to fill in and hand to cb
	cb  func(*ipnstate.PingResult) // invoked when the ping resolves
}
2020-07-03 19:06:33 +01:00
// Timing constants governing disco path discovery and DERP cleanup.
const (
	// sessionActiveTimeout is how long since the last activity we
	// try to keep an established endpoint peering alive.
	// It's also the idle time at which we stop doing STUN queries to
	// keep NAT mappings alive.
	sessionActiveTimeout = 45 * time.Second

	// upgradeInterval is how often we try to upgrade to a better path
	// even if we have some non-DERP route that works.
	upgradeInterval = 1 * time.Minute

	// heartbeatInterval is how often pings to the best UDP address
	// are sent.
	heartbeatInterval = 3 * time.Second

	// trustUDPAddrDuration is how long we trust a UDP address as the exclusive
	// path (without using DERP) without having heard a Pong reply.
	trustUDPAddrDuration = 6500 * time.Millisecond

	// goodEnoughLatency is the latency at or under which we don't
	// try to upgrade to a better path.
	goodEnoughLatency = 5 * time.Millisecond

	// derpInactiveCleanupTime is how long a non-home DERP connection
	// needs to be idle (last written to) before we close it.
	derpInactiveCleanupTime = 60 * time.Second

	// derpCleanStaleInterval is how often cleanStaleDerp runs when there
	// are potentially-stale DERP connections to close.
	derpCleanStaleInterval = 15 * time.Second

	// endpointsFreshEnoughDuration is how long we consider a
	// STUN-derived endpoint valid for. UDP NAT mappings typically
	// expire at 30 seconds, so this is a few seconds shy of that.
	endpointsFreshEnoughDuration = 27 * time.Second
)
2021-10-19 13:10:38 +01:00
// Constants that are variable for testing.
var (
	// pingTimeoutDuration is how long we wait for a pong reply before
	// assuming it's never coming.
	pingTimeoutDuration = 5 * time.Second

	// discoPingInterval is the minimum time between pings
	// to an endpoint. (Except in the case of CallMeMaybe frames
	// resetting the counter, as the first pings likely didn't go through
	// the firewall.)
	discoPingInterval = 5 * time.Second
)
2020-07-03 19:06:33 +01:00
// endpointState is some state and history for a specific endpoint of
// an endpoint. (The subject is the endpoint.endpointState
// map key.)
type endpointState struct {
	// all fields guarded by endpoint.mu

	// lastPing is the last (outgoing) ping time.
	lastPing mono.Time

	// lastGotPing, if non-zero, means that this was an endpoint
	// that we learned about at runtime (from an incoming ping)
	// and that is not in the network map. If so, we keep the time
	// updated and use it to discard old candidates.
	lastGotPing time.Time

	// callMeMaybeTime, if non-zero, is the time this endpoint
	// was advertised last via a call-me-maybe disco message.
	callMeMaybeTime time.Time

	recentPongs []pongReply // ring buffer up to pongHistoryCount entries
	recentPong  uint16      // index into recentPongs of most recent; older before, wrapped

	index int16 // index in nodecfg.Node.Endpoints; meaningless if lastGotPing non-zero
}

// indexSentinelDeleted is the temporary value that endpointState.index takes while
// an endpoint's endpoints are being updated from a new network map.
const indexSentinelDeleted = -1
// shouldDeleteLocked reports whether we should delete this endpoint.
func ( st * endpointState ) shouldDeleteLocked ( ) bool {
switch {
2021-01-20 20:41:25 +00:00
case ! st . callMeMaybeTime . IsZero ( ) :
return false
2020-08-13 04:12:56 +01:00
case st . lastGotPing . IsZero ( ) :
// This was an endpoint from the network map. Is it still in the network map?
return st . index == indexSentinelDeleted
default :
2021-01-20 20:41:25 +00:00
// This was an endpoint discovered at runtime.
2020-08-13 04:12:56 +01:00
return time . Since ( st . lastGotPing ) > sessionActiveTimeout
}
}
2021-08-26 06:20:31 +01:00
func ( de * endpoint ) deleteEndpointLocked ( ep netaddr . IPPort ) {
2020-08-13 04:12:56 +01:00
delete ( de . endpointState , ep )
2021-03-23 17:07:34 +00:00
if de . bestAddr . IPPort == ep {
de . bestAddr = addrLatency { }
2020-08-13 04:12:56 +01:00
}
2020-07-03 19:06:33 +01:00
}
// pongHistoryCount is how many pongReply values we keep per endpointState.
const pongHistoryCount = 64

// pongReply records a single received pong and what it told us.
type pongReply struct {
	latency time.Duration  // round-trip time for the ping/pong pair
	pongAt  mono.Time      // when we received the pong
	from    netaddr.IPPort // the pong's src (usually same as endpoint map key)
	pongSrc netaddr.IPPort // what they reported they heard
}

// sentPing tracks an in-flight disco ping awaiting a pong or timeout.
type sentPing struct {
	to      netaddr.IPPort   // address the ping was sent to
	at      mono.Time        // when the ping was sent
	timer   *time.Timer      // timeout timer
	purpose discoPingPurpose // why the ping was sent
}
// initFakeUDPAddr populates fakeWGAddr with a globally unique fake UDPAddr.
// The current implementation just uses the pointer value of de jammed into an IPv6
// address, but it could also be, say, a counter.
2021-08-26 06:20:31 +01:00
func ( de * endpoint ) initFakeUDPAddr ( ) {
2020-06-28 19:53:37 +01:00
var addr [ 16 ] byte
addr [ 0 ] = 0xfd
addr [ 1 ] = 0x00
binary . BigEndian . PutUint64 ( addr [ 2 : ] , uint64 ( reflect . ValueOf ( de ) . Pointer ( ) ) )
2021-05-15 02:07:28 +01:00
de . fakeWGAddr = netaddr . IPPortFrom ( netaddr . IPFrom16 ( addr ) , 12345 )
2020-06-28 19:53:37 +01:00
}
2021-09-01 03:06:04 +01:00
// noteRecvActivity records receive activity on de, and invokes
// Conn.noteRecvActivity no more than once every 10s.
func ( de * endpoint ) noteRecvActivity ( ) {
if de . c . noteRecvActivity == nil {
return
}
2021-07-21 19:04:36 +01:00
now := mono . Now ( )
elapsed := now . Sub ( de . lastRecv . LoadAtomic ( ) )
if elapsed > 10 * time . Second {
de . lastRecv . StoreAtomic ( now )
2021-09-01 03:06:04 +01:00
de . c . noteRecvActivity ( de . publicKey )
2020-08-06 22:57:03 +01:00
}
}
2020-07-04 06:26:53 +01:00
// String exists purely so wireguard-go internals can log.Printf("%v")
// its internal conn.Endpoints and we don't end up with data races
// from fmt (via log) reading mutex fields and such.
2021-08-26 06:20:31 +01:00
func ( de * endpoint ) String ( ) string {
return fmt . Sprintf ( "magicsock.endpoint{%v, %v}" , de . publicKey . ShortString ( ) , de . discoShort )
2020-07-04 06:26:53 +01:00
}
2021-08-26 06:20:31 +01:00
// The following methods implement wireguard-go's conn.Endpoint interface.
// Only DstToString and DstToBytes are actually exercised; the Src/DstIP
// accessors panic because wireguard-go never calls them here.
func (de *endpoint) ClearSrc()           {}
func (de *endpoint) SrcToString() string { panic("unused") } // unused by wireguard-go
func (de *endpoint) SrcIP() net.IP       { panic("unused") } // unused by wireguard-go
func (de *endpoint) DstToString() string { return de.wgEndpoint }
func (de *endpoint) DstIP() net.IP       { panic("unused") }
func (de *endpoint) DstToBytes() []byte  { return packIPPort(de.fakeWGAddr) }
2020-06-28 19:53:37 +01:00
2021-08-26 03:39:20 +01:00
// canP2P reports whether this endpoint understands the disco protocol
// and is expected to speak it.
//
// As of 2021-08-25, only a few hundred pre-0.100 clients understand
// DERP but not disco, so this returns false very rarely.
func (de *endpoint) canP2P() bool {
	// A zero discoKey means the peer never advertised one.
	return !de.discoKey.IsZero()
}
2020-07-03 19:06:33 +01:00
// addrForSendLocked returns the address(es) that should be used for
// sending the next packet. Zero, one, or both of UDP address and DERP
// addr may be non-zero.
//
// de.mu must be held.
2021-08-26 06:20:31 +01:00
func ( de * endpoint ) addrForSendLocked ( now mono . Time ) ( udpAddr , derpAddr netaddr . IPPort ) {
2021-03-23 17:07:34 +00:00
udpAddr = de . bestAddr . IPPort
2020-07-03 19:06:33 +01:00
if udpAddr . IsZero ( ) || now . After ( de . trustBestAddrUntil ) {
// We had a bestAddr but it expired so send both to it
// and DERP.
derpAddr = de . derpAddr
}
return
}
2020-07-03 20:43:39 +01:00
// heartbeat is called every heartbeatInterval to keep the best UDP path alive,
// or kick off discovery of other paths.
func (de *endpoint) heartbeat() {
	de.mu.Lock()
	defer de.mu.Unlock()

	// The timer that called us has fired; clear it so noteActiveLocked
	// knows to re-arm on the next send.
	de.heartBeatTimer = nil

	if !de.canP2P() {
		// Cannot form p2p connections, no heartbeating necessary.
		return
	}

	if de.lastSend.IsZero() {
		// Shouldn't happen.
		return
	}

	if mono.Since(de.lastSend) > sessionActiveTimeout {
		// Session's idle. Stop heartbeating.
		de.c.logf("[v1] magicsock: disco: ending heartbeats for idle session to %v (%v)", de.publicKey.ShortString(), de.discoShort)
		return
	}

	now := mono.Now()
	udpAddr, _ := de.addrForSendLocked(now)
	if !udpAddr.IsZero() {
		// We have a preferred path. Ping it every heartbeatInterval
		// to keep the NAT mapping warm.
		de.startPingLocked(udpAddr, now, pingHeartbeat)
	}

	if de.wantFullPingLocked(now) {
		de.sendPingsLocked(now, true)
	}

	// Re-arm for the next heartbeat (not reached when the session went
	// idle above).
	de.heartBeatTimer = time.AfterFunc(heartbeatInterval, de.heartbeat)
}
// wantFullPingLocked reports whether we should ping to all our peers looking for
// a better path.
//
// de.mu must be held.
2021-08-26 06:20:31 +01:00
func ( de * endpoint ) wantFullPingLocked ( now mono . Time ) bool {
2021-10-22 17:09:37 +01:00
if runtime . GOOS == "js" {
return false
}
2021-08-26 03:39:20 +01:00
if ! de . canP2P ( ) {
return false
}
2020-07-03 20:43:39 +01:00
if de . bestAddr . IsZero ( ) || de . lastFullPing . IsZero ( ) {
return true
}
if now . After ( de . trustBestAddrUntil ) {
return true
}
2021-03-23 17:07:34 +00:00
if de . bestAddr . latency <= goodEnoughLatency {
2020-07-03 20:43:39 +01:00
return false
}
if now . Sub ( de . lastFullPing ) >= upgradeInterval {
return true
}
return false
}
2021-08-26 06:20:31 +01:00
func ( de * endpoint ) noteActiveLocked ( ) {
2021-07-21 19:04:36 +01:00
de . lastSend = mono . Now ( )
2021-08-26 03:39:20 +01:00
if de . heartBeatTimer == nil && de . canP2P ( ) {
2020-07-03 20:43:39 +01:00
de . heartBeatTimer = time . AfterFunc ( heartbeatInterval , de . heartbeat )
}
}
2020-08-09 22:49:42 +01:00
// cliPing starts a ping for the "tailscale ping" command. res is value to call cb with,
// already partially filled.
func (de *endpoint) cliPing(res *ipnstate.PingResult, cb func(*ipnstate.PingResult)) {
	de.mu.Lock()
	defer de.mu.Unlock()

	de.pendingCLIPings = append(de.pendingCLIPings, pendingCLIPing{res, cb})

	now := mono.Now()
	udpAddr, derpAddr := de.addrForSendLocked(now)
	if !derpAddr.IsZero() {
		de.startPingLocked(derpAddr, now, pingCLI)
	}
	if !udpAddr.IsZero() && now.Before(de.trustBestAddrUntil) {
		// Already have an active session, so just ping the address we're using.
		// Otherwise "tailscale ping" results to a node on the local network
		// can look like they're bouncing between, say 10.0.0.0/9 and the peer's
		// IPv6 address, both 1ms away, and it's random who replies first.
		de.startPingLocked(udpAddr, now, pingCLI)
	} else if de.canP2P() {
		// No trusted direct path: ping every candidate endpoint.
		for ep := range de.endpointState {
			de.startPingLocked(ep, now, pingCLI)
		}
	}
	de.noteActiveLocked()
}
2021-08-26 06:20:31 +01:00
// send transmits b to the peer: over UDP when a direct path is
// available, and/or via DERP. If no trusted direct path exists (and
// the peer can disco), it also kicks off path discovery.
//
// If both sends are attempted and only DERP succeeds, the UDP error
// is suppressed and nil is returned.
func (de *endpoint) send(b []byte) error {
	now := mono.Now()

	de.mu.Lock()
	udpAddr, derpAddr := de.addrForSendLocked(now)
	if de.canP2P() && (udpAddr.IsZero() || now.After(de.trustBestAddrUntil)) {
		// No (trusted) direct path: start/continue discovery pings.
		de.sendPingsLocked(now, true)
	}
	de.noteActiveLocked()
	de.mu.Unlock()

	if udpAddr.IsZero() && derpAddr.IsZero() {
		return errors.New("no UDP or DERP addr")
	}
	var err error
	if !udpAddr.IsZero() {
		_, err = de.c.sendAddr(udpAddr, de.publicKey, b)
	}
	if !derpAddr.IsZero() {
		if ok, _ := de.c.sendAddr(derpAddr, de.publicKey, b); ok && err != nil {
			// UDP failed but DERP worked, so good enough:
			return nil
		}
	}
	return err
}
2021-08-26 06:20:31 +01:00
func ( de * endpoint ) pingTimeout ( txid stun . TxID ) {
2020-07-24 19:18:35 +01:00
de . mu . Lock ( )
defer de . mu . Unlock ( )
sp , ok := de . sentPing [ txid ]
if ! ok {
return
}
2021-07-21 19:04:36 +01:00
if debugDisco || de . bestAddr . IsZero ( ) || mono . Now ( ) . After ( de . trustBestAddrUntil ) {
2020-12-21 18:58:06 +00:00
de . c . logf ( "[v1] magicsock: disco: timeout waiting for pong %x from %v (%v, %v)" , txid [ : 6 ] , sp . to , de . publicKey . ShortString ( ) , de . discoShort )
2020-07-24 19:18:35 +01:00
}
de . removeSentPingLocked ( txid , sp )
}
2020-07-01 22:39:21 +01:00
// forgetPing is called by a timer when a ping either fails to send or
// has taken too long to get a pong reply.
2021-08-26 06:20:31 +01:00
func ( de * endpoint ) forgetPing ( txid stun . TxID ) {
2020-07-01 22:39:21 +01:00
de . mu . Lock ( )
defer de . mu . Unlock ( )
if sp , ok := de . sentPing [ txid ] ; ok {
2020-07-03 06:48:12 +01:00
de . removeSentPingLocked ( txid , sp )
2020-07-01 22:39:21 +01:00
}
}
2021-08-26 06:20:31 +01:00
// removeSentPingLocked cancels sp's timeout timer and drops the
// in-flight ping record for txid.
//
// de.mu must be held.
func (de *endpoint) removeSentPingLocked(txid stun.TxID, sp sentPing) {
	// Stop the timer for the case where sendPing failed to write to UDP.
	// In the case of a timer already having fired, this is a no-op:
	sp.timer.Stop()
	delete(de.sentPing, txid)
}
2021-11-18 01:30:27 +00:00
// sendDiscoPing sends a ping with the provided txid to ep using de's discoKey.
//
// The caller (startPingLocked) should've already recorded the ping in
// sentPing and set up the timer.
//
// The caller should use de.discoKey as the discoKey argument.
// It is passed in so that sendDiscoPing doesn't need to lock de.mu.
func (de *endpoint) sendDiscoPing(ep netaddr.IPPort, discoKey key.DiscoPublic, txid stun.TxID, logLevel discoLogLevel) {
	selfPubKey, _ := de.c.publicKeyAtomic.Load().(key.NodePublic)
	sent, _ := de.c.sendDiscoMessage(ep, de.publicKey, discoKey, &disco.Ping{
		TxID:    [12]byte(txid),
		NodeKey: selfPubKey,
	}, logLevel)
	if !sent {
		// Write failed; cancel the timeout timer and forget the ping
		// so we don't wait for a pong that can never come.
		de.forgetPing(txid)
	}
}
2020-07-18 21:50:08 +01:00
// discoPingPurpose is the reason why a discovery ping message was sent.
type discoPingPurpose int

//go:generate go run tailscale.com/cmd/addlicense -year 2020 -file discopingpurpose_string.go go run golang.org/x/tools/cmd/stringer -type=discoPingPurpose -trimprefix=ping
const (
	// pingDiscovery means that purpose of a ping was to see if a
	// path was valid.
	pingDiscovery discoPingPurpose = iota

	// pingHeartbeat means that purpose of a ping was whether a
	// peer was still there.
	pingHeartbeat

	// pingCLI means that the user is running "tailscale ping"
	// from the CLI. These types of pings can go over DERP.
	pingCLI
)
2021-08-26 06:20:31 +01:00
// startPingLocked registers an outgoing disco ping to ep for the given
// purpose (recording it in sentPing with a timeout timer) and sends it
// asynchronously.
//
// de.mu must be held. Panics if the peer can't disco.
func (de *endpoint) startPingLocked(ep netaddr.IPPort, now mono.Time, purpose discoPingPurpose) {
	if !de.canP2P() {
		panic("tried to disco ping a peer that can't disco")
	}
	if runtime.GOOS == "js" {
		// Disco pings are skipped entirely on js.
		return
	}
	if purpose != pingCLI {
		st, ok := de.endpointState[ep]
		if !ok {
			// Shouldn't happen. But don't ping an endpoint that's
			// not active for us.
			de.c.logf("magicsock: disco: [unexpected] attempt to ping no longer live endpoint %v", ep)
			return
		}
		st.lastPing = now
	}

	txid := stun.NewTxID()
	de.sentPing[txid] = sentPing{
		to:      ep,
		at:      now,
		timer:   time.AfterFunc(pingTimeoutDuration, func() { de.pingTimeout(txid) }),
		purpose: purpose,
	}
	// Heartbeats are routine; log them only at the verbose level.
	logLevel := discoLog
	if purpose == pingHeartbeat {
		logLevel = discoVerboseLog
	}
	go de.sendDiscoPing(ep, de.discoKey, txid, logLevel)
}
2021-08-26 06:20:31 +01:00
// sendPingsLocked pings all of de's candidate endpoints (subject to
// the discoPingInterval rate limit), pruning stale ones as it goes.
// If sendCallMeMaybe is true and anything was pinged, it also asks the
// peer (via DERP) to ping us back.
//
// de.mu must be held.
func (de *endpoint) sendPingsLocked(now mono.Time, sendCallMeMaybe bool) {
	de.lastFullPing = now
	var sentAny bool
	for ep, st := range de.endpointState {
		if st.shouldDeleteLocked() {
			de.deleteEndpointLocked(ep)
			continue
		}
		if runtime.GOOS == "js" {
			// Direct pings are skipped on js.
			continue
		}
		if !st.lastPing.IsZero() && now.Sub(st.lastPing) < discoPingInterval {
			// Pinged this endpoint too recently; skip it this round.
			continue
		}
		firstPing := !sentAny
		sentAny = true
		if firstPing && sendCallMeMaybe {
			de.c.logf("[v1] magicsock: disco: send, starting discovery for %v (%v)", de.publicKey.ShortString(), de.discoShort)
		}
		de.startPingLocked(ep, now, pingDiscovery)
	}
	derpAddr := de.derpAddr
	if sentAny && sendCallMeMaybe && !derpAddr.IsZero() {
		// Have our magicsock.Conn figure out its STUN endpoint (if
		// it doesn't know already) and then send a CallMeMaybe
		// message to our peer via DERP informing them that we've
		// sent so our firewall ports are probably open and now
		// would be a good time for them to connect.
		go de.c.enqueueCallMeMaybe(derpAddr, de)
	}
}
2021-08-26 06:20:31 +01:00
// updateFromNode reconciles de's state with a new network-map node n:
// disco key, DERP home address, and the set of known endpoints.
func (de *endpoint) updateFromNode(n *tailcfg.Node) {
	if n == nil {
		panic("nil node when updating disco ep")
	}
	de.mu.Lock()
	defer de.mu.Unlock()

	if de.discoKey != n.DiscoKey {
		// Disco key rotated: all prior path state is invalid.
		de.c.logf("[v1] magicsock: disco: node %s changed from discokey %s to %s", de.publicKey.ShortString(), de.discoKey, n.DiscoKey)
		de.discoKey = n.DiscoKey
		de.discoShort = de.discoKey.ShortString()
		de.resetLocked()
	}
	if n.DERP == "" {
		de.derpAddr = netaddr.IPPort{}
	} else {
		de.derpAddr, _ = netaddr.ParseIPPort(n.DERP)
	}

	for _, st := range de.endpointState {
		st.index = indexSentinelDeleted // assume deleted until updated in next loop
	}
	for i, epStr := range n.Endpoints {
		if i > math.MaxInt16 {
			// Seems unlikely.
			continue
		}
		ipp, err := netaddr.ParseIPPort(epStr)
		if err != nil {
			de.c.logf("magicsock: bogus netmap endpoint %q", epStr)
			continue
		}
		if st, ok := de.endpointState[ipp]; ok {
			st.index = int16(i)
		} else {
			de.endpointState[ipp] = &endpointState{index: int16(i)}
		}
	}

	// Now delete anything unless it's still in the network map or
	// was a recently discovered endpoint.
	for ep, st := range de.endpointState {
		if st.shouldDeleteLocked() {
			de.deleteEndpointLocked(ep)
		}
	}
}
// addCandidateEndpoint adds ep as an endpoint to which we should send
// future pings.
//
// This is called once we've already verified that we got a valid
// discovery message from de via ep.
2021-08-26 06:20:31 +01:00
func ( de * endpoint ) addCandidateEndpoint ( ep netaddr . IPPort ) {
2020-08-13 04:12:56 +01:00
de . mu . Lock ( )
defer de . mu . Unlock ( )
if st , ok := de . endpointState [ ep ] ; ok {
if st . lastGotPing . IsZero ( ) {
// Already-known endpoint from the network map.
return
}
st . lastGotPing = time . Now ( )
return
}
// Newly discovered endpoint. Exciting!
2021-03-19 20:18:02 +00:00
de . c . logf ( "[v1] magicsock: disco: adding %v as candidate endpoint for %v (%s)" , ep , de . discoShort , de . publicKey . ShortString ( ) )
2020-08-13 04:12:56 +01:00
de . endpointState [ ep ] = & endpointState {
lastGotPing : time . Now ( ) ,
}
// If for some reason this gets very large, do some cleanup.
if size := len ( de . endpointState ) ; size > 100 {
for ep , st := range de . endpointState {
if st . shouldDeleteLocked ( ) {
de . deleteEndpointLocked ( ep )
2020-06-30 23:32:19 +01:00
}
}
2020-08-13 04:12:56 +01:00
size2 := len ( de . endpointState )
2021-03-19 20:18:02 +00:00
de . c . logf ( "[v1] magicsock: disco: addCandidateEndpoint pruned %v candidate set from %v to %v entries" , size , size2 )
2020-06-30 23:32:19 +01:00
}
2020-06-28 19:53:37 +01:00
}
// noteConnectivityChange is called when connectivity changes enough
// that we should question our earlier assumptions about which paths
// work.
2021-08-26 06:20:31 +01:00
func ( de * endpoint ) noteConnectivityChange ( ) {
2020-06-28 19:53:37 +01:00
de . mu . Lock ( )
defer de . mu . Unlock ( )
2021-07-21 19:04:36 +01:00
de . trustBestAddrUntil = 0
2020-06-28 19:53:37 +01:00
}
2020-07-02 06:15:41 +01:00
// handlePongConnLocked handles a Pong message (a reply to an earlier ping).
// It should be called with the Conn.mu held.
//
// It reports whether m.TxID corresponds to a ping that this endpoint sent.
func (de *endpoint) handlePongConnLocked(m *disco.Pong, di *discoInfo, src netaddr.IPPort) (knownTxID bool) {
	de.mu.Lock()
	defer de.mu.Unlock()

	// Pongs relayed over DERP carry the synthetic DERP magic IP.
	isDerp := src.IP() == derpMagicIPAddr

	sp, ok := de.sentPing[m.TxID]
	if !ok {
		// This is not a pong for a ping we sent.
		return false
	}
	knownTxID = true // for naked returns below
	de.removeSentPingLocked(m.TxID, sp)
	// Record that this disco key was last seen speaking for our node key.
	di.setNodeKey(de.publicKey)

	now := mono.Now()
	latency := now.Sub(sp.at)

	if !isDerp {
		st, ok := de.endpointState[sp.to]
		if !ok {
			// This is no longer an endpoint we care about.
			return
		}

		// Remember which node key answered from this IP:port so
		// future receives from src can be mapped to this peer.
		de.c.peerMap.setNodeKeyForIPPort(src, de.publicKey)

		st.addPongReplyLocked(pongReply{
			latency: latency,
			pongAt:  now,
			from:    src,
			pongSrc: m.Src,
		})
	}

	// Heartbeat pings are routine; only log the interesting ones.
	if sp.purpose != pingHeartbeat {
		de.c.logf("[v1] magicsock: disco: %v<-%v (%v, %v) got pong tx=%x latency=%v pong.src=%v%v", de.c.discoShort, de.discoShort, de.publicKey.ShortString(), src, m.TxID[:6], latency.Round(time.Millisecond), m.Src, logger.ArgWriter(func(bw *bufio.Writer) {
			if sp.to != src {
				fmt.Fprintf(bw, " ping.to=%v", sp.to)
			}
		}))
	}

	// Deliver the measured latency to any pending CLI ping requests
	// (each callback runs in its own goroutine).
	for _, pp := range de.pendingCLIPings {
		de.c.populateCLIPingResponseLocked(pp.res, latency, sp.to)
		go pp.cb(pp.res)
	}
	de.pendingCLIPings = nil

	// Promote this pong response to our current best address if it's lower latency.
	// TODO(bradfitz): decide how latency vs. preference order affects decision
	if !isDerp {
		thisPong := addrLatency{sp.to, latency}
		if betterAddr(thisPong, de.bestAddr) {
			de.c.logf("magicsock: disco: node %v %v now using %v", de.publicKey.ShortString(), de.discoShort, sp.to)
			de.bestAddr = thisPong
		}
		// If this pong is for the current best path (whether just
		// promoted or already in place), refresh its latency and
		// extend how long we trust it without re-pinging.
		if de.bestAddr.IPPort == thisPong.IPPort {
			de.bestAddr.latency = latency
			de.bestAddrAt = now
			de.trustBestAddrUntil = now.Add(trustUDPAddrDuration)
		}
	}
	return
}
2021-03-23 17:07:34 +00:00
// addrLatency is an IPPort with an associated latency.
type addrLatency struct {
	netaddr.IPPort
	latency time.Duration // measured round-trip latency to IPPort
}
// betterAddr reports whether a is a better addr to use than b.
func betterAddr ( a , b addrLatency ) bool {
if a . IPPort == b . IPPort {
return false
}
if b . IsZero ( ) {
return true
}
if a . IsZero ( ) {
return false
}
2021-05-15 02:07:28 +01:00
if a . IP ( ) . Is6 ( ) && b . IP ( ) . Is4 ( ) {
2021-03-23 17:17:19 +00:00
// Prefer IPv6 for being a bit more robust, as long as
// the latencies are roughly equivalent.
if a . latency / 10 * 9 < b . latency {
return true
}
2021-05-15 02:07:28 +01:00
} else if a . IP ( ) . Is4 ( ) && b . IP ( ) . Is6 ( ) {
2021-03-23 17:17:19 +00:00
if betterAddr ( b , a ) {
return false
}
}
2021-03-23 17:07:34 +00:00
return a . latency < b . latency
}
2021-08-26 06:20:31 +01:00
// endpoint.mu must be held.
2020-07-03 19:06:33 +01:00
func ( st * endpointState ) addPongReplyLocked ( r pongReply ) {
if n := len ( st . recentPongs ) ; n < pongHistoryCount {
st . recentPong = uint16 ( n )
st . recentPongs = append ( st . recentPongs , r )
return
2020-07-01 20:56:17 +01:00
}
2020-07-03 19:06:33 +01:00
i := st . recentPong + 1
if i == pongHistoryCount {
i = 0
}
st . recentPongs [ i ] = r
st . recentPong = i
2020-07-01 20:56:17 +01:00
}
2020-07-01 23:28:14 +01:00
// handleCallMeMaybe handles a CallMeMaybe discovery message via
// DERP. The contract for use of this message is that the peer has
// already sent to us via UDP, so their stateful firewall should be
// open. Now we can Ping back and make it through.
func (de *endpoint) handleCallMeMaybe(m *disco.CallMeMaybe) {
	if !de.canP2P() {
		// How did we receive a disco message from a peer that can't disco?
		panic("got call-me-maybe from peer with no discokey")
	}
	if runtime.GOOS == "js" {
		// Nothing to do on js/wasm if we can't send UDP packets anyway.
		return
	}
	de.mu.Lock()
	defer de.mu.Unlock()

	now := time.Now()
	// Mark-and-sweep: flag every previously-known call-me-maybe
	// endpoint for deletion; entries named in this message are
	// re-marked true below, and the rest are swept at the end.
	for ep := range de.isCallMeMaybeEP {
		de.isCallMeMaybeEP[ep] = false // mark for deletion
	}
	if de.isCallMeMaybeEP == nil {
		de.isCallMeMaybeEP = map[netaddr.IPPort]bool{}
	}
	var newEPs []netaddr.IPPort
	for _, ep := range m.MyNumber {
		if ep.IP().Is6() && ep.IP().IsLinkLocalUnicast() {
			// We send these out, but ignore them for now.
			// TODO: teach the ping code to ping on all interfaces
			// for these.
			continue
		}
		de.isCallMeMaybeEP[ep] = true
		if es, ok := de.endpointState[ep]; ok {
			es.callMeMaybeTime = now
		} else {
			// Endpoint we've never seen before; track it and
			// remember it for the log line below.
			de.endpointState[ep] = &endpointState{callMeMaybeTime: now}
			newEPs = append(newEPs, ep)
		}
	}
	if len(newEPs) > 0 {
		de.c.logf("[v1] magicsock: disco: call-me-maybe from %v %v added new endpoints: %v",
			de.publicKey.ShortString(), de.discoShort,
			// Lazily format the endpoint list only if the log line
			// is actually emitted.
			logger.ArgWriter(func(w *bufio.Writer) {
				for i, ep := range newEPs {
					if i > 0 {
						w.WriteString(", ")
					}
					w.WriteString(ep.String())
				}
			}))
	}

	// Delete any prior CallMeMaybe endpoints that weren't included
	// in this message (still marked false from the sweep above).
	for ep, want := range de.isCallMeMaybeEP {
		if !want {
			delete(de.isCallMeMaybeEP, ep)
			de.deleteEndpointLocked(ep)
		}
	}

	// Zero out all the lastPing times to force sendPingsLocked to send new ones,
	// even if it's been less than 5 seconds ago.
	for _, st := range de.endpointState {
		st.lastPing = 0
	}
	de.sendPingsLocked(mono.Now(), false)
}
2021-08-26 06:20:31 +01:00
func ( de * endpoint ) populatePeerStatus ( ps * ipnstate . PeerStatus ) {
2020-07-03 19:06:33 +01:00
de . mu . Lock ( )
defer de . mu . Unlock ( )
2021-09-01 00:55:22 +01:00
ps . Relay = de . c . derpRegionCodeOfIDLocked ( int ( de . derpAddr . Port ( ) ) )
2020-07-03 19:06:33 +01:00
if de . lastSend . IsZero ( ) {
return
}
2021-07-21 19:04:36 +01:00
now := mono . Now ( )
2021-08-04 16:01:35 +01:00
ps . LastWrite = de . lastSend . WallTime ( )
ps . Active = now . Sub ( de . lastSend ) < sessionActiveTimeout
2020-07-03 21:44:22 +01:00
if udpAddr , derpAddr := de . addrForSendLocked ( now ) ; ! udpAddr . IsZero ( ) && derpAddr . IsZero ( ) {
2020-07-03 19:06:33 +01:00
ps . CurAddr = udpAddr . String ( )
}
}
2020-07-30 21:48:32 +01:00
// stopAndReset stops timers associated with de and resets its state back to zero.
2021-08-26 03:39:20 +01:00
// It's called when a discovery endpoint is no longer present in the
// NetworkMap, or when magicsock is transitioning from running to
// stopped state (via SetPrivateKey(zero))
2021-08-26 06:20:31 +01:00
func ( de * endpoint ) stopAndReset ( ) {
2021-01-18 23:27:44 +00:00
atomic . AddInt64 ( & de . numStopAndResetAtomic , 1 )
2020-06-28 19:53:37 +01:00
de . mu . Lock ( )
defer de . mu . Unlock ( )
2021-10-29 22:27:29 +01:00
de . c . logf ( "[v1] magicsock: doing cleanup for discovery key %s" , de . discoKey . ShortString ( ) )
2020-06-30 23:32:19 +01:00
2021-10-06 18:18:12 +01:00
de . resetLocked ( )
if de . heartBeatTimer != nil {
de . heartBeatTimer . Stop ( )
de . heartBeatTimer = nil
}
de . pendingCLIPings = nil
}
// resetLocked clears all the endpoint's p2p state, reverting it to a
// DERP-only endpoint. It does not stop the endpoint's heartbeat
// timer, if one is running.
func ( de * endpoint ) resetLocked ( ) {
2021-07-21 19:04:36 +01:00
de . lastSend = 0
de . lastFullPing = 0
2021-03-23 17:07:34 +00:00
de . bestAddr = addrLatency { }
2021-07-21 19:04:36 +01:00
de . bestAddrAt = 0
de . trustBestAddrUntil = 0
2020-07-30 21:48:32 +01:00
for _ , es := range de . endpointState {
2021-07-21 19:04:36 +01:00
es . lastPing = 0
2020-07-30 21:48:32 +01:00
}
2020-07-03 19:45:41 +01:00
for txid , sp := range de . sentPing {
de . removeSentPingLocked ( txid , sp )
2020-06-30 23:32:19 +01:00
}
2020-06-28 19:53:37 +01:00
}
2020-06-30 22:37:35 +01:00
2021-08-26 06:20:31 +01:00
// numStopAndReset reports how many times stopAndReset has been
// called on this endpoint. Safe for concurrent use (atomic read).
func (de *endpoint) numStopAndReset() int64 {
	return atomic.LoadInt64(&de.numStopAndResetAtomic)
}
2020-07-02 18:48:13 +01:00
// derpStr replaces DERP IPs in s with "derp-".
func derpStr(s string) string {
	// 127.3.3.40 is the synthetic DERP magic IP; rewrite
	// "127.3.3.40:<region>" as "derp-<region>" for readability.
	const derpIPPrefix = "127.3.3.40:"
	return strings.ReplaceAll(s, derpIPPrefix, "derp-")
}
2020-09-03 23:45:41 +01:00
2021-01-18 23:27:44 +00:00
// ippEndpointCache is a mutex-free single-element cache, mapping from
// a single netaddr.IPPort to a single endpoint.
type ippEndpointCache struct {
	// ipp is the cached address key.
	ipp netaddr.IPPort
	// gen is a generation counter captured when the entry was
	// stored; presumably a mismatch with the current generation
	// invalidates the entry — confirm against the cache's users.
	gen int64
	// de is the endpoint cached for ipp.
	de *endpoint
}
2021-10-16 04:45:33 +01:00
// discoInfo is the info and state for the DiscoKey
// in the Conn.discoInfo map key.
//
// Note that a DiscoKey does not necessarily map to exactly one
// node. In the case of shared nodes and users switching accounts, two
// nodes in the NetMap may legitimately have the same DiscoKey. As
// such, no fields in here should be considered node-specific.
type discoInfo struct {
	// discoKey is the same as the Conn.discoInfo map key,
	// just so you can pass around a *discoInfo alone.
	// Not modified once initialized.
	discoKey key.DiscoPublic

	// discoShort is discoKey.ShortString().
	// Not modified once initialized.
	discoShort string

	// sharedKey is the precomputed key for communication with the
	// peer that has the DiscoKey used to look up this *discoInfo in
	// Conn.discoInfo.
	// Not modified once initialized.
	sharedKey key.DiscoShared

	// Mutable fields follow, owned by Conn.mu:

	// lastPingFrom is the src of a ping for discoKey.
	lastPingFrom netaddr.IPPort

	// lastPingTime is the last time of a ping for discoKey.
	lastPingTime time.Time

	// lastNodeKey is the last NodeKey seen using discoKey.
	// It's only updated if the NodeKey is unambiguous.
	lastNodeKey key.NodePublic

	// lastNodeKeyTime is the time a NodeKey was last seen using
	// this discoKey. It's only updated if the NodeKey is
	// unambiguous.
	lastNodeKeyTime time.Time
}
// setNodeKey sets the most recent mapping from di.discoKey to the
// NodeKey nk.
func (di *discoInfo) setNodeKey(nk key.NodePublic) {
	// Record both the key and when it was observed; callers can use
	// lastNodeKeyTime to judge freshness.
	di.lastNodeKey = nk
	di.lastNodeKeyTime = time.Now()
}
2020-12-14 02:51:24 +00:00
// Client metrics for magicsock, reported via the clientmetric package.
var (
	metricNumPeers     = clientmetric.NewGauge("magicsock_netmap_num_peers")
	metricNumDERPConns = clientmetric.NewGauge("magicsock_num_derp_conns")

	// Sends (data or disco)
	metricSendDERPQueued      = clientmetric.NewCounter("magicsock_send_derp_queued")
	metricSendDERPErrorChan   = clientmetric.NewCounter("magicsock_send_derp_error_chan")
	metricSendDERPErrorClosed = clientmetric.NewCounter("magicsock_send_derp_error_closed")
	metricSendDERPErrorQueue  = clientmetric.NewCounter("magicsock_send_derp_error_queue")
	metricSendUDP             = clientmetric.NewCounter("magicsock_send_udp")
	metricSendUDPError        = clientmetric.NewCounter("magicsock_send_udp_error")
	metricSendDERP            = clientmetric.NewCounter("magicsock_send_derp")
	metricSendDERPError       = clientmetric.NewCounter("magicsock_send_derp_error")

	// Data packets (non-disco)
	metricSendData            = clientmetric.NewCounter("magicsock_send_data")
	metricSendDataNetworkDown = clientmetric.NewCounter("magicsock_send_data_network_down")
	metricRecvData            = clientmetric.NewCounter("magicsock_recv_data")
	metricRecvDataDERP        = clientmetric.NewCounter("magicsock_recv_data_derp")
	metricRecvDataIPv4        = clientmetric.NewCounter("magicsock_recv_data_ipv4")
	metricRecvDataIPv6        = clientmetric.NewCounter("magicsock_recv_data_ipv6")

	// Disco packets
	metricSendDiscoUDP                 = clientmetric.NewCounter("magicsock_disco_send_udp")
	metricSendDiscoDERP                = clientmetric.NewCounter("magicsock_disco_send_derp")
	metricSentDiscoUDP                 = clientmetric.NewCounter("magicsock_disco_sent_udp")
	metricSentDiscoDERP                = clientmetric.NewCounter("magicsock_disco_sent_derp")
	metricSentDiscoPing                = clientmetric.NewCounter("magicsock_disco_sent_ping")
	metricSentDiscoPong                = clientmetric.NewCounter("magicsock_disco_sent_pong")
	metricSentDiscoCallMeMaybe         = clientmetric.NewCounter("magicsock_disco_sent_callmemaybe")
	metricRecvDiscoBadPeer             = clientmetric.NewCounter("magicsock_disco_recv_bad_peer")
	metricRecvDiscoBadKey              = clientmetric.NewCounter("magicsock_disco_recv_bad_key")
	metricRecvDiscoBadParse            = clientmetric.NewCounter("magicsock_disco_recv_bad_parse")
	metricRecvDiscoUDP                 = clientmetric.NewCounter("magicsock_disco_recv_udp")
	metricRecvDiscoDERP                = clientmetric.NewCounter("magicsock_disco_recv_derp")
	metricRecvDiscoPing                = clientmetric.NewCounter("magicsock_disco_recv_ping")
	metricRecvDiscoPong                = clientmetric.NewCounter("magicsock_disco_recv_pong")
	metricRecvDiscoCallMeMaybe         = clientmetric.NewCounter("magicsock_disco_recv_callmemaybe")
	metricRecvDiscoCallMeMaybeBadNode  = clientmetric.NewCounter("magicsock_disco_recv_callmemaybe_bad_node")
	metricRecvDiscoCallMeMaybeBadDisco = clientmetric.NewCounter("magicsock_disco_recv_callmemaybe_bad_disco")

	// metricDERPHomeChange is how many times our DERP home region ID has
	// changed from non-zero to a different non-zero.
	metricDERPHomeChange = clientmetric.NewCounter("derp_home_change")
)