// Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package wgengine

import (
	"bufio"
	"bytes"
	"context"
	crand "crypto/rand"
	"errors"
	"fmt"
	"io"
	"net"
	"os"
	"reflect"
	"runtime"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/tailscale/wireguard-go/device"
	"github.com/tailscale/wireguard-go/tun"
	"go4.org/mem"
	"inet.af/netaddr"
	"tailscale.com/control/controlclient"
	"tailscale.com/health"
	"tailscale.com/internal/deepprint"
	"tailscale.com/ipn/ipnstate"
	"tailscale.com/net/dns"
	"tailscale.com/net/dns/resolver"
	"tailscale.com/net/flowtrack"
	"tailscale.com/net/interfaces"
	"tailscale.com/net/packet"
	"tailscale.com/net/tsaddr"
	"tailscale.com/net/tshttpproxy"
	"tailscale.com/net/tstun"
	"tailscale.com/tailcfg"
	"tailscale.com/types/ipproto"
	"tailscale.com/types/key"
	"tailscale.com/types/logger"
	"tailscale.com/types/netmap"
	"tailscale.com/types/wgkey"
	"tailscale.com/version"
	"tailscale.com/wgengine/filter"
	"tailscale.com/wgengine/magicsock"
	"tailscale.com/wgengine/monitor"
	"tailscale.com/wgengine/router"
	"tailscale.com/wgengine/wgcfg"
	"tailscale.com/wgengine/wglog"
)

const magicDNSPort = 53

var magicDNSIP = netaddr.IPv4(100, 100, 100, 100)

// Lazy wireguard-go configuration parameters.
const (
	// lazyPeerIdleThreshold is the idle duration after
	// which we remove a peer from the wireguard configuration.
	// (This includes peers that have never been idle, which
	// effectively have infinite idleness.)
	lazyPeerIdleThreshold = 5 * time.Minute

	// packetSendTimeUpdateFrequency controls how often we record
	// the time that we wrote a packet to an IP address.
	packetSendTimeUpdateFrequency = 10 * time.Second

	// packetSendRecheckWireguardThreshold controls how long we can go
	// between packet sends to an IP before checking to see
	// whether this IP address needs to be added back to the
	// WireGuard peer config.
	packetSendRecheckWireguardThreshold = 1 * time.Minute
)

// statusPollInterval is how often we ask wireguard-go for its engine
// status (as long as there's activity). See docs on its use below.
const statusPollInterval = 1 * time.Minute

type userspaceEngine struct {
	logf      logger.Logf
	wgLogger  *wglog.Logger // a wireguard-go logging wrapper
	reqCh     chan struct{}
	waitCh    chan struct{} // chan is closed when first Close call completes; contrast with closing bool
	timeNow   func() time.Time
	tundev    *tstun.Wrapper
	wgdev     *device.Device
	router    router.Router
	dns       *dns.Manager
	magicConn *magicsock.Conn
	linkMon   *monitor.Mon

	linkMonOwned      bool   // whether we created linkMon (and thus need to close it)
	linkMonUnregister func() // unsubscribes from changes; used regardless of linkMonOwned

	testMaybeReconfigHook func() // for tests; if non-nil, fires if maybeReconfigWireguardLocked called

	// isLocalAddr reports whether an IP is assigned to the local
	// tunnel interface. It's used to reflect local packets
	// incorrectly sent to us.
	isLocalAddr atomic.Value // of func(netaddr.IP) bool

	wgLock              sync.Mutex // serializes all wgdev operations; see lock order comment below
	lastCfgFull         wgcfg.Config
	lastRouterSig       string // of router.Config
	lastEngineSigFull   string // of full wireguard config
	lastEngineSigTrim   string // of trimmed wireguard config
	recvActivityAt      map[tailcfg.DiscoKey]time.Time
	trimmedDisco        map[tailcfg.DiscoKey]bool // set of disco keys of peers currently excluded from wireguard config
	sentActivityAt      map[netaddr.IP]*int64     // value is atomic int64 of unixtime
	destIPActivityFuncs map[netaddr.IP]func()
	statusBufioReader   *bufio.Reader // reusable for UAPI
	lastStatusPollTime  time.Time     // last time we polled the engine status

	mu                  sync.Mutex         // guards following; see lock order comment below
	netMap              *netmap.NetworkMap // or nil
	closing             bool               // Close was called (even if we're still closing)
	statusCallback      StatusCallback
	peerSequence        []wgkey.Key
	endpoints           []tailcfg.Endpoint
	pingers             map[wgkey.Key]*pinger                  // legacy pingers for pre-discovery peers
	pendOpen            map[flowtrack.Tuple]*pendingOpenFlow   // see pendopen.go
	networkMapCallbacks map[*someHandle]NetworkMapCallback
	tsIPByIPPort        map[netaddr.IPPort]netaddr.IP          // allows registration of IP:ports as belonging to a certain Tailscale IP for whois lookups
	pongCallback        map[[8]byte]func(packet.TSMPPongReply) // for TSMP pong responses

	// Lock ordering: magicsock.Conn.mu, wgLock, then mu.
}
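
// A sketch of the lock acquisition order above (illustrative only):
// when both engine locks are needed, take wgLock before mu, and
// release in the reverse order:
//
//	e.wgLock.Lock()
//	e.mu.Lock()
//	// ... touch state guarded by both ...
//	e.mu.Unlock()
//	e.wgLock.Unlock()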

// InternalsGetter is implemented by Engines that can export their internals.
type InternalsGetter interface {
	GetInternals() (_ *tstun.Wrapper, _ *magicsock.Conn, ok bool)
}

func (e *userspaceEngine) GetInternals() (_ *tstun.Wrapper, _ *magicsock.Conn, ok bool) {
	return e.tundev, e.magicConn, true
}

// Config is the engine configuration.
type Config struct {
	// Tun is the device used by the Engine to exchange packets with
	// the OS.
	// If nil, a fake Device that does nothing is used.
	Tun tun.Device

	// Router interfaces the Engine to the OS network stack.
	// If nil, a fake Router that does nothing is used.
	Router router.Router

	// DNS interfaces the Engine to the OS DNS resolver configuration.
	// If nil, a fake OSConfigurator that does nothing is used.
	DNS dns.OSConfigurator

	// LinkMonitor optionally provides an existing link monitor to re-use.
	// If nil, a new link monitor is created.
	LinkMonitor *monitor.Mon

	// ListenPort is the port on which the engine will listen.
	// If zero, a port is automatically selected.
	ListenPort uint16

	// RespondToPing determines whether this engine should internally
	// reply to ICMP pings, without involving the OS.
	// Used in "fake" mode for development.
	RespondToPing bool
}

func NewFakeUserspaceEngine(logf logger.Logf, listenPort uint16) (Engine, error) {
	logf("Starting userspace wireguard engine (with fake TUN device)")
	return NewUserspaceEngine(logf, Config{
		ListenPort:    listenPort,
		RespondToPing: true,
	})
}
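
// A minimal usage sketch (assuming only the standard library "log"
// package for the logger); handy for tests that need an Engine but no
// real TUN device:
//
//	e, err := wgengine.NewFakeUserspaceEngine(log.Printf, 0)
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer e.Close()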

// NetstackRouterType is a gross cross-package init-time registration
// from netstack to here, informing this package of netstack's router
// type.
var NetstackRouterType reflect.Type

// IsNetstackRouter reports whether e is either fully netstack based
// (without TUN) or is at least using netstack for routing.
func IsNetstackRouter(e Engine) bool {
	switch e := e.(type) {
	case *userspaceEngine:
		if reflect.TypeOf(e.router) == NetstackRouterType {
			return true
		}
	case *watchdogEngine:
		return IsNetstackRouter(e.wrap)
	}
	return IsNetstack(e)
}

// IsNetstack reports whether e is a netstack-based TUN-free engine.
func IsNetstack(e Engine) bool {
	ig, ok := e.(InternalsGetter)
	if !ok {
		return false
	}
	tw, _, ok := ig.GetInternals()
	if !ok {
		return false
	}
	name, err := tw.Name()
	return err == nil && name == "FakeTUN"
}

// NewUserspaceEngine creates the named tun device and returns a
// Tailscale Engine running on it.
func NewUserspaceEngine(logf logger.Logf, conf Config) (_ Engine, reterr error) {
	var closePool closeOnErrorPool
	defer closePool.closeAllIfError(&reterr)

	if conf.Tun == nil {
		logf("[v1] using fake (no-op) tun device")
		conf.Tun = tstun.NewFake()
	}
	if conf.Router == nil {
		logf("[v1] using fake (no-op) OS network configurator")
		conf.Router = router.NewFake(logf)
	}
	if conf.DNS == nil {
		logf("[v1] using fake (no-op) DNS configurator")
		d, err := dns.NewNoopManager()
		if err != nil {
			return nil, err
		}
		conf.DNS = d
	}

	tsTUNDev := tstun.Wrap(logf, conf.Tun)
	closePool.add(tsTUNDev)

	e := &userspaceEngine{
		timeNow: time.Now,
		logf:    logf,
		reqCh:   make(chan struct{}, 1),
		waitCh:  make(chan struct{}),
		tundev:  tsTUNDev,
		router:  conf.Router,
		pingers: make(map[wgkey.Key]*pinger),
	}
	e.isLocalAddr.Store(genLocalAddrFunc(nil))

	if conf.LinkMonitor != nil {
		e.linkMon = conf.LinkMonitor
	} else {
		mon, err := monitor.New(logf)
		if err != nil {
			return nil, err
		}
		closePool.add(mon)
		e.linkMon = mon
		e.linkMonOwned = true
	}

	e.dns = dns.NewManager(logf, conf.DNS, e.linkMon)

	logf("link state: %+v", e.linkMon.InterfaceState())

	unregisterMonWatch := e.linkMon.RegisterChangeCallback(func(changed bool, st *interfaces.State) {
		tshttpproxy.InvalidateCache()
		e.linkChange(changed, st)
	})
	closePool.addFunc(unregisterMonWatch)
	e.linkMonUnregister = unregisterMonWatch

	endpointsFn := func(endpoints []tailcfg.Endpoint) {
		e.mu.Lock()
		e.endpoints = append(e.endpoints[:0], endpoints...)
		e.mu.Unlock()

		e.RequestStatus()
	}
	magicsockOpts := magicsock.Options{
		Logf:             logf,
		Port:             conf.ListenPort,
		EndpointsFunc:    endpointsFn,
		DERPActiveFunc:   e.RequestStatus,
		IdleFunc:         e.tundev.IdleDuration,
		NoteRecvActivity: e.noteReceiveActivity,
		LinkMonitor:      e.linkMon,
	}

	var err error
	e.magicConn, err = magicsock.NewConn(magicsockOpts)
	if err != nil {
		return nil, fmt.Errorf("wgengine: %v", err)
	}
	closePool.add(e.magicConn)
	e.magicConn.SetNetworkUp(e.linkMon.InterfaceState().AnyInterfaceUp())

	if conf.RespondToPing {
		e.tundev.PostFilterIn = echoRespondToAll
	}
	e.tundev.PreFilterOut = e.handleLocalPackets

	if debugConnectFailures() {
		if e.tundev.PreFilterIn != nil {
			return nil, errors.New("unexpected PreFilterIn already set")
		}
		e.tundev.PreFilterIn = e.trackOpenPreFilterIn
		if e.tundev.PostFilterOut != nil {
			return nil, errors.New("unexpected PostFilterOut already set")
		}
		e.tundev.PostFilterOut = e.trackOpenPostFilterOut
	}

	e.wgLogger = wglog.NewLogger(logf)
	opts := &device.DeviceOptions{
		HandshakeDone: func(peerKey device.NoisePublicKey, peer *device.Peer, deviceAllowedIPs *device.AllowedIPs) {
			// Send an unsolicited status event every time a
			// handshake completes. This makes sure our UI can
			// update quickly as soon as it connects to a peer.
			//
			// We use a goroutine here to avoid deadlocking
			// wireguard, since RequestStatus() will call back
			// into it, and wireguard is what called us to get
			// here.
			go e.RequestStatus()

			peerWGKey := wgkey.Key(peerKey)
			if e.magicConn.PeerHasDiscoKey(tailcfg.NodeKey(peerKey)) {
				e.logf("wireguard handshake complete for %v", peerWGKey.ShortString())
				// This is a modern peer with discovery support. No need to send pings.
				return
			}

			e.logf("wireguard handshake complete for %v; sending legacy pings", peerWGKey.ShortString())

			// Ping every single-IP that peer routes.
			// These synthetic packets are used to traverse NATs.
			var ips []netaddr.IP
			var allowedIPs []netaddr.IPPrefix
			deviceAllowedIPs.EntriesForPeer(peer, func(stdIP net.IP, cidr uint) bool {
				ip, ok := netaddr.FromStdIP(stdIP)
				if !ok {
					logf("[unexpected] bad IP from deviceAllowedIPs.EntriesForPeer: %v", stdIP)
					return true
				}
				ipp := netaddr.IPPrefix{IP: ip, Bits: uint8(cidr)}
				allowedIPs = append(allowedIPs, ipp)
				if ipp.IsSingleIP() {
					ips = append(ips, ip)
				}
				return true
			})
			if len(ips) > 0 {
				go e.pinger(peerWGKey, ips)
			} else {
				logf("[unexpected] peer %s has no single-IP routes: %v", peerWGKey.ShortString(), allowedIPs)
			}
		},
	}

	e.tundev.OnTSMPPongReceived = func(pong packet.TSMPPongReply) {
		e.mu.Lock()
		defer e.mu.Unlock()
		cb := e.pongCallback[pong.Data]
		e.logf("wgengine: got TSMP pong %02x, peerAPIPort=%v; cb=%v", pong.Data, pong.PeerAPIPort, cb != nil)
		if cb != nil {
			go cb(pong)
		}
	}

	// wgdev takes ownership of tundev, will close it when closed.
	e.logf("Creating wireguard device...")
	e.wgdev = device.NewDevice(e.tundev, e.magicConn.Bind(), e.wgLogger.DeviceLogger, opts)
	closePool.addFunc(e.wgdev.Close)
	closePool.addFunc(func() {
		if err := e.magicConn.Close(); err != nil {
			e.logf("error closing magicconn: %v", err)
		}
	})

	go func() {
		up := false
		for event := range e.tundev.EventsUpDown() {
			if event&tun.EventUp != 0 && !up {
				e.logf("external route: up")
				e.RequestStatus()
				up = true
			}
			if event&tun.EventDown != 0 && up {
				e.logf("external route: down")
				e.RequestStatus()
				up = false
			}
		}
	}()

	e.logf("Bringing wireguard device up...")
	e.wgdev.Up()
	e.logf("Bringing router up...")
	if err := e.router.Up(); err != nil {
		return nil, err
	}

	// It's a little pointless to apply no-op settings here (they
	// should already be empty?), but it at least exercises the
	// router implementation early on the machine.
	e.logf("Clearing router settings...")
	if err := e.router.Set(nil); err != nil {
		return nil, err
	}
	e.logf("Starting link monitor...")
	e.linkMon.Start()
	e.logf("Starting magicsock...")
	e.magicConn.Start()

	go e.pollResolver()

	e.logf("Engine created.")
	return e, nil
}

// echoRespondToAll is an inbound post-filter responding to all echo requests.
func echoRespondToAll(p *packet.Parsed, t *tstun.Wrapper) filter.Response {
	if p.IsEchoRequest() {
		header := p.ICMP4Header()
		header.ToResponse()
		outp := packet.Generate(&header, p.Payload())
		t.InjectOutbound(outp)
		// We already responded to it, but it's not an error.
		// Proceed with regular delivery. (Since this code is only
		// used in fake mode, regular delivery just means throwing
		// it away. If this ever gets run in non-fake mode, you'll
		// get double responses to pings, which is an indicator you
		// shouldn't be doing that I guess.)
		return filter.Accept
	}
	return filter.Accept
}

// handleLocalPackets inspects packets coming from the local network
// stack, and intercepts any packets that should be handled by
// tailscaled directly. Other packets are allowed to proceed into the
// main ACL filter.
func (e *userspaceEngine) handleLocalPackets(p *packet.Parsed, t *tstun.Wrapper) filter.Response {
	if verdict := e.handleDNS(p, t); verdict == filter.Drop {
		// local DNS handled the packet.
		return filter.Drop
	}

	if runtime.GOOS == "darwin" || runtime.GOOS == "ios" {
		isLocalAddr, ok := e.isLocalAddr.Load().(func(netaddr.IP) bool)
		if !ok {
			e.logf("[unexpected] e.isLocalAddr was nil, can't check for loopback packet")
		} else if isLocalAddr(p.Dst.IP) {
			// macOS NetworkExtension directs packets destined to the
			// tunnel's local IP address into the tunnel, instead of
			// looping back within the kernel network stack. We have to
			// notice that an outbound packet is actually destined for
			// ourselves, and loop it back into macOS.
			t.InjectInboundCopy(p.Buffer())
			return filter.Drop
		}
	}

	return filter.Accept
}

// handleDNS is an outbound pre-filter resolving Tailscale domains.
func (e *userspaceEngine) handleDNS(p *packet.Parsed, t *tstun.Wrapper) filter.Response {
	if p.Dst.IP == magicDNSIP && p.Dst.Port == magicDNSPort && p.IPProto == ipproto.UDP {
		err := e.dns.EnqueueRequest(append([]byte(nil), p.Payload()...), p.Src)
		if err != nil {
			e.logf("dns: enqueue: %v", err)
		}
		return filter.Drop
	}
	return filter.Accept
}

// pollResolver reads responses from the DNS resolver and injects them inbound.
func (e *userspaceEngine) pollResolver() {
	for {
		bs, to, err := e.dns.NextResponse()
		if err == resolver.ErrClosed {
			return
		}
		if err != nil {
			e.logf("dns: error: %v", err)
			continue
		}

		h := packet.UDP4Header{
			IP4Header: packet.IP4Header{
				Src: magicDNSIP,
				Dst: to.IP,
			},
			SrcPort: magicDNSPort,
			DstPort: to.Port,
		}
		hlen := h.Len()

		// TODO(dmytro): avoid this allocation without importing tstun quirks into dns.
		const offset = tstun.PacketStartOffset
		buf := make([]byte, offset+hlen+len(bs))
		copy(buf[offset+hlen:], bs)
		h.Marshal(buf[offset:])

		e.tundev.InjectInboundDirect(buf, offset)
	}
}

// pinger sends ping packets for a few seconds.
//
// These generated packets are used to ensure we trigger the spray logic in
// the magicsock package for NAT traversal.
//
// These are only used with legacy peers (before 0.100.0) that don't
// have advertised discovery keys.
type pinger struct {
	e      *userspaceEngine
	done   chan struct{} // closed after shutdown (not the ctx.Done() chan)
	cancel context.CancelFunc
}

// close cleans up pinger and removes it from the userspaceEngine.pingers map.
// It cannot be called while p.e.mu is held.
func (p *pinger) close() {
	p.cancel()
	<-p.done
}

func (p *pinger) run(ctx context.Context, peerKey wgkey.Key, ips []netaddr.IP, srcIP netaddr.IP) {
	defer func() {
		p.e.mu.Lock()
		if p.e.pingers[peerKey] == p {
			delete(p.e.pingers, peerKey)
		}
		p.e.mu.Unlock()

		close(p.done)
	}()

	header := packet.ICMP4Header{
		IP4Header: packet.IP4Header{
			Src: srcIP,
		},
		Type: packet.ICMP4EchoRequest,
		Code: packet.ICMP4NoCode,
	}

	// sendFreq is slightly longer than sprayFreq in magicsock to ensure
	// that if these ping packets are the only source of early packets
	// sent to the peer, that each one will be sprayed.
	const sendFreq = 300 * time.Millisecond
	const stopAfter = 3 * time.Second

	start := time.Now()
	var dstIPs []netaddr.IP
	for _, ip := range ips {
		if ip.Is6() {
			// This code is only used for legacy (pre-discovery)
			// peers. They're not going to work right with IPv6 on the
			// overlay anyway, so don't bother trying to make ping
			// work.
			continue
		}
		dstIPs = append(dstIPs, ip)
	}

	payload := []byte("magicsock_spray") // no meaning

	header.IPID = 1
	t := time.NewTicker(sendFreq)
	defer t.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-t.C:
		}
		if time.Since(start) > stopAfter {
			return
		}
		for _, dstIP := range dstIPs {
			header.Dst = dstIP
			// InjectOutbound takes ownership of the packet, so we allocate.
			b := packet.Generate(&header, payload)
			p.e.tundev.InjectOutbound(b)
		}
		header.IPID++
	}
}

// pinger sends ping packets for a few seconds.
//
// These generated packets are used to ensure we trigger the spray logic in
// the magicsock package for NAT traversal.
//
// This is only used with legacy peers (before 0.100.0) that don't
// have advertised discovery keys.
func (e *userspaceEngine) pinger(peerKey wgkey.Key, ips []netaddr.IP) {
	e.logf("[v1] generating initial ping traffic to %s (%v)", peerKey.ShortString(), ips)
	var srcIP netaddr.IP

	e.wgLock.Lock()
	if len(e.lastCfgFull.Addresses) > 0 {
		srcIP = e.lastCfgFull.Addresses[0].IP
	}
	e.wgLock.Unlock()

	if srcIP.IsZero() {
		e.logf("generating initial ping traffic: no source IP")
		return
	}

	ctx, cancel := context.WithCancel(context.Background())
	p := &pinger{
		e:      e,
		done:   make(chan struct{}),
		cancel: cancel,
	}

	e.mu.Lock()
	if e.closing {
		e.mu.Unlock()
		return
	}
	oldPinger := e.pingers[peerKey]
	e.pingers[peerKey] = p
	e.mu.Unlock()

	if oldPinger != nil {
		oldPinger.close()
	}
	p.run(ctx, peerKey, ips, srcIP)
}

var (
	debugTrimWireguardEnv = os.Getenv("TS_DEBUG_TRIM_WIREGUARD")
	debugTrimWireguard, _ = strconv.ParseBool(debugTrimWireguardEnv)
)

// forceFullWireguardConfig reports whether we should give wireguard
// our full network map, even for inactive peers.
//
// TODO(bradfitz): remove this after our 1.0 launch; we don't want to
// enable wireguard config trimming quite yet because it just landed
// and we haven't got enough time testing it.
func forceFullWireguardConfig(numPeers int) bool {
	// Did the user explicitly enable trimming via the environment variable knob?
	if debugTrimWireguardEnv != "" {
		return !debugTrimWireguard
	}
	if opt := controlclient.TrimWGConfig(); opt != "" {
		return !opt.EqualBool(true)
	}

	// On iOS with large networks, it's critical, so turn on trimming.
	// Otherwise we run out of memory from wireguard-go goroutine stacks+buffers.
	// This will be the default later for all platforms and network sizes.
	if numPeers > 50 && version.OS() == "iOS" {
		return false
	}
	return false
}
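
// For example (illustrative only): running tailscaled with
// TS_DEBUG_TRIM_WIREGUARD=false makes this report true (forcing the
// full config), while TS_DEBUG_TRIM_WIREGUARD=true forces trimming on
// regardless of platform or peer count.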

// isTrimmablePeer reports whether p is a peer that we can trim out of the
// network map.
//
// We can only trim peers that both a) support discovery (because we
// know who they are when we receive their data and don't need to rely
// on wireguard-go figuring it out) and b) for implementation
// simplicity, have only non-subnet AllowedIPs (an IPv4 /32 or IPv6
// /128), which is the common case for most peers. Subnet router nodes
// will just always be created in the wireguard-go config.
func isTrimmablePeer(p *wgcfg.Peer, numPeers int) bool {
	if forceFullWireguardConfig(numPeers) {
		return false
	}
	if !isSingleEndpoint(p.Endpoints) {
		return false
	}

	host, _, err := net.SplitHostPort(p.Endpoints)
	if err != nil {
		return false
	}
	if !strings.HasSuffix(host, ".disco.tailscale") {
		return false
	}

	// AllowedIPs must all be single IPs, not subnets.
	for _, aip := range p.AllowedIPs {
		if !aip.IsSingleIP() {
			return false
		}
	}
	return true
}

// noteReceiveActivity is called by magicsock when a packet has been
// received from the peer using discovery key dk. Magicsock calls this
// no more than every 10 seconds for a given peer.
func (e *userspaceEngine) noteReceiveActivity(dk tailcfg.DiscoKey) {
	e.wgLock.Lock()
	defer e.wgLock.Unlock()

	if _, ok := e.recvActivityAt[dk]; !ok {
		// Not a trimmable peer we care about tracking. (See isTrimmablePeer)
		if e.trimmedDisco[dk] {
			e.logf("wgengine: [unexpected] noteReceiveActivity called on idle discokey %v that's not in recvActivityAt", dk.ShortString())
		}
		return
	}
	now := e.timeNow()
	e.recvActivityAt[dk] = now

	// As long as there's activity, periodically poll the engine to get
	// stats for the far away side effect of
	// ipn/ipnlocal.LocalBackend.parseWgStatusLocked to log activity, for
	// use in various admin dashboards.
	// This particularly matters on platforms without a connected GUI, as
	// the GUIs generally poll this enough to cause that logging. But
	// tailscaled alone did not, hence this.
	if e.lastStatusPollTime.IsZero() || now.Sub(e.lastStatusPollTime) >= statusPollInterval {
		e.lastStatusPollTime = now
		go e.RequestStatus()
	}

	// If the last activity time jumped a bunch (say, at least
	// half the idle timeout) then see if we need to reprogram
	// Wireguard. This could probably be just
	// lazyPeerIdleThreshold without the divide by 2, but
	// maybeReconfigWireguardLocked is cheap enough to call every
	// couple minutes (just not on every packet).
	if e.trimmedDisco[dk] {
		e.logf("wgengine: idle peer %v now active, reconfiguring wireguard", dk.ShortString())
		e.maybeReconfigWireguardLocked(nil)
	}
}

// isActiveSince reports whether the peer identified by (dk, ip) has
// had a packet sent to or received from it since t.
//
// e.wgLock must be held.
func (e *userspaceEngine) isActiveSince(dk tailcfg.DiscoKey, ip netaddr.IP, t time.Time) bool {
	if e.recvActivityAt[dk].After(t) {
		return true
	}
	timePtr, ok := e.sentActivityAt[ip]
	if !ok {
		return false
	}
	unixTime := atomic.LoadInt64(timePtr)
	return unixTime >= t.Unix()
}

// discoKeyFromPeer returns the DiscoKey for a wireguard config's Peer.
//
// Invariant: isTrimmablePeer(p) == true, so it should have 1 endpoint with
// Host of form "<64-hex-digits>.disco.tailscale". If invariant is violated,
// we return the zero value.
func discoKeyFromPeer(p *wgcfg.Peer) tailcfg.DiscoKey {
	if len(p.Endpoints) < 64 {
		return tailcfg.DiscoKey{}
	}
	host, rest := p.Endpoints[:64], p.Endpoints[64:]
	if !strings.HasPrefix(rest, ".disco.tailscale") {
		return tailcfg.DiscoKey{}
	}
	k, err := key.NewPublicFromHexMem(mem.S(host))
	if err != nil {
		return tailcfg.DiscoKey{}
	}
	return tailcfg.DiscoKey(k)
}
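
// For example, a trimmable peer's single endpoint looks like
// "<64 hex chars of its disco public key>.disco.tailscale:12345"
// (port illustrative); discoKeyFromPeer recovers the key from the
// first 64 bytes of that host name.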

// discoChanged is the set of peers whose disco keys have changed, implying they've restarted.
// If a peer is in this set and was previously in the live wireguard config,
// it needs to be first removed and then re-added to flush out its wireguard session key.
// If discoChanged is nil or empty, this extra removal step isn't done.
//
// e.wgLock must be held.
func (e *userspaceEngine) maybeReconfigWireguardLocked(discoChanged map[key.Public]bool) error {
	if hook := e.testMaybeReconfigHook; hook != nil {
		hook()
		return nil
	}

	full := e.lastCfgFull
	e.wgLogger.SetPeers(full.Peers)

	// Compute a minimal config to pass to wireguard-go
	// based on the full config. Prune off all the peers
	// and only add the active ones back.
	min := full
	min.Peers = nil

	// We'll only keep a peer around if it's been active in
	// the past 5 minutes. That's more than WireGuard's key
	// rotation time anyway so it's no harm if we remove it
	// later if it's been inactive.
	activeCutoff := e.timeNow().Add(-lazyPeerIdleThreshold)

	// Not all peers can be trimmed from the network map (see
	// isTrimmablePeer). For those that are trimmable, keep track
	// of their DiscoKey and Tailscale IPs. These are the ones
	// we'll need to install tracking hooks for to watch their
	// send/receive activity.
	trackDisco := make([]tailcfg.DiscoKey, 0, len(full.Peers))
	trackIPs := make([]netaddr.IP, 0, len(full.Peers))

	trimmedDisco := map[tailcfg.DiscoKey]bool{} // TODO: don't re-alloc this map each time

	needRemoveStep := false
	for i := range full.Peers {
		p := &full.Peers[i]
		if !isTrimmablePeer(p, len(full.Peers)) {
			min.Peers = append(min.Peers, *p)
			if discoChanged[key.Public(p.PublicKey)] {
				needRemoveStep = true
			}
			continue
		}
		dk := discoKeyFromPeer(p)
		trackDisco = append(trackDisco, dk)
		recentlyActive := false
		for _, cidr := range p.AllowedIPs {
			trackIPs = append(trackIPs, cidr.IP)
			recentlyActive = recentlyActive || e.isActiveSince(dk, cidr.IP, activeCutoff)
		}
		if recentlyActive {
			min.Peers = append(min.Peers, *p)
			if discoChanged[key.Public(p.PublicKey)] {
				needRemoveStep = true
			}
		} else {
			trimmedDisco[dk] = true
		}
	}

	if !deepprint.UpdateHash(&e.lastEngineSigTrim, min, trimmedDisco, trackDisco, trackIPs) {
		// No changes
		return nil
	}

	e.trimmedDisco = trimmedDisco

	e.updateActivityMapsLocked(trackDisco, trackIPs)

	if needRemoveStep {
		minner := min
		minner.Peers = nil
		numRemove := 0
		for _, p := range min.Peers {
			if discoChanged[key.Public(p.PublicKey)] {
				numRemove++
				continue
			}
			minner.Peers = append(minner.Peers, p)
		}
		if numRemove > 0 {
			e.logf("wgengine: Reconfig: removing session keys for %d peers", numRemove)
			if err := wgcfg.ReconfigDevice(e.wgdev, &minner, e.logf); err != nil {
				e.logf("wgdev.Reconfig: %v", err)
				return err
			}
		}
	}

	e.logf("wgengine: Reconfig: configuring userspace wireguard config (with %d/%d peers)", len(min.Peers), len(full.Peers))
	if err := wgcfg.ReconfigDevice(e.wgdev, &min, e.logf); err != nil {
		e.logf("wgdev.Reconfig: %v", err)
		return err
	}
	return nil
}

// updateActivityMapsLocked updates the data structures used for tracking the activity
// of wireguard peers that we might add/remove dynamically from the real config
// as given to wireguard-go.
//
// e.wgLock must be held.
func (e *userspaceEngine) updateActivityMapsLocked(trackDisco []tailcfg.DiscoKey, trackIPs []netaddr.IP) {
	// Generate the new map of which discokeys we want to track
	// receive times for.
	mr := map[tailcfg.DiscoKey]time.Time{} // TODO: only recreate this if set of keys changed
	for _, dk := range trackDisco {
		// Preserve old times in the new map, but also
		// populate map entries for new trackDisco values with
		// time.Time{} zero values. (Only entries in this map
		// are tracked, so the Time zero values allow it to be
		// tracked later)
		mr[dk] = e.recvActivityAt[dk]
	}
	e.recvActivityAt = mr

	oldTime := e.sentActivityAt
	e.sentActivityAt = make(map[netaddr.IP]*int64, len(oldTime))
	oldFunc := e.destIPActivityFuncs
	e.destIPActivityFuncs = make(map[netaddr.IP]func(), len(oldFunc))

	updateFn := func(timePtr *int64) func() {
		return func() {
			now := e.timeNow().Unix()
			old := atomic.LoadInt64(timePtr)

			// How long's it been since we last sent a packet?
			// For our first packet, old is Unix epoch time 0 (1970).
			elapsedSec := now - old

			if elapsedSec >= int64(packetSendTimeUpdateFrequency/time.Second) {
				atomic.StoreInt64(timePtr, now)
			}
			// On a big jump, assume we might no longer be in the wireguard
			// config and go check.
			if elapsedSec >= int64(packetSendRecheckWireguardThreshold/time.Second) {
				e.wgLock.Lock()
				defer e.wgLock.Unlock()
				e.maybeReconfigWireguardLocked(nil)
			}
		}
	}

	for _, ip := range trackIPs {
		timePtr := oldTime[ip]
		if timePtr == nil {
			timePtr = new(int64)
		}
		e.sentActivityAt[ip] = timePtr

		fn := oldFunc[ip]
		if fn == nil {
			fn = updateFn(timePtr)
		}
		e.destIPActivityFuncs[ip] = fn
	}
	e.tundev.SetDestIPActivityFuncs(e.destIPActivityFuncs)
}

// genLocalAddrFunc returns a func that reports whether an IP is in addrs.
// addrs is assumed to be all /32 or /128 entries.
func genLocalAddrFunc(addrs []netaddr.IPPrefix) func(netaddr.IP) bool {
	// Specialize the three common cases: no address, just IPv4
	// (or just IPv6), and both IPv4 and IPv6.
	if len(addrs) == 0 {
		return func(netaddr.IP) bool { return false }
	}
	if len(addrs) == 1 {
		return func(t netaddr.IP) bool { return t == addrs[0].IP }
	}
	if len(addrs) == 2 {
		return func(t netaddr.IP) bool { return t == addrs[0].IP || t == addrs[1].IP }
	}
	// Otherwise, the general implementation: a map lookup.
	m := map[netaddr.IP]bool{}
	for _, a := range addrs {
		m[a.IP] = true
	}
	return func(t netaddr.IP) bool { return m[t] }
}
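
// For example (illustrative addresses), with addrs holding only
// 100.101.102.103/32, the returned func reports true for
// netaddr.MustParseIP("100.101.102.103") and false for everything else.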

func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config, dnsCfg *dns.Config) error {
	if routerCfg == nil {
		panic("routerCfg must not be nil")
	}
	if dnsCfg == nil {
		panic("dnsCfg must not be nil")
	}

	e.isLocalAddr.Store(genLocalAddrFunc(routerCfg.LocalAddrs))

	e.wgLock.Lock()
	defer e.wgLock.Unlock()

	peerSet := make(map[key.Public]struct{}, len(cfg.Peers))
	e.mu.Lock()
	e.peerSequence = e.peerSequence[:0]
	for _, p := range cfg.Peers {
		e.peerSequence = append(e.peerSequence, wgkey.Key(p.PublicKey))
		peerSet[key.Public(p.PublicKey)] = struct{}{}
	}
	e.mu.Unlock()

	engineChanged := deepprint.UpdateHash(&e.lastEngineSigFull, cfg)
	routerChanged := deepprint.UpdateHash(&e.lastRouterSig, routerCfg, dnsCfg)
	if !engineChanged && !routerChanged {
		return ErrNoChanges
	}

	// See if any peers have changed disco keys, which means they've restarted.
	// If so, we need to update the wireguard-go/device.Device in two phases:
	// once without the node which has restarted, to clear its wireguard session key,
	// and a second time with it.
	discoChanged := make(map[key.Public]bool)
	{
		prevEP := make(map[key.Public]string)
		for i := range e.lastCfgFull.Peers {
			if p := &e.lastCfgFull.Peers[i]; isSingleEndpoint(p.Endpoints) {
				prevEP[key.Public(p.PublicKey)] = p.Endpoints
			}
		}
		for i := range cfg.Peers {
			p := &cfg.Peers[i]
			if !isSingleEndpoint(p.Endpoints) {
				continue
			}
			pub := key.Public(p.PublicKey)
			if old, ok := prevEP[pub]; ok && old != p.Endpoints {
				discoChanged[pub] = true
				e.logf("wgengine: Reconfig: %s changed from %q to %q", pub.ShortString(), old, p.Endpoints)
			}
		}
	}

	e.lastCfgFull = cfg.Copy()

	// Tell magicsock about the new (or initial) private key
	// (which is needed by DERP) before wgdev gets it, as wgdev
	// will start trying to handshake, which we want to be able to
	// go over DERP.
	if err := e.magicConn.SetPrivateKey(wgkey.Private(cfg.PrivateKey)); err != nil {
		e.logf("wgengine: Reconfig: SetPrivateKey: %v", err)
	}
	e.magicConn.UpdatePeers(peerSet)

	if err := e.maybeReconfigWireguardLocked(discoChanged); err != nil {
		return err
	}

	if routerChanged {
		e.logf("wgengine: Reconfig: configuring router")
		err := e.router.Set(routerCfg)
		health.SetRouterHealth(err)
		if err != nil {
			return err
		}
		// Keep DNS configuration after router configuration, as some
		// DNS managers refuse to apply settings if the device has no
		// assigned address.
		e.logf("wgengine: Reconfig: configuring DNS")
		err = e.dns.Set(*dnsCfg)
		health.SetDNSHealth(err)
		if err != nil {
			return err
		}
	}

	e.logf("[v1] wgengine: Reconfig done")
	return nil
}

// isSingleEndpoint reports whether endpoints contains exactly one host:port pair.
func isSingleEndpoint(s string) bool {
	return s != "" && !strings.Contains(s, ",")
}
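
// For example: isSingleEndpoint("1.2.3.4:5") is true, while
// isSingleEndpoint("") and isSingleEndpoint("1.2.3.4:5,6.7.8.9:10")
// are false.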

func (e *userspaceEngine) GetFilter() *filter.Filter {
	return e.tundev.GetFilter()
}

func (e *userspaceEngine) SetFilter(filt *filter.Filter) {
	e.tundev.SetFilter(filt)
}

func (e *userspaceEngine) SetStatusCallback(cb StatusCallback) {
	e.mu.Lock()
	defer e.mu.Unlock()
	e.statusCallback = cb
}

func (e *userspaceEngine) getStatusCallback() StatusCallback {
	e.mu.Lock()
	defer e.mu.Unlock()
	return e.statusCallback
}

var singleNewline = []byte{'\n'}

func (e *userspaceEngine) getStatus() (*Status, error) {
	// Grab derpConns before acquiring wgLock to not violate lock ordering;
	// the DERPs method acquires magicsock.Conn.mu.
	// (See comment in userspaceEngine's declaration.)
	derpConns := e.magicConn.DERPs()

	e.wgLock.Lock()
	defer e.wgLock.Unlock()

	e.mu.Lock()
	closing := e.closing
	e.mu.Unlock()
	if closing {
		return nil, errors.New("engine closing; no status")
	}

	if e.wgdev == nil {
		// RequestStatus was invoked before the wgengine has
		// finished initializing. This can happen when wgengine
		// provides a callback to magicsock for endpoint
		// updates that calls RequestStatus.
		return nil, nil
	}

	pr, pw := io.Pipe()
	defer pr.Close() // to unblock writes on error path returns

	errc := make(chan error, 1)
	go func() {
		defer pw.Close()
		// TODO(apenwarr): get rid of silly uapi stuff for in-process comms
		// FIXME: get notified of status changes instead of polling.
		err := e.wgdev.IpcGetOperation(pw)
		if err != nil {
			err = fmt.Errorf("IpcGetOperation: %w", err)
		}
		errc <- err
	}()

	pp := make(map[wgkey.Key]*ipnstate.PeerStatusLite)
	p := &ipnstate.PeerStatusLite{}

	var hst1, hst2, n int64

	br := e.statusBufioReader
	if br != nil {
		br.Reset(pr)
	} else {
		br = bufio.NewReaderSize(pr, 1<<10)
		e.statusBufioReader = br
	}
	for {
		line, err := br.ReadSlice('\n')
		if err == io.EOF {
			break
		}
		if err != nil {
			return nil, fmt.Errorf("reading from UAPI pipe: %w", err)
		}
		line = bytes.TrimSuffix(line, singleNewline)
		k := line
		var v mem.RO
		if i := bytes.IndexByte(line, '='); i != -1 {
			k = line[:i]
			v = mem.B(line[i+1:])
		}
		switch string(k) {
		case "public_key":
			pk, err := key.NewPublicFromHexMem(v)
			if err != nil {
				return nil, fmt.Errorf("IpcGetOperation: invalid key in line %q", line)
			}
			p = &ipnstate.PeerStatusLite{}
			pp[wgkey.Key(pk)] = p

			key := tailcfg.NodeKey(pk)
			p.NodeKey = key
		case "rx_bytes":
			n, err = mem.ParseInt(v, 10, 64)
			p.RxBytes = n
			if err != nil {
				return nil, fmt.Errorf("IpcGetOperation: rx_bytes invalid: %#v", line)
			}
		case "tx_bytes":
			n, err = mem.ParseInt(v, 10, 64)
			p.TxBytes = n
			if err != nil {
				return nil, fmt.Errorf("IpcGetOperation: tx_bytes invalid: %#v", line)
			}
		case "last_handshake_time_sec":
			hst1, err = mem.ParseInt(v, 10, 64)
			if err != nil {
				return nil, fmt.Errorf("IpcGetOperation: hst1 invalid: %#v", line)
			}
		case "last_handshake_time_nsec":
			hst2, err = mem.ParseInt(v, 10, 64)
			if err != nil {
				return nil, fmt.Errorf("IpcGetOperation: hst2 invalid: %#v", line)
			}
			if hst1 != 0 || hst2 != 0 {
				p.LastHandshake = time.Unix(hst1, hst2)
			} // else leave at time.IsZero()
		}
	}
	if err := <-errc; err != nil {
		return nil, fmt.Errorf("IpcGetOperation: %v", err)
	}

	e.mu.Lock()
	defer e.mu.Unlock()

	var peers []ipnstate.PeerStatusLite
	for _, pk := range e.peerSequence {
		if p, ok := pp[pk]; ok { // ignore idle ones not in wireguard-go's config
			peers = append(peers, *p)
		}
	}

	return &Status{
		LocalAddrs: append([]tailcfg.Endpoint(nil), e.endpoints...),
		Peers:      peers,
		DERPs:      derpConns,
	}, nil
}
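
// For reference, IpcGetOperation streams flat key=value lines, one value per
// line, with each public_key line starting a new peer section. A hedged
// illustration of the subset the loop above consumes (values invented;
// lines with other keys fall through the switch and are ignored):
//
//	public_key=<peer public key, hex>
//	rx_bytes=4096
//	tx_bytes=8192
//	last_handshake_time_sec=1617813600
//	last_handshake_time_nsec=123456789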

func (e *userspaceEngine) RequestStatus() {
	// This is slightly tricky. e.getStatus() can theoretically get
	// blocked inside wireguard for a while, and RequestStatus() is
	// sometimes called from a goroutine, so we don't want a lot of
	// them hanging around. On the other hand, requesting multiple
	// status updates simultaneously is pointless anyway; they will
	// all say the same thing.

	// Enqueue at most one request. If one is in progress already, this
	// adds one more to the queue. If one has been requested but not
	// started, it is a no-op.
	select {
	case e.reqCh <- struct{}{}:
	default:
	}

	// Dequeue at most one request. Another thread may have already
	// dequeued the request we enqueued above, which is fine, since the
	// information is guaranteed to be at least as recent as the current
	// call to RequestStatus().
	select {
	case <-e.reqCh:
		s, err := e.getStatus()
		if s == nil && err == nil {
			e.logf("[unexpected] RequestStatus: both s and err are nil")
			return
		}
		if cb := e.getStatusCallback(); cb != nil {
			cb(s, err)
		}
	default:
	}
}
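
// The enqueue/dequeue pair above is a request-coalescing idiom: a buffered
// channel of capacity 1 bounds pending requests to one. A caller marks work
// pending without blocking, then tries to claim and perform one unit of it;
// callers arriving while a request is queued but unstarted become no-ops.
// A minimal standalone sketch of the same idiom (names hypothetical):
//
//	reqCh := make(chan struct{}, 1)
//	request := func() {
//		select {
//		case reqCh <- struct{}{}: // mark work pending
//		default: // already pending
//		}
//		select {
//		case <-reqCh:
//			doExpensiveWork()
//		default: // another goroutine claimed it
//		}
//	}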

func (e *userspaceEngine) Close() {
	var pingers []*pinger

	e.mu.Lock()
	if e.closing {
		e.mu.Unlock()
		return
	}
	e.closing = true
	for _, pinger := range e.pingers {
		pingers = append(pingers, pinger)
	}
	e.mu.Unlock()

	r := bufio.NewReader(strings.NewReader(""))
	e.wgdev.IpcSetOperation(r)
	e.magicConn.Close()
	e.linkMonUnregister()
	if e.linkMonOwned {
		e.linkMon.Close()
	}
	e.dns.Down()
	e.router.Close()
	e.wgdev.Close()
	e.tundev.Close()

	// Shut down pingers after tundev is closed (by e.wgdev.Close) so the
	// synchronous close does not get stuck on InjectOutbound.
	for _, pinger := range pingers {
		pinger.close()
	}

	close(e.waitCh)
}
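
// Close is guarded by e.closing, so calls after the first return immediately
// and the teardown sequence runs exactly once, ending with close(e.waitCh)
// so that Wait (below) unblocks.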

func (e *userspaceEngine) Wait() {
	<-e.waitCh
}

func (e *userspaceEngine) GetLinkMonitor() *monitor.Mon {
	return e.linkMon
}

// LinkChange signals a network change event. It's currently
// (2021-03-03) only called on Android.
func (e *userspaceEngine) LinkChange(_ bool) {
	e.linkMon.InjectEvent()
}

func (e *userspaceEngine) linkChange(changed bool, cur *interfaces.State) {
	up := cur.AnyInterfaceUp()
	if !up {
		e.logf("LinkChange: all links down; pausing: %v", cur)
	} else if changed {
		e.logf("LinkChange: major, rebinding. New state: %v", cur)
	} else {
		e.logf("[v1] LinkChange: minor")
	}

	health.SetAnyInterfaceUp(up)
	e.magicConn.SetNetworkUp(up)

	why := "link-change-minor"
	if changed {
		why = "link-change-major"
		e.magicConn.Rebind()
	}
	e.magicConn.ReSTUN(why)
}
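
// Summarizing the flow above: a major link change rebinds the magicsock UDP
// sockets and then re-STUNs, while a minor change only re-STUNs; the "why"
// string is just the reason label handed to ReSTUN.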

func (e *userspaceEngine) AddNetworkMapCallback(cb NetworkMapCallback) func() {
	e.mu.Lock()
	defer e.mu.Unlock()
	if e.networkMapCallbacks == nil {
		e.networkMapCallbacks = make(map[*someHandle]NetworkMapCallback)
	}
	h := new(someHandle)
	e.networkMapCallbacks[h] = cb
	return func() {
		e.mu.Lock()
		defer e.mu.Unlock()
		delete(e.networkMapCallbacks, h)
	}
}
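
// A hedged usage sketch: the returned func is the unregister handle, so a
// caller pairs registration with a stored or deferred cleanup (the callback
// body is hypothetical):
//
//	unregister := e.AddNetworkMapCallback(func(nm *netmap.NetworkMap) {
//		log.Printf("netmap updated: %d peers", len(nm.Peers))
//	})
//	defer unregister()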

func (e *userspaceEngine) SetNetInfoCallback(cb NetInfoCallback) {
	e.magicConn.SetNetInfoCallback(cb)
}

func (e *userspaceEngine) SetDERPMap(dm *tailcfg.DERPMap) {
	e.magicConn.SetDERPMap(dm)
}

func (e *userspaceEngine) SetNetworkMap(nm *netmap.NetworkMap) {
	e.magicConn.SetNetworkMap(nm)
	e.mu.Lock()
	e.netMap = nm
	callbacks := make([]NetworkMapCallback, 0, 4)
	for _, fn := range e.networkMapCallbacks {
		callbacks = append(callbacks, fn)
	}
	e.mu.Unlock()
	for _, fn := range callbacks {
		fn(nm)
	}
}
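
// Note the snapshot-then-call pattern above: the callback set is copied
// while holding e.mu and the callbacks run only after unlocking, so a
// callback that re-enters the engine cannot deadlock on e.mu.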

func (e *userspaceEngine) DiscoPublicKey() tailcfg.DiscoKey {
	return e.magicConn.DiscoPublicKey()
}

func (e *userspaceEngine) UpdateStatus(sb *ipnstate.StatusBuilder) {
	st, err := e.getStatus()
	if err != nil {
		e.logf("wgengine: getStatus: %v", err)
		return
	}
	if st == nil {
		// getStatus returns (nil, nil) while the engine is still
		// initializing; there is nothing to report yet.
		return
	}
	for _, ps := range st.Peers {
		sb.AddPeer(key.Public(ps.NodeKey), &ipnstate.PeerStatus{
			RxBytes:       int64(ps.RxBytes),
			TxBytes:       int64(ps.TxBytes),
			LastHandshake: ps.LastHandshake,
			InEngine:      true,
		})
	}

	e.magicConn.UpdateStatus(sb)
}

func (e *userspaceEngine) Ping(ip netaddr.IP, useTSMP bool, cb func(*ipnstate.PingResult)) {
	res := &ipnstate.PingResult{IP: ip.String()}
	peer, err := e.peerForIP(ip)
	if err != nil {
		e.logf("ping(%v): %v", ip, err)
		res.Err = err.Error()
		cb(res)
		return
	}
	if peer == nil {
		e.logf("ping(%v): no matching peer", ip)
		res.Err = "no matching peer"
		cb(res)
		return
	}
	pingType := "disco"
	if useTSMP {
		pingType = "TSMP"
	}
	e.logf("ping(%v): sending %v ping to %v %v ...", ip, pingType, peer.Key.ShortString(), peer.ComputedName)
	if useTSMP {
		e.sendTSMPPing(ip, peer, res, cb)
	} else {
		e.magicConn.Ping(peer, res, cb)
	}
}
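
// A hedged usage sketch (the target IP is hypothetical): Ping reports
// asynchronously through the callback, so a synchronous caller can bridge
// it with a channel.
//
//	done := make(chan *ipnstate.PingResult, 1)
//	e.Ping(netaddr.MustParseIP("100.101.102.103"), false, func(res *ipnstate.PingResult) {
//		done <- res
//	})
//	res := <-done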

func (e *userspaceEngine) mySelfIPMatchingFamily(dst netaddr.IP) (src netaddr.IP, err error) {
	e.mu.Lock()
	defer e.mu.Unlock()
	if e.netMap == nil {
		return netaddr.IP{}, errors.New("no netmap")
	}
	for _, a := range e.netMap.Addresses {
		if a.IsSingleIP() && a.IP.BitLen() == dst.BitLen() {
			return a.IP, nil
		}
	}
	if len(e.netMap.Addresses) == 0 {
		return netaddr.IP{}, errors.New("no self address in netmap")
	}
	return netaddr.IP{}, errors.New("no self address in netmap matching address family")
}

func (e *userspaceEngine) sendTSMPPing(ip netaddr.IP, peer *tailcfg.Node, res *ipnstate.PingResult, cb func(*ipnstate.PingResult)) {
	srcIP, err := e.mySelfIPMatchingFamily(ip)
	if err != nil {
		res.Err = err.Error()
		cb(res)
		return
	}
	var iph packet.Header
	if srcIP.Is4() {
		iph = packet.IP4Header{
			IPProto: ipproto.TSMP,
			Src:     srcIP,
			Dst:     ip,
		}
	} else {
		iph = packet.IP6Header{
			IPProto: ipproto.TSMP,
			Src:     srcIP,
			Dst:     ip,
		}
	}

	var data [8]byte
	crand.Read(data[:])

	expireTimer := time.AfterFunc(10*time.Second, func() {
		e.setTSMPPongCallback(data, nil)
	})
	t0 := time.Now()
	e.setTSMPPongCallback(data, func(pong packet.TSMPPongReply) {
		expireTimer.Stop()
		d := time.Since(t0)
		res.LatencySeconds = d.Seconds()
		res.NodeIP = ip.String()
		res.NodeName = peer.ComputedName
		res.PeerAPIPort = pong.PeerAPIPort
		cb(res)
	})

	var tsmpPayload [9]byte
	tsmpPayload[0] = byte(packet.TSMPTypePing)
	copy(tsmpPayload[1:], data[:])

	tsmpPing := packet.Generate(iph, tsmpPayload[:])
	e.tundev.InjectOutbound(tsmpPing)
}
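
// The TSMP ping assembled above is a bare IP packet whose payload is nine
// bytes: one TSMPTypePing type byte followed by the eight random nonce
// bytes that key the pong callback. Roughly:
//
//	[IPv4/IPv6 header, proto=TSMP][type: 1 byte][nonce: 8 bytes]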

func (e *userspaceEngine) setTSMPPongCallback(data [8]byte, cb func(packet.TSMPPongReply)) {
	e.mu.Lock()
	defer e.mu.Unlock()
	if e.pongCallback == nil {
		e.pongCallback = map[[8]byte]func(packet.TSMPPongReply){}
	}
	if cb == nil {
		delete(e.pongCallback, data)
	} else {
		e.pongCallback[data] = cb
	}
}

func (e *userspaceEngine) RegisterIPPortIdentity(ipport netaddr.IPPort, tsIP netaddr.IP) {
	e.mu.Lock()
	defer e.mu.Unlock()
	if e.tsIPByIPPort == nil {
		e.tsIPByIPPort = make(map[netaddr.IPPort]netaddr.IP)
	}
	e.tsIPByIPPort[ipport] = tsIP
}

func (e *userspaceEngine) UnregisterIPPortIdentity(ipport netaddr.IPPort) {
	e.mu.Lock()
	defer e.mu.Unlock()
	if e.tsIPByIPPort == nil {
		return
	}
	delete(e.tsIPByIPPort, ipport)
}

var whoIsSleeps = [...]time.Duration{
	0,
	10 * time.Millisecond,
	20 * time.Millisecond,
	50 * time.Millisecond,
	100 * time.Millisecond,
}

func (e *userspaceEngine) WhoIsIPPort(ipport netaddr.IPPort) (tsIP netaddr.IP, ok bool) {
	// We currently have a registration race,
	// https://github.com/tailscale/tailscale/issues/1616,
	// so loop a few times for now waiting for the registration
	// to appear.
	// TODO(bradfitz,namansood): remove this once #1616 is fixed.
	for _, d := range whoIsSleeps {
		time.Sleep(d)
		e.mu.Lock()
		tsIP, ok = e.tsIPByIPPort[ipport]
		e.mu.Unlock()
		if ok {
			return tsIP, true
		}
	}
	return tsIP, false
}
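
// The whoIsSleeps schedule gives a free first probe and then backs off:
// 0+10+20+50+100 = 180ms of sleeping, worst case, across the five attempts
// before WhoIsIPPort reports failure.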

// peerForIP returns the Node in the wireguard config
// that's responsible for handling the given IP address.
//
// If none is found in the wireguard config but one is found in
// the netmap, it's described in an error.
//
// If none is found in either place, (nil, nil) is returned.
//
// peerForIP acquires both e.mu and e.wgLock, but neither at the same
// time.
func (e *userspaceEngine) peerForIP(ip netaddr.IP) (n *tailcfg.Node, err error) {
	e.mu.Lock()
	nm := e.netMap
	e.mu.Unlock()
	if nm == nil {
		return nil, errors.New("no network map")
	}

	// Check for exact matches before looking for subnet matches.
	var bestInNMPrefix netaddr.IPPrefix
	var bestInNM *tailcfg.Node
	for _, p := range nm.Peers {
		for _, a := range p.Addresses {
			if a.IP == ip && a.IsSingleIP() && tsaddr.IsTailscaleIP(ip) {
				return p, nil
			}
		}
		for _, cidr := range p.AllowedIPs {
			if !cidr.Contains(ip) {
				continue
			}
			if bestInNMPrefix.IsZero() || cidr.Bits > bestInNMPrefix.Bits {
				bestInNMPrefix = cidr
				bestInNM = p
			}
		}
	}

	e.wgLock.Lock()
	defer e.wgLock.Unlock()

	// TODO(bradfitz): this is O(n peers). Add ART to netaddr?
	var best netaddr.IPPrefix
	var bestKey tailcfg.NodeKey
	for _, p := range e.lastCfgFull.Peers {
		for _, cidr := range p.AllowedIPs {
			if !cidr.Contains(ip) {
				continue
			}
			if best.IsZero() || cidr.Bits > best.Bits {
				best = cidr
				bestKey = tailcfg.NodeKey(p.PublicKey)
			}
		}
	}
	// And another pass. Probably better than allocating a map per peerForIP
	// call. But TODO(bradfitz): add a lookup map to netmap.NetworkMap.
	if !bestKey.IsZero() {
		for _, p := range nm.Peers {
			if p.Key == bestKey {
				return p, nil
			}
		}
	}
	if bestInNM == nil {
		return nil, nil
	}
	if bestInNMPrefix.Bits == 0 {
		return nil, errors.New("exit node found but not enabled")
	}
	return nil, fmt.Errorf("node %q found, but not using its %v route", bestInNM.ComputedNameWithHost, bestInNMPrefix)
}
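
// Both passes above apply the same longest-prefix-match rule: among the
// AllowedIPs CIDRs containing ip, the one with the most prefix bits wins,
// so an exit node's 0.0.0.0/0 matches only when nothing more specific does.
// A minimal standalone sketch of the rule (candidates is hypothetical):
//
//	var best netaddr.IPPrefix
//	for _, cidr := range candidates {
//		if cidr.Contains(ip) && (best.IsZero() || cidr.Bits > best.Bits) {
//			best = cidr
//		}
//	}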

type closeOnErrorPool []func()

func (p *closeOnErrorPool) add(c io.Closer)   { *p = append(*p, func() { c.Close() }) }
func (p *closeOnErrorPool) addFunc(fn func()) { *p = append(*p, fn) }
func (p closeOnErrorPool) closeAllIfError(errp *error) {
	if *errp != nil {
		for _, closeFn := range p {
			closeFn()
		}
	}
}
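
// A hedged usage sketch of closeOnErrorPool: a constructor registers each
// resource as it is opened, and a deferred closeAllIfError unwinds them all
// only when the named error return is non-nil (newThing and its fields are
// hypothetical):
//
//	func newThing() (t *thing, err error) {
//		var closePool closeOnErrorPool
//		defer closePool.closeAllIfError(&err)
//		f, err := os.Open("statefile")
//		if err != nil {
//			return nil, err
//		}
//		closePool.add(f)
//		// ... more fallible setup ...
//		return &thing{f: f}, nil
//	}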