// Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package magicsock

import (
	"bytes"
	"context"
	crand "crypto/rand"
	"crypto/tls"
	"encoding/binary"
	"encoding/json"
	"errors"
	"fmt"
	"io/ioutil"
	"net"
	"net/http"
	"net/http/httptest"
	"os"
	"strconv"
	"strings"
	"sync"
	"testing"
	"time"
	"unsafe"

	"github.com/google/go-cmp/cmp"
	"github.com/tailscale/wireguard-go/device"
	"github.com/tailscale/wireguard-go/tun/tuntest"
	"github.com/tailscale/wireguard-go/wgcfg"
	"golang.org/x/crypto/nacl/box"
	"inet.af/netaddr"
	"tailscale.com/control/controlclient"
	"tailscale.com/derp"
	"tailscale.com/derp/derphttp"
	"tailscale.com/derp/derpmap"
	"tailscale.com/ipn/ipnstate"
	"tailscale.com/net/stun/stuntest"
	"tailscale.com/tailcfg"
	"tailscale.com/tstest"
	"tailscale.com/tstest/natlab"
	"tailscale.com/types/key"
	"tailscale.com/types/logger"
	"tailscale.com/types/nettype"
	"tailscale.com/types/wgkey"
	"tailscale.com/wgengine/filter"
	"tailscale.com/wgengine/tstun"
)

func init() {
	os.Setenv("IN_TS_TEST", "1")
}

// WaitReady waits until the magicsock is entirely initialized and connected
// to its home DERP server. This is normally not necessary, since magicsock
// is intended to be entirely asynchronous, but it helps eliminate race
// conditions in tests. In particular, you can't expect two test magicsocks
// to be able to connect to each other through a test DERP unless they are
// both fully initialized before you try.
func (c *Conn) WaitReady(t testing.TB) {
	t.Helper()
	timer := time.NewTimer(10 * time.Second)
	defer timer.Stop()
	select {
	case <-c.derpStarted:
		return
	case <-c.connCtx.Done():
		t.Fatalf("magicsock.Conn closed while waiting for readiness")
	case <-timer.C:
		t.Fatalf("timeout waiting for readiness")
	}
}
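
// Illustrative only (a sketch, not part of the test suite): the helpers below
// call WaitReady after starting the Conn and giving it a DERP map, roughly:
//
//	conn.Start()
//	conn.SetDERPMap(derpMap)
//	conn.WaitReady(t) // blocks until the home DERP connection is up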

func runDERPAndStun(t *testing.T, logf logger.Logf, l nettype.PacketListener, stunIP netaddr.IP) (derpMap *tailcfg.DERPMap, cleanup func()) {
	var serverPrivateKey key.Private
	if _, err := crand.Read(serverPrivateKey[:]); err != nil {
		t.Fatal(err)
	}
	d := derp.NewServer(serverPrivateKey, logf)

	httpsrv := httptest.NewUnstartedServer(derphttp.Handler(d))
	httpsrv.Config.ErrorLog = logger.StdLogger(logf)
	httpsrv.Config.TLSNextProto = make(map[string]func(*http.Server, *tls.Conn, http.Handler))
	httpsrv.StartTLS()

	stunAddr, stunCleanup := stuntest.ServeWithPacketListener(t, l)

	m := &tailcfg.DERPMap{
		Regions: map[int]*tailcfg.DERPRegion{
			1: &tailcfg.DERPRegion{
				RegionID:   1,
				RegionCode: "test",
				Nodes: []*tailcfg.DERPNode{
					{
						Name:         "t1",
						RegionID:     1,
						HostName:     "test-node.unused",
						IPv4:         "127.0.0.1",
						IPv6:         "none",
						STUNPort:     stunAddr.Port,
						DERPTestPort: httpsrv.Listener.Addr().(*net.TCPAddr).Port,
						STUNTestIP:   stunIP.String(),
					},
				},
			},
		},
	}

	cleanup = func() {
		httpsrv.CloseClientConnections()
		httpsrv.Close()
		d.Close()
		stunCleanup()
	}

	return m, cleanup
}
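
// Typical use, mirroring the tests below (a sketch, assuming a plain loopback
// listener):
//
//	derpMap, cleanup := runDERPAndStun(t, t.Logf, nettype.Std{}, netaddr.IPv4(127, 0, 0, 1))
//	defer cleanup()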

// magicStack is a magicsock, plus all the stuff around it that's
// necessary to send and receive packets to test e2e wireguard
// happiness.
type magicStack struct {
	privateKey wgkey.Private
	epCh       chan []string       // endpoint updates produced by this peer
	conn       *Conn               // the magicsock itself
	tun        *tuntest.ChannelTUN // TUN device to send/receive packets
	tsTun      *tstun.TUN          // wrapped tun that implements filtering and wgengine hooks
	dev        *device.Device      // the wireguard-go Device that connects the previous things
}

// newMagicStack builds and initializes an idle magicsock and
// friends. You need to call conn.SetNetworkMap and dev.Reconfig
// before anything interesting happens.
func newMagicStack(t testing.TB, logf logger.Logf, l nettype.PacketListener, derpMap *tailcfg.DERPMap) *magicStack {
	t.Helper()

	privateKey, err := wgkey.NewPrivate()
	if err != nil {
		t.Fatalf("generating private key: %v", err)
	}

	epCh := make(chan []string, 100) // arbitrary
	conn, err := NewConn(Options{
		Logf:           logf,
		PacketListener: l,
		EndpointsFunc: func(eps []string) {
			epCh <- eps
		},
		SimulatedNetwork: l != nettype.Std{},
	})
	if err != nil {
		t.Fatalf("constructing magicsock: %v", err)
	}
	conn.Start()
	conn.SetDERPMap(derpMap)
	if err := conn.SetPrivateKey(privateKey); err != nil {
		t.Fatalf("setting private key in magicsock: %v", err)
	}

	tun := tuntest.NewChannelTUN()
	tsTun := tstun.WrapTUN(logf, tun.TUN())
	tsTun.SetFilter(filter.NewAllowAllForTest(logf))

	dev := device.NewDevice(tsTun, &device.DeviceOptions{
		Logger: &device.Logger{
			Debug: logger.StdLogger(logf),
			Info:  logger.StdLogger(logf),
			Error: logger.StdLogger(logf),
		},
		CreateEndpoint: conn.CreateEndpoint,
		CreateBind:     conn.CreateBind,
		SkipBindUpdate: true,
	})
	dev.Up()

	// Wait for magicsock to connect up to DERP.
	conn.WaitReady(t)

	// Wait for first endpoint update to be available
	deadline := time.Now().Add(2 * time.Second)
	for len(epCh) == 0 && time.Now().Before(deadline) {
		time.Sleep(100 * time.Millisecond)
	}

	return &magicStack{
		privateKey: privateKey,
		epCh:       epCh,
		conn:       conn,
		tun:        tun,
		tsTun:      tsTun,
		dev:        dev,
	}
}
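
// A minimal sketch of wiring two stacks together over the standard network
// stack; the tests below do the same thing via the devices struct and
// meshStacks:
//
//	m1 := newMagicStack(t, logf, nettype.Std{}, derpMap)
//	defer m1.Close()
//	m2 := newMagicStack(t, logf, nettype.Std{}, derpMap)
//	defer m2.Close()
//	cleanup := meshStacks(logf, []*magicStack{m1, m2})
//	defer cleanup()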

func (s *magicStack) String() string {
	pub := s.Public()
	return pub.ShortString()
}

func (s *magicStack) Close() {
	s.dev.Close()
	s.conn.Close()
}

func (s *magicStack) Public() key.Public {
	return key.Public(s.privateKey.Public())
}

func (s *magicStack) Status() *ipnstate.Status {
	var sb ipnstate.StatusBuilder
	s.conn.UpdateStatus(&sb)
	return sb.Status()
}

// IP returns the Tailscale IP address assigned to this magicStack.
//
// Something external needs to provide a NetworkMap and WireGuard
// configs to the magicStack in order for it to acquire an IP
// address. See meshStacks for one possible source of netmaps and IPs.
func (s *magicStack) IP(t *testing.T) netaddr.IP {
	for deadline := time.Now().Add(5 * time.Second); time.Now().Before(deadline); time.Sleep(10 * time.Millisecond) {
		st := s.Status()
		if len(st.TailscaleIPs) > 0 {
			return st.TailscaleIPs[0]
		}
	}
	t.Fatal("timed out waiting for magicstack to get an IP assigned")
	panic("unreachable") // compiler doesn't know t.Fatal panics
}

// meshStacks monitors epCh on all given ms, and plumbs network maps
// and WireGuard configs into everyone to form a full mesh that has up
// to date endpoint info. Think of it as an extremely stripped down
// and purpose-built Tailscale control plane.
//
// meshStacks only supports disco connections, not legacy logic.
func meshStacks(logf logger.Logf, ms []*magicStack) (cleanup func()) {
	ctx, cancel := context.WithCancel(context.Background())

	// Serialize all reconfigurations globally, just to keep things
	// simpler.
	var (
		mu  sync.Mutex
		eps = make([][]string, len(ms))
	)

	buildNetmapLocked := func(myIdx int) *controlclient.NetworkMap {
		me := ms[myIdx]
		nm := &controlclient.NetworkMap{
			PrivateKey: me.privateKey,
			NodeKey:    tailcfg.NodeKey(me.privateKey.Public()),
			Addresses:  []netaddr.IPPrefix{{IP: netaddr.IPv4(1, 0, 0, byte(myIdx+1)), Bits: 32}},
		}
		for i, peer := range ms {
			if i == myIdx {
				continue
			}
			addrs := []netaddr.IPPrefix{{IP: netaddr.IPv4(1, 0, 0, byte(i+1)), Bits: 32}}
			peer := &tailcfg.Node{
				ID:         tailcfg.NodeID(i + 1),
				Name:       fmt.Sprintf("node%d", i+1),
				Key:        tailcfg.NodeKey(peer.privateKey.Public()),
				DiscoKey:   peer.conn.DiscoPublicKey(),
				Addresses:  addrs,
				AllowedIPs: addrs,
				Endpoints:  eps[i],
				DERP:       "127.3.3.40:1",
			}
			nm.Peers = append(nm.Peers, peer)
		}

		return nm
	}

	updateEps := func(idx int, newEps []string) {
		mu.Lock()
		defer mu.Unlock()

		eps[idx] = newEps

		for i, m := range ms {
			netmap := buildNetmapLocked(i)
			m.conn.SetNetworkMap(netmap)
			peerSet := make(map[key.Public]struct{}, len(netmap.Peers))
			for _, peer := range netmap.Peers {
				peerSet[key.Public(peer.Key)] = struct{}{}
			}
			m.conn.UpdatePeers(peerSet)
			wg, err := netmap.WGCfg(logf, controlclient.AllowSingleHosts)
			if err != nil {
				// We're too far from the *testing.T to be graceful,
				// blow up. Shouldn't happen anyway.
				panic(fmt.Sprintf("failed to construct wgcfg from netmap: %v", err))
			}
			if err := m.dev.Reconfig(wg); err != nil {
				panic(fmt.Sprintf("device reconfig failed: %v", err))
			}
		}
	}

	var wg sync.WaitGroup
	wg.Add(len(ms))
	for i := range ms {
		go func(myIdx int) {
			defer wg.Done()

			for {
				select {
				case <-ctx.Done():
					return
				case eps := <-ms[myIdx].epCh:
					logf("conn%d endpoints update", myIdx+1)
					updateEps(myIdx, eps)
				}
			}
		}(i)
	}

	return func() {
		cancel()
		wg.Wait()
	}
}
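
// Note: every endpoint update makes updateEps rebuild and re-push a netmap to
// every stack, so the per-update work grows quadratically with len(ms). That
// is fine for the small meshes these tests build, but worth keeping in mind
// before reusing this helper for larger meshes.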

func TestNewConn(t *testing.T) {
	tstest.PanicOnLog()
	rc := tstest.NewResourceCheck()
	defer rc.Assert(t)

	epCh := make(chan string, 16)
	epFunc := func(endpoints []string) {
		for _, ep := range endpoints {
			epCh <- ep
		}
	}

	stunAddr, stunCleanupFn := stuntest.Serve(t)
	defer stunCleanupFn()

	port := pickPort(t)
	conn, err := NewConn(Options{
		Port:          port,
		EndpointsFunc: epFunc,
		Logf:          t.Logf,
	})
	if err != nil {
		t.Fatal(err)
	}
	defer conn.Close()
	conn.SetDERPMap(stuntest.DERPMapOf(stunAddr.String()))
	conn.SetPrivateKey(wgkey.Private(key.NewPrivate()))
	conn.Start()

	go func() {
		var pkt [64 << 10]byte
		for {
			_, _, _, err := conn.ReceiveIPv4(pkt[:])
			if err != nil {
				return
			}
		}
	}()

	timeout := time.After(10 * time.Second)
	var endpoints []string
	suffix := fmt.Sprintf(":%d", port)
collectEndpoints:
	for {
		select {
		case ep := <-epCh:
			endpoints = append(endpoints, ep)
			if strings.HasSuffix(ep, suffix) {
				break collectEndpoints
			}
		case <-timeout:
			t.Fatalf("timeout with endpoints: %v", endpoints)
		}
	}
}

func pickPort(t testing.TB) uint16 {
	t.Helper()
	conn, err := net.ListenPacket("udp4", "127.0.0.1:0")
	if err != nil {
		t.Fatal(err)
	}
	defer conn.Close()
	return uint16(conn.LocalAddr().(*net.UDPAddr).Port)
}
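
// Note: the port returned by pickPort is only guaranteed free at the moment
// ListenPacket returns; the deferred Close releases it again, so there is a
// small window in which another process could grab it before the caller
// rebinds. The tests here accept that race for simplicity.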

func TestDerpIPConstant(t *testing.T) {
	tstest.PanicOnLog()
	rc := tstest.NewResourceCheck()
	defer rc.Assert(t)

	if DerpMagicIP != derpMagicIP.String() {
		t.Errorf("str %q != IP %v", DerpMagicIP, derpMagicIP)
	}
	if len(derpMagicIP) != 4 {
		t.Errorf("derpMagicIP is len %d; want 4", len(derpMagicIP))
	}
}

func TestPickDERPFallback(t *testing.T) {
	tstest.PanicOnLog()
	rc := tstest.NewResourceCheck()
	defer rc.Assert(t)

	c := newConn()
	c.derpMap = derpmap.Prod()
	a := c.pickDERPFallback()
	if a == 0 {
		t.Fatalf("pickDERPFallback returned 0")
	}

	// Test that it's consistent.
	for i := 0; i < 50; i++ {
		b := c.pickDERPFallback()
		if a != b {
			t.Fatalf("got inconsistent %d vs %d values", a, b)
		}
	}

	// Test that the pointer value of c is blended in and
	// distribution over nodes works.
	got := map[int]int{}
	for i := 0; i < 50; i++ {
		c = newConn()
		c.derpMap = derpmap.Prod()
		got[c.pickDERPFallback()]++
	}
	t.Logf("distribution: %v", got)
	if len(got) < 2 {
		t.Errorf("expected more than 1 node; got %v", got)
	}

	// Test that stickiness works.
	const someNode = 123456
	c.myDerp = someNode
	if got := c.pickDERPFallback(); got != someNode {
		t.Errorf("not sticky: got %v; want %v", got, someNode)
	}

	// But move if peers are elsewhere.
	const otherNode = 789
	c.addrsByKey = map[key.Public]*addrSet{
		key.Public{1}: &addrSet{addrs: []net.UDPAddr{{IP: derpMagicIP, Port: otherNode}}},
	}
	if got := c.pickDERPFallback(); got != otherNode {
		t.Errorf("didn't join peers: got %v; want %v", got, otherNode)
	}
}

func makeConfigs(t *testing.T, addrs []netaddr.IPPort) []wgcfg.Config {
	t.Helper()

	var privKeys []wgcfg.PrivateKey
	var addresses [][]netaddr.IPPrefix

	for i := range addrs {
		privKey, err := wgkey.NewPrivate()
		if err != nil {
			t.Fatal(err)
		}
		privKeys = append(privKeys, wgcfg.PrivateKey(privKey))

		addresses = append(addresses, []netaddr.IPPrefix{
			parseCIDR(t, fmt.Sprintf("1.0.0.%d/32", i+1)),
		})
	}

	var cfgs []wgcfg.Config
	for i, addr := range addrs {
		cfg := wgcfg.Config{
			Name:       fmt.Sprintf("peer%d", i+1),
			PrivateKey: privKeys[i],
			Addresses:  addresses[i],
			ListenPort: addr.Port,
		}
		for peerNum, addr := range addrs {
			if peerNum == i {
				continue
			}
			peer := wgcfg.Peer{
				PublicKey:  privKeys[peerNum].Public(),
				AllowedIPs: addresses[peerNum],
				Endpoints: []wgcfg.Endpoint{{
					Host: addr.IP.String(),
					Port: addr.Port,
				}},
				PersistentKeepalive: 25,
			}
			cfg.Peers = append(cfg.Peers, peer)
		}
		cfgs = append(cfgs, cfg)
	}
	return cfgs
}

func parseCIDR(t *testing.T, addr string) netaddr.IPPrefix {
	t.Helper()
	cidr, err := netaddr.ParseIPPrefix(addr)
	if err != nil {
		t.Fatal(err)
	}
	return cidr
}

// TestDeviceStartStop exercises the startup and shutdown logic of
// wireguard-go, which is intimately intertwined with magicsock's own
// lifecycle. We seem to be good at generating deadlocks here, so if
// this test fails you should suspect a deadlock somewhere in startup
// or shutdown. It may be an infrequent flake, so run with
// -count=10000 to be sure.
func TestDeviceStartStop(t *testing.T) {
	tstest.PanicOnLog()
	rc := tstest.NewResourceCheck()
	defer rc.Assert(t)

	conn, err := NewConn(Options{
		EndpointsFunc: func(eps []string) {},
		Logf:          t.Logf,
	})
	if err != nil {
		t.Fatal(err)
	}
	conn.Start()
	defer conn.Close()

	tun := tuntest.NewChannelTUN()
	dev := device.NewDevice(tun.TUN(), &device.DeviceOptions{
		Logger: &device.Logger{
			Debug: logger.StdLogger(t.Logf),
			Info:  logger.StdLogger(t.Logf),
			Error: logger.StdLogger(t.Logf),
		},
		CreateEndpoint: conn.CreateEndpoint,
		CreateBind:     conn.CreateBind,
		SkipBindUpdate: true,
	})
	dev.Up()
	dev.Close()
}

// A context used in TestConnClosing() which seeks to test that code which calls
// Err() to see if a connection is already being closed does not then proceed to
// try to acquire the mutex, as this would lead to deadlock. When Err() is called
// this context acquires the lock itself, in order to force a deadlock (and test
// failure on timeout).
type testConnClosingContext struct {
	parent context.Context
	mu     *sync.Mutex
}

func (c *testConnClosingContext) Deadline() (deadline time.Time, ok bool) {
	d, o := c.parent.Deadline()
	return d, o
}
func (c *testConnClosingContext) Done() <-chan struct{} {
	return c.parent.Done()
}
func (c *testConnClosingContext) Err() error {
	// Deliberately deadlock if anything grabs the lock after checking Err()
	c.mu.Lock()
	return errors.New("testConnClosingContext error")
}
func (c *testConnClosingContext) Value(key interface{}) interface{} {
	return c.parent.Value(key)
}
func (*testConnClosingContext) String() string {
	return "testConnClosingContext"
}
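
// Err deliberately never unlocks c.mu: any code that consults Err() and then
// tries to take the same mutex will deadlock, which TestConnClosing below
// turns into a timeout failure via conn.WaitReady.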

func TestConnClosing(t *testing.T) {
	privateKey, err := wgkey.NewPrivate()
	if err != nil {
		t.Fatalf("generating private key: %v", err)
	}

	epCh := make(chan []string, 100)
	conn, err := NewConn(Options{
		Logf:           t.Logf,
		PacketListener: nettype.Std{},
		EndpointsFunc: func(eps []string) {
			epCh <- eps
		},
		SimulatedNetwork: false,
	})
	if err != nil {
		t.Fatalf("constructing magicsock: %v", err)
	}

	derpMap, cleanup := runDERPAndStun(t, t.Logf, nettype.Std{}, netaddr.IPv4(127, 0, 3, 1))
	defer cleanup()

	// The point of this test case is to exercise handling in derpWriteChanOfAddr() which
	// returns early if connCtx.Err() returns non-nil, to avoid a deadlock on conn.mu.
	// We swap in a context which always returns an error, and deliberately grabs the lock
	// to cause a deadlock if magicsock.go tries to acquire the lock after calling Err().
	closingCtx := testConnClosingContext{parent: conn.connCtx, mu: &conn.mu}
	conn.connCtx = &closingCtx
	conn.Start()

	conn.SetDERPMap(derpMap)
	if err := conn.SetPrivateKey(privateKey); err != nil {
		t.Fatalf("setting private key in magicsock: %v", err)
	}

	tun := tuntest.NewChannelTUN()
	tsTun := tstun.WrapTUN(t.Logf, tun.TUN())
	tsTun.SetFilter(filter.NewAllowAllForTest(t.Logf))

	dev := device.NewDevice(tsTun, &device.DeviceOptions{
		Logger: &device.Logger{
			Debug: logger.StdLogger(t.Logf),
			Info:  logger.StdLogger(t.Logf),
			Error: logger.StdLogger(t.Logf),
		},
		CreateEndpoint: conn.CreateEndpoint,
		CreateBind:     conn.CreateBind,
		SkipBindUpdate: true,
	})

	dev.Up()
	conn.WaitReady(t)

	// We don't assert any failures within the test itself. If derpWriteChanOfAddr tries to
	// grab the lock it will deadlock, and conn.WaitReady(t) will call t.Fatal() after timeout.
	// (verified by deliberately breaking derpWriteChanOfAddr)
}

// Exercise a code path in sendDiscoMessage if the connection has been closed.
func TestConnClosed(t *testing.T) {
	mstun := &natlab.Machine{Name: "stun"}
	m1 := &natlab.Machine{Name: "m1"}
	m2 := &natlab.Machine{Name: "m2"}
	inet := natlab.NewInternet()
	sif := mstun.Attach("eth0", inet)
	m1if := m1.Attach("eth0", inet)
	m2if := m2.Attach("eth0", inet)

	d := &devices{
		m1:     m1,
		m1IP:   m1if.V4(),
		m2:     m2,
		m2IP:   m2if.V4(),
		stun:   mstun,
		stunIP: sif.V4(),
	}

	derpMap, cleanup := runDERPAndStun(t, t.Logf, d.stun, d.stunIP)
	defer cleanup()

	ms1 := newMagicStack(t, logger.WithPrefix(t.Logf, "conn1: "), d.m1, derpMap)
	defer ms1.Close()
	ms2 := newMagicStack(t, logger.WithPrefix(t.Logf, "conn2: "), d.m2, derpMap)
	defer ms2.Close()

	cleanup = meshStacks(t.Logf, []*magicStack{ms1, ms2})
	defer cleanup()

	pkt := tuntest.Ping(ms2.IP(t).IPAddr().IP, ms1.IP(t).IPAddr().IP)

	if len(ms1.conn.activeDerp) == 0 {
		t.Errorf("unexpected DERP empty got: %v want: >0", len(ms1.conn.activeDerp))
	}

	ms1.conn.Close()
	ms2.conn.Close()

	// This should hit a c.closed conditional in sendDiscoMessage() and return immediately.
	ms1.tun.Outbound <- pkt
	select {
	case <-ms2.tun.Inbound:
		t.Error("unexpected response with connection closed")
	case <-time.After(100 * time.Millisecond):
	}

	if len(ms1.conn.activeDerp) > 0 {
		t.Errorf("unexpected DERP active got: %v want:0", len(ms1.conn.activeDerp))
	}
}

func makeNestable(t *testing.T) (logf logger.Logf, setT func(t *testing.T)) {
	var mu sync.RWMutex
	cur := t

	setT = func(t *testing.T) {
		mu.Lock()
		cur = t
		mu.Unlock()
	}

	logf = func(s string, args ...interface{}) {
		mu.RLock()
		t := cur

		t.Helper()
		t.Logf(s, args...)
		mu.RUnlock()
	}

	return logf, setT
}
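
// Usage sketch (mirrors testTwoDevicePing below): keep a handle on the outer
// *testing.T, then point the shared logf at each subtest while it runs.
//
//	logf, setT := makeNestable(t)
//	outerT := t
//	t.Run("sub", func(t *testing.T) {
//		setT(t)
//		defer setT(outerT)
//		// ... code under test logs via logf ...
//	})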

func TestTwoDevicePing(t *testing.T) {
	l, ip := nettype.Std{}, netaddr.IPv4(127, 0, 0, 1)
	n := &devices{
		m1:     l,
		m1IP:   ip,
		m2:     l,
		m2IP:   ip,
		stun:   l,
		stunIP: ip,
	}
	testTwoDevicePing(t, n)
}

func TestActiveDiscovery(t *testing.T) {
	t.Run("simple_internet", func(t *testing.T) {
		t.Parallel()
		mstun := &natlab.Machine{Name: "stun"}
		m1 := &natlab.Machine{Name: "m1"}
		m2 := &natlab.Machine{Name: "m2"}
		inet := natlab.NewInternet()
		sif := mstun.Attach("eth0", inet)
		m1if := m1.Attach("eth0", inet)
		m2if := m2.Attach("eth0", inet)

		n := &devices{
			m1:     m1,
			m1IP:   m1if.V4(),
			m2:     m2,
			m2IP:   m2if.V4(),
			stun:   mstun,
			stunIP: sif.V4(),
		}
		testActiveDiscovery(t, n)
	})

	t.Run("facing_easy_firewalls", func(t *testing.T) {
		mstun := &natlab.Machine{Name: "stun"}
		m1 := &natlab.Machine{
			Name:          "m1",
			PacketHandler: &natlab.Firewall{},
		}
		m2 := &natlab.Machine{
			Name:          "m2",
			PacketHandler: &natlab.Firewall{},
		}
		inet := natlab.NewInternet()
		sif := mstun.Attach("eth0", inet)
		m1if := m1.Attach("eth0", inet)
		m2if := m2.Attach("eth0", inet)

		n := &devices{
			m1:     m1,
			m1IP:   m1if.V4(),
			m2:     m2,
			m2IP:   m2if.V4(),
			stun:   mstun,
			stunIP: sif.V4(),
		}
		testActiveDiscovery(t, n)
	})

	t.Run("facing_nats", func(t *testing.T) {
		mstun := &natlab.Machine{Name: "stun"}
		m1 := &natlab.Machine{
			Name:          "m1",
			PacketHandler: &natlab.Firewall{},
		}
		nat1 := &natlab.Machine{
			Name: "nat1",
		}
		m2 := &natlab.Machine{
			Name:          "m2",
			PacketHandler: &natlab.Firewall{},
		}
		nat2 := &natlab.Machine{
			Name: "nat2",
		}

		inet := natlab.NewInternet()
		lan1 := &natlab.Network{
			Name:    "lan1",
			Prefix4: mustPrefix("192.168.0.0/24"),
		}
		lan2 := &natlab.Network{
			Name:    "lan2",
			Prefix4: mustPrefix("192.168.1.0/24"),
		}

		sif := mstun.Attach("eth0", inet)
		nat1WAN := nat1.Attach("wan", inet)
		nat1LAN := nat1.Attach("lan1", lan1)
		nat2WAN := nat2.Attach("wan", inet)
		nat2LAN := nat2.Attach("lan2", lan2)
		m1if := m1.Attach("eth0", lan1)
		m2if := m2.Attach("eth0", lan2)
		lan1.SetDefaultGateway(nat1LAN)
		lan2.SetDefaultGateway(nat2LAN)

		nat1.PacketHandler = &natlab.SNAT44{
			Machine:           nat1,
			ExternalInterface: nat1WAN,
			Firewall: &natlab.Firewall{
				TrustedInterface: nat1LAN,
			},
		}
		nat2.PacketHandler = &natlab.SNAT44{
			Machine:           nat2,
			ExternalInterface: nat2WAN,
			Firewall: &natlab.Firewall{
				TrustedInterface: nat2LAN,
			},
		}

		n := &devices{
			m1:     m1,
			m1IP:   m1if.V4(),
			m2:     m2,
			m2IP:   m2if.V4(),
			stun:   mstun,
			stunIP: sif.V4(),
		}
		testActiveDiscovery(t, n)
	})
}

func mustPrefix(s string) netaddr.IPPrefix {
	pfx, err := netaddr.ParseIPPrefix(s)
	if err != nil {
		panic(err)
	}
	return pfx
}

type devices struct {
	m1   nettype.PacketListener
	m1IP netaddr.IP

	m2   nettype.PacketListener
	m2IP netaddr.IP

	stun   nettype.PacketListener
	stunIP netaddr.IP
}
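
// The simplest instantiation runs everything over the host loopback, as
// TestTwoDevicePing does:
//
//	l, ip := nettype.Std{}, netaddr.IPv4(127, 0, 0, 1)
//	n := &devices{m1: l, m1IP: ip, m2: l, m2IP: ip, stun: l, stunIP: ip}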

// newPinger starts continuously sending test packets from src to
// dst, until cleanup is invoked to stop it. Each ping has 10 seconds
// to transit the network. It is a test failure to lose a ping.
func newPinger(t *testing.T, logf logger.Logf, src, dst *magicStack) (cleanup func()) {
	ctx, cancel := context.WithCancel(context.Background())
	done := make(chan struct{})
	one := func() bool {
		// TODO(danderson): requiring exactly zero packet loss
		// will probably be too strict for some tests we'd like to
		// run (e.g. discovery switching to a new path on
		// failure). Figure out what kind of thing would be
		// acceptable to test instead of "every ping must
		// transit".
		pkt := tuntest.Ping(dst.IP(t).IPAddr().IP, src.IP(t).IPAddr().IP)
		select {
		case src.tun.Outbound <- pkt:
		case <-ctx.Done():
			return false
		}
		select {
		case <-dst.tun.Inbound:
			return true
		case <-time.After(10 * time.Second):
			// Very generous timeout here because depending on
			// magicsock setup races, the first handshake might get
			// eaten by the receiving end (if wireguard-go hasn't been
			// configured quite yet), so we have to wait for at least
			// the first retransmit from wireguard before we declare
			// failure.
			t.Errorf("timed out waiting for ping to transit")
			return true
		case <-ctx.Done():
			// Try a little bit longer to consume the packet we're
			// waiting for. This is to deal with shutdown races, where
			// natlab may still be delivering a packet to us from a
			// goroutine.
			select {
			case <-dst.tun.Inbound:
			case <-time.After(time.Second):
			}
			return false
		}
	}

	cleanup = func() {
		cancel()
		<-done
	}

	// Synchronously transit one ping to get things started. This is
	// nice because it means that newPinger returning means we've
	// worked through initial connectivity.
	if !one() {
		cleanup()
		return
	}

	go func() {
		logf("sending ping stream from %s (%s) to %s (%s)", src, src.IP(t), dst, dst.IP(t))
		defer close(done)
		for one() {
		}
	}()

	return cleanup
}
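
// Typical use, as in testActiveDiscovery below:
//
//	cleanup := newPinger(t, logf, m1, m2)
//	defer cleanup()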

// testActiveDiscovery verifies that two magicStacks tied to the given
// devices can establish a direct p2p connection with each other. See
// TestActiveDiscovery for the various configurations of devices that
// get exercised.
func testActiveDiscovery(t *testing.T, d *devices) {
	tstest.PanicOnLog()
	rc := tstest.NewResourceCheck()
	defer rc.Assert(t)

	tlogf, setT := makeNestable(t)
	setT(t)

	start := time.Now()
	logf := func(msg string, args ...interface{}) {
		t.Helper()
		msg = fmt.Sprintf("%s: %s", time.Since(start).Truncate(time.Microsecond), msg)
		tlogf(msg, args...)
	}

	derpMap, cleanup := runDERPAndStun(t, logf, d.stun, d.stunIP)
	defer cleanup()

	m1 := newMagicStack(t, logger.WithPrefix(logf, "conn1: "), d.m1, derpMap)
	defer m1.Close()
	m2 := newMagicStack(t, logger.WithPrefix(logf, "conn2: "), d.m2, derpMap)
	defer m2.Close()

	cleanup = meshStacks(logf, []*magicStack{m1, m2})
	defer cleanup()

	m1IP := m1.IP(t)
	m2IP := m2.IP(t)
	logf("IPs: %s %s", m1IP, m2IP)

	cleanup = newPinger(t, logf, m1, m2)
	defer cleanup()

	// Everything is now up and running, active discovery should find
	// a direct path between our peers. Wait for it to switch away
	// from DERP.

	mustDirect := func(m1, m2 *magicStack) {
		lastLog := time.Now().Add(-time.Minute)
		// See https://github.com/tailscale/tailscale/issues/654 for a discussion of this deadline.
		for deadline := time.Now().Add(10 * time.Second); time.Now().Before(deadline); time.Sleep(10 * time.Millisecond) {
			pst := m1.Status().Peer[m2.Public()]
			if pst.CurAddr != "" {
				logf("direct link %s->%s found with addr %s", m1, m2, pst.CurAddr)
				return
			}
			if now := time.Now(); now.Sub(lastLog) > time.Second {
				logf("no direct path %s->%s yet, addrs %v", m1, m2, pst.Addrs)
				lastLog = now
			}
		}
		t.Errorf("magicsock did not find a direct path from %s to %s", m1, m2)
	}

	mustDirect(m1, m2)
	mustDirect(m2, m1)

	logf("starting cleanup")
}
|
|
|
|
|
2020-07-11 07:48:08 +01:00
|
|
|
func testTwoDevicePing(t *testing.T, d *devices) {
|
Add tstest.PanicOnLog(), and fix various problems detected by this.
If a test calls log.Printf, 'go test' horrifyingly rearranges the
output to no longer be in chronological order, which makes debugging
virtually impossible. Let's stop that from happening by making
log.Printf panic if called from any module, no matter how deep, during
tests.
This required us to change the default error handler in at least one
http.Server, as well as plumbing a bunch of logf functions around,
especially in magicsock and wgengine, but also in logtail and backoff.
To add insult to injury, 'go test' also rearranges the output when a
parent test has multiple sub-tests (all the sub-test's t.Logf is always
printed after all the parent tests t.Logf), so we need to screw around
with a special Logf that can point at the "current" t (current_t.Logf)
in some places. Probably our entire way of using subtests is wrong,
since 'go test' would probably like to run them all in parallel if you
called t.Parallel(), but it definitely can't because the're all
manipulating the shared state created by the parent test. They should
probably all be separate toplevel tests instead, with common
setup/teardown logic. But that's a job for another time.
Signed-off-by: Avery Pennarun <apenwarr@tailscale.com>
2020-05-14 03:59:54 +01:00
|
|
|
tstest.PanicOnLog()
|
2020-05-14 04:03:46 +01:00
|
|
|
rc := tstest.NewResourceCheck()
|
|
|
|
defer rc.Assert(t)
|
Add tstest.PanicOnLog(), and fix various problems detected by this.
If a test calls log.Printf, 'go test' horrifyingly rearranges the
output to no longer be in chronological order, which makes debugging
virtually impossible. Let's stop that from happening by making
log.Printf panic if called from any module, no matter how deep, during
tests.
This required us to change the default error handler in at least one
http.Server, as well as plumbing a bunch of logf functions around,
especially in magicsock and wgengine, but also in logtail and backoff.
To add insult to injury, 'go test' also rearranges the output when a
parent test has multiple sub-tests (all the sub-test's t.Logf is always
printed after all the parent tests t.Logf), so we need to screw around
with a special Logf that can point at the "current" t (current_t.Logf)
in some places. Probably our entire way of using subtests is wrong,
since 'go test' would probably like to run them all in parallel if you
called t.Parallel(), but it definitely can't because the're all
manipulating the shared state created by the parent test. They should
probably all be separate toplevel tests instead, with common
setup/teardown logic. But that's a job for another time.
Signed-off-by: Avery Pennarun <apenwarr@tailscale.com>
2020-05-14 03:59:54 +01:00
|
|
|
|
|
|
|
// This gets reassigned inside every test, so that the connections
|
|
|
|
// all log using the "current" t.Logf function. Sigh.
|
2020-05-14 04:44:58 +01:00
|
|
|
logf, setT := makeNestable(t)
|
Add tstest.PanicOnLog(), and fix various problems detected by this.
If a test calls log.Printf, 'go test' horrifyingly rearranges the
output to no longer be in chronological order, which makes debugging
virtually impossible. Let's stop that from happening by making
log.Printf panic if called from any module, no matter how deep, during
tests.
This required us to change the default error handler in at least one
http.Server, as well as plumbing a bunch of logf functions around,
especially in magicsock and wgengine, but also in logtail and backoff.
To add insult to injury, 'go test' also rearranges the output when a
parent test has multiple sub-tests (all the sub-test's t.Logf is always
printed after all the parent tests t.Logf), so we need to screw around
with a special Logf that can point at the "current" t (current_t.Logf)
in some places. Probably our entire way of using subtests is wrong,
since 'go test' would probably like to run them all in parallel if you
called t.Parallel(), but it definitely can't because the're all
manipulating the shared state created by the parent test. They should
probably all be separate toplevel tests instead, with common
setup/teardown logic. But that's a job for another time.
Signed-off-by: Avery Pennarun <apenwarr@tailscale.com>
2020-05-14 03:59:54 +01:00
|
|
|
|
2020-07-24 22:19:20 +01:00
|
|
|
derpMap, cleanup := runDERPAndStun(t, logf, d.stun, d.stunIP)
|
|
|
|
defer cleanup()
|
2020-07-10 22:26:04 +01:00
|
|
|
|
2020-07-24 22:19:20 +01:00
|
|
|
m1 := newMagicStack(t, logf, d.m1, derpMap)
|
|
|
|
defer m1.Close()
|
|
|
|
m2 := newMagicStack(t, logf, d.m2, derpMap)
|
|
|
|
defer m2.Close()
|
2020-03-03 15:39:40 +00:00
|
|
|
|
2020-07-11 02:53:02 +01:00
|
|
|
addrs := []netaddr.IPPort{
|
2020-07-24 22:19:20 +01:00
|
|
|
{IP: d.m1IP, Port: m1.conn.LocalPort()},
|
|
|
|
{IP: d.m2IP, Port: m2.conn.LocalPort()},
|
2020-07-10 22:26:04 +01:00
|
|
|
}
|
2020-07-11 02:53:02 +01:00
|
|
|
cfgs := makeConfigs(t, addrs)
|
2020-03-03 15:39:40 +00:00
|
|
|
|
2020-07-24 22:19:20 +01:00
|
|
|
if err := m1.dev.Reconfig(&cfgs[0]); err != nil {
|
2020-03-03 21:50:47 +00:00
|
|
|
t.Fatal(err)
|
|
|
|
}
|
2020-07-24 22:19:20 +01:00
|
|
|
if err := m2.dev.Reconfig(&cfgs[1]); err != nil {
|
2020-03-03 21:50:47 +00:00
|
|
|
t.Fatal(err)
|
|
|
|
}
|
|
|
|
|
2020-05-14 01:54:27 +01:00
|
|
|
ping1 := func(t *testing.T) {
|
2020-03-03 15:39:40 +00:00
|
|
|
msg2to1 := tuntest.Ping(net.ParseIP("1.0.0.1"), net.ParseIP("1.0.0.2"))
|
2020-07-24 22:19:20 +01:00
|
|
|
m2.tun.Outbound <- msg2to1
|
2020-05-14 01:54:27 +01:00
|
|
|
t.Log("ping1 sent")
|
2020-03-03 15:39:40 +00:00
|
|
|
select {
|
2020-07-24 22:19:20 +01:00
|
|
|
case msgRecv := <-m1.tun.Inbound:
|
2020-03-03 15:39:40 +00:00
|
|
|
if !bytes.Equal(msg2to1, msgRecv) {
|
|
|
|
t.Error("ping did not transit correctly")
|
|
|
|
}
|
2020-03-06 20:37:19 +00:00
|
|
|
case <-time.After(3 * time.Second):
|
2020-03-03 15:39:40 +00:00
|
|
|
t.Error("ping did not transit")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
ping2 := func(t *testing.T) {
|
|
|
|
msg1to2 := tuntest.Ping(net.ParseIP("1.0.0.2"), net.ParseIP("1.0.0.1"))
|
2020-07-24 22:19:20 +01:00
|
|
|
m1.tun.Outbound <- msg1to2
|
2020-05-14 01:54:27 +01:00
|
|
|
t.Log("ping2 sent")
|
2020-03-03 15:39:40 +00:00
|
|
|
select {
|
2020-07-24 22:19:20 +01:00
|
|
|
case msgRecv := <-m2.tun.Inbound:
|
2020-03-03 15:39:40 +00:00
|
|
|
if !bytes.Equal(msg1to2, msgRecv) {
|
|
|
|
t.Error("return ping did not transit correctly")
|
|
|
|
}
|
2020-03-06 20:37:19 +00:00
|
|
|
case <-time.After(3 * time.Second):
|
2020-03-03 15:39:40 +00:00
|
|
|
t.Error("return ping did not transit")
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2020-06-22 09:54:59 +01:00
|
|
|
	outerT := t
	t.Run("ping 1.0.0.1", func(t *testing.T) {
		setT(t)
		defer setT(outerT)
		ping1(t)
	})

	t.Run("ping 1.0.0.2", func(t *testing.T) {
		setT(t)
		defer setT(outerT)
		ping2(t)
	})

	t.Run("ping 1.0.0.2 via SendPacket", func(t *testing.T) {
		setT(t)
		defer setT(outerT)
		msg1to2 := tuntest.Ping(net.ParseIP("1.0.0.2"), net.ParseIP("1.0.0.1"))
		if err := m1.tsTun.InjectOutbound(msg1to2); err != nil {
			t.Fatal(err)
		}
		t.Log("SendPacket sent")
		select {
		case msgRecv := <-m2.tun.Inbound:
			if !bytes.Equal(msg1to2, msgRecv) {
				t.Error("return ping did not transit correctly")
			}
		case <-time.After(3 * time.Second):
			t.Error("return ping did not transit")
		}
	})

	t.Run("no-op dev1 reconfig", func(t *testing.T) {
		setT(t)
		defer setT(outerT)
		if err := m1.dev.Reconfig(&cfgs[0]); err != nil {
			t.Fatal(err)
		}
		ping1(t)
		ping2(t)
	})

	// TODO: Remove this once the following tests are reliable.
	if run, _ := strconv.ParseBool(os.Getenv("RUN_CURSED_TESTS")); !run {
		t.Skip("skipping following tests because RUN_CURSED_TESTS is not set.")
	}

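	// pingSeq sends count pings from m1 to m2, spread out over totalTime,
	// and waits for each one to arrive. When strict is false, lost or
	// corrupted packets are tolerated rather than failing the test.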
	pingSeq := func(t *testing.T, count int, totalTime time.Duration, strict bool) {
		msg := func(i int) []byte {
			b := tuntest.Ping(net.ParseIP("1.0.0.2"), net.ParseIP("1.0.0.1"))
			b[len(b)-1] = byte(i) // set seq num
			return b
		}

		// Space out ping transmissions so that the overall
		// transmission happens in totalTime.
		//
		// We do this because the packet spray logic in magicsock is
		// time-based to allow for reliable NAT traversal. However,
		// for the packet spraying test further down, there needs to
		// be at least 1 sprayed packet that is not the handshake, in
		// case the handshake gets eaten by the race resolution logic.
		//
		// This is an inherent "race by design" in our current
		// magicsock+wireguard-go codebase: sometimes, racing
		// handshakes will result in a sub-optimal path for a few
		// hundred milliseconds, until a subsequent spray corrects the
		// issue. In order for the test to reflect that magicsock
		// works as designed, we have to space out packet transmission
		// here.
		interPacketGap := totalTime / time.Duration(count)
		if interPacketGap < 1*time.Millisecond {
			interPacketGap = 0
		}

		for i := 0; i < count; i++ {
			b := msg(i)
			m1.tun.Outbound <- b
			time.Sleep(interPacketGap)
		}

		for i := 0; i < count; i++ {
			b := msg(i)
			select {
			case msgRecv := <-m2.tun.Inbound:
				if !bytes.Equal(b, msgRecv) {
					if strict {
						t.Errorf("return ping %d did not transit correctly: %s", i, cmp.Diff(b, msgRecv))
					}
				}
			case <-time.After(3 * time.Second):
				if strict {
					t.Errorf("return ping %d did not transit", i)
				}
			}
		}
	}

	t.Run("ping 1.0.0.1 x50", func(t *testing.T) {
		setT(t)
		defer setT(outerT)
		pingSeq(t, 50, 0, true)
	})

	// Add DERP relay.
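	// (127.3.3.40 is the sentinel address magicsock treats as "send via
	// DERP"; the port selects which DERP server to use.)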
	derpEp := wgcfg.Endpoint{Host: "127.3.3.40", Port: 1}
	ep0 := cfgs[0].Peers[0].Endpoints
	ep0 = append([]wgcfg.Endpoint{derpEp}, ep0...)
	cfgs[0].Peers[0].Endpoints = ep0
	ep1 := cfgs[1].Peers[0].Endpoints
	ep1 = append([]wgcfg.Endpoint{derpEp}, ep1...)
	cfgs[1].Peers[0].Endpoints = ep1
	if err := m1.dev.Reconfig(&cfgs[0]); err != nil {
		t.Fatal(err)
	}
	if err := m2.dev.Reconfig(&cfgs[1]); err != nil {
		t.Fatal(err)
	}

	t.Run("add DERP", func(t *testing.T) {
		setT(t)
		defer setT(outerT)
		pingSeq(t, 20, 0, true)
	})

	// Disable real route.
	cfgs[0].Peers[0].Endpoints = []wgcfg.Endpoint{derpEp}
	cfgs[1].Peers[0].Endpoints = []wgcfg.Endpoint{derpEp}
	if err := m1.dev.Reconfig(&cfgs[0]); err != nil {
		t.Fatal(err)
	}
	if err := m2.dev.Reconfig(&cfgs[1]); err != nil {
		t.Fatal(err)
	}
	time.Sleep(250 * time.Millisecond) // TODO remove

	t.Run("all traffic over DERP", func(t *testing.T) {
		setT(t)
		defer setT(outerT)
		defer func() {
			if t.Failed() || true {
				logf("cfg0: %v", stringifyConfig(cfgs[0]))
				logf("cfg1: %v", stringifyConfig(cfgs[1]))
			}
		}()
		pingSeq(t, 20, 0, true)
	})

	m1.dev.RemoveAllPeers()
	m2.dev.RemoveAllPeers()

	// Give one peer a non-DERP endpoint. We expect the other to
	// accept it via roamAddr.
	cfgs[0].Peers[0].Endpoints = ep0
	if ep2 := cfgs[1].Peers[0].Endpoints; len(ep2) != 1 {
		t.Errorf("unexpected peer endpoints in dev2: %v", ep2)
	}
	if err := m2.dev.Reconfig(&cfgs[1]); err != nil {
		t.Fatal(err)
	}
	if err := m1.dev.Reconfig(&cfgs[0]); err != nil {
		t.Fatal(err)
	}
	// Dear future human debugging a test failure here: this test is
	// flaky, and very infrequently will drop 1-2 of the 50 ping
	// packets. This does not affect normal operation of tailscaled,
	// but makes this test fail.
	//
	// TODO(danderson): finish root-causing and de-flake this test.
	t.Run("one real route is enough thanks to spray", func(t *testing.T) {
		setT(t)
		defer setT(outerT)
		pingSeq(t, 50, 700*time.Millisecond, false)

		ep2 := m2.dev.Config().Peers[0].Endpoints
		if len(ep2) != 2 {
			t.Error("handshake spray failed to find real route")
		}
	})
}

// TestAddrSet tests addrSet appendDests and UpdateDst.
func TestAddrSet(t *testing.T) {
	tstest.PanicOnLog()
	rc := tstest.NewResourceCheck()
	defer rc.Assert(t)

	mustIPPortPtr := func(s string) *netaddr.IPPort {
		t.Helper()
		ipp, err := netaddr.ParseIPPort(s)
		if err != nil {
			t.Fatal(err)
		}
		return &ipp
	}
	mustUDPAddr := func(s string) *net.UDPAddr {
		return mustIPPortPtr(s).UDPAddr()
	}
	udpAddrs := func(ss ...string) (ret []net.UDPAddr) {
		t.Helper()
		for _, s := range ss {
			ret = append(ret, *mustUDPAddr(s))
		}
		return ret
	}
	joinUDPs := func(in []netaddr.IPPort) string {
		var sb strings.Builder
		for i, ua := range in {
			if i > 0 {
				sb.WriteByte(',')
			}
			sb.WriteString(ua.String())
		}
		return sb.String()
	}
	var (
		regPacket   = []byte("some regular packet")
		sprayPacket = []byte("0000")
	)
	binary.LittleEndian.PutUint32(sprayPacket[:4], device.MessageInitiationType)
	if !shouldSprayPacket(sprayPacket) {
		t.Fatal("sprayPacket should be classified as a spray packet for testing")
	}

	// A step is either a b+want appendDests test, or an
	// UpdateDst call, depending on which fields are set.
	type step struct {
		// advance is the time to advance the fake clock
		// before the step.
		advance time.Duration

		// updateDst, if set, does an UpdateDst call and
		// b+want are ignored.
		updateDst *net.UDPAddr

		b    []byte
		want string // comma-separated
	}
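	// Each case scripts a sequence of appendDests/UpdateDst steps against
	// a fake clock and checks which destinations are chosen.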
	tests := []struct {
		name     string
		as       *addrSet
		steps    []step
		logCheck func(t *testing.T, logged []byte)
	}{
		{
			name: "reg_packet_no_curaddr",
			as: &addrSet{
				addrs:    udpAddrs("127.3.3.40:1", "123.45.67.89:123", "10.0.0.1:123"),
				curAddr:  -1, // unknown
				roamAddr: nil,
			},
			steps: []step{
				{b: regPacket, want: "127.3.3.40:1"},
			},
		},
		{
			name: "reg_packet_have_curaddr",
			as: &addrSet{
				addrs:    udpAddrs("127.3.3.40:1", "123.45.67.89:123", "10.0.0.1:123"),
				curAddr:  1, // global IP
				roamAddr: nil,
			},
			steps: []step{
				{b: regPacket, want: "123.45.67.89:123"},
			},
		},
		{
			name: "reg_packet_have_roamaddr",
			as: &addrSet{
				addrs:    udpAddrs("127.3.3.40:1", "123.45.67.89:123", "10.0.0.1:123"),
				curAddr:  2, // should be ignored
				roamAddr: mustIPPortPtr("5.6.7.8:123"),
			},
			steps: []step{
				{b: regPacket, want: "5.6.7.8:123"},
				{updateDst: mustUDPAddr("10.0.0.1:123")}, // no more roaming
				{b: regPacket, want: "10.0.0.1:123"},
			},
		},
		{
			name: "start_roaming",
			as: &addrSet{
				addrs:   udpAddrs("127.3.3.40:1", "123.45.67.89:123", "10.0.0.1:123"),
				curAddr: 2,
			},
			steps: []step{
				{b: regPacket, want: "10.0.0.1:123"},
				{updateDst: mustUDPAddr("4.5.6.7:123")},
				{b: regPacket, want: "4.5.6.7:123"},
				{updateDst: mustUDPAddr("5.6.7.8:123")},
				{b: regPacket, want: "5.6.7.8:123"},
				{updateDst: mustUDPAddr("123.45.67.89:123")}, // end roaming
				{b: regPacket, want: "123.45.67.89:123"},
			},
		},
		{
			name: "spray_packet",
			as: &addrSet{
				addrs:    udpAddrs("127.3.3.40:1", "123.45.67.89:123", "10.0.0.1:123"),
				curAddr:  2, // should be ignored
				roamAddr: mustIPPortPtr("5.6.7.8:123"),
			},
			steps: []step{
				{b: sprayPacket, want: "127.3.3.40:1,123.45.67.89:123,10.0.0.1:123,5.6.7.8:123"},
				{advance: 300 * time.Millisecond, b: regPacket, want: "127.3.3.40:1,123.45.67.89:123,10.0.0.1:123,5.6.7.8:123"},
				{advance: 300 * time.Millisecond, b: regPacket, want: "127.3.3.40:1,123.45.67.89:123,10.0.0.1:123,5.6.7.8:123"},
				{advance: 3, b: regPacket, want: "5.6.7.8:123"},
				{advance: 2 * time.Millisecond, updateDst: mustUDPAddr("10.0.0.1:123")},
				{advance: 3, b: regPacket, want: "10.0.0.1:123"},
			},
		},
		{
			name: "low_pri",
			as: &addrSet{
				addrs:   udpAddrs("127.3.3.40:1", "123.45.67.89:123", "10.0.0.1:123"),
				curAddr: 2,
			},
			steps: []step{
				{updateDst: mustUDPAddr("123.45.67.89:123")},
				{updateDst: mustUDPAddr("123.45.67.89:123")},
			},
			logCheck: func(t *testing.T, logged []byte) {
				if n := bytes.Count(logged, []byte(", keeping current ")); n != 1 {
					t.Errorf("low-prio keeping current logged %d times; want 1", n)
				}
			},
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			faket := time.Unix(0, 0)
			var logBuf bytes.Buffer
			tt.as.Logf = func(format string, args ...interface{}) {
				fmt.Fprintf(&logBuf, format, args...)
				t.Logf(format, args...)
			}
			tt.as.clock = func() time.Time { return faket }
			initAddrSet(tt.as)
			for i, st := range tt.steps {
				faket = faket.Add(st.advance)

				if st.updateDst != nil {
					if err := tt.as.UpdateDst(st.updateDst); err != nil {
						t.Fatal(err)
					}
					continue
				}
				got, _ := tt.as.appendDests(nil, st.b)
				if gotStr := joinUDPs(got); gotStr != st.want {
					t.Errorf("step %d: got %v; want %v", i, gotStr, st.want)
				}
			}
			if tt.logCheck != nil {
				tt.logCheck(t, logBuf.Bytes())
			}
		})
	}
}

// initAddrSet initializes fields in the provided incomplete addrSet
// to satisfy invariants within magicsock.
func initAddrSet(as *addrSet) {
	if as.roamAddr != nil && as.roamAddrStd == nil {
		as.roamAddrStd = as.roamAddr.UDPAddr()
	}
	if len(as.ipPorts) == 0 {
		for _, ua := range as.addrs {
			ipp, ok := netaddr.FromStdAddr(ua.IP, ua.Port, ua.Zone)
			if !ok {
				panic(fmt.Sprintf("bogus UDPAddr %+v", ua))
			}
			as.ipPorts = append(as.ipPorts, ipp)
		}
	}
}

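// TestDiscoMessage verifies that handleDiscoMessage accepts and opens a
// disco packet sealed with a known peer's disco key.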
func TestDiscoMessage(t *testing.T) {
	c := newConn()
	c.logf = t.Logf
	c.privateKey = key.NewPrivate()

	peer1Pub := c.DiscoPublicKey()
	peer1Priv := c.discoPrivate
	c.endpointOfDisco = map[tailcfg.DiscoKey]*discoEndpoint{
		tailcfg.DiscoKey(peer1Pub): &discoEndpoint{
			// ... (enough for this test)
		},
	}
	c.nodeOfDisco = map[tailcfg.DiscoKey]*tailcfg.Node{
		tailcfg.DiscoKey(peer1Pub): &tailcfg.Node{
			// ... (enough for this test)
		},
	}

	const payload = "why hello"

	var nonce [24]byte
	crand.Read(nonce[:])

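	// The packet built below is: magic header, then the sender's disco
	// public key, then a 24-byte nonce, then the NaCl box-sealed payload.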
	pkt := append([]byte("TS💬"), peer1Pub[:]...)
	pkt = append(pkt, nonce[:]...)

	pkt = box.Seal(pkt, []byte(payload), &nonce, c.discoPrivate.Public().B32(), peer1Priv.B32())
	got := c.handleDiscoMessage(pkt, netaddr.IPPort{})
	if !got {
		t.Error("failed to open it")
	}
}

// tests that having a discoEndpoint.String method prevents wireguard-go's
// log.Printf("%v") of its conn.Endpoint values from using reflect to
// walk into the read mutex while it's in use, causing data races.
func TestDiscoStringLogRace(t *testing.T) {
	de := new(discoEndpoint)
	var wg sync.WaitGroup
	wg.Add(2)
	go func() {
		defer wg.Done()
		fmt.Fprintf(ioutil.Discard, "%v", de)
	}()
	go func() {
		defer wg.Done()
		de.mu.Lock()
	}()
	wg.Wait()
}

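// stringifyConfig renders a wgcfg.Config as JSON for test log output.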
func stringifyConfig(cfg wgcfg.Config) string {
	j, err := json.Marshal(cfg)
	if err != nil {
		panic(err)
	}
	return string(j)
}

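// TestDiscoEndpointAlignment ensures discoEndpoint's 64-bit atomic field
// stays 8-byte aligned, which 32-bit platforms require for sync/atomic
// operations.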
func TestDiscoEndpointAlignment(t *testing.T) {
	var de discoEndpoint
	off := unsafe.Offsetof(de.lastRecvUnixAtomic)
	if off%8 != 0 {
		t.Fatalf("lastRecvUnixAtomic is not 8-byte aligned")
	}
	if !de.isFirstRecvActivityInAwhile() { // verify this doesn't panic on 32-bit
		t.Error("expected true")
	}
	if de.isFirstRecvActivityInAwhile() {
		t.Error("expected false on second call")
	}
}

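// BenchmarkReceiveFrom measures the cost of receiving a UDP packet through
// magicsock's Conn.ReceiveIPv4 path.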
func BenchmarkReceiveFrom(b *testing.B) {
	port := pickPort(b)
	conn, err := NewConn(Options{
		Logf: b.Logf,
		Port: port,
		EndpointsFunc: func(eps []string) {
			b.Logf("endpoints: %q", eps)
		},
	})
	if err != nil {
		b.Fatal(err)
	}
	defer conn.Close()

	sendConn, err := net.ListenPacket("udp4", "127.0.0.1:0")
	if err != nil {
		b.Fatal(err)
	}
	defer sendConn.Close()

	var dstAddr net.Addr = conn.pconn4.LocalAddr()
	sendBuf := make([]byte, 1<<10)
	for i := range sendBuf {
		sendBuf[i] = 'x'
	}

	buf := make([]byte, 2<<10)
	for i := 0; i < b.N; i++ {
		if _, err := sendConn.WriteTo(sendBuf, dstAddr); err != nil {
			b.Fatalf("WriteTo: %v", err)
		}
		n, ep, addr, err := conn.ReceiveIPv4(buf)
		if err != nil {
			b.Fatal(err)
		}
		_ = n
		_ = ep
		_ = addr
	}
}

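// BenchmarkReceiveFrom_Native is a baseline for BenchmarkReceiveFrom,
// reading from a plain *net.UDPConn with no magicsock in the path.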
func BenchmarkReceiveFrom_Native(b *testing.B) {
	recvConn, err := net.ListenPacket("udp4", "127.0.0.1:0")
	if err != nil {
		b.Fatal(err)
	}
	defer recvConn.Close()
	recvConnUDP := recvConn.(*net.UDPConn)

	sendConn, err := net.ListenPacket("udp4", "127.0.0.1:0")
	if err != nil {
		b.Fatal(err)
	}
	defer sendConn.Close()

	var dstAddr net.Addr = recvConn.LocalAddr()
	sendBuf := make([]byte, 1<<10)
	for i := range sendBuf {
		sendBuf[i] = 'x'
	}

	buf := make([]byte, 2<<10)
	for i := 0; i < b.N; i++ {
		if _, err := sendConn.WriteTo(sendBuf, dstAddr); err != nil {
			b.Fatalf("WriteTo: %v", err)
		}
		if _, _, err := recvConnUDP.ReadFromUDP(buf); err != nil {
			b.Fatalf("ReadFromUDP: %v", err)
		}
	}
}