2020-05-13 14:16:17 +01:00
|
|
|
// Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
|
|
|
|
// Use of this source code is governed by a BSD-style
|
|
|
|
// license that can be found in the LICENSE file.
|
|
|
|
|
|
|
|
// Package tstun provides a TUN struct implementing the tun.Device interface
|
|
|
|
// with additional features as required by wgengine.
|
|
|
|
package tstun
|
|
|
|
|
|
|
|
import (
|
|
|
|
"errors"
|
2021-07-23 17:45:04 +01:00
|
|
|
"fmt"
|
2020-05-13 14:16:17 +01:00
|
|
|
"io"
|
|
|
|
"os"
|
2021-07-23 17:45:04 +01:00
|
|
|
"strings"
|
2020-06-08 23:19:26 +01:00
|
|
|
"sync"
|
2020-05-13 14:16:17 +01:00
|
|
|
"sync/atomic"
|
2020-06-25 22:19:12 +01:00
|
|
|
"time"
|
2020-05-13 14:16:17 +01:00
|
|
|
|
2021-11-02 21:41:56 +00:00
|
|
|
"go4.org/mem"
|
2021-05-25 20:42:22 +01:00
|
|
|
"golang.zx2c4.com/wireguard/device"
|
|
|
|
"golang.zx2c4.com/wireguard/tun"
|
2022-03-21 21:58:43 +00:00
|
|
|
"gvisor.dev/gvisor/pkg/tcpip/stack"
|
2020-12-20 00:43:25 +00:00
|
|
|
"inet.af/netaddr"
|
2021-09-13 22:21:40 +01:00
|
|
|
"tailscale.com/disco"
|
2020-11-10 00:16:04 +00:00
|
|
|
"tailscale.com/net/packet"
|
2022-01-04 21:33:08 +00:00
|
|
|
"tailscale.com/net/tsaddr"
|
2021-07-21 18:43:53 +01:00
|
|
|
"tailscale.com/tstime/mono"
|
2021-03-21 04:45:47 +00:00
|
|
|
"tailscale.com/types/ipproto"
|
2021-11-02 21:41:56 +00:00
|
|
|
"tailscale.com/types/key"
|
2020-05-13 14:16:17 +01:00
|
|
|
"tailscale.com/types/logger"
|
2021-11-17 00:01:42 +00:00
|
|
|
"tailscale.com/util/clientmetric"
|
2020-05-13 14:16:17 +01:00
|
|
|
"tailscale.com/wgengine/filter"
|
|
|
|
)
|
|
|
|
|
2020-06-08 23:19:26 +01:00
|
|
|
// maxBufferSize is the size of the Wrapper's scratch read buffer
// (t.buffer): the wireguard-go maximum message size, so a read never
// needs a larger buffer.
const maxBufferSize = device.MaxMessageSize

// PacketStartOffset is the minimal amount of leading space that must exist
// before &packet[offset] in a packet passed to Read, Write, or InjectInboundDirect.
// This is necessary to avoid reallocation in wireguard-go internals.
const PacketStartOffset = device.MessageTransportHeaderSize

// MaxPacketSize is the maximum size (in bytes)
// of a packet that can be injected into a tstun.Wrapper.
const MaxPacketSize = device.MaxContentSize

// tapDebug, when true, enables hex-dump logging of TAP frames in poll.
const tapDebug = false // for super verbose TAP debugging
|
|
|
|
|
2020-05-13 14:16:17 +01:00
|
|
|
var (
	// ErrClosed is returned when attempting an operation on a closed Wrapper.
	ErrClosed = errors.New("device closed")
	// ErrFiltered is returned when the acted-on packet is rejected by a filter.
	ErrFiltered = errors.New("packet dropped by filter")
)

// Internal sentinel errors for packet size/offset validation.
var (
	errPacketTooBig   = errors.New("packet too big")
	errOffsetTooBig   = errors.New("offset larger than buffer length")
	errOffsetTooSmall = errors.New("offset smaller than PacketStartOffset")
)

// parsedPacketPool holds a pool of Parsed structs for use in filtering.
// This is needed because escape analysis cannot see that parsed packets
// do not escape through {Pre,Post}Filter{In,Out}.
var parsedPacketPool = sync.Pool{New: func() any { return new(packet.Parsed) }}
|
2020-07-24 16:29:36 +01:00
|
|
|
|
2021-03-27 06:13:20 +00:00
|
|
|
// FilterFunc is a packet-filtering function with access to the Wrapper device.
// It must not hold onto the packet struct, as its backing storage will be reused.
type FilterFunc func(*packet.Parsed, *Wrapper) filter.Response
|
2020-05-26 23:14:19 +01:00
|
|
|
|
2021-03-27 06:13:20 +00:00
|
|
|
// Wrapper augments a tun.Device with packet filtering and injection.
type Wrapper struct {
	logf        logger.Logf
	limitedLogf logger.Logf // aggressively rate-limited logf used for potentially high volume errors
	// tdev is the underlying Wrapper device.
	tdev  tun.Device
	isTAP bool // whether tdev is a TAP device

	// closeOnce guards Close so its teardown runs at most once.
	closeOnce sync.Once

	// lastActivityAtomic is read/written atomically.
	// On 32 bit systems, if the fields above change,
	// you might need to add a pad32.Four field here.
	lastActivityAtomic mono.Time // time of last send or receive

	destIPActivity atomic.Value // of map[netaddr.IP]func()
	destMACAtomic  atomic.Value // of [6]byte
	discoKey       atomic.Value // of key.DiscoPublic

	// buffer stores the oldest unconsumed packet from tdev.
	// It is made a static buffer in order to avoid allocations.
	buffer [maxBufferSize]byte
	// bufferConsumedMu protects bufferConsumed from concurrent sends and closes.
	// It does not prevent send-after-close, only data races.
	bufferConsumedMu sync.Mutex
	// bufferConsumed synchronizes access to buffer (shared by Read and poll).
	//
	// Close closes bufferConsumed. There may be outstanding sends to bufferConsumed
	// when that happens; we catch any resulting panics.
	// This lets us avoid expensive multi-case selects.
	bufferConsumed chan struct{}

	// closed signals poll (by closing) when the device is closed.
	closed chan struct{}
	// outboundMu protects outbound from concurrent sends and closes.
	// It does not prevent send-after-close, only data races.
	outboundMu sync.Mutex
	// outbound is the queue by which packets leave the TUN device.
	//
	// The directions are relative to the network, not the device:
	// inbound packets arrive via UDP and are written into the TUN device;
	// outbound packets are read from the TUN device and sent out via UDP.
	// This queue is needed because although inbound writes are synchronous,
	// the other direction must wait on a Wireguard goroutine to poll it.
	//
	// Empty reads are skipped by Wireguard, so it is always legal
	// to discard an empty packet instead of sending it through t.outbound.
	//
	// Close closes outbound. There may be outstanding sends to outbound
	// when that happens; we catch any resulting panics.
	// This lets us avoid expensive multi-case selects.
	outbound chan tunReadResult

	// eventsUpDown yields up and down tun.Events that arrive on a Wrapper's events channel.
	eventsUpDown chan tun.Event
	// eventsOther yields non-up-and-down tun.Events that arrive on a Wrapper's events channel.
	eventsOther chan tun.Event

	// filter atomically stores the currently active packet filter
	filter atomic.Value // of *filter.Filter
	// filterFlags control the verbosity of logging packet drops/accepts.
	filterFlags filter.RunFlags

	// PreFilterIn is the inbound filter function that runs before the main filter
	// and therefore sees the packets that may be later dropped by it.
	PreFilterIn FilterFunc
	// PostFilterIn is the inbound filter function that runs after the main filter.
	PostFilterIn FilterFunc
	// PreFilterOut is the outbound filter function that runs before the main filter
	// and therefore sees the packets that may be later dropped by it.
	PreFilterOut FilterFunc
	// PostFilterOut is the outbound filter function that runs after the main filter.
	PostFilterOut FilterFunc

	// OnTSMPPongReceived, if non-nil, is called whenever a TSMP pong arrives.
	OnTSMPPongReceived func(packet.TSMPPongReply)

	// PeerAPIPort, if non-nil, returns the peerapi port that's
	// running for the given IP address.
	PeerAPIPort func(netaddr.IP) (port uint16, ok bool)

	// disableFilter disables all filtering when set. This should only be used in tests.
	disableFilter bool

	// disableTSMPRejected disables TSMP rejected responses. For tests.
	disableTSMPRejected bool
}
|
|
|
|
|
2022-03-21 21:58:43 +00:00
|
|
|
// tunReadResult is the result of a TUN read, or an injected result pretending to be a TUN read.
// The data is not interpreted in the usual way for a Read method.
// See the comment in the middle of Wrap.Read.
type tunReadResult struct {
	// Only one of err, packet or data should be set, and are read in that order
	// of precedence.
	err    error
	packet *stack.PacketBuffer
	data   []byte

	// injected is set if the read result was generated internally, and contained packets should not
	// pass through filters.
	injected bool
}
|
|
|
|
|
2021-07-23 17:45:04 +01:00
|
|
|
// WrapTAP returns a new Wrapper around tdev, treating it as a TAP device.
func WrapTAP(logf logger.Logf, tdev tun.Device) *Wrapper {
	return wrap(logf, tdev, true)
}
|
|
|
|
|
2021-03-27 06:13:20 +00:00
|
|
|
// Wrap returns a new Wrapper around tdev, treating it as a TUN device.
func Wrap(logf logger.Logf, tdev tun.Device) *Wrapper {
	return wrap(logf, tdev, false)
}
|
|
|
|
|
|
|
|
// wrap constructs a Wrapper around tdev and starts its background
// goroutines (poll and pumpEvents) before returning it.
func wrap(logf logger.Logf, tdev tun.Device, isTAP bool) *Wrapper {
	logf = logger.WithPrefix(logf, "tstun: ")
	tun := &Wrapper{
		logf:        logf,
		limitedLogf: logger.RateLimitedFn(logf, 1*time.Minute, 2, 10),
		isTAP:       isTAP,
		tdev:        tdev,
		// bufferConsumed is conceptually a condition variable:
		// a goroutine should not block when setting it, even with no listeners.
		bufferConsumed: make(chan struct{}, 1),
		closed:         make(chan struct{}),
		// outbound can be unbuffered; the buffer is an optimization.
		outbound:     make(chan tunReadResult, 1),
		eventsUpDown: make(chan tun.Event),
		eventsOther:  make(chan tun.Event),
		// TODO(dmytro): (highly rate-limited) hexdumps should happen on unknown packets.
		filterFlags: filter.LogAccepts | filter.LogDrops,
	}

	go tun.poll()
	go tun.pumpEvents()
	// The buffer starts out consumed.
	tun.bufferConsumed <- struct{}{}
	tun.noteActivity()

	return tun
}
|
|
|
|
|
2020-07-23 23:15:28 +01:00
|
|
|
// SetDestIPActivityFuncs sets a map of funcs to run per packet
// destination (the map keys).
//
// The map ownership passes to the Wrapper. It must be non-nil.
func (t *Wrapper) SetDestIPActivityFuncs(m map[netaddr.IP]func()) {
	t.destIPActivity.Store(m)
}
|
|
|
|
|
2021-09-13 22:21:40 +01:00
|
|
|
// SetDiscoKey sets the current discovery key.
//
// It is only used for filtering out bogus traffic when network
// stack(s) get confused; see Issue 1526.
func (t *Wrapper) SetDiscoKey(k key.DiscoPublic) {
	t.discoKey.Store(k)
}
|
|
|
|
|
|
|
|
// isSelfDisco reports whether packet p
|
|
|
|
// looks like a Disco packet from ourselves.
|
|
|
|
// See Issue 1526.
|
|
|
|
func (t *Wrapper) isSelfDisco(p *packet.Parsed) bool {
|
|
|
|
if p.IPProto != ipproto.UDP {
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
pkt := p.Payload()
|
2021-11-02 21:41:56 +00:00
|
|
|
discobs, ok := disco.Source(pkt)
|
2021-09-13 22:21:40 +01:00
|
|
|
if !ok {
|
|
|
|
return false
|
|
|
|
}
|
2021-11-02 21:41:56 +00:00
|
|
|
discoSrc := key.DiscoPublicFromRaw32(mem.B(discobs))
|
|
|
|
selfDiscoPub, ok := t.discoKey.Load().(key.DiscoPublic)
|
|
|
|
return ok && selfDiscoPub == discoSrc
|
2021-09-13 22:21:40 +01:00
|
|
|
}
|
|
|
|
|
2021-03-27 06:13:20 +00:00
|
|
|
// Close closes the Wrapper and the underlying tun.Device.
// It is safe to call multiple times; teardown runs only once.
func (t *Wrapper) Close() error {
	var err error
	t.closeOnce.Do(func() {
		// Signal poll and pumpEvents (and isClosed callers) that we're done.
		close(t.closed)
		// Close each channel under its mutex so concurrent
		// sendBufferConsumed/sendOutbound calls don't race with the close.
		t.bufferConsumedMu.Lock()
		close(t.bufferConsumed)
		t.bufferConsumedMu.Unlock()
		t.outboundMu.Lock()
		close(t.outbound)
		t.outboundMu.Unlock()
		err = t.tdev.Close()
	})
	return err
}
|
|
|
|
|
2021-07-01 22:48:09 +01:00
|
|
|
// isClosed reports whether t is closed.
|
|
|
|
func (t *Wrapper) isClosed() bool {
|
|
|
|
select {
|
|
|
|
case <-t.closed:
|
|
|
|
return true
|
|
|
|
default:
|
|
|
|
return false
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
2021-04-27 00:27:34 +01:00
|
|
|
// pumpEvents copies events from t.tdev to t.eventsUpDown and t.eventsOther.
// pumpEvents exits when t.tdev.events or t.closed is closed.
// pumpEvents closes t.eventsUpDown and t.eventsOther when it exits.
func (t *Wrapper) pumpEvents() {
	defer close(t.eventsUpDown)
	defer close(t.eventsOther)
	src := t.tdev.Events()
	for {
		// Retrieve an event from the TUN device.
		var event tun.Event
		var ok bool
		select {
		case <-t.closed:
			return
		case event, ok = <-src:
			if !ok {
				// The underlying device closed its event channel.
				return
			}
		}

		// Pass along event to the correct recipient.
		// Though event is a bitmask, in practice there is only ever one bit set at a time.
		dst := t.eventsOther
		if event&(tun.EventUp|tun.EventDown) != 0 {
			dst = t.eventsUpDown
		}
		select {
		case <-t.closed:
			return
		case dst <- event:
		}
	}
}
|
|
|
|
|
|
|
|
// EventsUpDown returns a TUN event channel that contains all Up and Down events.
func (t *Wrapper) EventsUpDown() chan tun.Event {
	return t.eventsUpDown
}
|
|
|
|
|
|
|
|
// Events returns a TUN event channel that contains all non-Up, non-Down events.
// It is named Events because it is the set of events that we want to expose to wireguard-go,
// and Events is the name specified by the wireguard-go tun.Device interface.
func (t *Wrapper) Events() chan tun.Event {
	return t.eventsOther
}
|
|
|
|
|
2021-03-27 06:13:20 +00:00
|
|
|
// File returns the underlying tun.Device's file, implementing tun.Device.
func (t *Wrapper) File() *os.File {
	return t.tdev.File()
}
|
|
|
|
|
2021-03-27 06:13:20 +00:00
|
|
|
// Flush delegates to the underlying tun.Device, implementing tun.Device.
func (t *Wrapper) Flush() error {
	return t.tdev.Flush()
}
|
|
|
|
|
2021-03-27 06:13:20 +00:00
|
|
|
// MTU returns the underlying tun.Device's MTU, implementing tun.Device.
func (t *Wrapper) MTU() (int, error) {
	return t.tdev.MTU()
}
|
|
|
|
|
2021-03-27 06:13:20 +00:00
|
|
|
// Name returns the underlying tun.Device's name, implementing tun.Device.
func (t *Wrapper) Name() (string, error) {
	return t.tdev.Name()
}
|
|
|
|
|
2021-07-01 22:45:17 +01:00
|
|
|
// allowSendOnClosedChannel suppresses panics due to sending on a closed channel.
// This allows us to avoid synchronization between poll and Close.
// Such synchronization (particularly multi-case selects) is too expensive
// for code like poll or Read that is on the hot path of every packet.
// If this makes you sad or angry, you may want to join our
// weekly Go Performance Delinquents Anonymous meetings on Monday nights.
func allowSendOnClosedChannel() {
	r := recover()
	if r == nil {
		// No panic in flight; nothing to do.
		return
	}
	// Swallow only the specific "send on closed channel" runtime error;
	// any other panic value is re-raised untouched.
	if err, ok := r.(error); ok && err.Error() == "send on closed channel" {
		return
	}
	panic(r)
}
|
|
|
|
|
2021-07-23 17:45:04 +01:00
|
|
|
// ethernetFrameSize is the length of an Ethernet header.
const ethernetFrameSize = 14 // 2 six byte MACs, 2 bytes ethertype
|
|
|
|
|
2020-05-13 14:16:17 +01:00
|
|
|
// poll polls t.tdev.Read, placing the oldest unconsumed packet into t.buffer.
// This is needed because t.tdev.Read in general may block (it does on Windows),
// so packets may be stuck in t.outbound if t.Read called t.tdev.Read directly.
func (t *Wrapper) poll() {
	// Each iteration waits until Read reports the buffer consumed
	// (or wrap's initial send); Close ends the loop by closing t.bufferConsumed.
	for range t.bufferConsumed {
	DoRead:
		var n int
		var err error
		// Read may use memory in t.buffer before PacketStartOffset for mandatory headers.
		// This is the rationale behind the tun.Wrapper.{Read,Write} interfaces
		// and the reason t.buffer has size MaxMessageSize and not MaxContentSize.
		// In principle, read errors are not fatal (but wireguard-go disagrees).
		// We loop here until we get a non-empty (or failed) read.
		// We don't need this loop for correctness,
		// but wireguard-go will skip an empty read,
		// so we might as well avoid the send through t.outbound.
		for n == 0 && err == nil {
			if t.isClosed() {
				return
			}
			if t.isTAP {
				// Leave extra leading room for the Ethernet frame header.
				n, err = t.tdev.Read(t.buffer[:], PacketStartOffset-ethernetFrameSize)
				if tapDebug {
					s := fmt.Sprintf("% x", t.buffer[:])
					// Trim trailing zero bytes to keep the log line readable.
					for strings.HasSuffix(s, " 00") {
						s = strings.TrimSuffix(s, " 00")
					}
					t.logf("TAP read %v, %v: %s", n, err, s)
				}
			} else {
				n, err = t.tdev.Read(t.buffer[:], PacketStartOffset)
			}
		}
		if t.isTAP {
			if err == nil {
				ethernetFrame := t.buffer[PacketStartOffset-ethernetFrameSize:][:n]
				// If handleTAPFrame reports it consumed the frame,
				// go back and read another one.
				if t.handleTAPFrame(ethernetFrame) {
					goto DoRead
				}
			}
			// Fall through. We got an IP packet.
			if n >= ethernetFrameSize {
				n -= ethernetFrameSize
			}
			if tapDebug {
				t.logf("tap regular frame: %x", t.buffer[PacketStartOffset:PacketStartOffset+n])
			}
		}
		t.sendOutbound(tunReadResult{data: t.buffer[PacketStartOffset : PacketStartOffset+n], err: err})
	}
}
|
|
|
|
|
2021-07-07 23:45:00 +01:00
|
|
|
// sendBufferConsumed does t.bufferConsumed <- struct{}{}.
// It protects against any panics or data races that that send could cause.
func (t *Wrapper) sendBufferConsumed() {
	// Swallow the send-on-closed-channel panic Close can trigger.
	defer allowSendOnClosedChannel()
	t.bufferConsumedMu.Lock()
	defer t.bufferConsumedMu.Unlock()
	t.bufferConsumed <- struct{}{}
}
|
|
|
|
|
|
|
|
// sendOutbound does t.outbound <- r.
// It protects against any panics or data races that that send could cause.
func (t *Wrapper) sendOutbound(r tunReadResult) {
	// Swallow the send-on-closed-channel panic Close can trigger.
	defer allowSendOnClosedChannel()
	t.outboundMu.Lock()
	defer t.outboundMu.Unlock()
	t.outbound <- r
}
|
|
|
|
|
2022-01-04 21:33:08 +00:00
|
|
|
// MagicDNS service addresses (port 0), used by filterOut to recognize
// ICMP echo requests aimed at MagicDNS.
var (
	magicDNSIPPort   = netaddr.IPPortFrom(tsaddr.TailscaleServiceIP(), 0) // 100.100.100.100:0
	magicDNSIPPortv6 = netaddr.IPPortFrom(tsaddr.TailscaleServiceIPv6(), 0)
)
|
2021-02-11 19:51:59 +00:00
|
|
|
|
2021-03-27 06:13:20 +00:00
|
|
|
// filterOut runs the outbound (device-to-network) filter pipeline on p,
// reporting whether the packet should be accepted or dropped.
func (t *Wrapper) filterOut(p *packet.Parsed) filter.Response {
	// Fake ICMP echo responses to MagicDNS (100.100.100.100).
	if p.IsEchoRequest() {
		switch p.Dst {
		case magicDNSIPPort:
			header := p.ICMP4Header()
			header.ToResponse()
			outp := packet.Generate(&header, p.Payload())
			t.InjectInboundCopy(outp)
			return filter.DropSilently // don't pass on to OS; already handled
		case magicDNSIPPortv6:
			header := p.ICMP6Header()
			header.ToResponse()
			outp := packet.Generate(&header, p.Payload())
			t.InjectInboundCopy(outp)
			return filter.DropSilently // don't pass on to OS; already handled
		}
	}

	// Issue 1526 workaround: if we sent disco packets over
	// Tailscale from ourselves, then drop them, as that shouldn't
	// happen unless a networking stack is confused, as it seems
	// macOS in Network Extension mode might be.
	if p.IPProto == ipproto.UDP && // disco is over UDP; avoid isSelfDisco call for TCP/etc
		t.isSelfDisco(p) {
		t.limitedLogf("[unexpected] received self disco out packet over tstun; dropping")
		metricPacketOutDropSelfDisco.Add(1)
		return filter.DropSilently
	}

	if t.PreFilterOut != nil {
		if res := t.PreFilterOut(p, t); res.IsDrop() {
			// Handled by userspaceEngine.handleLocalPackets (quad-100 DNS primarily).
			return res
		}
	}

	filt, _ := t.filter.Load().(*filter.Filter)

	if filt == nil {
		// No filter set yet: drop everything rather than leak traffic.
		return filter.Drop
	}

	if filt.RunOut(p, t.filterFlags) != filter.Accept {
		metricPacketOutDropFilter.Add(1)
		return filter.Drop
	}

	if t.PostFilterOut != nil {
		if res := t.PostFilterOut(p, t); res.IsDrop() {
			return res
		}
	}

	return filter.Accept
}
|
|
|
|
|
2020-06-25 22:19:12 +01:00
|
|
|
// noteActivity records that there was a read or write at the current time.
func (t *Wrapper) noteActivity() {
	t.lastActivityAtomic.StoreAtomic(mono.Now())
}
|
|
|
|
|
|
|
|
// IdleDuration reports how long it's been since the last read or write to this device.
//
// Its value should only be presumed accurate to roughly 10ms granularity.
// If there's never been activity, the duration is since the wrapper was created.
func (t *Wrapper) IdleDuration() time.Duration {
	return mono.Since(t.lastActivityAtomic.LoadAtomic())
}
|
|
|
|
|
2021-03-27 06:13:20 +00:00
|
|
|
// Read waits for the next outbound packet (from poll or an injector),
// copies it into buf at offset, runs the outbound filters, and reports
// the packet length, implementing tun.Device.Read.
func (t *Wrapper) Read(buf []byte, offset int) (int, error) {
	res, ok := <-t.outbound
	if !ok {
		// Wrapper is closed.
		return 0, io.EOF
	}
	if res.err != nil {
		return 0, res.err
	}

	metricPacketOut.Add(1)

	var n int
	if res.packet != nil {
		// Injected gVisor packet: assemble it from its header and data views.
		n = copy(buf[offset:], res.packet.NetworkHeader().View())
		n += copy(buf[offset+n:], res.packet.TransportHeader().View())
		n += copy(buf[offset+n:], res.packet.Data().AsRange().AsView())

		res.packet.DecRef()
	} else {
		n = copy(buf[offset:], res.data)

		// t.buffer has a fixed location in memory.
		if &res.data[0] == &t.buffer[PacketStartOffset] {
			// We are done with t.buffer. Let poll re-use it.
			t.sendBufferConsumed()
		}
	}

	p := parsedPacketPool.Get().(*packet.Parsed)
	defer parsedPacketPool.Put(p)
	p.Decode(buf[offset : offset+n])

	// Run any registered per-destination activity callback.
	if m, ok := t.destIPActivity.Load().(map[netaddr.IP]func()); ok {
		if fn := m[p.Dst.IP()]; fn != nil {
			fn()
		}
	}

	// Do not filter injected packets.
	if !res.injected && !t.disableFilter {
		response := t.filterOut(p)
		if response != filter.Accept {
			metricPacketOutDrop.Add(1)
			// Wireguard considers read errors fatal; pretend nothing was read
			return 0, nil
		}
	}

	t.noteActivity()
	return n, nil
}
|
|
|
|
|
2021-03-27 06:13:20 +00:00
|
|
|
// filterIn runs the inbound (network-to-device) filter pipeline on the
// packet in buf, reporting whether it should be written to the TUN device.
func (t *Wrapper) filterIn(buf []byte) filter.Response {
	p := parsedPacketPool.Get().(*packet.Parsed)
	defer parsedPacketPool.Put(p)
	p.Decode(buf)

	// Handle TSMP pings/pongs inline, before any filtering.
	if p.IPProto == ipproto.TSMP {
		if pingReq, ok := p.AsTSMPPing(); ok {
			t.noteActivity()
			t.injectOutboundPong(p, pingReq)
			return filter.DropSilently
		} else if data, ok := p.AsTSMPPong(); ok {
			if f := t.OnTSMPPongReceived; f != nil {
				f(data)
			}
		}
	}

	// Issue 1526 workaround: if we see disco packets over
	// Tailscale from ourselves, then drop them, as that shouldn't
	// happen unless a networking stack is confused, as it seems
	// macOS in Network Extension mode might be.
	if p.IPProto == ipproto.UDP && // disco is over UDP; avoid isSelfDisco call for TCP/etc
		t.isSelfDisco(p) {
		t.limitedLogf("[unexpected] received self disco in packet over tstun; dropping")
		metricPacketInDropSelfDisco.Add(1)
		return filter.DropSilently
	}

	if t.PreFilterIn != nil {
		if res := t.PreFilterIn(p, t); res.IsDrop() {
			return res
		}
	}

	filt, _ := t.filter.Load().(*filter.Filter)

	if filt == nil {
		// No filter set yet: drop everything rather than leak traffic.
		return filter.Drop
	}

	outcome := filt.RunIn(p, t.filterFlags)

	// Let peerapi through the filter; its ACLs are handled at L7,
	// not at the packet level.
	if outcome != filter.Accept &&
		p.IPProto == ipproto.TCP &&
		p.TCPFlags&packet.TCPSyn != 0 &&
		t.PeerAPIPort != nil {
		if port, ok := t.PeerAPIPort(p.Dst.IP()); ok && port == p.Dst.Port() {
			outcome = filter.Accept
		}
	}

	if outcome != filter.Accept {
		metricPacketInDropFilter.Add(1)

		// Tell them, via TSMP, we're dropping them due to the ACL.
		// Their host networking stack can translate this into ICMP
		// or whatnot as required. But notably, their GUI or tailscale CLI
		// can show them a rejection history with reasons.
		if p.IPVersion == 4 && p.IPProto == ipproto.TCP && p.TCPFlags&packet.TCPSyn != 0 && !t.disableTSMPRejected {
			rj := packet.TailscaleRejectedHeader{
				IPSrc:  p.Dst.IP(),
				IPDst:  p.Src.IP(),
				Src:    p.Src,
				Dst:    p.Dst,
				Proto:  p.IPProto,
				Reason: packet.RejectedDueToACLs,
			}
			if filt.ShieldsUp() {
				rj.Reason = packet.RejectedDueToShieldsUp
			}
			pkt := packet.Generate(rj, nil)
			t.InjectOutbound(pkt)

			// TODO(bradfitz): also send a TCP RST, after the TSMP message.
		}

		return filter.Drop
	}

	if t.PostFilterIn != nil {
		if res := t.PostFilterIn(p, t); res.IsDrop() {
			return res
		}
	}

	return filter.Accept
}
|
|
|
|
|
2021-01-12 20:03:41 +00:00
|
|
|
// Write accepts an incoming packet. The packet begins at buf[offset:],
|
|
|
|
// like wireguard-go/tun.Device.Write.
|
2021-03-27 06:13:20 +00:00
|
|
|
func (t *Wrapper) Write(buf []byte, offset int) (int, error) {
|
2021-11-17 00:01:42 +00:00
|
|
|
metricPacketIn.Add(1)
|
2020-06-08 23:19:26 +01:00
|
|
|
if !t.disableFilter {
|
2021-05-06 05:00:49 +01:00
|
|
|
if t.filterIn(buf[offset:]) != filter.Accept {
|
2021-11-17 00:01:42 +00:00
|
|
|
metricPacketInDrop.Add(1)
|
2021-05-06 05:00:49 +01:00
|
|
|
// If we're not accepting the packet, lie to wireguard-go and pretend
|
|
|
|
// that everything is okay with a nil error, so wireguard-go
|
|
|
|
// doesn't log about this Write "failure".
|
|
|
|
//
|
|
|
|
// We return len(buf), but the ill-defined wireguard-go/tun.Device.Write
|
|
|
|
// method doesn't specify how the offset affects the return value.
|
|
|
|
// In fact, the Linux implementation does one of two different things depending
|
|
|
|
// on how the /dev/net/tun was created. But fortunately the wireguard-go
|
|
|
|
// code ignores the int return and only looks at the error:
|
|
|
|
//
|
|
|
|
// device/receive.go: _, err = device.tun.device.Write(....)
|
|
|
|
//
|
|
|
|
// TODO(bradfitz): fix upstream interface docs, implementation.
|
2021-01-12 20:03:41 +00:00
|
|
|
return len(buf), nil
|
|
|
|
}
|
2020-05-13 14:16:17 +01:00
|
|
|
}
|
|
|
|
|
2020-06-25 22:19:12 +01:00
|
|
|
t.noteActivity()
|
2021-07-23 17:45:04 +01:00
|
|
|
return t.tdevWrite(buf, offset)
|
|
|
|
}
|
|
|
|
|
|
|
|
func (t *Wrapper) tdevWrite(buf []byte, offset int) (int, error) {
|
|
|
|
if t.isTAP {
|
|
|
|
return t.tapWrite(buf, offset)
|
|
|
|
}
|
2020-05-13 14:16:17 +01:00
|
|
|
return t.tdev.Write(buf, offset)
|
|
|
|
}
|
|
|
|
|
2021-03-27 06:13:20 +00:00
|
|
|
func (t *Wrapper) GetFilter() *filter.Filter {
|
2020-05-13 14:16:17 +01:00
|
|
|
filt, _ := t.filter.Load().(*filter.Filter)
|
|
|
|
return filt
|
|
|
|
}
|
|
|
|
|
2021-03-27 06:13:20 +00:00
|
|
|
// SetFilter atomically installs filt as the packet filter used for
// subsequent reads and writes. A nil filter causes inbound traffic to
// be dropped until a filter is set.
func (t *Wrapper) SetFilter(filt *filter.Filter) {
	t.filter.Store(filt)
}
|
|
|
|
|
2021-03-27 06:13:20 +00:00
|
|
|
// InjectInboundDirect makes the Wrapper device behave as if a packet
|
2020-05-13 14:16:17 +01:00
|
|
|
// with the given contents was received from the network.
|
|
|
|
// It blocks and does not take ownership of the packet.
|
2020-06-08 23:19:26 +01:00
|
|
|
// The injected packet will not pass through inbound filters.
|
|
|
|
//
|
|
|
|
// The packet contents are to start at &buf[offset].
|
|
|
|
// offset must be greater or equal to PacketStartOffset.
|
|
|
|
// The space before &buf[offset] will be used by Wireguard.
|
2021-03-27 06:13:20 +00:00
|
|
|
func (t *Wrapper) InjectInboundDirect(buf []byte, offset int) error {
|
2020-06-08 23:19:26 +01:00
|
|
|
if len(buf) > MaxPacketSize {
|
|
|
|
return errPacketTooBig
|
|
|
|
}
|
|
|
|
if len(buf) < offset {
|
|
|
|
return errOffsetTooBig
|
|
|
|
}
|
|
|
|
if offset < PacketStartOffset {
|
|
|
|
return errOffsetTooSmall
|
|
|
|
}
|
|
|
|
|
|
|
|
// Write to the underlying device to skip filters.
|
2021-07-23 17:45:04 +01:00
|
|
|
_, err := t.tdevWrite(buf, offset)
|
2020-06-08 23:19:26 +01:00
|
|
|
return err
|
|
|
|
}
|
|
|
|
|
|
|
|
// InjectInboundCopy takes a packet without leading space,
|
2020-09-25 20:24:44 +01:00
|
|
|
// reallocates it to conform to the InjectInboundDirect interface
|
2020-06-08 23:19:26 +01:00
|
|
|
// and calls InjectInboundDirect on it. Injecting a nil packet is a no-op.
|
2021-03-27 06:13:20 +00:00
|
|
|
func (t *Wrapper) InjectInboundCopy(packet []byte) error {
|
2020-06-08 23:19:26 +01:00
|
|
|
// We duplicate this check from InjectInboundDirect here
|
|
|
|
// to avoid wasting an allocation on an oversized packet.
|
2020-05-13 14:16:17 +01:00
|
|
|
if len(packet) > MaxPacketSize {
|
2020-05-26 23:14:19 +01:00
|
|
|
return errPacketTooBig
|
|
|
|
}
|
|
|
|
if len(packet) == 0 {
|
|
|
|
return nil
|
2020-05-13 14:16:17 +01:00
|
|
|
}
|
2020-06-08 23:19:26 +01:00
|
|
|
|
|
|
|
buf := make([]byte, PacketStartOffset+len(packet))
|
|
|
|
copy(buf[PacketStartOffset:], packet)
|
|
|
|
|
|
|
|
return t.InjectInboundDirect(buf, PacketStartOffset)
|
2020-05-13 14:16:17 +01:00
|
|
|
}
|
|
|
|
|
2021-03-27 06:13:20 +00:00
|
|
|
func (t *Wrapper) injectOutboundPong(pp *packet.Parsed, req packet.TSMPPingRequest) {
|
2021-03-23 22:16:15 +00:00
|
|
|
pong := packet.TSMPPongReply{
|
|
|
|
Data: req.Data,
|
|
|
|
}
|
2021-03-29 23:17:05 +01:00
|
|
|
if t.PeerAPIPort != nil {
|
2021-05-15 02:07:28 +01:00
|
|
|
pong.PeerAPIPort, _ = t.PeerAPIPort(pp.Dst.IP())
|
2021-03-29 23:17:05 +01:00
|
|
|
}
|
2021-03-23 22:16:15 +00:00
|
|
|
switch pp.IPVersion {
|
|
|
|
case 4:
|
|
|
|
h4 := pp.IP4Header()
|
|
|
|
h4.ToResponse()
|
|
|
|
pong.IPHeader = h4
|
|
|
|
case 6:
|
|
|
|
h6 := pp.IP6Header()
|
|
|
|
h6.ToResponse()
|
|
|
|
pong.IPHeader = h6
|
|
|
|
default:
|
|
|
|
return
|
|
|
|
}
|
|
|
|
|
|
|
|
t.InjectOutbound(packet.Generate(pong, nil))
|
|
|
|
}
|
|
|
|
|
2021-03-27 06:13:20 +00:00
|
|
|
// InjectOutbound makes the Wrapper device behave as if a packet
|
2020-05-13 14:16:17 +01:00
|
|
|
// with the given contents was sent to the network.
|
|
|
|
// It does not block, but takes ownership of the packet.
|
2020-06-08 23:19:26 +01:00
|
|
|
// The injected packet will not pass through outbound filters.
|
2020-05-26 23:14:19 +01:00
|
|
|
// Injecting an empty packet is a no-op.
|
2021-03-27 06:13:20 +00:00
|
|
|
func (t *Wrapper) InjectOutbound(packet []byte) error {
|
2020-05-13 14:16:17 +01:00
|
|
|
if len(packet) > MaxPacketSize {
|
2020-05-26 23:14:19 +01:00
|
|
|
return errPacketTooBig
|
|
|
|
}
|
|
|
|
if len(packet) == 0 {
|
|
|
|
return nil
|
2020-05-13 14:16:17 +01:00
|
|
|
}
|
2022-03-21 21:58:43 +00:00
|
|
|
t.sendOutbound(tunReadResult{data: packet, injected: true})
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
|
|
|
|
// InjectOutboundPacketBuffer logically behaves as InjectOutbound. It takes ownership of one
|
|
|
|
// reference count on the packet, and the packet may be mutated. The packet refcount will be
|
|
|
|
// decremented after the injected buffer has been read.
|
|
|
|
func (t *Wrapper) InjectOutboundPacketBuffer(packet *stack.PacketBuffer) error {
|
|
|
|
size := packet.Size()
|
|
|
|
if size > MaxPacketSize {
|
|
|
|
packet.DecRef()
|
|
|
|
return errPacketTooBig
|
|
|
|
}
|
|
|
|
if size == 0 {
|
|
|
|
packet.DecRef()
|
|
|
|
return nil
|
|
|
|
}
|
|
|
|
t.sendOutbound(tunReadResult{packet: packet, injected: true})
|
2021-07-01 22:45:17 +01:00
|
|
|
return nil
|
2020-05-13 14:16:17 +01:00
|
|
|
}
|
2020-05-15 08:06:30 +01:00
|
|
|
|
2021-03-27 06:13:20 +00:00
|
|
|
// Unwrap returns the underlying tun.Device, bypassing the Wrapper's
// filtering and injection machinery.
func (t *Wrapper) Unwrap() tun.Device {
	return t.tdev
}
|
2021-11-17 00:01:42 +00:00
|
|
|
|
|
|
|
var (
|
|
|
|
metricPacketIn = clientmetric.NewGauge("tstun_in_from_wg")
|
|
|
|
metricPacketInDrop = clientmetric.NewGauge("tstun_in_from_wg_drop")
|
|
|
|
metricPacketInDropFilter = clientmetric.NewGauge("tstun_in_from_wg_drop_filter")
|
|
|
|
metricPacketInDropSelfDisco = clientmetric.NewGauge("tstun_in_from_wg_drop_self_disco")
|
|
|
|
|
|
|
|
metricPacketOut = clientmetric.NewGauge("tstun_out_to_wg")
|
|
|
|
metricPacketOutDrop = clientmetric.NewGauge("tstun_out_to_wg_drop")
|
|
|
|
metricPacketOutDropFilter = clientmetric.NewGauge("tstun_out_to_wg_drop_filter")
|
|
|
|
metricPacketOutDropSelfDisco = clientmetric.NewGauge("tstun_out_to_wg_drop_self_disco")
|
|
|
|
)
|