tsnet: add CapturePcap method for debugging
Application code can call the tsnet s.CapturePcap(filename) method to write all packets, both sent and received, to a pcap file. Packets are written in cleartext, as seen outside the WireGuard tunnel. This is expected to be useful for debugging. Updates https://github.com/tailscale/tailscale/issues/9707 Signed-off-by: Denton Gentry <dgentry@tailscale.com>
This commit is contained in:
parent
b247435d66
commit
67f3b2a525
|
@ -1122,6 +1122,35 @@ func (s *Server) listen(network, addr string, lnOn listenOn) (net.Listener, erro
|
|||
return ln, nil
|
||||
}
|
||||
|
||||
// CapturePcap can be called by the application code compiled with tsnet to save a pcap
|
||||
// of packets which the netstack within tsnet sees. This is expected to be useful during
|
||||
// debugging, probably not useful for production.
|
||||
//
|
||||
// Packets will be written to the pcap until the process exits. The pcap needs a Lua dissector
|
||||
// to be installed in WireShark in order to decode properly: wgengine/capture/ts-dissector.lua
|
||||
// in this repository.
|
||||
// https://tailscale.com/kb/1023/troubleshooting/#can-i-examine-network-traffic-inside-the-encrypted-tunnel
|
||||
func (s *Server) CapturePcap(ctx context.Context, pcapFile string) error {
|
||||
stream, err := s.localClient.StreamDebugCapture(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
f, err := os.OpenFile(pcapFile, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
|
||||
if err != nil {
|
||||
stream.Close()
|
||||
return err
|
||||
}
|
||||
|
||||
go func(stream io.ReadCloser, f *os.File) {
|
||||
defer stream.Close()
|
||||
defer f.Close()
|
||||
_, _ = io.Copy(f, stream)
|
||||
}(stream, f)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
type listenKey struct {
|
||||
network string
|
||||
host netip.Addr // or zero value for unspecified
|
||||
|
|
|
@ -691,3 +691,56 @@ func TestFallbackTCPHandler(t *testing.T) {
|
|||
t.Errorf("s1TcpConnCount = %d, want %d", got, 1)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCapturePcap(t *testing.T) {
|
||||
const timeLimit = 120
|
||||
ctx, cancel := context.WithTimeout(context.Background(), timeLimit*time.Second)
|
||||
defer cancel()
|
||||
|
||||
dir := t.TempDir()
|
||||
s1Pcap := filepath.Join(dir, "s1.pcap")
|
||||
s2Pcap := filepath.Join(dir, "s2.pcap")
|
||||
|
||||
controlURL, _ := startControl(t)
|
||||
s1, s1ip, _ := startServer(t, ctx, controlURL, "s1")
|
||||
s2, _, _ := startServer(t, ctx, controlURL, "s2")
|
||||
s1.CapturePcap(ctx, s1Pcap)
|
||||
s2.CapturePcap(ctx, s2Pcap)
|
||||
|
||||
lc2, err := s2.LocalClient()
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
// send a packet which both nodes will capture
|
||||
res, err := lc2.Ping(ctx, s1ip, tailcfg.PingICMP)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
t.Logf("ping success: %#+v", res)
|
||||
|
||||
fileSize := func(name string) int64 {
|
||||
fi, err := os.Stat(name)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
return fi.Size()
|
||||
}
|
||||
|
||||
const pcapHeaderSize = 24
|
||||
|
||||
// there is a lag before the io.Copy writes a packet to the pcap files
|
||||
for i := 0; i < (timeLimit * 10); i++ {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
if (fileSize(s1Pcap) > pcapHeaderSize) && (fileSize(s2Pcap) > pcapHeaderSize) {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if got := fileSize(s1Pcap); got <= pcapHeaderSize {
|
||||
t.Errorf("s1 pcap file size = %d, want > pcapHeaderSize(%d)", got, pcapHeaderSize)
|
||||
}
|
||||
if got := fileSize(s2Pcap); got <= pcapHeaderSize {
|
||||
t.Errorf("s2 pcap file size = %d, want > pcapHeaderSize(%d)", got, pcapHeaderSize)
|
||||
}
|
||||
}
|
||||
|
|
Loading…
Reference in New Issue