diff --git a/cmd/k8s-operator/depaware.txt b/cmd/k8s-operator/depaware.txt index 51194d8fa..bda03a386 100644 --- a/cmd/k8s-operator/depaware.txt +++ b/cmd/k8s-operator/depaware.txt @@ -5,6 +5,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ W 💣 github.com/alexbrainman/sspi from github.com/alexbrainman/sspi/internal/common+ W github.com/alexbrainman/sspi/internal/common from github.com/alexbrainman/sspi/negotiate W 💣 github.com/alexbrainman/sspi/negotiate from tailscale.com/net/tshttpproxy + LD github.com/anmitsu/go-shlex from tailscale.com/tempfork/gliderlabs/ssh L github.com/aws/aws-sdk-go-v2/aws from github.com/aws/aws-sdk-go-v2/aws/defaults+ L github.com/aws/aws-sdk-go-v2/aws/arn from tailscale.com/ipn/store/awsstore L github.com/aws/aws-sdk-go-v2/aws/defaults from github.com/aws/aws-sdk-go-v2/service/ssm+ @@ -81,6 +82,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/bits-and-blooms/bitset from github.com/gaissmai/bart 💣 github.com/cespare/xxhash/v2 from github.com/prometheus/client_golang/prometheus L github.com/coreos/go-iptables/iptables from tailscale.com/util/linuxfw + LD 💣 github.com/creack/pty from tailscale.com/ssh/tailssh 💣 github.com/davecgh/go-spew/spew from k8s.io/apimachinery/pkg/util/dump W 💣 github.com/dblohm7/wingoes from github.com/dblohm7/wingoes/com+ W 💣 github.com/dblohm7/wingoes/com from tailscale.com/util/osdiag+ @@ -110,7 +112,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/go-openapi/jsonreference from k8s.io/kube-openapi/pkg/internal+ github.com/go-openapi/jsonreference/internal from github.com/go-openapi/jsonreference github.com/go-openapi/swag from github.com/go-openapi/jsonpointer+ - L 💣 github.com/godbus/dbus/v5 from tailscale.com/net/dns + L 💣 github.com/godbus/dbus/v5 from tailscale.com/net/dns+ 💣 github.com/gogo/protobuf/proto from k8s.io/api/admission/v1+ github.com/gogo/protobuf/sortkeys from 
k8s.io/api/admission/v1+ github.com/golang/groupcache/lru from k8s.io/client-go/tools/record+ @@ -158,6 +160,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/klauspost/compress/zstd from tailscale.com/util/zstdframe github.com/klauspost/compress/zstd/internal/xxhash from github.com/klauspost/compress/zstd github.com/kortschak/wol from tailscale.com/ipn/ipnlocal + LD github.com/kr/fs from github.com/pkg/sftp github.com/mailru/easyjson/buffer from github.com/mailru/easyjson/jwriter 💣 github.com/mailru/easyjson/jlexer from github.com/go-openapi/swag github.com/mailru/easyjson/jwriter from github.com/go-openapi/swag @@ -179,6 +182,8 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ L github.com/pierrec/lz4/v4/internal/lz4stream from github.com/pierrec/lz4/v4 L github.com/pierrec/lz4/v4/internal/xxh32 from github.com/pierrec/lz4/v4/internal/lz4stream github.com/pkg/errors from github.com/evanphx/json-patch/v5+ + LD github.com/pkg/sftp from tailscale.com/ssh/tailssh + LD github.com/pkg/sftp/internal/encoding/ssh/filexfer from github.com/pkg/sftp D github.com/prometheus-community/pro-bing from tailscale.com/wgengine/netstack 💣 github.com/prometheus/client_golang/prometheus from github.com/prometheus/client_golang/prometheus/collectors+ github.com/prometheus/client_golang/prometheus/collectors from sigs.k8s.io/controller-runtime/pkg/internal/controller/metrics @@ -201,7 +206,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ W github.com/tailscale/go-winio/pkg/guid from github.com/tailscale/go-winio+ github.com/tailscale/golang-x-crypto/acme from tailscale.com/ipn/ipnlocal LD github.com/tailscale/golang-x-crypto/internal/poly1305 from github.com/tailscale/golang-x-crypto/ssh - LD github.com/tailscale/golang-x-crypto/ssh from tailscale.com/ipn/ipnlocal + LD github.com/tailscale/golang-x-crypto/ssh from tailscale.com/ipn/ipnlocal+ LD 
github.com/tailscale/golang-x-crypto/ssh/internal/bcrypt_pbkdf from github.com/tailscale/golang-x-crypto/ssh github.com/tailscale/goupnp from github.com/tailscale/goupnp/dcps/internetgateway2+ github.com/tailscale/goupnp/dcps/internetgateway2 from tailscale.com/net/portmapper @@ -224,6 +229,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ github.com/tailscale/wireguard-go/tai64n from github.com/tailscale/wireguard-go/device 💣 github.com/tailscale/wireguard-go/tun from github.com/tailscale/wireguard-go/device+ github.com/tcnksm/go-httpstat from tailscale.com/net/netcheck + LD github.com/u-root/u-root/pkg/termios from tailscale.com/ssh/tailssh L github.com/u-root/uio/rand from github.com/insomniacslk/dhcp/dhcpv4 L github.com/u-root/uio/uio from github.com/insomniacslk/dhcp/dhcpv4+ L 💣 github.com/vishvananda/netlink/nl from github.com/tailscale/netlink @@ -653,6 +659,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/client/web from tailscale.com/ipn/ipnlocal tailscale.com/clientupdate from tailscale.com/client/web+ tailscale.com/clientupdate/distsign from tailscale.com/clientupdate + LD tailscale.com/cmd/tailscaled/childproc from tailscale.com/ssh/tailssh tailscale.com/control/controlbase from tailscale.com/control/controlhttp+ tailscale.com/control/controlclient from tailscale.com/ipn/ipnlocal+ tailscale.com/control/controlhttp from tailscale.com/control/controlclient @@ -735,9 +742,11 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ tailscale.com/posture from tailscale.com/ipn/ipnlocal tailscale.com/proxymap from tailscale.com/tsd+ 💣 tailscale.com/safesocket from tailscale.com/client/tailscale+ + 💣 tailscale.com/ssh/tailssh from tailscale.com/cmd/k8s-operator tailscale.com/syncs from tailscale.com/control/controlknobs+ tailscale.com/tailcfg from tailscale.com/client/tailscale+ tailscale.com/taildrop from tailscale.com/ipn/ipnlocal+ + LD 
tailscale.com/tempfork/gliderlabs/ssh from tailscale.com/ssh/tailssh tailscale.com/tempfork/heap from tailscale.com/wgengine/magicsock tailscale.com/tka from tailscale.com/client/tailscale+ W tailscale.com/tsconst from tailscale.com/net/netmon+ @@ -826,7 +835,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ golang.org/x/crypto/argon2 from tailscale.com/tka golang.org/x/crypto/blake2b from golang.org/x/crypto/argon2+ golang.org/x/crypto/blake2s from github.com/tailscale/wireguard-go/device+ - LD golang.org/x/crypto/blowfish from github.com/tailscale/golang-x-crypto/ssh/internal/bcrypt_pbkdf + LD golang.org/x/crypto/blowfish from github.com/tailscale/golang-x-crypto/ssh/internal/bcrypt_pbkdf+ golang.org/x/crypto/chacha20 from github.com/tailscale/golang-x-crypto/ssh+ golang.org/x/crypto/chacha20poly1305 from crypto/tls+ golang.org/x/crypto/cryptobyte from crypto/ecdsa+ @@ -837,6 +846,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ golang.org/x/crypto/nacl/secretbox from golang.org/x/crypto/nacl/box golang.org/x/crypto/poly1305 from github.com/tailscale/wireguard-go/device+ golang.org/x/crypto/salsa20/salsa from golang.org/x/crypto/nacl/box+ + LD golang.org/x/crypto/ssh from github.com/pkg/sftp+ golang.org/x/exp/constraints from github.com/dblohm7/wingoes/pe+ golang.org/x/exp/maps from sigs.k8s.io/controller-runtime/pkg/cache+ golang.org/x/exp/slices from tailscale.com/cmd/k8s-operator+ @@ -941,6 +951,7 @@ tailscale.com/cmd/k8s-operator dependencies: (generated by github.com/tailscale/ log/internal from log+ log/slog from github.com/go-logr/logr+ log/slog/internal from log/slog + LD log/syslog from tailscale.com/ssh/tailssh maps from sigs.k8s.io/controller-runtime/pkg/predicate+ math from archive/tar+ math/big from crypto/dsa+ diff --git a/cmd/k8s-operator/proxy.go b/cmd/k8s-operator/proxy.go index f3b4aad24..04ea11141 100644 --- a/cmd/k8s-operator/proxy.go +++ b/cmd/k8s-operator/proxy.go @@ 
-11,16 +11,19 @@ import ( "log" "net/http" "net/http/httputil" + "net/netip" "net/url" "os" "strings" + "github.com/pkg/errors" "go.uber.org/zap" "k8s.io/client-go/rest" "k8s.io/client-go/transport" "tailscale.com/client/tailscale" "tailscale.com/client/tailscale/apitype" tskube "tailscale.com/kube" + "tailscale.com/ssh/tailssh" "tailscale.com/tailcfg" "tailscale.com/tsnet" "tailscale.com/util/clientmetric" @@ -34,6 +37,19 @@ var counterNumRequestsProxied = clientmetric.NewCounter("k8s_auth_proxy_requests type apiServerProxyMode int +func (a apiServerProxyMode) String() string { + switch a { + case apiserverProxyModeDisabled: + return "disabled" + case apiserverProxyModeEnabled: + return "auth" + case apiserverProxyModeNoAuth: + return "noauth" + default: + return "unknown" + } +} + const ( apiserverProxyModeDisabled apiServerProxyMode = iota apiserverProxyModeEnabled @@ -97,26 +113,7 @@ func maybeLaunchAPIServerProxy(zlog *zap.SugaredLogger, restConfig *rest.Config, if err != nil { startlog.Fatalf("could not get rest.TransportConfig(): %v", err) } - go runAPIServerProxy(s, rt, zlog.Named("apiserver-proxy"), mode) -} - -// apiserverProxy is an http.Handler that authenticates requests using the Tailscale -// LocalAPI and then proxies them to the Kubernetes API. 
-type apiserverProxy struct { - log *zap.SugaredLogger - lc *tailscale.LocalClient - rp *httputil.ReverseProxy -} - -func (h *apiserverProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) { - who, err := h.lc.WhoIs(r.Context(), r.RemoteAddr) - if err != nil { - h.log.Errorf("failed to authenticate caller: %v", err) - http.Error(w, "failed to authenticate caller", http.StatusInternalServerError) - return - } - counterNumRequestsProxied.Add(1) - h.rp.ServeHTTP(w, r.WithContext(whoIsKey.WithValue(r.Context(), who))) + go runAPIServerProxy(s, rt, zlog.Named("apiserver-proxy"), mode, restConfig.Host) } // runAPIServerProxy runs an HTTP server that authenticates requests using the @@ -133,64 +130,42 @@ func (h *apiserverProxy) ServeHTTP(w http.ResponseWriter, r *http.Request) { // are passed through to the Kubernetes API. // // It never returns. -func runAPIServerProxy(s *tsnet.Server, rt http.RoundTripper, log *zap.SugaredLogger, mode apiServerProxyMode) { +func runAPIServerProxy(ts *tsnet.Server, rt http.RoundTripper, log *zap.SugaredLogger, mode apiServerProxyMode, host string) { if mode == apiserverProxyModeDisabled { return } - ln, err := s.Listen("tcp", ":443") + ln, err := ts.Listen("tcp", ":443") if err != nil { log.Fatalf("could not listen on :443: %v", err) } - u, err := url.Parse(fmt.Sprintf("https://%s:%s", os.Getenv("KUBERNETES_SERVICE_HOST"), os.Getenv("KUBERNETES_SERVICE_PORT_HTTPS"))) + u, err := url.Parse(host) if err != nil { log.Fatalf("runAPIServerProxy: failed to parse URL %v", err) } - lc, err := s.LocalClient() + lc, err := ts.LocalClient() if err != nil { log.Fatalf("could not get local client: %v", err) } + ap := &apiserverProxy{ - log: log, - lc: lc, - rp: &httputil.ReverseProxy{ - Rewrite: func(r *httputil.ProxyRequest) { - // Replace the URL with the Kubernetes APIServer. 
- - r.Out.URL.Scheme = u.Scheme - r.Out.URL.Host = u.Host - if mode == apiserverProxyModeNoAuth { - // If we are not providing authentication, then we are just - // proxying to the Kubernetes API, so we don't need to do - // anything else. - return - } - - // We want to proxy to the Kubernetes API, but we want to use - // the caller's identity to do so. We do this by impersonating - // the caller using the Kubernetes User Impersonation feature: - // https://kubernetes.io/docs/reference/access-authn-authz/authentication/#user-impersonation - - // Out of paranoia, remove all authentication headers that might - // have been set by the client. - r.Out.Header.Del("Authorization") - r.Out.Header.Del("Impersonate-Group") - r.Out.Header.Del("Impersonate-User") - r.Out.Header.Del("Impersonate-Uid") - for k := range r.Out.Header { - if strings.HasPrefix(k, "Impersonate-Extra-") { - r.Out.Header.Del(k) - } - } - - // Now add the impersonation headers that we want. - if err := addImpersonationHeaders(r.Out, log); err != nil { - panic("failed to add impersonation headers: " + err.Error()) - } - }, - Transport: rt, - }, + log: log, + lc: lc, + mode: mode, + upstreamURL: u, + ts: ts, } + ap.rp = &httputil.ReverseProxy{ + Rewrite: func(pr *httputil.ProxyRequest) { + ap.addImpersonationHeadersAsRequired(pr.Out) + }, + Transport: rt, + } + + mux := http.NewServeMux() + mux.HandleFunc("/", ap.serveDefault) + mux.HandleFunc("/api/v1/namespaces/{namespace}/pods/{pod}/exec", ap.serveExec) + hs := &http.Server{ // Kubernetes uses SPDY for exec and port-forward, however SPDY is // incompatible with HTTP/2; so disable HTTP/2 in the proxy. 
@@ -199,14 +174,130 @@ func runAPIServerProxy(s *tsnet.Server, rt http.RoundTripper, log *zap.SugaredLo NextProtos: []string{"http/1.1"}, }, TLSNextProto: make(map[string]func(*http.Server, *tls.Conn, http.Handler)), - Handler: ap, + Handler: mux, } - log.Infof("listening on %s", ln.Addr()) + log.Infof("API server proxy in %q mode is listening on %s", mode, ln.Addr()) if err := hs.ServeTLS(ln, "", ""); err != nil { log.Fatalf("runAPIServerProxy: failed to serve %v", err) } } +// apiserverProxy is an [net/http.Handler] that authenticates requests using the Tailscale +// LocalAPI and then proxies them to the Kubernetes API. +type apiserverProxy struct { + log *zap.SugaredLogger + lc *tailscale.LocalClient + rp *httputil.ReverseProxy + + mode apiServerProxyMode + ts *tsnet.Server + upstreamURL *url.URL +} + +// serveDefault is the default handler for Kubernetes API server requests. +func (ap *apiserverProxy) serveDefault(w http.ResponseWriter, r *http.Request) { + who, err := ap.whoIs(r) + if err != nil { + ap.authError(w, err) + return + } + counterNumRequestsProxied.Add(1) + ap.rp.ServeHTTP(w, r.WithContext(whoIsKey.WithValue(r.Context(), who))) +} + +// serveExec serves 'kubectl exec' requests, optionally configuring the kubectl +// exec sessions to be recorded. +func (ap *apiserverProxy) serveExec(w http.ResponseWriter, r *http.Request) { + who, err := ap.whoIs(r) + if err != nil { + ap.authError(w, err) + return + } + counterNumRequestsProxied.Add(1) + failOpen, addrs, err := determineRecorderConfig(who) + if err != nil { + ap.log.Errorf("error trying to determine whether the 'kubectl exec' session needs to be recorded: %v", err) + return + } + if failOpen && len(addrs) == 0 { // will not record + ap.rp.ServeHTTP(w, r.WithContext(whoIsKey.WithValue(r.Context(), who))) + return + } + if !failOpen && len(addrs) == 0 { + msg := "forbidden: 'kubectl exec' session must be recorded, but no recorders are available." 
+ ap.log.Error(msg) + http.Error(w, msg, http.StatusForbidden) + return + } + if r.Method != "POST" || r.Header.Get("Upgrade") != "SPDY/3.1" { + msg := "'kubectl exec' session recording is configured, but the request is not over SPDY. Session recording is currently only supported for SPDY based clients" + if failOpen { + msg = msg + "; failure mode is 'fail open'; continuing session without recording." + ap.log.Warn(msg) + ap.rp.ServeHTTP(w, r.WithContext(whoIsKey.WithValue(r.Context(), who))) + return + } + ap.log.Error(msg) + msg += "; failure mode is 'fail closed'; closing connection." + http.Error(w, msg, http.StatusForbidden) + return + } + spdyH := &spdyHijacker{ + ts: ap.ts, + req: r, + who: who, + ResponseWriter: w, + log: ap.log, + pod: r.PathValue("pod"), + ns: r.PathValue("namespace"), + addrs: addrs, + failOpen: failOpen, + connectToRecorder: tailssh.ConnectToRecorder, + } + + ap.rp.ServeHTTP(spdyH, r.WithContext(whoIsKey.WithValue(r.Context(), who))) +} + +func (h *apiserverProxy) addImpersonationHeadersAsRequired(r *http.Request) { + r.URL.Scheme = h.upstreamURL.Scheme + r.URL.Host = h.upstreamURL.Host + if h.mode == apiserverProxyModeNoAuth { + // If we are not providing authentication, then we are just + // proxying to the Kubernetes API, so we don't need to do + // anything else. + return + } + + // We want to proxy to the Kubernetes API, but we want to use + // the caller's identity to do so. We do this by impersonating + // the caller using the Kubernetes User Impersonation feature: + // https://kubernetes.io/docs/reference/access-authn-authz/authentication/#user-impersonation + + // Out of paranoia, remove all authentication headers that might + // have been set by the client. 
+ r.Header.Del("Authorization") + r.Header.Del("Impersonate-Group") + r.Header.Del("Impersonate-User") + r.Header.Del("Impersonate-Uid") + for k := range r.Header { + if strings.HasPrefix(k, "Impersonate-Extra-") { + r.Header.Del(k) + } + } + + // Now add the impersonation headers that we want. + if err := addImpersonationHeaders(r, h.log); err != nil { + log.Printf("failed to add impersonation headers: " + err.Error()) + } +} +func (ap *apiserverProxy) whoIs(r *http.Request) (*apitype.WhoIsResponse, error) { + return ap.lc.WhoIs(r.Context(), r.RemoteAddr) +} +func (ap *apiserverProxy) authError(w http.ResponseWriter, err error) { + ap.log.Errorf("failed to authenticate caller: %v", err) + http.Error(w, "failed to authenticate caller", http.StatusInternalServerError) +} + const ( // oldCapabilityName is a legacy form of // tailfcg.PeerCapabilityKubernetes capability. The only capability rule @@ -266,3 +357,34 @@ func addImpersonationHeaders(r *http.Request, log *zap.SugaredLogger) error { } return nil } + +// determineRecorderConfig determines recorder config from requester's peer +// capabilities. Determines whether a 'kubectl exec' session from this requester +// needs to be recorded and what recorders the recording should be sent to. 
+func determineRecorderConfig(who *apitype.WhoIsResponse) (failOpen bool, recorderAddresses []netip.AddrPort, _ error) { + if who == nil { + return false, nil, errors.New("[unexpected] cannot determine caller") + } + failOpen = true + rules, err := tailcfg.UnmarshalCapJSON[tskube.KubernetesCapRule](who.CapMap, tailcfg.PeerCapabilityKubernetes) + if err != nil { + return failOpen, nil, fmt.Errorf("failed to unmarshal Kubernetes capability: %w", err) + } + if len(rules) == 0 { + return failOpen, nil, nil + } + + for _, rule := range rules { + if len(rule.RecorderAddrs) != 0 { + // TODO (irbekrm): here or later determine if the + // recorders behind those addrs are online - else we + // spend 30s trying to reach a recorder whose tailscale + // status is offline. + recorderAddresses = append(recorderAddresses, rule.RecorderAddrs...) + } + if rule.EnforceRecorder { + failOpen = false + } + } + return failOpen, recorderAddresses, nil +} diff --git a/cmd/k8s-operator/proxy_test.go b/cmd/k8s-operator/proxy_test.go index d73686cf7..d1d5733e7 100644 --- a/cmd/k8s-operator/proxy_test.go +++ b/cmd/k8s-operator/proxy_test.go @@ -7,6 +7,8 @@ package main import ( "net/http" + "net/netip" + "reflect" "testing" "github.com/google/go-cmp/cmp" @@ -126,3 +128,72 @@ func TestImpersonationHeaders(t *testing.T) { } } } + +func Test_determineRecorderConfig(t *testing.T) { + addr1, addr2 := netip.MustParseAddrPort("[fd7a:115c:a1e0:ab12:4843:cd96:626b:628b]:80"), netip.MustParseAddrPort("100.99.99.99:80") + tests := []struct { + name string + wantFailOpen bool + wantRecorderAddresses []netip.AddrPort + who *apitype.WhoIsResponse + }{ + { + name: "two_ips_fail_closed", + who: whoResp(map[string][]string{string(tailcfg.PeerCapabilityKubernetes): {`{"recorderAddrs":["[fd7a:115c:a1e0:ab12:4843:cd96:626b:628b]:80","100.99.99.99:80"],"enforceRecorder":true}`}}), + wantRecorderAddresses: []netip.AddrPort{addr1, addr2}, + }, + { + name: "two_ips_fail_open", + who: 
whoResp(map[string][]string{string(tailcfg.PeerCapabilityKubernetes): {`{"recorderAddrs":["[fd7a:115c:a1e0:ab12:4843:cd96:626b:628b]:80","100.99.99.99:80"]}`}}), + wantRecorderAddresses: []netip.AddrPort{addr1, addr2}, + wantFailOpen: true, + }, + { + name: "odd_rule_combination_fail_closed", + who: whoResp(map[string][]string{string(tailcfg.PeerCapabilityKubernetes): {`{"recorderAddrs":["100.99.99.99:80"],"enforceRecorder":false}`, `{"recorderAddrs":["[fd7a:115c:a1e0:ab12:4843:cd96:626b:628b]:80"]}`, `{"enforceRecorder":true,"impersonate":{"groups":["system:masters"]}}`}}), + wantRecorderAddresses: []netip.AddrPort{addr2, addr1}, + }, + { + name: "no_caps", + who: whoResp(map[string][]string{}), + wantFailOpen: true, + }, + { + name: "no_recorder_caps", + who: whoResp(map[string][]string{"foo": {`{"x":"y"}`}, string(tailcfg.PeerCapabilityKubernetes): {`{"impersonate":{"groups":["system:masters"]}}`}}), + wantFailOpen: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + gotFailOpen, gotRecorderAddresses, err := determineRecorderConfig(tt.who) + if err != nil { + t.Fatalf("unexpected error: %v", err) + } + if gotFailOpen != tt.wantFailOpen { + t.Errorf("determineRecorderConfig() gotFailOpen = %v, want %v", gotFailOpen, tt.wantFailOpen) + } + if !reflect.DeepEqual(gotRecorderAddresses, tt.wantRecorderAddresses) { + t.Errorf("determineRecorderConfig() gotRecorderAddresses = %v, want %v", gotRecorderAddresses, tt.wantRecorderAddresses) + } + }) + } +} + +func whoResp(capMap map[string][]string) *apitype.WhoIsResponse { + resp := &apitype.WhoIsResponse{ + CapMap: tailcfg.PeerCapMap{}, + } + for cap, rules := range capMap { + resp.CapMap[tailcfg.PeerCapability(cap)] = raw(rules...) 
+ } + return resp +} + +func raw(in ...string) []tailcfg.RawMessage { + var out []tailcfg.RawMessage + for _, i := range in { + out = append(out, tailcfg.RawMessage(i)) + } + return out +} diff --git a/cmd/k8s-operator/recorder.go b/cmd/k8s-operator/recorder.go new file mode 100644 index 000000000..ae17f3820 --- /dev/null +++ b/cmd/k8s-operator/recorder.go @@ -0,0 +1,88 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "encoding/json" + "fmt" + "io" + "sync" + "time" + + "github.com/pkg/errors" + "tailscale.com/tstime" +) + +// recorder knows how to send the provided bytes to the configured tsrecorder +// instance in asciinema format. +type recorder struct { + start time.Time + clock tstime.Clock + + // failOpen specifies whether the session should be allowed to + // continue if writing to the recording fails. + failOpen bool + + // backOff is set to true if we've failed open and should stop + // attempting to write to tsrecorder. + backOff bool + + mu sync.Mutex // guards writes to conn + conn io.WriteCloser // connection to a tsrecorder instance +} + +// Write appends timestamp to the provided bytes and sends them to the +// configured tsrecorder. +func (rec *recorder) Write(p []byte) (err error) { + if len(p) == 0 { + return nil + } + if rec.backOff { + return nil + } + j, err := json.Marshal([]any{ + rec.clock.Now().Sub(rec.start).Seconds(), + "o", + string(p), + }) + if err != nil { + return fmt.Errorf("error marhalling payload: %w", err) + } + j = append(j, '\n') + if err := rec.writeCastLine(j); err != nil { + if !rec.failOpen { + return fmt.Errorf("error writing payload to recorder: %w", err) + } + rec.backOff = true + } + return nil +} + +func (rec *recorder) Close() error { + rec.mu.Lock() + defer rec.mu.Unlock() + if rec.conn == nil { + return nil + } + err := rec.conn.Close() + rec.conn = nil + return err +} + +// writeCastLine sends bytes to the tsrecorder. 
The bytes should be in +// asciinema format. +func (rec *recorder) writeCastLine(j []byte) error { + rec.mu.Lock() + defer rec.mu.Unlock() + if rec.conn == nil { + return errors.New("recorder closed") + } + _, err := rec.conn.Write(j) + if err != nil { + return fmt.Errorf("recorder write error: %w", err) + } + return nil +} diff --git a/cmd/k8s-operator/spdy-frame.go b/cmd/k8s-operator/spdy-frame.go new file mode 100644 index 000000000..0ddefdfa1 --- /dev/null +++ b/cmd/k8s-operator/spdy-frame.go @@ -0,0 +1,285 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "bytes" + "encoding/binary" + "fmt" + "io" + "net/http" + "sync" + + "go.uber.org/zap" +) + +const ( + SYN_STREAM ControlFrameType = 1 // https://www.ietf.org/archive/id/draft-mbelshe-httpbis-spdy-00.txt section 2.6.1 + SYN_REPLY ControlFrameType = 2 // https://www.ietf.org/archive/id/draft-mbelshe-httpbis-spdy-00.txt section 2.6.2 + SYN_PING ControlFrameType = 6 // https://www.ietf.org/archive/id/draft-mbelshe-httpbis-spdy-00.txt section 2.6.5 +) + +// spdyFrame is a parsed SPDY frame as defined in +// https://www.ietf.org/archive/id/draft-mbelshe-httpbis-spdy-00.txt +// A SPDY frame can be either a control frame or a data frame. +type spdyFrame struct { + Raw []byte // full frame as raw bytes + + // Common frame fields: + Ctrl bool // true if this is a SPDY control frame + Payload []byte // payload as raw bytes + + // Control frame fields: + Version uint16 // SPDY protocol version + Type ControlFrameType + + // Data frame fields: + // StreamID is the id of the steam to which this data frame belongs. + // SPDY allows transmitting multiple data streams concurrently. + StreamID uint32 +} + +// Type of an SPDY control frame. +type ControlFrameType uint16 + +// Parse parses bytes into spdyFrame. +// If the bytes don't contain a full frame, return false. 
+// +// Control frame structure: +// +// +----------------------------------+ +// |C| Version(15bits) | Type(16bits) | +// +----------------------------------+ +// | Flags (8) | Length (24 bits) | +// +----------------------------------+ +// | Data | +// +----------------------------------+ +// +// Data frame structure: +// +// +----------------------------------+ +// |C| Stream-ID (31bits) | +// +----------------------------------+ +// | Flags (8) | Length (24 bits) | +// +----------------------------------+ +// | Data | +// +----------------------------------+ +// +// https://www.ietf.org/archive/id/draft-mbelshe-httpbis-spdy-00.txt +func (sf *spdyFrame) Parse(b []byte, log *zap.SugaredLogger) (ok bool, _ error) { + const ( + spdyHeaderLength = 8 + ) + have := len(b) + if have < spdyHeaderLength { // input does not contain full frame + return false, nil + } + + if !isSPDYFrameHeader(b) { + return false, fmt.Errorf("bytes %v do not seem to contain SPDY frames. Ensure that you are using a SPDY based client to 'kubectl exec'.", b) + } + + payloadLength := readInt24(b[5:8]) + frameLength := payloadLength + spdyHeaderLength + if have < frameLength { // input does not contain full frame + return false, nil + } + + frame := b[:frameLength:frameLength] // enforce frameLength capacity + + sf.Raw = frame + sf.Payload = frame[spdyHeaderLength:frameLength] + + sf.Ctrl = hasControlBitSet(frame) + + if !sf.Ctrl { // data frame + sf.StreamID = dataFrameStreamID(frame) + return true, nil + } + + sf.Version = controlFrameVersion(frame) + sf.Type = controlFrameType(frame) + return true, nil +} + +// parseHeaders retrieves any headers from this spdyFrame. 
+func (sf *spdyFrame) parseHeaders(z *zlibReader, log *zap.SugaredLogger) (http.Header, error) { + if !sf.Ctrl { + return nil, fmt.Errorf("[unexpected] parseHeaders called for a frame that is not a control frame") + } + const ( + // +------------------------------------+ + // |X| Stream-ID (31bits) | + // +------------------------------------+ + // |X| Associated-To-Stream-ID (31bits) | + // +------------------------------------+ + // | Pri|Unused | Slot | | + // +-------------------+ | + synStreamPayloadLengthBeforeHeaders = 10 + + // +------------------------------------+ + // |X| Stream-ID (31bits) | + //+------------------------------------+ + synReplyPayloadLengthBeforeHeaders = 4 + + // +----------------------------------| + // | 32-bit ID | + // +----------------------------------+ + pingPayloadLength = 4 + ) + + switch sf.Type { + case SYN_STREAM: + if len(sf.Payload) < synStreamPayloadLengthBeforeHeaders { + return nil, fmt.Errorf("SYN_STREAM frame too short: %v", len(sf.Payload)) + } + z.Set(sf.Payload[synStreamPayloadLengthBeforeHeaders:]) + return parseHeaders(z, log) + case SYN_REPLY: + if len(sf.Payload) < synReplyPayloadLengthBeforeHeaders { + return nil, fmt.Errorf("SYN_REPLY frame too short: %v", len(sf.Payload)) + } + if len(sf.Payload) == synReplyPayloadLengthBeforeHeaders { + return nil, nil // no headers + } + z.Set(sf.Payload[synReplyPayloadLengthBeforeHeaders:]) + return parseHeaders(z, log) + case SYN_PING: + if len(sf.Payload) != pingPayloadLength { + return nil, fmt.Errorf("PING frame with unexpected length %v", len(sf.Payload)) + } + return nil, nil // ping frame has no headers + + default: + log.Infof("[unexpected] unknown control frame type %v", sf.Type) + } + return nil, nil +} + +// parseHeaders expects to be passed a reader that contains a compressed SPDY control +// frame Name/Value Header Block with 0 or more headers: +// +// | Number of Name/Value pairs (int32) | <+ +// +------------------------------------+ | +// | Length of name 
(int32) | | This section is the "Name/Value +// +------------------------------------+ | Header Block", and is compressed. +// | Name (string) | | +// +------------------------------------+ | +// | Length of value (int32) | | +// +------------------------------------+ | +// | Value (string) | | +// +------------------------------------+ | +// | (repeats) | <+ +// +// It extracts the headers and returns them as http.Header. By doing that it +// also advances the provided reader past the headers block. +// See also https://www.ietf.org/archive/id/draft-mbelshe-httpbis-spdy-00.txt section 2.6.10 +func parseHeaders(decompressor io.Reader, log *zap.SugaredLogger) (http.Header, error) { + buf := bufPool.Get().(*bytes.Buffer) + defer bufPool.Put(buf) + buf.Reset() + + // readUint32 reads the next 4 decompressed bytes from the decompressor + // as a uint32. + readUint32 := func() (uint32, error) { + const uint32Length = 4 + if _, err := io.CopyN(buf, decompressor, uint32Length); err != nil { // decompress + return 0, fmt.Errorf("error decompressing bytes: %w", err) + } + return binary.BigEndian.Uint32(buf.Next(uint32Length)), nil // return as uint32 + } + + // readLenBytes decompresses and returns as bytes the next 'Name' or 'Value' + // field from SPDY Name/Value header block. decompressor must be at + // 'Length of name'/'Length of value' field. 
+ readLenBytes := func() ([]byte, error) { + xLen, err := readUint32() // length of field to read + if err != nil { + return nil, err + } + if _, err := io.CopyN(buf, decompressor, int64(xLen)); err != nil { // decompress + return nil, err + } + return buf.Next(int(xLen)), nil + } + + numHeaders, err := readUint32() + if err != nil { + return nil, fmt.Errorf("error determining num headers: %v", err) + } + h := make(http.Header, numHeaders) + for i := uint32(0); i < numHeaders; i++ { + name, err := readLenBytes() + if err != nil { + return nil, err + } + ns := string(name) + if _, ok := h[ns]; ok { + return nil, fmt.Errorf("invalid data: duplicate header %q", ns) + } + val, err := readLenBytes() + if err != nil { + return nil, fmt.Errorf("error reading header data: %w", err) + } + for _, v := range bytes.Split(val, headerSep) { + h.Add(ns, string(v)) + } + } + return h, nil +} + +// isSPDYFrame validates that the input bytes start with a valid SPDY frame +// header. +func isSPDYFrameHeader(f []byte) bool { + if hasControlBitSet(f) { + // If this is a control frame, version and type must be set. + return controlFrameVersion(f) != uint16(0) && uint16(controlFrameType(f)) != uint16(0) + } + // If this is a data frame, stream ID must be set. + return dataFrameStreamID(f) != uint32(0) +} + +// spdyDataFrameStreamID returns stream ID for an SPDY data frame passed as the +// input data slice. StreaID is contained within bits [0-31) of a data frame +// header. +func dataFrameStreamID(frame []byte) uint32 { + return binary.BigEndian.Uint32(frame[0:4]) & 0x7f +} + +// controlFrameType returns the type of a SPDY control frame. +// See https://www.ietf.org/archive/id/draft-mbelshe-httpbis-spdy-00.txt section 2.6 +func controlFrameType(f []byte) ControlFrameType { + return ControlFrameType(binary.BigEndian.Uint16(f[2:4])) +} + +// spdyControlFrameVersion returns SPDY version extracted from input bytes that +// must be a SPDY control frame. 
+func controlFrameVersion(frame []byte) uint16 { + bs := binary.BigEndian.Uint16(frame[0:2]) // first 16 bits + return bs & 0x7f // discard control bit +} + +// hasControlBitSet returns true if the passsed bytes have SPDY control bit set. +// SPDY frames can be either control frames or data frames. A control frame has +// control bit set to 1 and a data frame has it set to 0. +func hasControlBitSet(frame []byte) bool { + return frame[0]&0x80 == 128 // 0x80 +} + +var bufPool = sync.Pool{ + New: func() any { + return new(bytes.Buffer) + }, +} + +// Headers in SPDY header name/value block are separated by a 0 byte. +// https://www.ietf.org/archive/id/draft-mbelshe-httpbis-spdy-00.txt section 2.6.10 +var headerSep = []byte{0} + +func readInt24(b []byte) int { + _ = b[2] // bounds check hint to compiler; see golang.org/issue/14808 + return int(b[0])<<16 | int(b[1])<<8 | int(b[2]) +} diff --git a/cmd/k8s-operator/spdy-frame_test.go b/cmd/k8s-operator/spdy-frame_test.go new file mode 100644 index 000000000..416ddfc8b --- /dev/null +++ b/cmd/k8s-operator/spdy-frame_test.go @@ -0,0 +1,293 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "bytes" + "compress/zlib" + "encoding/binary" + "io" + "net/http" + "reflect" + "strings" + "testing" + + "github.com/google/go-cmp/cmp" + "go.uber.org/zap" +) + +func Test_spdyFrame_Parse(t *testing.T) { + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatal(err) + } + tests := []struct { + name string + gotBytes []byte + wantFrame spdyFrame + wantOk bool + wantErr bool + }{ + { + name: "control_frame_syn_stream", + gotBytes: []byte{0x80, 0x3, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0}, + wantFrame: spdyFrame{ + Version: 3, + Type: SYN_STREAM, + Ctrl: true, + Raw: []byte{0x80, 0x3, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0}, + Payload: []byte{}, + }, + wantOk: true, + }, + { + name: "control_frame_syn_reply", + gotBytes: []byte{0x80, 0x3, 0x0, 0x2, 0x0, 0x0, 0x0, 0x0}, + 
wantFrame: spdyFrame{ + Ctrl: true, + Version: 3, + Type: SYN_REPLY, + Raw: []byte{0x80, 0x3, 0x0, 0x2, 0x0, 0x0, 0x0, 0x0}, + Payload: []byte{}, + }, + wantOk: true, + }, + { + name: "control_frame_headers", + gotBytes: []byte{0x80, 0x3, 0x0, 0x8, 0x0, 0x0, 0x0, 0x0}, + wantFrame: spdyFrame{ + Ctrl: true, + Version: 3, + Type: 8, + Raw: []byte{0x80, 0x3, 0x0, 0x8, 0x0, 0x0, 0x0, 0x0}, + Payload: []byte{}, + }, + wantOk: true, + }, + { + name: "data_frame_stream_id_5", + gotBytes: []byte{0x0, 0x0, 0x0, 0x5, 0x0, 0x0, 0x0, 0x0}, + wantFrame: spdyFrame{ + Payload: []byte{}, + StreamID: 5, + Raw: []byte{0x0, 0x0, 0x0, 0x5, 0x0, 0x0, 0x0, 0x0}, + }, + wantOk: true, + }, + { + name: "frame_with_incomplete_header", + gotBytes: []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, + }, + { + name: "frame_with_incomplete_payload", + gotBytes: []byte{0x0, 0x0, 0x0, 0x5, 0x0, 0x0, 0x0, 0x2}, // header specifies payload length of 2 + }, + { + name: "control_bit_set_not_spdy_frame", + gotBytes: []byte{0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, // header specifies payload length of 2 + wantErr: true, + }, + { + name: "control_bit_not_set_not_spdy_frame", + gotBytes: []byte{0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0}, // header specifies payload length of 2 + wantErr: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + sf := &spdyFrame{} + gotOk, err := sf.Parse(tt.gotBytes, zl.Sugar()) + if (err != nil) != tt.wantErr { + t.Errorf("spdyFrame.Parse() error = %v, wantErr %v", err, tt.wantErr) + return + } + if gotOk != tt.wantOk { + t.Errorf("spdyFrame.Parse() = %v, want %v", gotOk, tt.wantOk) + } + if diff := cmp.Diff(*sf, tt.wantFrame); diff != "" { + t.Errorf("Unexpected SPDY frame (-got +want):\n%s", diff) + } + }) + } +} + +func Test_spdyFrame_parseHeaders(t *testing.T) { + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatal(err) + } + tests := []struct { + name string + isCtrl bool + payload []byte + typ ControlFrameType + wantHeader http.Header + 
wantErr bool + }{ + { + name: "syn_stream_with_header", + payload: payload(t, map[string]string{"Streamtype": "stdin"}, SYN_STREAM, 1), + typ: SYN_STREAM, + isCtrl: true, + wantHeader: header(map[string]string{"Streamtype": "stdin"}), + }, + { + name: "syn_ping", + payload: payload(t, nil, SYN_PING, 0), + typ: SYN_PING, + isCtrl: true, + }, + { + name: "syn_reply_headers", + payload: payload(t, map[string]string{"foo": "bar", "bar": "baz"}, SYN_REPLY, 0), + typ: SYN_REPLY, + isCtrl: true, + wantHeader: header(map[string]string{"foo": "bar", "bar": "baz"}), + }, + { + name: "syn_reply_no_headers", + payload: payload(t, nil, SYN_REPLY, 0), + typ: SYN_REPLY, + isCtrl: true, + }, + { + name: "syn_stream_too_short_payload", + payload: []byte{0, 1, 2, 3, 4}, + typ: SYN_STREAM, + isCtrl: true, + wantErr: true, + }, + { + name: "syn_reply_too_short_payload", + payload: []byte{0, 1, 2}, + typ: SYN_REPLY, + isCtrl: true, + wantErr: true, + }, + { + name: "syn_ping_too_short_payload", + payload: []byte{0, 1, 2}, + typ: SYN_PING, + isCtrl: true, + wantErr: true, + }, + { + name: "not_a_control_frame", + payload: []byte{0, 1, 2, 3}, + typ: SYN_PING, + wantErr: true, + }, + } + for _, tt := range tests { + var reader zlibReader + t.Run(tt.name, func(t *testing.T) { + sf := &spdyFrame{ + Ctrl: tt.isCtrl, + Type: tt.typ, + Payload: tt.payload, + } + gotHeader, err := sf.parseHeaders(&reader, zl.Sugar()) + if (err != nil) != tt.wantErr { + t.Errorf("spdyFrame.parseHeaders() error = %v, wantErr %v", err, tt.wantErr) + } + if !reflect.DeepEqual(gotHeader, tt.wantHeader) { + t.Errorf("spdyFrame.parseHeaders() = %v, want %v", gotHeader, tt.wantHeader) + } + }) + } +} + +// payload takes a control frame type and a map with 0 or more header keys and +// values and returns a SPDY control frame payload with the header as SPDY zlib +// compressed header name/value block. 
The payload is padded with arbitrary +// bytes to ensure the header name/value block is in the correct position for +// the frame type. +func payload(t *testing.T, headerM map[string]string, typ ControlFrameType, streamID int) []byte { + t.Helper() + + buf := bytes.NewBuffer([]byte{}) + writeControlFramePayloadBeforeHeaders(t, buf, typ, streamID) + if len(headerM) == 0 { + return buf.Bytes() + } + + w, err := zlib.NewWriterLevelDict(buf, zlib.BestCompression, spdyTxtDictionary) + if err != nil { + t.Fatalf("error creating new zlib writer: %v", err) + } + if len(headerM) != 0 { + writeHeaderValueBlock(t, w, headerM) + } + if err != nil { + t.Fatalf("error writing headers: %v", err) + } + w.Flush() + return buf.Bytes() +} + +// writeControlFramePayloadBeforeHeaders writes to w N bytes, N being the number +// of bytes that control frame payload for that control frame is required to +// contain before the name/value header block. +func writeControlFramePayloadBeforeHeaders(t *testing.T, w io.Writer, typ ControlFrameType, streamID int) { + t.Helper() + switch typ { + case SYN_STREAM: + // needs 10 bytes in payload before any headers + if err := binary.Write(w, binary.BigEndian, uint32(streamID)); err != nil { + t.Fatalf("writing streamID: %v", err) + } + if err := binary.Write(w, binary.BigEndian, [6]byte{0}); err != nil { + t.Fatalf("writing payload: %v", err) + } + case SYN_REPLY: + // needs 4 bytes in payload before any headers + if err := binary.Write(w, binary.BigEndian, uint32(0)); err != nil { + t.Fatalf("writing payload: %v", err) + } + case SYN_PING: + // needs 4 bytes in payload + if err := binary.Write(w, binary.BigEndian, uint32(0)); err != nil { + t.Fatalf("writing payload: %v", err) + } + default: + t.Fatalf("unexpected frame type: %v", typ) + } +} + +// writeHeaderValue block takes http.Header and zlib writer, writes the headers +// as SPDY zlib compressed bytes to the writer. 
+// Adopted from https://github.com/moby/spdystream/blob/v0.2.0/spdy/write.go#L171-L198 (which is also what Kubernetes uses). +func writeHeaderValueBlock(t *testing.T, w io.Writer, headerM map[string]string) { + t.Helper() + h := header(headerM) + if err := binary.Write(w, binary.BigEndian, uint32(len(h))); err != nil { + t.Fatalf("error writing header block length: %v", err) + } + for name, values := range h { + if err := binary.Write(w, binary.BigEndian, uint32(len(name))); err != nil { + t.Fatalf("error writing name length for name %q: %v", name, err) + } + name = strings.ToLower(name) + if _, err := io.WriteString(w, name); err != nil { + t.Fatalf("error writing name %q: %v", name, err) + } + v := strings.Join(values, string(headerSep)) + if err := binary.Write(w, binary.BigEndian, uint32(len(v))); err != nil { + t.Fatalf("error writing value length for value %q: %v", v, err) + } + if _, err := io.WriteString(w, v); err != nil { + t.Fatalf("error writing value %q: %v", v, err) + } + } +} + +func header(hs map[string]string) http.Header { + h := make(http.Header, len(hs)) + for key, val := range hs { + h.Add(key, val) + } + return h +} diff --git a/cmd/k8s-operator/spdy-hijacker.go b/cmd/k8s-operator/spdy-hijacker.go new file mode 100644 index 000000000..5cdf97ae8 --- /dev/null +++ b/cmd/k8s-operator/spdy-hijacker.go @@ -0,0 +1,210 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "bufio" + "bytes" + "context" + "fmt" + "io" + "net" + "net/http" + "net/netip" + "strings" + + "github.com/pkg/errors" + "go.uber.org/zap" + "tailscale.com/client/tailscale/apitype" + "tailscale.com/tailcfg" + "tailscale.com/tsnet" + "tailscale.com/tstime" + "tailscale.com/util/multierr" +) + +// spdyHijacker implements [net/http.Hijacker] interface. +// It must be configured with an http request for a 'kubectl exec' session that +// needs to be recorded. 
It knows how to hijack the connection and configure for +// the session contents to be sent to a tsrecorder instance. +type spdyHijacker struct { + http.ResponseWriter + ts *tsnet.Server + req *http.Request + who *apitype.WhoIsResponse + log *zap.SugaredLogger + pod string // pod being exec-d + ns string // namespace of the pod being exec-d + addrs []netip.AddrPort // tsrecorder addresses + failOpen bool // whether to fail open if recording fails + connectToRecorder RecorderDialFn +} + +// RecorderDialFn dials the specified netip.AddrPorts that should be tsrecorder +// addresses. It tries to connect to recorder endpoints one by one, till one +// connection succeeds. In case of success, returns a list with a single +// successful recording attempt and an error channel. If the connection errors +// after having been established, an error is sent down the channel. +type RecorderDialFn func(context.Context, []netip.AddrPort, func(context.Context, string, string) (net.Conn, error)) (io.WriteCloser, []*tailcfg.SSHRecordingAttempt, <-chan error, error) + +// Hijack hijacks a 'kubectl exec' session and configures for the session +// contents to be sent to a recorder. +func (h *spdyHijacker) Hijack() (net.Conn, *bufio.ReadWriter, error) { + h.log.Infof("recorder addrs: %v, failOpen: %v", h.addrs, h.failOpen) + reqConn, brw, err := h.ResponseWriter.(http.Hijacker).Hijack() + if err != nil { + return nil, nil, fmt.Errorf("error hijacking connection: %w", err) + } + + conn, err := h.setUpRecording(context.Background(), reqConn) + if err != nil { + return nil, nil, fmt.Errorf("error setting up session recording: %w", err) + } + return conn, brw, nil +} + +// setupRecording attempts to connect to the recorders set via +// spdyHijacker.addrs. Returns conn from provided opts, wrapped in recording +// logic. If connecting to the recorder fails or an error is received during the +// session and spdyHijacker.failOpen is false, connection will be closed. 
+func (h *spdyHijacker) setUpRecording(ctx context.Context, conn net.Conn) (net.Conn, error) { + const ( + // https://docs.asciinema.org/manual/asciicast/v2/ + asciicastv2 = 2 + ) + var wc io.WriteCloser + h.log.Infof("kubectl exec session will be recorded, recorders: %v, fail open policy: %t", h.addrs, h.failOpen) + // TODO (irbekrm): send client a message that session will be recorded. + rw, _, errChan, err := h.connectToRecorder(ctx, h.addrs, h.ts.Dial) + if err != nil { + msg := fmt.Sprintf("error connecting to session recorders: %v", err) + if h.failOpen { + msg = msg + "; failure mode is 'fail open'; continuing session without recording." + h.log.Warnf(msg) + return conn, nil + } + msg = msg + "; failure mode is 'fail closed'; closing connection." + if err := closeConnWithWarning(conn, msg); err != nil { + return nil, multierr.New(errors.New(msg), err) + } + return nil, errors.New(msg) + } + + // TODO (irbekrm): log which recorder + h.log.Info("successfully connected to a session recorder") + wc = rw + cl := tstime.DefaultClock{} + lc := &spdyRemoteConnRecorder{ + log: h.log, + Conn: conn, + rec: &recorder{ + start: cl.Now(), + clock: cl, + failOpen: h.failOpen, + conn: wc, + }, + } + + qp := h.req.URL.Query() + ch := CastHeader{ + Version: asciicastv2, + Timestamp: lc.rec.start.Unix(), + Command: strings.Join(qp["command"], " "), + SrcNode: strings.TrimSuffix(h.who.Node.Name, "."), + SrcNodeID: h.who.Node.StableID, + Kubernetes: &Kubernetes{ + PodName: h.pod, + Namespace: h.ns, + }, + } + if !h.who.Node.IsTagged() { + ch.SrcNodeUser = h.who.UserProfile.LoginName + ch.SrcNodeUserID = h.who.Node.User + } else { + ch.SrcNodeTags = h.who.Node.Tags + } + lc.ch = ch + go func() { + var err error + select { + case <-ctx.Done(): + return + case err = <-errChan: + } + if err == nil { + h.log.Info("finished uploading the recording") + return + } + msg := fmt.Sprintf("connection to the session recorder errorred: %v;", err) + if h.failOpen { + msg += msg + "; failure 
mode is 'fail open'; continuing session without recording." + h.log.Info(msg) + return + } + msg += "; failure mode set to 'fail closed'; closing connection" + h.log.Error(msg) + lc.failed = true + // TODO (irbekrm): write a message to the client + if err := lc.Close(); err != nil { + h.log.Infof("error closing recorder connections: %v", err) + } + return + }() + return lc, nil +} + +// CastHeader is the asciicast header to be sent to the recorder at the start of +// the recording of a session. +// https://docs.asciinema.org/manual/asciicast/v2/#header +type CastHeader struct { + // Version is the asciinema file format version. + Version int `json:"version"` + + // Width is the terminal width in characters. + Width int `json:"width"` + + // Height is the terminal height in characters. + Height int `json:"height"` + + // Timestamp is the unix timestamp of when the recording started. + Timestamp int64 `json:"timestamp"` + + // Tailscale-specific fields: SrcNode is the full MagicDNS name of the + // tailnet node originating the connection, without the trailing dot. + SrcNode string `json:"srcNode"` + + // SrcNodeID is the node ID of the tailnet node originating the connection. + SrcNodeID tailcfg.StableNodeID `json:"srcNodeID"` + + // SrcNodeTags is the list of tags on the node originating the connection (if any). + SrcNodeTags []string `json:"srcNodeTags,omitempty"` + + // SrcNodeUserID is the user ID of the node originating the connection (if not tagged). + SrcNodeUserID tailcfg.UserID `json:"srcNodeUserID,omitempty"` // if not tagged + + // SrcNodeUser is the LoginName of the node originating the connection (if not tagged). + SrcNodeUser string `json:"srcNodeUser,omitempty"` + + Command string + + // Kubernetes-specific fields: + Kubernetes *Kubernetes `json:"kubernetes,omitempty"` +} + +// Kubernetes contains 'kubectl exec' session specific information for +// tsrecorder. 
+type Kubernetes struct { + PodName string + Namespace string +} + +func closeConnWithWarning(conn net.Conn, msg string) error { + b := io.NopCloser(bytes.NewBuffer([]byte(msg))) + resp := http.Response{Status: http.StatusText(http.StatusForbidden), StatusCode: http.StatusForbidden, Body: b} + if err := resp.Write(conn); err != nil { + return multierr.New(fmt.Errorf("error writing msg %q to conn: %v", msg, err), conn.Close()) + } + return conn.Close() +} diff --git a/cmd/k8s-operator/spdy-hijacker_test.go b/cmd/k8s-operator/spdy-hijacker_test.go new file mode 100644 index 000000000..7ac79d7f0 --- /dev/null +++ b/cmd/k8s-operator/spdy-hijacker_test.go @@ -0,0 +1,111 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "net/http" + "net/netip" + "net/url" + "testing" + "time" + + "go.uber.org/zap" + "tailscale.com/client/tailscale/apitype" + "tailscale.com/tailcfg" + "tailscale.com/tsnet" + "tailscale.com/tstest" +) + +func Test_SPDYHijacker(t *testing.T) { + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatal(err) + } + tests := []struct { + name string + failOpen bool + failRecorderConnect bool // fail initial connect to the recorder + failRecorderConnPostConnect bool // send error down the error channel + wantsConnClosed bool + wantsSetupErr bool + }{ + { + name: "setup succeeds, conn stays open", + }, + { + name: "setup fails, policy is to fail open, conn stays open", + failOpen: true, + failRecorderConnect: true, + }, + { + name: "setup fails, policy is to fail closed, conn is closed", + failRecorderConnect: true, + wantsSetupErr: true, + wantsConnClosed: true, + }, + { + name: "connection fails post-initial connect, policy is to fail open, conn stays open", + failRecorderConnPostConnect: true, + failOpen: true, + }, + { + name: "connection fails post-initial connect, policy is to fail closed, conn is closed", + 
failRecorderConnPostConnect: true, + wantsConnClosed: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tc := &testConn{} + ch := make(chan error) + h := &spdyHijacker{ + connectToRecorder: func(context.Context, []netip.AddrPort, func(context.Context, string, string) (net.Conn, error)) (wc io.WriteCloser, rec []*tailcfg.SSHRecordingAttempt, _ <-chan error, err error) { + if tt.failRecorderConnect { + err = errors.New("test") + } + return wc, rec, ch, err + }, + failOpen: tt.failOpen, + who: &apitype.WhoIsResponse{Node: &tailcfg.Node{}, UserProfile: &tailcfg.UserProfile{}}, + log: zl.Sugar(), + ts: &tsnet.Server{}, + req: &http.Request{URL: &url.URL{}}, + } + ctx := context.Background() + _, err := h.setUpRecording(ctx, tc) + if (err != nil) != tt.wantsSetupErr { + t.Errorf("spdyHijacker.setupRecording() error = %v, wantErr %v", err, tt.wantsSetupErr) + return + } + if tt.failRecorderConnPostConnect { + select { + case ch <- errors.New("err"): + case <-time.After(time.Second * 15): + t.Errorf("error from recorder conn was not read within 15 seconds") + } + } + timeout := time.Second * 20 + // TODO (irbekrm): cover case where an error is received + // over channel and the failure policy is to fail open + // (test that connection remains open over some period + // of time). 
+ if err := tstest.WaitFor(timeout, func() (err error) { + if tt.wantsConnClosed != tc.isClosed() { + return fmt.Errorf("got connection state: %t, wants connection state: %t", tc.isClosed(), tt.wantsConnClosed) + } + return nil + }); err != nil { + t.Errorf("connection did not reach the desired state within %s", timeout.String()) + } + ctx.Done() + }) + } +} diff --git a/cmd/k8s-operator/spdy-remote-conn-recorder.go b/cmd/k8s-operator/spdy-remote-conn-recorder.go new file mode 100644 index 000000000..563b2a241 --- /dev/null +++ b/cmd/k8s-operator/spdy-remote-conn-recorder.go @@ -0,0 +1,194 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "bytes" + "encoding/binary" + "encoding/json" + "fmt" + "net" + "net/http" + "sync" + "sync/atomic" + + "go.uber.org/zap" + corev1 "k8s.io/api/core/v1" +) + +// spdyRemoteConnRecorder is a wrapper around net.Conn. It reads the bytestream +// for a 'kubectl exec' session, sends session recording data to the configured +// recorder and forwards the raw bytes to the original destination. +type spdyRemoteConnRecorder struct { + net.Conn + // rec knows how to send data written to it to a tsrecorder instance. + rec *recorder + ch CastHeader + + stdoutStreamID atomic.Uint32 + stderrStreamID atomic.Uint32 + resizeStreamID atomic.Uint32 + + wmu sync.Mutex // sequences writes + closed bool + failed bool + + rmu sync.Mutex // sequences reads + writeCastHeaderOnce sync.Once + + zlibReqReader zlibReader + // writeBuf is used to store data written to the connection that has not + // yet been parsed as SPDY frames. + writeBuf bytes.Buffer + // readBuf is used to store data read from the connection that has not + // yet been parsed as SPDY frames. + readBuf bytes.Buffer + log *zap.SugaredLogger +} + +// Read reads bytes from the original connection and parses them as SPDY frames. 
// If the frame is a data frame for resize stream, sends resize message to the
// recorder. If the frame is a SYN_STREAM control frame that starts stdout,
// stderr or resize stream, store the stream ID.
func (c *spdyRemoteConnRecorder) Read(b []byte) (int, error) {
	c.rmu.Lock()
	defer c.rmu.Unlock()
	// Read raw bytes from the underlying connection; they are accumulated
	// in readBuf until at least one complete SPDY frame is available.
	n, err := c.Conn.Read(b)
	if err != nil {
		return n, fmt.Errorf("error reading from connection: %w", err)
	}
	c.readBuf.Write(b[:n])

	var sf spdyFrame
	ok, err := sf.Parse(c.readBuf.Bytes(), c.log)
	if err != nil {
		return 0, fmt.Errorf("error parsing data read from connection: %w", err)
	}
	if !ok {
		// The parsed data in the buffer will be processed together with
		// the new data on the next call to Read.
		return n, nil
	}
	c.readBuf.Next(len(sf.Raw)) // advance buffer past the parsed frame

	if !sf.Ctrl { // data frame
		switch sf.StreamID {
		case c.resizeStreamID.Load():
			// Resize messages update the terminal dimensions that
			// are recorded in the asciicast header.
			var err error
			var msg spdyResizeMsg
			if err = json.Unmarshal(sf.Payload, &msg); err != nil {
				return 0, fmt.Errorf("error umarshalling resize msg: %w", err)
			}
			c.ch.Width = msg.Width
			c.ch.Height = msg.Height
		}
		return n, nil
	}
	// We always want to parse the headers, even if we don't care about the
	// frame, as we need to advance the zlib reader otherwise we will get
	// garbage.
	header, err := sf.parseHeaders(&c.zlibReqReader, c.log)
	if err != nil {
		return 0, fmt.Errorf("error parsing frame headers: %w", err)
	}
	if sf.Type == SYN_STREAM {
		// Remember which stream ID maps to stdout/stderr/resize so data
		// frames on those streams can be handled appropriately.
		c.storeStreamID(sf, header)
	}
	return n, nil
}

// Write forwards the raw data of the latest parsed SPDY frame to the original
// destination. If the frame is an SPDY data frame, it also sends the payload to
// the connected session recorder.
func (c *spdyRemoteConnRecorder) Write(b []byte) (int, error) {
	c.wmu.Lock()
	defer c.wmu.Unlock()
	// Accumulate written bytes until at least one complete SPDY frame can
	// be parsed out of the buffer.
	c.writeBuf.Write(b)

	var sf spdyFrame
	ok, err := sf.Parse(c.writeBuf.Bytes(), c.log)
	if err != nil {
		return 0, fmt.Errorf("error parsing data: %w", err)
	}
	if !ok {
		// The parsed data in the buffer will be processed together with
		// the new data on the next call to Write.
		return len(b), nil
	}
	c.writeBuf.Next(len(sf.Raw)) // advance buffer past the parsed frame

	// If this is a stdout or stderr data frame, send its payload to the
	// session recorder.
	if !sf.Ctrl {
		switch sf.StreamID {
		case c.stdoutStreamID.Load(), c.stderrStreamID.Load():
			var err error
			// The asciicast header must precede any recorded output;
			// write it exactly once, before the first payload.
			c.writeCastHeaderOnce.Do(func() {
				var j []byte
				j, err = json.Marshal(c.ch)
				if err != nil {
					return
				}
				j = append(j, '\n')
				err = c.rec.writeCastLine(j)
				if err != nil {
					c.log.Errorf("received error from recorder: %v", err)
				}
			})
			if err != nil {
				return 0, fmt.Errorf("error writing CastHeader: %w", err)
			}
			if err := c.rec.Write(sf.Payload); err != nil {
				return 0, fmt.Errorf("error sending payload to session recorder: %w", err)
			}
		}
	}
	// Forward the whole frame to the original destination.
	_, err = c.Conn.Write(sf.Raw) // send to net.Conn
	// Report len(b) as written: the bytes are consumed into writeBuf even
	// when only a prefix of them formed the forwarded frame.
	return len(b), err
}

// Close flushes any buffered, not-yet-forwarded bytes (unless the recording
// failed), then closes the underlying connection and the recorder.
func (c *spdyRemoteConnRecorder) Close() error {
	c.wmu.Lock()
	defer c.wmu.Unlock()
	if c.closed {
		// Already closed; Close is idempotent.
		return nil
	}
	if !c.failed && c.writeBuf.Len() > 0 {
		c.Conn.Write(c.writeBuf.Bytes())
	}
	c.writeBuf.Reset()
	c.closed = true
	err := c.Conn.Close()
	c.rec.Close()
	return err
}

// storeStreamID parses a SYN_STREAM SPDY control frame and updates
// spdyRemoteConnRecorder to store the newly created stream's ID if it is one of
// the stream types we care about.
Storing stream_id:stream_type mapping allows +// us to parse received data frames (that have stream IDs) differently depening +// on which stream they belong to (i.e send data frame payload for stdout stream +// to session recorder). +func (c *spdyRemoteConnRecorder) storeStreamID(sf spdyFrame, header http.Header) { + const ( + streamTypeHeaderKey = "Streamtype" + ) + id := binary.BigEndian.Uint32(sf.Payload[0:4]) + switch header.Get(streamTypeHeaderKey) { + case corev1.StreamTypeStdout: + c.stdoutStreamID.Store(id) + case corev1.StreamTypeStderr: + c.stderrStreamID.Store(id) + case corev1.StreamTypeResize: + c.resizeStreamID.Store(id) + } +} + +type spdyResizeMsg struct { + Width int `json:"width"` + Height int `json:"height"` +} diff --git a/cmd/k8s-operator/spdy-remote-conn-recorder_test.go b/cmd/k8s-operator/spdy-remote-conn-recorder_test.go new file mode 100644 index 000000000..95f5a8bfc --- /dev/null +++ b/cmd/k8s-operator/spdy-remote-conn-recorder_test.go @@ -0,0 +1,326 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "bytes" + "encoding/json" + "net" + "reflect" + "sync" + "testing" + + "go.uber.org/zap" + "tailscale.com/tstest" + "tailscale.com/tstime" +) + +// Test_Writes tests that 1 or more Write calls to spdyRemoteConnRecorder +// results in the expected data being forwarded to the original destination and +// the session recorder. 
+func Test_Writes(t *testing.T) { + var stdoutStreamID, stderrStreamID uint32 = 1, 2 + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatal(err) + } + cl := tstest.NewClock(tstest.ClockOpts{}) + tests := []struct { + name string + inputs [][]byte + wantForwarded []byte + wantRecorded []byte + firstWrite bool + width int + height int + }{ + { + name: "single_write_control_frame_with_payload", + inputs: [][]byte{{0x80, 0x3, 0x0, 0x1, 0x0, 0x0, 0x0, 0x1, 0x5}}, + wantForwarded: []byte{0x80, 0x3, 0x0, 0x1, 0x0, 0x0, 0x0, 0x1, 0x5}, + }, + { + name: "two_writes_control_frame_with_leftover", + inputs: [][]byte{{0x80, 0x3, 0x0, 0x1}, {0x0, 0x0, 0x0, 0x1, 0x5, 0x80, 0x3}}, + wantForwarded: []byte{0x80, 0x3, 0x0, 0x1, 0x0, 0x0, 0x0, 0x1, 0x5}, + }, + { + name: "single_write_stdout_data_frame", + inputs: [][]byte{{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0}}, + wantForwarded: []byte{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0}, + }, + { + name: "single_write_stdout_data_frame_with_payload", + inputs: [][]byte{{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}}, + wantForwarded: []byte{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}, + wantRecorded: castLine(t, []byte{0x1, 0x2, 0x3, 0x4, 0x5}, cl), + }, + { + name: "single_write_stderr_data_frame_with_payload", + inputs: [][]byte{{0x0, 0x0, 0x0, 0x2, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}}, + wantForwarded: []byte{0x0, 0x0, 0x0, 0x2, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}, + wantRecorded: castLine(t, []byte{0x1, 0x2, 0x3, 0x4, 0x5}, cl), + }, + { + name: "single_data_frame_unknow_stream_with_payload", + inputs: [][]byte{{0x0, 0x0, 0x0, 0x7, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}}, + wantForwarded: []byte{0x0, 0x0, 0x0, 0x7, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}, + }, + { + name: "control_frame_and_data_frame_split_across_two_writes", + inputs: [][]byte{{0x80, 0x3, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1}, {0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}}, + 
wantForwarded: []byte{0x80, 0x3, 0x0, 0x1, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}, + wantRecorded: castLine(t, []byte{0x1, 0x2, 0x3, 0x4, 0x5}, cl), + }, + { + name: "single_first_write_stdout_data_frame_with_payload", + inputs: [][]byte{{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}}, + wantForwarded: []byte{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, 0x5, 0x1, 0x2, 0x3, 0x4, 0x5}, + wantRecorded: append(asciinemaResizeMsg(t, 10, 20), castLine(t, []byte{0x1, 0x2, 0x3, 0x4, 0x5}, cl)...), + width: 10, + height: 20, + firstWrite: true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tc := &testConn{} + sr := &testSessionRecorder{} + rec := &recorder{ + conn: sr, + clock: cl, + start: cl.Now(), + } + + c := &spdyRemoteConnRecorder{ + Conn: tc, + log: zl.Sugar(), + rec: rec, + ch: CastHeader{ + Width: tt.width, + Height: tt.height, + }, + } + if !tt.firstWrite { + // this test case does not intend to test that cast header gets written once + c.writeCastHeaderOnce.Do(func() {}) + } + + c.stdoutStreamID.Store(stdoutStreamID) + c.stderrStreamID.Store(stderrStreamID) + for i, input := range tt.inputs { + if _, err := c.Write(input); err != nil { + t.Errorf("[%d] spdyRemoteConnRecorder.Write() unexpected error %v", i, err) + } + } + + // Assert that the expected bytes have been forwarded to the original destination. + gotForwarded := tc.writeBuf.Bytes() + if !reflect.DeepEqual(gotForwarded, tt.wantForwarded) { + t.Errorf("expected bytes not forwarded, wants\n%v\ngot\n%v", tt.wantForwarded, gotForwarded) + } + + // Assert that the expected bytes have been forwarded to the session recorder. 
+ gotRecorded := sr.buf.Bytes() + if !reflect.DeepEqual(gotRecorded, tt.wantRecorded) { + t.Errorf("expected bytes not recorded, wants\n%v\ngot\n%v", tt.wantRecorded, gotRecorded) + } + }) + } +} + +// Test_Reads tests that 1 or more Read calls to spdyRemoteConnRecorder results +// in the expected data being forwarded to the original destination and the +// session recorder. +func Test_Reads(t *testing.T) { + zl, err := zap.NewDevelopment() + if err != nil { + t.Fatal(err) + } + cl := tstest.NewClock(tstest.ClockOpts{}) + var reader zlibReader + resizeMsg := resizeMsgBytes(t, 10, 20) + synStreamStdoutPayload := payload(t, map[string]string{"Streamtype": "stdout"}, SYN_STREAM, 1) + synStreamStderrPayload := payload(t, map[string]string{"Streamtype": "stderr"}, SYN_STREAM, 2) + synStreamResizePayload := payload(t, map[string]string{"Streamtype": "resize"}, SYN_STREAM, 3) + syn_stream_ctrl_header := []byte{0x80, 0x3, 0x0, 0x1, 0x0, 0x0, 0x0, uint8(len(synStreamStdoutPayload))} + + tests := []struct { + name string + inputs [][]byte + wantStdoutStreamID uint32 + wantStderrStreamID uint32 + wantResizeStreamID uint32 + wantWidth int + wantHeight int + resizeStreamIDBeforeRead uint32 + }{ + { + name: "resize_data_frame_single_read", + inputs: [][]byte{append([]byte{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, uint8(len(resizeMsg))}, resizeMsg...)}, + resizeStreamIDBeforeRead: 1, + wantWidth: 10, + wantHeight: 20, + }, + { + name: "resize_data_frame_two_reads", + inputs: [][]byte{{0x0, 0x0, 0x0, 0x1, 0x0, 0x0, 0x0, uint8(len(resizeMsg))}, resizeMsg}, + resizeStreamIDBeforeRead: 1, + wantWidth: 10, + wantHeight: 20, + }, + { + name: "syn_stream_ctrl_frame_stdout_single_read", + inputs: [][]byte{append(syn_stream_ctrl_header, synStreamStdoutPayload...)}, + wantStdoutStreamID: 1, + }, + { + name: "syn_stream_ctrl_frame_stderr_single_read", + inputs: [][]byte{append(syn_stream_ctrl_header, synStreamStderrPayload...)}, + wantStderrStreamID: 2, + }, + { + name: 
"syn_stream_ctrl_frame_resize_single_read", + inputs: [][]byte{append(syn_stream_ctrl_header, synStreamResizePayload...)}, + wantResizeStreamID: 3, + }, + { + name: "syn_stream_ctrl_frame_resize_four_reads_with_leftover", + inputs: [][]byte{syn_stream_ctrl_header, append(synStreamResizePayload, syn_stream_ctrl_header...), append(synStreamStderrPayload, syn_stream_ctrl_header...), append(synStreamStdoutPayload, 0x0, 0x3)}, + wantStdoutStreamID: 1, + wantStderrStreamID: 2, + wantResizeStreamID: 3, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + tc := &testConn{} + sr := &testSessionRecorder{} + rec := &recorder{ + conn: sr, + clock: cl, + start: cl.Now(), + } + c := &spdyRemoteConnRecorder{ + Conn: tc, + log: zl.Sugar(), + rec: rec, + } + c.resizeStreamID.Store(tt.resizeStreamIDBeforeRead) + + for i, input := range tt.inputs { + c.zlibReqReader = reader + tc.readBuf.Reset() + _, err := tc.readBuf.Write(input) + if err != nil { + t.Fatalf("writing bytes to test conn: %v", err) + } + _, err = c.Read(make([]byte, len(input))) + if err != nil { + t.Errorf("[%d] spdyRemoteConnRecorder.Read() resulted in an unexpected error: %v", i, err) + } + } + if id := c.resizeStreamID.Load(); id != tt.wantResizeStreamID && id != tt.resizeStreamIDBeforeRead { + t.Errorf("wants resizeStreamID: %d, got %d", tt.wantResizeStreamID, id) + } + if id := c.stderrStreamID.Load(); id != tt.wantStderrStreamID { + t.Errorf("wants stderrStreamID: %d, got %d", tt.wantStderrStreamID, id) + } + if id := c.stdoutStreamID.Load(); id != tt.wantStdoutStreamID { + t.Errorf("wants stdoutStreamID: %d, got %d", tt.wantStdoutStreamID, id) + } + if tt.wantHeight != 0 || tt.wantWidth != 0 { + if tt.wantWidth != c.ch.Width { + t.Errorf("wants width: %v, got %v", tt.wantWidth, c.ch.Width) + } + if tt.wantHeight != c.ch.Height { + t.Errorf("want height: %v, got %v", tt.wantHeight, c.ch.Height) + } + } + }) + } +} + +func castLine(t *testing.T, p []byte, clock tstime.Clock) []byte { + 
t.Helper() + j, err := json.Marshal([]any{ + clock.Now().Sub(clock.Now()).Seconds(), + "o", + string(p), + }) + if err != nil { + t.Fatalf("error marshalling cast line: %v", err) + } + return append(j, '\n') +} + +func resizeMsgBytes(t *testing.T, width, height int) []byte { + t.Helper() + bs, err := json.Marshal(spdyResizeMsg{Width: width, Height: height}) + if err != nil { + t.Fatalf("error marshalling resizeMsg: %v", err) + } + return bs +} + +func asciinemaResizeMsg(t *testing.T, width, height int) []byte { + t.Helper() + ch := CastHeader{ + Width: width, + Height: height, + } + bs, err := json.Marshal(ch) + if err != nil { + t.Fatalf("error marshalling CastHeader: %v", err) + } + return append(bs, '\n') +} + +type testConn struct { + net.Conn + // writeBuf contains whatever was send to the conn via Write. + writeBuf bytes.Buffer + // readBuf contains whatever was sent to the conn via Read. + readBuf bytes.Buffer + sync.RWMutex // protects the following + closed bool +} + +var _ net.Conn = &testConn{} + +func (tc *testConn) Read(b []byte) (int, error) { + return tc.readBuf.Read(b) +} + +func (tc *testConn) Write(b []byte) (int, error) { + return tc.writeBuf.Write(b) +} + +func (tc *testConn) Close() error { + tc.Lock() + defer tc.Unlock() + tc.closed = true + return nil +} +func (tc *testConn) isClosed() bool { + tc.Lock() + defer tc.Unlock() + return tc.closed +} + +type testSessionRecorder struct { + // buf holds data that was sent to the session recorder. 
+ buf bytes.Buffer +} + +func (t *testSessionRecorder) Write(b []byte) (int, error) { + return t.buf.Write(b) +} + +func (t *testSessionRecorder) Close() error { + t.buf.Reset() + return nil +} diff --git a/cmd/k8s-operator/zlib-reader.go b/cmd/k8s-operator/zlib-reader.go new file mode 100644 index 000000000..b29772be3 --- /dev/null +++ b/cmd/k8s-operator/zlib-reader.go @@ -0,0 +1,221 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +//go:build !plan9 + +package main + +import ( + "bytes" + "compress/zlib" + "io" +) + +// zlibReader contains functionality to parse zlib compressed SPDY data. +// See https://www.ietf.org/archive/id/draft-mbelshe-httpbis-spdy-00.txt section 2.6.10.1 +type zlibReader struct { + io.ReadCloser + underlying io.LimitedReader // zlib compressed SPDY data +} + +// Read decompresses zlibReader's underlying zlib compressed SPDY data and reads +// it into b. +func (z *zlibReader) Read(b []byte) (int, error) { + if z.ReadCloser == nil { + r, err := zlib.NewReaderDict(&z.underlying, spdyTxtDictionary) + if err != nil { + return 0, err + } + z.ReadCloser = r + } + return z.ReadCloser.Read(b) +} + +// Set sets zlibReader's underlying data. b must be zlib compressed SPDY data. +func (z *zlibReader) Set(b []byte) { + z.underlying.R = bytes.NewReader(b) + z.underlying.N = int64(len(b)) +} + +// spdyTxtDictionary is the dictionary defined in the SPDY spec. 
+// https://datatracker.ietf.org/doc/html/draft-mbelshe-httpbis-spdy-00#section-2.6.10.1 +var spdyTxtDictionary = []byte{ + 0x00, 0x00, 0x00, 0x07, 0x6f, 0x70, 0x74, 0x69, // - - - - o p t i + 0x6f, 0x6e, 0x73, 0x00, 0x00, 0x00, 0x04, 0x68, // o n s - - - - h + 0x65, 0x61, 0x64, 0x00, 0x00, 0x00, 0x04, 0x70, // e a d - - - - p + 0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x03, 0x70, // o s t - - - - p + 0x75, 0x74, 0x00, 0x00, 0x00, 0x06, 0x64, 0x65, // u t - - - - d e + 0x6c, 0x65, 0x74, 0x65, 0x00, 0x00, 0x00, 0x05, // l e t e - - - - + 0x74, 0x72, 0x61, 0x63, 0x65, 0x00, 0x00, 0x00, // t r a c e - - - + 0x06, 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x00, // - a c c e p t - + 0x00, 0x00, 0x0e, 0x61, 0x63, 0x63, 0x65, 0x70, // - - - a c c e p + 0x74, 0x2d, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65, // t - c h a r s e + 0x74, 0x00, 0x00, 0x00, 0x0f, 0x61, 0x63, 0x63, // t - - - - a c c + 0x65, 0x70, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f, // e p t - e n c o + 0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x0f, // d i n g - - - - + 0x61, 0x63, 0x63, 0x65, 0x70, 0x74, 0x2d, 0x6c, // a c c e p t - l + 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, 0x00, // a n g u a g e - + 0x00, 0x00, 0x0d, 0x61, 0x63, 0x63, 0x65, 0x70, // - - - a c c e p + 0x74, 0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x73, // t - r a n g e s + 0x00, 0x00, 0x00, 0x03, 0x61, 0x67, 0x65, 0x00, // - - - - a g e - + 0x00, 0x00, 0x05, 0x61, 0x6c, 0x6c, 0x6f, 0x77, // - - - a l l o w + 0x00, 0x00, 0x00, 0x0d, 0x61, 0x75, 0x74, 0x68, // - - - - a u t h + 0x6f, 0x72, 0x69, 0x7a, 0x61, 0x74, 0x69, 0x6f, // o r i z a t i o + 0x6e, 0x00, 0x00, 0x00, 0x0d, 0x63, 0x61, 0x63, // n - - - - c a c + 0x68, 0x65, 0x2d, 0x63, 0x6f, 0x6e, 0x74, 0x72, // h e - c o n t r + 0x6f, 0x6c, 0x00, 0x00, 0x00, 0x0a, 0x63, 0x6f, // o l - - - - c o + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, // n n e c t i o n + 0x00, 0x00, 0x00, 0x0c, 0x63, 0x6f, 0x6e, 0x74, // - - - - c o n t + 0x65, 0x6e, 0x74, 0x2d, 0x62, 0x61, 0x73, 0x65, // e n t - b a s e + 0x00, 0x00, 
0x00, 0x10, 0x63, 0x6f, 0x6e, 0x74, // - - - - c o n t + 0x65, 0x6e, 0x74, 0x2d, 0x65, 0x6e, 0x63, 0x6f, // e n t - e n c o + 0x64, 0x69, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x10, // d i n g - - - - + 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, // c o n t e n t - + 0x6c, 0x61, 0x6e, 0x67, 0x75, 0x61, 0x67, 0x65, // l a n g u a g e + 0x00, 0x00, 0x00, 0x0e, 0x63, 0x6f, 0x6e, 0x74, // - - - - c o n t + 0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x65, 0x6e, 0x67, // e n t - l e n g + 0x74, 0x68, 0x00, 0x00, 0x00, 0x10, 0x63, 0x6f, // t h - - - - c o + 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x2d, 0x6c, 0x6f, // n t e n t - l o + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, // c a t i o n - - + 0x00, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, // - - c o n t e n + 0x74, 0x2d, 0x6d, 0x64, 0x35, 0x00, 0x00, 0x00, // t - m d 5 - - - + 0x0d, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, // - c o n t e n t + 0x2d, 0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00, // - r a n g e - - + 0x00, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, // - - c o n t e n + 0x74, 0x2d, 0x74, 0x79, 0x70, 0x65, 0x00, 0x00, // t - t y p e - - + 0x00, 0x04, 0x64, 0x61, 0x74, 0x65, 0x00, 0x00, // - - d a t e - - + 0x00, 0x04, 0x65, 0x74, 0x61, 0x67, 0x00, 0x00, // - - e t a g - - + 0x00, 0x06, 0x65, 0x78, 0x70, 0x65, 0x63, 0x74, // - - e x p e c t + 0x00, 0x00, 0x00, 0x07, 0x65, 0x78, 0x70, 0x69, // - - - - e x p i + 0x72, 0x65, 0x73, 0x00, 0x00, 0x00, 0x04, 0x66, // r e s - - - - f + 0x72, 0x6f, 0x6d, 0x00, 0x00, 0x00, 0x04, 0x68, // r o m - - - - h + 0x6f, 0x73, 0x74, 0x00, 0x00, 0x00, 0x08, 0x69, // o s t - - - - i + 0x66, 0x2d, 0x6d, 0x61, 0x74, 0x63, 0x68, 0x00, // f - m a t c h - + 0x00, 0x00, 0x11, 0x69, 0x66, 0x2d, 0x6d, 0x6f, // - - - i f - m o + 0x64, 0x69, 0x66, 0x69, 0x65, 0x64, 0x2d, 0x73, // d i f i e d - s + 0x69, 0x6e, 0x63, 0x65, 0x00, 0x00, 0x00, 0x0d, // i n c e - - - - + 0x69, 0x66, 0x2d, 0x6e, 0x6f, 0x6e, 0x65, 0x2d, // i f - n o n e - + 0x6d, 0x61, 0x74, 0x63, 0x68, 0x00, 0x00, 0x00, // m a t c h - - - + 0x08, 
0x69, 0x66, 0x2d, 0x72, 0x61, 0x6e, 0x67, // - i f - r a n g + 0x65, 0x00, 0x00, 0x00, 0x13, 0x69, 0x66, 0x2d, // e - - - - i f - + 0x75, 0x6e, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, // u n m o d i f i + 0x65, 0x64, 0x2d, 0x73, 0x69, 0x6e, 0x63, 0x65, // e d - s i n c e + 0x00, 0x00, 0x00, 0x0d, 0x6c, 0x61, 0x73, 0x74, // - - - - l a s t + 0x2d, 0x6d, 0x6f, 0x64, 0x69, 0x66, 0x69, 0x65, // - m o d i f i e + 0x64, 0x00, 0x00, 0x00, 0x08, 0x6c, 0x6f, 0x63, // d - - - - l o c + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00, // a t i o n - - - + 0x0c, 0x6d, 0x61, 0x78, 0x2d, 0x66, 0x6f, 0x72, // - m a x - f o r + 0x77, 0x61, 0x72, 0x64, 0x73, 0x00, 0x00, 0x00, // w a r d s - - - + 0x06, 0x70, 0x72, 0x61, 0x67, 0x6d, 0x61, 0x00, // - p r a g m a - + 0x00, 0x00, 0x12, 0x70, 0x72, 0x6f, 0x78, 0x79, // - - - p r o x y + 0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, 0x74, // - a u t h e n t + 0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00, 0x00, // i c a t e - - - + 0x13, 0x70, 0x72, 0x6f, 0x78, 0x79, 0x2d, 0x61, // - p r o x y - a + 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x61, // u t h o r i z a + 0x74, 0x69, 0x6f, 0x6e, 0x00, 0x00, 0x00, 0x05, // t i o n - - - - + 0x72, 0x61, 0x6e, 0x67, 0x65, 0x00, 0x00, 0x00, // r a n g e - - - + 0x07, 0x72, 0x65, 0x66, 0x65, 0x72, 0x65, 0x72, // - r e f e r e r + 0x00, 0x00, 0x00, 0x0b, 0x72, 0x65, 0x74, 0x72, // - - - - r e t r + 0x79, 0x2d, 0x61, 0x66, 0x74, 0x65, 0x72, 0x00, // y - a f t e r - + 0x00, 0x00, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, // - - - s e r v e + 0x72, 0x00, 0x00, 0x00, 0x02, 0x74, 0x65, 0x00, // r - - - - t e - + 0x00, 0x00, 0x07, 0x74, 0x72, 0x61, 0x69, 0x6c, // - - - t r a i l + 0x65, 0x72, 0x00, 0x00, 0x00, 0x11, 0x74, 0x72, // e r - - - - t r + 0x61, 0x6e, 0x73, 0x66, 0x65, 0x72, 0x2d, 0x65, // a n s f e r - e + 0x6e, 0x63, 0x6f, 0x64, 0x69, 0x6e, 0x67, 0x00, // n c o d i n g - + 0x00, 0x00, 0x07, 0x75, 0x70, 0x67, 0x72, 0x61, // - - - u p g r a + 0x64, 0x65, 0x00, 0x00, 0x00, 0x0a, 0x75, 0x73, // d e - - - - u s + 
0x65, 0x72, 0x2d, 0x61, 0x67, 0x65, 0x6e, 0x74, // e r - a g e n t + 0x00, 0x00, 0x00, 0x04, 0x76, 0x61, 0x72, 0x79, // - - - - v a r y + 0x00, 0x00, 0x00, 0x03, 0x76, 0x69, 0x61, 0x00, // - - - - v i a - + 0x00, 0x00, 0x07, 0x77, 0x61, 0x72, 0x6e, 0x69, // - - - w a r n i + 0x6e, 0x67, 0x00, 0x00, 0x00, 0x10, 0x77, 0x77, // n g - - - - w w + 0x77, 0x2d, 0x61, 0x75, 0x74, 0x68, 0x65, 0x6e, // w - a u t h e n + 0x74, 0x69, 0x63, 0x61, 0x74, 0x65, 0x00, 0x00, // t i c a t e - - + 0x00, 0x06, 0x6d, 0x65, 0x74, 0x68, 0x6f, 0x64, // - - m e t h o d + 0x00, 0x00, 0x00, 0x03, 0x67, 0x65, 0x74, 0x00, // - - - - g e t - + 0x00, 0x00, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, // - - - s t a t u + 0x73, 0x00, 0x00, 0x00, 0x06, 0x32, 0x30, 0x30, // s - - - - 2 0 0 + 0x20, 0x4f, 0x4b, 0x00, 0x00, 0x00, 0x07, 0x76, // - O K - - - - v + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x00, 0x00, // e r s i o n - - + 0x00, 0x08, 0x48, 0x54, 0x54, 0x50, 0x2f, 0x31, // - - H T T P - 1 + 0x2e, 0x31, 0x00, 0x00, 0x00, 0x03, 0x75, 0x72, // - 1 - - - - u r + 0x6c, 0x00, 0x00, 0x00, 0x06, 0x70, 0x75, 0x62, // l - - - - p u b + 0x6c, 0x69, 0x63, 0x00, 0x00, 0x00, 0x0a, 0x73, // l i c - - - - s + 0x65, 0x74, 0x2d, 0x63, 0x6f, 0x6f, 0x6b, 0x69, // e t - c o o k i + 0x65, 0x00, 0x00, 0x00, 0x0a, 0x6b, 0x65, 0x65, // e - - - - k e e + 0x70, 0x2d, 0x61, 0x6c, 0x69, 0x76, 0x65, 0x00, // p - a l i v e - + 0x00, 0x00, 0x06, 0x6f, 0x72, 0x69, 0x67, 0x69, // - - - o r i g i + 0x6e, 0x31, 0x30, 0x30, 0x31, 0x30, 0x31, 0x32, // n 1 0 0 1 0 1 2 + 0x30, 0x31, 0x32, 0x30, 0x32, 0x32, 0x30, 0x35, // 0 1 2 0 2 2 0 5 + 0x32, 0x30, 0x36, 0x33, 0x30, 0x30, 0x33, 0x30, // 2 0 6 3 0 0 3 0 + 0x32, 0x33, 0x30, 0x33, 0x33, 0x30, 0x34, 0x33, // 2 3 0 3 3 0 4 3 + 0x30, 0x35, 0x33, 0x30, 0x36, 0x33, 0x30, 0x37, // 0 5 3 0 6 3 0 7 + 0x34, 0x30, 0x32, 0x34, 0x30, 0x35, 0x34, 0x30, // 4 0 2 4 0 5 4 0 + 0x36, 0x34, 0x30, 0x37, 0x34, 0x30, 0x38, 0x34, // 6 4 0 7 4 0 8 4 + 0x30, 0x39, 0x34, 0x31, 0x30, 0x34, 0x31, 0x31, // 0 9 4 1 0 4 1 1 
+ 0x34, 0x31, 0x32, 0x34, 0x31, 0x33, 0x34, 0x31, // 4 1 2 4 1 3 4 1 + 0x34, 0x34, 0x31, 0x35, 0x34, 0x31, 0x36, 0x34, // 4 4 1 5 4 1 6 4 + 0x31, 0x37, 0x35, 0x30, 0x32, 0x35, 0x30, 0x34, // 1 7 5 0 2 5 0 4 + 0x35, 0x30, 0x35, 0x32, 0x30, 0x33, 0x20, 0x4e, // 5 0 5 2 0 3 - N + 0x6f, 0x6e, 0x2d, 0x41, 0x75, 0x74, 0x68, 0x6f, // o n - A u t h o + 0x72, 0x69, 0x74, 0x61, 0x74, 0x69, 0x76, 0x65, // r i t a t i v e + 0x20, 0x49, 0x6e, 0x66, 0x6f, 0x72, 0x6d, 0x61, // - I n f o r m a + 0x74, 0x69, 0x6f, 0x6e, 0x32, 0x30, 0x34, 0x20, // t i o n 2 0 4 - + 0x4e, 0x6f, 0x20, 0x43, 0x6f, 0x6e, 0x74, 0x65, // N o - C o n t e + 0x6e, 0x74, 0x33, 0x30, 0x31, 0x20, 0x4d, 0x6f, // n t 3 0 1 - M o + 0x76, 0x65, 0x64, 0x20, 0x50, 0x65, 0x72, 0x6d, // v e d - P e r m + 0x61, 0x6e, 0x65, 0x6e, 0x74, 0x6c, 0x79, 0x34, // a n e n t l y 4 + 0x30, 0x30, 0x20, 0x42, 0x61, 0x64, 0x20, 0x52, // 0 0 - B a d - R + 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x34, 0x30, // e q u e s t 4 0 + 0x31, 0x20, 0x55, 0x6e, 0x61, 0x75, 0x74, 0x68, // 1 - U n a u t h + 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x34, 0x30, // o r i z e d 4 0 + 0x33, 0x20, 0x46, 0x6f, 0x72, 0x62, 0x69, 0x64, // 3 - F o r b i d + 0x64, 0x65, 0x6e, 0x34, 0x30, 0x34, 0x20, 0x4e, // d e n 4 0 4 - N + 0x6f, 0x74, 0x20, 0x46, 0x6f, 0x75, 0x6e, 0x64, // o t - F o u n d + 0x35, 0x30, 0x30, 0x20, 0x49, 0x6e, 0x74, 0x65, // 5 0 0 - I n t e + 0x72, 0x6e, 0x61, 0x6c, 0x20, 0x53, 0x65, 0x72, // r n a l - S e r + 0x76, 0x65, 0x72, 0x20, 0x45, 0x72, 0x72, 0x6f, // v e r - E r r o + 0x72, 0x35, 0x30, 0x31, 0x20, 0x4e, 0x6f, 0x74, // r 5 0 1 - N o t + 0x20, 0x49, 0x6d, 0x70, 0x6c, 0x65, 0x6d, 0x65, // - I m p l e m e + 0x6e, 0x74, 0x65, 0x64, 0x35, 0x30, 0x33, 0x20, // n t e d 5 0 3 - + 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x20, // S e r v i c e - + 0x55, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, // U n a v a i l a + 0x62, 0x6c, 0x65, 0x4a, 0x61, 0x6e, 0x20, 0x46, // b l e J a n - F + 0x65, 0x62, 0x20, 0x4d, 0x61, 0x72, 0x20, 0x41, // e b - M a r - 
A + 0x70, 0x72, 0x20, 0x4d, 0x61, 0x79, 0x20, 0x4a, // p r - M a y - J + 0x75, 0x6e, 0x20, 0x4a, 0x75, 0x6c, 0x20, 0x41, // u n - J u l - A + 0x75, 0x67, 0x20, 0x53, 0x65, 0x70, 0x74, 0x20, // u g - S e p t - + 0x4f, 0x63, 0x74, 0x20, 0x4e, 0x6f, 0x76, 0x20, // O c t - N o v - + 0x44, 0x65, 0x63, 0x20, 0x30, 0x30, 0x3a, 0x30, // D e c - 0 0 - 0 + 0x30, 0x3a, 0x30, 0x30, 0x20, 0x4d, 0x6f, 0x6e, // 0 - 0 0 - M o n + 0x2c, 0x20, 0x54, 0x75, 0x65, 0x2c, 0x20, 0x57, // - - T u e - - W + 0x65, 0x64, 0x2c, 0x20, 0x54, 0x68, 0x75, 0x2c, // e d - - T h u - + 0x20, 0x46, 0x72, 0x69, 0x2c, 0x20, 0x53, 0x61, // - F r i - - S a + 0x74, 0x2c, 0x20, 0x53, 0x75, 0x6e, 0x2c, 0x20, // t - - S u n - - + 0x47, 0x4d, 0x54, 0x63, 0x68, 0x75, 0x6e, 0x6b, // G M T c h u n k + 0x65, 0x64, 0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f, // e d - t e x t - + 0x68, 0x74, 0x6d, 0x6c, 0x2c, 0x69, 0x6d, 0x61, // h t m l - i m a + 0x67, 0x65, 0x2f, 0x70, 0x6e, 0x67, 0x2c, 0x69, // g e - p n g - i + 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x6a, 0x70, 0x67, // m a g e - j p g + 0x2c, 0x69, 0x6d, 0x61, 0x67, 0x65, 0x2f, 0x67, // - i m a g e - g + 0x69, 0x66, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69, // i f - a p p l i + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78, // c a t i o n - x + 0x6d, 0x6c, 0x2c, 0x61, 0x70, 0x70, 0x6c, 0x69, // m l - a p p l i + 0x63, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x78, // c a t i o n - x + 0x68, 0x74, 0x6d, 0x6c, 0x2b, 0x78, 0x6d, 0x6c, // h t m l - x m l + 0x2c, 0x74, 0x65, 0x78, 0x74, 0x2f, 0x70, 0x6c, // - t e x t - p l + 0x61, 0x69, 0x6e, 0x2c, 0x74, 0x65, 0x78, 0x74, // a i n - t e x t + 0x2f, 0x6a, 0x61, 0x76, 0x61, 0x73, 0x63, 0x72, // - j a v a s c r + 0x69, 0x70, 0x74, 0x2c, 0x70, 0x75, 0x62, 0x6c, // i p t - p u b l + 0x69, 0x63, 0x70, 0x72, 0x69, 0x76, 0x61, 0x74, // i c p r i v a t + 0x65, 0x6d, 0x61, 0x78, 0x2d, 0x61, 0x67, 0x65, // e m a x - a g e + 0x3d, 0x67, 0x7a, 0x69, 0x70, 0x2c, 0x64, 0x65, // - g z i p - d e + 0x66, 0x6c, 0x61, 0x74, 0x65, 0x2c, 0x73, 0x64, // f l a t e - 
s d + 0x63, 0x68, 0x63, 0x68, 0x61, 0x72, 0x73, 0x65, // c h c h a r s e + 0x74, 0x3d, 0x75, 0x74, 0x66, 0x2d, 0x38, 0x63, // t - u t f - 8 c + 0x68, 0x61, 0x72, 0x73, 0x65, 0x74, 0x3d, 0x69, // h a r s e t - i + 0x73, 0x6f, 0x2d, 0x38, 0x38, 0x35, 0x39, 0x2d, // s o - 8 8 5 9 - + 0x31, 0x2c, 0x75, 0x74, 0x66, 0x2d, 0x2c, 0x2a, // 1 - u t f - - - + 0x2c, 0x65, 0x6e, 0x71, 0x3d, 0x30, 0x2e, // - e n q - 0 - +} diff --git a/ssh/tailssh/connect.go b/ssh/tailssh/connect.go new file mode 100644 index 000000000..c8602eaf3 --- /dev/null +++ b/ssh/tailssh/connect.go @@ -0,0 +1,136 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +package tailssh + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "net/http" + "net/http/httptrace" + "net/netip" + "time" + + "tailscale.com/tailcfg" + "tailscale.com/util/multierr" +) + +// ConnectToRecorder connects to the recorder at any of the provided addresses. +// It returns the first successful response, or a multierr if all attempts fail. +// +// On success, it returns a WriteCloser that can be used to upload the +// recording, and a channel that will be sent an error (or nil) when the upload +// fails or completes. +// +// In both cases, a slice of SSHRecordingAttempts is returned which detail the +// attempted recorder IP and the error message, if the attempt failed. The +// attempts are in order the recorder(s) was attempted. If successful a +// successful connection is made, the last attempt in the slice is the +// attempt for connected recorder. 
+func ConnectToRecorder(ctx context.Context, recs []netip.AddrPort, dial func(context.Context, string, string) (net.Conn, error)) (io.WriteCloser, []*tailcfg.SSHRecordingAttempt, <-chan error, error) { + if len(recs) == 0 { + return nil, nil, nil, errors.New("no recorders configured") + } + // We use a special context for dialing the recorder, so that we can + // limit the time we spend dialing to 30 seconds and still have an + // unbounded context for the upload. + dialCtx, dialCancel := context.WithTimeout(ctx, 30*time.Second) + defer dialCancel() + hc, err := SessionRecordingClientForDialer(dialCtx, dial) + if err != nil { + return nil, nil, nil, err + } + + var errs []error + var attempts []*tailcfg.SSHRecordingAttempt + for _, ap := range recs { + attempt := &tailcfg.SSHRecordingAttempt{ + Recorder: ap, + } + attempts = append(attempts, attempt) + + // We dial the recorder and wait for it to send a 100-continue + // response before returning from this function. This ensures that + // the recorder is ready to accept the recording. + + // got100 is closed when we receive the 100-continue response. + got100 := make(chan struct{}) + ctx = httptrace.WithClientTrace(ctx, &httptrace.ClientTrace{ + Got100Continue: func() { + close(got100) + }, + }) + + pr, pw := io.Pipe() + req, err := http.NewRequestWithContext(ctx, "POST", fmt.Sprintf("http://%s:%d/record", ap.Addr(), ap.Port()), pr) + if err != nil { + err = fmt.Errorf("recording: error starting recording: %w", err) + attempt.FailureMessage = err.Error() + errs = append(errs, err) + continue + } + // We set the Expect header to 100-continue, so that the recorder + // will send a 100-continue response before it starts reading the + // request body. + req.Header.Set("Expect", "100-continue") + + // errChan is used to indicate the result of the request. 
+ errChan := make(chan error, 1) + go func() { + resp, err := hc.Do(req) + if err != nil { + errChan <- fmt.Errorf("recording: error starting recording: %w", err) + return + } + if resp.StatusCode != 200 { + errChan <- fmt.Errorf("recording: unexpected status: %v", resp.Status) + return + } + errChan <- nil + }() + select { + case <-got100: + case err := <-errChan: + // If we get an error before we get the 100-continue response, + // we need to try another recorder. + if err == nil { + // If the error is nil, we got a 200 response, which + // is unexpected as we haven't sent any data yet. + err = errors.New("recording: unexpected EOF") + } + attempt.FailureMessage = err.Error() + errs = append(errs, err) + continue + } + return pw, attempts, errChan, nil + } + return nil, attempts, nil, multierr.New(errs...) +} + +// SessionRecordingClientForDialer returns an http.Client that uses a clone of +// the provided Dialer's PeerTransport to dial connections. This is used to make +// requests to the session recording server to upload session recordings. It +// uses the provided dialCtx to dial connections, and limits a single dial to 5 +// seconds. 
+func SessionRecordingClientForDialer(dialCtx context.Context, dial func(context.Context, string, string) (net.Conn, error)) (*http.Client, error) { + tr := http.DefaultTransport.(*http.Transport).Clone() + + tr.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) { + perAttemptCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + go func() { + select { + case <-perAttemptCtx.Done(): + case <-dialCtx.Done(): + cancel() + } + }() + return dial(perAttemptCtx, network, addr) + } + return &http.Client{ + Transport: tr, + }, nil +} diff --git a/ssh/tailssh/tailssh.go b/ssh/tailssh/tailssh.go index 49dd71916..a088335d8 100644 --- a/ssh/tailssh/tailssh.go +++ b/ssh/tailssh/tailssh.go @@ -17,7 +17,6 @@ import ( "io" "net" "net/http" - "net/http/httptrace" "net/netip" "net/url" "os" @@ -45,7 +44,6 @@ import ( "tailscale.com/util/clientmetric" "tailscale.com/util/httpm" "tailscale.com/util/mak" - "tailscale.com/util/multierr" ) var ( @@ -1485,128 +1483,6 @@ type CastHeader struct { ConnectionID string `json:"connectionID"` } -// sessionRecordingClient returns an http.Client that uses srv.lb.Dialer() to -// dial connections. This is used to make requests to the session recording -// server to upload session recordings. -// It uses the provided dialCtx to dial connections, and limits a single dial -// to 5 seconds. 
-func (ss *sshSession) sessionRecordingClient(dialCtx context.Context) (*http.Client, error) { - dialer := ss.conn.srv.lb.Dialer() - if dialer == nil { - return nil, errors.New("no peer API transport") - } - tr := dialer.PeerAPITransport().Clone() - dialContextFn := tr.DialContext - - tr.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) { - perAttemptCtx, cancel := context.WithTimeout(ctx, 5*time.Second) - defer cancel() - go func() { - select { - case <-perAttemptCtx.Done(): - case <-dialCtx.Done(): - cancel() - } - }() - return dialContextFn(perAttemptCtx, network, addr) - } - return &http.Client{ - Transport: tr, - }, nil -} - -// connectToRecorder connects to the recorder at any of the provided addresses. -// It returns the first successful response, or a multierr if all attempts fail. -// -// On success, it returns a WriteCloser that can be used to upload the -// recording, and a channel that will be sent an error (or nil) when the upload -// fails or completes. -// -// In both cases, a slice of SSHRecordingAttempts is returned which detail the -// attempted recorder IP and the error message, if the attempt failed. The -// attempts are in order the recorder(s) was attempted. If successful a -// successful connection is made, the last attempt in the slice is the -// attempt for connected recorder. -func (ss *sshSession) connectToRecorder(ctx context.Context, recs []netip.AddrPort) (io.WriteCloser, []*tailcfg.SSHRecordingAttempt, <-chan error, error) { - if len(recs) == 0 { - return nil, nil, nil, errors.New("no recorders configured") - } - // We use a special context for dialing the recorder, so that we can - // limit the time we spend dialing to 30 seconds and still have an - // unbounded context for the upload. 
- dialCtx, dialCancel := context.WithTimeout(ctx, 30*time.Second) - defer dialCancel() - hc, err := ss.sessionRecordingClient(dialCtx) - if err != nil { - return nil, nil, nil, err - } - - var errs []error - var attempts []*tailcfg.SSHRecordingAttempt - for _, ap := range recs { - attempt := &tailcfg.SSHRecordingAttempt{ - Recorder: ap, - } - attempts = append(attempts, attempt) - - // We dial the recorder and wait for it to send a 100-continue - // response before returning from this function. This ensures that - // the recorder is ready to accept the recording. - - // got100 is closed when we receive the 100-continue response. - got100 := make(chan struct{}) - ctx = httptrace.WithClientTrace(ctx, &httptrace.ClientTrace{ - Got100Continue: func() { - close(got100) - }, - }) - - pr, pw := io.Pipe() - req, err := http.NewRequestWithContext(ctx, "POST", fmt.Sprintf("http://%s:%d/record", ap.Addr(), ap.Port()), pr) - if err != nil { - err = fmt.Errorf("recording: error starting recording: %w", err) - attempt.FailureMessage = err.Error() - errs = append(errs, err) - continue - } - // We set the Expect header to 100-continue, so that the recorder - // will send a 100-continue response before it starts reading the - // request body. - req.Header.Set("Expect", "100-continue") - - // errChan is used to indicate the result of the request. - errChan := make(chan error, 1) - go func() { - resp, err := hc.Do(req) - if err != nil { - errChan <- fmt.Errorf("recording: error starting recording: %w", err) - return - } - if resp.StatusCode != 200 { - errChan <- fmt.Errorf("recording: unexpected status: %v", resp.Status) - return - } - errChan <- nil - }() - select { - case <-got100: - case err := <-errChan: - // If we get an error before we get the 100-continue response, - // we need to try another recorder. - if err == nil { - // If the error is nil, we got a 200 response, which - // is unexpected as we haven't sent any data yet. 
- err = errors.New("recording: unexpected EOF") - } - attempt.FailureMessage = err.Error() - errs = append(errs, err) - continue - } - return pw, attempts, errChan, nil - } - return nil, attempts, nil, multierr.New(errs...) -} - func (ss *sshSession) openFileForRecording(now time.Time) (_ io.WriteCloser, err error) { varRoot := ss.conn.srv.lb.TailscaleVarRoot() if varRoot == "" { @@ -1672,7 +1548,7 @@ func (ss *sshSession) startNewRecording() (_ *recording, err error) { } else { var errChan <-chan error var attempts []*tailcfg.SSHRecordingAttempt - rec.out, attempts, errChan, err = ss.connectToRecorder(ctx, recorders) + rec.out, attempts, errChan, err = ConnectToRecorder(ctx, recorders, ss.conn.srv.lb.Dialer().UserDial) if err != nil { if onFailure != nil && onFailure.NotifyURL != "" && len(attempts) > 0 { eventType := tailcfg.SSHSessionRecordingFailed