Compare commits
35 Commits
02860854f2...1280208800
Author | SHA1
---|---
Percy Wegmann | 1280208800
Percy Wegmann | 43477e9cb3
Percy Wegmann | e789d73c6c
Brad Fitzpatrick | 1a963342c7
Will Norris | 80decd83c1
Maisem Ali | ed843e643f
Maisem Ali | fd6ba43b97
Will Norris | 46980c9664
Percy Wegmann | 817badf9ca
Percy Wegmann | 2cf764e998
Irbe Krumina | 406293682c
Claire Wang | 35872e86d2
Brad Fitzpatrick | b62cfc430a
Andrew Dunham | e9505e5432
Brad Fitzpatrick | e42c4396cf
Brad Fitzpatrick | 15fc6cd966
Brad Fitzpatrick | 1fe0983f2d
Brad Fitzpatrick | 46f3feae96
Brad Fitzpatrick | 4fa6cbec27
Brad Fitzpatrick | ee3bd4dbda
Percy Wegmann | a03cb866b4
Percy Wegmann | 745fb31bd4
Percy Wegmann | 07e783c7be
Percy Wegmann | 3349e86c0a
Percy Wegmann | 0c11fd978b
Percy Wegmann | 9d22ec0ba2
Irbe Krumina | cd633a7252
Andrew Dunham | f97d0ac994
Claire Wang | e0287a4b33
Irbe Krumina | 19b31ac9a6
Maisem Ali | a49ed2e145
Brad Fitzpatrick | 96712e10a7
Andrew Dunham | be663c84c1
Andrew Dunham | 10497acc95
Andrew Lytvynov | 13e1355546

@@ -32,7 +32,6 @@ jobs:
          - "ubuntu:18.04"
          - "ubuntu:20.04"
          - "ubuntu:22.04"
          - "ubuntu:22.10"
          - "ubuntu:23.04"
          - "elementary/docker:stable"
          - "elementary/docker:unstable"

@@ -91,7 +90,10 @@ jobs:
            || contains(matrix.image, 'parrotsec')
            || contains(matrix.image, 'kalilinux')
      - name: checkout
        uses: actions/checkout@v4
        # We cannot use v4, as it requires a newer glibc version than some of the
        # tested images provide. See
        # https://github.com/actions/checkout/issues/1487
        uses: actions/checkout@v3
      - name: run installer
        run: scripts/installer.sh
        # Package installation can fail in docker because systemd is not running

@@ -13,6 +13,7 @@ import (
	"testing"

	"tailscale.com/tstest"
	"tailscale.com/tstest/nettest"
)

func BenchmarkHandleBootstrapDNS(b *testing.B) {

@@ -55,6 +56,8 @@ func getBootstrapDNS(t *testing.T, q string) dnsEntryMap {
}

func TestUnpublishedDNS(t *testing.T) {
	nettest.SkipIfNoNetwork(t)

	const published = "login.tailscale.com"
	const unpublished = "log.tailscale.io"

@@ -191,7 +191,12 @@ func main() {
			http.Error(w, "derp server disabled", http.StatusNotFound)
		}))
	}
	mux.HandleFunc("/derp/probe", probeHandler)

	// These two endpoints are the same. Different versions of the clients
	// have assumed different paths over time, so we support both.
	mux.HandleFunc("/derp/probe", derphttp.ProbeHandler)
	mux.HandleFunc("/derp/latency-check", derphttp.ProbeHandler)

	go refreshBootstrapDNSLoop()
	mux.HandleFunc("/bootstrap-dns", tsweb.BrowserHeaderHandlerFunc(handleBootstrapDNS))
	mux.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {

@@ -370,17 +375,6 @@ func isChallengeChar(c rune) bool {
		c == '.' || c == '-' || c == '_'
}

// probeHandler is the endpoint that js/wasm clients hit to measure
// DERP latency, since they can't do UDP STUN queries.
func probeHandler(w http.ResponseWriter, r *http.Request) {
	switch r.Method {
	case "HEAD", "GET":
		w.Header().Set("Access-Control-Allow-Origin", "*")
	default:
		http.Error(w, "bogus probe method", http.StatusMethodNotAllowed)
	}
}

var validProdHostname = regexp.MustCompile(`^derp([^.]*)\.tailscale\.com\.?$`)

func prodAutocertHostPolicy(_ context.Context, host string) error {

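The net effect of these two derper hunks: the binary-local probeHandler is deleted, and both probe paths now point at the shared, exported derphttp.ProbeHandler, so old clients (/derp/probe) and new ones (/derp/latency-check) keep working. A minimal self-contained sketch of the resulting routing (the newProbeMux helper is illustrative, not part of the diff):

```go
package main

import (
	"net/http"

	"tailscale.com/derp/derphttp"
)

// newProbeMux mounts the one shared probe handler on both paths,
// mirroring what cmd/derper's main() does in the hunk above.
func newProbeMux() *http.ServeMux {
	mux := http.NewServeMux()
	mux.HandleFunc("/derp/probe", derphttp.ProbeHandler)
	mux.HandleFunc("/derp/latency-check", derphttp.ProbeHandler)
	return mux
}
```
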
@@ -36,7 +36,6 @@ const (
	// provided by a mounted Kubernetes Configmap. The Configmap mounted at
	// /config is the only supported way for configuring this nameserver.
	defaultDNSConfigDir    = "/config"
	defaultDNSFile         = "dns.json"
	kubeletMountedConfigLn = "..data"
)

@@ -331,9 +330,9 @@ func ensureWatcherForKubeConfigMap(ctx context.Context) chan string {
type configReaderFunc func() ([]byte, error)

// configMapConfigReader reads the desired nameserver configuration from a
// dns.json file in a ConfigMap mounted at /config.
// records.json file in a ConfigMap mounted at /config.
var configMapConfigReader configReaderFunc = func() ([]byte, error) {
	if contents, err := os.ReadFile(filepath.Join(defaultDNSConfigDir, defaultDNSFile)); err == nil {
	if contents, err := os.ReadFile(filepath.Join(defaultDNSConfigDir, operatorutils.DNSRecordsCMKey)); err == nil {
		return contents, nil
	} else if os.IsNotExist(err) {
		return nil, nil

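For context on the kubeletMountedConfigLn constant above: kubelet updates a mounted ConfigMap atomically by re-pointing a "..data" symlink inside the mount, so a watcher only needs to react to events on that one link rather than on every file in /config. A sketch of such a watch loop, assuming the fsnotify library (the helper below is illustrative, not the nameserver's actual implementation):

```go
package main

import (
	"log"
	"path/filepath"

	"github.com/fsnotify/fsnotify" // assumption: a typical choice for this pattern
)

// watchMountedConfig calls onChange whenever kubelet swaps the "..data"
// symlink, i.e. whenever the mounted ConfigMap's contents change.
func watchMountedConfig(dir string, onChange func()) error {
	w, err := fsnotify.NewWatcher()
	if err != nil {
		return err
	}
	defer w.Close()
	if err := w.Add(dir); err != nil {
		return err
	}
	for {
		select {
		case ev := <-w.Events:
			if filepath.Base(ev.Name) == "..data" { // matches kubeletMountedConfigLn
				onChange()
			}
		case err := <-w.Errors:
			log.Printf("watch error: %v", err)
		}
	}
}
```
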
@@ -53,6 +53,9 @@ rules:
- apiGroups: ["apps"]
  resources: ["statefulsets", "deployments"]
  verbs: ["*"]
- apiGroups: ["discovery.k8s.io"]
  resources: ["endpointslices"]
  verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding

@@ -17,7 +17,7 @@ spec:
  versions:
  - additionalPrinterColumns:
    - description: Service IP address of the nameserver
      jsonPath: .status.nameserverStatus.ip
      jsonPath: .status.nameserver.ip
      name: NameserverIP
      type: string
    name: v1alpha1

@@ -85,7 +85,7 @@ spec:
            x-kubernetes-list-map-keys:
            - type
            x-kubernetes-list-type: map
          nameserverStatus:
          nameserver:
            type: object
            properties:
              ip:

@@ -39,7 +39,7 @@ spec:
        type: object
        properties:
          metrics:
            description: Configuration for proxy metrics. Metrics are currently not supported for egress proxies and for Ingress proxies that have been configured with tailscale.com/experimental-forward-cluster-traffic-via-ingress annotation.
            description: Configuration for proxy metrics. Metrics are currently not supported for egress proxies and for Ingress proxies that have been configured with tailscale.com/experimental-forward-cluster-traffic-via-ingress annotation. Note that the metrics are currently considered unstable and will likely change in breaking ways in the future - we only recommend that you use those for debugging purposes.
            type: object
            required:
            - enable

@@ -0,0 +1,9 @@
apiVersion: tailscale.com/v1alpha1
kind: DNSConfig
metadata:
  name: ts-dns
spec:
  nameserver:
    image:
      repo: tailscale/k8s-nameserver
      tag: unstable-v1.65

@@ -1,4 +1,4 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: dnsconfig
  name: dnsrecords

@@ -26,12 +26,12 @@ spec:
          protocol: UDP
          containerPort: 1053
        volumeMounts:
        - name: dnsconfig
        - name: dnsrecords
          mountPath: /config
      restartPolicy: Always
      serviceAccount: nameserver
      serviceAccountName: nameserver
      volumes:
      - name: dnsconfig
      - name: dnsrecords
        configMap:
          name: dnsconfig
          name: dnsrecords

@@ -2,5 +2,3 @@ apiVersion: v1
kind: ServiceAccount
metadata:
  name: nameserver
imagePullSecrets:
- name: foo

@@ -176,7 +176,7 @@ spec:
  versions:
  - additionalPrinterColumns:
    - description: Service IP address of the nameserver
      jsonPath: .status.nameserverStatus.ip
      jsonPath: .status.nameserver.ip
      name: NameserverIP
      type: string
    name: v1alpha1

@@ -240,7 +240,7 @@ spec:
            x-kubernetes-list-map-keys:
            - type
            x-kubernetes-list-type: map
          nameserverStatus:
          nameserver:
            properties:
              ip:
                type: string

@@ -291,7 +291,7 @@ spec:
      description: Specification of the desired state of the ProxyClass resource. https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
      properties:
        metrics:
          description: Configuration for proxy metrics. Metrics are currently not supported for egress proxies and for Ingress proxies that have been configured with tailscale.com/experimental-forward-cluster-traffic-via-ingress annotation.
          description: Configuration for proxy metrics. Metrics are currently not supported for egress proxies and for Ingress proxies that have been configured with tailscale.com/experimental-forward-cluster-traffic-via-ingress annotation. Note that the metrics are currently considered unstable and will likely change in breaking ways in the future - we only recommend that you use those for debugging purposes.
          properties:
            enable:
              description: Setting enable to true will make the proxy serve Tailscale metrics at <pod-ip>:9001/debug/metrics. Defaults to false.

@@ -1394,6 +1394,14 @@ rules:
  - deployments
  verbs:
  - '*'
- apiGroups:
  - discovery.k8s.io
  resources:
  - endpointslices
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role

@@ -0,0 +1,337 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

//go:build !plan9

// tailscale-operator provides a way to expose services running in a Kubernetes
// cluster to your Tailnet and to make Tailscale nodes available to cluster
// workloads.
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"slices"

	"go.uber.org/zap"
	corev1 "k8s.io/api/core/v1"
	discoveryv1 "k8s.io/api/discovery/v1"
	networkingv1 "k8s.io/api/networking/v1"
	apiequality "k8s.io/apimachinery/pkg/api/equality"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/utils/net"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
	operatorutils "tailscale.com/k8s-operator"
	tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
	"tailscale.com/util/mak"
)

const (
	dnsRecordsRecocilerFinalizer = "tailscale.com/dns-records-reconciler"
	annotationTSMagicDNSName     = "tailscale.com/magic-dnsname"
)

// dnsRecordsReconciler knows how to update dnsrecords ConfigMap with DNS
// records.
// The records that it creates are:
//   - For tailscale Ingress, a mapping of the Ingress's MagicDNSName to the IP address of
//     the ingress proxy Pod.
//   - For egress proxies configured via tailscale.com/tailnet-fqdn annotation, a
//     mapping of the tailnet FQDN to the IP address of the egress proxy Pod.
//
// Records will only be created if there is exactly one ready
// tailscale.com/v1alpha1.DNSConfig instance in the cluster (so that we know
// that there is a ts.net nameserver deployed in the cluster).
type dnsRecordsReconciler struct {
	client.Client
	tsNamespace           string // namespace in which we provision tailscale resources
	logger                *zap.SugaredLogger
	isDefaultLoadBalancer bool // true if operator is the default ingress controller in this cluster
}

// Reconcile takes a reconcile.Request for a headless Service fronting a
// tailscale proxy and updates DNS Records in dnsrecords ConfigMap for the
// in-cluster ts.net nameserver if required.
func (dnsRR *dnsRecordsReconciler) Reconcile(ctx context.Context, req reconcile.Request) (res reconcile.Result, err error) {
	logger := dnsRR.logger.With("Service", req.NamespacedName)
	logger.Debugf("starting reconcile")
	defer logger.Debugf("reconcile finished")

	headlessSvc := new(corev1.Service)
	err = dnsRR.Client.Get(ctx, req.NamespacedName, headlessSvc)
	if apierrors.IsNotFound(err) {
		logger.Debugf("Service not found")
		return reconcile.Result{}, nil
	}
	if err != nil {
		return reconcile.Result{}, fmt.Errorf("failed to get Service: %w", err)
	}
	if !(isManagedByType(headlessSvc, "svc") || isManagedByType(headlessSvc, "ingress")) {
		logger.Debugf("Service is not a headless Service for a tailscale ingress or egress proxy; do nothing")
		return reconcile.Result{}, nil
	}

	if !headlessSvc.DeletionTimestamp.IsZero() {
		logger.Debug("Service is being deleted, clean up resources")
		return reconcile.Result{}, dnsRR.maybeCleanup(ctx, headlessSvc, logger)
	}

	// Check that there is a ts.net nameserver deployed to the cluster by
	// checking that there is a tailscale.com/v1alpha1.DNSConfig resource in a
	// Ready state.
	dnsCfgLst := new(tsapi.DNSConfigList)
	if err = dnsRR.List(ctx, dnsCfgLst); err != nil {
		return reconcile.Result{}, fmt.Errorf("error listing DNSConfigs: %w", err)
	}
	if len(dnsCfgLst.Items) == 0 {
		logger.Debugf("DNSConfig does not exist, not creating DNS records")
		return reconcile.Result{}, nil
	}
	if len(dnsCfgLst.Items) > 1 {
		logger.Errorf("Invalid cluster state - more than one DNSConfig found in cluster. Please ensure no more than one exists")
		return reconcile.Result{}, nil
	}
	dnsCfg := dnsCfgLst.Items[0]
	if !operatorutils.DNSCfgIsReady(&dnsCfg) {
		logger.Info("DNSConfig is not ready yet, waiting...")
		return reconcile.Result{}, nil
	}

	return reconcile.Result{}, dnsRR.maybeProvision(ctx, headlessSvc, logger)
}

// maybeProvision ensures that dnsrecords ConfigMap contains a record for the
// proxy associated with the headless Service.
// The record is only provisioned if the proxy is for a tailscale Ingress or
// egress configured via tailscale.com/tailnet-fqdn annotation.
//
// For Ingress, the record is a mapping between the MagicDNSName of the Ingress, retrieved from
// ingress.status.loadBalancer.ingress.hostname field and the proxy Pod IP addresses
// retrieved from the EndpointSlice associated with this headless Service, i.e.
// Records{IP4: {<MagicDNS name of the Ingress>: <[IPs of the ingress proxy Pods]>}}
//
// For egress, the record is a mapping between the tailscale.com/tailnet-fqdn
// annotation and the proxy Pod IP addresses, retrieved from the EndpointSlice
// associated with this headless Service, i.e.
// Records{IP4: {<tailscale.com/tailnet-fqdn>: <[IPs of the egress proxy Pods]>}}
//
// If records need to be created for this proxy, maybeProvision will also:
//   - update the headless Service with a tailscale.com/magic-dnsname annotation
//   - update the headless Service with a finalizer
func (dnsRR *dnsRecordsReconciler) maybeProvision(ctx context.Context, headlessSvc *corev1.Service, logger *zap.SugaredLogger) error {
	if headlessSvc == nil {
		logger.Info("[unexpected] maybeProvision called with a nil Service")
		return nil
	}
	isEgressFQDNSvc, err := dnsRR.isSvcForFQDNEgressProxy(ctx, headlessSvc)
	if err != nil {
		return fmt.Errorf("error checking whether the Service is for an egress proxy: %w", err)
	}
	if !(isEgressFQDNSvc || isManagedByType(headlessSvc, "ingress")) {
		logger.Debug("Service is not fronting a proxy that we create DNS records for; do nothing")
		return nil
	}
	fqdn, err := dnsRR.fqdnForDNSRecord(ctx, headlessSvc, logger)
	if err != nil {
		return fmt.Errorf("error determining DNS name for record: %w", err)
	}
	if fqdn == "" {
		logger.Debugf("MagicDNS name does not (yet) exist, not provisioning DNS record")
		return nil // a new reconcile will be triggered once it's added
	}

	oldHeadlessSvc := headlessSvc.DeepCopy()
	// Ensure that headless Service is annotated with a finalizer to help
	// with records cleanup when proxy resources are deleted.
	if !slices.Contains(headlessSvc.Finalizers, dnsRecordsRecocilerFinalizer) {
		headlessSvc.Finalizers = append(headlessSvc.Finalizers, dnsRecordsRecocilerFinalizer)
	}
	// Ensure that headless Service is annotated with the current MagicDNS
	// name to help with records cleanup when proxy resources are deleted or
	// MagicDNS name changes.
	oldFqdn := headlessSvc.Annotations[annotationTSMagicDNSName]
	if oldFqdn != "" && oldFqdn != fqdn { // i.e. the user has changed the value of the tailscale.com/tailnet-fqdn annotation
		logger.Debugf("MagicDNS name has changed, removing record for %s", oldFqdn)
		updateFunc := func(rec *operatorutils.Records) {
			delete(rec.IP4, oldFqdn)
		}
		if err = dnsRR.updateDNSConfig(ctx, updateFunc); err != nil {
			return fmt.Errorf("error removing record for %s: %w", oldFqdn, err)
		}
	}
	mak.Set(&headlessSvc.Annotations, annotationTSMagicDNSName, fqdn)
	if !apiequality.Semantic.DeepEqual(oldHeadlessSvc, headlessSvc) {
		logger.Infof("provisioning DNS record for MagicDNS name: %s", fqdn) // this will be printed exactly once
		if err := dnsRR.Update(ctx, headlessSvc); err != nil {
			return fmt.Errorf("error updating proxy headless Service metadata: %w", err)
		}
	}

	// Get the Pod IP addresses for the proxy from the EndpointSlice for the
	// headless Service.
	labels := map[string]string{discoveryv1.LabelServiceName: headlessSvc.Name} // https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/#ownership
	eps, err := getSingleObject[discoveryv1.EndpointSlice](ctx, dnsRR.Client, dnsRR.tsNamespace, labels)
	if err != nil {
		return fmt.Errorf("error getting the EndpointSlice for the proxy's headless Service: %w", err)
	}
	if eps == nil {
		logger.Debugf("proxy's headless Service EndpointSlice does not yet exist. We will reconcile again once it's created")
		return nil
	}
	// An EndpointSlice for a Service can have a list of endpoints that each
	// can have multiple addresses - these are the IP addresses of any Pods
	// selected by that Service. Pick all the IPv4 addresses.
	ips := make([]string, 0)
	for _, ep := range eps.Endpoints {
		for _, ip := range ep.Addresses {
			if !net.IsIPv4String(ip) {
				logger.Infof("EndpointSlice contains IP address %q that is not IPv4, ignoring. Currently only IPv4 is supported", ip)
			} else {
				ips = append(ips, ip)
			}
		}
	}
	if len(ips) == 0 {
		logger.Debugf("EndpointSlice for the Service contains no IPv4 addresses. We will reconcile again once they are created.")
		return nil
	}
	updateFunc := func(rec *operatorutils.Records) {
		mak.Set(&rec.IP4, fqdn, ips)
	}
	if err = dnsRR.updateDNSConfig(ctx, updateFunc); err != nil {
		return fmt.Errorf("error updating DNS records: %w", err)
	}
	return nil
}

// maybeCleanup ensures that the DNS record for the proxy has been removed from
// dnsrecords ConfigMap and the tailscale.com/dns-records-reconciler finalizer
// has been removed from the Service. If the record is not found in the
// ConfigMap, the ConfigMap does not exist, or the Service does not have
// tailscale.com/magic-dnsname annotation, just remove the finalizer.
func (h *dnsRecordsReconciler) maybeCleanup(ctx context.Context, headlessSvc *corev1.Service, logger *zap.SugaredLogger) error {
	ix := slices.Index(headlessSvc.Finalizers, dnsRecordsRecocilerFinalizer)
	if ix == -1 {
		logger.Debugf("no finalizer, nothing to do")
		return nil
	}
	cm := &corev1.ConfigMap{}
	err := h.Client.Get(ctx, types.NamespacedName{Name: operatorutils.DNSRecordsCMName, Namespace: h.tsNamespace}, cm)
	if apierrors.IsNotFound(err) {
		logger.Debug("'dnsrecords' ConfigMap not found")
		return h.removeHeadlessSvcFinalizer(ctx, headlessSvc)
	}
	if err != nil {
		return fmt.Errorf("error retrieving 'dnsrecords' ConfigMap: %w", err)
	}
	if cm.Data == nil {
		logger.Debug("'dnsrecords' ConfigMap contains no records")
		return h.removeHeadlessSvcFinalizer(ctx, headlessSvc)
	}
	_, ok := cm.Data[operatorutils.DNSRecordsCMKey]
	if !ok {
		logger.Debug("'dnsrecords' ConfigMap contains no records")
		return h.removeHeadlessSvcFinalizer(ctx, headlessSvc)
	}
	fqdn, _ := headlessSvc.GetAnnotations()[annotationTSMagicDNSName]
	if fqdn == "" {
		return h.removeHeadlessSvcFinalizer(ctx, headlessSvc)
	}
	logger.Infof("removing DNS record for MagicDNS name %s", fqdn)
	updateFunc := func(rec *operatorutils.Records) {
		delete(rec.IP4, fqdn)
	}
	if err = h.updateDNSConfig(ctx, updateFunc); err != nil {
		return fmt.Errorf("error updating DNS config: %w", err)
	}
	return h.removeHeadlessSvcFinalizer(ctx, headlessSvc)
}

func (dnsRR *dnsRecordsReconciler) removeHeadlessSvcFinalizer(ctx context.Context, headlessSvc *corev1.Service) error {
	idx := slices.Index(headlessSvc.Finalizers, dnsRecordsRecocilerFinalizer)
	if idx == -1 {
		return nil
	}
	headlessSvc.Finalizers = append(headlessSvc.Finalizers[:idx], headlessSvc.Finalizers[idx+1:]...)
	return dnsRR.Update(ctx, headlessSvc)
}

// fqdnForDNSRecord returns MagicDNS name associated with a given headless Service.
// If the headless Service is for a tailscale Ingress proxy, returns ingress.status.loadBalancer.ingress.hostname.
// If the headless Service is for a tailscale egress proxy configured via tailscale.com/tailnet-fqdn annotation, returns the annotation value.
// This function is not expected to be called with headless Services for other
// proxy types, or any other Services, but it just returns an empty string if
// that happens.
func (dnsRR *dnsRecordsReconciler) fqdnForDNSRecord(ctx context.Context, headlessSvc *corev1.Service, logger *zap.SugaredLogger) (string, error) {
	parentName := parentFromObjectLabels(headlessSvc)
	if isManagedByType(headlessSvc, "ingress") {
		ing := new(networkingv1.Ingress)
		if err := dnsRR.Get(ctx, parentName, ing); err != nil {
			return "", err
		}
		if len(ing.Status.LoadBalancer.Ingress) == 0 {
			return "", nil
		}
		return ing.Status.LoadBalancer.Ingress[0].Hostname, nil
	}
	if isManagedByType(headlessSvc, "svc") {
		svc := new(corev1.Service)
		if err := dnsRR.Get(ctx, parentName, svc); apierrors.IsNotFound(err) {
			logger.Infof("[unexpected] parent Service for egress proxy %s not found", headlessSvc.Name)
			return "", nil
		} else if err != nil {
			return "", err
		}
		return svc.Annotations[AnnotationTailnetTargetFQDN], nil
	}
	return "", nil
}

// updateDNSConfig runs the provided update function against dnsrecords
// ConfigMap. At this point the in-cluster ts.net nameserver is expected to be
// successfully created together with the ConfigMap.
func (dnsRR *dnsRecordsReconciler) updateDNSConfig(ctx context.Context, update func(*operatorutils.Records)) error {
	cm := &corev1.ConfigMap{}
	err := dnsRR.Get(ctx, types.NamespacedName{Name: operatorutils.DNSRecordsCMName, Namespace: dnsRR.tsNamespace}, cm)
	if apierrors.IsNotFound(err) {
		dnsRR.logger.Info("[unexpected] dnsrecords ConfigMap not found in cluster. Not updating DNS records. Please open an issue and attach operator logs.")
		return nil
	}
	if err != nil {
		return fmt.Errorf("error retrieving dnsrecords ConfigMap: %w", err)
	}
	dnsRecords := operatorutils.Records{Version: operatorutils.Alpha1Version, IP4: map[string][]string{}}
	if cm.Data != nil && cm.Data[operatorutils.DNSRecordsCMKey] != "" {
		if err := json.Unmarshal([]byte(cm.Data[operatorutils.DNSRecordsCMKey]), &dnsRecords); err != nil {
			return err
		}
	}
	update(&dnsRecords)
	dnsRecordsBs, err := json.Marshal(dnsRecords)
	if err != nil {
		return fmt.Errorf("error marshalling DNS records: %w", err)
	}
	mak.Set(&cm.Data, operatorutils.DNSRecordsCMKey, string(dnsRecordsBs))
	return dnsRR.Update(ctx, cm)
}

// isSvcForFQDNEgressProxy returns true if the Service is a headless Service
// created for a proxy for a tailscale egress Service configured via
// tailscale.com/tailnet-fqdn annotation.
func (dnsRR *dnsRecordsReconciler) isSvcForFQDNEgressProxy(ctx context.Context, svc *corev1.Service) (bool, error) {
	if !isManagedByType(svc, "svc") {
		return false, nil
	}
	parentName := parentFromObjectLabels(svc)
	parentSvc := new(corev1.Service)
	if err := dnsRR.Get(ctx, parentName, parentSvc); apierrors.IsNotFound(err) {
		return false, nil
	} else if err != nil {
		return false, err
	}
	annots := parentSvc.Annotations
	return annots != nil && annots[AnnotationTailnetTargetFQDN] != "", nil
}

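updateDNSConfig above does a read-modify-write of the records payload stored under the ConfigMap's records.json key. A standalone sketch of what that payload looks like; the field names of operatorutils.Records (Version, IP4) are inferred from their usage in this file, and the JSON tags and the example hostname are assumptions:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// records mirrors how operatorutils.Records is used above: a version
// marker plus a map of FQDN -> IPv4 addresses.
type records struct {
	Version string              `json:"version"`
	IP4     map[string][]string `json:"ip4"`
}

func main() {
	r := records{
		Version: "v1alpha1",
		IP4: map[string][]string{
			"myingress.tailnetxyz.ts.net": {"10.0.0.5"}, // hypothetical record
		},
	}
	bs, _ := json.MarshalIndent(r, "", "  ")
	fmt.Println(string(bs)) // roughly what ends up in the dnsrecords ConfigMap
}
```
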
@@ -0,0 +1,198 @@
// Copyright (c) Tailscale Inc & AUTHORS
// SPDX-License-Identifier: BSD-3-Clause

//go:build !plan9

package main

import (
	"context"
	"encoding/json"
	"testing"

	"github.com/google/go-cmp/cmp"
	"go.uber.org/zap"
	corev1 "k8s.io/api/core/v1"
	discoveryv1 "k8s.io/api/discovery/v1"
	networkingv1 "k8s.io/api/networking/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
	operatorutils "tailscale.com/k8s-operator"
	tsapi "tailscale.com/k8s-operator/apis/v1alpha1"
	"tailscale.com/tstest"
	"tailscale.com/types/ptr"
)

func TestDNSRecordsReconciler(t *testing.T) {
	// Preconfigure a cluster with a DNSConfig
	dnsConfig := &tsapi.DNSConfig{
		ObjectMeta: metav1.ObjectMeta{
			Name: "test",
		},
		TypeMeta: metav1.TypeMeta{Kind: "DNSConfig"},
		Spec: tsapi.DNSConfigSpec{
			Nameserver: &tsapi.Nameserver{},
		}}
	ing := &networkingv1.Ingress{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "ts-ingress",
			Namespace: "test",
		},
		Spec: networkingv1.IngressSpec{
			IngressClassName: ptr.To("tailscale"),
		},
		Status: networkingv1.IngressStatus{
			LoadBalancer: networkingv1.IngressLoadBalancerStatus{
				Ingress: []networkingv1.IngressLoadBalancerIngress{{
					Hostname: "cluster.ingress.ts.net"}},
			},
		},
	}
	cm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "dnsrecords", Namespace: "tailscale"}}
	fc := fake.NewClientBuilder().
		WithScheme(tsapi.GlobalScheme).
		WithObjects(cm).
		WithObjects(dnsConfig).
		WithObjects(ing).
		WithStatusSubresource(dnsConfig, ing).
		Build()
	zl, err := zap.NewDevelopment()
	if err != nil {
		t.Fatal(err)
	}
	cl := tstest.NewClock(tstest.ClockOpts{})
	// Set the ready condition of the DNSConfig
	mustUpdateStatus[tsapi.DNSConfig](t, fc, "", "test", func(c *tsapi.DNSConfig) {
		operatorutils.SetDNSConfigCondition(c, tsapi.NameserverReady, metav1.ConditionTrue, reasonNameserverCreated, reasonNameserverCreated, 0, cl, zl.Sugar())
	})
	dnsRR := &dnsRecordsReconciler{
		Client:      fc,
		logger:      zl.Sugar(),
		tsNamespace: "tailscale",
	}

	// 1. DNS record is created for an egress proxy configured via
	// tailscale.com/tailnet-fqdn annotation
	egressSvcFQDN := &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:        "egress-fqdn",
			Namespace:   "test",
			Annotations: map[string]string{"tailscale.com/tailnet-fqdn": "foo.bar.ts.net"},
		},
		Spec: corev1.ServiceSpec{
			ExternalName: "unused",
			Type:         corev1.ServiceTypeExternalName,
		},
	}
	headlessForEgressSvcFQDN := headlessSvcForParent(egressSvcFQDN, "svc") // create the proxy headless Service
	ep := endpointSliceForService(headlessForEgressSvcFQDN, "10.9.8.7")
	mustCreate(t, fc, egressSvcFQDN)
	mustCreate(t, fc, headlessForEgressSvcFQDN)
	mustCreate(t, fc, ep)
	expectReconciled(t, dnsRR, "tailscale", "egress-fqdn") // dns-records-reconciler should reconcile the headless Service
	// ConfigMap should now have a record for foo.bar.ts.net -> 10.9.8.7
	wantHosts := map[string][]string{"foo.bar.ts.net": {"10.9.8.7"}}
	expectHostsRecords(t, fc, wantHosts)

	// 2. DNS record is updated if tailscale.com/tailnet-fqdn annotation's
	// value changes
	mustUpdate(t, fc, "test", "egress-fqdn", func(svc *corev1.Service) {
		svc.Annotations["tailscale.com/tailnet-fqdn"] = "baz.bar.ts.net"
	})
	expectReconciled(t, dnsRR, "tailscale", "egress-fqdn") // dns-records-reconciler should reconcile the headless Service
	wantHosts = map[string][]string{"baz.bar.ts.net": {"10.9.8.7"}}
	expectHostsRecords(t, fc, wantHosts)

	// 3. DNS record is updated if the IP address of the proxy Pod changes.
	ep = endpointSliceForService(headlessForEgressSvcFQDN, "10.6.5.4")
	mustUpdate(t, fc, ep.Namespace, ep.Name, func(ep *discoveryv1.EndpointSlice) {
		ep.Endpoints[0].Addresses = []string{"10.6.5.4"}
	})
	expectReconciled(t, dnsRR, "tailscale", "egress-fqdn") // dns-records-reconciler should reconcile the headless Service
	wantHosts = map[string][]string{"baz.bar.ts.net": {"10.6.5.4"}}
	expectHostsRecords(t, fc, wantHosts)

	// 4. DNS record is created for an ingress proxy configured via Ingress
	headlessForIngress := headlessSvcForParent(ing, "ingress")
	ep = endpointSliceForService(headlessForIngress, "10.9.8.7")
	mustCreate(t, fc, headlessForIngress)
	mustCreate(t, fc, ep)
	expectReconciled(t, dnsRR, "tailscale", "ts-ingress") // dns-records-reconciler should reconcile the headless Service
	wantHosts["cluster.ingress.ts.net"] = []string{"10.9.8.7"}
	expectHostsRecords(t, fc, wantHosts)

	// 5. DNS records are updated if Ingress's MagicDNS name changes (i.e. users changed spec.tls.hosts[0])
	t.Log("test case 5")
	mustUpdateStatus(t, fc, "test", "ts-ingress", func(ing *networkingv1.Ingress) {
		ing.Status.LoadBalancer.Ingress[0].Hostname = "another.ingress.ts.net"
	})
	expectReconciled(t, dnsRR, "tailscale", "ts-ingress") // dns-records-reconciler should reconcile the headless Service
	delete(wantHosts, "cluster.ingress.ts.net")
	wantHosts["another.ingress.ts.net"] = []string{"10.9.8.7"}
	expectHostsRecords(t, fc, wantHosts)

	// 6. DNS records are updated if Ingress proxy's Pod IP changes
	mustUpdate(t, fc, ep.Namespace, ep.Name, func(ep *discoveryv1.EndpointSlice) {
		ep.Endpoints[0].Addresses = []string{"7.8.9.10"}
	})
	expectReconciled(t, dnsRR, "tailscale", "ts-ingress")
	wantHosts["another.ingress.ts.net"] = []string{"7.8.9.10"}
	expectHostsRecords(t, fc, wantHosts)
}

func headlessSvcForParent(o client.Object, typ string) *corev1.Service {
	return &corev1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name:      o.GetName(),
			Namespace: "tailscale",
			Labels: map[string]string{
				LabelManaged:         "true",
				LabelParentName:      o.GetName(),
				LabelParentNamespace: o.GetNamespace(),
				LabelParentType:      typ,
			},
		},
		Spec: corev1.ServiceSpec{
			ClusterIP: "None",
			Type:      corev1.ServiceTypeClusterIP,
			Selector:  map[string]string{"foo": "bar"},
		},
	}
}

func endpointSliceForService(svc *corev1.Service, ip string) *discoveryv1.EndpointSlice {
	return &discoveryv1.EndpointSlice{
		ObjectMeta: metav1.ObjectMeta{
			Name:      svc.Name,
			Namespace: svc.Namespace,
			Labels:    map[string]string{discoveryv1.LabelServiceName: svc.Name},
		},
		Endpoints: []discoveryv1.Endpoint{{
			Addresses: []string{ip},
		}},
	}
}

func expectHostsRecords(t *testing.T, cl client.Client, wantsHosts map[string][]string) {
	t.Helper()
	cm := new(corev1.ConfigMap)
	if err := cl.Get(context.Background(), types.NamespacedName{Name: "dnsrecords", Namespace: "tailscale"}, cm); err != nil {
		t.Fatalf("getting dnsrecords ConfigMap: %v", err)
	}
	if cm.Data == nil {
		t.Fatal("dnsrecords ConfigMap has no data")
	}
	dnsConfigString, ok := cm.Data[operatorutils.DNSRecordsCMKey]
	if !ok {
		t.Fatal("dnsrecords ConfigMap does not contain DNS records")
	}
	dnsConfig := &operatorutils.Records{}
	if err := json.Unmarshal([]byte(dnsConfigString), dnsConfig); err != nil {
		t.Fatalf("unmarshaling DNS records: %v", err)
	}
	if diff := cmp.Diff(dnsConfig.IP4, wantsHosts); diff != "" {
		t.Fatalf("unexpected DNS records (-got +want):\n%s", diff)
	}
}

@@ -38,10 +38,10 @@ func main() {
	}
	repoRoot := "../../"
	switch os.Args[1] {
	case "helmcrd": // insert CRD to Helm templates behind an installCRDs=true conditional check
		log.Print("Adding Connector CRD to Helm templates")
	case "helmcrd": // insert CRDs to Helm templates behind an installCRDs=true conditional check
		log.Print("Adding CRDs to Helm templates")
		if err := generate("./"); err != nil {
			log.Fatalf("error adding Connector CRD to Helm templates: %v", err)
			log.Fatalf("error adding CRDs to Helm templates: %v", err)
		}
		return
	case "staticmanifests": // generate static manifests from Helm templates (including the CRD)

@@ -141,7 +141,7 @@ func (a *NameserverReconciler) Reconcile(ctx context.Context, req reconcile.Requ
		return res, fmt.Errorf("error getting Service: %w", err)
	}
	if ip := svc.Spec.ClusterIP; ip != "" && ip != "None" {
		dnsCfg.Status.NameserverStatus = &tsapi.NameserverStatus{
		dnsCfg.Status.Nameserver = &tsapi.NameserverStatus{
			IP: ip,
		}
		return setStatus(&dnsCfg, tsapi.NameserverReady, metav1.ConditionTrue, reasonNameserverCreated, reasonNameserverCreated)

@@ -77,7 +77,7 @@ func TestNameserverReconciler(t *testing.T) {
		svc.Spec.ClusterIP = "1.2.3.4"
	})
	expectReconciled(t, nr, "", "test")
	dnsCfg.Status.NameserverStatus = &tsapi.NameserverStatus{
	dnsCfg.Status.Nameserver = &tsapi.NameserverStatus{
		IP: "1.2.3.4",
	}
	dnsCfg.Finalizers = []string{FinalizerName}

@@ -105,14 +105,14 @@ func TestNameserverReconciler(t *testing.T) {
	if err != nil {
		t.Fatalf("error marshalling ConfigMap contents: %v", err)
	}
	mustUpdate(t, fc, "tailscale", "dnsconfig", func(cm *corev1.ConfigMap) {
		mak.Set(&cm.Data, "dns.json", string(bs))
	mustUpdate(t, fc, "tailscale", "dnsrecords", func(cm *corev1.ConfigMap) {
		mak.Set(&cm.Data, "records.json", string(bs))
	})
	expectReconciled(t, nr, "", "test")
	wantCm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "dnsconfig",
	wantCm := &corev1.ConfigMap{ObjectMeta: metav1.ObjectMeta{Name: "dnsrecords",
		Namespace: "tailscale", Labels: labels, OwnerReferences: []metav1.OwnerReference{*dnsCfgOwnerRef}},
		TypeMeta: metav1.TypeMeta{Kind: "ConfigMap", APIVersion: "v1"},
		Data: map[string]string{"dns.json": string(bs)},
		Data: map[string]string{"records.json": string(bs)},
	}
	expectEqual(t, fc, wantCm, nil)
}

@@ -20,6 +20,7 @@ import (
	"golang.org/x/oauth2/clientcredentials"
	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	discoveryv1 "k8s.io/api/discovery/v1"
	networkingv1 "k8s.io/api/networking/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/rest"

@@ -59,12 +60,13 @@ func main() {
	tailscale.I_Acknowledge_This_API_Is_Unstable = true

	var (
		tsNamespace = defaultEnv("OPERATOR_NAMESPACE", "")
		tslogging = defaultEnv("OPERATOR_LOGGING", "info")
		image = defaultEnv("PROXY_IMAGE", "tailscale/tailscale:latest")
		priorityClassName = defaultEnv("PROXY_PRIORITY_CLASS_NAME", "")
		tags = defaultEnv("PROXY_TAGS", "tag:k8s")
		tsFirewallMode = defaultEnv("PROXY_FIREWALL_MODE", "")
		tsNamespace           = defaultEnv("OPERATOR_NAMESPACE", "")
		tslogging             = defaultEnv("OPERATOR_LOGGING", "info")
		image                 = defaultEnv("PROXY_IMAGE", "tailscale/tailscale:latest")
		priorityClassName     = defaultEnv("PROXY_PRIORITY_CLASS_NAME", "")
		tags                  = defaultEnv("PROXY_TAGS", "tag:k8s")
		tsFirewallMode        = defaultEnv("PROXY_FIREWALL_MODE", "")
		isDefaultLoadBalancer = defaultBool("OPERATOR_DEFAULT_LOAD_BALANCER", false)
	)

	var opts []kzap.Opts

@@ -93,9 +95,19 @@ func main() {
	defer s.Close()
	restConfig := config.GetConfigOrDie()
	maybeLaunchAPIServerProxy(zlog, restConfig, s, mode)
	// TODO (irbekrm): gather the reconciler options into an opts struct
	// rather than passing a million of them in one by one.
	runReconcilers(zlog, s, tsNamespace, restConfig, tsClient, image, priorityClassName, tags, tsFirewallMode)
	rOpts := reconcilerOpts{
		log:                           zlog,
		tsServer:                      s,
		tsClient:                      tsClient,
		tailscaleNamespace:            tsNamespace,
		restConfig:                    restConfig,
		proxyImage:                    image,
		proxyPriorityClassName:        priorityClassName,
		proxyActAsDefaultLoadBalancer: isDefaultLoadBalancer,
		proxyTags:                     tags,
		proxyFirewallMode:             tsFirewallMode,
	}
	runReconcilers(rOpts)
}

// initTSNet initializes the tsnet.Server and logs in to Tailscale. It uses the

@@ -203,11 +215,8 @@ waitOnline:

// runReconcilers starts the controller-runtime manager and registers the
// ServiceReconciler. It blocks forever.
func runReconcilers(zlog *zap.SugaredLogger, s *tsnet.Server, tsNamespace string, restConfig *rest.Config, tsClient *tailscale.Client, image, priorityClassName, tags, tsFirewallMode string) {
	var (
		isDefaultLoadBalancer = defaultBool("OPERATOR_DEFAULT_LOAD_BALANCER", false)
	)
	startlog := zlog.Named("startReconcilers")
func runReconcilers(opts reconcilerOpts) {
	startlog := opts.log.Named("startReconcilers")
	// For secrets and statefulsets, we only get permission to touch the objects
	// in the controller's own namespace. This cannot be expressed by
	// .Watches(...) below, instead you have to add a per-type field selector to

@@ -215,7 +224,7 @@ func runReconcilers(zlog *zap.SugaredLogger, s *tsnet.Server, tsNamespace string
	// implicitly filter what parts of the world the builder code gets to see at
	// all.
	nsFilter := cache.ByObject{
		Field: client.InNamespace(tsNamespace).AsSelector(),
		Field: client.InNamespace(opts.tailscaleNamespace).AsSelector(),
	}
	mgrOpts := manager.Options{
		// TODO (irbekrm): stricter filtering what we watch/cache/call

@@ -223,36 +232,37 @@ func runReconcilers(zlog *zap.SugaredLogger, s *tsnet.Server, tsNamespace string
		// resources that we GET via the controller manager's client.
		Cache: cache.Options{
			ByObject: map[client.Object]cache.ByObject{
				&corev1.Secret{}: nsFilter,
				&corev1.ServiceAccount{}: nsFilter,
				&corev1.ConfigMap{}: nsFilter,
				&appsv1.StatefulSet{}: nsFilter,
				&appsv1.Deployment{}: nsFilter,
				&corev1.Secret{}:             nsFilter,
				&corev1.ServiceAccount{}:     nsFilter,
				&corev1.ConfigMap{}:          nsFilter,
				&appsv1.StatefulSet{}:        nsFilter,
				&appsv1.Deployment{}:         nsFilter,
				&discoveryv1.EndpointSlice{}: nsFilter,
			},
		},
		Scheme: tsapi.GlobalScheme,
	}
	mgr, err := manager.New(restConfig, mgrOpts)
	mgr, err := manager.New(opts.restConfig, mgrOpts)
	if err != nil {
		startlog.Fatalf("could not create manager: %v", err)
	}

	svcFilter := handler.EnqueueRequestsFromMapFunc(serviceHandler)
	svcChildFilter := handler.EnqueueRequestsFromMapFunc(managedResourceHandlerForType("svc"))
	// If a ProxyClassChanges, enqueue all Services labeled with that
	// If a ProxyClass changes, enqueue all Services labeled with that
	// ProxyClass's name.
	proxyClassFilterForSvc := handler.EnqueueRequestsFromMapFunc(proxyClassHandlerForSvc(mgr.GetClient(), startlog))

	eventRecorder := mgr.GetEventRecorderFor("tailscale-operator")
	ssr := &tailscaleSTSReconciler{
		Client: mgr.GetClient(),
		tsnetServer: s,
		tsClient: tsClient,
		defaultTags: strings.Split(tags, ","),
		operatorNamespace: tsNamespace,
		proxyImage: image,
		proxyPriorityClassName: priorityClassName,
		tsFirewallMode: tsFirewallMode,
		tsnetServer:            opts.tsServer,
		tsClient:               opts.tsClient,
		defaultTags:            strings.Split(opts.proxyTags, ","),
		operatorNamespace:      opts.tailscaleNamespace,
		proxyImage:             opts.proxyImage,
		proxyPriorityClassName: opts.proxyPriorityClassName,
		tsFirewallMode:         opts.proxyFirewallMode,
	}
	err = builder.
		ControllerManagedBy(mgr).

@@ -264,10 +274,10 @@ func runReconcilers(zlog *zap.SugaredLogger, s *tsnet.Server, tsNamespace string
		Complete(&ServiceReconciler{
			ssr:    ssr,
			Client: mgr.GetClient(),
			logger: zlog.Named("service-reconciler"),
			isDefaultLoadBalancer: isDefaultLoadBalancer,
			logger:                opts.log.Named("service-reconciler"),
			isDefaultLoadBalancer: opts.proxyActAsDefaultLoadBalancer,
			recorder:              eventRecorder,
			tsNamespace: tsNamespace,
			tsNamespace:           opts.tailscaleNamespace,
		})
	if err != nil {
		startlog.Fatalf("could not create service reconciler: %v", err)

@@ -289,7 +299,7 @@ func runReconcilers(zlog *zap.SugaredLogger, s *tsnet.Server, tsNamespace string
			ssr:      ssr,
			recorder: eventRecorder,
			Client:   mgr.GetClient(),
			logger: zlog.Named("ingress-reconciler"),
			logger:   opts.log.Named("ingress-reconciler"),
		})
	if err != nil {
		startlog.Fatalf("could not create ingress reconciler: %v", err)

@@ -308,14 +318,14 @@ func runReconcilers(zlog *zap.SugaredLogger, s *tsnet.Server, tsNamespace string
			ssr:      ssr,
			recorder: eventRecorder,
			Client:   mgr.GetClient(),
			logger: zlog.Named("connector-reconciler"),
			logger:   opts.log.Named("connector-reconciler"),
			clock:    tstime.DefaultClock{},
		})
	if err != nil {
		startlog.Fatalf("could not create connector reconciler: %v", err)
	}
	// TODO (irbekrm): switch to metadata-only watches for resources whose
	// spec we don't need to inspect to reduce memory consumption
	// spec we don't need to inspect to reduce memory consumption.
	// https://github.com/kubernetes-sigs/controller-runtime/issues/1159
	nameserverFilter := handler.EnqueueRequestsFromMapFunc(managedResourceHandlerForType("nameserver"))
	err = builder.ControllerManagedBy(mgr).

@@ -326,11 +336,10 @@ func runReconcilers(zlog *zap.SugaredLogger, s *tsnet.Server, tsNamespace string
		Watches(&corev1.ServiceAccount{}, nameserverFilter).
		Complete(&NameserverReconciler{
			recorder: eventRecorder,
			tsNamespace: tsNamespace,

			Client: mgr.GetClient(),
			logger: zlog.Named("nameserver-reconciler"),
			clock: tstime.DefaultClock{},
			tsNamespace: opts.tailscaleNamespace,
			Client:      mgr.GetClient(),
			logger:      opts.log.Named("nameserver-reconciler"),
			clock:       tstime.DefaultClock{},
		})
	if err != nil {
		startlog.Fatalf("could not create nameserver reconciler: %v", err)

@@ -340,18 +349,170 @@ func runReconcilers(zlog *zap.SugaredLogger, s *tsnet.Server, tsNamespace string
		Complete(&ProxyClassReconciler{
			Client:   mgr.GetClient(),
			recorder: eventRecorder,
			logger: zlog.Named("proxyclass-reconciler"),
			logger:   opts.log.Named("proxyclass-reconciler"),
			clock:    tstime.DefaultClock{},
		})
	if err != nil {
		startlog.Fatalf("could not create proxyclass reconciler: %v", err)
	}
	logger := startlog.Named("dns-records-reconciler-event-handlers")
	// On EndpointSlice events, if it is an EndpointSlice for an
	// ingress/egress proxy headless Service, reconcile the headless
	// Service.
	dnsRREpsOpts := handler.EnqueueRequestsFromMapFunc(dnsRecordsReconcilerEndpointSliceHandler)
	// On DNSConfig changes, reconcile all headless Services for
	// ingress/egress proxies in operator namespace.
	dnsRRDNSConfigOpts := handler.EnqueueRequestsFromMapFunc(enqueueAllIngressEgressProxySvcsInNS(opts.tailscaleNamespace, mgr.GetClient(), logger))
	// On Service events, if it is an ingress/egress proxy headless Service, reconcile it.
	dnsRRServiceOpts := handler.EnqueueRequestsFromMapFunc(dnsRecordsReconcilerServiceHandler)
	// On Ingress events, if it is a tailscale Ingress or if tailscale is the default ingress controller, reconcile the proxy
	// headless Service.
	dnsRRIngressOpts := handler.EnqueueRequestsFromMapFunc(dnsRecordsReconcilerIngressHandler(opts.tailscaleNamespace, opts.proxyActAsDefaultLoadBalancer, mgr.GetClient(), logger))
	err = builder.ControllerManagedBy(mgr).
		Named("dns-records-reconciler").
		Watches(&corev1.Service{}, dnsRRServiceOpts).
		Watches(&networkingv1.Ingress{}, dnsRRIngressOpts).
		Watches(&discoveryv1.EndpointSlice{}, dnsRREpsOpts).
		Watches(&tsapi.DNSConfig{}, dnsRRDNSConfigOpts).
		Complete(&dnsRecordsReconciler{
			Client:                mgr.GetClient(),
			tsNamespace:           opts.tailscaleNamespace,
			logger:                opts.log.Named("dns-records-reconciler"),
			isDefaultLoadBalancer: opts.proxyActAsDefaultLoadBalancer,
		})
	if err != nil {
		startlog.Fatalf("could not create DNS records reconciler: %v", err)
	}
	startlog.Infof("Startup complete, operator running, version: %s", version.Long())
	if err := mgr.Start(signals.SetupSignalHandler()); err != nil {
		startlog.Fatalf("could not start manager: %v", err)
	}
}

type reconcilerOpts struct {
	log                *zap.SugaredLogger
	tsServer           *tsnet.Server
	tsClient           *tailscale.Client
	tailscaleNamespace string       // namespace in which operator resources will be deployed
	restConfig         *rest.Config // config for connecting to the kube API server
	proxyImage         string       // <proxy-image-repo>:<proxy-image-tag>
	// proxyPriorityClassName is the PriorityClass to be set for proxy Pods. This
	// is a legacy mechanism for cluster resource configuration options -
	// going forward use ProxyClass.
	// https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass
	proxyPriorityClassName string
	// proxyTags are ACL tags to tag proxy auth keys. Multiple tags should
	// be provided as a string with comma-separated tag values. Proxy tags
	// default to tag:k8s.
	// https://tailscale.com/kb/1085/auth-keys
	proxyTags string
	// proxyActAsDefaultLoadBalancer determines whether this operator
	// instance should act as the default ingress controller when looking at
	// Ingress resources with unset ingress.spec.ingressClassName.
	// TODO (irbekrm): this setting does not respect the default
	// IngressClass.
	// https://kubernetes.io/docs/concepts/services-networking/ingress/#default-ingress-class
	// We should fix that and preferably integrate with that mechanism as
	// well - perhaps make the operator itself create the default
	// IngressClass if this is set to true.
	proxyActAsDefaultLoadBalancer bool
	// proxyFirewallMode determines whether non-userspace proxies should use
	// iptables or nftables for firewall configuration. Accepted values are
	// iptables, nftables and auto. If set to auto, proxy will automatically
	// determine which mode is supported for a given host (prefer nftables).
	// Auto is usually the best choice, unless you want to explicitly set a
	// specific mode for debugging purposes.
	proxyFirewallMode string
}

// enqueueAllIngressEgressProxySvcsInNS returns a reconcile request for each
// ingress/egress proxy headless Service found in the provided namespace.
func enqueueAllIngressEgressProxySvcsInNS(ns string, cl client.Client, logger *zap.SugaredLogger) handler.MapFunc {
	return func(ctx context.Context, _ client.Object) []reconcile.Request {
		reqs := make([]reconcile.Request, 0)

		// Get all headless Services for proxies configured using Service.
		svcProxyLabels := map[string]string{
			LabelManaged:    "true",
			LabelParentType: "svc",
		}
		svcHeadlessSvcList := &corev1.ServiceList{}
		if err := cl.List(ctx, svcHeadlessSvcList, client.InNamespace(ns), client.MatchingLabels(svcProxyLabels)); err != nil {
			logger.Errorf("error listing headless Services for tailscale ingress/egress Services in operator namespace: %v", err)
			return nil
		}
		for _, svc := range svcHeadlessSvcList.Items {
			reqs = append(reqs, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: svc.Namespace, Name: svc.Name}})
		}

		// Get all headless Services for proxies configured using Ingress.
		ingProxyLabels := map[string]string{
			LabelManaged:    "true",
			LabelParentType: "ingress",
		}
		ingHeadlessSvcList := &corev1.ServiceList{}
		if err := cl.List(ctx, ingHeadlessSvcList, client.InNamespace(ns), client.MatchingLabels(ingProxyLabels)); err != nil {
			logger.Errorf("error listing headless Services for tailscale Ingresses in operator namespace: %v", err)
			return nil
		}
		for _, svc := range ingHeadlessSvcList.Items {
			reqs = append(reqs, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: svc.Namespace, Name: svc.Name}})
		}
		return reqs
	}
}

// dnsRecordsReconcilerEndpointSliceHandler filters EndpointSlice events for which
// dns-records-reconciler should reconcile a headless Service. The only events
// it should reconcile are those for EndpointSlices associated with proxy
// headless Services.
func dnsRecordsReconcilerEndpointSliceHandler(ctx context.Context, o client.Object) []reconcile.Request {
	if !isManagedByType(o, "svc") && !isManagedByType(o, "ingress") {
		return nil
	}
	headlessSvcName, ok := o.GetLabels()[discoveryv1.LabelServiceName] // https://kubernetes.io/docs/concepts/services-networking/endpoint-slices/#ownership
	if !ok {
		return nil
	}
	return []reconcile.Request{{NamespacedName: types.NamespacedName{Namespace: o.GetNamespace(), Name: headlessSvcName}}}
}

// dnsRecordsReconcilerServiceHandler filters Service events for which
// dns-records-reconciler should reconcile. If the event is for a cluster
// ingress/cluster egress proxy's headless Service, returns the Service for
// reconcile.
func dnsRecordsReconcilerServiceHandler(ctx context.Context, o client.Object) []reconcile.Request {
	if isManagedByType(o, "svc") || isManagedByType(o, "ingress") {
		return []reconcile.Request{{NamespacedName: types.NamespacedName{Namespace: o.GetNamespace(), Name: o.GetName()}}}
	}
	return nil
}

// dnsRecordsReconcilerIngressHandler filters Ingress events to ensure that
// dns-records-reconciler only reconciles on tailscale Ingress events. When an
// event is observed on a tailscale Ingress, reconcile the proxy headless Service.
func dnsRecordsReconcilerIngressHandler(ns string, isDefaultLoadBalancer bool, cl client.Client, logger *zap.SugaredLogger) handler.MapFunc {
	return func(ctx context.Context, o client.Object) []reconcile.Request {
		ing, ok := o.(*networkingv1.Ingress)
		if !ok {
			return nil
		}
		if !isDefaultLoadBalancer && (ing.Spec.IngressClassName == nil || *ing.Spec.IngressClassName != "tailscale") {
			return nil
		}
		proxyResourceLabels := childResourceLabels(ing.Name, ing.Namespace, "ingress")
		headlessSvc, err := getSingleObject[corev1.Service](ctx, cl, ns, proxyResourceLabels)
		if err != nil {
			logger.Errorf("error getting headless Service from parent labels: %v", err)
			return nil
		}
		if headlessSvc == nil {
			return nil
		}
		return []reconcile.Request{{NamespacedName: types.NamespacedName{Namespace: headlessSvc.Namespace, Name: headlessSvc.Name}}}
	}
}

type tsClient interface {
	CreateKey(ctx context.Context, caps tailscale.KeyCapabilities) (string, *tailscale.Key, error)
	DeleteDevice(ctx context.Context, nodeStableID string) error

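All four dns-records-reconciler watches above go through handler.EnqueueRequestsFromMapFunc, whose contract is simply: given an observed object, return zero or more reconcile requests. A minimal self-contained sketch of that contract (the label key is made up for illustration):

```go
package main

import (
	"context"

	"k8s.io/apimachinery/pkg/types"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// exampleMapFunc maps an observed object to requests; returning nil means
// the event is ignored and nothing is enqueued.
func exampleMapFunc(ctx context.Context, o client.Object) []reconcile.Request {
	if o.GetLabels()["example.com/managed"] != "true" { // hypothetical filter
		return nil
	}
	return []reconcile.Request{{NamespacedName: types.NamespacedName{
		Namespace: o.GetNamespace(),
		Name:      o.GetName(),
	}}}
}
```
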
@ -90,7 +90,7 @@ func (a *ServiceReconciler) Reconcile(ctx context.Context, req reconcile.Request
|
|||
} else if err != nil {
|
||||
return reconcile.Result{}, fmt.Errorf("failed to get svc: %w", err)
|
||||
}
|
||||
targetIP := a.tailnetTargetAnnotation(svc)
|
||||
targetIP := tailnetTargetAnnotation(svc)
|
||||
targetFQDN := svc.Annotations[AnnotationTailnetTargetFQDN]
|
||||
if !svc.DeletionTimestamp.IsZero() || !a.shouldExpose(svc) && targetIP == "" && targetFQDN == "" {
|
||||
logger.Debugf("service is being deleted or is (no longer) referring to Tailscale ingress/egress, ensuring any created resources are cleaned up")
|
||||
|
@ -216,7 +216,7 @@ func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.Suga
|
|||
sts.ClusterTargetDNSName = svc.Spec.ExternalName
|
||||
a.managedIngressProxies.Add(svc.UID)
|
||||
gaugeIngressProxies.Set(int64(a.managedIngressProxies.Len()))
|
||||
} else if ip := a.tailnetTargetAnnotation(svc); ip != "" {
|
||||
} else if ip := tailnetTargetAnnotation(svc); ip != "" {
|
||||
sts.TailnetTargetIP = ip
|
||||
a.managedEgressProxies.Add(svc.UID)
|
||||
gaugeEgressProxies.Set(int64(a.managedEgressProxies.Len()))
|
||||
|
@ -250,7 +250,7 @@ func (a *ServiceReconciler) maybeProvision(ctx context.Context, logger *zap.Suga
|
|||
return nil
|
||||
}
|
||||
|
||||
if !a.hasLoadBalancerClass(svc) {
|
||||
if !isTailscaleLoadBalancerService(svc, a.isDefaultLoadBalancer) {
|
||||
logger.Debugf("service is not a LoadBalancer, so not updating ingress")
|
||||
return nil
|
||||
}
|
||||
|
@ -310,29 +310,27 @@ func (a *ServiceReconciler) shouldExpose(svc *corev1.Service) bool {
	return a.shouldExposeClusterIP(svc) || a.shouldExposeDNSName(svc)
}

func (a *ServiceReconciler) shouldExposeDNSName(svc *corev1.Service) bool {
	return hasExposeAnnotation(svc) && svc.Spec.Type == corev1.ServiceTypeExternalName && svc.Spec.ExternalName != ""
}

func (a *ServiceReconciler) shouldExposeClusterIP(svc *corev1.Service) bool {
	// Headless services can't be exposed, since there is no ClusterIP to
	// forward to.
	if svc.Spec.ClusterIP == "" || svc.Spec.ClusterIP == "None" {
		return false
	}
	return a.hasLoadBalancerClass(svc) || a.hasExposeAnnotation(svc)
	return isTailscaleLoadBalancerService(svc, a.isDefaultLoadBalancer) || hasExposeAnnotation(svc)
}

func (a *ServiceReconciler) shouldExposeDNSName(svc *corev1.Service) bool {
	return a.hasExposeAnnotation(svc) && svc.Spec.Type == corev1.ServiceTypeExternalName && svc.Spec.ExternalName != ""
}

func (a *ServiceReconciler) hasLoadBalancerClass(svc *corev1.Service) bool {
func isTailscaleLoadBalancerService(svc *corev1.Service, isDefaultLoadBalancer bool) bool {
	return svc != nil &&
		svc.Spec.Type == corev1.ServiceTypeLoadBalancer &&
		(svc.Spec.LoadBalancerClass != nil && *svc.Spec.LoadBalancerClass == "tailscale" ||
			svc.Spec.LoadBalancerClass == nil && a.isDefaultLoadBalancer)
			svc.Spec.LoadBalancerClass == nil && isDefaultLoadBalancer)
}

// hasExposeAnnotation reports whether Service has the tailscale.com/expose
// annotation set
func (a *ServiceReconciler) hasExposeAnnotation(svc *corev1.Service) bool {
func hasExposeAnnotation(svc *corev1.Service) bool {
	return svc != nil && svc.Annotations[AnnotationExpose] == "true"
}
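For reference, a minimal sketch of a Service object that the new isTailscaleLoadBalancerService predicate accepts (the variable names here are illustrative):

	lbClass := "tailscale"
	svc := &corev1.Service{
		Spec: corev1.ServiceSpec{
			Type:              corev1.ServiceTypeLoadBalancer,
			LoadBalancerClass: &lbClass, // or nil, when the operator runs as the default load balancer
		},
	}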
@ -340,7 +338,7 @@ func (a *ServiceReconciler) hasExposeAnnotation(svc *corev1.Service) bool {
// annotation or of the deprecated tailscale.com/ts-tailnet-target-ip
// annotation. If neither is set, it returns an empty string. If both are set,
// it returns the value of the new annotation.
func (a *ServiceReconciler) tailnetTargetAnnotation(svc *corev1.Service) string {
func tailnetTargetAnnotation(svc *corev1.Service) string {
	if svc == nil {
		return ""
	}
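The rest of the function body is elided by the hunk above; a hedged sketch of the precedence the doc comment describes, with hypothetical annotation keys standing in for the real constants:

	// Illustrative only: the new annotation wins when both are set.
	if ip := svc.Annotations["tailscale.com/tailnet-ip"]; ip != "" {
		return ip
	}
	return svc.Annotations["tailscale.com/ts-tailnet-target-ip"] // deprecated key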
@ -69,10 +69,6 @@ func init() {
	}
}

func init() {
	rand.Seed(time.Now().UnixNano())
}

const (
	perClientSendQueueDepth = 32 // packets buffered for sending
	writeTimeout            = 2 * time.Second
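The deleted init is presumably safe to drop because, since Go 1.20, the global math/rand source is seeded automatically; explicit seeding now only matters when a reproducible sequence is wanted:

	// Deterministic stream, e.g. for tests; the package-level functions no longer need seeding.
	r := rand.New(rand.NewSource(42))
	_ = r.Intn(10)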
@ -136,8 +136,15 @@ func NewRegionClient(privateKey key.NodePrivate, logf logger.Logf, netMon *netmo

// NewNetcheckClient returns a Client that's only able to have its DialRegionTLS method called.
// It's used by the netcheck package.
func NewNetcheckClient(logf logger.Logf) *Client {
	return &Client{logf: logf, clock: tstime.StdClock{}}
func NewNetcheckClient(logf logger.Logf, netMon *netmon.Monitor) *Client {
	if netMon == nil {
		panic("nil netMon")
	}
	return &Client{
		logf:   logf,
		clock:  tstime.StdClock{},
		netMon: netMon,
	}
}

// NewClient returns a new DERP-over-HTTP client. It connects lazily.
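Callers of the new signature must now supply a network monitor. A hedged sketch, assuming netmon.New is the constructor (verify the actual API before relying on this):

	netMon, err := netmon.New(log.Printf)
	if err != nil {
		log.Fatal(err)
	}
	c := derphttp.NewNetcheckClient(log.Printf, netMon) // would panic if netMon were nil
	_ = c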
@ -20,6 +20,16 @@ const fastStartHeader = "Derp-Fast-Start"

func Handler(s *derp.Server) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// These are installed both here and in cmd/derper. The check here
		// catches both cmd/derper run with DERP disabled (STUN-only mode)
		// and DERP run directly in tests via derphttp.Handler, as netcheck
		// still assumes this endpoint replies.
		switch r.URL.Path {
		case "/derp/probe", "/derp/latency-check":
			ProbeHandler(w, r)
			return
		}

		up := strings.ToLower(r.Header.Get("Upgrade"))
		if up != "websocket" && up != "derp" {
			if up != "" {
@ -58,3 +68,14 @@ func Handler(s *derp.Server) http.Handler {
		s.Accept(r.Context(), netConn, conn, netConn.RemoteAddr().String())
	})
}

// ProbeHandler is the endpoint that clients without UDP access (including js/wasm) hit to measure
// DERP latency, as a replacement for UDP STUN queries.
func ProbeHandler(w http.ResponseWriter, r *http.Request) {
	switch r.Method {
	case "HEAD", "GET":
		w.Header().Set("Access-Control-Allow-Origin", "*")
	default:
		http.Error(w, "bogus probe method", http.StatusMethodNotAllowed)
	}
}
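For illustration, a minimal client-side sketch of measuring DERP latency against this endpoint over HTTP; the host name is a placeholder:

	start := time.Now()
	resp, err := http.Head("https://derp1.example.com/derp/latency-check")
	if err == nil {
		resp.Body.Close()
		log.Printf("DERP HTTP round trip: %v", time.Since(start))
	}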
@ -10,6 +10,7 @@ import (
	"fmt"
	"net"
	"net/http"
	"net/http/httptest"
	"net/netip"
	"sync"
	"testing"
@ -464,3 +465,24 @@ func TestLocalAddrNoMutex(t *testing.T) {
		t.Errorf("got error %q; want %q", got, want)
	}
}

func TestProbe(t *testing.T) {
	h := Handler(nil)

	tests := []struct {
		path string
		want int
	}{
		{"/derp/probe", 200},
		{"/derp/latency-check", 200},
		{"/derp/sdf", http.StatusUpgradeRequired},
	}

	for _, tt := range tests {
		rec := httptest.NewRecorder()
		h.ServeHTTP(rec, httptest.NewRequest("GET", tt.path, nil))
		if got := rec.Result().StatusCode; got != tt.want {
			t.Errorf("for path %q got HTTP status %v; want %v", tt.path, got, tt.want)
		}
	}
}
@ -27,10 +27,10 @@ import (
type Child struct {
	*dirfs.Child

	// BaseURL is the base URL of the WebDAV service to which we'll proxy
	// BaseURL returns the base URL of the WebDAV service to which we'll proxy
	// requests for this Child. We will append the filename from the original
	// URL to this.
	BaseURL string
	BaseURL func() (string, error)

	// Transport (if specified) is the http transport to use when communicating
	// with this Child's WebDAV service.
@ -154,9 +154,15 @@ func (h *Handler) delegate(mpl int, pathComponents []string, w http.ResponseWrit
		return
	}

	u, err := url.Parse(child.BaseURL)
	baseURL, err := child.BaseURL()
	if err != nil {
		h.logf("warning: parse base URL %s failed: %s", child.BaseURL, err)
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}

	u, err := url.Parse(baseURL)
	if err != nil {
		h.logf("warning: parse base URL %s failed: %s", baseURL, err)
		http.Error(w, err.Error(), http.StatusInternalServerError)
		return
	}
@ -24,33 +24,26 @@ func (h *Handler) handlePROPFIND(w http.ResponseWriter, r *http.Request) {
	// Delegate to a Child.
	depth := getDepth(r)

	cached := h.StatCache.get(r.URL.Path, depth)
	if cached != nil {
		w.Header().Del("Content-Length")
		w.WriteHeader(http.StatusMultiStatus)
		w.Write(cached)
		return
	}
	status, result := h.StatCache.getOr(r.URL.Path, depth, func() (int, []byte) {
		// Use a buffering ResponseWriter so that we can manipulate the result.
		// The only thing we use from the original ResponseWriter is Header().
		bw := &bufferingResponseWriter{ResponseWriter: w}

	// Use a buffering ResponseWriter so that we can manipulate the result.
	// The only thing we use from the original ResponseWriter is Header().
	bw := &bufferingResponseWriter{ResponseWriter: w}
		mpl := h.maxPathLength(r)
		h.delegate(mpl, pathComponents[mpl-1:], bw, r)

	mpl := h.maxPathLength(r)
	h.delegate(mpl, pathComponents[mpl-1:], bw, r)
		// Fixup paths to add the requested path as a prefix.
		pathPrefix := shared.Join(pathComponents[0:mpl]...)
		b := hrefRegex.ReplaceAll(bw.buf.Bytes(), []byte(fmt.Sprintf("<D:href>%s/$1</D:href>", pathPrefix)))

	// Fixup paths to add the requested path as a prefix.
	pathPrefix := shared.Join(pathComponents[0:mpl]...)
	b := hrefRegex.ReplaceAll(bw.buf.Bytes(), []byte(fmt.Sprintf("<D:href>%s/$1</D:href>", pathPrefix)))

	if h.StatCache != nil && bw.status == http.StatusMultiStatus && b != nil {
		h.StatCache.set(r.URL.Path, depth, b)
	}
		return bw.status, b
	})

	w.Header().Del("Content-Length")
	w.WriteHeader(bw.status)
	w.Write(b)

	w.WriteHeader(status)
	if result != nil {
		w.Write(result)
	}
	return
}
@ -4,6 +4,7 @@
package compositedav

import (
	"net/http"
	"sync"
	"time"
@ -25,6 +26,23 @@ type StatCache struct {
	cachesByDepthAndPath map[int]*ttlcache.Cache[string, []byte]
}

// getOr checks the cache for the named value at the given depth. If a cached
// value was found, it returns http.StatusMultiStatus along with the cached
// value. Otherwise, it executes the given function and returns the resulting
// status and value. If the function returned http.StatusMultiStatus, getOr
// caches the resulting value at the given name and depth before returning.
func (c *StatCache) getOr(name string, depth int, or func() (int, []byte)) (int, []byte) {
	cached := c.get(name, depth)
	if cached != nil {
		return http.StatusMultiStatus, cached
	}
	status, next := or()
	if c != nil && status == http.StatusMultiStatus && next != nil {
		c.set(name, depth, next)
	}
	return status, next
}

func (c *StatCache) get(name string, depth int) []byte {
	if c == nil {
		return nil
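Note that getOr, like get and set, tolerates a nil receiver, so callers need not check whether caching is enabled. A minimal usage sketch with a placeholder payload:

	var cache *StatCache // nil means caching is disabled
	status, body := cache.getOr("/some/path", 1, func() (int, []byte) {
		return http.StatusMultiStatus, []byte("<multistatus/>") // illustrative payload
	})
	_, _ = status, body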
@ -10,10 +10,12 @@ import (
	"log"
	"net"
	"net/http"
	"net/url"
	"os"
	"path"
	"path/filepath"
	"slices"
	"strings"
	"sync"
	"testing"
	"time"
@ -112,7 +114,35 @@ func TestPermissions(t *testing.T) {
	if err := s.client.Rename(pathTo(remote1, share12, file111), pathTo(remote1, share12, file112), true); err == nil {
		t.Error("moving file on read-only remote should fail")
	}
}

// TestSecretTokenAuth verifies that the fileserver running at localhost cannot
// be accessed directly without the correct secret token. This matters because
// if a victim can be induced to visit the localhost URL and access a malicious
// file on their own share, it could allow a Mark-of-the-Web bypass attack.
func TestSecretTokenAuth(t *testing.T) {
	s := newSystem(t)

	fileserverAddr := s.addRemote(remote1)
	s.addShare(remote1, share11, drive.PermissionReadWrite)
	s.writeFile("writing file to read/write remote should succeed", remote1, share11, file111, "hello world", true)

	client := &http.Client{
		Transport: &http.Transport{DisableKeepAlives: true},
	}
	addr := strings.Split(fileserverAddr, "|")[1]
	wrongSecret, err := generateSecretToken()
	if err != nil {
		t.Fatal(err)
	}
	u := fmt.Sprintf("http://%s/%s/%s", addr, wrongSecret, url.PathEscape(file111))
	resp, err := client.Get(u)
	if err != nil {
		t.Fatal(err)
	}
	if resp.StatusCode != http.StatusForbidden {
		t.Errorf("expected %d for incorrect secret token, but got %d", http.StatusForbidden, resp.StatusCode)
	}
}

type local struct {
@ -154,7 +184,7 @@ func newSystem(t *testing.T) *system {
	// Make sure we don't leak goroutines
	tstest.ResourceCheck(t)

	fs := NewFileSystemForLocal(log.Printf)
	fs := newFileSystemForLocal(log.Printf, nil)
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		t.Fatalf("failed to Listen: %s", err)
@ -183,7 +213,7 @@ func newSystem(t *testing.T) *system {
	return s
}

func (s *system) addRemote(name string) {
func (s *system) addRemote(name string) string {
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		s.t.Fatalf("failed to Listen: %s", err)
@ -222,6 +252,8 @@ func (s *system) addRemote(name string) {
		DisableKeepAlives:     true,
		ResponseHeaderTimeout: 5 * time.Second,
	})

	return fileServer.Addr()
}

func (s *system) addShare(remoteName, shareName string, permission drive.Permission) {
@ -4,6 +4,10 @@
package driveimpl

import (
	"crypto/rand"
	"crypto/subtle"
	"encoding/hex"
	"fmt"
	"net"
	"net/http"
	"sync"
@ -17,6 +21,7 @@ import (
// serve up files as an unprivileged user.
type FileServer struct {
	l             net.Listener
	secretToken   string
	shareHandlers map[string]http.Handler
	sharesMu      sync.RWMutex
}
@ -40,19 +45,36 @@ func NewFileServer() (*FileServer, error) {
	// if err != nil {
	// TODO(oxtoacart): actually get safesocket working in more environments (MacOS Sandboxed, Windows, ???)
	l, err := net.Listen("tcp", "127.0.0.1:0")
	if err != nil {
		return nil, fmt.Errorf("listen: %w", err)
	}

	secretToken, err := generateSecretToken()
	if err != nil {
		return nil, err
	}
	// }

	return &FileServer{
		l:             l,
		secretToken:   secretToken,
		shareHandlers: make(map[string]http.Handler),
	}, nil
}

// Addr returns the address at which this FileServer is listening.
// generateSecretToken generates a hex-encoded 256-bit secret.
func generateSecretToken() (string, error) {
	tokenBytes := make([]byte, 32)
	_, err := rand.Read(tokenBytes)
	if err != nil {
		return "", fmt.Errorf("generateSecretToken: %w", err)
	}
	return hex.EncodeToString(tokenBytes), nil
}

// Addr returns the address at which this FileServer is listening. This
// includes the secret token in front of the address, delimited by a pipe |.
func (s *FileServer) Addr() string {
	return s.l.Addr().String()
	return fmt.Sprintf("%s|%s", s.secretToken, s.l.Addr().String())
}

// Serve() starts serving files and blocks until it encounters a fatal error.
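Consumers split the combined value back apart, as the tests above and the getTokenAndAddr helper further below do; a minimal sketch with an abbreviated token:

	tokenAndAddr := fs.Addr() // e.g. "9f2c…|127.0.0.1:49152"
	parts := strings.SplitN(tokenAndAddr, "|", 2)
	secretToken, addr := parts[0], parts[1]
	_, _ = secretToken, addr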
@ -95,11 +117,33 @@ func (s *FileServer) SetShares(shares map[string]string) {
	}
}

// ServeHTTP implements the http.Handler interface.
// ServeHTTP implements the http.Handler interface. This requires a secret
// token in the path in order to prevent Mark-of-the-Web (MOTW) bypass attacks
// of the below sort:
//
//  1. Attacker with write access to the share puts a malicious file via
//     http://100.100.100.100:8080/<tailnet>/<machine>/<share>/bad.exe
//  2. Attacker then induces victim to visit
//     http://localhost:[PORT]/<share>/bad.exe
//  3. Because that is loaded from localhost, it does not get the MOTW and
//     thereby bypasses some OS-level security.
//
// The path on this file server is actually not as above, but rather
// http://localhost:[PORT]/<secretToken>/<share>/bad.exe. Unless the attacker
// can discover the secretToken, the attacker cannot craft a localhost URL that
// will work.
func (s *FileServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	parts := shared.CleanAndSplit(r.URL.Path)
	r.URL.Path = shared.Join(parts[1:]...)
	share := parts[0]

	token := parts[0]
	a, b := []byte(token), []byte(s.secretToken)
	if subtle.ConstantTimeCompare(a, b) != 1 {
		w.WriteHeader(http.StatusForbidden)
		return
	}

	r.URL.Path = shared.Join(parts[2:]...)
	share := parts[1]
	s.sharesMu.RLock()
	h, found := s.shareHandlers[share]
	s.sharesMu.RUnlock()
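The subtle.ConstantTimeCompare call is deliberate: a short-circuiting comparison could leak, through response timing, how many leading bytes of the token a caller guessed correctly. The safe pattern in isolation:

	func tokenMatches(got, want string) bool {
		// Examines every byte regardless of where the first mismatch occurs.
		return subtle.ConstantTimeCompare([]byte(got), []byte(want)) == 1
	}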
@ -27,6 +27,10 @@ const (
// NewFileSystemForLocal starts serving a filesystem for local clients.
// Inbound connections must be handed to HandleConn.
func NewFileSystemForLocal(logf logger.Logf) *FileSystemForLocal {
	return newFileSystemForLocal(logf, &compositedav.StatCache{TTL: statCacheTTL})
}

func newFileSystemForLocal(logf logger.Logf, statCache *compositedav.StatCache) *FileSystemForLocal {
	if logf == nil {
		logf = log.Printf
	}

@ -34,7 +38,7 @@ func NewFileSystemForLocal(logf logger.Logf) *FileSystemForLocal {
		logf: logf,
		h: &compositedav.Handler{
			Logf:      logf,
			StatCache: &compositedav.StatCache{TTL: statCacheTTL},
			StatCache: statCache,
		},
		listener: newConnListener(),
	}
@ -77,7 +81,7 @@ func (s *FileSystemForLocal) SetRemotes(domain string, remotes []*drive.Remote,
			Name:      remote.Name,
			Available: remote.Available,
		},
		BaseURL: remote.URL,
		BaseURL: func() (string, error) { return remote.URL, nil },
		Transport: transport,
	})
}
@ -51,17 +51,18 @@ type FileSystemForRemote struct {

	// mu guards the below values. Acquire a write lock before updating any of
	// them, acquire a read lock before reading any of them.
	mu             sync.RWMutex
	fileServerAddr string
	shares         []*drive.Share
	children       map[string]*compositedav.Child
	userServers    map[string]*userServer
	mu sync.RWMutex
	// fileServerTokenAndAddr is the secretToken|fileserverAddress
	fileServerTokenAndAddr string
	shares                 []*drive.Share
	children               map[string]*compositedav.Child
	userServers            map[string]*userServer
}

// SetFileServerAddr implements drive.FileSystemForRemote.
func (s *FileSystemForRemote) SetFileServerAddr(addr string) {
	s.mu.Lock()
	s.fileServerAddr = addr
	s.fileServerTokenAndAddr = addr
	s.mu.Unlock()
}
@ -113,11 +114,58 @@ func (s *FileSystemForRemote) SetShares(shares []*drive.Share) {
}

func (s *FileSystemForRemote) buildChild(share *drive.Share) *compositedav.Child {
	getTokenAndAddr := func(shareName string) (string, string, error) {
		s.mu.RLock()
		var share *drive.Share
		i, shareFound := slices.BinarySearchFunc(s.shares, shareName, func(s *drive.Share, name string) int {
			return strings.Compare(s.Name, name)
		})
		if shareFound {
			share = s.shares[i]
		}
		userServers := s.userServers
		fileServerTokenAndAddr := s.fileServerTokenAndAddr
		s.mu.RUnlock()

		if !shareFound {
			return "", "", fmt.Errorf("unknown share %v", shareName)
		}

		var tokenAndAddr string
		if !drive.AllowShareAs() {
			tokenAndAddr = fileServerTokenAndAddr
		} else {
			userServer, found := userServers[share.As]
			if found {
				userServer.mu.RLock()
				tokenAndAddr = userServer.tokenAndAddr
				userServer.mu.RUnlock()
			}
		}

		if tokenAndAddr == "" {
			return "", "", fmt.Errorf("unable to determine address for share %v", shareName)
		}

		parts := strings.Split(tokenAndAddr, "|")
		if len(parts) != 2 {
			return "", "", fmt.Errorf("invalid address for share %v", shareName)
		}

		return parts[0], parts[1], nil
	}

	return &compositedav.Child{
		Child: &dirfs.Child{
			Name: share.Name,
		},
		BaseURL: fmt.Sprintf("http://%v/%v", hex.EncodeToString([]byte(share.Name)), url.PathEscape(share.Name)),
		BaseURL: func() (string, error) {
			secretToken, _, err := getTokenAndAddr(share.Name)
			if err != nil {
				return "", err
			}
			return fmt.Sprintf("http://%s/%s/%s", hex.EncodeToString([]byte(share.Name)), secretToken, url.PathEscape(share.Name)), nil
		},
		Transport: &http.Transport{
			Dial: func(_, shareAddr string) (net.Conn, error) {
				shareNameHex, _, err := net.SplitHostPort(shareAddr)
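getTokenAndAddr's slices.BinarySearchFunc lookup assumes s.shares stays sorted by Name. A sketch of inserting while preserving that invariant (illustrative, not part of this change):

	i, found := slices.BinarySearchFunc(shares, newShare.Name, func(s *drive.Share, name string) int {
		return strings.Compare(s.Name, name)
	})
	if !found {
		shares = slices.Insert(shares, i, newShare) // keeps the slice sorted by Name
	}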
@ -132,36 +180,9 @@ func (s *FileSystemForRemote) buildChild(share *drive.Share) *compositedav.Child
	}
	shareName := string(shareNameBytes)

	s.mu.RLock()
	var share *drive.Share
	i, shareFound := slices.BinarySearchFunc(s.shares, shareName, func(s *drive.Share, name string) int {
		return strings.Compare(s.Name, name)
	})
	if shareFound {
		share = s.shares[i]
	}
	userServers := s.userServers
	fileServerAddr := s.fileServerAddr
	s.mu.RUnlock()

	if !shareFound {
		return nil, fmt.Errorf("unknown share %v", shareName)
	}

	var addr string
	if !drive.AllowShareAs() {
		addr = fileServerAddr
	} else {
		userServer, found := userServers[share.As]
		if found {
			userServer.mu.RLock()
			addr = userServer.addr
			userServer.mu.RUnlock()
		}
	}

	if addr == "" {
		return nil, fmt.Errorf("unable to determine address for share %v", shareName)
	_, addr, err := getTokenAndAddr(shareName)
	if err != nil {
		return nil, err
	}

	_, err = netip.ParseAddrPort(addr)
@ -253,10 +274,10 @@ type userServer struct {

	// mu guards the below values. Acquire a write lock before updating any of
	// them, acquire a read lock before reading any of them.
	mu     sync.RWMutex
	cmd    *exec.Cmd
	addr   string
	closed bool
	mu           sync.RWMutex
	cmd          *exec.Cmd
	tokenAndAddr string
	closed       bool
}

func (s *userServer) Close() error {
@ -366,7 +387,7 @@ func (s *userServer) run() error {
		}
	}()
	s.mu.Lock()
	s.addr = strings.TrimSpace(addr)
	s.tokenAndAddr = strings.TrimSpace(addr)
	s.mu.Unlock()
	return cmd.Wait()
}
@ -95,6 +95,9 @@ type FileSystemForRemote interface {
	// sandboxed where we can't spawn user-specific sub-processes and instead
	// rely on the UI application that's already running as an unprivileged
	// user to access the filesystem for us.
	//
	// Note that this includes both the file server's secret token and its
	// address, delimited by a pipe |.
	SetFileServerAddr(addr string)

	// SetShares sets the complete set of shares exposed by this node. If
go.mod

@ -371,7 +371,7 @@ require (
	k8s.io/component-base v0.29.1 // indirect
	k8s.io/klog/v2 v2.120.1 // indirect
	k8s.io/kube-openapi v0.0.0-20240117194847-208609032b15 // indirect
	k8s.io/utils v0.0.0-20240102154912-e7106e64919e // indirect
	k8s.io/utils v0.0.0-20240102154912-e7106e64919e
	mvdan.cc/gofumpt v0.5.0 // indirect
	mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed // indirect
	mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b // indirect
health/health.go

@ -23,6 +23,7 @@ import (
	"tailscale.com/util/mak"
	"tailscale.com/util/multierr"
	"tailscale.com/util/set"
	"tailscale.com/version"
)

var (

@ -72,6 +73,9 @@ type Tracker struct {
	watchers set.HandleSet[func(Subsystem, error)] // opt func to run if error state changes
	timer    *time.Timer

	latestVersion   *tailcfg.ClientVersion // or nil
	checkForUpdates bool

	inMapPoll          bool
	inMapPollSince     time.Time
	lastMapPollEndedAt time.Time
@ -543,7 +547,37 @@ func (t *Tracker) SetAuthRoutineInError(err error) {
	}
	t.mu.Lock()
	defer t.mu.Unlock()
	if err == nil && t.lastLoginErr == nil {
		return
	}
	t.lastLoginErr = err
	t.selfCheckLocked()
}

// SetLatestVersion records the latest version of the Tailscale client.
// v can be nil if unknown.
func (t *Tracker) SetLatestVersion(v *tailcfg.ClientVersion) {
	if t.nil() {
		return
	}
	t.mu.Lock()
	defer t.mu.Unlock()
	t.latestVersion = v
	t.selfCheckLocked()
}

// SetCheckForUpdates sets whether the client wants to check for updates.
func (t *Tracker) SetCheckForUpdates(v bool) {
	if t.nil() {
		return
	}
	t.mu.Lock()
	defer t.mu.Unlock()
	if t.checkForUpdates == v {
		return
	}
	t.checkForUpdates = v
	t.selfCheckLocked()
}

func (t *Tracker) timerSelfCheck() {
@ -567,6 +601,23 @@ func (t *Tracker) selfCheckLocked() {
	t.setLocked(SysOverall, t.overallErrorLocked())
}

// AppendWarnings appends all current health warnings to dst and returns the
// result.
func (t *Tracker) AppendWarnings(dst []string) []string {
	err := t.OverallError()
	if err == nil {
		return dst
	}
	if me, ok := err.(multierr.Error); ok {
		for _, err := range me.Errors() {
			dst = append(dst, err.Error())
		}
	} else {
		dst = append(dst, err.Error())
	}
	return dst
}

// OverallError returns a summary of the health state.
//
// If there are multiple problems, the error will be of type
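A minimal sketch of the caller side, which is how UpdateStatus in ipn/ipnlocal now consumes the tracker (the tracker variable is assumed):

	var warnings []string
	warnings = tracker.AppendWarnings(warnings)
	for _, w := range warnings {
		log.Printf("health: %s", w)
	}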
@ -609,42 +660,76 @@ var errNetworkDown = errors.New("network down")
var errNotInMapPoll = errors.New("not in map poll")
var errNoDERPHome = errors.New("no DERP home")
var errNoUDP4Bind = errors.New("no udp4 bind")
var errUnstable = errors.New("This is an unstable (development) version of Tailscale; frequent updates and bugs are likely")

func (t *Tracker) overallErrorLocked() error {
	var errs []error
	add := func(err error) {
		if err != nil {
			errs = append(errs, err)
		}
	}
	merged := func() error {
		return multierr.New(errs...)
	}

	if t.checkForUpdates {
		if cv := t.latestVersion; cv != nil && !cv.RunningLatest && cv.LatestVersion != "" {
			if cv.UrgentSecurityUpdate {
				add(fmt.Errorf("Security update available: %v -> %v, run `tailscale update` or `tailscale set --auto-update` to update", version.Short(), cv.LatestVersion))
			} else {
				add(fmt.Errorf("Update available: %v -> %v, run `tailscale update` or `tailscale set --auto-update` to update", version.Short(), cv.LatestVersion))
			}
		}
	}
	if version.IsUnstableBuild() {
		add(errUnstable)
	}

	if v, ok := t.anyInterfaceUp.Get(); ok && !v {
		return errNetworkDown
		add(errNetworkDown)
		return merged()
	}
	if t.localLogConfigErr != nil {
		return t.localLogConfigErr
		add(t.localLogConfigErr)
		return merged()
	}
	if !t.ipnWantRunning {
		return fmt.Errorf("state=%v, wantRunning=%v", t.ipnState, t.ipnWantRunning)
		add(fmt.Errorf("state=%v, wantRunning=%v", t.ipnState, t.ipnWantRunning))
		return merged()
	}
	if t.lastLoginErr != nil {
		return fmt.Errorf("not logged in, last login error=%v", t.lastLoginErr)
		add(fmt.Errorf("not logged in, last login error=%v", t.lastLoginErr))
		return merged()
	}
	now := time.Now()
	if !t.inMapPoll && (t.lastMapPollEndedAt.IsZero() || now.Sub(t.lastMapPollEndedAt) > 10*time.Second) {
		return errNotInMapPoll
		add(errNotInMapPoll)
		return merged()
	}
	const tooIdle = 2*time.Minute + 5*time.Second
	if d := now.Sub(t.lastStreamedMapResponse).Round(time.Second); d > tooIdle {
		return t.networkErrorfLocked("no map response in %v", d)
		add(t.networkErrorfLocked("no map response in %v", d))
		return merged()
	}
	if !t.derpHomeless {
		rid := t.derpHomeRegion
		if rid == 0 {
			return errNoDERPHome
			add(errNoDERPHome)
			return merged()
		}
		if !t.derpRegionConnected[rid] {
			return t.networkErrorfLocked("not connected to home DERP region %v", rid)
			add(t.networkErrorfLocked("not connected to home DERP region %v", rid))
			return merged()
		}
		if d := now.Sub(t.derpRegionLastFrame[rid]).Round(time.Second); d > tooIdle {
			return t.networkErrorfLocked("haven't heard from home DERP region %v in %v", rid, d)
			add(t.networkErrorfLocked("haven't heard from home DERP region %v in %v", rid, d))
			return merged()
		}
	}
	if t.udp4Unbound {
		return errNoUDP4Bind
		add(errNoUDP4Bind)
		return merged()
	}

	// TODO: use
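The accumulate-then-merge pattern above preserves the old early-return ordering while still surfacing the update and unstable-build warnings gathered first. The idea in miniature, assuming multierr.New returns nil when given no errors:

	var errs []error
	add := func(err error) {
		if err != nil {
			errs = append(errs, err)
		}
	}
	add(errors.New("update available"))
	add(errors.New("network down"))
	combined := multierr.New(errs...) // carries both messages; nil if nothing was added
	_ = combined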
@ -653,7 +738,6 @@ func (t *Tracker) overallErrorLocked() error {
	_ = t.lastStreamedMapResponse
	_ = t.lastMapRequestHeard

	var errs []error
	for i := range t.MagicSockReceiveFuncs {
		f := &t.MagicSockReceiveFuncs[i]
		if f.missing {
@ -312,7 +312,7 @@ func (b *LocalBackend) updateDrivePeersLocked(nm *netmap.NetworkMap) {
		driveRemotes = b.driveRemotesFromPeers(nm)
	}

	fs.SetRemotes(b.netMap.Domain, driveRemotes, &driveTransport{b: b})
	fs.SetRemotes(b.netMap.Domain, driveRemotes, b.newDriveTransport())
}

func (b *LocalBackend) driveRemotesFromPeers(nm *netmap.NetworkMap) []*drive.Remote {
@ -151,6 +151,12 @@ type watchSession struct {
	sessionID string
}

// lastSuggestedExitNode stores the last suggested exit node ID and name in the local backend.
type lastSuggestedExitNode struct {
	id   tailcfg.StableNodeID
	name string
}

// LocalBackend is the glue between the major pieces of the Tailscale
// network software: the cloud control plane (via controlclient), the
// network data plane (via wgengine), and the user-facing UIs and CLIs

@ -324,6 +330,10 @@ type LocalBackend struct {

	// outgoingFiles keeps track of Taildrop outgoing files keyed to their OutgoingFile.ID
	outgoingFiles map[string]*ipn.OutgoingFile

	// lastSuggestedExitNode stores the last suggested exit node ID and name.
	// lastSuggestedExitNode updates whenever the suggestion changes.
	lastSuggestedExitNode lastSuggestedExitNode
}

// HealthTracker returns the health tracker for the backend.
@ -364,7 +374,7 @@ func NewLocalBackend(logf logger.Logf, logID logid.PublicID, sys *tsd.System, lo
	if loginFlags&controlclient.LocalBackendStartKeyOSNeutral != 0 {
		goos = ""
	}
	pm, err := newProfileManagerWithGOOS(store, logf, goos)
	pm, err := newProfileManagerWithGOOS(store, logf, sys.HealthTracker(), goos)
	if err != nil {
		return nil, err
	}

@ -770,30 +780,14 @@ func (b *LocalBackend) UpdateStatus(sb *ipnstate.StatusBuilder) {
	s.AuthURL = b.authURLSticky
	if prefs := b.pm.CurrentPrefs(); prefs.Valid() && prefs.AutoUpdate().Check {
		s.ClientVersion = b.lastClientVersion
		if cv := b.lastClientVersion; cv != nil && !cv.RunningLatest && cv.LatestVersion != "" {
			if cv.UrgentSecurityUpdate {
				s.Health = append(s.Health, fmt.Sprintf("Security update available: %v -> %v, run `tailscale update` or `tailscale set --auto-update` to update", version.Short(), cv.LatestVersion))
			} else {
				s.Health = append(s.Health, fmt.Sprintf("Update available: %v -> %v, run `tailscale update` or `tailscale set --auto-update` to update", version.Short(), cv.LatestVersion))
			}
		}
	}
	if err := b.health.OverallError(); err != nil {
		switch e := err.(type) {
		case multierr.Error:
			for _, err := range e.Errors() {
				s.Health = append(s.Health, err.Error())
			}
		default:
			s.Health = append(s.Health, err.Error())
		}
	}
	s.Health = b.health.AppendWarnings(s.Health)

	// TODO(bradfitz): move this health check into a health.Warnable
	// and remove from here.
	if m := b.sshOnButUnusableHealthCheckMessageLocked(); m != "" {
		s.Health = append(s.Health, m)
	}
	if version.IsUnstableBuild() {
		s.Health = append(s.Health, "This is an unstable (development) version of Tailscale; frequent updates and bugs are likely")
	}
	if b.netMap != nil {
		s.CertDomains = append([]string(nil), b.netMap.DNS.CertDomains...)
		s.MagicDNSSuffix = b.netMap.MagicDNSSuffix()
@ -2517,6 +2511,7 @@ func (b *LocalBackend) tellClientToBrowseToURL(url string) {
func (b *LocalBackend) onClientVersion(v *tailcfg.ClientVersion) {
	b.mu.Lock()
	b.lastClientVersion = v
	b.health.SetLatestVersion(v)
	b.mu.Unlock()
	b.send(ipn.Notify{ClientVersion: v})
}

@ -3701,8 +3696,8 @@ func dnsConfigForNetmap(nm *netmap.NetworkMap, peers map[tailcfg.NodeID]tailcfg.
	}

	// selfV6Only is whether we only have IPv6 addresses ourselves.
	selfV6Only := views.SliceContainsFunc(nm.GetAddresses(), tsaddr.PrefixIs6) &&
		!views.SliceContainsFunc(nm.GetAddresses(), tsaddr.PrefixIs4)
	selfV6Only := nm.GetAddresses().ContainsFunc(tsaddr.PrefixIs6) &&
		!nm.GetAddresses().ContainsFunc(tsaddr.PrefixIs4)
	dcfg.OnlyIPv6 = selfV6Only

	// Populate MagicDNS records. We do this unconditionally so that
@ -4828,13 +4823,6 @@ func (b *LocalBackend) updatePeersFromNetmapLocked(nm *netmap.NetworkMap) {
	}
}

// driveTransport is an http.RoundTripper that uses the latest value of
// b.Dialer().PeerAPITransport() for each round trip and imposes a short
// dial timeout to avoid hanging on connecting to offline/unreachable hosts.
type driveTransport struct {
	b *LocalBackend
}

// responseBodyWrapper wraps an io.ReadCloser and stores
// the number of bytesRead.
type responseBodyWrapper struct {
@ -4888,6 +4876,20 @@ func (rbw *responseBodyWrapper) Close() error {
	return err
}

// driveTransport is an http.RoundTripper that wraps
// b.Dialer().PeerAPITransport() with metrics tracking.
type driveTransport struct {
	b  *LocalBackend
	tr *http.Transport
}

func (b *LocalBackend) newDriveTransport() *driveTransport {
	return &driveTransport{
		b:  b,
		tr: b.Dialer().PeerAPITransport(),
	}
}

func (dt *driveTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
	// Some WebDAV clients include origin and refer headers, which peerapi does
	// not like. Remove them.
@ -4945,18 +4947,7 @@ func (dt *driveTransport) RoundTrip(req *http.Request) (resp *http.Response, err
		}
	}()

	// dialTimeout is fairly aggressive to avoid hangs on contacting offline or
	// unreachable hosts.
	dialTimeout := 1 * time.Second // TODO(oxtoacart): tune this

	tr := dt.b.Dialer().PeerAPITransport().Clone()
	dialContext := tr.DialContext
	tr.DialContext = func(ctx context.Context, network, addr string) (net.Conn, error) {
		ctxWithTimeout, cancel := context.WithTimeout(ctx, dialTimeout)
		defer cancel()
		return dialContext(ctxWithTimeout, network, addr)
	}
	return tr.RoundTrip(req)
	return dt.tr.RoundTrip(req)
}

// roundTraffic rounds bytes. This is used to preserve user privacy within logs.
@ -5972,7 +5963,8 @@ func (b *LocalBackend) resetForProfileChangeLockedOnEntry(unlock unlockOnce) err
	}
	b.lastServeConfJSON = mem.B(nil)
	b.serveConfig = ipn.ServeConfigView{}
	b.enterStateLockedOnEntry(ipn.NoState, unlock) // Reset state; releases b.mu
	b.lastSuggestedExitNode = lastSuggestedExitNode{} // Reset last suggested exit node.
	b.enterStateLockedOnEntry(ipn.NoState, unlock)    // Reset state; releases b.mu
	b.health.SetLocalLogConfigHealth(nil)
	return b.Start(ipn.Options{})
}
@ -6349,6 +6341,7 @@ func mayDeref[T any](p *T) (v T) {

var ErrNoPreferredDERP = errors.New("no preferred DERP, try again later")
var ErrCannotSuggestExitNode = errors.New("unable to suggest an exit node, try again later")
var ErrUnableToSuggestLastExitNode = errors.New("unable to suggest last exit node")

// SuggestExitNode computes a suggestion based on the current netmap and last netcheck report. If
// there are multiple equally good options, one is selected at random, so the result is not stable. To be
@ -6361,13 +6354,41 @@ func (b *LocalBackend) SuggestExitNode() (response apitype.ExitNodeSuggestionRes
	b.mu.Lock()
	lastReport := b.MagicConn().GetLastNetcheckReport(b.ctx)
	netMap := b.netMap
	lastSuggestedExitNode := b.lastSuggestedExitNode
	b.mu.Unlock()
	if lastReport == nil || netMap == nil {
		return response, ErrCannotSuggestExitNode
		last, err := suggestLastExitNode(lastSuggestedExitNode)
		if err != nil {
			return response, ErrCannotSuggestExitNode
		}
		return last, err
	}
	seed := time.Now().UnixNano()
	r := rand.New(rand.NewSource(seed))
	return suggestExitNode(lastReport, netMap, r)
	res, err := suggestExitNode(lastReport, netMap, r)
	if err != nil {
		last, err := suggestLastExitNode(lastSuggestedExitNode)
		if err != nil {
			return response, ErrCannotSuggestExitNode
		}
		return last, err
	}
	b.mu.Lock()
	b.lastSuggestedExitNode.id = res.ID
	b.lastSuggestedExitNode.name = res.Name
	b.mu.Unlock()
	return res, err
}

// suggestLastExitNode formats a response with the last suggested exit node's ID and name.
// It is used as a fallback before returning an empty response and an error.
func suggestLastExitNode(lastSuggestedExitNode lastSuggestedExitNode) (res apitype.ExitNodeSuggestionResponse, err error) {
	if lastSuggestedExitNode.id != "" && lastSuggestedExitNode.name != "" {
		res.ID = lastSuggestedExitNode.id
		res.Name = lastSuggestedExitNode.name
		return res, nil
	}
	return res, ErrUnableToSuggestLastExitNode
}

func suggestExitNode(report *netcheck.Report, netMap *netmap.NetworkMap, r *rand.Rand) (res apitype.ExitNodeSuggestionResponse, err error) {
@ -25,10 +25,12 @@ import (
	"golang.org/x/net/dns/dnsmessage"
	"tailscale.com/appc"
	"tailscale.com/appc/appctest"
	"tailscale.com/client/tailscale/apitype"
	"tailscale.com/clientupdate"
	"tailscale.com/control/controlclient"
	"tailscale.com/drive"
	"tailscale.com/drive/driveimpl"
	"tailscale.com/health"
	"tailscale.com/ipn"
	"tailscale.com/ipn/store/mem"
	"tailscale.com/net/netcheck"

@ -1849,7 +1851,7 @@ func TestSetExitNodeIDPolicy(t *testing.T) {
	if test.prefs == nil {
		test.prefs = ipn.NewPrefs()
	}
	pm := must.Get(newProfileManager(new(mem.Store), t.Logf))
	pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker)))
	pm.prefs = test.prefs.View()
	b.netMap = test.nm
	b.pm = pm

@ -2131,7 +2133,7 @@ func TestApplySysPolicy(t *testing.T) {
		wantPrefs.ControlURL = ipn.DefaultControlURL
	}

	pm := must.Get(newProfileManager(new(mem.Store), t.Logf))
	pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker)))
	pm.prefs = usePrefs.View()

	b := newTestBackend(t)
@ -3436,6 +3438,357 @@ func TestMinLatencyDERPregion(t *testing.T) {
	}
}

func TestSuggestLastExitNode(t *testing.T) {
	tests := []struct {
		name                      string
		lastSuggestedExitNode     lastSuggestedExitNode
		wantRes                   apitype.ExitNodeSuggestionResponse
		wantLastSuggestedExitNode lastSuggestedExitNode
		wantErr                   error
	}{
		{
			name:                      "last suggested exit node is populated",
			lastSuggestedExitNode:     lastSuggestedExitNode{id: "test", name: "test"},
			wantRes:                   apitype.ExitNodeSuggestionResponse{ID: "test", Name: "test"},
			wantLastSuggestedExitNode: lastSuggestedExitNode{id: "test", name: "test"},
		},
		{
			name:    "last suggested exit node is not populated",
			wantErr: ErrUnableToSuggestLastExitNode,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			got, err := suggestLastExitNode(tt.lastSuggestedExitNode)
			if got != tt.wantRes || err != tt.wantErr {
				t.Errorf("got %v error %v, want %v error %v", got, err, tt.wantRes, tt.wantErr)
			}
		})
	}
}
func TestLocalBackendSuggestExitNode(t *testing.T) {
	tests := []struct {
		name                      string
		lastSuggestedExitNode     lastSuggestedExitNode
		report                    netcheck.Report
		netMap                    netmap.NetworkMap
		wantID                    tailcfg.StableNodeID
		wantName                  string
		wantErr                   error
		wantLastSuggestedExitNode lastSuggestedExitNode
	}{
		{
			name:                  "nil netmap, returns last suggested exit node",
			lastSuggestedExitNode: lastSuggestedExitNode{name: "test", id: "test"},
			report: netcheck.Report{
				RegionLatency: map[int]time.Duration{
					1: 0,
					2: -1,
					3: 0,
				},
			},
			wantID:                    "test",
			wantName:                  "test",
			wantLastSuggestedExitNode: lastSuggestedExitNode{name: "test", id: "test"},
		},
		{
			name:                  "nil report, returns last suggested exit node",
			lastSuggestedExitNode: lastSuggestedExitNode{name: "test", id: "test"},
			netMap: netmap.NetworkMap{
				SelfNode: (&tailcfg.Node{
					Addresses: []netip.Prefix{
						netip.MustParsePrefix("100.64.1.1/32"),
						netip.MustParsePrefix("fe70::1/128"),
					},
				}).View(),
				DERPMap: &tailcfg.DERPMap{
					Regions: map[int]*tailcfg.DERPRegion{
						1: {},
						2: {},
						3: {},
					},
				},
			},
			wantID:                    "test",
			wantName:                  "test",
			wantLastSuggestedExitNode: lastSuggestedExitNode{name: "test", id: "test"},
		},
		{
			name:                  "found better derp node, last suggested exit node updates",
			lastSuggestedExitNode: lastSuggestedExitNode{name: "test", id: "test"},
			report: netcheck.Report{
				RegionLatency: map[int]time.Duration{
					1: 10,
					2: 10,
					3: 5,
				},
				PreferredDERP: 1,
			},
			netMap: netmap.NetworkMap{
				SelfNode: (&tailcfg.Node{
					Addresses: []netip.Prefix{
						netip.MustParsePrefix("100.64.1.1/32"),
						netip.MustParsePrefix("fe70::1/128"),
					},
				}).View(),
				DERPMap: &tailcfg.DERPMap{
					Regions: map[int]*tailcfg.DERPRegion{
						1: {},
						2: {},
						3: {},
					},
				},
				Peers: []tailcfg.NodeView{
					(&tailcfg.Node{
						ID:       2,
						StableID: "test",
						Name:     "test",
						DERP:     "127.3.3.40:1",
						AllowedIPs: []netip.Prefix{
							netip.MustParsePrefix("0.0.0.0/0"), netip.MustParsePrefix("::/0"),
						},
						CapMap: (tailcfg.NodeCapMap)(map[tailcfg.NodeCapability][]tailcfg.RawMessage{
							tailcfg.NodeAttrSuggestExitNode: {},
						}),
					}).View(),
					(&tailcfg.Node{
						ID:       3,
						StableID: "foo",
						Name:     "foo",
						DERP:     "127.3.3.40:3",
						AllowedIPs: []netip.Prefix{
							netip.MustParsePrefix("0.0.0.0/0"), netip.MustParsePrefix("::/0"),
						},
						CapMap: (tailcfg.NodeCapMap)(map[tailcfg.NodeCapability][]tailcfg.RawMessage{
							tailcfg.NodeAttrSuggestExitNode: {},
						}),
					}).View(),
				},
			},
			wantID:                    "foo",
			wantName:                  "foo",
			wantLastSuggestedExitNode: lastSuggestedExitNode{name: "foo", id: "foo"},
		},
		{
			name:                  "found better mullvad node, last suggested exit node updates",
			lastSuggestedExitNode: lastSuggestedExitNode{name: "San Jose", id: "3"},
			report: netcheck.Report{
				RegionLatency: map[int]time.Duration{
					1: 0,
					2: 0,
					3: 0,
				},
				PreferredDERP: 1,
			},
			netMap: netmap.NetworkMap{
				SelfNode: (&tailcfg.Node{
					Addresses: []netip.Prefix{
						netip.MustParsePrefix("100.64.1.1/32"),
						netip.MustParsePrefix("fe70::1/128"),
					},
				}).View(),
				DERPMap: &tailcfg.DERPMap{
					Regions: map[int]*tailcfg.DERPRegion{
						1: {
							Latitude:  40.73061,
							Longitude: -73.935242,
						},
						2: {},
						3: {},
					},
				},
				Peers: []tailcfg.NodeView{
					(&tailcfg.Node{
						ID:       2,
						StableID: "2",
						AllowedIPs: []netip.Prefix{
							netip.MustParsePrefix("0.0.0.0/0"), netip.MustParsePrefix("::/0"),
						},
						Name: "Dallas",
						Hostinfo: (&tailcfg.Hostinfo{
							Location: &tailcfg.Location{
								Latitude:  32.89748,
								Longitude: -97.040443,
								Priority:  100,
							},
						}).View(),
						CapMap: (tailcfg.NodeCapMap)(map[tailcfg.NodeCapability][]tailcfg.RawMessage{
							tailcfg.NodeAttrSuggestExitNode: {},
						}),
					}).View(),
					(&tailcfg.Node{
						ID:       3,
						StableID: "3",
						AllowedIPs: []netip.Prefix{
							netip.MustParsePrefix("0.0.0.0/0"), netip.MustParsePrefix("::/0"),
						},
						Name: "San Jose",
						Hostinfo: (&tailcfg.Hostinfo{
							Location: &tailcfg.Location{
								Latitude:  37.3382082,
								Longitude: -121.8863286,
								Priority:  20,
							},
						}).View(),
						CapMap: (tailcfg.NodeCapMap)(map[tailcfg.NodeCapability][]tailcfg.RawMessage{
							tailcfg.NodeAttrSuggestExitNode: {},
						}),
					}).View(),
				},
			},
			wantID:                    "2",
			wantName:                  "Dallas",
			wantLastSuggestedExitNode: lastSuggestedExitNode{name: "Dallas", id: "2"},
		},
		{
			name:                  "ErrNoPreferredDERP, use last suggested exit node",
			lastSuggestedExitNode: lastSuggestedExitNode{name: "test", id: "test"},
			report: netcheck.Report{
				RegionLatency: map[int]time.Duration{
					1: 10,
					2: 10,
					3: 5,
				},
				PreferredDERP: 0,
			},
			netMap: netmap.NetworkMap{
				SelfNode: (&tailcfg.Node{
					Addresses: []netip.Prefix{
						netip.MustParsePrefix("100.64.1.1/32"),
						netip.MustParsePrefix("fe70::1/128"),
					},
				}).View(),
				DERPMap: &tailcfg.DERPMap{
					Regions: map[int]*tailcfg.DERPRegion{
						1: {},
						2: {},
						3: {},
					},
				},
				Peers: []tailcfg.NodeView{
					(&tailcfg.Node{
						ID:       2,
						StableID: "test",
						Name:     "test",
						DERP:     "127.3.3.40:1",
						AllowedIPs: []netip.Prefix{
							netip.MustParsePrefix("0.0.0.0/0"), netip.MustParsePrefix("::/0"),
						},
						CapMap: (tailcfg.NodeCapMap)(map[tailcfg.NodeCapability][]tailcfg.RawMessage{
							tailcfg.NodeAttrSuggestExitNode: {},
						}),
					}).View(),
					(&tailcfg.Node{
						ID:       3,
						StableID: "foo",
						Name:     "foo",
						DERP:     "127.3.3.40:3",
						AllowedIPs: []netip.Prefix{
							netip.MustParsePrefix("0.0.0.0/0"), netip.MustParsePrefix("::/0"),
						},
						CapMap: (tailcfg.NodeCapMap)(map[tailcfg.NodeCapability][]tailcfg.RawMessage{
							tailcfg.NodeAttrSuggestExitNode: {},
						}),
					}).View(),
				},
			},
			wantID:                    "test",
			wantName:                  "test",
			wantLastSuggestedExitNode: lastSuggestedExitNode{name: "test", id: "test"},
		},
		{
			name: "unable to use last suggested exit node",
			report: netcheck.Report{
				RegionLatency: map[int]time.Duration{
					1: 10,
					2: 10,
					3: 5,
				},
				PreferredDERP: 0,
			},
			wantErr: ErrCannotSuggestExitNode,
		},
	}

	for _, tt := range tests {
		lb := newTestLocalBackend(t)
		lb.lastSuggestedExitNode = tt.lastSuggestedExitNode
		lb.netMap = &tt.netMap
		lb.sys.MagicSock.Get().SetLastNetcheckReport(context.Background(), tt.report)
		got, err := lb.SuggestExitNode()
		if got.ID != tt.wantID {
			t.Errorf("ID=%v, want=%v", got.ID, tt.wantID)
		}
		if got.Name != tt.wantName {
			t.Errorf("Name=%v, want=%v", got.Name, tt.wantName)
		}
		if lb.lastSuggestedExitNode != tt.wantLastSuggestedExitNode {
			t.Errorf("lastSuggestedExitNode=%v, want=%v", lb.lastSuggestedExitNode, tt.wantLastSuggestedExitNode)
		}
		if err != tt.wantErr {
			t.Errorf("Error=%v, want=%v", err, tt.wantErr)
		}
	}
}

func TestEnableAutoUpdates(t *testing.T) {
	lb := newTestLocalBackend(t)
@ -17,6 +17,7 @@ import (

	"github.com/google/go-cmp/cmp"
	"tailscale.com/control/controlclient"
	"tailscale.com/health"
	"tailscale.com/hostinfo"
	"tailscale.com/ipn"
	"tailscale.com/ipn/store/mem"

@ -148,7 +149,7 @@ func TestTKAEnablementFlow(t *testing.T) {
	temp := t.TempDir()

	cc := fakeControlClient(t, client)
	pm := must.Get(newProfileManager(new(mem.Store), t.Logf))
	pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker)))
	must.Do(pm.SetPrefs((&ipn.Prefs{
		Persist: &persist.Persist{
			PrivateNodeKey: nodePriv,

@ -188,7 +189,7 @@ func TestTKADisablementFlow(t *testing.T) {
	nlPriv := key.NewNLPrivate()
	key := tka.Key{Kind: tka.Key25519, Public: nlPriv.Public().Verifier(), Votes: 2}

	pm := must.Get(newProfileManager(new(mem.Store), t.Logf))
	pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker)))
	must.Do(pm.SetPrefs((&ipn.Prefs{
		Persist: &persist.Persist{
			PrivateNodeKey: nodePriv,

@ -380,7 +381,7 @@ func TestTKASync(t *testing.T) {
		t.Run(tc.name, func(t *testing.T) {
			nodePriv := key.NewNode()
			nlPriv := key.NewNLPrivate()
			pm := must.Get(newProfileManager(new(mem.Store), t.Logf))
			pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker)))
			must.Do(pm.SetPrefs((&ipn.Prefs{
				Persist: &persist.Persist{
					PrivateNodeKey: nodePriv,

@ -602,7 +603,7 @@ func TestTKADisable(t *testing.T) {
	disablementSecret := bytes.Repeat([]byte{0xa5}, 32)
	nlPriv := key.NewNLPrivate()

	pm := must.Get(newProfileManager(new(mem.Store), t.Logf))
	pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker)))
	must.Do(pm.SetPrefs((&ipn.Prefs{
		Persist: &persist.Persist{
			PrivateNodeKey: nodePriv,

@ -693,7 +694,7 @@ func TestTKASign(t *testing.T) {
	toSign := key.NewNode()
	nlPriv := key.NewNLPrivate()

	pm := must.Get(newProfileManager(new(mem.Store), t.Logf))
	pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker)))
	must.Do(pm.SetPrefs((&ipn.Prefs{
		Persist: &persist.Persist{
			PrivateNodeKey: nodePriv,

@ -782,7 +783,7 @@ func TestTKAForceDisable(t *testing.T) {
	nlPriv := key.NewNLPrivate()
	key := tka.Key{Kind: tka.Key25519, Public: nlPriv.Public().Verifier(), Votes: 2}

	pm := must.Get(newProfileManager(new(mem.Store), t.Logf))
	pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker)))
	must.Do(pm.SetPrefs((&ipn.Prefs{
		Persist: &persist.Persist{
			PrivateNodeKey: nodePriv,

@ -877,7 +878,7 @@ func TestTKAAffectedSigs(t *testing.T) {
	// toSign := key.NewNode()
	nlPriv := key.NewNLPrivate()

	pm := must.Get(newProfileManager(new(mem.Store), t.Logf))
	pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker)))
	must.Do(pm.SetPrefs((&ipn.Prefs{
		Persist: &persist.Persist{
			PrivateNodeKey: nodePriv,

@ -1010,7 +1011,7 @@ func TestTKARecoverCompromisedKeyFlow(t *testing.T) {
	cosignPriv := key.NewNLPrivate()
	compromisedPriv := key.NewNLPrivate()

	pm := must.Get(newProfileManager(new(mem.Store), t.Logf))
	pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker)))
	must.Do(pm.SetPrefs((&ipn.Prefs{
		Persist: &persist.Persist{
			PrivateNodeKey: nodePriv,

@ -1101,7 +1102,7 @@ func TestTKARecoverCompromisedKeyFlow(t *testing.T) {

	// Cosign using the cosigning key.
	{
		pm := must.Get(newProfileManager(new(mem.Store), t.Logf))
		pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker)))
		must.Do(pm.SetPrefs((&ipn.Prefs{
			Persist: &persist.Persist{
				PrivateNodeKey: nodePriv,
@ -26,6 +26,7 @@ import (
	"tailscale.com/appc"
	"tailscale.com/appc/appctest"
	"tailscale.com/client/tailscale/apitype"
	"tailscale.com/health"
	"tailscale.com/ipn"
	"tailscale.com/ipn/store/mem"
	"tailscale.com/tailcfg"

@ -642,7 +643,7 @@ func TestPeerAPIReplyToDNSQueries(t *testing.T) {
	h.remoteAddr = netip.MustParseAddrPort("100.150.151.152:12345")

	eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0)
	pm := must.Get(newProfileManager(new(mem.Store), t.Logf))
	pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker)))
	h.ps = &peerAPIServer{
		b: &LocalBackend{
			e: eng,

@ -692,7 +693,7 @@ func TestPeerAPIPrettyReplyCNAME(t *testing.T) {
	h.remoteAddr = netip.MustParseAddrPort("100.150.151.152:12345")

	eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0)
	pm := must.Get(newProfileManager(new(mem.Store), t.Logf))
	pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker)))
	var a *appc.AppConnector
	if shouldStore {
		a = appc.NewAppConnector(t.Logf, &appctest.RouteCollector{}, &appc.RouteInfo{}, fakeStoreRoutes)

@ -764,7 +765,7 @@ func TestPeerAPIReplyToDNSQueriesAreObserved(t *testing.T) {

	rc := &appctest.RouteCollector{}
	eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0)
	pm := must.Get(newProfileManager(new(mem.Store), t.Logf))
	pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker)))
	var a *appc.AppConnector
	if shouldStore {
		a = appc.NewAppConnector(t.Logf, rc, &appc.RouteInfo{}, fakeStoreRoutes)

@ -827,7 +828,7 @@ func TestPeerAPIReplyToDNSQueriesAreObservedWithCNAMEFlattening(t *testing.T) {

	rc := &appctest.RouteCollector{}
	eng, _ := wgengine.NewFakeUserspaceEngine(logger.Discard, 0)
	pm := must.Get(newProfileManager(new(mem.Store), t.Logf))
	pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker)))
	var a *appc.AppConnector
	if shouldStore {
		a = appc.NewAppConnector(t.Logf, rc, &appc.RouteInfo{}, fakeStoreRoutes)
@@ -12,10 +12,10 @@ import (
 	"runtime"
 	"slices"
 	"strings"
-	"time"

 	"tailscale.com/clientupdate"
 	"tailscale.com/envknob"
+	"tailscale.com/health"
 	"tailscale.com/ipn"
 	"tailscale.com/types/logger"
 	"tailscale.com/util/clientmetric"

@@ -30,8 +30,9 @@ var debug = envknob.RegisterBool("TS_DEBUG_PROFILES")
 //
 // It is not safe for concurrent use.
 type profileManager struct {
-	store ipn.StateStore
-	logf  logger.Logf
+	store  ipn.StateStore
+	logf   logger.Logf
+	health *health.Tracker

 	currentUserID ipn.WindowsUserID
 	knownProfiles map[ipn.ProfileID]*ipn.LoginProfile // always non-nil

@@ -102,6 +103,7 @@ func (pm *profileManager) SetCurrentUserID(uid ipn.WindowsUserID) error {
 	}
 	pm.currentProfile = prof
 	pm.prefs = prefs
+	pm.updateHealth()
 	return nil
 }

@@ -198,10 +200,6 @@ func (pm *profileManager) Reset() {
 	pm.NewProfile()
 }

-func init() {
-	rand.Seed(time.Now().UnixNano())
-}
-
 // SetPrefs sets the current profile's prefs to the provided value.
 // It also saves the prefs to the StateStore. It stores a copy of the
 // provided prefs, which may be accessed via CurrentPrefs.

@@ -285,6 +283,7 @@ func newUnusedID
 // is not new.
 func (pm *profileManager) setPrefsLocked(clonedPrefs ipn.PrefsView) error {
 	pm.prefs = clonedPrefs
+	pm.updateHealth()
 	if pm.currentProfile.ID == "" {
 		return nil
 	}

@@ -336,6 +335,7 @@ func (pm *profileManager) SwitchProfile(id ipn.ProfileID) error {
 		return err
 	}
 	pm.prefs = prefs
+	pm.updateHealth()
 	pm.currentProfile = kp
 	return pm.setAsUserSelectedProfileLocked()
 }

@@ -443,12 +443,20 @@ func (pm *profileManager) writeKnownProfiles() error {
 	return pm.WriteState(ipn.KnownProfilesStateKey, b)
 }

+func (pm *profileManager) updateHealth() {
+	if !pm.prefs.Valid() {
+		return
+	}
+	pm.health.SetCheckForUpdates(pm.prefs.AutoUpdate().Check)
+}
+
 // NewProfile creates and switches to a new unnamed profile. The new profile is
 // not persisted until SetPrefs is called with a logged-in user.
 func (pm *profileManager) NewProfile() {
 	metricNewProfile.Add(1)

 	pm.prefs = defaultPrefs
+	pm.updateHealth()
 	pm.currentProfile = &ipn.LoginProfile{}
 }

@@ -477,7 +485,8 @@ func (pm *profileManager) CurrentPrefs() ipn.PrefsView {

 // ReadStartupPrefsForTest reads the startup prefs from disk. It is only used for testing.
 func ReadStartupPrefsForTest(logf logger.Logf, store ipn.StateStore) (ipn.PrefsView, error) {
-	pm, err := newProfileManager(store, logf)
+	ht := new(health.Tracker) // in tests, don't care about the health status
+	pm, err := newProfileManager(store, logf, ht)
 	if err != nil {
 		return ipn.PrefsView{}, err
 	}

@@ -486,8 +495,8 @@ func ReadStartupPrefsForTest

 // newProfileManager creates a new ProfileManager using the provided StateStore.
 // It also loads the list of known profiles from the StateStore.
-func newProfileManager(store ipn.StateStore, logf logger.Logf) (*profileManager, error) {
-	return newProfileManagerWithGOOS(store, logf, envknob.GOOS())
+func newProfileManager(store ipn.StateStore, logf logger.Logf, health *health.Tracker) (*profileManager, error) {
+	return newProfileManagerWithGOOS(store, logf, health, envknob.GOOS())
 }

 func readAutoStartKey(store ipn.StateStore, goos string) (ipn.StateKey, error) {

@@ -520,7 +529,7 @@ func readKnownProfiles
 	return knownProfiles, nil
 }

-func newProfileManagerWithGOOS(store ipn.StateStore, logf logger.Logf, goos string) (*profileManager, error) {
+func newProfileManagerWithGOOS(store ipn.StateStore, logf logger.Logf, ht *health.Tracker, goos string) (*profileManager, error) {
 	logf = logger.WithPrefix(logf, "pm: ")
 	stateKey, err := readAutoStartKey(store, goos)
 	if err != nil {

@@ -536,6 +545,7 @@ func newProfileManagerWithGOOS
 		store:         store,
 		knownProfiles: knownProfiles,
 		logf:          logf,
+		health:        ht,
 	}

 	if stateKey != "" {

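The hunks above thread a health.Tracker through profileManager and call updateHealth on every code path that mutates prefs (SetCurrentUserID, setPrefsLocked, SwitchProfile, NewProfile), so the tracker never goes stale. A minimal self-contained sketch of that pattern — the tracker and prefs types here are stand-ins for illustration, not the real tailscale.com/health API:

package main

import "fmt"

// tracker is a toy stand-in for health.Tracker.
type tracker struct{ checkForUpdates bool }

func (t *tracker) SetCheckForUpdates(v bool) { t.checkForUpdates = v }

type prefs struct{ autoUpdateCheck bool }

// manager mirrors the diff's shape: every prefs mutation ends with
// updateHealth, so health state is derived, never separately maintained.
type manager struct {
	prefs  *prefs
	health *tracker
}

func (m *manager) updateHealth() {
	if m.prefs == nil {
		return // mirrors the !pm.prefs.Valid() guard above
	}
	m.health.SetCheckForUpdates(m.prefs.autoUpdateCheck)
}

func (m *manager) setPrefs(p *prefs) {
	m.prefs = p
	m.updateHealth()
}

func main() {
	m := &manager{health: &tracker{}}
	m.setPrefs(&prefs{autoUpdateCheck: true})
	fmt.Println(m.health.checkForUpdates) // true
}
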
@@ -12,6 +12,7 @@ import (
 	"github.com/google/go-cmp/cmp"
 	"github.com/google/go-cmp/cmp/cmpopts"
 	"tailscale.com/clientupdate"
+	"tailscale.com/health"
 	"tailscale.com/ipn"
 	"tailscale.com/ipn/store/mem"
 	"tailscale.com/tailcfg"

@@ -24,7 +25,7 @@ func TestProfileCurrentUserSwitch(t *testing.T) {
 	store := new(mem.Store)

-	pm, err := newProfileManagerWithGOOS(store, logger.Discard, "linux")
+	pm, err := newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "linux")
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -61,7 +62,7 @@ func TestProfileCurrentUserSwitch(t *testing.T) {
 		t.Fatalf("CurrentPrefs() = %v, want emptyPrefs", pm.CurrentPrefs().Pretty())
 	}

-	pm, err = newProfileManagerWithGOOS(store, logger.Discard, "linux")
+	pm, err = newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "linux")
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -79,7 +80,7 @@ func TestProfileList(t *testing.T) {
 	store := new(mem.Store)

-	pm, err := newProfileManagerWithGOOS(store, logger.Discard, "linux")
+	pm, err := newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "linux")
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -283,7 +284,7 @@ func TestProfileDupe(t *testing.T) {
 	for _, tc := range tests {
 		t.Run(tc.name, func(t *testing.T) {
 			store := new(mem.Store)
-			pm, err := newProfileManagerWithGOOS(store, logger.Discard, "linux")
+			pm, err := newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "linux")
 			if err != nil {
 				t.Fatal(err)
 			}

@@ -316,7 +317,7 @@ func TestProfileManagement(t *testing.T) {
 	store := new(mem.Store)

-	pm, err := newProfileManagerWithGOOS(store, logger.Discard, "linux")
+	pm, err := newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "linux")
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -414,7 +415,7 @@ func TestProfileManagement(t *testing.T) {
 	t.Logf("Recreate profile manager from store")
 	// Recreate the profile manager to ensure that it can load the profiles
 	// from the store at startup.
-	pm, err = newProfileManagerWithGOOS(store, logger.Discard, "linux")
+	pm, err = newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "linux")
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -430,7 +431,7 @@ func TestProfileManagement(t *testing.T) {
 	t.Logf("Recreate profile manager from store after deleting default profile")
 	// Recreate the profile manager to ensure that it can load the profiles
 	// from the store at startup.
-	pm, err = newProfileManagerWithGOOS(store, logger.Discard, "linux")
+	pm, err = newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "linux")
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -472,7 +473,7 @@ func TestProfileManagement(t *testing.T) {
 		t.Fatal("SetPrefs failed to save auto-update setting")
 	}
 	// Re-load profiles to trigger migration for invalid auto-update value.
-	pm, err = newProfileManagerWithGOOS(store, logger.Discard, "linux")
+	pm, err = newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "linux")
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -494,7 +495,7 @@ func TestProfileManagementWindows(t *testing.T) {

 	store := new(mem.Store)

-	pm, err := newProfileManagerWithGOOS(store, logger.Discard, "windows")
+	pm, err := newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "windows")
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -565,7 +566,7 @@ func TestProfileManagementWindows(t *testing.T) {
 	t.Logf("Recreate profile manager from store, should reset prefs")
 	// Recreate the profile manager to ensure that it can load the profiles
 	// from the store at startup.
-	pm, err = newProfileManagerWithGOOS(store, logger.Discard, "windows")
+	pm, err = newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "windows")
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -590,7 +591,7 @@ func TestProfileManagementWindows(t *testing.T) {
 	}

 	// Recreate the profile manager to ensure that it starts with test profile.
-	pm, err = newProfileManagerWithGOOS(store, logger.Discard, "windows")
+	pm, err = newProfileManagerWithGOOS(store, logger.Discard, new(health.Tracker), "windows")
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -24,6 +24,7 @@ import (
 	"testing"
 	"time"

+	"tailscale.com/health"
 	"tailscale.com/ipn"
 	"tailscale.com/ipn/store/mem"
 	"tailscale.com/tailcfg"

@@ -686,7 +687,7 @@ func newTestBackend(t *testing.T) *LocalBackend {
 	dir := t.TempDir()
 	b.SetVarRoot(dir)

-	pm := must.Get(newProfileManager(new(mem.Store), logf))
+	pm := must.Get(newProfileManager(new(mem.Store), logf, new(health.Tracker)))
 	pm.currentProfile = &ipn.LoginProfile{ID: "id0"}
 	b.pm = pm

@@ -10,6 +10,7 @@ import (
 	"reflect"
 	"testing"

+	"tailscale.com/health"
 	"tailscale.com/ipn/store/mem"
 	"tailscale.com/tailcfg"
 	"tailscale.com/util/must"

@@ -49,7 +50,7 @@ type fakeSSHServer struct {
 }

 func TestGetSSHUsernames(t *testing.T) {
-	pm := must.Get(newProfileManager(new(mem.Store), t.Logf))
+	pm := must.Get(newProfileManager(new(mem.Store), t.Logf, new(health.Tracker)))
 	b := &LocalBackend{pm: pm, store: pm.Store()}
 	b.sshServer = fakeSSHServer{}
 	res, err := b.getSSHUsernames(new(tailcfg.C2NSSHUsernamesRequest))

@@ -427,7 +427,7 @@ ConnectorCondition contains condition information for a Connector.
       </td>
       <td>false</td>
     </tr><tr>
-      <td><b><a href="#dnsconfigstatusnameserverstatus">nameserverStatus</a></b></td>
+      <td><b><a href="#dnsconfigstatusnameserver">nameserver</a></b></td>
       <td>object</td>
       <td>
         <br/>

@@ -503,7 +503,7 @@ ConnectorCondition contains condition information for a Connector.
</table>


-### DNSConfig.status.nameserverStatus
+### DNSConfig.status.nameserver
<sup><sup>[↩ Parent](#dnsconfigstatus)</sup></sup>



@@ -603,7 +603,7 @@ Specification of the desired state of the ProxyClass resource.
       <td><b><a href="#proxyclassspecmetrics">metrics</a></b></td>
       <td>object</td>
       <td>
-        Configuration for proxy metrics. Metrics are currently not supported for egress proxies and for Ingress proxies that have been configured with tailscale.com/experimental-forward-cluster-traffic-via-ingress annotation.<br/>
+        Configuration for proxy metrics. Metrics are currently not supported for egress proxies and for Ingress proxies that have been configured with tailscale.com/experimental-forward-cluster-traffic-via-ingress annotation. Note that the metrics are currently considered unstable and will likely change in breaking ways in the future - we only recommend that you use those for debugging purposes.<br/>
       </td>
       <td>false</td>
     </tr><tr>

@@ -622,7 +622,7 @@ Specification of the desired state of the ProxyClass resource.



-Configuration for proxy metrics. Metrics are currently not supported for egress proxies and for Ingress proxies that have been configured with tailscale.com/experimental-forward-cluster-traffic-via-ingress annotation.
+Configuration for proxy metrics. Metrics are currently not supported for egress proxies and for Ingress proxies that have been configured with tailscale.com/experimental-forward-cluster-traffic-via-ingress annotation. Note that the metrics are currently considered unstable and will likely change in breaking ways in the future - we only recommend that you use those for debugging purposes.

<table>
<thead>

@@ -57,7 +57,9 @@ type ProxyClassSpec struct {
 	// Configuration for proxy metrics. Metrics are currently not supported
 	// for egress proxies and for Ingress proxies that have been configured
 	// with tailscale.com/experimental-forward-cluster-traffic-via-ingress
-	// annotation.
+	// annotation. Note that the metrics are currently considered unstable
+	// and will likely change in breaking ways in the future - we only
+	// recommend that you use those for debugging purposes.
 	// +optional
 	Metrics *Metrics `json:"metrics,omitempty"`
 }

@@ -17,7 +17,7 @@ var DNSConfigKind = "DNSConfig"
 // +kubebuilder:object:root=true
 // +kubebuilder:subresource:status
 // +kubebuilder:resource:scope=Cluster,shortName=dc
-// +kubebuilder:printcolumn:name="NameserverIP",type="string",JSONPath=`.status.nameserverStatus.ip`,description="Service IP address of the nameserver"
+// +kubebuilder:printcolumn:name="NameserverIP",type="string",JSONPath=`.status.nameserver.ip`,description="Service IP address of the nameserver"

 type DNSConfig struct {
 	metav1.TypeMeta `json:",inline"`

@@ -60,7 +60,7 @@ type DNSConfigStatus struct {
 	// +optional
 	Conditions []ConnectorCondition `json:"conditions"`
 	// +optional
-	NameserverStatus *NameserverStatus `json:"nameserverStatus"`
+	Nameserver *NameserverStatus `json:"nameserver"`
 }

 type NameserverStatus struct {

@@ -252,8 +252,8 @@ func (in *DNSConfigStatus) DeepCopyInto(out *DNSConfigStatus) {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
-	if in.NameserverStatus != nil {
-		in, out := &in.NameserverStatus, &out.NameserverStatus
+	if in.Nameserver != nil {
+		in, out := &in.Nameserver, &out.Nameserver
 		*out = new(NameserverStatus)
 		**out = **in
 	}

@@ -5,12 +5,18 @@

 package kube

-const Alpha1Version = "v1alpha1"
+const (
+	Alpha1Version = "v1alpha1"
+
+	DNSRecordsCMName = "dnsrecords"
+	DNSRecordsCMKey  = "records.json"
+)

 type Records struct {
 	// Version is the version of this Records configuration. Version is
-	// intended to be used by ./cmd/k8s-nameserver to determine whether it
-	// can read this records configuration.
+	// written by the operator, i.e when it first populates the Records.
+	// k8s-nameserver must verify that it knows how to parse a given
+	// version.
 	Version string `json:"version"`
 	// IP4 contains a mapping of DNS names to IPv4 address(es).
 	IP4 map[string][]string `json:"ip4"`

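For context, a hedged sketch of how these constants and the Records type fit together: the operator serializes Records as JSON under the "records.json" key of the "dnsrecords" ConfigMap, and the nameserver checks Version before parsing. That the operator stamps Version with Alpha1Version is an assumption here, as is the tailscale.com/kube import path; the ConfigMap plumbing itself is elided.

package main

import (
	"encoding/json"
	"fmt"

	"tailscale.com/kube"
)

func main() {
	rec := kube.Records{
		// Assumption: records are stamped with the v1alpha1 version string.
		Version: kube.Alpha1Version,
		IP4:     map[string][]string{"svc.example.ts.net": {"100.64.0.1"}},
	}
	b, err := json.Marshal(rec)
	if err != nil {
		panic(err)
	}
	// This payload would live in ConfigMap DNSRecordsCMName ("dnsrecords")
	// under key DNSRecordsCMKey ("records.json"), per the constants above.
	fmt.Printf("data[%q] = %s\n", kube.DNSRecordsCMKey, b)
}
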
@@ -859,7 +859,7 @@ func (f *forwarder) forwardWithDestChan
 	}
 	select {
 	case <-ctx.Done():
-		return ctx.Err()
+		return fmt.Errorf("waiting to send NXDOMAIN: %w", ctx.Err())
 	case responseChan <- res:
 		return nil
 	}

@@ -885,7 +885,7 @@ func (f *forwarder) forwardWithDestChan
 	}
 	select {
 	case <-ctx.Done():
-		return ctx.Err()
+		return fmt.Errorf("waiting to send SERVFAIL: %w", ctx.Err())
 	case responseChan <- res:
 		return nil
 	}

@@ -915,6 +915,7 @@ func (f *forwarder) forwardWithDestChan
 	}
 	resb, err := f.send(ctx, fq, *rr)
 	if err != nil {
+		err = fmt.Errorf("resolving using %q: %w", rr.name.Addr, err)
 		select {
 		case errc <- err:
 		case <-ctx.Done():

@@ -936,7 +937,7 @@ func (f *forwarder) forwardWithDestChan
 	select {
 	case <-ctx.Done():
 		metricDNSFwdErrorContext.Add(1)
-		return ctx.Err()
+		return fmt.Errorf("waiting to send response: %w", ctx.Err())
 	case responseChan <- packet{v, query.family, query.addr}:
 		metricDNSFwdSuccess.Add(1)
 		return nil

@@ -969,7 +970,16 @@ func (f *forwarder) forwardWithDestChan
 			metricDNSFwdErrorContextGotError.Add(1)
 			return firstErr
 		}
-		return ctx.Err()
+
+		// If we haven't got an error or a successful response,
+		// include all resolvers in the error message so we can
+		// at least see what servers we're trying to
+		// query.
+		var resolverAddrs []string
+		for _, rr := range resolvers {
+			resolverAddrs = append(resolverAddrs, rr.name.Addr)
+		}
+		return fmt.Errorf("waiting for response or error from %v: %w", resolverAddrs, ctx.Err())
 	}
 }
}

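The pattern in these hunks is general: rather than returning a bare ctx.Err(), wrap it with the operation that was blocked, so a "context deadline exceeded" log says which step timed out. A standalone illustration of the same idea (the channel and message names here are illustrative, not from the forwarder):

package main

import (
	"context"
	"fmt"
	"time"
)

// send mirrors the diff's approach: wrap the context error with what we
// were waiting on, so the caller can tell which wait gave up.
func send(ctx context.Context, ch chan<- string, msg string) error {
	select {
	case <-ctx.Done():
		return fmt.Errorf("waiting to send response: %w", ctx.Err())
	case ch <- msg:
		return nil
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()
	// Unbuffered channel with no reader: the send blocks until the deadline.
	err := send(ctx, make(chan string), "hello")
	fmt.Println(err) // waiting to send response: context deadline exceeded
}
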
@@ -21,6 +21,7 @@ import (
 	"sort"
 	"strings"
 	"sync"
+	"syscall"
 	"time"

 	"github.com/tcnksm/go-httpstat"

@@ -1176,7 +1177,7 @@ func (c *Client) measureHTTPSLatency

 	var ip netip.Addr

-	dc := derphttp.NewNetcheckClient(c.logf)
+	dc := derphttp.NewNetcheckClient(c.logf, c.NetMon)
 	defer dc.Close()

 	tlsConn, tcpConn, node, err := dc.DialRegionTLS(ctx, reg)

@@ -1257,9 +1258,9 @@ func (c *Client) measureAllICMPLatency
 	for _, reg := range need {
 		go func(reg *tailcfg.DERPRegion) {
 			defer wg.Done()
-			if d, err := c.measureICMPLatency(ctx, reg, p); err != nil {
+			if d, ok, err := c.measureICMPLatency(ctx, reg, p); err != nil {
 				c.logf("[v1] measuring ICMP latency of %v (%d): %v", reg.RegionCode, reg.RegionID, err)
-			} else {
+			} else if ok {
 				c.logf("[v1] ICMP latency of %v (%d): %v", reg.RegionCode, reg.RegionID, d)
 				rs.mu.Lock()
 				if l, ok := rs.report.RegionLatency[reg.RegionID]; !ok {

@@ -1281,9 +1282,9 @@ func (c *Client) measureAllICMPLatency
 	return nil
 }

-func (c *Client) measureICMPLatency(ctx context.Context, reg *tailcfg.DERPRegion, p *ping.Pinger) (time.Duration, error) {
+func (c *Client) measureICMPLatency(ctx context.Context, reg *tailcfg.DERPRegion, p *ping.Pinger) (_ time.Duration, ok bool, err error) {
 	if len(reg.Nodes) == 0 {
-		return 0, fmt.Errorf("no nodes for region %d (%v)", reg.RegionID, reg.RegionCode)
+		return 0, false, fmt.Errorf("no nodes for region %d (%v)", reg.RegionID, reg.RegionCode)
 	}

 	// Try pinging the first node in the region

@@ -1295,7 +1296,7 @@ func (c *Client) measureICMPLatency
 	// TODO(andrew-d): this is a bit ugly
 	nodeAddr := c.nodeAddr(ctx, node, probeIPv4)
 	if !nodeAddr.IsValid() {
-		return 0, fmt.Errorf("no address for node %v", node.Name)
+		return 0, false, fmt.Errorf("no address for node %v", node.Name)
 	}
 	addr := &net.IPAddr{
 		IP: net.IP(nodeAddr.Addr().AsSlice()),

@@ -1304,7 +1305,14 @@ func (c *Client) measureICMPLatency

 	// Use the unique node.Name field as the packet data to reduce the
 	// likelihood that we get a mismatched echo response.
-	return p.Send(ctx, addr, []byte(node.Name))
+	d, err := p.Send(ctx, addr, []byte(node.Name))
+	if err != nil {
+		if errors.Is(err, syscall.EPERM) {
+			return 0, false, nil
+		}
+		return 0, false, err
+	}
+	return d, true, nil
 }

 func (c *Client) logConciseReport(r *Report, dm *tailcfg.DERPMap) {

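The signature change above adopts a (duration, ok, error) triple: ok=false with a nil error means "couldn't measure, and that's expected" — here, EPERM on hosts that forbid unprivileged ICMP — so the caller logs latency only when ok is true instead of spamming errors. A self-contained sketch of the shape, with the ping call stubbed out:

package main

import (
	"errors"
	"fmt"
	"syscall"
	"time"
)

// ping is a stand-in for (*ping.Pinger).Send; an assumption for illustration.
func ping() (time.Duration, error) { return 0, syscall.EPERM }

// measure mirrors the new triple-return: EPERM is swallowed into ok=false,
// any other failure is still a real error.
func measure() (time.Duration, bool, error) {
	d, err := ping()
	if err != nil {
		if errors.Is(err, syscall.EPERM) {
			return 0, false, nil
		}
		return 0, false, err
	}
	return d, true, nil
}

func main() {
	if d, ok, err := measure(); err != nil {
		fmt.Println("error:", err)
	} else if ok {
		fmt.Println("latency:", d)
	} else {
		fmt.Println("ICMP not permitted; skipping")
	}
}
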
@@ -181,7 +181,7 @@ func NewContainsIPFunc(addrs views.Slice[netip.Prefix]) func(ip netip.Addr) bool {
 	// If any addr is more than a single IP, then just do the slow
 	// linear thing until
 	// https://github.com/inetaf/netaddr/issues/139 is done.
-	if views.SliceContainsFunc(addrs, func(p netip.Prefix) bool { return !p.IsSingleIP() }) {
+	if addrs.ContainsFunc(func(p netip.Prefix) bool { return !p.IsSingleIP() }) {
 		acopy := addrs.AsSlice()
 		return func(ip netip.Addr) bool {
 			for _, a := range acopy {

@@ -106,8 +106,8 @@ type Wrapper struct {
 	// timeNow, if non-nil, will be used to obtain the current time.
 	timeNow func() time.Time

-	// natConfig stores the current NAT configuration.
-	natConfig atomic.Pointer[natConfig]
+	// peerConfig stores the current NAT configuration.
+	peerConfig atomic.Pointer[peerConfig]

 	// vectorBuffer stores the oldest unconsumed packet vector from tdev. It is
 	// allocated in wrap() and the underlying arrays should never grow.

@@ -505,9 +505,9 @@ func (t *Wrapper) sendVectorOutbound(r tunVectorReadResult) {

 // snat does SNAT on p if the destination address requires a different source address.
 func (t *Wrapper) snat(p *packet.Parsed) {
-	nc := t.natConfig.Load()
+	pc := t.peerConfig.Load()
 	oldSrc := p.Src.Addr()
-	newSrc := nc.selectSrcIP(oldSrc, p.Dst.Addr())
+	newSrc := pc.selectSrcIP(oldSrc, p.Dst.Addr())
 	if oldSrc != newSrc {
 		checksum.UpdateSrcAddr(p, newSrc)
 	}

@@ -515,9 +515,9 @@ func (t *Wrapper) snat(p *packet.Parsed) {

 // dnat does destination NAT on p.
 func (t *Wrapper) dnat(p *packet.Parsed) {
-	nc := t.natConfig.Load()
+	pc := t.peerConfig.Load()
 	oldDst := p.Dst.Addr()
-	newDst := nc.mapDstIP(oldDst)
+	newDst := pc.mapDstIP(oldDst)
 	if newDst != oldDst {
 		checksum.UpdateDstAddr(p, newDst)
 	}

@@ -545,66 +545,19 @@ func findV6(addrs []netip.Prefix) netip.Addr {
 	return netip.Addr{}
 }

-// natConfig is the configuration for NAT.
+// peerConfig is the configuration for different peers.
 // It should be treated as immutable.
 //
 // The nil value is a valid configuration.
-type natConfig struct {
-	v4, v6 *natFamilyConfig
-}
-
-func (c *natConfig) String() string {
-	if c == nil {
-		return "<nil>"
-	}
-
-	var b strings.Builder
-	b.WriteString("natConfig{")
-	fmt.Fprintf(&b, "v4: %v, ", c.v4)
-	fmt.Fprintf(&b, "v6: %v", c.v6)
-	b.WriteString("}")
-	return b.String()
-}
-
-// mapDstIP returns the destination IP to use for a packet to dst.
-// If dst is not one of the listen addresses, it is returned as-is,
-// otherwise the native address is returned.
-func (c *natConfig) mapDstIP(oldDst netip.Addr) netip.Addr {
-	if c == nil {
-		return oldDst
-	}
-	if oldDst.Is4() {
-		return c.v4.mapDstIP(oldDst)
-	}
-	if oldDst.Is6() {
-		return c.v6.mapDstIP(oldDst)
-	}
-	return oldDst
-}
-
-// selectSrcIP returns the source IP to use for a packet to dst.
-// If the packet is not from the native address, it is returned as-is.
-func (c *natConfig) selectSrcIP(oldSrc, dst netip.Addr) netip.Addr {
-	if c == nil {
-		return oldSrc
-	}
-	if oldSrc.Is4() {
-		return c.v4.selectSrcIP(oldSrc, dst)
-	}
-	if oldSrc.Is6() {
-		return c.v6.selectSrcIP(oldSrc, dst)
-	}
-	return oldSrc
-}
-
-// natFamilyConfig is the NAT configuration for a particular
-// address family.
-// It should be treated as immutable.
-//
-// The nil value is a valid configuration.
-type natFamilyConfig struct {
-	// nativeAddr is the Tailscale Address of the current node.
-	nativeAddr netip.Addr
+type peerConfig struct {
+	// nativeAddr4 and nativeAddr6 are the IPv4/IPv6 Tailscale Addresses of
+	// the current node.
+	//
+	// These are implicitly used as the address to rewrite to in the DNAT
+	// path (as configured by listenAddrs, below). The IPv4 address will be
+	// used if the inbound packet is IPv4, and the IPv6 address if the
+	// inbound packet is IPv6.
+	nativeAddr4, nativeAddr6 netip.Addr

 	// listenAddrs is the set of addresses that should be
 	// mapped to the native address. These are the addresses that

@@ -620,13 +573,14 @@ type natFamilyConfig struct {
 	masqAddrCounts map[netip.Addr]int
 }

-func (c *natFamilyConfig) String() string {
+func (c *peerConfig) String() string {
 	if c == nil {
-		return "natFamilyConfig(nil)"
+		return "peerConfig(nil)"
 	}
 	var b strings.Builder
-	b.WriteString("natFamilyConfig{")
-	fmt.Fprintf(&b, "nativeAddr: %v, ", c.nativeAddr)
+	b.WriteString("peerConfig{")
+	fmt.Fprintf(&b, "nativeAddr4: %v, ", c.nativeAddr4)
+	fmt.Fprintf(&b, "nativeAddr6: %v, ", c.nativeAddr6)
 	fmt.Fprint(&b, "listenAddrs: [")

 	i := 0

@@ -656,23 +610,31 @@ func (c *natFamilyConfig) String() string {
 // mapDstIP returns the destination IP to use for a packet to dst.
 // If dst is not one of the listen addresses, it is returned as-is,
 // otherwise the native address is returned.
-func (c *natFamilyConfig) mapDstIP(oldDst netip.Addr) netip.Addr {
+func (c *peerConfig) mapDstIP(oldDst netip.Addr) netip.Addr {
 	if c == nil {
 		return oldDst
 	}
 	if _, ok := c.listenAddrs.GetOk(oldDst); ok {
-		return c.nativeAddr
+		if oldDst.Is4() && c.nativeAddr4.IsValid() {
+			return c.nativeAddr4
+		}
+		if oldDst.Is6() && c.nativeAddr6.IsValid() {
+			return c.nativeAddr6
+		}
 	}
 	return oldDst
 }

 // selectSrcIP returns the source IP to use for a packet to dst.
 // If the packet is not from the native address, it is returned as-is.
-func (c *natFamilyConfig) selectSrcIP(oldSrc, dst netip.Addr) netip.Addr {
+func (c *peerConfig) selectSrcIP(oldSrc, dst netip.Addr) netip.Addr {
 	if c == nil {
 		return oldSrc
 	}
-	if oldSrc != c.nativeAddr {
+	if oldSrc.Is4() && oldSrc != c.nativeAddr4 {
 		return oldSrc
 	}
+	if oldSrc.Is6() && oldSrc != c.nativeAddr6 {
+		return oldSrc
+	}
 	eip, ok := c.dstMasqAddrs.Get(dst)

@@ -682,22 +644,16 @@ func (c *natFamilyConfig) selectSrcIP(oldSrc, dst netip.Addr) netip.Addr {
 	return eip
 }

-// natConfigFromWGConfig generates a natFamilyConfig from nm,
-// for the indicated address family.
-// If NAT is not required for that address family, it returns nil.
-func natConfigFromWGConfig(wcfg *wgcfg.Config, addrFam ipproto.Version) *natFamilyConfig {
+// peerConfigFromWGConfig generates a peerConfig from nm. If NAT is not required,
+// and no additional configuration is present, it returns nil.
+func peerConfigFromWGConfig(wcfg *wgcfg.Config) *peerConfig {
 	if wcfg == nil {
 		return nil
 	}

-	var nativeAddr netip.Addr
-	switch addrFam {
-	case ipproto.Version4:
-		nativeAddr = findV4(wcfg.Addresses)
-	case ipproto.Version6:
-		nativeAddr = findV6(wcfg.Addresses)
-	}
-	if !nativeAddr.IsValid() {
+	nativeAddr4 := findV4(wcfg.Addresses)
+	nativeAddr6 := findV6(wcfg.Addresses)
+	if !nativeAddr4.IsValid() && !nativeAddr6.IsValid() {
 		return nil
 	}

@@ -714,10 +670,10 @@ func peerConfigFromWGConfig
 	for _, p := range wcfg.Peers {
 		isExitNode := slices.Contains(p.AllowedIPs, tsaddr.AllIPv4()) || slices.Contains(p.AllowedIPs, tsaddr.AllIPv6())
 		if isExitNode {
-			hasMasqAddrsForFamily := false ||
-				(addrFam == ipproto.Version4 && p.V4MasqAddr != nil && p.V4MasqAddr.IsValid()) ||
-				(addrFam == ipproto.Version6 && p.V6MasqAddr != nil && p.V6MasqAddr.IsValid())
-			if hasMasqAddrsForFamily {
+			hasMasqAddr := false ||
+				(p.V4MasqAddr != nil && p.V4MasqAddr.IsValid()) ||
+				(p.V6MasqAddr != nil && p.V6MasqAddr.IsValid())
+			if hasMasqAddr {
 				exitNodeRequiresMasq = true
 			}
 			break

@@ -725,29 +681,56 @@ func peerConfigFromWGConfig
 	}
 	for i := range wcfg.Peers {
 		p := &wcfg.Peers[i]
-		var addrToUse netip.Addr
-		if addrFam == ipproto.Version4 && p.V4MasqAddr != nil && p.V4MasqAddr.IsValid() {
-			addrToUse = *p.V4MasqAddr
-			mak.Set(&listenAddrs, addrToUse, struct{}{})
-		} else if addrFam == ipproto.Version6 && p.V6MasqAddr != nil && p.V6MasqAddr.IsValid() {
-			addrToUse = *p.V6MasqAddr
-			mak.Set(&listenAddrs, addrToUse, struct{}{})
-		} else if exitNodeRequiresMasq {
-			addrToUse = nativeAddr
-		} else {
+
+		// Build a routing table that configures DNAT (i.e. changing
+		// the V4MasqAddr/V6MasqAddr for a given peer to the current
+		// peer's v4/v6 IP).
+		var addrToUse4, addrToUse6 netip.Addr
+		if p.V4MasqAddr != nil && p.V4MasqAddr.IsValid() {
+			addrToUse4 = *p.V4MasqAddr
+			mak.Set(&listenAddrs, addrToUse4, struct{}{})
+			masqAddrCounts[addrToUse4]++
+		}
+		if p.V6MasqAddr != nil && p.V6MasqAddr.IsValid() {
+			addrToUse6 = *p.V6MasqAddr
+			mak.Set(&listenAddrs, addrToUse6, struct{}{})
+			masqAddrCounts[addrToUse6]++
+		}
+
+		// If the exit node requires masquerading, set the masquerade
+		// addresses to our native addresses.
+		if exitNodeRequiresMasq {
+			if !addrToUse4.IsValid() && nativeAddr4.IsValid() {
+				addrToUse4 = nativeAddr4
+			}
+			if !addrToUse6.IsValid() && nativeAddr6.IsValid() {
+				addrToUse6 = nativeAddr6
+			}
+		}
+
+		if !addrToUse4.IsValid() && !addrToUse6.IsValid() {
 			// NAT not required for this peer.
 			continue
 		}

-		masqAddrCounts[addrToUse]++
+		// Build the SNAT table that maps each AllowedIP to the
+		// masquerade address.
 		for _, ip := range p.AllowedIPs {
-			rt.Insert(ip, addrToUse)
+			is4 := ip.Addr().Is4()
+			if is4 && addrToUse4.IsValid() {
+				rt.Insert(ip, addrToUse4)
+			}
+			if !is4 && addrToUse6.IsValid() {
+				rt.Insert(ip, addrToUse6)
+			}
 		}
 	}
 	if len(listenAddrs) == 0 && len(masqAddrCounts) == 0 {
 		return nil
 	}
-	return &natFamilyConfig{
-		nativeAddr:  nativeAddr,
+	return &peerConfig{
+		nativeAddr4:    nativeAddr4,
+		nativeAddr6:    nativeAddr6,
 		listenAddrs:    views.MapOf(listenAddrs),
 		dstMasqAddrs:   &rt,
 		masqAddrCounts: masqAddrCounts,

@@ -756,15 +739,11 @@ func peerConfigFromWGConfig

 // SetNetMap is called when a new NetworkMap is received.
 func (t *Wrapper) SetWGConfig(wcfg *wgcfg.Config) {
-	v4, v6 := natConfigFromWGConfig(wcfg, ipproto.Version4), natConfigFromWGConfig(wcfg, ipproto.Version6)
-	var cfg *natConfig
-	if v4 != nil || v6 != nil {
-		cfg = &natConfig{v4: v4, v6: v6}
-	}
+	cfg := peerConfigFromWGConfig(wcfg)

-	old := t.natConfig.Swap(cfg)
+	old := t.peerConfig.Swap(cfg)
 	if !reflect.DeepEqual(old, cfg) {
-		t.logf("nat config: %v", cfg)
+		t.logf("peer config: %v", cfg)
 	}
 }

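To see why collapsing the per-family natFamilyConfig pair into a single peerConfig simplifies the DNAT path, here is a toy analog of the new mapDstIP: one lookup table plus a family-appropriate native address, with no v4/v6 dispatch layer in front. This illustrates the shape only — the real tstun code uses views.Map and an ART routing table rather than a plain Go map:

package main

import (
	"fmt"
	"net/netip"
)

// peerCfg is a toy stand-in for the diff's peerConfig: both native
// addresses live on one struct instead of two per-family configs.
type peerCfg struct {
	nativeAddr4, nativeAddr6 netip.Addr
	listen                   map[netip.Addr]bool
}

// mapDstIP mirrors the new DNAT logic: a listen address is rewritten to
// the native address of the matching family; anything else passes through.
func (c *peerCfg) mapDstIP(dst netip.Addr) netip.Addr {
	if c == nil || !c.listen[dst] {
		return dst
	}
	if dst.Is4() && c.nativeAddr4.IsValid() {
		return c.nativeAddr4
	}
	if dst.Is6() && c.nativeAddr6.IsValid() {
		return c.nativeAddr6
	}
	return dst
}

func main() {
	cfg := &peerCfg{
		nativeAddr4: netip.MustParseAddr("100.64.0.1"),
		nativeAddr6: netip.MustParseAddr("fd7a:115c:a1e0::1"),
		listen:      map[netip.Addr]bool{netip.MustParseAddr("100.99.1.1"): true},
	}
	fmt.Println(cfg.mapDstIP(netip.MustParseAddr("100.99.1.1"))) // 100.64.0.1
	fmt.Println(cfg.mapDstIP(netip.MustParseAddr("8.8.8.8")))    // unchanged
}
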
@@ -601,7 +601,9 @@ func TestFilterDiscoLoop(t *testing.T) {
 	}
 }

-func TestNATCfg(t *testing.T) {
+// TODO(andrew-d): refactor this test to no longer use addrFam, after #11945
+// removed it in peerConfigFromWGConfig
+func TestPeerCfg_NAT(t *testing.T) {
 	node := func(ip, masqIP netip.Addr, otherAllowedIPs ...netip.Prefix) wgcfg.Peer {
 		p := wgcfg.Peer{
 			PublicKey: key.NewNode().Public(),

@@ -800,19 +802,19 @@ func TestPeerCfg_NAT(t *testing.T) {

 	for _, tc := range tests {
 		t.Run(fmt.Sprintf("%v/%v", addrFam, tc.name), func(t *testing.T) {
-			ncfg := natConfigFromWGConfig(tc.wcfg, addrFam)
+			pcfg := peerConfigFromWGConfig(tc.wcfg)
 			for peer, want := range tc.snatMap {
-				if got := ncfg.selectSrcIP(selfNativeIP, peer); got != want {
+				if got := pcfg.selectSrcIP(selfNativeIP, peer); got != want {
 					t.Errorf("selectSrcIP[%v]: got %v; want %v", peer, got, want)
 				}
 			}
 			for dstIP, want := range tc.dnatMap {
-				if got := ncfg.mapDstIP(dstIP); got != want {
+				if got := pcfg.mapDstIP(dstIP); got != want {
 					t.Errorf("mapDstIP[%v]: got %v; want %v", dstIP, got, want)
 				}
 			}
 			if t.Failed() {
-				t.Logf("%v", ncfg)
+				t.Logf("%v", pcfg)
 			}
 		})
 	}

@@ -527,7 +527,7 @@ main() {
 			;;
 		apk)
 			set -x
-			if ! grep -Eq '^http.*\/community$' /etc/apk/repositories; then
+			if ! grep -Eq '^http.*/community$' /etc/apk/repositories; then
 				if type setup-apkrepos >/dev/null; then
 					$SUDO setup-apkrepos -c -1
 				else

@@ -49,9 +49,12 @@ var ptyName = func(f *os.File) (string, error) {
 	return "", fmt.Errorf("unimplemented")
 }

-// maybeStartLoginSession starts a new login session for the specified UID.
-// On success, it may return a non-nil close func which must be closed to
+// maybeStartLoginSession informs the system that we are about to log someone
+// in. On success, it may return a non-nil close func which must be closed to
 // release the session.
+// We can only do this if we are running as root.
+// This is best effort to still allow running on machines where
+// we don't support starting sessions, e.g. darwin.
 // See maybeStartLoginSessionLinux.
 var maybeStartLoginSession = func(logf logger.Logf, ia incubatorArgs) (close func() error, err error) {
 	return nil, nil

@@ -117,14 +120,13 @@ func (ss *sshSession) newIncubatorCommand(logf logger.Logf) (cmd *exec.Cmd) {
 		incubatorArgs = append(incubatorArgs, "--debug-test")
 	}

-	if isSFTP {
+	switch {
+	case isSFTP:
 		incubatorArgs = append(incubatorArgs, "--sftp")
-	} else {
-		if isShell {
-			incubatorArgs = append(incubatorArgs, "--shell")
-		} else {
-			incubatorArgs = append(incubatorArgs, "--cmd="+ss.RawCommand())
-		}
+	case isShell:
+		incubatorArgs = append(incubatorArgs, "--shell")
+	default:
+		incubatorArgs = append(incubatorArgs, "--cmd="+ss.RawCommand())
 	}

 	return exec.CommandContext(ss.ctx, ss.conn.srv.tailscaledPath, incubatorArgs...)

@@ -227,7 +229,7 @@ func beIncubator(args []string) error {
 			logf = log.New(sl, "", 0).Printf
 		}
 	} else if ia.debugTest {
-		// In testing, we don't always have syslog, log to a temp file
+		// In testing, we don't always have syslog, so log to a temp file.
 		if logFile, err := os.OpenFile("/tmp/tailscalessh.log", os.O_APPEND|os.O_WRONLY, 0666); err == nil {
 			lf := log.New(logFile, "", 0)
 			logf = func(msg string, args ...any) {

@@ -238,52 +240,51 @@ func beIncubator(args []string) error {
 		}
 	}

-	attemptLoginShell := shouldAttemptLoginShell(ia)
-	if !attemptLoginShell {
+	switch {
+	case ia.isSFTP:
+		return handleSFTPInProcess(logf, ia)
+	case !shouldAttemptLoginShell(ia):
 		logf("not attempting login shell")
-	} else if err := tryExecLogin(logf, ia); err != nil {
-		return err
-	}
+		return handleSSHInProcess(logf, ia)
+	default:
+		// First try the login command
+		if err := tryExecLogin(logf, ia); err != nil {
+			return err
+		}
+
+		// If we got here, we weren't able to use login (because tryExecLogin
+		// returned without replacing the running process), maybe we can use
+		// su.
+		if handled, err := trySU(logf, ia); handled {
+			return err
+		} else {
+			logf("not attempting su")
+			return handleSSHInProcess(logf, ia)
+		}
+	}
+}

+// handleSFTPInProcess serves SFTP connections.
+func handleSFTPInProcess(logf logger.Logf, ia incubatorArgs) error {
+	logf("handling sftp")
+
+	// In order to trigger PAM modules like pam_mkhomedir to run, call
+	// findSU, which as a side-effect will actually invoke the su command,
+	// which will trigger the creation of a homedir if so configured.
+	// Note - we won't actually be handling SFTP within a PAM session, so
+	// modules like pam_tty_audit won't work, only side-effecting modules
+	// like pam_mkhomedir will have an effect.
+	_, _ = findSU(logf, ia)
+
 	// Inform the system that we are about to log someone in.
 	// We can only do this if we are running as root.
 	// This is best effort to still allow running on machines where
 	// we don't support starting sessions, e.g. darwin.
 	sessionCloser, err := maybeStartLoginSession(logf, ia)
 	if err == nil && sessionCloser != nil {
 		defer sessionCloser()
 	}

-	if attemptLoginShell {
-		// If we got here, we weren't able to use login (because tryExecLogin returned without replacing the running process), maybe we can use su.
-		if handled, err := trySU(logf, ia); handled {
-			return err
-		} else {
-			logf("not attempting su")
-		}
-	}
-
-	// login and su didn't work, drop privileges and handle in-process.
 	if err := dropPrivileges(logf, ia); err != nil {
 		return err
 	}

-	if ia.isSFTP {
-		// Note - this does not trigger PAM authentication, so things like
-		// pam_mkhomedir won't work with FTP.
-		// TODO: it would be nice to make SFTP work with PAM, but that might
-		// require directly talking to the PAM API, which would require us to
-		// introduce CGO.
-		return handleSFTP(logf)
-	}
-
-	return handleSSHInProcess(logf, ia)
-}
-
+	return handleSFTP(logf)
+}
+
 // handleSFTP serves SFTP connections.
 func handleSFTP(logf logger.Logf) error {
 	logf("handling sftp")

 	server, err := sftp.NewServer(stdRWC{})
 	if err != nil {
 		return err

@@ -300,7 +301,6 @@ func handleSFTP(logf logger.Logf) error {
 // login shell with the login or su commands. We will attempt a login shell
 // if all of the following conditions are met.
 //
-// - This is not an sftp session
 // - We are running as root
 // - This is not an SELinuxEnforcing host
 //

@@ -310,7 +310,7 @@ func handleSFTP(logf logger.Logf) error {
 // the incubator to launch the shell.
 // See http://github.com/tailscale/tailscale/issues/4908.
 func shouldAttemptLoginShell(ia incubatorArgs) bool {
-	return !ia.isSFTP && runningAsRoot() && !hostinfo.IsSELinuxEnforcing()
+	return runningAsRoot() && !hostinfo.IsSELinuxEnforcing()
 }

 func runningAsRoot() bool {

@@ -378,32 +378,19 @@ func tryExecLogin(logf logger.Logf, ia incubatorArgs) error {
 // an su command which accepts the right flags, we'll use su instead of login
 // when no TTY is available.
 func trySU(logf logger.Logf, ia incubatorArgs) (bool, error) {
-	// Currently, we only support falling back to su on Linux. This
-	// potentially could work on BSDs as well, but requires testing.
-	if runtime.GOOS != "linux" {
+	su, found := findSU(logf, ia)
+	if !found {
 		return false, nil
 	}

-	su, err := exec.LookPath("su")
-	if err != nil {
-		logf("can't find su command: %v", err)
-		return false, nil
-	}
-
-	// First try to execute su -l <user> -c id to make sure su supports the
-	// necessary arguments.
-	err = exec.Command("su", "-l", ia.localUser, "-c", "id").Run()
-	if err != nil {
-		logf("su check failed: %s", err)
-		return false, nil
+	sessionCloser, err := maybeStartLoginSession(logf, ia)
+	if err == nil && sessionCloser != nil {
+		defer sessionCloser()
 	}

 	loginArgs := []string{"-l", ia.localUser}
-	if !ia.isShell && ia.cmd != "" {
-		// We only execute the requested command if we're not requesting a
-		// shell. When requesting a shell, the command is the requested shell,
-		// which is redundant because `su -l` will give the user their default
-		// shell.
+	if ia.cmd != "" {
 		// Note - unlike the login command, su allows using both -l and -c.
 		loginArgs = append(loginArgs, "-c", ia.cmd)
 	}

@@ -412,15 +399,51 @@ func trySU(logf logger.Logf, ia incubatorArgs) (bool, error) {
 	return true, cmd.Run()
 }

+// findSU attempts to find an su command which supports the -l and -c flags.
+// This actually calls the su command, which can cause side effects like
+// triggering pam_mkhomedir.
+func findSU(logf logger.Logf, ia incubatorArgs) (string, bool) {
+	// Currently, we only support falling back to su on Linux. This
+	// potentially could work on BSDs as well, but requires testing.
+	if runtime.GOOS != "linux" {
+		return "", false
+	}
+
+	su, err := exec.LookPath("su")
+	if err != nil {
+		logf("can't find su command: %v", err)
+		return "", false
+	}
+
+	// First try to execute su -l <user> -c id to make sure su supports the
+	// necessary arguments.
+	err = exec.Command(su, "-l", ia.localUser, "-c", "pwd").Run()
+	if err != nil {
+		logf("su check failed: %s", err)
+		return "", false
+	}
+
+	return su, true
+}
+
+// handleSSHInProcess is a last resort if we couldn't use login or su. It
+// registers a new session with the OS, sets its UID, GID and groups to the
+// specified values, and then launches the requested `--cmd` in the user's
+// login shell.
 func handleSSHInProcess(logf logger.Logf, ia incubatorArgs) error {
+	sessionCloser, err := maybeStartLoginSession(logf, ia)
+	if err == nil && sessionCloser != nil {
+		defer sessionCloser()
+	}
+
 	if err := dropPrivileges(logf, ia); err != nil {
 		return err
 	}

 	args := shellArgs(ia.isShell, ia.cmd)
 	logf("running %s %q", ia.loginShell, args)
 	cmd := newCommand(ia.hasTTY, ia.loginShell, args)
-	err := cmd.Run()
+	err = cmd.Run()
 	if ee, ok := err.(*exec.ExitError); ok {
 		ps := ee.ProcessState
 		code := ps.ExitCode()

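A standalone sketch of the probe that findSU performs, per the hunk above: locate su, then actually run `su -l <user> -c pwd` once — both to verify the flags are supported and, as the comments note, to let side-effecting PAM modules such as pam_mkhomedir fire. This is Linux-only and needs root to succeed for another user; the user name below is illustrative:

package main

import (
	"fmt"
	"os/exec"
	"runtime"
)

// findSU probes for a usable su. Calling it has side effects: the probe
// invocation can trigger PAM modules like pam_mkhomedir.
func findSU(user string) (string, bool) {
	if runtime.GOOS != "linux" {
		return "", false
	}
	su, err := exec.LookPath("su")
	if err != nil {
		return "", false
	}
	// Probe: exits zero only if this su accepts -l and -c.
	if err := exec.Command(su, "-l", user, "-c", "pwd").Run(); err != nil {
		return "", false
	}
	return su, true
}

func main() {
	if su, ok := findSU("testuser"); ok {
		fmt.Println("using", su)
	} else {
		fmt.Println("falling back to in-process handling")
	}
}
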
@@ -92,14 +92,6 @@ func TestIntegrationSSH(t *testing.T) {
 		homeDir = "/Users/testuser"
 	}

-	_, err := exec.LookPath("su")
-	suPresent := err == nil
-
-	// Some operating systems like Fedora seem to require login to be present
-	// in order for su to work.
-	_, err = exec.LookPath("login")
-	loginPresent := err == nil
-
 	tests := []struct {
 		cmd  string
 		want []string

@@ -112,12 +104,12 @@ func TestIntegrationSSH(t *testing.T) {
 		{
 			cmd:  "pwd",
 			want: []string{homeDir},
-			skip: runtime.GOOS != "linux" || !suPresent || !loginPresent,
+			skip: !fallbackToSUAvailable(),
 		},
 		{
 			cmd:  "echo 'hello'",
 			want: []string{"hello"},
-			skip: runtime.GOOS != "linux" || !suPresent || !loginPresent,
+			skip: !fallbackToSUAvailable(),
 		},
 	}

@@ -172,7 +164,10 @@ func TestIntegrationSFTP(t *testing.T) {
 		debugTest.Store(false)
 	})

-	filePath := "/tmp/sftptest.dat"
+	filePath := "/home/testuser/sftptest.dat"
+	if !fallbackToSUAvailable() {
+		filePath = "/tmp/sftptest.dat"
+	}
 	wantText := "hello world"

 	cl := testClient(t)

@@ -216,6 +211,22 @@ func TestIntegrationSFTP(t *testing.T) {
 	}
 }

+func fallbackToSUAvailable() bool {
+	if runtime.GOOS != "linux" {
+		return false
+	}
+
+	_, err := exec.LookPath("su")
+	if err != nil {
+		return false
+	}
+
+	// Some operating systems like Fedora seem to require login to be present
+	// in order for su to work.
+	_, err = exec.LookPath("login")
+	return err == nil
+}
+
 type session struct {
 	*ssh.Session

@@ -44,6 +44,7 @@ import (
 	"tailscale.com/types/logger"
 	"tailscale.com/types/logid"
 	"tailscale.com/types/netmap"
+	"tailscale.com/types/ptr"
 	"tailscale.com/util/cibuild"
 	"tailscale.com/util/lineread"
 	"tailscale.com/util/must"

@@ -87,7 +88,7 @@ func TestMatchRule(t *testing.T) {
 			name: "expired",
 			rule: &tailcfg.SSHRule{
 				Action:      someAction,
-				RuleExpires: timePtr(time.Unix(100, 0)),
+				RuleExpires: ptr.To(time.Unix(100, 0)),
 			},
 			ci:      &sshConnInfo{},
 			wantErr: errRuleExpired,

@@ -222,8 +223,6 @@ func TestMatchRule(t *testing.T) {
 	}
 }

-func timePtr(t time.Time) *time.Time { return &t }
-
 // localState implements ipnLocalBackend for testing.
 type localState struct {
 	sshEnabled bool

@@ -822,7 +821,7 @@ func TestSSHAuthFlow(t *testing.T) {
 func TestSSH(t *testing.T) {
 	var logf logger.Logf = t.Logf
 	sys := &tsd.System{}
-	eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set)
+	eng, err := wgengine.NewFakeUserspaceEngine(logf, sys.Set, sys.HealthTracker())
 	if err != nil {
 		t.Fatal(err)
 	}

@@ -16,23 +16,23 @@ RUN authconfig --enablemkhomedir --update || echo "might not be fedora"
 COPY . .

 RUN echo "First run tests normally."
-RUN TAILSCALED_PATH=`pwd`tailscaled ./tailssh.test -test.v -test.run TestIntegration
+RUN TAILSCALED_PATH=`pwd`tailscaled ./tailssh.test -test.v -test.run TestIntegrationSFTP TestIntegrationSSH

 RUN echo "Then run tests as non-root user testuser and make sure tests still pass."
 RUN chown testuser:groupone /tmp/tailscalessh.log
-RUN TAILSCALED_PATH=`pwd`tailscaled su -m testuser -c "./tailssh.test -test.v -test.run TestIntegration TestDoDropPrivileges"
+RUN TAILSCALED_PATH=`pwd`tailscaled su -m testuser -c "./tailssh.test -test.v -test.run TestIntegrationSFTP TestIntegrationSSH TestDoDropPrivileges"

 RUN echo "Then remove the login command and make sure tests still pass."
 RUN chown root:root /tmp/tailscalessh.log
 RUN rm `which login`
 RUN rm -Rf /home/testuser
-RUN TAILSCALED_PATH=`pwd`tailscaled ./tailssh.test -test.v -test.run TestIntegration
+RUN TAILSCALED_PATH=`pwd`tailscaled ./tailssh.test -test.v -test.run TestIntegrationSFTP TestIntegrationSSH

 RUN echo "Then remove the su command and make sure tests still pass."
 RUN chown root:root /tmp/tailscalessh.log
 RUN rm `which su`
 RUN rm -Rf /home/testuser
-RUN TAILSCALED_PATH=`pwd`tailscaled ./tailssh.test -test.v -test.run TestIntegration
+RUN TAILSCALED_PATH=`pwd`tailscaled ./tailssh.test -test.v -test.run TestIntegrationSFTP TestIntegrationSSH

 RUN echo "Test doDropPrivileges"
 RUN TAILSCALED_PATH=`pwd`tailscaled ./tailssh.test -test.v -test.run TestDoDropPrivileges

@@ -13,6 +13,7 @@ import (
 	"fmt"
 	"io"
 	"log"
+	"maps"
 	"math/rand"
 	"net/http"
 	"net/http/httptest"

@@ -914,7 +915,12 @@ func (s *Server) MapResponse
 		// node key rotated away (once test server supports that)
 		return nil, nil
 	}
-	node.CapMap = s.nodeCapMaps[nk]
+
+	s.mu.Lock()
+	nodeCapMap := maps.Clone(s.nodeCapMaps[nk])
+	s.mu.Unlock()
+
+	node.CapMap = nodeCapMap
 	node.Capabilities = append(node.Capabilities, tailcfg.NodeAttrDisableUPnP)

 	user, _ := s.getUser(nk)

@@ -0,0 +1,21 @@
+// Copyright (c) Tailscale Inc & AUTHORS
+// SPDX-License-Identifier: BSD-3-Clause
+
+// Package nettest contains additional test helpers related to network state
+// that can't go into tstest for circular dependency reasons.
+package nettest
+
+import (
+	"testing"
+
+	"tailscale.com/net/netmon"
+)
+
+// SkipIfNoNetwork skips the test if it looks like there's no network
+// access.
+func SkipIfNoNetwork(t testing.TB) {
+	nm := netmon.NewStatic()
+	if !nm.InterfaceState().AnyInterfaceUp() {
+		t.Skip("skipping; test requires network but no interface is up")
+	}
+}

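Usage is a one-liner at the top of any test that needs real network access, as the bootstrap-DNS test earlier in this changeset does. A hypothetical test for illustration:

package example

import (
	"net/http"
	"testing"

	"tailscale.com/tstest/nettest"
)

func TestNeedsNetwork(t *testing.T) {
	nettest.SkipIfNoNetwork(t) // skip instead of fail on airgapped machines

	resp, err := http.Get("https://example.com/")
	if err != nil {
		t.Fatal(err)
	}
	resp.Body.Close()
}
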
@@ -312,19 +312,18 @@ func (h retHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	}

 	var bucket string
-	bumpStartIfNeeded := func() {}
+	var startRecorded bool
 	if bs := h.opts.BucketedStats; bs != nil {
 		bucket = bs.bucketForRequest(r)
 		if bs.Started != nil {
 			switch v := bs.Started.Map.Get(bucket).(type) {
 			case *expvar.Int:
 				// If we've already seen this bucket before, count it immediately.
-				v.Add(1)
-			case nil:
 				// Otherwise, for newly seen paths, only count retroactively
 				// (so started-finished doesn't go negative) so we don't fill
 				// this LabelMap up with internet scanning spam.
-				bumpStartIfNeeded = func() { bs.Started.Add(bucket, 1) }
+				v.Add(1)
+				startRecorded = true
 			}
 		}
 	}

@@ -437,13 +436,18 @@ func (h retHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	}

 	if bs := h.opts.BucketedStats; bs != nil && bs.Finished != nil {
-		// Only increment metrics for buckets that result in good HTTP statuses.
+		// Only increment metrics for buckets that result in good HTTP statuses
+		// or when we know the start was already counted.
 		// Otherwise they get full of internet scanning noise. Only filtering 404
 		// gets most of the way there but there are also plenty of URLs that are
 		// almost right but result in 400s too. Seem easier to just only ignore
 		// all 4xx and 5xx.
-		if msg.Code < 400 {
-			bumpStartIfNeeded()
+		if startRecorded {
+			bs.Finished.Add(bucket, 1)
+		} else if msg.Code < 400 {
+			// This is the first non-error request for this bucket,
+			// so count it now retroactively.
+			bs.Started.Add(bucket, 1)
 			bs.Finished.Add(bucket, 1)
 		}
 	}

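The net effect of the two hunks is easier to see outside the handler plumbing. A toy model of the counting rule — previously seen buckets count immediately (even on error responses), never-seen buckets only mint counters retroactively on a non-error response, so scanners probing random paths never create counters:

package main

import "fmt"

type stats struct {
	started, finished map[string]int
}

// record applies the rule from the hunks above, with the start and finish
// phases collapsed into one call for brevity.
func (s *stats) record(bucket string, code int) {
	_, seen := s.started[bucket]
	if seen {
		s.started[bucket]++ // known bucket: count immediately
	}
	if seen {
		s.finished[bucket]++
	} else if code < 400 {
		// First non-error request for this bucket: count retroactively.
		s.started[bucket]++
		s.finished[bucket]++
	}
}

func main() {
	s := &stats{started: map[string]int{}, finished: map[string]int{}}
	s.record("/real", 200)   // new bucket, good status: counted retroactively
	s.record("/scan?x", 404) // new bucket, error status: never counted
	s.record("/real", 500)   // known bucket: counted even on error
	fmt.Println(s.started, s.finished) // map[/real:2] map[/real:2]
}
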
@@ -9,6 +9,7 @@ import (
 	"bytes"
 	"encoding/json"
 	"errors"
+	"fmt"
 	"maps"
 	"slices"

@@ -274,12 +275,15 @@ func (v Slice[T]) IndexFunc(f func(T) bool) int {
 //
 // As it runs in O(n) time, use with care.
 func (v Slice[T]) ContainsFunc(f func(T) bool) bool {
+	return slices.ContainsFunc(v.ж, f)
+}
+
+// AppendStrings appends the string representation of each element in v to dst.
+func AppendStrings[T fmt.Stringer](dst []string, v Slice[T]) []string {
 	for _, x := range v.ж {
-		if f(x) {
-			return true
-		}
+		dst = append(dst, x.String())
 	}
-	return false
+	return dst
 }

 // SliceContains reports whether v contains element e.

@@ -289,11 +293,6 @@ func SliceContains[T comparable](v Slice[T], e T) bool {
 	return slices.Contains(v.ж, e)
 }

-// SliceContainsFunc reports whether f reports true for any element in v.
-func SliceContainsFunc[T any](v Slice[T], f func(T) bool) bool {
-	return slices.ContainsFunc(v.ж, f)
-}
-
 // SliceEqual is like the standard library's slices.Equal, but for two views.
 func SliceEqual[T comparable](a, b Slice[T]) bool {
 	return slices.Equal(a.ж, b.ж)

@@ -124,8 +124,6 @@ func TestViewUtils(t *testing.T) {
 	c.Check(v.IndexFunc(func(s string) bool { return strings.HasPrefix(s, "z") }), qt.Equals, -1)
 	c.Check(SliceContains(v, "bar"), qt.Equals, true)
 	c.Check(SliceContains(v, "baz"), qt.Equals, false)
-	c.Check(SliceContainsFunc(v, func(s string) bool { return strings.HasPrefix(s, "f") }), qt.Equals, true)
-	c.Check(SliceContainsFunc(v, func(s string) bool { return len(s) > 3 }), qt.Equals, false)
 	c.Check(SliceEqualAnyOrder(v, v), qt.Equals, true)
 	c.Check(SliceEqualAnyOrder(v, SliceOf([]string{"bar", "foo"})), qt.Equals, true)
 	c.Check(SliceEqualAnyOrder(v, SliceOf([]string{"foo"})), qt.Equals, false)

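Call sites migrate from the package-level helper to the method, as the tsaddr hunk earlier showed. A small standalone usage:

package main

import (
	"fmt"
	"net/netip"

	"tailscale.com/types/views"
)

func main() {
	addrs := views.SliceOf([]netip.Prefix{
		netip.MustParsePrefix("100.64.0.1/32"),
		netip.MustParsePrefix("192.168.0.0/24"),
	})
	// Method form replaces views.SliceContainsFunc(addrs, f).
	hasNonSingle := addrs.ContainsFunc(func(p netip.Prefix) bool {
		return !p.IsSingleIP()
	})
	fmt.Println(hasNonSingle) // true: 192.168.0.0/24 covers more than one IP
}
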
@@ -14,6 +14,11 @@ type Set[T comparable] map[T]struct{}

 // SetOf returns a new set constructed from the elements in slice.
 func SetOf[T comparable](slice []T) Set[T] {
+	return Of(slice...)
+}
+
+// Of returns a new set constructed from the elements in slice.
+func Of[T comparable](slice ...T) Set[T] {
 	s := make(Set[T])
 	s.AddSlice(slice)
 	return s

@@ -41,6 +46,13 @@ func (s Set[T]) AddSet(es Set[T]) {
 	}
 }

+// Make lazily initializes the map pointed to by s to be non-nil.
+func (s *Set[T]) Make() {
+	if *s == nil {
+		*s = make(Set[T])
+	}
+}
+
 // Slice returns the elements of the set as a slice. The elements will not be
 // in any particular order.
 func (s Set[T]) Slice() []T {

@@ -53,7 +53,7 @@ func TestSet(t *testing.T) {
 }

 func TestSetOf(t *testing.T) {
-	s := SetOf[int]([]int{1, 2, 3, 4, 4, 1})
+	s := Of(1, 2, 3, 4, 4, 1)
 	if s.Len() != 4 {
 		t.Errorf("wrong len %d; want 4", s.Len())
 	}

@@ -74,20 +74,20 @@ func TestEqual(t *testing.T) {
 	tests := []test{
 		{
 			"equal",
-			SetOf([]int{1, 2, 3, 4}),
-			SetOf([]int{1, 2, 3, 4}),
+			Of(1, 2, 3, 4),
+			Of(1, 2, 3, 4),
 			true,
 		},
 		{
 			"not equal",
-			SetOf([]int{1, 2, 3, 4}),
-			SetOf([]int{1, 2, 3, 5}),
+			Of(1, 2, 3, 4),
+			Of(1, 2, 3, 5),
 			false,
 		},
 		{
 			"different lengths",
-			SetOf([]int{1, 2, 3, 4, 5}),
-			SetOf([]int{1, 2, 3, 5}),
+			Of(1, 2, 3, 4, 5),
+			Of(1, 2, 3, 5),
 			false,
 		},
 	}

@@ -100,7 +100,7 @@ func TestEqual(t *testing.T) {
 }

 func TestClone(t *testing.T) {
-	s := SetOf[int]([]int{1, 2, 3, 4, 4, 1})
+	s := Of(1, 2, 3, 4, 4, 1)
 	if s.Len() != 4 {
 		t.Errorf("wrong len %d; want 4", s.Len())
 	}

@@ -122,8 +122,8 @@ func TestSetJSONRoundTrip(t *testing.T) {
 	}{
 		{"empty", make(Set[string]), make(Set[int])},
 		{"nil", nil, nil},
-		{"one-item", SetOf([]string{"one"}), SetOf([]int{1})},
-		{"multiple-items", SetOf([]string{"one", "two", "three"}), SetOf([]int{1, 2, 3})},
+		{"one-item", Of("one"), Of(1)},
+		{"multiple-items", Of("one", "two", "three"), Of(1, 2, 3)},
 	}
 	for _, tt := range tests {
 		t.Run(tt.desc, func(t *testing.T) {

@@ -158,3 +158,12 @@ func TestSetJSONRoundTrip(t *testing.T) {
 		})
 	}
 }

+func TestMake(t *testing.T) {
+	var s Set[int]
+	s.Make()
+	s.Add(1)
+	if !s.Contains(1) {
+		t.Error("missing 1")
+	}
+}

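The variadic constructor and the lazy Make read naturally at call sites; a short standalone usage, with the tailscale.com/util/set import path as used by the tests above:

package main

import (
	"fmt"

	"tailscale.com/util/set"
)

func main() {
	s := set.Of(1, 2, 3, 4, 4, 1) // duplicates collapse; no []int{...} wrapper needed
	fmt.Println(s.Len())          // 4

	// Make initializes a nil map in place, so zero-value sets are usable.
	var lazy set.Set[string]
	lazy.Make()
	lazy.Add("hello")
	fmt.Println(lazy.Contains("hello")) // true
}
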
@@ -3088,3 +3088,9 @@ func (c *Conn) GetLastNetcheckReport(ctx context.Context) *netcheck.Report {
 	}
 	return lastReport
 }

+// SetLastNetcheckReport sets local backend's last netcheck report.
+// Used for testing purposes.
+func (c *Conn) SetLastNetcheckReport(ctx context.Context, report netcheck.Report) {
+	c.lastNetCheckReport.Store(&report)
+}

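The setter exists so tests can seed a Conn with a canned report instead of running a live netcheck. A hedged sketch — the PreferredDERP field comes from the netcheck package, the value 2 is invented, and construction of the Conn is elided:

package example

import (
	"context"
	"fmt"

	"tailscale.com/net/netcheck"
	"tailscale.com/wgengine/magicsock"
)

// seedNetcheck stores a canned report on c, so code paths that consult the
// last netcheck result can be exercised without touching the network.
func seedNetcheck(ctx context.Context, c *magicsock.Conn) {
	var report netcheck.Report
	report.PreferredDERP = 2 // assumption: pretend DERP region 2 measured fastest
	c.SetLastNetcheckReport(ctx, report)

	if got := c.GetLastNetcheckReport(ctx); got != nil {
		fmt.Println("preferred DERP:", got.PreferredDERP)
	}
}
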
@@ -93,7 +93,7 @@ var testClient *http.Client
// The IP protocol and source port are always zero.
// The sock is used to populate the PhysicalTraffic field in Message.
// The netMon parameter is optional; if non-nil it's used to do faster interface lookups.
func (nl *Logger) Startup(nodeID tailcfg.StableNodeID, nodeLogID, domainLogID logid.PrivateID, tun, sock Device, netMon *netmon.Monitor, health *health.Tracker) error {
func (nl *Logger) Startup(nodeID tailcfg.StableNodeID, nodeLogID, domainLogID logid.PrivateID, tun, sock Device, netMon *netmon.Monitor, health *health.Tracker, logExitFlowEnabled bool) error {
	nl.mu.Lock()
	defer nl.mu.Unlock()
	if nl.logger != nil {
@@ -131,7 +131,7 @@ func (nl *Logger) Startup(nodeID tailcfg.StableNodeID, nodeLogID, domainLogID lo
		addrs := nl.addrs
		prefixes := nl.prefixes
		nl.mu.Unlock()
		recordStatistics(nl.logger, nodeID, start, end, virtual, physical, addrs, prefixes)
		recordStatistics(nl.logger, nodeID, start, end, virtual, physical, addrs, prefixes, logExitFlowEnabled)
	})

	// Register the connection tracker into the TUN device.
@@ -151,7 +151,7 @@ func (nl *Logger) Startup(nodeID tailcfg.StableNodeID, nodeLogID, domainLogID lo
	return nil
}

func recordStatistics(logger *logtail.Logger, nodeID tailcfg.StableNodeID, start, end time.Time, connstats, sockStats map[netlogtype.Connection]netlogtype.Counts, addrs map[netip.Addr]bool, prefixes map[netip.Prefix]bool) {
func recordStatistics(logger *logtail.Logger, nodeID tailcfg.StableNodeID, start, end time.Time, connstats, sockStats map[netlogtype.Connection]netlogtype.Counts, addrs map[netip.Addr]bool, prefixes map[netip.Prefix]bool, logExitFlowEnabled bool) {
	m := netlogtype.Message{NodeID: nodeID, Start: start.UTC(), End: end.UTC()}

	classifyAddr := func(a netip.Addr) (isTailscale, withinRoute bool) {
@@ -180,7 +180,7 @@ func recordStatistics(logger *logtail.Logger, nodeID tailcfg.StableNodeID, start
			m.SubnetTraffic = append(m.SubnetTraffic, netlogtype.ConnectionCounts{Connection: conn, Counts: cnts})
		default:
			const anonymize = true
			if anonymize {
			if anonymize && !logExitFlowEnabled {
				// Only preserve the address if it is a Tailscale IP address.
				srcOrig, dstOrig := conn.Src, conn.Dst
				conn = netlogtype.Connection{} // scrub everything by default
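The behavioral core of this change is the scrubbing branch above: exit-node ("other") traffic is anonymized unless the tailnet has opted in to exit-flow logging. A standalone sketch of that decision, with the helper and address below invented for illustration:

package main

import (
	"fmt"
	"net/netip"
)

// scrubAddr models the diff's logic: keep a non-Tailscale peer address
// only when exit-flow logging is enabled; otherwise scrub it.
// isTailscaleAddr is a hypothetical stand-in for classifyAddr.
func scrubAddr(a netip.Addr, logExitFlowEnabled bool, isTailscaleAddr func(netip.Addr) bool) netip.Addr {
	const anonymize = true
	if anonymize && !logExitFlowEnabled && !isTailscaleAddr(a) {
		return netip.Addr{} // scrubbed
	}
	return a
}

func main() {
	pub := netip.MustParseAddr("203.0.113.7") // documentation-range address, not Tailscale
	isTS := func(a netip.Addr) bool { return false }
	fmt.Println(scrubAddr(pub, false, isTS)) // invalid IP: scrubbed
	fmt.Println(scrubAddr(pub, true, isTS))  // 203.0.113.7: preserved
}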
@@ -244,6 +244,8 @@ func NewFakeUserspaceEngine(logf logger.Logf, opts ...any) (Engine, error) {
			conf.SetSubsystem = v
		case *controlknobs.Knobs:
			conf.ControlKnobs = v
		case *health.Tracker:
			conf.HealthTracker = v
		default:
			return nil, fmt.Errorf("unknown option type %T", v)
		}
@@ -965,8 +967,9 @@ func (e *userspaceEngine) Reconfig(cfg *wgcfg.Config, routerCfg *router.Config,
	if netLogRunning && !e.networkLogger.Running() {
		nid := cfg.NetworkLogging.NodeID
		tid := cfg.NetworkLogging.DomainID
		logExitFlowEnabled := cfg.NetworkLogging.LogExitFlowEnabled
		e.logf("wgengine: Reconfig: starting up network logger (node:%s tailnet:%s)", nid.Public(), tid.Public())
		if err := e.networkLogger.Startup(cfg.NodeID, nid, tid, e.tundev, e.magicConn, e.netMon, e.health); err != nil {
		if err := e.networkLogger.Startup(cfg.NodeID, nid, tid, e.tundev, e.magicConn, e.netMon, e.health, logExitFlowEnabled); err != nil {
			e.logf("wgengine: Reconfig: error starting up network logger: %v", err)
		}
		e.networkLogger.ReconfigRoutes(routerCfg)
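NewFakeUserspaceEngine's option handling, extended above, is a plain type switch over variadic any values; the new case simply teaches it about *health.Tracker. A condensed sketch of the pattern (the types and fields here are illustrative, not the engine's real config):

package main

import "fmt"

type knobs struct{ debug bool }
type tracker struct{ name string }

type config struct {
	Knobs  *knobs
	Health *tracker
}

// newEngine dispatches each option by concrete type and rejects
// anything it does not recognize, mirroring the diff's switch.
func newEngine(opts ...any) (*config, error) {
	conf := &config{}
	for _, o := range opts {
		switch v := o.(type) {
		case *knobs:
			conf.Knobs = v
		case *tracker:
			conf.Health = v
		default:
			return nil, fmt.Errorf("unknown option type %T", v)
		}
	}
	return conf, nil
}

func main() {
	conf, err := newEngine(&knobs{debug: true}, &tracker{name: "ht"})
	fmt.Println(conf, err)
}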
@@ -27,9 +27,11 @@ type Config struct {

	// NetworkLogging enables network logging.
	// It is disabled if either ID is the zero value.
	// LogExitFlowEnabled indicates whether exit flows should be logged.
	NetworkLogging struct {
		NodeID   logid.PrivateID
		DomainID logid.PrivateID
		NodeID             logid.PrivateID
		DomainID           logid.PrivateID
		LogExitFlowEnabled bool
	}
}
@@ -63,6 +63,7 @@ func WGCfg(nm *netmap.NetworkMap, logf logger.Logf, flags netmap.WGConfigFlags,
	if nm.SelfNode.Valid() {
		cfg.NodeID = nm.SelfNode.StableID()
		canNetworkLog := nm.SelfNode.HasCap(tailcfg.CapabilityDataPlaneAuditLogs)
		logExitFlowEnabled := nm.SelfNode.HasCap(tailcfg.NodeAttrLogExitFlows)
		if canNetworkLog && nm.SelfNode.DataPlaneAuditLogID() != "" && nm.DomainAuditLogID != "" {
			nodeID, errNode := logid.ParsePrivateID(nm.SelfNode.DataPlaneAuditLogID())
			if errNode != nil {
@@ -75,6 +76,7 @@ func WGCfg(nm *netmap.NetworkMap, logf logger.Logf, flags netmap.WGConfigFlags,
		if errNode == nil && errDomain == nil {
			cfg.NetworkLogging.NodeID = nodeID
			cfg.NetworkLogging.DomainID = domainID
			cfg.NetworkLogging.LogExitFlowEnabled = logExitFlowEnabled
		}
	}
}
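This is where the flag's journey starts: a node capability advertised by the control plane becomes a boolean on wgcfg.Config, which Reconfig then hands to the network logger. A toy sketch of capability-gated configuration (the capability string and types are illustrative stand-ins, not tailcfg's real API):

package main

import "fmt"

// node models a self node whose capability set, normally sent by the
// control plane, flips feature booleans in derived config.
type node struct{ caps map[string]bool }

func (n node) HasCap(c string) bool { return n.caps[c] }

type netLogConfig struct{ LogExitFlowEnabled bool }

func main() {
	// "log-exit-flows" stands in for tailcfg.NodeAttrLogExitFlows.
	self := node{caps: map[string]bool{"log-exit-flows": true}}

	var cfg netLogConfig
	cfg.LogExitFlowEnabled = self.HasCap("log-exit-flows")
	fmt.Println(cfg.LogExitFlowEnabled) // true
}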
@@ -43,8 +43,9 @@ var _ConfigCloneNeedsRegeneration = Config(struct {
	DNS   []netip.Addr
	Peers []Peer
	NetworkLogging struct {
		NodeID   logid.PrivateID
		DomainID logid.PrivateID
		NodeID             logid.PrivateID
		DomainID           logid.PrivateID
		LogExitFlowEnabled bool
	}
}{})
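The hunk above is generated code doing its job: _ConfigCloneNeedsRegeneration is a compile-time tripwire that restates Config's fields, so adding a field without rerunning the generator fails the build. A minimal sketch of the same guard under assumed names:

package main

// Suppose Config gains fields over time and its Clone method lives in
// a generated file.
type Config struct {
	Name  string
	Count int
}

// The generated file restates Config's shape. If someone adds a field
// to Config without regenerating, this conversion stops compiling,
// because the anonymous struct no longer matches field for field.
var _ConfigCloneNeedsRegeneration = Config(struct {
	Name  string
	Count int
}{})

func main() {}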