// Copyright (c) 2020 Tailscale Inc & AUTHORS All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package controlclient

//go:generate go run tailscale.com/cmd/cloner -type=Persist -output=direct_clone.go

import (
	"bytes"
	"context"
	"crypto/rand"
	"encoding/binary"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net/http"
	"net/url"
	"os"
	"reflect"
	"runtime"
	"sort"
	"strconv"
	"strings"
	"sync"
	"sync/atomic"
	"time"

	"github.com/tailscale/wireguard-go/wgcfg"
	"golang.org/x/crypto/nacl/box"
	"golang.org/x/oauth2"
	"inet.af/netaddr"
	"tailscale.com/log/logheap"
	"tailscale.com/net/netns"
	"tailscale.com/net/tlsdial"
	"tailscale.com/net/tshttpproxy"
	"tailscale.com/tailcfg"
	"tailscale.com/types/logger"
	"tailscale.com/types/opt"
	"tailscale.com/types/structs"
	"tailscale.com/version"
)

// Persist is the state the client needs to remember across runs:
// the node's private keys and the logged-in user's identity.
type Persist struct {
	_ structs.Incomparable

	PrivateMachineKey wgcfg.PrivateKey
	PrivateNodeKey    wgcfg.PrivateKey
	OldPrivateNodeKey wgcfg.PrivateKey // needed to request key rotation
	Provider          string
	LoginName         string
}

// Equals reports whether p and p2 have the same keys and login identity.
func (p *Persist) Equals(p2 *Persist) bool {
	if p == nil && p2 == nil {
		return true
	}
	if p == nil || p2 == nil {
		return false
	}

	return p.PrivateMachineKey.Equal(p2.PrivateMachineKey) &&
		p.PrivateNodeKey.Equal(p2.PrivateNodeKey) &&
		p.OldPrivateNodeKey.Equal(p2.OldPrivateNodeKey) &&
		p.Provider == p2.Provider &&
		p.LoginName == p2.LoginName
}

// Pretty returns a short, log-friendly summary of p, showing only the
// public halves of the keys.
func (p *Persist) Pretty() string {
	var mk, ok, nk wgcfg.Key
	if !p.PrivateMachineKey.IsZero() {
		mk = p.PrivateMachineKey.Public()
	}
	if !p.OldPrivateNodeKey.IsZero() {
		ok = p.OldPrivateNodeKey.Public()
	}
	if !p.PrivateNodeKey.IsZero() {
		nk = p.PrivateNodeKey.Public()
	}
	return fmt.Sprintf("Persist{m=%v, o=%v, n=%v u=%#v}",
		mk.ShortString(), ok.ShortString(), nk.ShortString(),
		p.LoginName)
}

// Direct is the client that connects to a tailcontrol server for a node.
type Direct struct {
	httpc           *http.Client // HTTP client used to talk to tailcontrol
	serverURL       string       // URL of the tailcontrol server
	timeNow         func() time.Time
	lastPrintMap    time.Time
	newDecompressor func() (Decompressor, error)
	keepAlive       bool
	logf            logger.Logf
	discoPubKey     tailcfg.DiscoKey

	mu           sync.Mutex // mutex guards the following fields
	serverKey    wgcfg.Key
	persist      Persist
	authKey      string
	tryingNewKey wgcfg.PrivateKey
	expiry       *time.Time
	// hostinfo is mutated in-place while mu is held.
	hostinfo  *tailcfg.Hostinfo // always non-nil
	endpoints []string
	localPort uint16 // or zero to mean auto
}

// Options contains the configuration used to create a Direct client
// via NewDirect.
type Options struct {
	Persist         Persist           // initial persistent data
	ServerURL       string            // URL of the tailcontrol server
	AuthKey         string            // optional node auth key for auto registration
	TimeNow         func() time.Time  // time.Now implementation used by Client
	Hostinfo        *tailcfg.Hostinfo // non-nil passes ownership; nil means to use defaults based on os.Hostname, etc.
	DiscoPublicKey  tailcfg.DiscoKey
	NewDecompressor func() (Decompressor, error)
	KeepAlive       bool
	Logf            logger.Logf
	HTTPTestClient  *http.Client // optional HTTP client to use (for tests only)
}

// Decompressor is the interface used to decompress compressed map
// responses, as optionally supplied via Options.NewDecompressor.
type Decompressor interface {
	DecodeAll(input, dst []byte) ([]byte, error)
	Close()
}

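// The Decompressor interface above is shaped to match a zstd decoder. A
// minimal sketch of wiring one in, assuming the
// github.com/klauspost/compress/zstd package (not imported by this file):
//
//	NewDecompressor: func() (Decompressor, error) {
//		return zstd.NewReader(nil) // *zstd.Decoder provides DecodeAll and Close
//	},
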
// NewDirect returns a new Direct client.
func NewDirect(opts Options) (*Direct, error) {
	if opts.ServerURL == "" {
		return nil, errors.New("controlclient.New: no server URL specified")
	}
	opts.ServerURL = strings.TrimRight(opts.ServerURL, "/")
	serverURL, err := url.Parse(opts.ServerURL)
	if err != nil {
		return nil, err
	}
	if opts.TimeNow == nil {
		opts.TimeNow = time.Now
	}
	if opts.Logf == nil {
		// TODO(apenwarr): remove this default and fail instead.
		// TODO(bradfitz): ... but then it shouldn't be in Options.
		opts.Logf = log.Printf
	}

	httpc := opts.HTTPTestClient
	if httpc == nil {
		dialer := netns.NewDialer()
		tr := http.DefaultTransport.(*http.Transport).Clone()
		tr.Proxy = tshttpproxy.ProxyFromEnvironment
		tshttpproxy.SetTransportGetProxyConnectHeader(tr)
		tr.DialContext = dialer.DialContext
		tr.ForceAttemptHTTP2 = true
		tr.TLSClientConfig = tlsdial.Config(serverURL.Host, tr.TLSClientConfig)
		httpc = &http.Client{Transport: tr}
	}

	c := &Direct{
		httpc:           httpc,
		serverURL:       opts.ServerURL,
		timeNow:         opts.TimeNow,
		logf:            opts.Logf,
		newDecompressor: opts.NewDecompressor,
		keepAlive:       opts.KeepAlive,
		persist:         opts.Persist,
		authKey:         opts.AuthKey,
		discoPubKey:     opts.DiscoPublicKey,
	}
	if opts.Hostinfo == nil {
		c.SetHostinfo(NewHostinfo())
	} else {
		c.SetHostinfo(opts.Hostinfo)
	}
	return c, nil
}

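// A minimal caller-side construction sketch (the server URL is the usual
// default control URL and the logger is a placeholder):
//
//	c, err := NewDirect(Options{
//		ServerURL: "https://login.tailscale.com",
//		Logf:      log.Printf,
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	// c.TryLogin and c.PollNetMap can then drive the control connection.
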
var osVersion func() string // non-nil on some platforms

// NewHostinfo returns a Hostinfo populated with this machine's hostname,
// OS, OS version, and architecture.
func NewHostinfo() *tailcfg.Hostinfo {
	hostname, _ := os.Hostname()
	var osv string
	if osVersion != nil {
		osv = osVersion()
	}
	return &tailcfg.Hostinfo{
		IPNVersion: version.LONG,
		Hostname:   hostname,
		OS:         version.OS(),
		OSVersion:  osv,
		GoArch:     runtime.GOARCH,
	}
}

// SetHostinfo clones the provided Hostinfo and remembers it for the
// next update. It reports whether the Hostinfo has changed.
func (c *Direct) SetHostinfo(hi *tailcfg.Hostinfo) bool {
	if hi == nil {
		panic("nil Hostinfo")
	}
	c.mu.Lock()
	defer c.mu.Unlock()

	if hi.Equal(c.hostinfo) {
		return false
	}
	c.hostinfo = hi.Clone()
	return true
}

// SetNetInfo clones the provided NetInfo and remembers it for the
// next update. It reports whether the NetInfo has changed.
func (c *Direct) SetNetInfo(ni *tailcfg.NetInfo) bool {
	if ni == nil {
		panic("nil NetInfo")
	}
	c.mu.Lock()
	defer c.mu.Unlock()

	if c.hostinfo == nil {
		c.logf("[unexpected] SetNetInfo called with no HostInfo; ignoring NetInfo update: %+v", ni)
		return false
	}
	if reflect.DeepEqual(ni, c.hostinfo.NetInfo) {
		return false
	}
	c.hostinfo.NetInfo = ni.Clone()
	return true
}

// GetPersist returns a copy of the client's current Persist state.
func (c *Direct) GetPersist() Persist {
	c.mu.Lock()
	defer c.mu.Unlock()
	return c.persist
}

type LoginFlags int

const (
	LoginDefault     = LoginFlags(0)
	LoginInteractive = LoginFlags(1 << iota) // force user login and key refresh
)

func (c *Direct) TryLogout(ctx context.Context) error {
	c.logf("direct.TryLogout()")

	c.mu.Lock()
	defer c.mu.Unlock()

	// TODO(crawshaw): Tell the server. This node key should be
	// immediately invalidated.
	//if c.persist.PrivateNodeKey != (wgcfg.PrivateKey{}) {
	//}
	c.persist = Persist{
		PrivateMachineKey: c.persist.PrivateMachineKey,
	}
	return nil
}

func (c *Direct) TryLogin(ctx context.Context, t *oauth2.Token, flags LoginFlags) (url string, err error) {
	c.logf("direct.TryLogin(%v, %v)", t != nil, flags)
	return c.doLoginOrRegen(ctx, t, flags, false, "")
}

func (c *Direct) WaitLoginURL(ctx context.Context, url string) (newUrl string, err error) {
	c.logf("direct.WaitLoginURL")
	return c.doLoginOrRegen(ctx, nil, LoginDefault, false, url)
}

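// A rough sketch of the interactive login flow built on the two methods above:
// TryLogin returns a non-empty URL when the server wants the user to
// authenticate in a browser, and WaitLoginURL then follows up on that same URL
// until the login completes (error handling elided):
//
//	url, _ := c.TryLogin(ctx, nil, LoginInteractive)
//	if url != "" {
//		// Show url to the user, then wait for them to finish:
//		_, _ = c.WaitLoginURL(ctx, url)
//	}
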
func (c *Direct) doLoginOrRegen(ctx context.Context, t *oauth2.Token, flags LoginFlags, regen bool, url string) (newUrl string, err error) {
	mustregen, url, err := c.doLogin(ctx, t, flags, regen, url)
	if err != nil {
		return url, err
	}
	if mustregen {
		_, url, err = c.doLogin(ctx, t, flags, true, url)
	}
	return url, err
}

func (c *Direct) doLogin(ctx context.Context, t *oauth2.Token, flags LoginFlags, regen bool, url string) (mustregen bool, newurl string, err error) {
	c.mu.Lock()
	persist := c.persist
	tryingNewKey := c.tryingNewKey
	serverKey := c.serverKey
	authKey := c.authKey
	hostinfo := c.hostinfo.Clone()
	backendLogID := hostinfo.BackendLogID
	expired := c.expiry != nil && !c.expiry.IsZero() && c.expiry.Before(c.timeNow())
	c.mu.Unlock()

	if persist.PrivateMachineKey == (wgcfg.PrivateKey{}) {
		c.logf("Generating a new machinekey.")
		mkey, err := wgcfg.NewPrivateKey()
		if err != nil {
			log.Fatal(err)
		}
		persist.PrivateMachineKey = mkey
	}

	if expired {
		c.logf("Old key expired -> regen=true")
		regen = true
	}
	if (flags & LoginInteractive) != 0 {
		c.logf("LoginInteractive -> regen=true")
		regen = true
	}

	c.logf("doLogin(regen=%v, hasUrl=%v)", regen, url != "")
	if serverKey == (wgcfg.Key{}) {
		var err error
		serverKey, err = loadServerKey(ctx, c.httpc, c.serverURL)
		if err != nil {
			return regen, url, err
		}

		c.mu.Lock()
		c.serverKey = serverKey
		c.mu.Unlock()
	}

	var oldNodeKey wgcfg.Key
	if url != "" {
	} else if regen || persist.PrivateNodeKey == (wgcfg.PrivateKey{}) {
		c.logf("Generating a new nodekey.")
		persist.OldPrivateNodeKey = persist.PrivateNodeKey
		key, err := wgcfg.NewPrivateKey()
		if err != nil {
			c.logf("login keygen: %v", err)
			return regen, url, err
		}
		tryingNewKey = key
	} else {
		// Try refreshing the current key first
		tryingNewKey = persist.PrivateNodeKey
	}
	if persist.OldPrivateNodeKey != (wgcfg.PrivateKey{}) {
		oldNodeKey = persist.OldPrivateNodeKey.Public()
	}

	if tryingNewKey == (wgcfg.PrivateKey{}) {
		log.Fatalf("tryingNewKey is empty, give up")
	}
	if backendLogID == "" {
		err = errors.New("hostinfo: BackendLogID missing")
		return regen, url, err
	}
	request := tailcfg.RegisterRequest{
		Version:    1,
		OldNodeKey: tailcfg.NodeKey(oldNodeKey),
		NodeKey:    tailcfg.NodeKey(tryingNewKey.Public()),
		Hostinfo:   hostinfo,
		Followup:   url,
	}
	c.logf("RegisterReq: onode=%v node=%v fup=%v",
		request.OldNodeKey.ShortString(),
		request.NodeKey.ShortString(), url != "")
	request.Auth.Oauth2Token = t
	request.Auth.Provider = persist.Provider
	request.Auth.LoginName = persist.LoginName
	request.Auth.AuthKey = authKey
	bodyData, err := encode(request, &serverKey, &persist.PrivateMachineKey)
	if err != nil {
		return regen, url, err
	}
	body := bytes.NewReader(bodyData)

	u := fmt.Sprintf("%s/machine/%s", c.serverURL, persist.PrivateMachineKey.Public().HexString())
	req, err := http.NewRequest("POST", u, body)
	if err != nil {
		return regen, url, err
	}
	req = req.WithContext(ctx)

	res, err := c.httpc.Do(req)
	if err != nil {
		return regen, url, fmt.Errorf("register request: %v", err)
	}
	c.logf("RegisterReq: returned.")
	resp := tailcfg.RegisterResponse{}
	if err := decode(res, &resp, &serverKey, &persist.PrivateMachineKey); err != nil {
		return regen, url, fmt.Errorf("register request: %v", err)
	}

	if resp.NodeKeyExpired {
		if regen {
			return true, "", fmt.Errorf("weird: regen=true but server says NodeKeyExpired: %v", request.NodeKey)
		}
		c.logf("server reports new node key %v has expired",
			request.NodeKey.ShortString())
		return true, "", nil
	}
	if persist.Provider == "" {
		persist.Provider = resp.Login.Provider
	}
	if persist.LoginName == "" {
		persist.LoginName = resp.Login.LoginName
	}

	// TODO(crawshaw): RegisterResponse should be able to mechanically
	// communicate some extra instructions from the server:
	//	- new node key required
	//	- machine key no longer supported
	//	- user is disabled

	if resp.AuthURL != "" {
		c.logf("AuthURL is %v", resp.AuthURL)
	} else {
		c.logf("No AuthURL")
	}

	c.mu.Lock()
	if resp.AuthURL == "" {
		// key rotation is complete
		persist.PrivateNodeKey = tryingNewKey
	} else {
		// save it for the retry-with-URL
		c.tryingNewKey = tryingNewKey
	}
	c.persist = persist
	c.mu.Unlock()

	if err != nil {
		return regen, "", err
	}
	if ctx.Err() != nil {
		return regen, "", ctx.Err()
	}
	return false, resp.AuthURL, nil
}

func sameStrings(a, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	for i := range a {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}

// newEndpoints acquires c.mu and sets the local port and endpoints and reports
// whether they've changed.
//
// It does not retain the provided slice.
func (c *Direct) newEndpoints(localPort uint16, endpoints []string) (changed bool) {
	c.mu.Lock()
	defer c.mu.Unlock()

	// Nothing new?
	if c.localPort == localPort && sameStrings(c.endpoints, endpoints) {
		return false // unchanged
	}
	c.logf("client.newEndpoints(%v, %v)", localPort, endpoints)
	c.localPort = localPort
	c.endpoints = append(c.endpoints[:0], endpoints...)
	return true // changed
}

// SetEndpoints updates the list of locally advertised endpoints.
// It won't be replicated to the server until a *fresh* call to PollNetMap().
// You don't need to restart PollNetMap if we return changed==false.
func (c *Direct) SetEndpoints(localPort uint16, endpoints []string) (changed bool) {
	// (no log message on function entry, because it clutters the logs
	// if endpoints haven't changed. newEndpoints() will log it.)
	return c.newEndpoints(localPort, endpoints)
}

func (c *Direct) PollNetMap(ctx context.Context, maxPolls int, cb func(*NetworkMap)) error {
	c.mu.Lock()
	persist := c.persist
	serverURL := c.serverURL
	serverKey := c.serverKey
	hostinfo := c.hostinfo.Clone()
	backendLogID := hostinfo.BackendLogID
	localPort := c.localPort
	ep := append([]string(nil), c.endpoints...)
	c.mu.Unlock()

	if backendLogID == "" {
		return errors.New("hostinfo: BackendLogID missing")
	}

	allowStream := maxPolls != 1
	c.logf("PollNetMap: stream=%v :%v %v", maxPolls, localPort, ep)

	vlogf := logger.Discard
	if Debug.NetMap {
		vlogf = c.logf
	}

	request := tailcfg.MapRequest{
		Version:         4,
		IncludeIPv6:     true,
		DeltaPeers:      true,
		KeepAlive:       c.keepAlive,
		NodeKey:         tailcfg.NodeKey(persist.PrivateNodeKey.Public()),
		DiscoKey:        c.discoPubKey,
		Endpoints:       ep,
		Stream:          allowStream,
		Hostinfo:        hostinfo,
		DebugForceDisco: Debug.ForceDisco,
	}
	if c.newDecompressor != nil {
		request.Compress = "zstd"
	}

	bodyData, err := encode(request, &serverKey, &persist.PrivateMachineKey)
	if err != nil {
		vlogf("netmap: encode: %v", err)
		return err
	}

	t0 := time.Now()
	u := fmt.Sprintf("%s/machine/%s/map", serverURL, persist.PrivateMachineKey.Public().HexString())
	req, err := http.NewRequest("POST", u, bytes.NewReader(bodyData))
	if err != nil {
		return err
	}
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	req = req.WithContext(ctx)

	res, err := c.httpc.Do(req)
	if err != nil {
		vlogf("netmap: Do: %v", err)
		return err
	}
	vlogf("netmap: Do = %v after %v", res.StatusCode, time.Since(t0).Round(time.Millisecond))
	if res.StatusCode != 200 {
		msg, _ := ioutil.ReadAll(res.Body)
		res.Body.Close()
		return fmt.Errorf("initial fetch failed %d: %s",
			res.StatusCode, strings.TrimSpace(string(msg)))
	}
	defer res.Body.Close()

	// If we go more than pollTimeout without hearing from the server,
	// end the long poll. We should be receiving a keep alive ping
	// every minute.
	const pollTimeout = 120 * time.Second
	timeout := time.NewTimer(pollTimeout)
	timeoutReset := make(chan struct{})
	pollDone := make(chan struct{})
	defer close(pollDone)
	go func() {
		for {
			select {
			case <-pollDone:
				vlogf("netmap: ending timeout goroutine")
				return
			case <-timeout.C:
				c.logf("map response long-poll timed out!")
				cancel()
				return
			case <-timeoutReset:
				if !timeout.Stop() {
					select {
					case <-timeout.C:
					case <-pollDone:
						vlogf("netmap: ending timeout goroutine")
						return
					}
				}
				vlogf("netmap: reset timeout timer")
				timeout.Reset(pollTimeout)
			}
		}
	}()

	var lastDERPMap *tailcfg.DERPMap

	// If allowStream, then the server will use an HTTP long poll to
	// return incremental results. There is always one response right
	// away, followed by a delay, and eventually others.
	// If !allowStream, it'll still send the first result in exactly
	// the same format before just closing the connection.
	// We can use this same read loop either way.
	var msg []byte
	var previousPeers []*tailcfg.Node // for delta purposes
	for i := 0; i < maxPolls || maxPolls < 0; i++ {
		vlogf("netmap: starting size read after %v (poll %v)", time.Since(t0).Round(time.Millisecond), i)
		var siz [4]byte
		if _, err := io.ReadFull(res.Body, siz[:]); err != nil {
			vlogf("netmap: size read error after %v: %v", time.Since(t0).Round(time.Millisecond), err)
			return err
		}
		size := binary.LittleEndian.Uint32(siz[:])
		vlogf("netmap: read size %v after %v", size, time.Since(t0).Round(time.Millisecond))
		msg = append(msg[:0], make([]byte, size)...)
		if _, err := io.ReadFull(res.Body, msg); err != nil {
			vlogf("netmap: body read error: %v", err)
			return err
		}
		vlogf("netmap: read body after %v", time.Since(t0).Round(time.Millisecond))

		var resp tailcfg.MapResponse
		if err := c.decodeMsg(msg, &resp); err != nil {
			vlogf("netmap: decode error: %v", err)
			return err
		}

		if resp.KeepAlive {
			vlogf("netmap: got keep-alive")
		} else {
			vlogf("netmap: got new map")
		}
		select {
		case timeoutReset <- struct{}{}:
			vlogf("netmap: sent timer reset")
		case <-ctx.Done():
			c.logf("netmap: not resetting timer; context done: %v", ctx.Err())
			return ctx.Err()
		}
		if resp.KeepAlive {
			continue
		}

		undeltaPeers(&resp, previousPeers)
		previousPeers = cloneNodes(resp.Peers) // defensive/lazy clone, since this escapes to who knows where

		if resp.DERPMap != nil {
			vlogf("netmap: new map contains DERP map")
			lastDERPMap = resp.DERPMap
		}
		if resp.Debug != nil {
			if resp.Debug.LogHeapPprof {
				go logheap.LogHeap(resp.Debug.LogHeapURL)
			}
			setControlAtomic(&controlUseDERPRoute, resp.Debug.DERPRoute)
			setControlAtomic(&controlTrimWGConfig, resp.Debug.TrimWGConfig)
		}
		// Temporarily (2020-06-29) support removing all but
		// discovery-supporting nodes during development, for
		// less noise.
		if Debug.OnlyDisco {
			filtered := resp.Peers[:0]
			for _, p := range resp.Peers {
				if !p.DiscoKey.IsZero() {
					filtered = append(filtered, p)
				}
			}
			resp.Peers = filtered
		}

		nm := &NetworkMap{
			NodeKey:      tailcfg.NodeKey(persist.PrivateNodeKey.Public()),
			PrivateKey:   persist.PrivateNodeKey,
			Expiry:       resp.Node.KeyExpiry,
			Name:         resp.Node.Name,
			Addresses:    resp.Node.Addresses,
			Peers:        resp.Peers,
			LocalPort:    localPort,
			User:         resp.Node.User,
			UserProfiles: make(map[tailcfg.UserID]tailcfg.UserProfile),
			Domain:       resp.Domain,
			Roles:        resp.Roles,
			DNS:          resp.DNSConfig,
			Hostinfo:     resp.Node.Hostinfo,
			PacketFilter: c.parsePacketFilter(resp.PacketFilter),
			DERPMap:      lastDERPMap,
			Debug:        resp.Debug,
		}
		for _, profile := range resp.UserProfiles {
			nm.UserProfiles[profile.ID] = profile
		}
		if resp.Node.MachineAuthorized {
			nm.MachineStatus = tailcfg.MachineAuthorized
		} else {
			nm.MachineStatus = tailcfg.MachineUnauthorized
		}
		if len(resp.DNS) > 0 {
			nm.DNS.Nameservers = wgIPToNetaddr(resp.DNS)
		}
		if len(resp.SearchPaths) > 0 {
			nm.DNS.Domains = resp.SearchPaths
		}
		if Debug.ProxyDNS {
			nm.DNS.Proxied = true
		}

		// Printing the netmap can be extremely verbose, but is very
		// handy for debugging. Let's limit how often we do it.
		// Code elsewhere prints netmap diffs every time, so this
		// occasional full dump, plus incremental diffs, should do
		// the job.
		now := c.timeNow()
		if now.Sub(c.lastPrintMap) >= 5*time.Minute {
			c.lastPrintMap = now
			c.logf("new network map[%d]:\n%s", i, nm.Concise())
		}

		c.mu.Lock()
		c.expiry = &nm.Expiry
		c.mu.Unlock()

		cb(nm)
	}
	if ctx.Err() != nil {
		return ctx.Err()
	}
	return nil
}

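// A usage sketch for PollNetMap above: a negative maxPolls streams map
// responses until ctx is canceled or the connection drops, invoking cb once
// per full (non-keep-alive) network map:
//
//	err := c.PollNetMap(ctx, -1, func(nm *NetworkMap) {
//		log.Printf("netmap: %d peers", len(nm.Peers))
//	})
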
func decode(res *http.Response, v interface{}, serverKey *wgcfg.Key, mkey *wgcfg.PrivateKey) error {
	defer res.Body.Close()
	msg, err := ioutil.ReadAll(io.LimitReader(res.Body, 1<<20))
	if err != nil {
		return err
	}
	if res.StatusCode != 200 {
		return fmt.Errorf("%d: %v", res.StatusCode, string(msg))
	}
	return decodeMsg(msg, v, serverKey, mkey)
}

var dumpMapResponse, _ = strconv.ParseBool(os.Getenv("TS_DEBUG_MAPRESPONSE"))

func (c *Direct) decodeMsg(msg []byte, v interface{}) error {
	c.mu.Lock()
	mkey := c.persist.PrivateMachineKey
	serverKey := c.serverKey
	c.mu.Unlock()

	decrypted, err := decryptMsg(msg, &serverKey, &mkey)
	if err != nil {
		return err
	}
	var b []byte
	if c.newDecompressor == nil {
		b = decrypted
	} else {
		decoder, err := c.newDecompressor()
		if err != nil {
			return err
		}
		defer decoder.Close()
		b, err = decoder.DecodeAll(decrypted, nil)
		if err != nil {
			return err
		}
	}
	if dumpMapResponse {
		var buf bytes.Buffer
		json.Indent(&buf, b, "", " ")
		log.Printf("MapResponse: %s", buf.Bytes())
	}
	if err := json.Unmarshal(b, v); err != nil {
		return fmt.Errorf("response: %v", err)
	}
	return nil
}

func decodeMsg(msg []byte, v interface{}, serverKey *wgcfg.Key, mkey *wgcfg.PrivateKey) error {
	decrypted, err := decryptMsg(msg, serverKey, mkey)
	if err != nil {
		return err
	}
	if err := json.Unmarshal(decrypted, v); err != nil {
		return fmt.Errorf("response: %v", err)
	}
	return nil
}

func decryptMsg(msg []byte, serverKey *wgcfg.Key, mkey *wgcfg.PrivateKey) ([]byte, error) {
	var nonce [24]byte
	if len(msg) < len(nonce)+1 {
		return nil, fmt.Errorf("response missing nonce, len=%d", len(msg))
	}
	copy(nonce[:], msg)
	msg = msg[len(nonce):]

	pub, pri := (*[32]byte)(serverKey), (*[32]byte)(mkey)
	decrypted, ok := box.Open(nil, msg, &nonce, pub, pri)
	if !ok {
		return nil, fmt.Errorf("cannot decrypt response")
	}
	return decrypted, nil
}

func encode(v interface{}, serverKey *wgcfg.Key, mkey *wgcfg.PrivateKey) ([]byte, error) {
	b, err := json.Marshal(v)
	if err != nil {
		return nil, err
	}
	const debugMapRequests = false
	if debugMapRequests {
		if _, ok := v.(tailcfg.MapRequest); ok {
			log.Printf("MapRequest: %s", b)
		}
	}
	var nonce [24]byte
	if _, err := io.ReadFull(rand.Reader, nonce[:]); err != nil {
		panic(err)
	}
	pub, pri := (*[32]byte)(serverKey), (*[32]byte)(mkey)
	msg := box.Seal(nonce[:], b, &nonce, pub, pri)
	return msg, nil
}

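// A note on the framing used by encode and decryptMsg above: each message is a
// fresh random 24-byte nonce followed by the NaCl box ciphertext, sealed with
// the machine's private key and the control server's public key. A local
// round-trip sketch (in real use the two sides hold different private keys;
// req, out, serverKey, and machinePriv are illustrative names, not values
// defined in this file):
//
//	ct, _ := encode(req, &serverKey, &machinePriv) // ct = nonce || box
//	err := decodeMsg(ct, &out, &serverKey, &machinePriv)
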
func loadServerKey(ctx context.Context, httpc *http.Client, serverURL string) (wgcfg.Key, error) {
	req, err := http.NewRequest("GET", serverURL+"/key", nil)
	if err != nil {
		return wgcfg.Key{}, fmt.Errorf("create control key request: %v", err)
	}
	req = req.WithContext(ctx)
	res, err := httpc.Do(req)
	if err != nil {
		return wgcfg.Key{}, fmt.Errorf("fetch control key: %v", err)
	}
	defer res.Body.Close()
	b, err := ioutil.ReadAll(io.LimitReader(res.Body, 1<<16))
	if err != nil {
		return wgcfg.Key{}, fmt.Errorf("fetch control key response: %v", err)
	}
	if res.StatusCode != 200 {
		return wgcfg.Key{}, fmt.Errorf("fetch control key: %d: %s", res.StatusCode, string(b))
	}
	key, err := wgcfg.ParseHexKey(string(b))
	if err != nil {
		return wgcfg.Key{}, fmt.Errorf("fetch control key: %v", err)
	}
	return key, nil
}

func wgIPToNetaddr(ips []wgcfg.IP) (ret []netaddr.IP) {
	for _, ip := range ips {
		nip, ok := netaddr.FromStdIP(ip.IP())
		if !ok {
			panic(fmt.Sprintf("conversion of %s from wgcfg to netaddr IP failed", ip))
		}
		ret = append(ret, nip.Unmap())
	}
	return ret
}

// Debug contains temporary internal-only debug knobs.
// They're unexported to not draw attention to them.
var Debug = initDebug()

type debug struct {
	NetMap     bool
	ProxyDNS   bool
	OnlyDisco  bool
	Disco      bool
	ForceDisco bool // ask control server to not filter out our disco key
}

func initDebug() debug {
	d := debug{
		NetMap:     envBool("TS_DEBUG_NETMAP"),
		ProxyDNS:   envBool("TS_DEBUG_PROXY_DNS"),
		OnlyDisco:  os.Getenv("TS_DEBUG_USE_DISCO") == "only",
		ForceDisco: os.Getenv("TS_DEBUG_USE_DISCO") == "only" || envBool("TS_DEBUG_USE_DISCO"),
	}
	if d.ForceDisco || os.Getenv("TS_DEBUG_USE_DISCO") == "" {
		// This now defaults to on.
		d.Disco = true
	}
	return d
}

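// The debug knobs above are all driven by environment variables; the values
// below are illustrative, not defaults:
//
//	TS_DEBUG_NETMAP=true       # verbose netmap logging (Debug.NetMap / vlogf)
//	TS_DEBUG_PROXY_DNS=true    # Debug.ProxyDNS
//	TS_DEBUG_USE_DISCO=only    # Debug.OnlyDisco: keep only disco-capable peers
//	TS_DEBUG_MAPRESPONSE=true  # dump decoded MapResponses (see dumpMapResponse)
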
func envBool(k string) bool {
	e := os.Getenv(k)
	if e == "" {
		return false
	}
	v, err := strconv.ParseBool(e)
	if err != nil {
		panic(fmt.Sprintf("invalid non-bool %q for env var %q", e, k))
	}
	return v
}

// undeltaPeers updates mapRes.Peers to be complete based on the provided previous peer list
// and the PeersRemoved and PeersChanged fields in mapRes.
// It then also nils out the delta fields.
func undeltaPeers(mapRes *tailcfg.MapResponse, prev []*tailcfg.Node) {
	if len(mapRes.Peers) > 0 {
		// Not delta encoded.
		if !nodesSorted(mapRes.Peers) {
			log.Printf("netmap: undeltaPeers: MapResponse.Peers not sorted; sorting")
			sortNodes(mapRes.Peers)
		}
		return
	}

	var removed map[tailcfg.NodeID]bool
	if pr := mapRes.PeersRemoved; len(pr) > 0 {
		removed = make(map[tailcfg.NodeID]bool, len(pr))
		for _, id := range pr {
			removed[id] = true
		}
	}
	changed := mapRes.PeersChanged

	if len(removed) == 0 && len(changed) == 0 {
		// No changes fast path.
		mapRes.Peers = prev
		return
	}

	if !nodesSorted(changed) {
		log.Printf("netmap: undeltaPeers: MapResponse.PeersChanged not sorted; sorting")
		sortNodes(changed)
	}
	if !nodesSorted(prev) {
		// Internal error (unrelated to the network) if we get here.
		log.Printf("netmap: undeltaPeers: [unexpected] prev not sorted; sorting")
		sortNodes(prev)
	}

	newFull := make([]*tailcfg.Node, 0, len(prev)-len(removed))
	for len(prev) > 0 && len(changed) > 0 {
		pID := prev[0].ID
		cID := changed[0].ID
		if removed[pID] {
			prev = prev[1:]
			continue
		}
		switch {
		case pID < cID:
			newFull = append(newFull, prev[0])
			prev = prev[1:]
		case pID == cID:
			newFull = append(newFull, changed[0])
			prev, changed = prev[1:], changed[1:]
		case cID < pID:
			newFull = append(newFull, changed[0])
			changed = changed[1:]
		}
	}
	newFull = append(newFull, changed...)
	for _, n := range prev {
		if !removed[n.ID] {
			newFull = append(newFull, n)
		}
	}
	sortNodes(newFull)
	mapRes.Peers = newFull
	mapRes.PeersChanged = nil
	mapRes.PeersRemoved = nil
}

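// A worked example of the merge above (sketch): if prev holds peers with IDs
// {1, 2, 3}, and the delta response carries PeersRemoved=[2] and
// PeersChanged=[3', 4] (3' being an updated copy of node 3), then the
// resulting mapRes.Peers is {1, 3', 4}: node 2 is dropped, node 3 is replaced
// by its changed version, node 4 is appended, and the slice is left sorted by ID.
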
func nodesSorted(v []*tailcfg.Node) bool {
	for i, n := range v {
		if i > 0 && n.ID <= v[i-1].ID {
			return false
		}
	}
	return true
}

func sortNodes(v []*tailcfg.Node) {
	sort.Slice(v, func(i, j int) bool { return v[i].ID < v[j].ID })
}

func cloneNodes(v1 []*tailcfg.Node) []*tailcfg.Node {
	if v1 == nil {
		return nil
	}
	v2 := make([]*tailcfg.Node, len(v1))
	for i, n := range v1 {
		v2[i] = n.Clone()
	}
	return v2
}

// opt.Bool configs from control.
var (
	controlUseDERPRoute atomic.Value
	controlTrimWGConfig atomic.Value
)

func setControlAtomic(dst *atomic.Value, v opt.Bool) {
	old, ok := dst.Load().(opt.Bool)
	if !ok || old != v {
		dst.Store(v)
	}
}

// DERPRouteFlag reports the last reported value from control for whether
// DERP route optimization (Issue 150) should be enabled.
func DERPRouteFlag() opt.Bool {
	v, _ := controlUseDERPRoute.Load().(opt.Bool)
	return v
}

// TrimWGConfig reports the last reported value from control for whether
// we should do lazy wireguard configuration.
func TrimWGConfig() opt.Bool {
	v, _ := controlTrimWGConfig.Load().(opt.Bool)
	return v
}