derp: add docs on current protocol overview, break accept apart a bit
Signed-off-by: Brad Fitzpatrick <bradfitz@tailscale.com>

parent 79af6054bf
commit 11048b8932

derp/derp.go
@@ -27,6 +27,30 @@ const magic = 0x44c55250
 
 // frameType is the one byte frame type header in frame headers.
 type frameType byte
+
+/*
+Protocol flow:
+
+Login:
+* client connects
+* server sends magic: [be_uint32(magic)]
+* server sends typeServerKey frame: 1 byte typeServerKey + 32 bytes of public key
+* client sends: (with no frameType)
+  - 32 bytes client public key
+  - 24 bytes nonce
+  - be_uint32 length of naclbox (capped at 256k)
+  - that many bytes of naclbox
+* (server verifies client is authorized)
+* server sends typeServerInfo frame byte + 24 byte nonce + beu32 len + naclbox(json)
+
+Steady state:
+* server occasionally sends typeKeepAlive. (One byte only)
+* client sends typeSendPacket byte + 32 byte dest pub key + beu32 packet len + packet bytes
+* server then sends typeRecvPacket byte + beu32 packet len + packet bytes to recipient conn
+*/
+
+// TODO(bradfitz): require pings to be acknowledged; copy http2 PING frame w/ ping payload
+
 const (
 	typeServerKey  = frameType(0x01)
 	typeServerInfo = frameType(0x02)
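Note (not part of the commit): for orientation, here is a client-side sketch of the login flow documented in the new comment above. Only the wire layout (magic, typeServerKey, the untyped client hello, typeServerInfo) comes from the doc comment; the package name, function name, and the infoJSON payload are invented for illustration and are not this package's API.

package derpsketch

import (
	"crypto/rand"
	"encoding/binary"
	"fmt"
	"io"
	"net"

	"golang.org/x/crypto/nacl/box"
)

const (
	sketchMagic    = 0x44c55250
	typeServerKey  = 0x01
	typeServerInfo = 0x02
)

// clientLogin speaks the login flow from the doc comment and returns the
// decrypted server-info JSON. infoJSON is whatever the client seals in its
// naclbox; its schema is not shown in this diff.
func clientLogin(nc net.Conn, infoJSON []byte) ([]byte, error) {
	var be [4]byte

	// Server speaks first: be_uint32(magic), then the typeServerKey frame.
	if _, err := io.ReadFull(nc, be[:]); err != nil {
		return nil, err
	}
	if binary.BigEndian.Uint32(be[:]) != sketchMagic {
		return nil, fmt.Errorf("bad magic %x", be)
	}
	var t [1]byte
	if _, err := io.ReadFull(nc, t[:]); err != nil {
		return nil, err
	}
	if t[0] != typeServerKey {
		return nil, fmt.Errorf("want typeServerKey, got %#x", t[0])
	}
	var serverKey [32]byte
	if _, err := io.ReadFull(nc, serverKey[:]); err != nil {
		return nil, err
	}

	// Client reply, with no frame type: 32-byte public key, 24-byte nonce,
	// be_uint32 box length (capped at 256k by the server), then the box.
	pub, priv, err := box.GenerateKey(rand.Reader)
	if err != nil {
		return nil, err
	}
	var nonce [24]byte
	if _, err := rand.Read(nonce[:]); err != nil {
		return nil, err
	}
	sealed := box.Seal(nil, infoJSON, &nonce, &serverKey, priv)
	binary.BigEndian.PutUint32(be[:], uint32(len(sealed)))
	for _, part := range [][]byte{pub[:], nonce[:], be[:], sealed} {
		if _, err := nc.Write(part); err != nil {
			return nil, err
		}
	}

	// Server accepts with typeServerInfo: frame byte + 24-byte nonce +
	// beu32 length + naclbox(json).
	hdr := make([]byte, 1+24+4)
	if _, err := io.ReadFull(nc, hdr); err != nil {
		return nil, err
	}
	if hdr[0] != typeServerInfo {
		return nil, fmt.Errorf("want typeServerInfo, got %#x", hdr[0])
	}
	var rnonce [24]byte
	copy(rnonce[:], hdr[1:25])
	sealedInfo := make([]byte, binary.BigEndian.Uint32(hdr[25:29]))
	if _, err := io.ReadFull(nc, sealedInfo); err != nil {
		return nil, err
	}
	info, ok := box.Open(nil, sealedInfo, &rnonce, &serverKey, priv)
	if !ok {
		return nil, fmt.Errorf("cannot open server info box")
	}
	return info, nil
}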
@@ -92,6 +92,32 @@ func (s *Server) Accept(nc net.Conn, brw *bufio.ReadWriter) {
 	}
 }
 
+// registerClient notes that client c is now authenticated and ready for packets.
+// If c's public key was already connected with a different connection, the prior one is closed.
+func (s *Server) registerClient(c *sclient) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	old := s.clients[c.key]
+	if old == nil {
+		s.logf("derp: %s: client %x: adding connection", c.nc.RemoteAddr(), c.key)
+	} else {
+		old.nc.Close()
+		s.logf("derp: %s: client %x: adding connection, replacing %s", c.nc.RemoteAddr(), c.key, old.nc.RemoteAddr())
+	}
+	s.clients[c.key] = c
+}
+
+// unregisterClient removes c from the server's client map, if still registered.
+func (s *Server) unregisterClient(c *sclient) {
+	s.mu.Lock()
+	defer s.mu.Unlock()
+	cur := s.clients[c.key]
+	if cur == c {
+		s.logf("derp: %s: client %x: removing connection", c.nc.RemoteAddr(), c.key)
+		delete(s.clients, c.key)
+	}
+}
+
 func (s *Server) accept(nc net.Conn, brw *bufio.ReadWriter) error {
 	br, bw := brw.Reader, brw.Writer
 	nc.SetDeadline(time.Now().Add(10 * time.Second))
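Note (not part of the commit): a matching sketch of the steady-state framing described in the doc comment. The 0x03–0x05 values below are placeholders, since this excerpt only defines 0x01 and 0x02; the real typeKeepAlive/typeSendPacket/typeRecvPacket constants live elsewhere in derp.go.

package derpsketch

import (
	"bufio"
	"encoding/binary"
	"fmt"
	"io"
)

// Placeholder frame-type values; the real constants are not shown in this diff.
const (
	typeKeepAlive  = 0x03
	typeSendPacket = 0x04
	typeRecvPacket = 0x05
)

// writeSendPacket frames one client->server packet:
// typeSendPacket byte + 32-byte dest public key + be_uint32 len + payload.
func writeSendPacket(bw *bufio.Writer, dst [32]byte, payload []byte) error {
	if err := bw.WriteByte(typeSendPacket); err != nil {
		return err
	}
	if _, err := bw.Write(dst[:]); err != nil {
		return err
	}
	var be [4]byte
	binary.BigEndian.PutUint32(be[:], uint32(len(payload)))
	if _, err := bw.Write(be[:]); err != nil {
		return err
	}
	if _, err := bw.Write(payload); err != nil {
		return err
	}
	return bw.Flush()
}

// readFrame reads one server->client frame: either a bare typeKeepAlive
// byte, or typeRecvPacket + be_uint32 len + packet bytes.
func readFrame(br *bufio.Reader) (packet []byte, err error) {
	t, err := br.ReadByte()
	if err != nil {
		return nil, err
	}
	switch t {
	case typeKeepAlive:
		return nil, nil // keepalive is one byte only
	case typeRecvPacket:
		var be [4]byte
		if _, err := io.ReadFull(br, be[:]); err != nil {
			return nil, err
		}
		packet = make([]byte, binary.BigEndian.Uint32(be[:]))
		if _, err := io.ReadFull(br, packet); err != nil {
			return nil, err
		}
		return packet, nil
	default:
		return nil, fmt.Errorf("unexpected frame type %#x", t)
	}
}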
|
@@ -110,8 +136,9 @@ func (s *Server) accept(nc net.Conn, brw *bufio.ReadWriter) error {
 	// At this point we trust the client so we don't time out.
 	nc.SetDeadline(time.Time{})
 
-	ctx, cancel := context.WithCancel(context.Background())
-	defer cancel()
+	if err := s.sendServerInfo(bw, clientKey); err != nil {
+		return fmt.Errorf("send server info: %v", err)
+	}
 
 	c := &sclient{
 		key: clientKey,
|
@@ -123,42 +150,12 @@ func (s *Server) accept(nc net.Conn, brw *bufio.ReadWriter) error {
 		c.info = *clientInfo
 	}
 
-	defer func() {
-		s.mu.Lock()
-		curClient := s.clients[c.key]
-		if curClient == c {
-			s.logf("derp: %s: client %x: removing connection", nc.RemoteAddr(), c.key)
-			delete(s.clients, c.key)
-		}
-		s.mu.Unlock()
-	}()
+	s.registerClient(c)
+	defer s.unregisterClient(c)
 
-	// Hold mu while we add the new client to the clients list and under
-	// the same acquisition send server info. This ensures that both:
-	// 1. by the time the client receives the server info, it can be addressed.
-	// 2. the server info is the very first frame the client receives.
-	c.mu.Lock()
-	s.mu.Lock()
-	oldClient := s.clients[c.key]
-	s.clients[c.key] = c
-	s.mu.Unlock()
-	if err := s.sendServerInfo(c.bw, clientKey); err != nil {
-		return fmt.Errorf("send server info: %v", err)
-	}
-	c.mu.Unlock()
-
-	if oldClient == nil {
-		s.logf("derp: %s: client %x: adding connection", nc.RemoteAddr(), c.key)
-	} else {
-		oldClient.nc.Close()
-		s.logf("derp: %s: client %x: adding connection, replacing %s", nc.RemoteAddr(), c.key, oldClient.nc.RemoteAddr())
-	}
-
-	go func() {
-		if err := c.keepAlive(ctx); err != nil {
-			s.logf("derp: %s: client %x: keep alive failed: %v", nc.RemoteAddr(), c.key, err)
-		}
-	}()
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	go s.sendClientKeepAlives(ctx, c)
 
 	for {
 		dstKey, contents, err := s.recvPacket(c.br)
|
@@ -193,6 +190,12 @@ func (s *Server) accept(nc net.Conn, brw *bufio.ReadWriter) error {
 	}
 }
 
+func (s *Server) sendClientKeepAlives(ctx context.Context, c *sclient) {
+	if err := c.keepAliveLoop(ctx); err != nil {
+		s.logf("derp: %s: client %x: keep alive failed: %v", c.nc.RemoteAddr(), c.key, err)
+	}
+}
+
 func (s *Server) verifyClient(clientKey key.Public, info *sclientInfo) error {
 	// TODO(crawshaw): implement policy constraints on who can use the DERP server
 	return nil
|
@@ -312,13 +315,14 @@ type sclient struct {
 	bw *bufio.Writer
 }
 
-func (c *sclient) keepAlive(ctx context.Context) error {
+func (c *sclient) keepAliveLoop(ctx context.Context) error {
 	jitterMs, err := crand.Int(crand.Reader, big.NewInt(5000))
 	if err != nil {
 		panic(err)
 	}
 	jitter := time.Duration(jitterMs.Int64()) * time.Millisecond
 	c.keepAliveTimer = time.NewTimer(keepAlive + jitter)
 	defer c.keepAliveTimer.Stop()
 
 	for {
 		select {
|
@@ -338,7 +342,6 @@ func (c *sclient) keepAlive(ctx context.Context) error {
 		c.mu.Unlock()
 
 		if err != nil {
-			// TODO log
 			c.nc.Close()
 			return err
 		}
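Note (not part of the commit): the middle of keepAliveLoop is elided by the hunks above. A plausible reconstruction of its shape, consistent with the visible setup (jittered timer) and teardown (close on write error), might look like the following. The sclientSketch type, keepAliveInterval constant, and jitter parameter are stand-ins for the real sclient fields and derp.go's keepAlive constant; typeKeepAlive is the placeholder from the earlier sketch.

package derpsketch

import (
	"bufio"
	"context"
	"net"
	"sync"
	"time"
)

// Minimal stand-ins for the real sclient fields used here.
type sclientSketch struct {
	nc             net.Conn
	bw             *bufio.Writer
	mu             sync.Mutex
	keepAliveTimer *time.Timer
}

const keepAliveInterval = 60 * time.Second // stand-in for derp.go's keepAlive

// keepAliveLoop sketch: wait for the jittered timer or cancellation, write
// the single typeKeepAlive byte while holding mu, reset the timer, repeat.
func (c *sclientSketch) keepAliveLoop(ctx context.Context, jitter time.Duration) error {
	for {
		select {
		case <-ctx.Done():
			c.nc.Close()
			return ctx.Err()
		case <-c.keepAliveTimer.C:
			c.mu.Lock()
			err := c.bw.WriteByte(typeKeepAlive)
			if err == nil {
				err = c.bw.Flush()
			}
			c.mu.Unlock()

			if err != nil {
				c.nc.Close()
				return err
			}
			c.keepAliveTimer.Reset(keepAliveInterval + jitter)
		}
	}
}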
|