cmd/routed: add quic support
parent c1fa811afe
commit 90536c6ec0
@@ -31,6 +31,12 @@
   packages = ["mathx"]
   revision = "033754ab1fee508c9f98f2785eec2365964e0b05"
 
+[[projects]]
+  branch = "master"
+  name = "github.com/aead/chacha20"
+  packages = [".","chacha"]
+  revision = "8d6ce0550041f9d97e7f15ec27ed489f8bbbb0fb"
+
 [[projects]]
   name = "github.com/agext/levenshtein"
   packages = ["."]
@@ -205,6 +211,12 @@
   packages = ["."]
   revision = "4fe82ae3040f80a03d04d2cccb5606a626b8e1ee"
 
+[[projects]]
+  branch = "master"
+  name = "github.com/hashicorp/golang-lru"
+  packages = [".","simplelru"]
+  revision = "0a025b7e63adc15a622f29b0b2c4c3848243bbf6"
+
 [[projects]]
   branch = "master"
   name = "github.com/hashicorp/hcl"
@@ -270,6 +282,30 @@
   packages = ["."]
   revision = "7cafcd837844e784b526369c9bce262804aebc60"
 
+[[projects]]
+  branch = "master"
+  name = "github.com/lucas-clemente/aes12"
+  packages = ["."]
+  revision = "cd47fb39b79f867c6e4e5cd39cf7abd799f71670"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/lucas-clemente/fnv128a"
+  packages = ["."]
+  revision = "393af48d391698c6ae4219566bfbdfef67269997"
+
+[[projects]]
+  name = "github.com/lucas-clemente/quic-go"
+  packages = [".","ackhandler","congestion","crypto","flowcontrol","frames","h2quic","handshake","protocol","qerr","utils"]
+  revision = "d51a4a1ba70df8c2d5c4522c071aaa225690a11d"
+  version = "v0.5.0"
+
+[[projects]]
+  branch = "master"
+  name = "github.com/lucas-clemente/quic-go-certificates"
+  packages = ["."]
+  revision = "d2f86524cced5186554df90d92529757d22c1cb6"
+
 [[projects]]
   branch = "master"
   name = "github.com/magefile/mage"
@@ -429,7 +465,7 @@
 [[projects]]
   branch = "master"
   name = "golang.org/x/crypto"
-  packages = ["acme","acme/autocert","bcrypt","blowfish","cast5","nacl/secretbox","openpgp","openpgp/armor","openpgp/elgamal","openpgp/errors","openpgp/packet","openpgp/s2k","pbkdf2","poly1305","salsa20","salsa20/salsa","tea","twofish","xtea"]
+  packages = ["acme","acme/autocert","bcrypt","blowfish","cast5","curve25519","hkdf","nacl/secretbox","openpgp","openpgp/armor","openpgp/elgamal","openpgp/errors","openpgp/packet","openpgp/s2k","pbkdf2","poly1305","salsa20","salsa20/salsa","tea","twofish","xtea"]
   revision = "9419663f5a44be8b34ca85f08abc5fe1be11f8a3"
 
 [[projects]]
@@ -471,6 +507,6 @@
 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
-  inputs-digest = "012fa73a3c0eb714def85cdad9b5d9f8abc13ea4e7bd30b78da555876f2e487d"
+  inputs-digest = "4afa28f3bd4cc58dcc7474b1f476bb98f523241a5e998c1c0671a0eaad7fbf41"
   solver-name = "gps-cdcl"
   solver-version = 1
@@ -17,6 +17,7 @@ import (
 	"github.com/caarlos0/env"
 	"github.com/facebookgo/flagenv"
 	_ "github.com/joho/godotenv/autoload"
+	"github.com/lucas-clemente/quic-go/h2quic"
 )
 
 var (
@@ -75,6 +76,22 @@ func main() {
 	hs.Serve(l)
 }
 
+func setupQuic(s *server.Server, scfg server.Config) {
+	qs := &h2quic.Server{
+		Server: &http.Server{
+			Handler: middleware.Trace(s),
+			Addr: scfg.QuicAddr,
+			TLSConfig: &tls.Config{
+				GetCertificate: s.GetCertificate,
+			},
+		},
+	}
+
+	for {
+		qs.ListenAndServeTLS("", "")
+	}
+}
+
 func setupTLS(s *server.Server, scfg server.Config) {
 	hs := &http.Server{
 		Handler: middleware.Trace(s),
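For context, h2quic.Server wraps a standard *http.Server and serves the same handler over QUIC (HTTP/2 over UDP), which is why setupQuic above can reuse middleware.Trace(s) and the autocert-backed GetCertificate unchanged, and why passing "" for the certificate file arguments falls back to the TLSConfig. Below is a minimal standalone sketch of the same pattern; the certificate file names, listen address, and handler are illustrative stand-ins, not part of this commit.

```go
package main

import (
	"crypto/tls"
	"log"
	"net/http"

	"github.com/lucas-clemente/quic-go/h2quic"
)

func main() {
	// Hypothetical static certificate files; routed itself resolves
	// certificates dynamically through autocert via s.GetCertificate.
	cert, err := tls.LoadX509KeyPair("cert.pem", "key.pem")
	if err != nil {
		log.Fatal(err)
	}

	qs := &h2quic.Server{
		Server: &http.Server{
			Addr: ":8443", // illustrative UDP listen address
			Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
				w.Write([]byte("hello over QUIC\n"))
			}),
			TLSConfig: &tls.Config{Certificates: []tls.Certificate{cert}},
		},
	}

	for {
		// Same call as in setupQuic above: empty file arguments mean the
		// TLSConfig set on the embedded http.Server is used for the handshake.
		qs.ListenAndServeTLS("", "")
	}
}
```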
@@ -3,15 +3,18 @@ package server
 import (
 	"crypto/tls"
 	"errors"
+	"fmt"
 	"net"
 	"net/http"
 	"net/http/httputil"
+	"strings"
 	"time"
 
 	"git.xeserv.us/xena/route/internal/database"
 	"git.xeserv.us/xena/route/internal/tun2"
 	proto "git.xeserv.us/xena/route/proto"
 	"github.com/Xe/ln"
+	"github.com/lucas-clemente/quic-go/protocol"
 	"github.com/mtneug/pkg/ulid"
 	kcp "github.com/xtaci/kcp-go"
 	"golang.org/x/crypto/acme/autocert"
@@ -40,6 +43,7 @@ type Config struct {
 
 	WebAddr string `env:"WEB_ADDR,required"`
 	SSLAddr string `env:"SSL_ADDR,required"`
+	QuicAddr string `env:"QUIC_ADDR,required"`
 	BackendTCPAddr string `env:"BACKEND_TCP_ADDR,required"`
 	BackendKCPAddr string `env:"BACKEND_KCP_ADDR,required"`
 	GRPCAddr string `env:"GRPC_ADDR,required"`
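QUIC listens on UDP, so the new QUIC_ADDR can share a port number with the TCP-based SSL_ADDR listener. For reference, a hypothetical environment configuration covering the required variables from this struct (every value below is illustrative only):

```
WEB_ADDR=:80
SSL_ADDR=:443
QUIC_ADDR=:443
BACKEND_TCP_ADDR=:23042
BACKEND_KCP_ADDR=:23043
GRPC_ADDR=:23044
```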
@@ -164,6 +168,14 @@ func New(cfg Config) (*Server, error) {
 func (s *Server) Director(r *http.Request) {
 	r.Header.Del("X-Forwarded-For")
 	r.Header.Del("X-Client-Ip")
+
+	var versions []string
+	for _, v := range protocol.SupportedVersions {
+		versions = append(versions, v.ToAltSvc())
+	}
+	versionsStr := strings.Join(versions, ",")
+
+	r.Header.Add("Alt-Svc", fmt.Sprintf(`quic="%s"; ma=2592000; v="%s"`, s.cfg.QuicAddr, versionsStr))
 }
 
 func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) {
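The Alt-Svc header added above tells clients that the same origin is also reachable over QUIC: ma=2592000 lets them cache the alternative for 30 days, and v carries the version tags collected from protocol.SupportedVersions. A small standalone sketch of the same formatting, with a hypothetical address and version list standing in for s.cfg.QuicAddr and the real version slice:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Hypothetical stand-ins: the real values come from the QUIC_ADDR config
	// and from protocol.SupportedVersions via ToAltSvc().
	quicAddr := ":443"
	versions := []string{"39", "38", "37", "35"}

	header := fmt.Sprintf(`quic="%s"; ma=2592000; v="%s"`, quicAddr, strings.Join(versions, ","))
	fmt.Println("Alt-Svc:", header)
	// Output: Alt-Svc: quic=":443"; ma=2592000; v="39,38,37,35"
}
```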
@@ -0,0 +1,25 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+.vscode
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
+*.test
+*.prof
@@ -0,0 +1,19 @@
+language: go
+
+go:
+ - 1.5.3
+ - 1.6
+ - 1.7
+ - 1.8
+ - master
+
+env:
+ - TRAVIS_GOARCH=amd64
+ - TRAVIS_GOARCH=386
+
+before_install:
+ - export GOARCH=$TRAVIS_GOARCH
+
+branches:
+  only:
+  - master
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Andreas Auernhammer
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
@@ -0,0 +1,79 @@
+[![Godoc Reference](https://godoc.org/github.com/aead/chacha20?status.svg)](https://godoc.org/github.com/aead/chacha20)
+
+## The ChaCha20 stream cipher
+
+ChaCha is a stream cipher family created by Daniel J. Bernstein.
+The most common ChaCha cipher is ChaCha20 (20 rounds). ChaCha20 is standardized in [RFC 7539](https://tools.ietf.org/html/rfc7539 "RFC 7539").
+
+This package provides implementations of three ChaCha versions:
+- ChaCha20 with a 64 bit nonce (can en/decrypt up to 2^64 * 64 bytes for one key-nonce combination)
+- ChaCha20 with a 96 bit nonce (can en/decrypt up to 2^32 * 64 bytes ~ 256 GB for one key-nonce combination)
+- XChaCha20 with a 192 bit nonce (can en/decrypt up to 2^64 * 64 bytes for one key-nonce combination)
+
+Furthermore the chacha subpackage implements ChaCha20/12 and ChaCha20/8.
+These versions use 12 or 8 rounds instead of 20.
+But it's recommended to use ChaCha20 (with 20 rounds) - it will be fast enough for almost all purposes.
+
+### Installation
+Install in your GOPATH: `go get -u github.com/aead/chacha20`
+
+### Requirements
+All go versions >= 1.5.3 are supported.
+Please notice, that the amd64 AVX2 asm implementation requires go1.7 or newer.
+
+### Performance
+
+#### AMD64
+Hardware: Intel i7-6500U 2.50GHz x 2
+System: Linux Ubuntu 16.04 - kernel: 4.4.0-62-generic
+Go version: 1.8.0
+```
+AVX2
+name speed cpb
+ChaCha20_64-4 573MB/s ± 0% 4.16
+ChaCha20_1K-4 2.19GB/s ± 0% 1.06
+XChaCha20_64-4 261MB/s ± 0% 9.13
+XChaCha20_1K-4 1.69GB/s ± 4% 1.37
+XORKeyStream64-4 474MB/s ± 2% 5.02
+XORKeyStream1K-4 2.09GB/s ± 1% 1.11
+XChaCha20_XORKeyStream64-4 262MB/s ± 0% 9.09
+XChaCha20_XORKeyStream1K-4 1.71GB/s ± 1% 1.36
+
+SSSE3
+name speed cpb
+ChaCha20_64-4 583MB/s ± 0% 4.08
+ChaCha20_1K-4 1.15GB/s ± 1% 2.02
+XChaCha20_64-4 267MB/s ± 0% 8.92
+XChaCha20_1K-4 984MB/s ± 5% 2.42
+XORKeyStream64-4 492MB/s ± 1% 4.84
+XORKeyStream1K-4 1.10GB/s ± 5% 2.11
+XChaCha20_XORKeyStream64-4 266MB/s ± 0% 8.96
+XChaCha20_XORKeyStream1K-4 1.00GB/s ± 2% 2.32
+```
+#### 386
+Hardware: Intel i7-6500U 2.50GHz x 2
+System: Linux Ubuntu 16.04 - kernel: 4.4.0-62-generic
+Go version: 1.8.0
+```
+SSSE3
+name speed cpb
+ChaCha20_64-4 570MB/s ± 0% 4.18
+ChaCha20_1K-4 650MB/s ± 0% 3.66
+XChaCha20_64-4 223MB/s ± 0% 10.69
+XChaCha20_1K-4 584MB/s ± 1% 4.08
+XORKeyStream64-4 392MB/s ± 1% 6.08
+XORKeyStream1K-4 629MB/s ± 1% 3.79
+XChaCha20_XORKeyStream64-4 222MB/s ± 0% 10.73
+XChaCha20_XORKeyStream1K-4 585MB/s ± 0% 4.07
+
+SSE2
+name speed cpb
+ChaCha20_64-4 509MB/s ± 0% 4.68
+ChaCha20_1K-4 553MB/s ± 2% 4.31
+XChaCha20_64-4 201MB/s ± 0% 11.86
+XChaCha20_1K-4 498MB/s ± 4% 4.78
+XORKeyStream64-4 359MB/s ± 1% 6.64
+XORKeyStream1K-4 545MB/s ± 0% 4.37
+XChaCha20_XORKeyStream64-4 201MB/s ± 1% 11.86
+XChaCha20_XORKeyStream1K-4 507MB/s ± 0% 4.70
+```
@@ -0,0 +1,176 @@
+// Copyright (c) 2016 Andreas Auernhammer. All rights reserved.
+// Use of this source code is governed by a license that can be
+// found in the LICENSE file.
+
+// Package chacha implements some low-level functions of the
+// ChaCha cipher family.
+package chacha // import "github.com/aead/chacha20/chacha"
+
+import (
+	"encoding/binary"
+	"errors"
+)
+
+const (
+	// NonceSize is the size of the ChaCha20 nonce in bytes.
+	NonceSize = 8
+
+	// INonceSize is the size of the IETF-ChaCha20 nonce in bytes.
+	INonceSize = 12
+
+	// XNonceSize is the size of the XChaCha20 nonce in bytes.
+	XNonceSize = 24
+
+	// KeySize is the size of the key in bytes.
+	KeySize = 32
+)
+
+var (
+	useSSE2 bool
+	useSSSE3 bool
+	useAVX2 bool
+)
+
+var (
+	errKeySize = errors.New("chacha20/chacha: bad key length")
+	errInvalidNonce = errors.New("chacha20/chacha: bad nonce length")
+)
+
+func setup(state *[64]byte, nonce, key []byte) (err error) {
+	if len(key) != KeySize {
+		err = errKeySize
+		return
+	}
+	var Nonce [16]byte
+	switch len(nonce) {
+	case NonceSize:
+		copy(Nonce[8:], nonce)
+		initialize(state, key, &Nonce)
+	case INonceSize:
+		copy(Nonce[4:], nonce)
+		initialize(state, key, &Nonce)
+	case XNonceSize:
+		var tmpKey [32]byte
+		var hNonce [16]byte
+
+		copy(hNonce[:], nonce[:16])
+		copy(tmpKey[:], key)
+		hChaCha20(&tmpKey, &hNonce, &tmpKey)
+		copy(Nonce[8:], nonce[16:])
+		initialize(state, tmpKey[:], &Nonce)
+
+		// BUG(aead): A "good" compiler will remove this (optimizations)
+		// But using the provided key instead of tmpKey,
+		// will change the key (-> probably confuses users)
+		for i := range tmpKey {
+			tmpKey[i] = 0
+		}
+	default:
+		err = errInvalidNonce
+	}
+	return
+}
+
+// XORKeyStream crypts bytes from src to dst using the given nonce and key.
+// The length of the nonce determinds the version of ChaCha20:
+// - NonceSize: ChaCha20/r with a 64 bit nonce and a 2^64 * 64 byte period.
+// - INonceSize: ChaCha20/r as defined in RFC 7539 and a 2^32 * 64 byte period.
+// - XNonceSize: XChaCha20/r with a 192 bit nonce and a 2^64 * 64 byte period.
+// The rounds argument specifies the number of rounds performed for keystream
+// generation - valid values are 8, 12 or 20. The src and dst may be the same slice
+// but otherwise should not overlap. If len(dst) < len(src) this function panics.
+// If the nonce is neither 64, 96 nor 192 bits long, this function panics.
+func XORKeyStream(dst, src, nonce, key []byte, rounds int) {
+	if rounds != 20 && rounds != 12 && rounds != 8 {
+		panic("chacha20/chacha: bad number of rounds")
+	}
+	if len(dst) < len(src) {
+		panic("chacha20/chacha: dst buffer is to small")
+	}
+	if len(nonce) == INonceSize && uint64(len(src)) > (1<<38) {
+		panic("chacha20/chacha: src is too large")
+	}
+
+	var block, state [64]byte
+	if err := setup(&state, nonce, key); err != nil {
+		panic(err)
+	}
+	xorKeyStream(dst, src, &block, &state, rounds)
+}
+
+// Cipher implements ChaCha20/r (XChaCha20/r) for a given number of rounds r.
+type Cipher struct {
+	state, block [64]byte
+	off int
+	rounds int // 20 for ChaCha20
+	noncesize int
+}
+
+// NewCipher returns a new *chacha.Cipher implementing the ChaCha20/r or XChaCha20/r
+// (r = 8, 12 or 20) stream cipher. The nonce must be unique for one key for all time.
+// The length of the nonce determinds the version of ChaCha20:
+// - NonceSize: ChaCha20/r with a 64 bit nonce and a 2^64 * 64 byte period.
+// - INonceSize: ChaCha20/r as defined in RFC 7539 and a 2^32 * 64 byte period.
+// - XNonceSize: XChaCha20/r with a 192 bit nonce and a 2^64 * 64 byte period.
+// If the nonce is neither 64, 96 nor 192 bits long, a non-nil error is returned.
+func NewCipher(nonce, key []byte, rounds int) (*Cipher, error) {
+	if rounds != 20 && rounds != 12 && rounds != 8 {
+		panic("chacha20/chacha: bad number of rounds")
+	}
+
+	c := new(Cipher)
+	if err := setup(&(c.state), nonce, key); err != nil {
+		return nil, err
+	}
+	c.rounds = rounds
+
+	if len(nonce) == INonceSize {
+		c.noncesize = INonceSize
+	} else {
+		c.noncesize = NonceSize
+	}
+
+	return c, nil
+}
+
+// XORKeyStream crypts bytes from src to dst. Src and dst may be the same slice
+// but otherwise should not overlap. If len(dst) < len(src) the function panics.
+func (c *Cipher) XORKeyStream(dst, src []byte) {
+	if len(dst) < len(src) {
+		panic("chacha20/chacha: dst buffer is to small")
+	}
+
+	if c.off > 0 {
+		n := len(c.block[c.off:])
+		if len(src) <= n {
+			for i, v := range src {
+				dst[i] = v ^ c.block[c.off]
+				c.off++
+			}
+			if c.off == 64 {
+				c.off = 0
+			}
+			return
+		}
+
+		for i, v := range c.block[c.off:] {
+			dst[i] = src[i] ^ v
+		}
+		src = src[n:]
+		dst = dst[n:]
+		c.off = 0
+	}
+
+	c.off += xorKeyStream(dst, src, &(c.block), &(c.state), c.rounds)
+}
+
+// SetCounter skips ctr * 64 byte blocks. SetCounter(0) resets the cipher.
+// This function always skips the unused keystream of the current 64 byte block.
+func (c *Cipher) SetCounter(ctr uint64) {
+	if c.noncesize == INonceSize {
+		binary.LittleEndian.PutUint32(c.state[48:], uint32(ctr))
+	} else {
+		binary.LittleEndian.PutUint64(c.state[48:], ctr)
+	}
+	c.off = 0
+}
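As a quick orientation to the API above, here is a hedged usage sketch of the chacha subpackage: it encrypts and then decrypts a short message with ChaCha20 (20 rounds) and an 8-byte nonce, using only the exported names defined in this file; the plaintext is made up for illustration.

```go
package main

import (
	"crypto/rand"
	"fmt"

	"github.com/aead/chacha20/chacha"
)

func main() {
	key := make([]byte, chacha.KeySize)     // 32-byte key
	nonce := make([]byte, chacha.NonceSize) // 8-byte nonce selects classic ChaCha20
	rand.Read(key)
	rand.Read(nonce)

	msg := []byte("example plaintext")
	ct := make([]byte, len(msg))
	chacha.XORKeyStream(ct, msg, nonce, key, 20) // 20 rounds = ChaCha20

	pt := make([]byte, len(ct))
	chacha.XORKeyStream(pt, ct, nonce, key, 20) // XORing with the same keystream again decrypts
	fmt.Printf("%s\n", pt)
}
```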
@@ -0,0 +1,542 @@
|
||||||
|
// Copyright (c) 2016 Andreas Auernhammer. All rights reserved.
|
||||||
|
// Use of this source code is governed by a license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build go1.7,amd64,!gccgo,!appengine,!nacl
|
||||||
|
|
||||||
|
#include "textflag.h"
|
||||||
|
|
||||||
|
DATA ·sigma_AVX<>+0x00(SB)/4, $0x61707865
|
||||||
|
DATA ·sigma_AVX<>+0x04(SB)/4, $0x3320646e
|
||||||
|
DATA ·sigma_AVX<>+0x08(SB)/4, $0x79622d32
|
||||||
|
DATA ·sigma_AVX<>+0x0C(SB)/4, $0x6b206574
|
||||||
|
GLOBL ·sigma_AVX<>(SB), (NOPTR+RODATA), $16
|
||||||
|
|
||||||
|
DATA ·one_AVX<>+0x00(SB)/8, $1
|
||||||
|
DATA ·one_AVX<>+0x08(SB)/8, $0
|
||||||
|
GLOBL ·one_AVX<>(SB), (NOPTR+RODATA), $16
|
||||||
|
|
||||||
|
DATA ·one_AVX2<>+0x00(SB)/8, $0
|
||||||
|
DATA ·one_AVX2<>+0x08(SB)/8, $0
|
||||||
|
DATA ·one_AVX2<>+0x10(SB)/8, $1
|
||||||
|
DATA ·one_AVX2<>+0x18(SB)/8, $0
|
||||||
|
GLOBL ·one_AVX2<>(SB), (NOPTR+RODATA), $32
|
||||||
|
|
||||||
|
DATA ·two_AVX2<>+0x00(SB)/8, $2
|
||||||
|
DATA ·two_AVX2<>+0x08(SB)/8, $0
|
||||||
|
DATA ·two_AVX2<>+0x10(SB)/8, $2
|
||||||
|
DATA ·two_AVX2<>+0x18(SB)/8, $0
|
||||||
|
GLOBL ·two_AVX2<>(SB), (NOPTR+RODATA), $32
|
||||||
|
|
||||||
|
DATA ·rol16_AVX2<>+0x00(SB)/8, $0x0504070601000302
|
||||||
|
DATA ·rol16_AVX2<>+0x08(SB)/8, $0x0D0C0F0E09080B0A
|
||||||
|
DATA ·rol16_AVX2<>+0x10(SB)/8, $0x0504070601000302
|
||||||
|
DATA ·rol16_AVX2<>+0x18(SB)/8, $0x0D0C0F0E09080B0A
|
||||||
|
GLOBL ·rol16_AVX2<>(SB), (NOPTR+RODATA), $32
|
||||||
|
|
||||||
|
DATA ·rol8_AVX2<>+0x00(SB)/8, $0x0605040702010003
|
||||||
|
DATA ·rol8_AVX2<>+0x08(SB)/8, $0x0E0D0C0F0A09080B
|
||||||
|
DATA ·rol8_AVX2<>+0x10(SB)/8, $0x0605040702010003
|
||||||
|
DATA ·rol8_AVX2<>+0x18(SB)/8, $0x0E0D0C0F0A09080B
|
||||||
|
GLOBL ·rol8_AVX2<>(SB), (NOPTR+RODATA), $32
|
||||||
|
|
||||||
|
#define ROTL(n, t, v) \
|
||||||
|
VPSLLD $n, v, t; \
|
||||||
|
VPSRLD $(32-n), v, v; \
|
||||||
|
VPXOR v, t, v
|
||||||
|
|
||||||
|
#define CHACHA_QROUND(v0, v1, v2, v3, t, c16, c8) \
|
||||||
|
VPADDD v0, v1, v0; \
|
||||||
|
VPXOR v3, v0, v3; \
|
||||||
|
VPSHUFB c16, v3, v3; \
|
||||||
|
VPADDD v2, v3, v2; \
|
||||||
|
VPXOR v1, v2, v1; \
|
||||||
|
ROTL(12, t, v1); \
|
||||||
|
VPADDD v0, v1, v0; \
|
||||||
|
VPXOR v3, v0, v3; \
|
||||||
|
VPSHUFB c8, v3, v3; \
|
||||||
|
VPADDD v2, v3, v2; \
|
||||||
|
VPXOR v1, v2, v1; \
|
||||||
|
ROTL(7, t, v1)
|
||||||
|
|
||||||
|
#define CHACHA_SHUFFLE(v1, v2, v3) \
|
||||||
|
VPSHUFD $0x39, v1, v1; \
|
||||||
|
VPSHUFD $0x4E, v2, v2; \
|
||||||
|
VPSHUFD $-109, v3, v3
|
||||||
|
|
||||||
|
#define XOR_AVX2(dst, src, off, v0, v1, v2, v3, t0, t1) \
|
||||||
|
VMOVDQU (0+off)(src), t0; \
|
||||||
|
VPERM2I128 $32, v1, v0, t1; \
|
||||||
|
VPXOR t0, t1, t0; \
|
||||||
|
VMOVDQU t0, (0+off)(dst); \
|
||||||
|
VMOVDQU (32+off)(src), t0; \
|
||||||
|
VPERM2I128 $32, v3, v2, t1; \
|
||||||
|
VPXOR t0, t1, t0; \
|
||||||
|
VMOVDQU t0, (32+off)(dst); \
|
||||||
|
VMOVDQU (64+off)(src), t0; \
|
||||||
|
VPERM2I128 $49, v1, v0, t1; \
|
||||||
|
VPXOR t0, t1, t0; \
|
||||||
|
VMOVDQU t0, (64+off)(dst); \
|
||||||
|
VMOVDQU (96+off)(src), t0; \
|
||||||
|
VPERM2I128 $49, v3, v2, t1; \
|
||||||
|
VPXOR t0, t1, t0; \
|
||||||
|
VMOVDQU t0, (96+off)(dst)
|
||||||
|
|
||||||
|
#define XOR_UPPER_AVX2(dst, src, off, v0, v1, v2, v3, t0, t1) \
|
||||||
|
VMOVDQU (0+off)(src), t0; \
|
||||||
|
VPERM2I128 $32, v1, v0, t1; \
|
||||||
|
VPXOR t0, t1, t0; \
|
||||||
|
VMOVDQU t0, (0+off)(dst); \
|
||||||
|
VMOVDQU (32+off)(src), t0; \
|
||||||
|
VPERM2I128 $32, v3, v2, t1; \
|
||||||
|
VPXOR t0, t1, t0; \
|
||||||
|
VMOVDQU t0, (32+off)(dst); \
|
||||||
|
|
||||||
|
#define EXTRACT_LOWER(dst, v0, v1, v2, v3, t0) \
|
||||||
|
VPERM2I128 $49, v1, v0, t0; \
|
||||||
|
VMOVDQU t0, 0(dst); \
|
||||||
|
VPERM2I128 $49, v3, v2, t0; \
|
||||||
|
VMOVDQU t0, 32(dst)
|
||||||
|
|
||||||
|
#define XOR_AVX(dst, src, off, v0, v1, v2, v3, t0) \
|
||||||
|
VPXOR 0+off(src), v0, t0; \
|
||||||
|
VMOVDQU t0, 0+off(dst); \
|
||||||
|
VPXOR 16+off(src), v1, t0; \
|
||||||
|
VMOVDQU t0, 16+off(dst); \
|
||||||
|
VPXOR 32+off(src), v2, t0; \
|
||||||
|
VMOVDQU t0, 32+off(dst); \
|
||||||
|
VPXOR 48+off(src), v3, t0; \
|
||||||
|
VMOVDQU t0, 48+off(dst)
|
||||||
|
|
||||||
|
#define TWO 0(SP)
|
||||||
|
#define C16 32(SP)
|
||||||
|
#define C8 64(SP)
|
||||||
|
#define STATE_0 96(SP)
|
||||||
|
#define STATE_1 128(SP)
|
||||||
|
#define STATE_2 160(SP)
|
||||||
|
#define STATE_3 192(SP)
|
||||||
|
#define TMP_0 224(SP)
|
||||||
|
#define TMP_1 256(SP)
|
||||||
|
|
||||||
|
// func xorKeyStreamAVX(dst, src []byte, block, state *[64]byte, rounds int) int
|
||||||
|
TEXT ·xorKeyStreamAVX2(SB), 4, $320-80
|
||||||
|
MOVQ dst_base+0(FP), DI
|
||||||
|
MOVQ src_base+24(FP), SI
|
||||||
|
MOVQ src_len+32(FP), CX
|
||||||
|
MOVQ block+48(FP), BX
|
||||||
|
MOVQ state+56(FP), AX
|
||||||
|
MOVQ rounds+64(FP), DX
|
||||||
|
|
||||||
|
MOVQ SP, R8
|
||||||
|
ADDQ $32, SP
|
||||||
|
ANDQ $-32, SP
|
||||||
|
|
||||||
|
VMOVDQU 0(AX), Y2
|
||||||
|
VMOVDQU 32(AX), Y3
|
||||||
|
VPERM2I128 $0x22, Y2, Y0, Y0
|
||||||
|
VPERM2I128 $0x33, Y2, Y1, Y1
|
||||||
|
VPERM2I128 $0x22, Y3, Y2, Y2
|
||||||
|
VPERM2I128 $0x33, Y3, Y3, Y3
|
||||||
|
|
||||||
|
TESTQ CX, CX
|
||||||
|
JZ done
|
||||||
|
|
||||||
|
VMOVDQU ·one_AVX2<>(SB), Y4
|
||||||
|
VPADDD Y4, Y3, Y3
|
||||||
|
|
||||||
|
VMOVDQA Y0, STATE_0
|
||||||
|
VMOVDQA Y1, STATE_1
|
||||||
|
VMOVDQA Y2, STATE_2
|
||||||
|
VMOVDQA Y3, STATE_3
|
||||||
|
|
||||||
|
VMOVDQU ·rol16_AVX2<>(SB), Y4
|
||||||
|
VMOVDQU ·rol8_AVX2<>(SB), Y5
|
||||||
|
VMOVDQU ·two_AVX2<>(SB), Y6
|
||||||
|
VMOVDQA Y4, Y14
|
||||||
|
VMOVDQA Y5, Y15
|
||||||
|
VMOVDQA Y4, C16
|
||||||
|
VMOVDQA Y5, C8
|
||||||
|
VMOVDQA Y6, TWO
|
||||||
|
|
||||||
|
CMPQ CX, $64
|
||||||
|
JBE between_0_and_64
|
||||||
|
CMPQ CX, $192
|
||||||
|
JBE between_64_and_192
|
||||||
|
CMPQ CX, $320
|
||||||
|
JBE between_192_and_320
|
||||||
|
CMPQ CX, $448
|
||||||
|
JBE between_320_and_448
|
||||||
|
|
||||||
|
at_least_512:
|
||||||
|
VMOVDQA Y0, Y4
|
||||||
|
VMOVDQA Y1, Y5
|
||||||
|
VMOVDQA Y2, Y6
|
||||||
|
VPADDQ TWO, Y3, Y7
|
||||||
|
VMOVDQA Y0, Y8
|
||||||
|
VMOVDQA Y1, Y9
|
||||||
|
VMOVDQA Y2, Y10
|
||||||
|
VPADDQ TWO, Y7, Y11
|
||||||
|
VMOVDQA Y0, Y12
|
||||||
|
VMOVDQA Y1, Y13
|
||||||
|
VMOVDQA Y2, Y14
|
||||||
|
VPADDQ TWO, Y11, Y15
|
||||||
|
|
||||||
|
MOVQ DX, R9
|
||||||
|
|
||||||
|
chacha_loop_512:
|
||||||
|
VMOVDQA Y8, TMP_0
|
||||||
|
CHACHA_QROUND(Y0, Y1, Y2, Y3, Y8, C16, C8)
|
||||||
|
CHACHA_QROUND(Y4, Y5, Y6, Y7, Y8, C16, C8)
|
||||||
|
VMOVDQA TMP_0, Y8
|
||||||
|
VMOVDQA Y0, TMP_0
|
||||||
|
CHACHA_QROUND(Y8, Y9, Y10, Y11, Y0, C16, C8)
|
||||||
|
CHACHA_QROUND(Y12, Y13, Y14, Y15, Y0, C16, C8)
|
||||||
|
CHACHA_SHUFFLE(Y1, Y2, Y3)
|
||||||
|
CHACHA_SHUFFLE(Y5, Y6, Y7)
|
||||||
|
CHACHA_SHUFFLE(Y9, Y10, Y11)
|
||||||
|
CHACHA_SHUFFLE(Y13, Y14, Y15)
|
||||||
|
|
||||||
|
CHACHA_QROUND(Y12, Y13, Y14, Y15, Y0, C16, C8)
|
||||||
|
CHACHA_QROUND(Y8, Y9, Y10, Y11, Y0, C16, C8)
|
||||||
|
VMOVDQA TMP_0, Y0
|
||||||
|
VMOVDQA Y8, TMP_0
|
||||||
|
CHACHA_QROUND(Y4, Y5, Y6, Y7, Y8, C16, C8)
|
||||||
|
CHACHA_QROUND(Y0, Y1, Y2, Y3, Y8, C16, C8)
|
||||||
|
VMOVDQA TMP_0, Y8
|
||||||
|
CHACHA_SHUFFLE(Y3, Y2, Y1)
|
||||||
|
CHACHA_SHUFFLE(Y7, Y6, Y5)
|
||||||
|
CHACHA_SHUFFLE(Y11, Y10, Y9)
|
||||||
|
CHACHA_SHUFFLE(Y15, Y14, Y13)
|
||||||
|
SUBQ $2, R9
|
||||||
|
JA chacha_loop_512
|
||||||
|
|
||||||
|
VMOVDQA Y12, TMP_0
|
||||||
|
VMOVDQA Y13, TMP_1
|
||||||
|
VPADDD STATE_0, Y0, Y0
|
||||||
|
VPADDD STATE_1, Y1, Y1
|
||||||
|
VPADDD STATE_2, Y2, Y2
|
||||||
|
VPADDD STATE_3, Y3, Y3
|
||||||
|
XOR_AVX2(DI, SI, 0, Y0, Y1, Y2, Y3, Y12, Y13)
|
||||||
|
VMOVDQA STATE_0, Y0
|
||||||
|
VMOVDQA STATE_1, Y1
|
||||||
|
VMOVDQA STATE_2, Y2
|
||||||
|
VMOVDQA STATE_3, Y3
|
||||||
|
VPADDQ TWO, Y3, Y3
|
||||||
|
|
||||||
|
VPADDD Y0, Y4, Y4
|
||||||
|
VPADDD Y1, Y5, Y5
|
||||||
|
VPADDD Y2, Y6, Y6
|
||||||
|
VPADDD Y3, Y7, Y7
|
||||||
|
XOR_AVX2(DI, SI, 128, Y4, Y5, Y6, Y7, Y12, Y13)
|
||||||
|
VPADDQ TWO, Y3, Y3
|
||||||
|
|
||||||
|
VPADDD Y0, Y8, Y8
|
||||||
|
VPADDD Y1, Y9, Y9
|
||||||
|
VPADDD Y2, Y10, Y10
|
||||||
|
VPADDD Y3, Y11, Y11
|
||||||
|
XOR_AVX2(DI, SI, 256, Y8, Y9, Y10, Y11, Y12, Y13)
|
||||||
|
VPADDQ TWO, Y3, Y3
|
||||||
|
|
||||||
|
VPADDD TMP_0, Y0, Y12
|
||||||
|
VPADDD TMP_1, Y1, Y13
|
||||||
|
VPADDD Y2, Y14, Y14
|
||||||
|
VPADDD Y3, Y15, Y15
|
||||||
|
VPADDQ TWO, Y3, Y3
|
||||||
|
|
||||||
|
CMPQ CX, $512
|
||||||
|
JB less_than_512
|
||||||
|
|
||||||
|
XOR_AVX2(DI, SI, 384, Y12, Y13, Y14, Y15, Y4, Y5)
|
||||||
|
VMOVDQA Y3, STATE_3
|
||||||
|
ADDQ $512, SI
|
||||||
|
ADDQ $512, DI
|
||||||
|
SUBQ $512, CX
|
||||||
|
CMPQ CX, $448
|
||||||
|
JA at_least_512
|
||||||
|
|
||||||
|
TESTQ CX, CX
|
||||||
|
JZ done
|
||||||
|
|
||||||
|
VMOVDQA C16, Y14
|
||||||
|
VMOVDQA C8, Y15
|
||||||
|
|
||||||
|
CMPQ CX, $64
|
||||||
|
JBE between_0_and_64
|
||||||
|
CMPQ CX, $192
|
||||||
|
JBE between_64_and_192
|
||||||
|
CMPQ CX, $320
|
||||||
|
JBE between_192_and_320
|
||||||
|
JMP between_320_and_448
|
||||||
|
|
||||||
|
less_than_512:
|
||||||
|
XOR_UPPER_AVX2(DI, SI, 384, Y12, Y13, Y14, Y15, Y4, Y5)
|
||||||
|
EXTRACT_LOWER(BX, Y12, Y13, Y14, Y15, Y4)
|
||||||
|
ADDQ $448, SI
|
||||||
|
ADDQ $448, DI
|
||||||
|
SUBQ $448, CX
|
||||||
|
JMP finalize
|
||||||
|
|
||||||
|
between_320_and_448:
|
||||||
|
VMOVDQA Y0, Y4
|
||||||
|
VMOVDQA Y1, Y5
|
||||||
|
VMOVDQA Y2, Y6
|
||||||
|
VPADDQ TWO, Y3, Y7
|
||||||
|
VMOVDQA Y0, Y8
|
||||||
|
VMOVDQA Y1, Y9
|
||||||
|
VMOVDQA Y2, Y10
|
||||||
|
VPADDQ TWO, Y7, Y11
|
||||||
|
|
||||||
|
MOVQ DX, R9
|
||||||
|
|
||||||
|
chacha_loop_384:
|
||||||
|
CHACHA_QROUND(Y0, Y1, Y2, Y3, Y13, Y14, Y15)
|
||||||
|
CHACHA_QROUND(Y4, Y5, Y6, Y7, Y13, Y14, Y15)
|
||||||
|
CHACHA_QROUND(Y8, Y9, Y10, Y11, Y13, Y14, Y15)
|
||||||
|
CHACHA_SHUFFLE(Y1, Y2, Y3)
|
||||||
|
CHACHA_SHUFFLE(Y5, Y6, Y7)
|
||||||
|
CHACHA_SHUFFLE(Y9, Y10, Y11)
|
||||||
|
CHACHA_QROUND(Y0, Y1, Y2, Y3, Y13, Y14, Y15)
|
||||||
|
CHACHA_QROUND(Y4, Y5, Y6, Y7, Y13, Y14, Y15)
|
||||||
|
CHACHA_QROUND(Y8, Y9, Y10, Y11, Y13, Y14, Y15)
|
||||||
|
CHACHA_SHUFFLE(Y3, Y2, Y1)
|
||||||
|
CHACHA_SHUFFLE(Y7, Y6, Y5)
|
||||||
|
CHACHA_SHUFFLE(Y11, Y10, Y9)
|
||||||
|
SUBQ $2, R9
|
||||||
|
JA chacha_loop_384
|
||||||
|
|
||||||
|
VPADDD STATE_0, Y0, Y0
|
||||||
|
VPADDD STATE_1, Y1, Y1
|
||||||
|
VPADDD STATE_2, Y2, Y2
|
||||||
|
VPADDD STATE_3, Y3, Y3
|
||||||
|
XOR_AVX2(DI, SI, 0, Y0, Y1, Y2, Y3, Y12, Y13)
|
||||||
|
VMOVDQA STATE_0, Y0
|
||||||
|
VMOVDQA STATE_1, Y1
|
||||||
|
VMOVDQA STATE_2, Y2
|
||||||
|
VMOVDQA STATE_3, Y3
|
||||||
|
VPADDQ TWO, Y3, Y3
|
||||||
|
|
||||||
|
VPADDD Y0, Y4, Y4
|
||||||
|
VPADDD Y1, Y5, Y5
|
||||||
|
VPADDD Y2, Y6, Y6
|
||||||
|
VPADDD Y3, Y7, Y7
|
||||||
|
XOR_AVX2(DI, SI, 128, Y4, Y5, Y6, Y7, Y12, Y13)
|
||||||
|
VPADDQ TWO, Y3, Y3
|
||||||
|
|
||||||
|
VPADDD Y0, Y8, Y8
|
||||||
|
VPADDD Y1, Y9, Y9
|
||||||
|
VPADDD Y2, Y10, Y10
|
||||||
|
VPADDD Y3, Y11, Y11
|
||||||
|
VPADDQ TWO, Y3, Y3
|
||||||
|
|
||||||
|
CMPQ CX, $384
|
||||||
|
JB less_than_384
|
||||||
|
|
||||||
|
XOR_AVX2(DI, SI, 256, Y8, Y9, Y10, Y11, Y12, Y13)
|
||||||
|
SUBQ $384, CX
|
||||||
|
TESTQ CX, CX
|
||||||
|
JE done
|
||||||
|
|
||||||
|
ADDQ $384, SI
|
||||||
|
ADDQ $384, DI
|
||||||
|
JMP between_0_and_64
|
||||||
|
|
||||||
|
less_than_384:
|
||||||
|
XOR_UPPER_AVX2(DI, SI, 256, Y8, Y9, Y10, Y11, Y12, Y13)
|
||||||
|
EXTRACT_LOWER(BX, Y8, Y9, Y10, Y11, Y12)
|
||||||
|
ADDQ $320, SI
|
||||||
|
ADDQ $320, DI
|
||||||
|
SUBQ $320, CX
|
||||||
|
JMP finalize
|
||||||
|
|
||||||
|
between_192_and_320:
|
||||||
|
VMOVDQA Y0, Y4
|
||||||
|
VMOVDQA Y1, Y5
|
||||||
|
VMOVDQA Y2, Y6
|
||||||
|
VMOVDQA Y3, Y7
|
||||||
|
VMOVDQA Y0, Y8
|
||||||
|
VMOVDQA Y1, Y9
|
||||||
|
VMOVDQA Y2, Y10
|
||||||
|
VPADDQ TWO, Y3, Y11
|
||||||
|
|
||||||
|
MOVQ DX, R9
|
||||||
|
|
||||||
|
chacha_loop_256:
|
||||||
|
CHACHA_QROUND(Y4, Y5, Y6, Y7, Y13, Y14, Y15)
|
||||||
|
CHACHA_QROUND(Y8, Y9, Y10, Y11, Y13, Y14, Y15)
|
||||||
|
CHACHA_SHUFFLE(Y5, Y6, Y7)
|
||||||
|
CHACHA_SHUFFLE(Y9, Y10, Y11)
|
||||||
|
CHACHA_QROUND(Y4, Y5, Y6, Y7, Y13, Y14, Y15)
|
||||||
|
CHACHA_QROUND(Y8, Y9, Y10, Y11, Y13, Y14, Y15)
|
||||||
|
CHACHA_SHUFFLE(Y7, Y6, Y5)
|
||||||
|
CHACHA_SHUFFLE(Y11, Y10, Y9)
|
||||||
|
SUBQ $2, R9
|
||||||
|
JA chacha_loop_256
|
||||||
|
|
||||||
|
VPADDD Y0, Y4, Y4
|
||||||
|
VPADDD Y1, Y5, Y5
|
||||||
|
VPADDD Y2, Y6, Y6
|
||||||
|
VPADDD Y3, Y7, Y7
|
||||||
|
VPADDQ TWO, Y3, Y3
|
||||||
|
XOR_AVX2(DI, SI, 0, Y4, Y5, Y6, Y7, Y12, Y13)
|
||||||
|
VPADDD Y0, Y8, Y8
|
||||||
|
VPADDD Y1, Y9, Y9
|
||||||
|
VPADDD Y2, Y10, Y10
|
||||||
|
VPADDD Y3, Y11, Y11
|
||||||
|
VPADDQ TWO, Y3, Y3
|
||||||
|
|
||||||
|
CMPQ CX, $256
|
||||||
|
JB less_than_256
|
||||||
|
|
||||||
|
XOR_AVX2(DI, SI, 128, Y8, Y9, Y10, Y11, Y12, Y13)
|
||||||
|
SUBQ $256, CX
|
||||||
|
TESTQ CX, CX
|
||||||
|
JE done
|
||||||
|
|
||||||
|
ADDQ $256, SI
|
||||||
|
ADDQ $256, DI
|
||||||
|
JMP between_0_and_64
|
||||||
|
|
||||||
|
less_than_256:
|
||||||
|
XOR_UPPER_AVX2(DI, SI, 128, Y8, Y9, Y10, Y11, Y12, Y13)
|
||||||
|
EXTRACT_LOWER(BX, Y8, Y9, Y10, Y11, Y12)
|
||||||
|
ADDQ $192, SI
|
||||||
|
ADDQ $192, DI
|
||||||
|
SUBQ $192, CX
|
||||||
|
JMP finalize
|
||||||
|
|
||||||
|
between_64_and_192:
|
||||||
|
VMOVDQA Y0, Y4
|
||||||
|
VMOVDQA Y1, Y5
|
||||||
|
VMOVDQA Y2, Y6
|
||||||
|
VMOVDQA Y3, Y7
|
||||||
|
|
||||||
|
MOVQ DX, R9
|
||||||
|
|
||||||
|
chacha_loop_128:
|
||||||
|
CHACHA_QROUND(Y4, Y5, Y6, Y7, Y13, Y14, Y15)
|
||||||
|
CHACHA_SHUFFLE(Y5, Y6, Y7)
|
||||||
|
CHACHA_QROUND(Y4, Y5, Y6, Y7, Y13, Y14, Y15)
|
||||||
|
CHACHA_SHUFFLE(Y7, Y6, Y5)
|
||||||
|
SUBQ $2, R9
|
||||||
|
JA chacha_loop_128
|
||||||
|
|
||||||
|
VPADDD Y0, Y4, Y4
|
||||||
|
VPADDD Y1, Y5, Y5
|
||||||
|
VPADDD Y2, Y6, Y6
|
||||||
|
VPADDD Y3, Y7, Y7
|
||||||
|
VPADDQ TWO, Y3, Y3
|
||||||
|
|
||||||
|
CMPQ CX, $128
|
||||||
|
JB less_than_128
|
||||||
|
|
||||||
|
XOR_AVX2(DI, SI, 0, Y4, Y5, Y6, Y7, Y12, Y13)
|
||||||
|
SUBQ $128, CX
|
||||||
|
TESTQ CX, CX
|
||||||
|
JE done
|
||||||
|
|
||||||
|
ADDQ $128, SI
|
||||||
|
ADDQ $128, DI
|
||||||
|
JMP between_0_and_64
|
||||||
|
|
||||||
|
less_than_128:
|
||||||
|
XOR_UPPER_AVX2(DI, SI, 0, Y4, Y5, Y6, Y7, Y12, Y13)
|
||||||
|
EXTRACT_LOWER(BX, Y4, Y5, Y6, Y7, Y13)
|
||||||
|
ADDQ $64, SI
|
||||||
|
ADDQ $64, DI
|
||||||
|
SUBQ $64, CX
|
||||||
|
JMP finalize
|
||||||
|
|
||||||
|
between_0_and_64:
|
||||||
|
VMOVDQA X0, X4
|
||||||
|
VMOVDQA X1, X5
|
||||||
|
VMOVDQA X2, X6
|
||||||
|
VMOVDQA X3, X7
|
||||||
|
|
||||||
|
MOVQ DX, R9
|
||||||
|
|
||||||
|
chacha_loop_64:
|
||||||
|
CHACHA_QROUND(X4, X5, X6, X7, X13, X14, X15)
|
||||||
|
CHACHA_SHUFFLE(X5, X6, X7)
|
||||||
|
CHACHA_QROUND(X4, X5, X6, X7, X13, X14, X15)
|
||||||
|
CHACHA_SHUFFLE(X7, X6, X5)
|
||||||
|
SUBQ $2, R9
|
||||||
|
JA chacha_loop_64
|
||||||
|
|
||||||
|
VPADDD X0, X4, X4
|
||||||
|
VPADDD X1, X5, X5
|
||||||
|
VPADDD X2, X6, X6
|
||||||
|
VPADDD X3, X7, X7
|
||||||
|
VMOVDQU ·one_AVX<>(SB), X0
|
||||||
|
VPADDQ X0, X3, X3
|
||||||
|
|
||||||
|
CMPQ CX, $64
|
||||||
|
JB less_than_64
|
||||||
|
|
||||||
|
XOR_AVX(DI, SI, 0, X4, X5, X6, X7, X13)
|
||||||
|
SUBQ $64, CX
|
||||||
|
JMP done
|
||||||
|
|
||||||
|
less_than_64:
|
||||||
|
VMOVDQU X4, 0(BX)
|
||||||
|
VMOVDQU X5, 16(BX)
|
||||||
|
VMOVDQU X6, 32(BX)
|
||||||
|
VMOVDQU X7, 48(BX)
|
||||||
|
|
||||||
|
finalize:
|
||||||
|
XORQ R11, R11
|
||||||
|
XORQ R12, R12
|
||||||
|
MOVQ CX, BP
|
||||||
|
|
||||||
|
xor_loop:
|
||||||
|
MOVB 0(SI), R11
|
||||||
|
MOVB 0(BX), R12
|
||||||
|
XORQ R11, R12
|
||||||
|
MOVB R12, 0(DI)
|
||||||
|
INCQ SI
|
||||||
|
INCQ BX
|
||||||
|
INCQ DI
|
||||||
|
DECQ BP
|
||||||
|
JA xor_loop
|
||||||
|
|
||||||
|
done:
|
||||||
|
VMOVDQU X3, 48(AX)
|
||||||
|
VZEROUPPER
|
||||||
|
MOVQ R8, SP
|
||||||
|
MOVQ CX, ret+72(FP)
|
||||||
|
RET
|
||||||
|
|
||||||
|
// func hChaCha20AVX(out *[32]byte, nonce *[16]byte, key *[32]byte)
|
||||||
|
TEXT ·hChaCha20AVX(SB), 4, $0-24
|
||||||
|
MOVQ out+0(FP), DI
|
||||||
|
MOVQ nonce+8(FP), AX
|
||||||
|
MOVQ key+16(FP), BX
|
||||||
|
|
||||||
|
VMOVDQU ·sigma_AVX<>(SB), X0
|
||||||
|
VMOVDQU 0(BX), X1
|
||||||
|
VMOVDQU 16(BX), X2
|
||||||
|
VMOVDQU 0(AX), X3
|
||||||
|
VMOVDQU ·rol16_AVX2<>(SB), X5
|
||||||
|
VMOVDQU ·rol8_AVX2<>(SB), X6
|
||||||
|
|
||||||
|
MOVQ $20, CX
|
||||||
|
|
||||||
|
chacha_loop:
|
||||||
|
CHACHA_QROUND(X0, X1, X2, X3, X4, X5, X6)
|
||||||
|
CHACHA_SHUFFLE(X1, X2, X3)
|
||||||
|
CHACHA_QROUND(X0, X1, X2, X3, X4, X5, X6)
|
||||||
|
CHACHA_SHUFFLE(X3, X2, X1)
|
||||||
|
SUBQ $2, CX
|
||||||
|
JNZ chacha_loop
|
||||||
|
|
||||||
|
VMOVDQU X0, 0(DI)
|
||||||
|
VMOVDQU X3, 16(DI)
|
||||||
|
VZEROUPPER
|
||||||
|
RET
|
||||||
|
|
||||||
|
// func supportsAVX2() bool
|
||||||
|
TEXT ·supportsAVX2(SB), 4, $0-1
|
||||||
|
MOVQ runtime·support_avx(SB), AX
|
||||||
|
MOVQ runtime·support_avx2(SB), BX
|
||||||
|
ANDQ AX, BX
|
||||||
|
MOVB BX, ret+0(FP)
|
||||||
|
RET
|
|
@@ -0,0 +1,67 @@
|
||||||
|
// Copyright (c) 2016 Andreas Auernhammer. All rights reserved.
|
||||||
|
// Use of this source code is governed by a license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build 386,!gccgo,!appengine,!nacl
|
||||||
|
|
||||||
|
package chacha
|
||||||
|
|
||||||
|
import "encoding/binary"
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
useSSE2 = supportsSSE2()
|
||||||
|
useSSSE3 = supportsSSSE3()
|
||||||
|
useAVX2 = false
|
||||||
|
}
|
||||||
|
|
||||||
|
func initialize(state *[64]byte, key []byte, nonce *[16]byte) {
|
||||||
|
binary.LittleEndian.PutUint32(state[0:], sigma[0])
|
||||||
|
binary.LittleEndian.PutUint32(state[4:], sigma[1])
|
||||||
|
binary.LittleEndian.PutUint32(state[8:], sigma[2])
|
||||||
|
binary.LittleEndian.PutUint32(state[12:], sigma[3])
|
||||||
|
copy(state[16:], key[:])
|
||||||
|
copy(state[48:], nonce[:])
|
||||||
|
}
|
||||||
|
|
||||||
|
// This function is implemented in chacha_386.s
|
||||||
|
//go:noescape
|
||||||
|
func supportsSSE2() bool
|
||||||
|
|
||||||
|
// This function is implemented in chacha_386.s
|
||||||
|
//go:noescape
|
||||||
|
func supportsSSSE3() bool
|
||||||
|
|
||||||
|
// This function is implemented in chacha_386.s
|
||||||
|
//go:noescape
|
||||||
|
func hChaCha20SSE2(out *[32]byte, nonce *[16]byte, key *[32]byte)
|
||||||
|
|
||||||
|
// This function is implemented in chacha_386.s
|
||||||
|
//go:noescape
|
||||||
|
func hChaCha20SSSE3(out *[32]byte, nonce *[16]byte, key *[32]byte)
|
||||||
|
|
||||||
|
// This function is implemented in chacha_386.s
|
||||||
|
//go:noescape
|
||||||
|
func xorKeyStreamSSE2(dst, src []byte, block, state *[64]byte, rounds int) int
|
||||||
|
|
||||||
|
// This function is implemented in chacha_386.s
|
||||||
|
//go:noescape
|
||||||
|
func xorKeyStreamSSSE3(dst, src []byte, block, state *[64]byte, rounds int) int
|
||||||
|
|
||||||
|
func hChaCha20(out *[32]byte, nonce *[16]byte, key *[32]byte) {
|
||||||
|
if useSSSE3 {
|
||||||
|
hChaCha20SSSE3(out, nonce, key)
|
||||||
|
} else if useSSE2 {
|
||||||
|
hChaCha20SSE2(out, nonce, key)
|
||||||
|
} else {
|
||||||
|
hChaCha20Generic(out, nonce, key)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func xorKeyStream(dst, src []byte, block, state *[64]byte, rounds int) int {
|
||||||
|
if useSSSE3 {
|
||||||
|
return xorKeyStreamSSSE3(dst, src, block, state, rounds)
|
||||||
|
} else if useSSE2 {
|
||||||
|
return xorKeyStreamSSE2(dst, src, block, state, rounds)
|
||||||
|
}
|
||||||
|
return xorKeyStreamGeneric(dst, src, block, state, rounds)
|
||||||
|
}
|
|
@@ -0,0 +1,311 @@
|
||||||
|
// Copyright (c) 2016 Andreas Auernhammer. All rights reserved.
|
||||||
|
// Use of this source code is governed by a license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build 386,!gccgo,!appengine,!nacl
|
||||||
|
|
||||||
|
#include "textflag.h"
|
||||||
|
|
||||||
|
DATA ·sigma<>+0x00(SB)/4, $0x61707865
|
||||||
|
DATA ·sigma<>+0x04(SB)/4, $0x3320646e
|
||||||
|
DATA ·sigma<>+0x08(SB)/4, $0x79622d32
|
||||||
|
DATA ·sigma<>+0x0C(SB)/4, $0x6b206574
|
||||||
|
GLOBL ·sigma<>(SB), (NOPTR+RODATA), $16
|
||||||
|
|
||||||
|
DATA ·one<>+0x00(SB)/8, $1
|
||||||
|
DATA ·one<>+0x08(SB)/8, $0
|
||||||
|
GLOBL ·one<>(SB), (NOPTR+RODATA), $16
|
||||||
|
|
||||||
|
DATA ·rol16<>+0x00(SB)/8, $0x0504070601000302
|
||||||
|
DATA ·rol16<>+0x08(SB)/8, $0x0D0C0F0E09080B0A
|
||||||
|
GLOBL ·rol16<>(SB), (NOPTR+RODATA), $16
|
||||||
|
|
||||||
|
DATA ·rol8<>+0x00(SB)/8, $0x0605040702010003
|
||||||
|
DATA ·rol8<>+0x08(SB)/8, $0x0E0D0C0F0A09080B
|
||||||
|
GLOBL ·rol8<>(SB), (NOPTR+RODATA), $16
|
||||||
|
|
||||||
|
#define ROTL_SSE2(n, t, v) \
|
||||||
|
MOVO v, t; \
|
||||||
|
PSLLL $n, t; \
|
||||||
|
PSRLL $(32-n), v; \
|
||||||
|
PXOR t, v
|
||||||
|
|
||||||
|
#define CHACHA_QROUND_SSE2(v0, v1, v2, v3, t0) \
|
||||||
|
PADDL v1, v0; \
|
||||||
|
PXOR v0, v3; \
|
||||||
|
ROTL_SSE2(16, t0, v3); \
|
||||||
|
PADDL v3, v2; \
|
||||||
|
PXOR v2, v1; \
|
||||||
|
ROTL_SSE2(12, t0, v1); \
|
||||||
|
PADDL v1, v0; \
|
||||||
|
PXOR v0, v3; \
|
||||||
|
ROTL_SSE2(8, t0, v3); \
|
||||||
|
PADDL v3, v2; \
|
||||||
|
PXOR v2, v1; \
|
||||||
|
ROTL_SSE2(7, t0, v1)
|
||||||
|
|
||||||
|
#define CHACHA_QROUND_SSSE3(v0, v1, v2, v3, t0, r16, r8) \
|
||||||
|
PADDL v1, v0; \
|
||||||
|
PXOR v0, v3; \
|
||||||
|
PSHUFB r16, v3; \
|
||||||
|
PADDL v3, v2; \
|
||||||
|
PXOR v2, v1; \
|
||||||
|
ROTL_SSE2(12, t0, v1); \
|
||||||
|
PADDL v1, v0; \
|
||||||
|
PXOR v0, v3; \
|
||||||
|
PSHUFB r8, v3; \
|
||||||
|
PADDL v3, v2; \
|
||||||
|
PXOR v2, v1; \
|
||||||
|
ROTL_SSE2(7, t0, v1)
|
||||||
|
|
||||||
|
#define CHACHA_SHUFFLE(v1, v2, v3) \
|
||||||
|
PSHUFL $0x39, v1, v1; \
|
||||||
|
PSHUFL $0x4E, v2, v2; \
|
||||||
|
PSHUFL $0x93, v3, v3
|
||||||
|
|
||||||
|
#define XOR(dst, src, off, v0, v1, v2, v3, t0) \
|
||||||
|
MOVOU 0+off(src), t0; \
|
||||||
|
PXOR v0, t0; \
|
||||||
|
MOVOU t0, 0+off(dst); \
|
||||||
|
MOVOU 16+off(src), t0; \
|
||||||
|
PXOR v1, t0; \
|
||||||
|
MOVOU t0, 16+off(dst); \
|
||||||
|
MOVOU 32+off(src), t0; \
|
||||||
|
PXOR v2, t0; \
|
||||||
|
MOVOU t0, 32+off(dst); \
|
||||||
|
MOVOU 48+off(src), t0; \
|
||||||
|
PXOR v3, t0; \
|
||||||
|
MOVOU t0, 48+off(dst)
|
||||||
|
|
||||||
|
#define FINALIZE(dst, src, block, len, t0, t1) \
|
||||||
|
XORL t0, t0; \
|
||||||
|
XORL t1, t1; \
|
||||||
|
finalize: \
|
||||||
|
MOVB 0(src), t0; \
|
||||||
|
MOVB 0(block), t1; \
|
||||||
|
XORL t0, t1; \
|
||||||
|
MOVB t1, 0(dst); \
|
||||||
|
INCL src; \
|
||||||
|
INCL block; \
|
||||||
|
INCL dst; \
|
||||||
|
DECL len; \
|
||||||
|
JA finalize \
|
||||||
|
|
||||||
|
// func xorKeyStreamSSE2(dst, src []byte, block, state *[64]byte, rounds int) int
|
||||||
|
TEXT ·xorKeyStreamSSE2(SB), 4, $0-40
|
||||||
|
MOVL dst_base+0(FP), DI
|
||||||
|
MOVL src_base+12(FP), SI
|
||||||
|
MOVL src_len+16(FP), CX
|
||||||
|
MOVL state+28(FP), AX
|
||||||
|
MOVL rounds+32(FP), DX
|
||||||
|
|
||||||
|
MOVOU 0(AX), X0
|
||||||
|
MOVOU 16(AX), X1
|
||||||
|
MOVOU 32(AX), X2
|
||||||
|
MOVOU 48(AX), X3
|
||||||
|
|
||||||
|
TESTL CX, CX
|
||||||
|
JZ done
|
||||||
|
|
||||||
|
at_least_64:
|
||||||
|
MOVO X0, X4
|
||||||
|
MOVO X1, X5
|
||||||
|
MOVO X2, X6
|
||||||
|
MOVO X3, X7
|
||||||
|
|
||||||
|
MOVL DX, BX
|
||||||
|
|
||||||
|
chacha_loop:
|
||||||
|
CHACHA_QROUND_SSE2(X4, X5, X6, X7, X0)
|
||||||
|
CHACHA_SHUFFLE(X5, X6, X7)
|
||||||
|
CHACHA_QROUND_SSE2(X4, X5, X6, X7, X0)
|
||||||
|
CHACHA_SHUFFLE(X7, X6, X5)
|
||||||
|
SUBL $2, BX
|
||||||
|
JA chacha_loop
|
||||||
|
|
||||||
|
MOVOU 0(AX), X0
|
||||||
|
PADDL X0, X4
|
||||||
|
PADDL X1, X5
|
||||||
|
PADDL X2, X6
|
||||||
|
PADDL X3, X7
|
||||||
|
MOVOU ·one<>(SB), X0
|
||||||
|
PADDQ X0, X3
|
||||||
|
|
||||||
|
CMPL CX, $64
|
||||||
|
JB less_than_64
|
||||||
|
|
||||||
|
XOR(DI, SI, 0, X4, X5, X6, X7, X0)
|
||||||
|
MOVOU 0(AX), X0
|
||||||
|
ADDL $64, SI
|
||||||
|
ADDL $64, DI
|
||||||
|
SUBL $64, CX
|
||||||
|
JNZ at_least_64
|
||||||
|
|
||||||
|
less_than_64:
|
||||||
|
MOVL CX, BP
|
||||||
|
TESTL BP, BP
|
||||||
|
JZ done
|
||||||
|
|
||||||
|
MOVL block+24(FP), BX
|
||||||
|
MOVOU X4, 0(BX)
|
||||||
|
MOVOU X5, 16(BX)
|
||||||
|
MOVOU X6, 32(BX)
|
||||||
|
MOVOU X7, 48(BX)
|
||||||
|
FINALIZE(DI, SI, BX, BP, AX, DX)
|
||||||
|
|
||||||
|
done:
|
||||||
|
MOVL state+28(FP), AX
|
||||||
|
MOVOU X3, 48(AX)
|
||||||
|
MOVL CX, ret+36(FP)
|
||||||
|
RET
|
||||||
|
|
||||||
|
// func xorKeyStreamSSSE3(dst, src []byte, block, state *[64]byte, rounds int) int
|
||||||
|
TEXT ·xorKeyStreamSSSE3(SB), 4, $64-40
|
||||||
|
MOVL dst_base+0(FP), DI
|
||||||
|
MOVL src_base+12(FP), SI
|
||||||
|
MOVL src_len+16(FP), CX
|
||||||
|
MOVL state+28(FP), AX
|
||||||
|
MOVL rounds+32(FP), DX
|
||||||
|
|
||||||
|
MOVOU 48(AX), X3
|
||||||
|
TESTL CX, CX
|
||||||
|
JZ done
|
||||||
|
|
||||||
|
MOVL SP, BP
|
||||||
|
ADDL $16, SP
|
||||||
|
ANDL $-16, SP
|
||||||
|
|
||||||
|
MOVOU ·one<>(SB), X0
|
||||||
|
MOVOU 16(AX), X1
|
||||||
|
MOVOU 32(AX), X2
|
||||||
|
MOVO X0, 0(SP)
|
||||||
|
MOVO X1, 16(SP)
|
||||||
|
MOVO X2, 32(SP)
|
||||||
|
|
||||||
|
MOVOU 0(AX), X0
|
||||||
|
MOVOU ·rol16<>(SB), X1
|
||||||
|
MOVOU ·rol8<>(SB), X2
|
||||||
|
|
||||||
|
at_least_64:
|
||||||
|
MOVO X0, X4
|
||||||
|
MOVO 16(SP), X5
|
||||||
|
MOVO 32(SP), X6
|
||||||
|
MOVO X3, X7
|
||||||
|
|
||||||
|
MOVL DX, BX
|
||||||
|
|
||||||
|
chacha_loop:
|
||||||
|
CHACHA_QROUND_SSSE3(X4, X5, X6, X7, X0, X1, X2)
|
||||||
|
CHACHA_SHUFFLE(X5, X6, X7)
|
||||||
|
CHACHA_QROUND_SSSE3(X4, X5, X6, X7, X0, X1, X2)
|
||||||
|
CHACHA_SHUFFLE(X7, X6, X5)
|
||||||
|
SUBL $2, BX
|
||||||
|
JA chacha_loop
|
||||||
|
|
||||||
|
MOVOU 0(AX), X0
|
||||||
|
PADDL X0, X4
|
||||||
|
PADDL 16(SP), X5
|
||||||
|
PADDL 32(SP), X6
|
||||||
|
PADDL X3, X7
|
||||||
|
PADDQ 0(SP), X3
|
||||||
|
|
||||||
|
CMPL CX, $64
|
||||||
|
JB less_than_64
|
||||||
|
|
||||||
|
XOR(DI, SI, 0, X4, X5, X6, X7, X0)
|
||||||
|
MOVOU 0(AX), X0
|
||||||
|
ADDL $64, SI
|
||||||
|
ADDL $64, DI
|
||||||
|
SUBL $64, CX
|
||||||
|
JNZ at_least_64
|
||||||
|
|
||||||
|
less_than_64:
|
||||||
|
MOVL BP, SP
|
||||||
|
MOVL CX, BP
|
||||||
|
TESTL BP, BP
|
||||||
|
JE done
|
||||||
|
|
||||||
|
MOVL block+24(FP), BX
|
||||||
|
MOVOU X4, 0(BX)
|
||||||
|
MOVOU X5, 16(BX)
|
||||||
|
MOVOU X6, 32(BX)
|
||||||
|
MOVOU X7, 48(BX)
|
||||||
|
FINALIZE(DI, SI, BX, BP, AX, DX)
|
||||||
|
|
||||||
|
done:
|
||||||
|
MOVL state+28(FP), AX
|
||||||
|
MOVOU X3, 48(AX)
|
||||||
|
MOVL CX, ret+36(FP)
|
||||||
|
RET
|
||||||
|
|
||||||
|
// func supportsSSE2() bool
|
||||||
|
TEXT ·supportsSSE2(SB), NOSPLIT, $0-1
|
||||||
|
XORL AX, AX
|
||||||
|
INCL AX
|
||||||
|
CPUID
|
||||||
|
SHRL $26, DX
|
||||||
|
ANDL $1, DX
|
||||||
|
MOVB DX, ret+0(FP)
|
||||||
|
RET
|
||||||
|
|
||||||
|
// func supportsSSSE3() bool
|
||||||
|
TEXT ·supportsSSSE3(SB), NOSPLIT, $0-1
|
||||||
|
XORL AX, AX
|
||||||
|
INCL AX
|
||||||
|
CPUID
|
||||||
|
SHRL $9, CX
|
||||||
|
ANDL $1, CX
|
||||||
|
MOVB CX, ret+0(FP)
|
||||||
|
RET
|
||||||
|
|
||||||
|
// func hChaCha20SSE2(out *[32]byte, nonce *[16]byte, key *[32]byte)
|
||||||
|
TEXT ·hChaCha20SSE2(SB), 4, $0-12
|
||||||
|
MOVL out+0(FP), DI
|
||||||
|
MOVL nonce+4(FP), AX
|
||||||
|
MOVL key+8(FP), BX
|
||||||
|
|
||||||
|
MOVOU ·sigma<>(SB), X0
|
||||||
|
MOVOU 0(BX), X1
|
||||||
|
MOVOU 16(BX), X2
|
||||||
|
MOVOU 0(AX), X3
|
||||||
|
|
||||||
|
MOVL $20, CX
|
||||||
|
|
||||||
|
chacha_loop:
|
||||||
|
CHACHA_QROUND_SSE2(X0, X1, X2, X3, X4)
|
||||||
|
CHACHA_SHUFFLE(X1, X2, X3)
|
||||||
|
CHACHA_QROUND_SSE2(X0, X1, X2, X3, X4)
|
||||||
|
CHACHA_SHUFFLE(X3, X2, X1)
|
||||||
|
SUBL $2, CX
|
||||||
|
JNZ chacha_loop
|
||||||
|
|
||||||
|
MOVOU X0, 0(DI)
|
||||||
|
MOVOU X3, 16(DI)
|
||||||
|
RET
|
||||||
|
|
||||||
|
// func hChaCha20SSSE3(out *[32]byte, nonce *[16]byte, key *[32]byte)
|
||||||
|
TEXT ·hChaCha20SSSE3(SB), 4, $0-12
|
||||||
|
MOVL out+0(FP), DI
|
||||||
|
MOVL nonce+4(FP), AX
|
||||||
|
MOVL key+8(FP), BX
|
||||||
|
|
||||||
|
MOVOU ·sigma<>(SB), X0
|
||||||
|
MOVOU 0(BX), X1
|
||||||
|
MOVOU 16(BX), X2
|
||||||
|
MOVOU 0(AX), X3
|
||||||
|
MOVOU ·rol16<>(SB), X5
|
||||||
|
MOVOU ·rol8<>(SB), X6
|
||||||
|
|
||||||
|
MOVL $20, CX
|
||||||
|
|
||||||
|
chacha_loop:
|
||||||
|
CHACHA_QROUND_SSSE3(X0, X1, X2, X3, X4, X5, X6)
|
||||||
|
CHACHA_SHUFFLE(X1, X2, X3)
|
||||||
|
CHACHA_QROUND_SSSE3(X0, X1, X2, X3, X4, X5, X6)
|
||||||
|
CHACHA_SHUFFLE(X3, X2, X1)
|
||||||
|
SUBL $2, CX
|
||||||
|
JNZ chacha_loop
|
||||||
|
|
||||||
|
MOVOU X0, 0(DI)
|
||||||
|
MOVOU X3, 16(DI)
|
||||||
|
RET
|
|
@@ -0,0 +1,788 @@
|
||||||
|
// Copyright (c) 2016 Andreas Auernhammer. All rights reserved.
|
||||||
|
// Use of this source code is governed by a license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build amd64,!gccgo,!appengine,!nacl
|
||||||
|
|
||||||
|
#include "textflag.h"
|
||||||
|
|
||||||
|
DATA ·sigma<>+0x00(SB)/4, $0x61707865
|
||||||
|
DATA ·sigma<>+0x04(SB)/4, $0x3320646e
|
||||||
|
DATA ·sigma<>+0x08(SB)/4, $0x79622d32
|
||||||
|
DATA ·sigma<>+0x0C(SB)/4, $0x6b206574
|
||||||
|
GLOBL ·sigma<>(SB), (NOPTR+RODATA), $16
|
||||||
|
|
||||||
|
DATA ·one<>+0x00(SB)/8, $1
|
||||||
|
DATA ·one<>+0x08(SB)/8, $0
|
||||||
|
GLOBL ·one<>(SB), (NOPTR+RODATA), $16
|
||||||
|
|
||||||
|
DATA ·rol16<>+0x00(SB)/8, $0x0504070601000302
|
||||||
|
DATA ·rol16<>+0x08(SB)/8, $0x0D0C0F0E09080B0A
|
||||||
|
GLOBL ·rol16<>(SB), (NOPTR+RODATA), $16
|
||||||
|
|
||||||
|
DATA ·rol8<>+0x00(SB)/8, $0x0605040702010003
|
||||||
|
DATA ·rol8<>+0x08(SB)/8, $0x0E0D0C0F0A09080B
|
||||||
|
GLOBL ·rol8<>(SB), (NOPTR+RODATA), $16
|
||||||
|
|
||||||
|
#define ROTL_SSE2(n, t, v) \
|
||||||
|
MOVO v, t; \
|
||||||
|
PSLLL $n, t; \
|
||||||
|
PSRLL $(32-n), v; \
|
||||||
|
PXOR t, v
|
||||||
|
|
||||||
|
#define CHACHA_QROUND_SSE2(v0, v1, v2, v3, t0) \
|
||||||
|
PADDL v1, v0; \
|
||||||
|
PXOR v0, v3; \
|
||||||
|
ROTL_SSE2(16, t0, v3); \
|
||||||
|
PADDL v3, v2; \
|
||||||
|
PXOR v2, v1; \
|
||||||
|
ROTL_SSE2(12, t0, v1); \
|
||||||
|
PADDL v1, v0; \
|
||||||
|
PXOR v0, v3; \
|
||||||
|
ROTL_SSE2(8, t0, v3); \
|
||||||
|
PADDL v3, v2; \
|
||||||
|
PXOR v2, v1; \
|
||||||
|
ROTL_SSE2(7, t0, v1)
|
||||||
|
|
||||||
|
#define CHACHA_QROUND_SSSE3(v0, v1, v2, v3, t0, r16, r8) \
|
||||||
|
PADDL v1, v0; \
|
||||||
|
PXOR v0, v3; \
|
||||||
|
PSHUFB r16, v3; \
|
||||||
|
PADDL v3, v2; \
|
||||||
|
PXOR v2, v1; \
|
||||||
|
ROTL_SSE2(12, t0, v1); \
|
||||||
|
PADDL v1, v0; \
|
||||||
|
PXOR v0, v3; \
|
||||||
|
PSHUFB r8, v3; \
|
||||||
|
PADDL v3, v2; \
|
||||||
|
PXOR v2, v1; \
|
||||||
|
ROTL_SSE2(7, t0, v1)
|
||||||
|
|
||||||
|
#define CHACHA_SHUFFLE(v1, v2, v3) \
|
||||||
|
PSHUFL $0x39, v1, v1; \
|
||||||
|
PSHUFL $0x4E, v2, v2; \
|
||||||
|
PSHUFL $0x93, v3, v3
|
||||||
|
|
||||||
|
#define XOR(dst, src, off, v0, v1, v2, v3, t0) \
|
||||||
|
MOVOU 0+off(src), t0; \
|
||||||
|
PXOR v0, t0; \
|
||||||
|
MOVOU t0, 0+off(dst); \
|
||||||
|
MOVOU 16+off(src), t0; \
|
||||||
|
PXOR v1, t0; \
|
||||||
|
MOVOU t0, 16+off(dst); \
|
||||||
|
MOVOU 32+off(src), t0; \
|
||||||
|
PXOR v2, t0; \
|
||||||
|
MOVOU t0, 32+off(dst); \
|
||||||
|
MOVOU 48+off(src), t0; \
|
||||||
|
PXOR v3, t0; \
|
||||||
|
MOVOU t0, 48+off(dst)
|
||||||
|
|
||||||
|
// func xorKeyStreamSSE2(dst, src []byte, block, state *[64]byte, rounds int) int
|
||||||
|
TEXT ·xorKeyStreamSSE2(SB), 4, $112-80
|
||||||
|
MOVQ dst_base+0(FP), DI
|
||||||
|
MOVQ src_base+24(FP), SI
|
||||||
|
MOVQ src_len+32(FP), CX
|
||||||
|
MOVQ block+48(FP), BX
|
||||||
|
MOVQ state+56(FP), AX
|
||||||
|
MOVQ rounds+64(FP), DX
|
||||||
|
|
||||||
|
MOVQ SP, R9
|
||||||
|
ADDQ $16, SP
|
||||||
|
ANDQ $-16, SP
|
||||||
|
|
||||||
|
MOVOU 0(AX), X0
|
||||||
|
MOVOU 16(AX), X1
|
||||||
|
MOVOU 32(AX), X2
|
||||||
|
MOVOU 48(AX), X3
|
||||||
|
MOVOU ·one<>(SB), X15
|
||||||
|
|
||||||
|
TESTQ CX, CX
|
||||||
|
JZ done
|
||||||
|
|
||||||
|
CMPQ CX, $64
|
||||||
|
JBE between_0_and_64
|
||||||
|
|
||||||
|
CMPQ CX, $128
|
||||||
|
JBE between_64_and_128
|
||||||
|
|
||||||
|
MOVO X0, 0(SP)
|
||||||
|
MOVO X1, 16(SP)
|
||||||
|
MOVO X2, 32(SP)
|
||||||
|
MOVO X3, 48(SP)
|
||||||
|
MOVO X15, 64(SP)
|
||||||
|
|
||||||
|
CMPQ CX, $192
|
||||||
|
JBE between_128_and_192
|
||||||
|
|
||||||
|
MOVQ $192, R14
|
||||||
|
|
||||||
|
at_least_256:
|
||||||
|
MOVO X0, X4
|
||||||
|
MOVO X1, X5
|
||||||
|
MOVO X2, X6
|
||||||
|
MOVO X3, X7
|
||||||
|
PADDQ 64(SP), X7
|
||||||
|
MOVO X0, X12
|
||||||
|
MOVO X1, X13
|
||||||
|
MOVO X2, X14
|
||||||
|
MOVO X7, X15
|
||||||
|
PADDQ 64(SP), X15
|
||||||
|
MOVO X0, X8
|
||||||
|
MOVO X1, X9
|
||||||
|
MOVO X2, X10
|
||||||
|
MOVO X15, X11
|
||||||
|
PADDQ 64(SP), X11
|
||||||
|
|
||||||
|
MOVQ DX, R8
|
||||||
|
|
||||||
|
chacha_loop_256:
|
||||||
|
MOVO X8, 80(SP)
|
||||||
|
CHACHA_QROUND_SSE2(X0, X1, X2, X3, X8)
|
||||||
|
CHACHA_QROUND_SSE2(X4, X5, X6, X7, X8)
|
||||||
|
MOVO 80(SP), X8
|
||||||
|
|
||||||
|
MOVO X0, 80(SP)
|
||||||
|
CHACHA_QROUND_SSE2(X12, X13, X14, X15, X0)
|
||||||
|
CHACHA_QROUND_SSE2(X8, X9, X10, X11, X0)
|
||||||
|
MOVO 80(SP), X0
|
||||||
|
|
||||||
|
CHACHA_SHUFFLE(X1, X2, X3)
|
||||||
|
CHACHA_SHUFFLE(X5, X6, X7)
|
||||||
|
CHACHA_SHUFFLE(X13, X14, X15)
|
||||||
|
CHACHA_SHUFFLE(X9, X10, X11)
|
||||||
|
|
||||||
|
MOVO X8, 80(SP)
|
||||||
|
CHACHA_QROUND_SSE2(X0, X1, X2, X3, X8)
|
||||||
|
CHACHA_QROUND_SSE2(X4, X5, X6, X7, X8)
|
||||||
|
MOVO 80(SP), X8
|
||||||
|
|
||||||
|
MOVO X0, 80(SP)
|
||||||
|
CHACHA_QROUND_SSE2(X12, X13, X14, X15, X0)
|
||||||
|
CHACHA_QROUND_SSE2(X8, X9, X10, X11, X0)
|
||||||
|
MOVO 80(SP), X0
|
||||||
|
|
||||||
|
CHACHA_SHUFFLE(X3, X2, X1)
|
||||||
|
CHACHA_SHUFFLE(X7, X6, X5)
|
||||||
|
CHACHA_SHUFFLE(X15, X14, X13)
|
||||||
|
CHACHA_SHUFFLE(X11, X10, X9)
|
||||||
|
SUBQ $2, R8
|
||||||
|
JA chacha_loop_256
|
||||||
|
|
||||||
|
MOVO X8, 80(SP)
|
||||||
|
|
||||||
|
PADDL 0(SP), X0
|
||||||
|
PADDL 16(SP), X1
|
||||||
|
PADDL 32(SP), X2
|
||||||
|
PADDL 48(SP), X3
|
||||||
|
XOR(DI, SI, 0, X0, X1, X2, X3, X8)
|
||||||
|
|
||||||
|
MOVO 0(SP), X0
|
||||||
|
MOVO 16(SP), X1
|
||||||
|
MOVO 32(SP), X2
|
||||||
|
MOVO 48(SP), X3
|
||||||
|
PADDQ 64(SP), X3
|
||||||
|
|
||||||
|
PADDL X0, X4
|
||||||
|
PADDL X1, X5
|
||||||
|
PADDL X2, X6
|
||||||
|
PADDL X3, X7
|
||||||
|
PADDQ 64(SP), X3
|
||||||
|
XOR(DI, SI, 64, X4, X5, X6, X7, X8)
|
||||||
|
|
||||||
|
MOVO 64(SP), X5
|
||||||
|
MOVO 80(SP), X8
|
||||||
|
|
||||||
|
PADDL X0, X12
|
||||||
|
PADDL X1, X13
|
||||||
|
PADDL X2, X14
|
||||||
|
PADDL X3, X15
|
||||||
|
PADDQ X5, X3
|
||||||
|
XOR(DI, SI, 128, X12, X13, X14, X15, X4)
|
||||||
|
|
||||||
|
PADDL X0, X8
|
||||||
|
PADDL X1, X9
|
||||||
|
PADDL X2, X10
|
||||||
|
PADDL X3, X11
|
||||||
|
PADDQ X5, X3
|
||||||
|
|
||||||
|
CMPQ CX, $256
|
||||||
|
JB less_than_64
|
||||||
|
|
||||||
|
XOR(DI, SI, 192, X8, X9, X10, X11, X4)
|
||||||
|
MOVO X3, 48(SP)
|
||||||
|
ADDQ $256, SI
|
||||||
|
ADDQ $256, DI
|
||||||
|
SUBQ $256, CX
|
||||||
|
CMPQ CX, $192
|
||||||
|
JA at_least_256
|
||||||
|
|
||||||
|
TESTQ CX, CX
|
||||||
|
JZ done
|
||||||
|
MOVO 64(SP), X15
|
||||||
|
CMPQ CX, $64
|
||||||
|
JBE between_0_and_64
|
||||||
|
CMPQ CX, $128
|
||||||
|
JBE between_64_and_128
|
||||||
|
|
||||||
|
between_128_and_192:
|
||||||
|
MOVQ $128, R14
|
||||||
|
MOVO X0, X4
|
||||||
|
MOVO X1, X5
|
||||||
|
MOVO X2, X6
|
||||||
|
MOVO X3, X7
|
||||||
|
PADDQ X15, X7
|
||||||
|
MOVO X0, X8
|
||||||
|
MOVO X1, X9
|
||||||
|
MOVO X2, X10
|
||||||
|
MOVO X7, X11
|
||||||
|
PADDQ X15, X11
|
||||||
|
|
||||||
|
MOVQ DX, R8
|
||||||
|
|
||||||
|
chacha_loop_192:
|
||||||
|
CHACHA_QROUND_SSE2(X0, X1, X2, X3, X12)
|
||||||
|
CHACHA_QROUND_SSE2(X4, X5, X6, X7, X12)
|
||||||
|
CHACHA_QROUND_SSE2(X8, X9, X10, X11, X12)
|
||||||
|
CHACHA_SHUFFLE(X1, X2, X3)
|
||||||
|
CHACHA_SHUFFLE(X5, X6, X7)
|
||||||
|
CHACHA_SHUFFLE(X9, X10, X11)
|
||||||
|
CHACHA_QROUND_SSE2(X0, X1, X2, X3, X12)
|
||||||
|
CHACHA_QROUND_SSE2(X4, X5, X6, X7, X12)
|
||||||
|
CHACHA_QROUND_SSE2(X8, X9, X10, X11, X12)
|
||||||
|
CHACHA_SHUFFLE(X3, X2, X1)
|
||||||
|
CHACHA_SHUFFLE(X7, X6, X5)
|
||||||
|
CHACHA_SHUFFLE(X11, X10, X9)
|
||||||
|
SUBQ $2, R8
|
||||||
|
JA chacha_loop_192
|
||||||
|
|
||||||
|
PADDL 0(SP), X0
|
||||||
|
PADDL 16(SP), X1
|
||||||
|
PADDL 32(SP), X2
|
||||||
|
PADDL 48(SP), X3
|
||||||
|
XOR(DI, SI, 0, X0, X1, X2, X3, X12)
|
||||||
|
|
||||||
|
MOVO 0(SP), X0
|
||||||
|
MOVO 16(SP), X1
|
||||||
|
MOVO 32(SP), X2
|
||||||
|
MOVO 48(SP), X3
|
||||||
|
PADDQ X15, X3
|
||||||
|
|
||||||
|
PADDL X0, X4
|
||||||
|
PADDL X1, X5
|
||||||
|
PADDL X2, X6
|
||||||
|
PADDL X3, X7
|
||||||
|
PADDQ X15, X3
|
||||||
|
XOR(DI, SI, 64, X4, X5, X6, X7, X12)
|
||||||
|
|
||||||
|
PADDL X0, X8
|
||||||
|
PADDL X1, X9
|
||||||
|
PADDL X2, X10
|
||||||
|
PADDL X3, X11
|
||||||
|
PADDQ X15, X3
|
||||||
|
|
||||||
|
CMPQ CX, $192
|
||||||
|
JB less_than_64
|
||||||
|
|
||||||
|
XOR(DI, SI, 128, X8, X9, X10, X11, X12)
|
||||||
|
SUBQ $192, CX
|
||||||
|
JMP done
|
||||||
|
|
||||||
|
between_64_and_128:
|
||||||
|
MOVQ $64, R14
|
||||||
|
MOVO X0, X4
|
||||||
|
MOVO X1, X5
|
||||||
|
MOVO X2, X6
|
||||||
|
MOVO X3, X7
|
||||||
|
MOVO X0, X8
|
||||||
|
MOVO X1, X9
|
||||||
|
MOVO X2, X10
|
||||||
|
MOVO X3, X11
|
||||||
|
PADDQ X15, X11
|
||||||
|
|
||||||
|
MOVQ DX, R8
|
||||||
|
|
||||||
|
chacha_loop_128:
|
||||||
|
CHACHA_QROUND_SSE2(X4, X5, X6, X7, X12)
|
||||||
|
CHACHA_QROUND_SSE2(X8, X9, X10, X11, X12)
|
||||||
|
CHACHA_SHUFFLE(X5, X6, X7)
|
||||||
|
CHACHA_SHUFFLE(X9, X10, X11)
|
||||||
|
CHACHA_QROUND_SSE2(X4, X5, X6, X7, X12)
|
||||||
|
CHACHA_QROUND_SSE2(X8, X9, X10, X11, X12)
|
||||||
|
CHACHA_SHUFFLE(X7, X6, X5)
|
||||||
|
CHACHA_SHUFFLE(X11, X10, X9)
|
||||||
|
SUBQ $2, R8
|
||||||
|
JA chacha_loop_128
|
||||||
|
|
||||||
|
PADDL X0, X4
|
||||||
|
PADDL X1, X5
|
||||||
|
PADDL X2, X6
|
||||||
|
PADDL X3, X7
|
||||||
|
PADDQ X15, X3
|
||||||
|
PADDL X0, X8
|
||||||
|
PADDL X1, X9
|
||||||
|
PADDL X2, X10
|
||||||
|
PADDL X3, X11
|
||||||
|
PADDQ X15, X3
|
||||||
|
XOR(DI, SI, 0, X4, X5, X6, X7, X12)
|
||||||
|
|
||||||
|
CMPQ CX, $128
|
||||||
|
JB less_than_64
|
||||||
|
|
||||||
|
XOR(DI, SI, 64, X8, X9, X10, X11, X12)
|
||||||
|
SUBQ $128, CX
|
||||||
|
JMP done
|
||||||
|
|
||||||
|
between_0_and_64:
|
||||||
|
MOVQ $0, R14
|
||||||
|
MOVO X0, X8
|
||||||
|
MOVO X1, X9
|
||||||
|
MOVO X2, X10
|
||||||
|
MOVO X3, X11
|
||||||
|
MOVQ DX, R8
|
||||||
|
|
||||||
|
chacha_loop_64:
|
||||||
|
CHACHA_QROUND_SSE2(X8, X9, X10, X11, X12)
|
||||||
|
CHACHA_SHUFFLE(X9, X10, X11)
|
||||||
|
CHACHA_QROUND_SSE2(X8, X9, X10, X11, X12)
|
||||||
|
CHACHA_SHUFFLE(X11, X10, X9)
|
||||||
|
SUBQ $2, R8
|
||||||
|
JA chacha_loop_64
|
||||||
|
|
||||||
|
PADDL X0, X8
|
||||||
|
PADDL X1, X9
|
||||||
|
PADDL X2, X10
|
||||||
|
PADDL X3, X11
|
||||||
|
PADDQ X15, X3
|
||||||
|
CMPQ CX, $64
|
||||||
|
JB less_than_64
|
||||||
|
|
||||||
|
XOR(DI, SI, 0, X8, X9, X10, X11, X12)
|
||||||
|
SUBQ $64, CX
|
||||||
|
JMP done
|
||||||
|
|
||||||
|
less_than_64:
|
||||||
|
// R14 contains the number of bytes already XOR'd
|
||||||
|
ADDQ R14, SI
|
||||||
|
ADDQ R14, DI
|
||||||
|
SUBQ R14, CX
|
||||||
|
MOVOU X8, 0(BX)
|
||||||
|
MOVOU X9, 16(BX)
|
||||||
|
MOVOU X10, 32(BX)
|
||||||
|
MOVOU X11, 48(BX)
|
||||||
|
XORQ R11, R11
|
||||||
|
XORQ R12, R12
|
||||||
|
MOVQ CX, BP
|
||||||
|
|
||||||
|
xor_loop:
|
||||||
|
MOVB 0(SI), R11
|
||||||
|
MOVB 0(BX), R12
|
||||||
|
XORQ R11, R12
|
||||||
|
MOVB R12, 0(DI)
|
||||||
|
INCQ SI
|
||||||
|
INCQ BX
|
||||||
|
INCQ DI
|
||||||
|
DECQ BP
|
||||||
|
JA xor_loop
|
||||||
|
|
||||||
|
done:
|
||||||
|
MOVOU X3, 48(AX)
|
||||||
|
MOVQ R9, SP
|
||||||
|
MOVQ CX, ret+72(FP)
|
||||||
|
RET
|
||||||
|
|
||||||
|
// func xorKeyStreamSSSE3(dst, src []byte, block, state *[64]byte, rounds int) int
|
||||||
|
TEXT ·xorKeyStreamSSSE3(SB), 4, $144-80
|
||||||
|
MOVQ dst_base+0(FP), DI
|
||||||
|
MOVQ src_base+24(FP), SI
|
||||||
|
MOVQ src_len+32(FP), CX
|
||||||
|
MOVQ block+48(FP), BX
|
||||||
|
MOVQ state+56(FP), AX
|
||||||
|
MOVQ rounds+64(FP), DX
|
||||||
|
|
||||||
|
MOVQ SP, R9
|
||||||
|
ADDQ $16, SP
|
||||||
|
ANDQ $-16, SP
|
||||||
|
|
||||||
|
MOVOU 0(AX), X0
|
||||||
|
MOVOU 16(AX), X1
|
||||||
|
MOVOU 32(AX), X2
|
||||||
|
MOVOU 48(AX), X3
|
||||||
|
MOVOU ·rol16<>(SB), X13
|
||||||
|
MOVOU ·rol8<>(SB), X14
|
||||||
|
MOVOU ·one<>(SB), X15
|
||||||
|
|
||||||
|
TESTQ CX, CX
|
||||||
|
JZ done
|
||||||
|
|
||||||
|
CMPQ CX, $64
|
||||||
|
JBE between_0_and_64
|
||||||
|
|
||||||
|
CMPQ CX, $128
|
||||||
|
JBE between_64_and_128
|
||||||
|
|
||||||
|
MOVO X0, 0(SP)
|
||||||
|
MOVO X1, 16(SP)
|
||||||
|
MOVO X2, 32(SP)
|
||||||
|
MOVO X3, 48(SP)
|
||||||
|
MOVO X15, 64(SP)
|
||||||
|
|
||||||
|
CMPQ CX, $192
|
||||||
|
JBE between_128_and_192
|
||||||
|
|
||||||
|
MOVO X13, 96(SP)
|
||||||
|
MOVO X14, 112(SP)
|
||||||
|
MOVQ $192, R14
|
||||||
|
|
||||||
|
at_least_256:
|
||||||
|
MOVO X0, X4
|
||||||
|
MOVO X1, X5
|
||||||
|
MOVO X2, X6
|
||||||
|
MOVO X3, X7
|
||||||
|
PADDQ 64(SP), X7
|
||||||
|
MOVO X0, X12
|
||||||
|
MOVO X1, X13
|
||||||
|
MOVO X2, X14
|
||||||
|
MOVO X7, X15
|
||||||
|
PADDQ 64(SP), X15
|
||||||
|
MOVO X0, X8
|
||||||
|
MOVO X1, X9
|
||||||
|
MOVO X2, X10
|
||||||
|
MOVO X15, X11
|
||||||
|
PADDQ 64(SP), X11
|
||||||
|
|
||||||
|
MOVQ DX, R8
|
||||||
|
|
||||||
|
chacha_loop_256:
|
||||||
|
MOVO X8, 80(SP)
|
||||||
|
CHACHA_QROUND_SSSE3(X0, X1, X2, X3, X8, 96(SP), 112(SP))
|
||||||
|
CHACHA_QROUND_SSSE3(X4, X5, X6, X7, X8, 96(SP), 112(SP))
|
||||||
|
MOVO 80(SP), X8
|
||||||
|
|
||||||
|
MOVO X0, 80(SP)
|
||||||
|
CHACHA_QROUND_SSSE3(X12, X13, X14, X15, X0, 96(SP), 112(SP))
|
||||||
|
CHACHA_QROUND_SSSE3(X8, X9, X10, X11, X0, 96(SP), 112(SP))
|
||||||
|
MOVO 80(SP), X0
|
||||||
|
|
||||||
|
CHACHA_SHUFFLE(X1, X2, X3)
|
||||||
|
CHACHA_SHUFFLE(X5, X6, X7)
|
||||||
|
CHACHA_SHUFFLE(X13, X14, X15)
|
||||||
|
CHACHA_SHUFFLE(X9, X10, X11)
|
||||||
|
|
||||||
|
MOVO X8, 80(SP)
|
||||||
|
CHACHA_QROUND_SSSE3(X0, X1, X2, X3, X8, 96(SP), 112(SP))
|
||||||
|
CHACHA_QROUND_SSSE3(X4, X5, X6, X7, X8, 96(SP), 112(SP))
|
||||||
|
MOVO 80(SP), X8
|
||||||
|
|
||||||
|
MOVO X0, 80(SP)
|
||||||
|
CHACHA_QROUND_SSSE3(X12, X13, X14, X15, X0, 96(SP), 112(SP))
|
||||||
|
CHACHA_QROUND_SSSE3(X8, X9, X10, X11, X0, 96(SP), 112(SP))
|
||||||
|
MOVO 80(SP), X0
|
||||||
|
|
||||||
|
CHACHA_SHUFFLE(X3, X2, X1)
|
||||||
|
CHACHA_SHUFFLE(X7, X6, X5)
|
||||||
|
CHACHA_SHUFFLE(X15, X14, X13)
|
||||||
|
CHACHA_SHUFFLE(X11, X10, X9)
|
||||||
|
SUBQ $2, R8
|
||||||
|
JA chacha_loop_256
|
||||||
|
|
||||||
|
MOVO X8, 80(SP)
|
||||||
|
|
||||||
|
PADDL 0(SP), X0
|
||||||
|
PADDL 16(SP), X1
|
||||||
|
PADDL 32(SP), X2
|
||||||
|
PADDL 48(SP), X3
|
||||||
|
XOR(DI, SI, 0, X0, X1, X2, X3, X8)
|
||||||
|
MOVO 0(SP), X0
|
||||||
|
MOVO 16(SP), X1
|
||||||
|
MOVO 32(SP), X2
|
||||||
|
MOVO 48(SP), X3
|
||||||
|
PADDQ 64(SP), X3
|
||||||
|
|
||||||
|
PADDL X0, X4
|
||||||
|
PADDL X1, X5
|
||||||
|
PADDL X2, X6
|
||||||
|
PADDL X3, X7
|
||||||
|
PADDQ 64(SP), X3
|
||||||
|
XOR(DI, SI, 64, X4, X5, X6, X7, X8)
|
||||||
|
|
||||||
|
MOVO 64(SP), X5
|
||||||
|
MOVO 80(SP), X8
|
||||||
|
|
||||||
|
PADDL X0, X12
|
||||||
|
PADDL X1, X13
|
||||||
|
PADDL X2, X14
|
||||||
|
PADDL X3, X15
|
||||||
|
PADDQ X5, X3
|
||||||
|
XOR(DI, SI, 128, X12, X13, X14, X15, X4)
|
||||||
|
|
||||||
|
PADDL X0, X8
|
||||||
|
PADDL X1, X9
|
||||||
|
PADDL X2, X10
|
||||||
|
PADDL X3, X11
|
||||||
|
PADDQ X5, X3
|
||||||
|
|
||||||
|
CMPQ CX, $256
|
||||||
|
JB less_than_64
|
||||||
|
|
||||||
|
XOR(DI, SI, 192, X8, X9, X10, X11, X4)
|
||||||
|
MOVO X3, 48(SP)
|
||||||
|
ADDQ $256, SI
|
||||||
|
ADDQ $256, DI
|
||||||
|
SUBQ $256, CX
|
||||||
|
CMPQ CX, $192
|
||||||
|
JA at_least_256
|
||||||
|
|
||||||
|
TESTQ CX, CX
|
||||||
|
JZ done
|
||||||
|
MOVOU ·rol16<>(SB), X13
|
||||||
|
MOVOU ·rol8<>(SB), X14
|
||||||
|
MOVO 64(SP), X15
|
||||||
|
CMPQ CX, $64
|
||||||
|
JBE between_0_and_64
|
||||||
|
CMPQ CX, $128
|
||||||
|
JBE between_64_and_128
|
||||||
|
|
||||||
|
between_128_and_192:
|
||||||
|
MOVQ $128, R14
|
||||||
|
MOVO X0, X4
|
||||||
|
MOVO X1, X5
|
||||||
|
MOVO X2, X6
|
||||||
|
MOVO X3, X7
|
||||||
|
PADDQ X15, X7
|
||||||
|
MOVO X0, X8
|
||||||
|
MOVO X1, X9
|
||||||
|
MOVO X2, X10
|
||||||
|
MOVO X7, X11
|
||||||
|
PADDQ X15, X11
|
||||||
|
|
||||||
|
MOVQ DX, R8
|
||||||
|
|
||||||
|
chacha_loop_192:
|
||||||
|
CHACHA_QROUND_SSSE3(X0, X1, X2, X3, X12, X13, X14)
|
||||||
|
CHACHA_QROUND_SSSE3(X4, X5, X6, X7, X12, X13, X14)
|
||||||
|
CHACHA_QROUND_SSSE3(X8, X9, X10, X11, X12, X13, X14)
|
||||||
|
CHACHA_SHUFFLE(X1, X2, X3)
|
||||||
|
CHACHA_SHUFFLE(X5, X6, X7)
|
||||||
|
CHACHA_SHUFFLE(X9, X10, X11)
|
||||||
|
CHACHA_QROUND_SSSE3(X0, X1, X2, X3, X12, X13, X14)
|
||||||
|
CHACHA_QROUND_SSSE3(X4, X5, X6, X7, X12, X13, X14)
|
||||||
|
CHACHA_QROUND_SSSE3(X8, X9, X10, X11, X12, X13, X14)
|
||||||
|
CHACHA_SHUFFLE(X3, X2, X1)
|
||||||
|
CHACHA_SHUFFLE(X7, X6, X5)
|
||||||
|
CHACHA_SHUFFLE(X11, X10, X9)
|
||||||
|
SUBQ $2, R8
|
||||||
|
JA chacha_loop_192
|
||||||
|
|
||||||
|
PADDL 0(SP), X0
|
||||||
|
PADDL 16(SP), X1
|
||||||
|
PADDL 32(SP), X2
|
||||||
|
PADDL 48(SP), X3
|
||||||
|
XOR(DI, SI, 0, X0, X1, X2, X3, X12)
|
||||||
|
|
||||||
|
MOVO 0(SP), X0
|
||||||
|
MOVO 16(SP), X1
|
||||||
|
MOVO 32(SP), X2
|
||||||
|
MOVO 48(SP), X3
|
||||||
|
PADDQ X15, X3
|
||||||
|
|
||||||
|
PADDL X0, X4
|
||||||
|
PADDL X1, X5
|
||||||
|
PADDL X2, X6
|
||||||
|
PADDL X3, X7
|
||||||
|
PADDQ X15, X3
|
||||||
|
XOR(DI, SI, 64, X4, X5, X6, X7, X12)
|
||||||
|
|
||||||
|
PADDL X0, X8
|
||||||
|
PADDL X1, X9
|
||||||
|
PADDL X2, X10
|
||||||
|
PADDL X3, X11
|
||||||
|
PADDQ X15, X3
|
||||||
|
|
||||||
|
CMPQ CX, $192
|
||||||
|
JB less_than_64
|
||||||
|
|
||||||
|
XOR(DI, SI, 128, X8, X9, X10, X11, X12)
|
||||||
|
SUBQ $192, CX
|
||||||
|
JMP done
|
||||||
|
|
||||||
|
between_64_and_128:
|
||||||
|
MOVQ $64, R14
|
||||||
|
MOVO X0, X4
|
||||||
|
MOVO X1, X5
|
||||||
|
MOVO X2, X6
|
||||||
|
MOVO X3, X7
|
||||||
|
MOVO X0, X8
|
||||||
|
MOVO X1, X9
|
||||||
|
MOVO X2, X10
|
||||||
|
MOVO X3, X11
|
||||||
|
PADDQ X15, X11
|
||||||
|
|
||||||
|
MOVQ DX, R8
|
||||||
|
|
||||||
|
chacha_loop_128:
|
||||||
|
CHACHA_QROUND_SSSE3(X4, X5, X6, X7, X12, X13, X14)
|
||||||
|
CHACHA_QROUND_SSSE3(X8, X9, X10, X11, X12, X13, X14)
|
||||||
|
CHACHA_SHUFFLE(X5, X6, X7)
|
||||||
|
CHACHA_SHUFFLE(X9, X10, X11)
|
||||||
|
CHACHA_QROUND_SSSE3(X4, X5, X6, X7, X12, X13, X14)
|
||||||
|
CHACHA_QROUND_SSSE3(X8, X9, X10, X11, X12, X13, X14)
|
||||||
|
CHACHA_SHUFFLE(X7, X6, X5)
|
||||||
|
CHACHA_SHUFFLE(X11, X10, X9)
|
||||||
|
SUBQ $2, R8
|
||||||
|
JA chacha_loop_128
|
||||||
|
|
||||||
|
PADDL X0, X4
|
||||||
|
PADDL X1, X5
|
||||||
|
PADDL X2, X6
|
||||||
|
PADDL X3, X7
|
||||||
|
PADDQ X15, X3
|
||||||
|
PADDL X0, X8
|
||||||
|
PADDL X1, X9
|
||||||
|
PADDL X2, X10
|
||||||
|
PADDL X3, X11
|
||||||
|
PADDQ X15, X3
|
||||||
|
XOR(DI, SI, 0, X4, X5, X6, X7, X12)
|
||||||
|
|
||||||
|
CMPQ CX, $128
|
||||||
|
JB less_than_64
|
||||||
|
|
||||||
|
XOR(DI, SI, 64, X8, X9, X10, X11, X12)
|
||||||
|
SUBQ $128, CX
|
||||||
|
JMP done
|
||||||
|
|
||||||
|
between_0_and_64:
|
||||||
|
MOVQ $0, R14
|
||||||
|
MOVO X0, X8
|
||||||
|
MOVO X1, X9
|
||||||
|
MOVO X2, X10
|
||||||
|
MOVO X3, X11
|
||||||
|
MOVQ DX, R8
|
||||||
|
|
||||||
|
chacha_loop_64:
|
||||||
|
CHACHA_QROUND_SSSE3(X8, X9, X10, X11, X12, X13, X14)
|
||||||
|
CHACHA_SHUFFLE(X9, X10, X11)
|
||||||
|
CHACHA_QROUND_SSSE3(X8, X9, X10, X11, X12, X13, X14)
|
||||||
|
CHACHA_SHUFFLE(X11, X10, X9)
|
||||||
|
SUBQ $2, R8
|
||||||
|
JA chacha_loop_64
|
||||||
|
|
||||||
|
PADDL X0, X8
|
||||||
|
PADDL X1, X9
|
||||||
|
PADDL X2, X10
|
||||||
|
PADDL X3, X11
|
||||||
|
PADDQ X15, X3
|
||||||
|
CMPQ CX, $64
|
||||||
|
JB less_than_64
|
||||||
|
|
||||||
|
XOR(DI, SI, 0, X8, X9, X10, X11, X12)
|
||||||
|
SUBQ $64, CX
|
||||||
|
JMP done
|
||||||
|
|
||||||
|
less_than_64:
|
||||||
|
// R14 contains the number of bytes already XOR'd
|
||||||
|
ADDQ R14, SI
|
||||||
|
ADDQ R14, DI
|
||||||
|
SUBQ R14, CX
|
||||||
|
MOVOU X8, 0(BX)
|
||||||
|
MOVOU X9, 16(BX)
|
||||||
|
MOVOU X10, 32(BX)
|
||||||
|
MOVOU X11, 48(BX)
|
||||||
|
XORQ R11, R11
|
||||||
|
XORQ R12, R12
|
||||||
|
MOVQ CX, BP
|
||||||
|
|
||||||
|
xor_loop:
|
||||||
|
MOVB 0(SI), R11
|
||||||
|
MOVB 0(BX), R12
|
||||||
|
XORQ R11, R12
|
||||||
|
MOVB R12, 0(DI)
|
||||||
|
INCQ SI
|
||||||
|
INCQ BX
|
||||||
|
INCQ DI
|
||||||
|
DECQ BP
|
||||||
|
JA xor_loop
|
||||||
|
|
||||||
|
done:
|
||||||
|
MOVQ R9, SP
|
||||||
|
MOVOU X3, 48(AX)
|
||||||
|
MOVQ CX, ret+72(FP)
|
||||||
|
RET
|
||||||
|
|
||||||
|
// func supportsSSSE3() bool
|
||||||
|
TEXT ·supportsSSSE3(SB), NOSPLIT, $0-1
|
||||||
|
XORQ AX, AX
|
||||||
|
INCQ AX
|
||||||
|
CPUID
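// ECX bit 9 of CPUID leaf 1 (EAX=1) reports SSSE3; extract that bit as the result.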
|
||||||
|
SHRQ $9, CX
|
||||||
|
ANDQ $1, CX
|
||||||
|
MOVB CX, ret+0(FP)
|
||||||
|
RET
|
||||||
|
|
||||||
|
// func initialize(state *[64]byte, key []byte, nonce *[16]byte)
|
||||||
|
TEXT ·initialize(SB), 4, $0-40
|
||||||
|
MOVQ state+0(FP), DI
|
||||||
|
MOVQ key+8(FP), AX
|
||||||
|
MOVQ nonce+32(FP), BX
|
||||||
|
|
||||||
|
MOVOU ·sigma<>(SB), X0
|
||||||
|
MOVOU 0(AX), X1
|
||||||
|
MOVOU 16(AX), X2
|
||||||
|
MOVOU 0(BX), X3
|
||||||
|
|
||||||
|
MOVOU X0, 0(DI)
|
||||||
|
MOVOU X1, 16(DI)
|
||||||
|
MOVOU X2, 32(DI)
|
||||||
|
MOVOU X3, 48(DI)
|
||||||
|
RET
|
||||||
|
|
||||||
|
// func hChaCha20SSE2(out *[32]byte, nonce *[16]byte, key *[32]byte)
|
||||||
|
TEXT ·hChaCha20SSE2(SB), 4, $0-24
|
||||||
|
MOVQ out+0(FP), DI
|
||||||
|
MOVQ nonce+8(FP), AX
|
||||||
|
MOVQ key+16(FP), BX
|
||||||
|
|
||||||
|
MOVOU ·sigma<>(SB), X0
|
||||||
|
MOVOU 0(BX), X1
|
||||||
|
MOVOU 16(BX), X2
|
||||||
|
MOVOU 0(AX), X3
|
||||||
|
|
||||||
|
MOVQ $20, CX
|
||||||
|
|
||||||
|
chacha_loop:
|
||||||
|
CHACHA_QROUND_SSE2(X0, X1, X2, X3, X4)
|
||||||
|
CHACHA_SHUFFLE(X1, X2, X3)
|
||||||
|
CHACHA_QROUND_SSE2(X0, X1, X2, X3, X4)
|
||||||
|
CHACHA_SHUFFLE(X3, X2, X1)
|
||||||
|
SUBQ $2, CX
|
||||||
|
JNZ chacha_loop
|
||||||
|
|
||||||
|
MOVOU X0, 0(DI)
|
||||||
|
MOVOU X3, 16(DI)
|
||||||
|
RET
|
||||||
|
|
||||||
|
// func hChaCha20SSSE3(out *[32]byte, nonce *[16]byte, key *[32]byte)
|
||||||
|
TEXT ·hChaCha20SSSE3(SB), 4, $0-24
|
||||||
|
MOVQ out+0(FP), DI
|
||||||
|
MOVQ nonce+8(FP), AX
|
||||||
|
MOVQ key+16(FP), BX
|
||||||
|
|
||||||
|
MOVOU ·sigma<>(SB), X0
|
||||||
|
MOVOU 0(BX), X1
|
||||||
|
MOVOU 16(BX), X2
|
||||||
|
MOVOU 0(AX), X3
|
||||||
|
MOVOU ·rol16<>(SB), X5
|
||||||
|
MOVOU ·rol8<>(SB), X6
|
||||||
|
|
||||||
|
MOVQ $20, CX
|
||||||
|
|
||||||
|
chacha_loop:
|
||||||
|
CHACHA_QROUND_SSSE3(X0, X1, X2, X3, X4, X5, X6)
|
||||||
|
CHACHA_SHUFFLE(X1, X2, X3)
|
||||||
|
CHACHA_QROUND_SSSE3(X0, X1, X2, X3, X4, X5, X6)
|
||||||
|
CHACHA_SHUFFLE(X3, X2, X1)
|
||||||
|
SUBQ $2, CX
|
||||||
|
JNZ chacha_loop
|
||||||
|
|
||||||
|
MOVOU X0, 0(DI)
|
||||||
|
MOVOU X3, 16(DI)
|
||||||
|
RET
|
|
@@ -0,0 +1,319 @@
|
||||||
|
// Copyright (c) 2016 Andreas Auernhammer. All rights reserved.
|
||||||
|
// Use of this source code is governed by a license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
package chacha
|
||||||
|
|
||||||
|
import "encoding/binary"
|
||||||
|
|
||||||
|
var sigma = [4]uint32{0x61707865, 0x3320646e, 0x79622d32, 0x6b206574}
|
||||||
|
|
||||||
|
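// xorKeyStreamGeneric XORs src with the ChaCha keystream and writes the result
// to dst: full 64-byte blocks are handled directly, and for a trailing partial
// block the keystream is generated into block and the number of bytes consumed
// from that block is returned.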
func xorKeyStreamGeneric(dst, src []byte, block, state *[64]byte, rounds int) int {
|
||||||
|
for len(src) >= 64 {
|
||||||
|
chachaGeneric(block, state, rounds)
|
||||||
|
|
||||||
|
for i, v := range block {
|
||||||
|
dst[i] = src[i] ^ v
|
||||||
|
}
|
||||||
|
src = src[64:]
|
||||||
|
dst = dst[64:]
|
||||||
|
}
|
||||||
|
|
||||||
|
n := len(src)
|
||||||
|
if n > 0 {
|
||||||
|
chachaGeneric(block, state, rounds)
|
||||||
|
for i, v := range src {
|
||||||
|
dst[i] = v ^ block[i]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return n
|
||||||
|
}
|
||||||
|
|
||||||
|
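// chachaGeneric computes one 64-byte keystream block from state into dst,
// applying the given number of rounds, then increments the block counter at
// state[48:52], carrying into state[52:56] on overflow.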
func chachaGeneric(dst *[64]byte, state *[64]byte, rounds int) {
|
||||||
|
v00 := binary.LittleEndian.Uint32(state[0:])
|
||||||
|
v01 := binary.LittleEndian.Uint32(state[4:])
|
||||||
|
v02 := binary.LittleEndian.Uint32(state[8:])
|
||||||
|
v03 := binary.LittleEndian.Uint32(state[12:])
|
||||||
|
v04 := binary.LittleEndian.Uint32(state[16:])
|
||||||
|
v05 := binary.LittleEndian.Uint32(state[20:])
|
||||||
|
v06 := binary.LittleEndian.Uint32(state[24:])
|
||||||
|
v07 := binary.LittleEndian.Uint32(state[28:])
|
||||||
|
v08 := binary.LittleEndian.Uint32(state[32:])
|
||||||
|
v09 := binary.LittleEndian.Uint32(state[36:])
|
||||||
|
v10 := binary.LittleEndian.Uint32(state[40:])
|
||||||
|
v11 := binary.LittleEndian.Uint32(state[44:])
|
||||||
|
v12 := binary.LittleEndian.Uint32(state[48:])
|
||||||
|
v13 := binary.LittleEndian.Uint32(state[52:])
|
||||||
|
v14 := binary.LittleEndian.Uint32(state[56:])
|
||||||
|
v15 := binary.LittleEndian.Uint32(state[60:])
|
||||||
|
|
||||||
|
s00, s01, s02, s03, s04, s05, s06, s07 := v00, v01, v02, v03, v04, v05, v06, v07
|
||||||
|
s08, s09, s10, s11, s12, s13, s14, s15 := v08, v09, v10, v11, v12, v13, v14, v15
|
||||||
|
|
||||||
|
for i := 0; i < rounds; i += 2 {
|
||||||
|
v00 += v04
|
||||||
|
v12 ^= v00
|
||||||
|
v12 = (v12 << 16) | (v12 >> 16)
|
||||||
|
v08 += v12
|
||||||
|
v04 ^= v08
|
||||||
|
v04 = (v04 << 12) | (v04 >> 20)
|
||||||
|
v00 += v04
|
||||||
|
v12 ^= v00
|
||||||
|
v12 = (v12 << 8) | (v12 >> 24)
|
||||||
|
v08 += v12
|
||||||
|
v04 ^= v08
|
||||||
|
v04 = (v04 << 7) | (v04 >> 25)
|
||||||
|
v01 += v05
|
||||||
|
v13 ^= v01
|
||||||
|
v13 = (v13 << 16) | (v13 >> 16)
|
||||||
|
v09 += v13
|
||||||
|
v05 ^= v09
|
||||||
|
v05 = (v05 << 12) | (v05 >> 20)
|
||||||
|
v01 += v05
|
||||||
|
v13 ^= v01
|
||||||
|
v13 = (v13 << 8) | (v13 >> 24)
|
||||||
|
v09 += v13
|
||||||
|
v05 ^= v09
|
||||||
|
v05 = (v05 << 7) | (v05 >> 25)
|
||||||
|
v02 += v06
|
||||||
|
v14 ^= v02
|
||||||
|
v14 = (v14 << 16) | (v14 >> 16)
|
||||||
|
v10 += v14
|
||||||
|
v06 ^= v10
|
||||||
|
v06 = (v06 << 12) | (v06 >> 20)
|
||||||
|
v02 += v06
|
||||||
|
v14 ^= v02
|
||||||
|
v14 = (v14 << 8) | (v14 >> 24)
|
||||||
|
v10 += v14
|
||||||
|
v06 ^= v10
|
||||||
|
v06 = (v06 << 7) | (v06 >> 25)
|
||||||
|
v03 += v07
|
||||||
|
v15 ^= v03
|
||||||
|
v15 = (v15 << 16) | (v15 >> 16)
|
||||||
|
v11 += v15
|
||||||
|
v07 ^= v11
|
||||||
|
v07 = (v07 << 12) | (v07 >> 20)
|
||||||
|
v03 += v07
|
||||||
|
v15 ^= v03
|
||||||
|
v15 = (v15 << 8) | (v15 >> 24)
|
||||||
|
v11 += v15
|
||||||
|
v07 ^= v11
|
||||||
|
v07 = (v07 << 7) | (v07 >> 25)
|
||||||
|
v00 += v05
|
||||||
|
v15 ^= v00
|
||||||
|
v15 = (v15 << 16) | (v15 >> 16)
|
||||||
|
v10 += v15
|
||||||
|
v05 ^= v10
|
||||||
|
v05 = (v05 << 12) | (v05 >> 20)
|
||||||
|
v00 += v05
|
||||||
|
v15 ^= v00
|
||||||
|
v15 = (v15 << 8) | (v15 >> 24)
|
||||||
|
v10 += v15
|
||||||
|
v05 ^= v10
|
||||||
|
v05 = (v05 << 7) | (v05 >> 25)
|
||||||
|
v01 += v06
|
||||||
|
v12 ^= v01
|
||||||
|
v12 = (v12 << 16) | (v12 >> 16)
|
||||||
|
v11 += v12
|
||||||
|
v06 ^= v11
|
||||||
|
v06 = (v06 << 12) | (v06 >> 20)
|
||||||
|
v01 += v06
|
||||||
|
v12 ^= v01
|
||||||
|
v12 = (v12 << 8) | (v12 >> 24)
|
||||||
|
v11 += v12
|
||||||
|
v06 ^= v11
|
||||||
|
v06 = (v06 << 7) | (v06 >> 25)
|
||||||
|
v02 += v07
|
||||||
|
v13 ^= v02
|
||||||
|
v13 = (v13 << 16) | (v13 >> 16)
|
||||||
|
v08 += v13
|
||||||
|
v07 ^= v08
|
||||||
|
v07 = (v07 << 12) | (v07 >> 20)
|
||||||
|
v02 += v07
|
||||||
|
v13 ^= v02
|
||||||
|
v13 = (v13 << 8) | (v13 >> 24)
|
||||||
|
v08 += v13
|
||||||
|
v07 ^= v08
|
||||||
|
v07 = (v07 << 7) | (v07 >> 25)
|
||||||
|
v03 += v04
|
||||||
|
v14 ^= v03
|
||||||
|
v14 = (v14 << 16) | (v14 >> 16)
|
||||||
|
v09 += v14
|
||||||
|
v04 ^= v09
|
||||||
|
v04 = (v04 << 12) | (v04 >> 20)
|
||||||
|
v03 += v04
|
||||||
|
v14 ^= v03
|
||||||
|
v14 = (v14 << 8) | (v14 >> 24)
|
||||||
|
v09 += v14
|
||||||
|
v04 ^= v09
|
||||||
|
v04 = (v04 << 7) | (v04 >> 25)
|
||||||
|
}
|
||||||
|
|
||||||
|
v00 += s00
|
||||||
|
v01 += s01
|
||||||
|
v02 += s02
|
||||||
|
v03 += s03
|
||||||
|
v04 += s04
|
||||||
|
v05 += s05
|
||||||
|
v06 += s06
|
||||||
|
v07 += s07
|
||||||
|
v08 += s08
|
||||||
|
v09 += s09
|
||||||
|
v10 += s10
|
||||||
|
v11 += s11
|
||||||
|
v12 += s12
|
||||||
|
v13 += s13
|
||||||
|
v14 += s14
|
||||||
|
v15 += s15
|
||||||
|
|
||||||
|
s12++
|
||||||
|
binary.LittleEndian.PutUint32(state[48:], s12)
|
||||||
|
if s12 == 0 { // indicates overflow
|
||||||
|
s13++
|
||||||
|
binary.LittleEndian.PutUint32(state[52:], s13)
|
||||||
|
}
|
||||||
|
|
||||||
|
binary.LittleEndian.PutUint32(dst[0:], v00)
|
||||||
|
binary.LittleEndian.PutUint32(dst[4:], v01)
|
||||||
|
binary.LittleEndian.PutUint32(dst[8:], v02)
|
||||||
|
binary.LittleEndian.PutUint32(dst[12:], v03)
|
||||||
|
binary.LittleEndian.PutUint32(dst[16:], v04)
|
||||||
|
binary.LittleEndian.PutUint32(dst[20:], v05)
|
||||||
|
binary.LittleEndian.PutUint32(dst[24:], v06)
|
||||||
|
binary.LittleEndian.PutUint32(dst[28:], v07)
|
||||||
|
binary.LittleEndian.PutUint32(dst[32:], v08)
|
||||||
|
binary.LittleEndian.PutUint32(dst[36:], v09)
|
||||||
|
binary.LittleEndian.PutUint32(dst[40:], v10)
|
||||||
|
binary.LittleEndian.PutUint32(dst[44:], v11)
|
||||||
|
binary.LittleEndian.PutUint32(dst[48:], v12)
|
||||||
|
binary.LittleEndian.PutUint32(dst[52:], v13)
|
||||||
|
binary.LittleEndian.PutUint32(dst[56:], v14)
|
||||||
|
binary.LittleEndian.PutUint32(dst[60:], v15)
|
||||||
|
}
|
||||||
|
|
||||||
|
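// hChaCha20Generic implements the HChaCha20 function: it runs 20 ChaCha rounds
// over the sigma constant, the key and the 16-byte nonce and returns state
// words 0-3 and 12-15 (without the final addition) as the derived 256-bit key.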
func hChaCha20Generic(out *[32]byte, nonce *[16]byte, key *[32]byte) {
|
||||||
|
v00 := sigma[0]
|
||||||
|
v01 := sigma[1]
|
||||||
|
v02 := sigma[2]
|
||||||
|
v03 := sigma[3]
|
||||||
|
v04 := binary.LittleEndian.Uint32(key[0:])
|
||||||
|
v05 := binary.LittleEndian.Uint32(key[4:])
|
||||||
|
v06 := binary.LittleEndian.Uint32(key[8:])
|
||||||
|
v07 := binary.LittleEndian.Uint32(key[12:])
|
||||||
|
v08 := binary.LittleEndian.Uint32(key[16:])
|
||||||
|
v09 := binary.LittleEndian.Uint32(key[20:])
|
||||||
|
v10 := binary.LittleEndian.Uint32(key[24:])
|
||||||
|
v11 := binary.LittleEndian.Uint32(key[28:])
|
||||||
|
v12 := binary.LittleEndian.Uint32(nonce[0:])
|
||||||
|
v13 := binary.LittleEndian.Uint32(nonce[4:])
|
||||||
|
v14 := binary.LittleEndian.Uint32(nonce[8:])
|
||||||
|
v15 := binary.LittleEndian.Uint32(nonce[12:])
|
||||||
|
|
||||||
|
for i := 0; i < 20; i += 2 {
|
||||||
|
v00 += v04
|
||||||
|
v12 ^= v00
|
||||||
|
v12 = (v12 << 16) | (v12 >> 16)
|
||||||
|
v08 += v12
|
||||||
|
v04 ^= v08
|
||||||
|
v04 = (v04 << 12) | (v04 >> 20)
|
||||||
|
v00 += v04
|
||||||
|
v12 ^= v00
|
||||||
|
v12 = (v12 << 8) | (v12 >> 24)
|
||||||
|
v08 += v12
|
||||||
|
v04 ^= v08
|
||||||
|
v04 = (v04 << 7) | (v04 >> 25)
|
||||||
|
v01 += v05
|
||||||
|
v13 ^= v01
|
||||||
|
v13 = (v13 << 16) | (v13 >> 16)
|
||||||
|
v09 += v13
|
||||||
|
v05 ^= v09
|
||||||
|
v05 = (v05 << 12) | (v05 >> 20)
|
||||||
|
v01 += v05
|
||||||
|
v13 ^= v01
|
||||||
|
v13 = (v13 << 8) | (v13 >> 24)
|
||||||
|
v09 += v13
|
||||||
|
v05 ^= v09
|
||||||
|
v05 = (v05 << 7) | (v05 >> 25)
|
||||||
|
v02 += v06
|
||||||
|
v14 ^= v02
|
||||||
|
v14 = (v14 << 16) | (v14 >> 16)
|
||||||
|
v10 += v14
|
||||||
|
v06 ^= v10
|
||||||
|
v06 = (v06 << 12) | (v06 >> 20)
|
||||||
|
v02 += v06
|
||||||
|
v14 ^= v02
|
||||||
|
v14 = (v14 << 8) | (v14 >> 24)
|
||||||
|
v10 += v14
|
||||||
|
v06 ^= v10
|
||||||
|
v06 = (v06 << 7) | (v06 >> 25)
|
||||||
|
v03 += v07
|
||||||
|
v15 ^= v03
|
||||||
|
v15 = (v15 << 16) | (v15 >> 16)
|
||||||
|
v11 += v15
|
||||||
|
v07 ^= v11
|
||||||
|
v07 = (v07 << 12) | (v07 >> 20)
|
||||||
|
v03 += v07
|
||||||
|
v15 ^= v03
|
||||||
|
v15 = (v15 << 8) | (v15 >> 24)
|
||||||
|
v11 += v15
|
||||||
|
v07 ^= v11
|
||||||
|
v07 = (v07 << 7) | (v07 >> 25)
|
||||||
|
v00 += v05
|
||||||
|
v15 ^= v00
|
||||||
|
v15 = (v15 << 16) | (v15 >> 16)
|
||||||
|
v10 += v15
|
||||||
|
v05 ^= v10
|
||||||
|
v05 = (v05 << 12) | (v05 >> 20)
|
||||||
|
v00 += v05
|
||||||
|
v15 ^= v00
|
||||||
|
v15 = (v15 << 8) | (v15 >> 24)
|
||||||
|
v10 += v15
|
||||||
|
v05 ^= v10
|
||||||
|
v05 = (v05 << 7) | (v05 >> 25)
|
||||||
|
v01 += v06
|
||||||
|
v12 ^= v01
|
||||||
|
v12 = (v12 << 16) | (v12 >> 16)
|
||||||
|
v11 += v12
|
||||||
|
v06 ^= v11
|
||||||
|
v06 = (v06 << 12) | (v06 >> 20)
|
||||||
|
v01 += v06
|
||||||
|
v12 ^= v01
|
||||||
|
v12 = (v12 << 8) | (v12 >> 24)
|
||||||
|
v11 += v12
|
||||||
|
v06 ^= v11
|
||||||
|
v06 = (v06 << 7) | (v06 >> 25)
|
||||||
|
v02 += v07
|
||||||
|
v13 ^= v02
|
||||||
|
v13 = (v13 << 16) | (v13 >> 16)
|
||||||
|
v08 += v13
|
||||||
|
v07 ^= v08
|
||||||
|
v07 = (v07 << 12) | (v07 >> 20)
|
||||||
|
v02 += v07
|
||||||
|
v13 ^= v02
|
||||||
|
v13 = (v13 << 8) | (v13 >> 24)
|
||||||
|
v08 += v13
|
||||||
|
v07 ^= v08
|
||||||
|
v07 = (v07 << 7) | (v07 >> 25)
|
||||||
|
v03 += v04
|
||||||
|
v14 ^= v03
|
||||||
|
v14 = (v14 << 16) | (v14 >> 16)
|
||||||
|
v09 += v14
|
||||||
|
v04 ^= v09
|
||||||
|
v04 = (v04 << 12) | (v04 >> 20)
|
||||||
|
v03 += v04
|
||||||
|
v14 ^= v03
|
||||||
|
v14 = (v14 << 8) | (v14 >> 24)
|
||||||
|
v09 += v14
|
||||||
|
v04 ^= v09
|
||||||
|
v04 = (v04 << 7) | (v04 >> 25)
|
||||||
|
}
|
||||||
|
|
||||||
|
binary.LittleEndian.PutUint32(out[0:], v00)
|
||||||
|
binary.LittleEndian.PutUint32(out[4:], v01)
|
||||||
|
binary.LittleEndian.PutUint32(out[8:], v02)
|
||||||
|
binary.LittleEndian.PutUint32(out[12:], v03)
|
||||||
|
binary.LittleEndian.PutUint32(out[16:], v12)
|
||||||
|
binary.LittleEndian.PutUint32(out[20:], v13)
|
||||||
|
binary.LittleEndian.PutUint32(out[24:], v14)
|
||||||
|
binary.LittleEndian.PutUint32(out[28:], v15)
|
||||||
|
}
|
|
@@ -0,0 +1,56 @@
|
||||||
|
// Copyright (c) 2017 Andreas Auernhammer. All rights reserved.
|
||||||
|
// Use of this source code is governed by a license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build amd64,!gccgo,!appengine,!nacl,!go1.7
|
||||||
|
|
||||||
|
package chacha
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
useSSE2 = true
|
||||||
|
useSSSE3 = supportsSSSE3()
|
||||||
|
useAVX2 = false
|
||||||
|
}
|
||||||
|
|
||||||
|
// This function is implemented in chacha_amd64.s
|
||||||
|
//go:noescape
|
||||||
|
func initialize(state *[64]byte, key []byte, nonce *[16]byte)
|
||||||
|
|
||||||
|
// This function is implemented in chacha_amd64.s
|
||||||
|
//go:noescape
|
||||||
|
func supportsSSSE3() bool
|
||||||
|
|
||||||
|
// This function is implemented in chacha_amd64.s
|
||||||
|
//go:noescape
|
||||||
|
func hChaCha20SSE2(out *[32]byte, nonce *[16]byte, key *[32]byte)
|
||||||
|
|
||||||
|
// This function is implemented in chacha_amd64.s
|
||||||
|
//go:noescape
|
||||||
|
func hChaCha20SSSE3(out *[32]byte, nonce *[16]byte, key *[32]byte)
|
||||||
|
|
||||||
|
// This function is implemented in chacha_amd64.s
|
||||||
|
//go:noescape
|
||||||
|
func xorKeyStreamSSE2(dst, src []byte, block, state *[64]byte, rounds int) int
|
||||||
|
|
||||||
|
// This function is implemented in chacha_amd64.s
|
||||||
|
//go:noescape
|
||||||
|
func xorKeyStreamSSSE3(dst, src []byte, block, state *[64]byte, rounds int) int
|
||||||
|
|
||||||
|
func hChaCha20(out *[32]byte, nonce *[16]byte, key *[32]byte) {
|
||||||
|
if useSSSE3 {
|
||||||
|
hChaCha20SSSE3(out, nonce, key)
|
||||||
|
} else if useSSE2 { // on amd64 this is always true - used to test generic on amd64
|
||||||
|
hChaCha20SSE2(out, nonce, key)
|
||||||
|
} else {
|
||||||
|
hChaCha20Generic(out, nonce, key)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func xorKeyStream(dst, src []byte, block, state *[64]byte, rounds int) int {
|
||||||
|
if useSSSE3 {
|
||||||
|
return xorKeyStreamSSSE3(dst, src, block, state, rounds)
|
||||||
|
} else if useSSE2 { // on amd64 this is always true - used to test generic on amd64
|
||||||
|
return xorKeyStreamSSE2(dst, src, block, state, rounds)
|
||||||
|
}
|
||||||
|
return xorKeyStreamGeneric(dst, src, block, state, rounds)
|
||||||
|
}
|
|
@@ -0,0 +1,72 @@
|
||||||
|
// Copyright (c) 2017 Andreas Auernhammer. All rights reserved.
|
||||||
|
// Use of this source code is governed by a license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build go1.7,amd64,!gccgo,!appengine,!nacl
|
||||||
|
|
||||||
|
package chacha
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
useSSE2 = true
|
||||||
|
useSSSE3 = supportsSSSE3()
|
||||||
|
useAVX2 = supportsAVX2()
|
||||||
|
}
|
||||||
|
|
||||||
|
// This function is implemented in chacha_amd64.s
|
||||||
|
//go:noescape
|
||||||
|
func initialize(state *[64]byte, key []byte, nonce *[16]byte)
|
||||||
|
|
||||||
|
// This function is implemented in chacha_amd64.s
|
||||||
|
//go:noescape
|
||||||
|
func supportsSSSE3() bool
|
||||||
|
|
||||||
|
// This function is implemented in chachaAVX2_amd64.s
|
||||||
|
//go:noescape
|
||||||
|
func supportsAVX2() bool
|
||||||
|
|
||||||
|
// This function is implemented in chacha_amd64.s
|
||||||
|
//go:noescape
|
||||||
|
func hChaCha20SSE2(out *[32]byte, nonce *[16]byte, key *[32]byte)
|
||||||
|
|
||||||
|
// This function is implemented in chacha_amd64.s
|
||||||
|
//go:noescape
|
||||||
|
func hChaCha20SSSE3(out *[32]byte, nonce *[16]byte, key *[32]byte)
|
||||||
|
|
||||||
|
// This function is implemented in chachaAVX2_amd64.s
|
||||||
|
//go:noescape
|
||||||
|
func hChaCha20AVX(out *[32]byte, nonce *[16]byte, key *[32]byte)
|
||||||
|
|
||||||
|
// This function is implemented in chacha_amd64.s
|
||||||
|
//go:noescape
|
||||||
|
func xorKeyStreamSSE2(dst, src []byte, block, state *[64]byte, rounds int) int
|
||||||
|
|
||||||
|
// This function is implemented in chacha_amd64.s
|
||||||
|
//go:noescape
|
||||||
|
func xorKeyStreamSSSE3(dst, src []byte, block, state *[64]byte, rounds int) int
|
||||||
|
|
||||||
|
// This function is implemented in chachaAVX2_amd64.s
|
||||||
|
//go:noescape
|
||||||
|
func xorKeyStreamAVX2(dst, src []byte, block, state *[64]byte, rounds int) int
|
||||||
|
|
||||||
|
func hChaCha20(out *[32]byte, nonce *[16]byte, key *[32]byte) {
|
||||||
|
if useAVX2 {
|
||||||
|
hChaCha20AVX(out, nonce, key)
|
||||||
|
} else if useSSSE3 {
|
||||||
|
hChaCha20SSSE3(out, nonce, key)
|
||||||
|
} else if useSSE2 { // on amd64 this is always true - necessary for testing generic on amd64
|
||||||
|
hChaCha20SSE2(out, nonce, key)
|
||||||
|
} else {
|
||||||
|
hChaCha20Generic(out, nonce, key)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func xorKeyStream(dst, src []byte, block, state *[64]byte, rounds int) int {
|
||||||
|
if useAVX2 {
|
||||||
|
return xorKeyStreamAVX2(dst, src, block, state, rounds)
|
||||||
|
} else if useSSSE3 {
|
||||||
|
return xorKeyStreamSSSE3(dst, src, block, state, rounds)
|
||||||
|
} else if useSSE2 { // on amd64 this is always true - necessary for testing generic on amd64
|
||||||
|
return xorKeyStreamSSE2(dst, src, block, state, rounds)
|
||||||
|
}
|
||||||
|
return xorKeyStreamGeneric(dst, src, block, state, rounds)
|
||||||
|
}
|
|
@ -0,0 +1,26 @@
|
||||||
|
// Copyright (c) 2016 Andreas Auernhammer. All rights reserved.
|
||||||
|
// Use of this source code is governed by a license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build !amd64,!386 gccgo appengine nacl
|
||||||
|
|
||||||
|
package chacha
|
||||||
|
|
||||||
|
import "encoding/binary"
|
||||||
|
|
||||||
|
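// initialize lays out the 64-byte ChaCha state as 16 bytes of the sigma
// constant, followed by the 32-byte key and the 16-byte counter/nonce block.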
func initialize(state *[64]byte, key []byte, nonce *[16]byte) {
|
||||||
|
binary.LittleEndian.PutUint32(state[0:], sigma[0])
|
||||||
|
binary.LittleEndian.PutUint32(state[4:], sigma[1])
|
||||||
|
binary.LittleEndian.PutUint32(state[8:], sigma[2])
|
||||||
|
binary.LittleEndian.PutUint32(state[12:], sigma[3])
|
||||||
|
copy(state[16:], key[:])
|
||||||
|
copy(state[48:], nonce[:])
|
||||||
|
}
|
||||||
|
|
||||||
|
func xorKeyStream(dst, src []byte, block, state *[64]byte, rounds int) int {
|
||||||
|
return xorKeyStreamGeneric(dst, src, block, state, rounds)
|
||||||
|
}
|
||||||
|
|
||||||
|
func hChaCha20(out *[32]byte, nonce *[16]byte, key *[32]byte) {
|
||||||
|
hChaCha20Generic(out, nonce, key)
|
||||||
|
}
|
|
@@ -0,0 +1,382 @@
|
||||||
|
// Copyright (c) 2016 Andreas Auernhammer. All rights reserved.
|
||||||
|
// Use of this source code is governed by a license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
package chacha
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/hex"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func toHex(bits []byte) string {
|
||||||
|
return hex.EncodeToString(bits)
|
||||||
|
}
|
||||||
|
|
||||||
|
func fromHex(bits string) []byte {
|
||||||
|
b, err := hex.DecodeString(bits)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestHChaCha20(t *testing.T) {
|
||||||
|
defer func(sse2, ssse3, avx2 bool) {
|
||||||
|
useSSE2, useSSSE3, useAVX2 = sse2, ssse3, avx2
|
||||||
|
}(useSSE2, useSSSE3, useAVX2)
|
||||||
|
|
||||||
|
if useAVX2 {
|
||||||
|
t.Log("AVX2 version")
|
||||||
|
testHChaCha20(t)
|
||||||
|
useAVX2 = false
|
||||||
|
}
|
||||||
|
if useSSSE3 {
|
||||||
|
t.Log("SSSE3 version")
|
||||||
|
testHChaCha20(t)
|
||||||
|
useSSSE3 = false
|
||||||
|
}
|
||||||
|
if useSSE2 {
|
||||||
|
t.Log("SSE2 version")
|
||||||
|
testHChaCha20(t)
|
||||||
|
useSSE2 = false
|
||||||
|
}
|
||||||
|
t.Log("generic version")
|
||||||
|
testHChaCha20(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestVectors(t *testing.T) {
|
||||||
|
defer func(sse2, ssse3, avx2 bool) {
|
||||||
|
useSSE2, useSSSE3, useAVX2 = sse2, ssse3, avx2
|
||||||
|
}(useSSE2, useSSSE3, useAVX2)
|
||||||
|
|
||||||
|
if useAVX2 {
|
||||||
|
t.Log("AVX2 version")
|
||||||
|
testVectors(t)
|
||||||
|
useAVX2 = false
|
||||||
|
}
|
||||||
|
if useSSSE3 {
|
||||||
|
t.Log("SSSE3 version")
|
||||||
|
testVectors(t)
|
||||||
|
useSSSE3 = false
|
||||||
|
}
|
||||||
|
if useSSE2 {
|
||||||
|
t.Log("SSE2 version")
|
||||||
|
testVectors(t)
|
||||||
|
useSSE2 = false
|
||||||
|
}
|
||||||
|
t.Log("generic version")
|
||||||
|
testVectors(t)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestIncremental(t *testing.T) {
|
||||||
|
defer func(sse2, ssse3, avx2 bool) {
|
||||||
|
useSSE2, useSSSE3, useAVX2 = sse2, ssse3, avx2
|
||||||
|
}(useSSE2, useSSSE3, useAVX2)
|
||||||
|
|
||||||
|
if useAVX2 {
|
||||||
|
t.Log("AVX2 version")
|
||||||
|
testIncremental(t, 5, 2049)
|
||||||
|
useAVX2 = false
|
||||||
|
}
|
||||||
|
if useSSSE3 {
|
||||||
|
t.Log("SSSE3 version")
|
||||||
|
testIncremental(t, 5, 2049)
|
||||||
|
useSSSE3 = false
|
||||||
|
}
|
||||||
|
if useSSE2 {
|
||||||
|
t.Log("SSE2 version")
|
||||||
|
testIncremental(t, 5, 2049)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testHChaCha20(t *testing.T) {
|
||||||
|
for i, v := range hChaCha20Vectors {
|
||||||
|
var key [32]byte
|
||||||
|
var nonce [16]byte
|
||||||
|
copy(key[:], v.key)
|
||||||
|
copy(nonce[:], v.nonce)
|
||||||
|
|
||||||
|
hChaCha20(&key, &nonce, &key)
|
||||||
|
if !bytes.Equal(key[:], v.keystream) {
|
||||||
|
t.Errorf("Test %d: keystream mismatch:\n \t got: %s\n \t want: %s", i, toHex(key[:]), toHex(v.keystream))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testVectors(t *testing.T) {
|
||||||
|
for i, v := range vectors {
|
||||||
|
if len(v.plaintext) == 0 {
|
||||||
|
v.plaintext = make([]byte, len(v.ciphertext))
|
||||||
|
}
|
||||||
|
|
||||||
|
dst := make([]byte, len(v.ciphertext))
|
||||||
|
|
||||||
|
XORKeyStream(dst, v.plaintext, v.nonce, v.key, v.rounds)
|
||||||
|
if !bytes.Equal(dst, v.ciphertext) {
|
||||||
|
t.Errorf("Test %d: ciphertext mismatch:\n \t got: %s\n \t want: %s", i, toHex(dst), toHex(v.ciphertext))
|
||||||
|
}
|
||||||
|
|
||||||
|
c, err := NewCipher(v.nonce, v.key, v.rounds)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
c.XORKeyStream(dst[:1], v.plaintext[:1])
|
||||||
|
c.XORKeyStream(dst[1:], v.plaintext[1:])
|
||||||
|
if !bytes.Equal(dst, v.ciphertext) {
|
||||||
|
t.Errorf("Test %d: ciphertext mismatch:\n \t got: %s\n \t want: %s", i, toHex(dst), toHex(v.ciphertext))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func testIncremental(t *testing.T, iter int, size int) {
|
||||||
|
sse2, ssse3, avx2 := useSSE2, useSSSE3, useAVX2
|
||||||
|
msg, ref, stream := make([]byte, size), make([]byte, size), make([]byte, size)
|
||||||
|
|
||||||
|
for i := 0; i < iter; i++ {
|
||||||
|
var key [32]byte
|
||||||
|
var nonce []byte
|
||||||
|
switch i % 3 {
|
||||||
|
case 0:
|
||||||
|
nonce = make([]byte, 8)
|
||||||
|
case 1:
|
||||||
|
nonce = make([]byte, 12)
|
||||||
|
case 2:
|
||||||
|
nonce = make([]byte, 24)
|
||||||
|
}
|
||||||
|
|
||||||
|
for j := range key {
|
||||||
|
key[j] = byte(len(nonce) + i)
|
||||||
|
}
|
||||||
|
for j := range nonce {
|
||||||
|
nonce[j] = byte(i)
|
||||||
|
}
|
||||||
|
|
||||||
|
for j := 0; j <= len(msg); j++ {
|
||||||
|
useSSE2, useSSSE3, useAVX2 = false, false, false
|
||||||
|
XORKeyStream(ref[:j], msg[:j], nonce, key[:], 20)
|
||||||
|
|
||||||
|
useSSE2, useSSSE3, useAVX2 = sse2, ssse3, avx2
|
||||||
|
XORKeyStream(stream[:j], msg[:j], nonce, key[:], 20)
|
||||||
|
|
||||||
|
if !bytes.Equal(ref[:j], stream[:j]) {
|
||||||
|
t.Fatalf("Iteration %d failed:\n Message length: %d\n\n got: %s\nwant: %s", i, j, toHex(stream[:j]), toHex(ref[:j]))
|
||||||
|
}
|
||||||
|
|
||||||
|
useSSE2, useSSSE3, useAVX2 = false, false, false
|
||||||
|
c, _ := NewCipher(nonce, key[:], 20)
|
||||||
|
c.XORKeyStream(stream[:j], msg[:j])
|
||||||
|
|
||||||
|
useSSE2, useSSSE3, useAVX2 = sse2, ssse3, avx2
|
||||||
|
c, _ = NewCipher(nonce, key[:], 20)
|
||||||
|
c.XORKeyStream(stream[:j], msg[:j])
|
||||||
|
|
||||||
|
if !bytes.Equal(ref[:j], stream[:j]) {
|
||||||
|
t.Fatalf("Iteration %d failed:\n Message length: %d\n\n got: %s\nwant: %s", i, j, toHex(stream[:j]), toHex(ref[:j]))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
copy(msg, stream)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var hChaCha20Vectors = []struct {
|
||||||
|
key, nonce, keystream []byte
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
fromHex("0000000000000000000000000000000000000000000000000000000000000000"),
|
||||||
|
fromHex("000000000000000000000000000000000000000000000000"),
|
||||||
|
fromHex("1140704c328d1d5d0e30086cdf209dbd6a43b8f41518a11cc387b669b2ee6586"),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
fromHex("8000000000000000000000000000000000000000000000000000000000000000"),
|
||||||
|
fromHex("000000000000000000000000000000000000000000000000"),
|
||||||
|
fromHex("7d266a7fd808cae4c02a0a70dcbfbcc250dae65ce3eae7fc210f54cc8f77df86"),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
fromHex("0000000000000000000000000000000000000000000000000000000000000001"),
|
||||||
|
fromHex("000000000000000000000000000000000000000000000002"),
|
||||||
|
fromHex("e0c77ff931bb9163a5460c02ac281c2b53d792b1c43fea817e9ad275ae546963"),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
fromHex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"),
|
||||||
|
fromHex("000102030405060708090a0b0c0d0e0f1011121314151617"),
|
||||||
|
fromHex("51e3ff45a895675c4b33b46c64f4a9ace110d34df6a2ceab486372bacbd3eff6"),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
|
||||||
|
var vectors = []struct {
|
||||||
|
key, nonce, plaintext, ciphertext []byte
|
||||||
|
rounds int
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
fromHex("0000000000000000000000000000000000000000000000000000000000000000"),
|
||||||
|
fromHex("000000000000000000000000"),
|
||||||
|
nil,
|
||||||
|
fromHex("76b8e0ada0f13d90405d6ae55386bd28bdd219b8a08ded1aa836efcc8b770dc7da41597c5157488d7724e03fb8d84a376a43b8f41518a11cc387b669b2ee6586"),
|
||||||
|
20,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
fromHex("0000000000000000000000000000000000000000000000000000000000000001"),
|
||||||
|
fromHex("000000000000000000000002"),
|
||||||
|
fromHex("00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000" +
|
||||||
|
"416e79207375626d697373696f6e20746f20746865204945544620696e74656e6465642062792074686520436f6e7472696275746f7220666f72207075626c69" +
|
||||||
|
"636174696f6e20617320616c6c206f722070617274206f6620616e204945544620496e7465726e65742d4472616674206f722052464320616e6420616e792073" +
|
||||||
|
"746174656d656e74206d6164652077697468696e2074686520636f6e74657874206f6620616e204945544620616374697669747920697320636f6e7369646572" +
|
||||||
|
"656420616e20224945544620436f6e747269627574696f6e222e20537563682073746174656d656e747320696e636c756465206f72616c2073746174656d656e" +
|
||||||
|
"747320696e20494554462073657373696f6e732c2061732077656c6c206173207772697474656e20616e6420656c656374726f6e696320636f6d6d756e696361" +
|
||||||
|
"74696f6e73206d61646520617420616e792074696d65206f7220706c6163652c207768696368206172652061646472657373656420746f"),
|
||||||
|
fromHex("ecfa254f845f647473d3cb140da9e87606cb33066c447b87bc2666dde3fbb739a371c9ec7abcb4cfa9211f7d90f64c2d07f89e5cf9b93e330a6e4c08af5ba6d5" +
|
||||||
|
"a3fbf07df3fa2fde4f376ca23e82737041605d9f4f4f57bd8cff2c1d4b7955ec2a97948bd3722915c8f3d337f7d370050e9e96d647b7c39f56e031ca5eb6250d" +
|
||||||
|
"4042e02785ececfa4b4bb5e8ead0440e20b6e8db09d881a7c6132f420e52795042bdfa7773d8a9051447b3291ce1411c680465552aa6c405b7764d5e87bea85a" +
|
||||||
|
"d00f8449ed8f72d0d662ab052691ca66424bc86d2df80ea41f43abf937d3259dc4b2d0dfb48a6c9139ddd7f76966e928e635553ba76c5c879d7b35d49eb2e62b" +
|
||||||
|
"0871cdac638939e25e8a1e0ef9d5280fa8ca328b351c3c765989cbcf3daa8b6ccc3aaf9f3979c92b3720fc88dc95ed84a1be059c6499b9fda236e7e818b04b0b" +
|
||||||
|
"c39c1e876b193bfe5569753f88128cc08aaa9b63d1a16f80ef2554d7189c411f5869ca52c5b83fa36ff216b9c1d30062bebcfd2dc5bce0911934fda79a86f6e6" +
|
||||||
|
"98ced759c3ff9b6477338f3da4f9cd8514ea9982ccafb341b2384dd902f3d1ab7ac61dd29c6f21ba5b862f3730e37cfdc4fd806c22f221"),
|
||||||
|
20,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
fromHex("8000000000000000000000000000000000000000000000000000000000000000"),
|
||||||
|
fromHex("000000000000000000000000"),
|
||||||
|
nil,
|
||||||
|
fromHex("e29edae0466dea17f2576ce95025dd2db2d34fc81b5153f1b70a87f315a35286"),
|
||||||
|
20,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
fromHex("8000000000000000000000000000000000000000000000000000000000000000"),
|
||||||
|
fromHex("000000000000000000000000"),
|
||||||
|
nil,
|
||||||
|
fromHex("e29edae0466dea17f2576ce95025dd2db2d34fc81b5153f1b70a87f315a35286fb56db91e8dbf0a93faaa25777aad63450dae65ce3eae7fc210f54cc8f77df8662f8" +
|
||||||
|
"955228b2358d61d8c5ccf63a6c40203be5fb4541c39c52861de70b8a1416ddd3fe9a818bae8f0e8ff2288cede0459fbb00032fd85fef972fcb586c228d"),
|
||||||
|
20,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
fromHex("0000000000000000000000000000000000000000000000000000000000000000"),
|
||||||
|
fromHex("0000000000000000"),
|
||||||
|
nil,
|
||||||
|
fromHex("76b8e0ada0f13d90405d6ae55386bd28bdd219b8a08ded1aa836efcc8b770dc7da41597c5157488d7724e03fb8d84a376a43b8f41518a11cc387b669b2ee65869f07" +
|
||||||
|
"e7be5551387a98ba977c732d080dcb0f29a048e3656912c6533e32ee7aed29b721769ce64e43d57133b074d839d531ed1f28510afb45ace10a1f4b794d6f2d09a0e663266ce1ae7ed1081968a0758e7" +
|
||||||
|
"18e997bd362c6b0c34634a9a0b35d012737681f7b5d0f281e3afde458bc1e73d2d313c9cf94c05ff3716240a248f21320a058d7b3566bd520daaa3ed2bf0ac5b8b120fb852773c3639734b45c91a42d" +
|
||||||
|
"d4cb83f8840d2eedb158131062ac3f1f2cf8ff6dcd1856e86a1e6c3167167ee5a688742b47c5adfb59d4df76fd1db1e51ee03b1ca9f82aca173edb8b7293474ebe980f904d10c916442b4783a0e9848" +
|
||||||
|
"60cb6c957b39c38ed8f51cffaa68a4de01025a39c504546b9dc1406a7eb28151e5150d7b204baa719d4f091021217db5cf1b5c84c4fa71a879610a1a695ac527c5b56774a6b8a21aae88685868e094c" +
|
||||||
|
"f29ef4090af7a90cc07e8817aa528763797d3c332b67ca4bc110642c2151ec47ee84cb8c42d85f10e2a8cb18c3b7335f26e8c39a12b1bcc1707177b7613873"),
|
||||||
|
20,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
fromHex("0000000000000000000000000000000000000000000000000000000000000000"),
|
||||||
|
fromHex("0100000000000000"),
|
||||||
|
nil,
|
||||||
|
fromHex("ef3fdfd6c61578fbf5cf35bd3dd33b8009631634d21e42ac33960bd138e50d32111e4caf237ee53ca8ad6426194a88545ddc497a0b466e7d6bbdb0041b2f586b5305" +
|
||||||
|
"e5e44aff19b235936144675efbe4409eb7e8e5f1430f5f5836aeb49bb5328b017c4b9dc11f8a03863fa803dc71d5726b2b6b31aa32708afe5af1d6b690584d58792b271e5fdb92c486051c48b79a4d4" +
|
||||||
|
"8a109bb2d0477956e74c25e93c3c2db34bf779470464a033b8394517a5cf3576a6618c8551a456628b253ef0117c90cd46d8177a2a06d16e20e05c05f889bf87e95d6ee8a03807d1cd53d586872b125" +
|
||||||
|
"9d0647da7b7aae80af9b3aad41ad5a8141d2e156c9dd52a3bd2ae165bd7d6a2a4e2cf6938b8b390828ff20dc8fd60e2cd17fe368e35b467a70654ba93cfa62760a9d2f26da7818d4d863808e1add5ff" +
|
||||||
|
"db76d41efd524ded4246e03caa008950c91dedfc9a8e68173fe481c4d3d3c215fdf3af22aeab0097b835a84faabbbce094c6181a193ffeda067271ff7c10cce76542241116283842e31e922430211dc" +
|
||||||
|
"b38e556158fc2daaec367b705b75f782f8bc2c2c5e33a375390c3052f7e3446feb105fb47820f1d2539811c5b49bb76dc15f2d20a7e2c200b573db9f653ed7"),
|
||||||
|
20,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
fromHex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"),
|
||||||
|
fromHex("0001020304050607"),
|
||||||
|
nil,
|
||||||
|
fromHex("f798a189f195e66982105ffb640bb7757f579da31602fc93ec01ac56f85ac3c134a4547b733b46413042c9440049176905d3be59ea1c53f15916155c2be8241a3800" +
|
||||||
|
"8b9a26bc35941e2444177c8ade6689de95264986d95889fb60e84629c9bd9a5acb1cc118be563eb9b3a4a472f82e09a7e778492b562ef7130e88dfe031c79db9d4f7c7a899151b9a475032b63fc3852" +
|
||||||
|
"45fe054e3dd5a97a5f576fe064025d3ce042c566ab2c507b138db853e3d6959660996546cc9c4a6eafdc777c040d70eaf46f76dad3979e5c5360c3317166a1c894c94a371876a94df7628fe4eaaf2cc" +
|
||||||
|
"b27d5aaae0ad7ad0f9d4b6ad3b54098746d4524d38407a6deb3ab78fab78c94213668bbbd394c5de93b853178addd6b97f9fa1ec3e56c00c9ddff0a44a204241175a4cab0f961ba53ede9bdf960b94f" +
|
||||||
|
"9829b1f3414726429b362c5b538e391520f489b7ed8d20ae3fd49e9e259e44397514d618c96c4846be3c680bdc11c71dcbbe29ccf80d62a0938fa549391e6ea57ecbe2606790ec15d2224ae307c1442" +
|
||||||
|
"26b7c4e8c2f97d2a1d67852d29beba110edd445197012062a393a9c92803ad3b4f31d7bc6033ccf7932cfed3f019044d25905916777286f82f9a4cc1ffe430"),
|
||||||
|
20,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
fromHex("0000000000000000000000000000000000000000000000000000000000000000"),
|
||||||
|
fromHex("000000000000000000000000"),
|
||||||
|
nil,
|
||||||
|
fromHex("9bf49a6a0755f953811fce125f2683d50429c3bb49e074147e0089a52eae155f0564f879d27ae3c02ce82834acfa8c793a629f2ca0de6919610be82f411326be0bd588" +
|
||||||
|
"41203e74fe86fc71338ce0173dc628ebb719bdcbcc151585214cc089b442258dcda14cf111c602b8971b8cc843e91e46ca905151c02744a6b017e69316b20cd67c4bdecc538e8be990c1b6425d68bfd3a" +
|
||||||
|
"6fe97693e4846351596cca8abf59fddd0b7f52dcc0c60a448cbf9511610b0a742f1e4d238a7a45cae054ec2"),
|
||||||
|
12,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
fromHex("8000000000000000000000000000000000000000000000000000000000000000"),
|
||||||
|
fromHex("000000000000000000000000"),
|
||||||
|
nil,
|
||||||
|
fromHex("789cc357f0b6cda5395f08c8538f1226d08eb3e16ebd6b6db6cc9ca77d81d900bb9d21f6ef0b720550d161f1a80fab0468e48c086daad356edce3a3f988d8e"),
|
||||||
|
12,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
fromHex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"),
|
||||||
|
fromHex("0001020304050607"),
|
||||||
|
nil,
|
||||||
|
fromHex("6898eb04f3d151985e28e882f35daf28d2a1689f79081ffb08cdc48edbbd3dcd683c764f3dd7302293928ca3d4ef4194e6e22f41a72204a14b89115d06ca29fb0b9f6e" +
|
||||||
|
"ba3da6793a928afe76cdf62a5d5b0898bb9bb2348612189fdb825e5aa7559c9ec79ff80d05079fad81e9bc2521b2ebcb179cebeade91f20ff3e13192d60de2ee983ec07047e7827594773c28448d89e9b" +
|
||||||
|
"96bb0f8665b1a56f85abebd584a446e17d5a6fb847a1dbf341ece5124ff5f80d4a57fb7edf65a2907939b2f3c9654ccbfa2e5225edc8d799bf7ce296d6c8f9234cec0bd7b91b3d2ddc27f93ff8591ddb3" +
|
||||||
|
"62b54fab111a7da9d5b4187661ed0e691f7aa5959fb83112427a95bbeb"),
|
||||||
|
12,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
fromHex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"),
|
||||||
|
fromHex("0001020304050607"),
|
||||||
|
nil,
|
||||||
|
fromHex("40e1aaea1c843baa28b18eb728fec05dce47b0e824bf9a5d3f1bb1aad13b37fbbf0b0e146732c16380efeab70a1b6edff9acedc876b70d98b61f192290537973"),
|
||||||
|
8,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
fromHex("0000000000000000000000000000000000000000000000000000000000000000"),
|
||||||
|
fromHex("000000000000000000000000000000000000000000000000"),
|
||||||
|
nil,
|
||||||
|
fromHex("bcd02a18bf3f01d19292de30a7a8fdaca4b65e50a6002cc72cd6d2f7c91ac3d5728f83e0aad2bfcf9abd2d2db58faedd65015dd83fc09b131e271043019e8e0f789e96" +
|
||||||
|
"89e5208d7fd9e1f3c5b5341f48ef18a13e418998addadd97a3693a987f8e82ecd5c1433bfed1af49750c0f1ff29c4174a05b119aa3a9e8333812e0c0fea49e1ee0134a70a9d49c24e0cbd8fc3ba27e97c" +
|
||||||
|
"3322ad487f778f8dc6a122fa59cbe33e7"),
|
||||||
|
20,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
fromHex("8000000000000000000000000000000000000000000000000000000000000000"),
|
||||||
|
fromHex("000000000000000000000000000000000000000000000000"),
|
||||||
|
nil,
|
||||||
|
fromHex("ccfe8a9e93431bd582f07b3eb0f4a7afc22ef39337ddd84f0d3545b318a315a32b3abb96de0fc6acde48b248fe8a80e6fa72bfcdf9d8d2656b991676476f052d937308" +
|
||||||
|
"0e30d8c0e217126a3c64402e1d9404ba9d6b8ce4ad5ac9693f3660638c26ea2cd1b4a8d3348c1e179ead353ee72fee558e9994c51a27195e287d00ec2f8cfef8866d1f98714f40cbe4e18cebabf3cd1fd" +
|
||||||
|
"3bb65506e5dce1ad09f438bffe2c96d7f2f0827c8c3f2ca59dbaa393785c6b8da7c69c8a4a63ffd113dcc93de8f52dbcfaed5e4cbcc1dc310b1352868fab7b14d930a9f7a7d47bed0eaf5b151f6dac8bd" +
|
||||||
|
"45510698bdc205d70b944ea5450888dd3ec753da9708bf06c0714822dda74f285c361abd0cd1071324c253dc421905edca36e8808bffef091e7dbdecebdad98cf70b7cede72e9c3c4108e5b32ffae0f42" +
|
||||||
|
"151a8196939d8e3b8384be1"),
|
||||||
|
20,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
fromHex("000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"),
|
||||||
|
fromHex("000102030405060708090a0b0c0d0e0f1011121314151617"),
|
||||||
|
nil,
|
||||||
|
fromHex("e53a61cef151e81401067de33adfc02e90ab205361b49b539fda7f0e63b1bc7d68fbee56c9c20c39960e595f3ea76c979804d08cfa728e66cb5f766b840ec61f9ec20f" +
|
||||||
|
"7f90d28dae334426cecb52a8e84b4728a5fdd61deb7f1a3fb63dadf5595e06b6e441670964d595ae59cf21536271bae2594774fb19079b933d8fe744f4"),
|
||||||
|
20,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
fromHex("FF00000000000000000000000000000000000000000000000000000000000000"),
|
||||||
|
fromHex("000000000000000000000000"),
|
||||||
|
nil,
|
||||||
|
fromHex("4fe0956ef81829ff96ef093f03c15dc0eaf4e6905eff9777a5db78348915689ed64204e8fce664cb71ea4016185d15e05be4329e02fcd472707508ef62fd89565ffa632effdb" +
|
||||||
|
"bf08394aa437d8ff093e6cea49b61672cf294474927a8150e06cec9fdec0f5cf26f257fe335a8d7dd6d208e6df6f0a83bb1b0b5c574edc2c9a604e4310acb970815a9819c91a5137794d1ee71ede3e5d59f27e76" +
|
||||||
|
"84d287d704fe3945de0a9b66be3d86e66980263602aeb600efaef243b1adf4c701dbf8f57427dee71dacd703d25317ffc7a67e7881ad13f0bf096d3b0486eec71fef5e0efb5964d14eb2cea0336e34ed4444cc2b" +
|
||||||
|
"bdbd8ef5ba89a0a5e9e35a2e23b38d3f9136f42aefb25c2e7eae0b42c1d1ada5618c5299aedd469ce4f9353ccbae3f89110922b669b8d1b62e72aaf893b83ca264707efbefdcf22ef2333b01f18a849653b52925" +
|
||||||
|
"63c37314bf34289b0636a2f8c24bc97fec554a9c31ec2cb4e30ba70fa965a17561e56739be138d86a4777f866ca24ba24f70913230e1b3ea34a9a90eea1b6a3a81b93286bb582a53e78557845a654775a18efb77" +
|
||||||
|
"eee098d2680bc4ceb866874f31c7fadd70262cca6039833522de03cb2527dc5cfc7072db48b6011b852d705c7b24ffedf52facf352ab2512c625811db7965edc87d08f7f27e02665c9a6a42968e4c58cd86aa847" +
|
||||||
|
"69658153b62f208b2dcfbcb364d63e6671cf60698640"),
|
||||||
|
20,
|
||||||
|
},
|
||||||
|
{
|
||||||
|
fromHex("0120000000000000000000000000007000000000000000000000000000000DEF"),
|
||||||
|
fromHex("000000000000000000000000"),
|
||||||
|
nil,
|
||||||
|
fromHex("ba6bce79c4f79c815b7fec53840ff0549ff5496378aa1f6ba481a48a5b9b8dbea8b820eccbc4eca37e1050fc53510a746037d2707f81e9683ec3f495b02ad0f848d7f9bf67bc" +
|
||||||
|
"6299be525d1bf3bfd9953caa12cc4e1d5a6969e6fcd5d3c3e3d9f2e735cd7808755ddda7b22a3ae6040e7f8d05d62661a97d84dad694c69637aea3ae0af9f73303ffce3ae6161281d7a3c7e50a5706d766b34ddd" +
|
||||||
|
"eab6974fdab10b3f48fb31f26df72e54c616edf1afc019f240c059a7c003677008227f49b021bc23c9c51d6f85ad136a4aa4950d9692f7094d344d88c05868691eb620d39bd8154986c971a8c9552ff0015fd78a" +
|
||||||
|
"6bdd33df94b0056786a1e0ceb9cc9a38a31fbba224c1fb82bf6af376f67e94337a730301a6365d49b0dd56328e0269cbdfb5bcbccf1c7c3f4922ec1310aa2ef8136be788a55190453d3d3153b1b960a16f79365a" +
|
||||||
|
"0bc7d6d2d5cda9f0993dbb815ee72f83b9d2ed296598fb21d91c29d1acf4ff0a549784a1d6a4f0935ee18efbf41fdc98d81c449544e9701d92648c06e5f416833b90d15fd4c04fc720a5ec6c6fc8b3d85a66826a" +
|
||||||
|
"5e6817e21c4c4c0d7151b128236c41397ad4c6549e827c42269659973c153db70ffc33951b19ff21428091cea3836f72f88082508bae1839b59fa9c2556bdf373419d3cf29a8fad4d1787d829ad884f9927228fc" +
|
||||||
|
"0b8bb7f1a067e7bdbf06c3885154f76f5be0cde8c7c59442b72b0e3f0341afe644e7eb4c29a467288aebc893e17b446c63da7551b8b59ebdd0cbcd65bc79a969bd3397f83d149840de731df4c09a833d5bd9feda" +
|
||||||
|
"e1cd78a09b233b020de86ab71b9fd425adf84e502cef7c62015eade66ca91b0a90306894b53c7c5147e524d7b919ccdd0731e4eef8fe476b6eed38c91b611cd1777b9acf6eee0a11eaff16ae872db92a5d133fe7" +
|
||||||
|
"bed999882da283893dd1e96f530be3cd36bf38c16deed2cd77651b6e0d3628de3cb86a78f1d07f6fc79434da5f73888be617b84595acef154f66b95ade1a3e120421a9dac6eec1e5b60139da3d604a03d4a9b7a3" +
|
||||||
|
"0810a9c7d551aa8df08e11544486ad33000bfe410e8e6f35cb9d22806a5fcacefc6a1257d373d426243576fad9b20ad5ba84befc1a47c79d7bd2923b5776d3df86c8ed98b700d317502849ec8c02ecb8513a7a32" +
|
||||||
|
"e2db15e75a814f12cfc20429ae06cae2021406b4f174ce56dca65f7994a3b2722e764520a52f87d0a887fc771dbfbf381b4f750dc074fedec1a43a4df37a5a2c148f89d9630ebbd1be1858bed10207cdacae9a0a" +
|
||||||
|
"b92df58de53de4718f929a83474fbcf9969f1d28a5b257cacd56f0ff0bc425c93d8c91ac833c2cfefb97d82fe6236f3ec3c29e0112a6cac5abfec733db41265f8ff486e7d7fa0b3d9766357377f089056c9408d8" +
|
||||||
|
"2f09f18700236cc1058ea1c273e287d07d521fdbb5e28d41cc1d95999eccee"),
|
||||||
|
20,
|
||||||
|
},
|
||||||
|
}
|
|
@ -0,0 +1,41 @@
|
||||||
|
// Copyright (c) 2016 Andreas Auernhammer. All rights reserved.
|
||||||
|
// Use of this source code is governed by a license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
// Package chacha20 implements the ChaCha20 / XChaCha20 stream cipher.
|
||||||
|
// Notice that each key-nonce combination must be unique for all time.
|
||||||
|
//
|
||||||
|
// There are three versions of ChaCha20:
|
||||||
|
// - ChaCha20 with a 64 bit nonce (en/decrypt up to 2^64 * 64 bytes for one key-nonce combination)
|
||||||
|
// - ChaCha20 with a 96 bit nonce (en/decrypt up to 2^32 * 64 bytes (~256 GB) for one key-nonce combination)
|
||||||
|
// - XChaCha20 with a 192 bit nonce (en/decrypt up to 2^64 * 64 bytes for one key-nonce combination)
|
||||||
|
package chacha20 // import "github.com/aead/chacha20"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/cipher"
|
||||||
|
|
||||||
|
"github.com/aead/chacha20/chacha"
|
||||||
|
)
|
||||||
|
|
||||||
|
// XORKeyStream crypts bytes from src to dst using the given nonce and key.
|
||||||
|
// The length of the nonce determines the version of ChaCha20:
|
||||||
|
// - 8 bytes: ChaCha20 with a 64 bit nonce and a 2^64 * 64 byte period.
|
||||||
|
// - 12 bytes: ChaCha20 as defined in RFC 7539 and a 2^32 * 64 byte period.
|
||||||
|
// - 24 bytes: XChaCha20 with a 192 bit nonce and a 2^64 * 64 byte period.
|
||||||
|
// Src and dst may be the same slice but otherwise should not overlap.
|
||||||
|
// If len(dst) < len(src) this function panics.
|
||||||
|
// If the nonce is neither 64, 96 nor 192 bits long, this function panics.
|
||||||
|
func XORKeyStream(dst, src, nonce, key []byte) {
|
||||||
|
chacha.XORKeyStream(dst, src, nonce, key, 20)
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewCipher returns a new cipher.Stream implementing a ChaCha20 version.
|
||||||
|
// The nonce must be unique for one key for all time.
|
||||||
|
// The length of the nonce determines the version of ChaCha20:
|
||||||
|
// - 8 bytes: ChaCha20 with a 64 bit nonce and a 2^64 * 64 byte period.
|
||||||
|
// - 12 bytes: ChaCha20 as defined in RFC 7539 and a 2^32 * 64 byte period.
|
||||||
|
// - 24 bytes: XChaCha20 with a 192 bit nonce and a 2^64 * 64 byte period.
|
||||||
|
// If the nonce is neither 64, 96 nor 192 bits long, a non-nil error is returned.
|
||||||
|
func NewCipher(nonce, key []byte) (cipher.Stream, error) {
|
||||||
|
return chacha.NewCipher(nonce, key, 20)
|
||||||
|
}
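The package comment above describes how the nonce length selects the ChaCha20 variant (8 bytes, 12 bytes for the RFC 7539 form, 24 bytes for XChaCha20). As a minimal illustrative sketch, not part of the vendored code, the following shows one way a caller might use the XORKeyStream and NewCipher APIs of github.com/aead/chacha20; the main package, key and nonce values are placeholders chosen for the example.

package main

import (
	"crypto/rand"
	"fmt"

	"github.com/aead/chacha20"
)

func main() {
	key := make([]byte, 32)   // 256-bit key
	nonce := make([]byte, 24) // 24-byte nonce selects XChaCha20
	if _, err := rand.Read(key); err != nil {
		panic(err)
	}
	if _, err := rand.Read(nonce); err != nil {
		panic(err)
	}

	plaintext := []byte("example payload")
	ciphertext := make([]byte, len(plaintext))

	// One-shot API: XORs the keystream over src into dst.
	chacha20.XORKeyStream(ciphertext, plaintext, nonce, key)

	// Streaming API: the same keystream through cipher.Stream.
	c, err := chacha20.NewCipher(nonce, key)
	if err != nil {
		panic(err)
	}
	recovered := make([]byte, len(ciphertext))
	c.XORKeyStream(recovered, ciphertext)

	fmt.Printf("%s\n", recovered)
}

Since ChaCha20 is a stream cipher, applying XORKeyStream to the ciphertext with the same key and nonce recovers the plaintext, which is what the streaming half of the sketch demonstrates.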
|
|
@@ -0,0 +1,108 @@
|
||||||
|
// Copyright (c) 2016 Andreas Auernhammer. All rights reserved.
|
||||||
|
// Use of this source code is governed by a license that can be
|
||||||
|
// found in the LICENSE file.
|
||||||
|
|
||||||
|
package chacha20
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"encoding/hex"
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/aead/chacha20/chacha"
|
||||||
|
)
|
||||||
|
|
||||||
|
func toHex(bits []byte) string {
|
||||||
|
return hex.EncodeToString(bits)
|
||||||
|
}
|
||||||
|
|
||||||
|
func fromHex(bits string) []byte {
|
||||||
|
b, err := hex.DecodeString(bits)
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return b
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestVectors(t *testing.T) {
|
||||||
|
for i, v := range vectors {
|
||||||
|
if len(v.plaintext) == 0 {
|
||||||
|
v.plaintext = make([]byte, len(v.ciphertext))
|
||||||
|
}
|
||||||
|
|
||||||
|
dst := make([]byte, len(v.ciphertext))
|
||||||
|
|
||||||
|
XORKeyStream(dst, v.plaintext, v.nonce, v.key)
|
||||||
|
if !bytes.Equal(dst, v.ciphertext) {
|
||||||
|
t.Errorf("Test %d: ciphertext mismatch:\n \t got: %s\n \t want: %s", i, toHex(dst), toHex(v.ciphertext))
|
||||||
|
}
|
||||||
|
|
||||||
|
c, err := NewCipher(v.nonce, v.key)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatal(err)
|
||||||
|
}
|
||||||
|
c.XORKeyStream(dst[:1], v.plaintext[:1])
|
||||||
|
c.XORKeyStream(dst[1:], v.plaintext[1:])
|
||||||
|
if !bytes.Equal(dst, v.ciphertext) {
|
||||||
|
t.Errorf("Test %d: ciphertext mismatch:\n \t got: %s\n \t want: %s", i, toHex(dst), toHex(v.ciphertext))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func benchmarkCipher(b *testing.B, size int, nonceSize int) {
|
||||||
|
var key [32]byte
|
||||||
|
nonce := make([]byte, nonceSize)
|
||||||
|
c, _ := NewCipher(nonce, key[:])
|
||||||
|
buf := make([]byte, size)
|
||||||
|
|
||||||
|
b.SetBytes(int64(len(buf)))
|
||||||
|
b.ResetTimer()
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
c.XORKeyStream(buf, buf)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func benchmarkXORKeyStream(b *testing.B, size int, nonceSize int) {
|
||||||
|
var key [32]byte
|
||||||
|
nonce := make([]byte, nonceSize)
|
||||||
|
buf := make([]byte, size)
|
||||||
|
b.SetBytes(int64(len(buf)))
|
||||||
|
b.ResetTimer()
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
XORKeyStream(buf, buf, nonce[:], key[:])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkChaCha20_64(b *testing.B) { benchmarkCipher(b, 64, chacha.NonceSize) }
|
||||||
|
func BenchmarkChaCha20_1K(b *testing.B) { benchmarkCipher(b, 1024, chacha.NonceSize) }
|
||||||
|
func BenchmarkXChaCha20_64(b *testing.B) { benchmarkXORKeyStream(b, 64, chacha.XNonceSize) }
|
||||||
|
func BenchmarkXChaCha20_1K(b *testing.B) { benchmarkXORKeyStream(b, 1024, chacha.XNonceSize) }
|
||||||
|
func BenchmarkXORKeyStream64(b *testing.B) { benchmarkXORKeyStream(b, 64, chacha.NonceSize) }
|
||||||
|
func BenchmarkXORKeyStream1K(b *testing.B) { benchmarkXORKeyStream(b, 1024, chacha.NonceSize) }
|
||||||
|
func BenchmarkXChaCha20_XORKeyStream64(b *testing.B) { benchmarkXORKeyStream(b, 64, chacha.XNonceSize) }
|
||||||
|
func BenchmarkXChaCha20_XORKeyStream1K(b *testing.B) {
|
||||||
|
benchmarkXORKeyStream(b, 1024, chacha.XNonceSize)
|
||||||
|
}
|
||||||
|
|
||||||
|
var vectors = []struct {
|
||||||
|
key, nonce, plaintext, ciphertext []byte
|
||||||
|
}{
|
||||||
|
{
|
||||||
|
fromHex("0000000000000000000000000000000000000000000000000000000000000000"),
|
||||||
|
fromHex("0000000000000000"),
|
||||||
|
nil,
|
||||||
|
fromHex("76b8e0ada0f13d90405d6ae55386bd28bdd219b8a08ded1aa836efcc8b770dc7da41597c5157488d7724e03fb8d84a376a43b8f41518a11cc387b669b2ee6586"),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
fromHex("0000000000000000000000000000000000000000000000000000000000000000"),
|
||||||
|
fromHex("000000000000000000000000"),
|
||||||
|
nil,
|
||||||
|
fromHex("76b8e0ada0f13d90405d6ae55386bd28bdd219b8a08ded1aa836efcc8b770dc7da41597c5157488d7724e03fb8d84a376a43b8f41518a11cc387b669b2ee6586"),
|
||||||
|
},
|
||||||
|
{
|
||||||
|
fromHex("0000000000000000000000000000000000000000000000000000000000000000"),
|
||||||
|
fromHex("000000000000000000000000000000000000000000000000"),
|
||||||
|
nil,
|
||||||
|
fromHex("bcd02a18bf3f01d19292de30a7a8fdaca4b65e50a6002cc72cd6d2f7c91ac3d5728f83e0aad2bfcf9abd2d2db58faedd65015dd83fc09b131e271043019e8e0f"),
|
||||||
|
},
|
||||||
|
}
|
|
@ -0,0 +1,23 @@
|
||||||
|
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||||
|
*.o
|
||||||
|
*.a
|
||||||
|
*.so
|
||||||
|
|
||||||
|
# Folders
|
||||||
|
_obj
|
||||||
|
_test
|
||||||
|
|
||||||
|
# Architecture specific extensions/prefixes
|
||||||
|
*.[568vq]
|
||||||
|
[568vq].out
|
||||||
|
|
||||||
|
*.cgo1.go
|
||||||
|
*.cgo2.c
|
||||||
|
_cgo_defun.c
|
||||||
|
_cgo_gotypes.go
|
||||||
|
_cgo_export.*
|
||||||
|
|
||||||
|
_testmain.go
|
||||||
|
|
||||||
|
*.exe
|
||||||
|
*.test
|
|
@ -0,0 +1,212 @@
|
||||||
|
package lru
|
||||||
|
|
||||||
|
import (
|
||||||
|
"fmt"
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/hashicorp/golang-lru/simplelru"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Default2QRecentRatio is the ratio of the 2Q cache dedicated
|
||||||
|
// to recently added entries that have only been accessed once.
|
||||||
|
Default2QRecentRatio = 0.25
|
||||||
|
|
||||||
|
// Default2QGhostEntries is the default ratio of ghost
|
||||||
|
// entries kept to track entries recently evicted
|
||||||
|
Default2QGhostEntries = 0.50
|
||||||
|
)
|
||||||
|
|
||||||
|
// TwoQueueCache is a thread-safe fixed size 2Q cache.
|
||||||
|
// 2Q is an enhancement over the standard LRU cache
|
||||||
|
// in that it tracks both frequently and recently used
|
||||||
|
// entries separately. This avoids a burst in access to new
|
||||||
|
// entries from evicting frequently used entries. It adds some
|
||||||
|
// additional tracking overhead to the standard LRU cache, and is
|
||||||
|
// computationally about 2x the cost, and adds some metadata over
|
||||||
|
// head. The ARCCache is similar, but does not require setting any
|
||||||
|
// parameters.
|
||||||
|
type TwoQueueCache struct {
|
||||||
|
size int
|
||||||
|
recentSize int
|
||||||
|
|
||||||
|
recent *simplelru.LRU
|
||||||
|
frequent *simplelru.LRU
|
||||||
|
recentEvict *simplelru.LRU
|
||||||
|
lock sync.RWMutex
|
||||||
|
}
|
||||||
|
|
||||||
|
// New2Q creates a new TwoQueueCache using the default
|
||||||
|
// values for the parameters.
|
||||||
|
func New2Q(size int) (*TwoQueueCache, error) {
|
||||||
|
return New2QParams(size, Default2QRecentRatio, Default2QGhostEntries)
|
||||||
|
}
|
||||||
|
|
||||||
|
// New2QParams creates a new TwoQueueCache using the provided
|
||||||
|
// parameter values.
|
||||||
|
func New2QParams(size int, recentRatio float64, ghostRatio float64) (*TwoQueueCache, error) {
|
||||||
|
if size <= 0 {
|
||||||
|
return nil, fmt.Errorf("invalid size")
|
||||||
|
}
|
||||||
|
if recentRatio < 0.0 || recentRatio > 1.0 {
|
||||||
|
return nil, fmt.Errorf("invalid recent ratio")
|
||||||
|
}
|
||||||
|
if ghostRatio < 0.0 || ghostRatio > 1.0 {
|
||||||
|
return nil, fmt.Errorf("invalid ghost ratio")
|
||||||
|
}
|
||||||
|
|
||||||
|
// Determine the sub-sizes
|
||||||
|
recentSize := int(float64(size) * recentRatio)
|
||||||
|
evictSize := int(float64(size) * ghostRatio)
|
||||||
|
|
||||||
|
// Allocate the LRUs
|
||||||
|
recent, err := simplelru.NewLRU(size, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
frequent, err := simplelru.NewLRU(size, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
recentEvict, err := simplelru.NewLRU(evictSize, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Initialize the cache
|
||||||
|
c := &TwoQueueCache{
|
||||||
|
size: size,
|
||||||
|
recentSize: recentSize,
|
||||||
|
recent: recent,
|
||||||
|
frequent: frequent,
|
||||||
|
recentEvict: recentEvict,
|
||||||
|
}
|
||||||
|
return c, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *TwoQueueCache) Get(key interface{}) (interface{}, bool) {
|
||||||
|
c.lock.Lock()
|
||||||
|
defer c.lock.Unlock()
|
||||||
|
|
||||||
|
// Check if this is a frequent value
|
||||||
|
if val, ok := c.frequent.Get(key); ok {
|
||||||
|
return val, ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the value is contained in recent, then we
|
||||||
|
// promote it to frequent
|
||||||
|
if val, ok := c.recent.Peek(key); ok {
|
||||||
|
c.recent.Remove(key)
|
||||||
|
c.frequent.Add(key, val)
|
||||||
|
return val, ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// No hit
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *TwoQueueCache) Add(key, value interface{}) {
|
||||||
|
c.lock.Lock()
|
||||||
|
defer c.lock.Unlock()
|
||||||
|
|
||||||
|
// Check if the value is frequently used already,
|
||||||
|
// and just update the value
|
||||||
|
if c.frequent.Contains(key) {
|
||||||
|
c.frequent.Add(key, value)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if the value is recently used, and promote
|
||||||
|
// the value into the frequent list
|
||||||
|
if c.recent.Contains(key) {
|
||||||
|
c.recent.Remove(key)
|
||||||
|
c.frequent.Add(key, value)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the value was recently evicted, add it to the
|
||||||
|
// frequently used list
|
||||||
|
if c.recentEvict.Contains(key) {
|
||||||
|
c.ensureSpace(true)
|
||||||
|
c.recentEvict.Remove(key)
|
||||||
|
c.frequent.Add(key, value)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add to the recently seen list
|
||||||
|
c.ensureSpace(false)
|
||||||
|
c.recent.Add(key, value)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// ensureSpace is used to ensure we have space in the cache
|
||||||
|
func (c *TwoQueueCache) ensureSpace(recentEvict bool) {
|
||||||
|
// If we have space, nothing to do
|
||||||
|
recentLen := c.recent.Len()
|
||||||
|
freqLen := c.frequent.Len()
|
||||||
|
if recentLen+freqLen < c.size {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// If the recent buffer is larger than
|
||||||
|
// the target, evict from there
|
||||||
|
if recentLen > 0 && (recentLen > c.recentSize || (recentLen == c.recentSize && !recentEvict)) {
|
||||||
|
k, _, _ := c.recent.RemoveOldest()
|
||||||
|
c.recentEvict.Add(k, nil)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove from the frequent list otherwise
|
||||||
|
c.frequent.RemoveOldest()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *TwoQueueCache) Len() int {
|
||||||
|
c.lock.RLock()
|
||||||
|
defer c.lock.RUnlock()
|
||||||
|
return c.recent.Len() + c.frequent.Len()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *TwoQueueCache) Keys() []interface{} {
|
||||||
|
c.lock.RLock()
|
||||||
|
defer c.lock.RUnlock()
|
||||||
|
k1 := c.frequent.Keys()
|
||||||
|
k2 := c.recent.Keys()
|
||||||
|
return append(k1, k2...)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *TwoQueueCache) Remove(key interface{}) {
|
||||||
|
c.lock.Lock()
|
||||||
|
defer c.lock.Unlock()
|
||||||
|
if c.frequent.Remove(key) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if c.recent.Remove(key) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if c.recentEvict.Remove(key) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *TwoQueueCache) Purge() {
|
||||||
|
c.lock.Lock()
|
||||||
|
defer c.lock.Unlock()
|
||||||
|
c.recent.Purge()
|
||||||
|
c.frequent.Purge()
|
||||||
|
c.recentEvict.Purge()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *TwoQueueCache) Contains(key interface{}) bool {
|
||||||
|
c.lock.RLock()
|
||||||
|
defer c.lock.RUnlock()
|
||||||
|
return c.frequent.Contains(key) || c.recent.Contains(key)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *TwoQueueCache) Peek(key interface{}) (interface{}, bool) {
|
||||||
|
c.lock.RLock()
|
||||||
|
defer c.lock.RUnlock()
|
||||||
|
if val, ok := c.frequent.Peek(key); ok {
|
||||||
|
return val, ok
|
||||||
|
}
|
||||||
|
return c.recent.Peek(key)
|
||||||
|
}
|
|
@ -0,0 +1,306 @@
|
||||||
|
package lru
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math/rand"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func Benchmark2Q_Rand(b *testing.B) {
|
||||||
|
l, err := New2Q(8192)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
trace := make([]int64, b.N*2)
|
||||||
|
for i := 0; i < b.N*2; i++ {
|
||||||
|
trace[i] = rand.Int63() % 32768
|
||||||
|
}
|
||||||
|
|
||||||
|
b.ResetTimer()
|
||||||
|
|
||||||
|
var hit, miss int
|
||||||
|
for i := 0; i < 2*b.N; i++ {
|
||||||
|
if i%2 == 0 {
|
||||||
|
l.Add(trace[i], trace[i])
|
||||||
|
} else {
|
||||||
|
_, ok := l.Get(trace[i])
|
||||||
|
if ok {
|
||||||
|
hit++
|
||||||
|
} else {
|
||||||
|
miss++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(miss))
|
||||||
|
}
|
||||||
|
|
||||||
|
func Benchmark2Q_Freq(b *testing.B) {
|
||||||
|
l, err := New2Q(8192)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
trace := make([]int64, b.N*2)
|
||||||
|
for i := 0; i < b.N*2; i++ {
|
||||||
|
if i%2 == 0 {
|
||||||
|
trace[i] = rand.Int63() % 16384
|
||||||
|
} else {
|
||||||
|
trace[i] = rand.Int63() % 32768
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
b.ResetTimer()
|
||||||
|
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
l.Add(trace[i], trace[i])
|
||||||
|
}
|
||||||
|
var hit, miss int
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
_, ok := l.Get(trace[i])
|
||||||
|
if ok {
|
||||||
|
hit++
|
||||||
|
} else {
|
||||||
|
miss++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(miss))
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test2Q_RandomOps(t *testing.T) {
|
||||||
|
size := 128
|
||||||
|
l, err := New2Q(128)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
n := 200000
|
||||||
|
for i := 0; i < n; i++ {
|
||||||
|
key := rand.Int63() % 512
|
||||||
|
r := rand.Int63()
|
||||||
|
switch r % 3 {
|
||||||
|
case 0:
|
||||||
|
l.Add(key, key)
|
||||||
|
case 1:
|
||||||
|
l.Get(key)
|
||||||
|
case 2:
|
||||||
|
l.Remove(key)
|
||||||
|
}
|
||||||
|
|
||||||
|
if l.recent.Len()+l.frequent.Len() > size {
|
||||||
|
t.Fatalf("bad: recent: %d freq: %d",
|
||||||
|
l.recent.Len(), l.frequent.Len())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test2Q_Get_RecentToFrequent(t *testing.T) {
|
||||||
|
l, err := New2Q(128)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Touch all the entries, should be in t1
|
||||||
|
for i := 0; i < 128; i++ {
|
||||||
|
l.Add(i, i)
|
||||||
|
}
|
||||||
|
if n := l.recent.Len(); n != 128 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
if n := l.frequent.Len(); n != 0 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get should upgrade to t2
|
||||||
|
for i := 0; i < 128; i++ {
|
||||||
|
_, ok := l.Get(i)
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("missing: %d", i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if n := l.recent.Len(); n != 0 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
if n := l.frequent.Len(); n != 128 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get be from t2
|
||||||
|
for i := 0; i < 128; i++ {
|
||||||
|
_, ok := l.Get(i)
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("missing: %d", i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if n := l.recent.Len(); n != 0 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
if n := l.frequent.Len(); n != 128 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test2Q_Add_RecentToFrequent(t *testing.T) {
|
||||||
|
l, err := New2Q(128)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add initially to recent
|
||||||
|
l.Add(1, 1)
|
||||||
|
if n := l.recent.Len(); n != 1 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
if n := l.frequent.Len(); n != 0 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add should upgrade to frequent
|
||||||
|
l.Add(1, 1)
|
||||||
|
if n := l.recent.Len(); n != 0 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
if n := l.frequent.Len(); n != 1 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add should remain in frequent
|
||||||
|
l.Add(1, 1)
|
||||||
|
if n := l.recent.Len(); n != 0 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
if n := l.frequent.Len(); n != 1 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test2Q_Add_RecentEvict(t *testing.T) {
|
||||||
|
l, err := New2Q(4)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add 1,2,3,4,5 -> Evict 1
|
||||||
|
l.Add(1, 1)
|
||||||
|
l.Add(2, 2)
|
||||||
|
l.Add(3, 3)
|
||||||
|
l.Add(4, 4)
|
||||||
|
l.Add(5, 5)
|
||||||
|
if n := l.recent.Len(); n != 4 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
if n := l.recentEvict.Len(); n != 1 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
if n := l.frequent.Len(); n != 0 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pull in the recently evicted
|
||||||
|
l.Add(1, 1)
|
||||||
|
if n := l.recent.Len(); n != 3 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
if n := l.recentEvict.Len(); n != 1 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
if n := l.frequent.Len(); n != 1 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add 6, should cause another recent evict
|
||||||
|
l.Add(6, 6)
|
||||||
|
if n := l.recent.Len(); n != 3 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
if n := l.recentEvict.Len(); n != 2 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
if n := l.frequent.Len(); n != 1 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func Test2Q(t *testing.T) {
|
||||||
|
l, err := New2Q(128)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < 256; i++ {
|
||||||
|
l.Add(i, i)
|
||||||
|
}
|
||||||
|
if l.Len() != 128 {
|
||||||
|
t.Fatalf("bad len: %v", l.Len())
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, k := range l.Keys() {
|
||||||
|
if v, ok := l.Get(k); !ok || v != k || v != i+128 {
|
||||||
|
t.Fatalf("bad key: %v", k)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for i := 0; i < 128; i++ {
|
||||||
|
_, ok := l.Get(i)
|
||||||
|
if ok {
|
||||||
|
t.Fatalf("should be evicted")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for i := 128; i < 256; i++ {
|
||||||
|
_, ok := l.Get(i)
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("should not be evicted")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for i := 128; i < 192; i++ {
|
||||||
|
l.Remove(i)
|
||||||
|
_, ok := l.Get(i)
|
||||||
|
if ok {
|
||||||
|
t.Fatalf("should be deleted")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
l.Purge()
|
||||||
|
if l.Len() != 0 {
|
||||||
|
t.Fatalf("bad len: %v", l.Len())
|
||||||
|
}
|
||||||
|
if _, ok := l.Get(200); ok {
|
||||||
|
t.Fatalf("should contain nothing")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test that Contains doesn't update recent-ness
|
||||||
|
func Test2Q_Contains(t *testing.T) {
|
||||||
|
l, err := New2Q(2)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
l.Add(1, 1)
|
||||||
|
l.Add(2, 2)
|
||||||
|
if !l.Contains(1) {
|
||||||
|
t.Errorf("1 should be contained")
|
||||||
|
}
|
||||||
|
|
||||||
|
l.Add(3, 3)
|
||||||
|
if l.Contains(1) {
|
||||||
|
t.Errorf("Contains should not have updated recent-ness of 1")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test that Peek doesn't update recent-ness
|
||||||
|
func Test2Q_Peek(t *testing.T) {
|
||||||
|
l, err := New2Q(2)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
l.Add(1, 1)
|
||||||
|
l.Add(2, 2)
|
||||||
|
if v, ok := l.Peek(1); !ok || v != 1 {
|
||||||
|
t.Errorf("1 should be set to 1: %v, %v", v, ok)
|
||||||
|
}
|
||||||
|
|
||||||
|
l.Add(3, 3)
|
||||||
|
if l.Contains(1) {
|
||||||
|
t.Errorf("should not have updated recent-ness of 1")
|
||||||
|
}
|
||||||
|
}
|
|
@ -0,0 +1,362 @@
|
||||||
|
Mozilla Public License, version 2.0
|
||||||
|
|
||||||
|
1. Definitions
|
||||||
|
|
||||||
|
1.1. "Contributor"
|
||||||
|
|
||||||
|
means each individual or legal entity that creates, contributes to the
|
||||||
|
creation of, or owns Covered Software.
|
||||||
|
|
||||||
|
1.2. "Contributor Version"
|
||||||
|
|
||||||
|
means the combination of the Contributions of others (if any) used by a
|
||||||
|
Contributor and that particular Contributor's Contribution.
|
||||||
|
|
||||||
|
1.3. "Contribution"
|
||||||
|
|
||||||
|
means Covered Software of a particular Contributor.
|
||||||
|
|
||||||
|
1.4. "Covered Software"
|
||||||
|
|
||||||
|
means Source Code Form to which the initial Contributor has attached the
|
||||||
|
notice in Exhibit A, the Executable Form of such Source Code Form, and
|
||||||
|
Modifications of such Source Code Form, in each case including portions
|
||||||
|
thereof.
|
||||||
|
|
||||||
|
1.5. "Incompatible With Secondary Licenses"
|
||||||
|
means
|
||||||
|
|
||||||
|
a. that the initial Contributor has attached the notice described in
|
||||||
|
Exhibit B to the Covered Software; or
|
||||||
|
|
||||||
|
b. that the Covered Software was made available under the terms of
|
||||||
|
version 1.1 or earlier of the License, but not also under the terms of
|
||||||
|
a Secondary License.
|
||||||
|
|
||||||
|
1.6. "Executable Form"
|
||||||
|
|
||||||
|
means any form of the work other than Source Code Form.
|
||||||
|
|
||||||
|
1.7. "Larger Work"
|
||||||
|
|
||||||
|
means a work that combines Covered Software with other material, in a
|
||||||
|
separate file or files, that is not Covered Software.
|
||||||
|
|
||||||
|
1.8. "License"
|
||||||
|
|
||||||
|
means this document.
|
||||||
|
|
||||||
|
1.9. "Licensable"
|
||||||
|
|
||||||
|
means having the right to grant, to the maximum extent possible, whether
|
||||||
|
at the time of the initial grant or subsequently, any and all of the
|
||||||
|
rights conveyed by this License.
|
||||||
|
|
||||||
|
1.10. "Modifications"
|
||||||
|
|
||||||
|
means any of the following:
|
||||||
|
|
||||||
|
a. any file in Source Code Form that results from an addition to,
|
||||||
|
deletion from, or modification of the contents of Covered Software; or
|
||||||
|
|
||||||
|
b. any new file in Source Code Form that contains any Covered Software.
|
||||||
|
|
||||||
|
1.11. "Patent Claims" of a Contributor
|
||||||
|
|
||||||
|
means any patent claim(s), including without limitation, method,
|
||||||
|
process, and apparatus claims, in any patent Licensable by such
|
||||||
|
Contributor that would be infringed, but for the grant of the License,
|
||||||
|
by the making, using, selling, offering for sale, having made, import,
|
||||||
|
or transfer of either its Contributions or its Contributor Version.
|
||||||
|
|
||||||
|
1.12. "Secondary License"
|
||||||
|
|
||||||
|
means either the GNU General Public License, Version 2.0, the GNU Lesser
|
||||||
|
General Public License, Version 2.1, the GNU Affero General Public
|
||||||
|
License, Version 3.0, or any later versions of those licenses.
|
||||||
|
|
||||||
|
1.13. "Source Code Form"
|
||||||
|
|
||||||
|
means the form of the work preferred for making modifications.
|
||||||
|
|
||||||
|
1.14. "You" (or "Your")
|
||||||
|
|
||||||
|
means an individual or a legal entity exercising rights under this
|
||||||
|
License. For legal entities, "You" includes any entity that controls, is
|
||||||
|
controlled by, or is under common control with You. For purposes of this
|
||||||
|
definition, "control" means (a) the power, direct or indirect, to cause
|
||||||
|
the direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (b) ownership of more than fifty percent (50%) of the
|
||||||
|
outstanding shares or beneficial ownership of such entity.
|
||||||
|
|
||||||
|
|
||||||
|
2. License Grants and Conditions
|
||||||
|
|
||||||
|
2.1. Grants
|
||||||
|
|
||||||
|
Each Contributor hereby grants You a world-wide, royalty-free,
|
||||||
|
non-exclusive license:
|
||||||
|
|
||||||
|
a. under intellectual property rights (other than patent or trademark)
|
||||||
|
Licensable by such Contributor to use, reproduce, make available,
|
||||||
|
modify, display, perform, distribute, and otherwise exploit its
|
||||||
|
Contributions, either on an unmodified basis, with Modifications, or
|
||||||
|
as part of a Larger Work; and
|
||||||
|
|
||||||
|
b. under Patent Claims of such Contributor to make, use, sell, offer for
|
||||||
|
sale, have made, import, and otherwise transfer either its
|
||||||
|
Contributions or its Contributor Version.
|
||||||
|
|
||||||
|
2.2. Effective Date
|
||||||
|
|
||||||
|
The licenses granted in Section 2.1 with respect to any Contribution
|
||||||
|
become effective for each Contribution on the date the Contributor first
|
||||||
|
distributes such Contribution.
|
||||||
|
|
||||||
|
2.3. Limitations on Grant Scope
|
||||||
|
|
||||||
|
The licenses granted in this Section 2 are the only rights granted under
|
||||||
|
this License. No additional rights or licenses will be implied from the
|
||||||
|
distribution or licensing of Covered Software under this License.
|
||||||
|
Notwithstanding Section 2.1(b) above, no patent license is granted by a
|
||||||
|
Contributor:
|
||||||
|
|
||||||
|
a. for any code that a Contributor has removed from Covered Software; or
|
||||||
|
|
||||||
|
b. for infringements caused by: (i) Your and any other third party's
|
||||||
|
modifications of Covered Software, or (ii) the combination of its
|
||||||
|
Contributions with other software (except as part of its Contributor
|
||||||
|
Version); or
|
||||||
|
|
||||||
|
c. under Patent Claims infringed by Covered Software in the absence of
|
||||||
|
its Contributions.
|
||||||
|
|
||||||
|
This License does not grant any rights in the trademarks, service marks,
|
||||||
|
or logos of any Contributor (except as may be necessary to comply with
|
||||||
|
the notice requirements in Section 3.4).
|
||||||
|
|
||||||
|
2.4. Subsequent Licenses
|
||||||
|
|
||||||
|
No Contributor makes additional grants as a result of Your choice to
|
||||||
|
distribute the Covered Software under a subsequent version of this
|
||||||
|
License (see Section 10.2) or under the terms of a Secondary License (if
|
||||||
|
permitted under the terms of Section 3.3).
|
||||||
|
|
||||||
|
2.5. Representation
|
||||||
|
|
||||||
|
Each Contributor represents that the Contributor believes its
|
||||||
|
Contributions are its original creation(s) or it has sufficient rights to
|
||||||
|
grant the rights to its Contributions conveyed by this License.
|
||||||
|
|
||||||
|
2.6. Fair Use
|
||||||
|
|
||||||
|
This License is not intended to limit any rights You have under
|
||||||
|
applicable copyright doctrines of fair use, fair dealing, or other
|
||||||
|
equivalents.
|
||||||
|
|
||||||
|
2.7. Conditions
|
||||||
|
|
||||||
|
Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in
|
||||||
|
Section 2.1.
|
||||||
|
|
||||||
|
|
||||||
|
3. Responsibilities
|
||||||
|
|
||||||
|
3.1. Distribution of Source Form
|
||||||
|
|
||||||
|
All distribution of Covered Software in Source Code Form, including any
|
||||||
|
Modifications that You create or to which You contribute, must be under
|
||||||
|
the terms of this License. You must inform recipients that the Source
|
||||||
|
Code Form of the Covered Software is governed by the terms of this
|
||||||
|
License, and how they can obtain a copy of this License. You may not
|
||||||
|
attempt to alter or restrict the recipients' rights in the Source Code
|
||||||
|
Form.
|
||||||
|
|
||||||
|
3.2. Distribution of Executable Form
|
||||||
|
|
||||||
|
If You distribute Covered Software in Executable Form then:
|
||||||
|
|
||||||
|
a. such Covered Software must also be made available in Source Code Form,
|
||||||
|
as described in Section 3.1, and You must inform recipients of the
|
||||||
|
Executable Form how they can obtain a copy of such Source Code Form by
|
||||||
|
reasonable means in a timely manner, at a charge no more than the cost
|
||||||
|
of distribution to the recipient; and
|
||||||
|
|
||||||
|
b. You may distribute such Executable Form under the terms of this
|
||||||
|
License, or sublicense it under different terms, provided that the
|
||||||
|
license for the Executable Form does not attempt to limit or alter the
|
||||||
|
recipients' rights in the Source Code Form under this License.
|
||||||
|
|
||||||
|
3.3. Distribution of a Larger Work
|
||||||
|
|
||||||
|
You may create and distribute a Larger Work under terms of Your choice,
|
||||||
|
provided that You also comply with the requirements of this License for
|
||||||
|
the Covered Software. If the Larger Work is a combination of Covered
|
||||||
|
Software with a work governed by one or more Secondary Licenses, and the
|
||||||
|
Covered Software is not Incompatible With Secondary Licenses, this
|
||||||
|
License permits You to additionally distribute such Covered Software
|
||||||
|
under the terms of such Secondary License(s), so that the recipient of
|
||||||
|
the Larger Work may, at their option, further distribute the Covered
|
||||||
|
Software under the terms of either this License or such Secondary
|
||||||
|
License(s).
|
||||||
|
|
||||||
|
3.4. Notices
|
||||||
|
|
||||||
|
You may not remove or alter the substance of any license notices
|
||||||
|
(including copyright notices, patent notices, disclaimers of warranty, or
|
||||||
|
limitations of liability) contained within the Source Code Form of the
|
||||||
|
Covered Software, except that You may alter any license notices to the
|
||||||
|
extent required to remedy known factual inaccuracies.
|
||||||
|
|
||||||
|
3.5. Application of Additional Terms
|
||||||
|
|
||||||
|
You may choose to offer, and to charge a fee for, warranty, support,
|
||||||
|
indemnity or liability obligations to one or more recipients of Covered
|
||||||
|
Software. However, You may do so only on Your own behalf, and not on
|
||||||
|
behalf of any Contributor. You must make it absolutely clear that any
|
||||||
|
such warranty, support, indemnity, or liability obligation is offered by
|
||||||
|
You alone, and You hereby agree to indemnify every Contributor for any
|
||||||
|
liability incurred by such Contributor as a result of warranty, support,
|
||||||
|
indemnity or liability terms You offer. You may include additional
|
||||||
|
disclaimers of warranty and limitations of liability specific to any
|
||||||
|
jurisdiction.
|
||||||
|
|
||||||
|
4. Inability to Comply Due to Statute or Regulation
|
||||||
|
|
||||||
|
If it is impossible for You to comply with any of the terms of this License
|
||||||
|
with respect to some or all of the Covered Software due to statute,
|
||||||
|
judicial order, or regulation then You must: (a) comply with the terms of
|
||||||
|
this License to the maximum extent possible; and (b) describe the
|
||||||
|
limitations and the code they affect. Such description must be placed in a
|
||||||
|
text file included with all distributions of the Covered Software under
|
||||||
|
this License. Except to the extent prohibited by statute or regulation,
|
||||||
|
such description must be sufficiently detailed for a recipient of ordinary
|
||||||
|
skill to be able to understand it.
|
||||||
|
|
||||||
|
5. Termination
|
||||||
|
|
||||||
|
5.1. The rights granted under this License will terminate automatically if You
|
||||||
|
fail to comply with any of its terms. However, if You become compliant,
|
||||||
|
then the rights granted under this License from a particular Contributor
|
||||||
|
are reinstated (a) provisionally, unless and until such Contributor
|
||||||
|
explicitly and finally terminates Your grants, and (b) on an ongoing
|
||||||
|
basis, if such Contributor fails to notify You of the non-compliance by
|
||||||
|
some reasonable means prior to 60 days after You have come back into
|
||||||
|
compliance. Moreover, Your grants from a particular Contributor are
|
||||||
|
reinstated on an ongoing basis if such Contributor notifies You of the
|
||||||
|
non-compliance by some reasonable means, this is the first time You have
|
||||||
|
received notice of non-compliance with this License from such
|
||||||
|
Contributor, and You become compliant prior to 30 days after Your receipt
|
||||||
|
of the notice.
|
||||||
|
|
||||||
|
5.2. If You initiate litigation against any entity by asserting a patent
|
||||||
|
infringement claim (excluding declaratory judgment actions,
|
||||||
|
counter-claims, and cross-claims) alleging that a Contributor Version
|
||||||
|
directly or indirectly infringes any patent, then the rights granted to
|
||||||
|
You by any and all Contributors for the Covered Software under Section
|
||||||
|
2.1 of this License shall terminate.
|
||||||
|
|
||||||
|
5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user
|
||||||
|
license agreements (excluding distributors and resellers) which have been
|
||||||
|
validly granted by You or Your distributors under this License prior to
|
||||||
|
termination shall survive termination.
|
||||||
|
|
||||||
|
6. Disclaimer of Warranty
|
||||||
|
|
||||||
|
Covered Software is provided under this License on an "as is" basis,
|
||||||
|
without warranty of any kind, either expressed, implied, or statutory,
|
||||||
|
including, without limitation, warranties that the Covered Software is free
|
||||||
|
of defects, merchantable, fit for a particular purpose or non-infringing.
|
||||||
|
The entire risk as to the quality and performance of the Covered Software
|
||||||
|
is with You. Should any Covered Software prove defective in any respect,
|
||||||
|
You (not any Contributor) assume the cost of any necessary servicing,
|
||||||
|
repair, or correction. This disclaimer of warranty constitutes an essential
|
||||||
|
part of this License. No use of any Covered Software is authorized under
|
||||||
|
this License except under this disclaimer.
|
||||||
|
|
||||||
|
7. Limitation of Liability
|
||||||
|
|
||||||
|
Under no circumstances and under no legal theory, whether tort (including
|
||||||
|
negligence), contract, or otherwise, shall any Contributor, or anyone who
|
||||||
|
distributes Covered Software as permitted above, be liable to You for any
|
||||||
|
direct, indirect, special, incidental, or consequential damages of any
|
||||||
|
character including, without limitation, damages for lost profits, loss of
|
||||||
|
goodwill, work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses, even if such party shall have been
|
||||||
|
informed of the possibility of such damages. This limitation of liability
|
||||||
|
shall not apply to liability for death or personal injury resulting from
|
||||||
|
such party's negligence to the extent applicable law prohibits such
|
||||||
|
limitation. Some jurisdictions do not allow the exclusion or limitation of
|
||||||
|
incidental or consequential damages, so this exclusion and limitation may
|
||||||
|
not apply to You.
|
||||||
|
|
||||||
|
8. Litigation
|
||||||
|
|
||||||
|
Any litigation relating to this License may be brought only in the courts
|
||||||
|
of a jurisdiction where the defendant maintains its principal place of
|
||||||
|
business and such litigation shall be governed by laws of that
|
||||||
|
jurisdiction, without reference to its conflict-of-law provisions. Nothing
|
||||||
|
in this Section shall prevent a party's ability to bring cross-claims or
|
||||||
|
counter-claims.
|
||||||
|
|
||||||
|
9. Miscellaneous
|
||||||
|
|
||||||
|
This License represents the complete agreement concerning the subject
|
||||||
|
matter hereof. If any provision of this License is held to be
|
||||||
|
unenforceable, such provision shall be reformed only to the extent
|
||||||
|
necessary to make it enforceable. Any law or regulation which provides that
|
||||||
|
the language of a contract shall be construed against the drafter shall not
|
||||||
|
be used to construe this License against a Contributor.
|
||||||
|
|
||||||
|
|
||||||
|
10. Versions of the License
|
||||||
|
|
||||||
|
10.1. New Versions
|
||||||
|
|
||||||
|
Mozilla Foundation is the license steward. Except as provided in Section
|
||||||
|
10.3, no one other than the license steward has the right to modify or
|
||||||
|
publish new versions of this License. Each version will be given a
|
||||||
|
distinguishing version number.
|
||||||
|
|
||||||
|
10.2. Effect of New Versions
|
||||||
|
|
||||||
|
You may distribute the Covered Software under the terms of the version
|
||||||
|
of the License under which You originally received the Covered Software,
|
||||||
|
or under the terms of any subsequent version published by the license
|
||||||
|
steward.
|
||||||
|
|
||||||
|
10.3. Modified Versions
|
||||||
|
|
||||||
|
If you create software not governed by this License, and you want to
|
||||||
|
create a new license for such software, you may create and use a
|
||||||
|
modified version of this License if you rename the license and remove
|
||||||
|
any references to the name of the license steward (except to note that
|
||||||
|
such modified license differs from this License).
|
||||||
|
|
||||||
|
10.4. Distributing Source Code Form that is Incompatible With Secondary
|
||||||
|
Licenses If You choose to distribute Source Code Form that is
|
||||||
|
Incompatible With Secondary Licenses under the terms of this version of
|
||||||
|
the License, the notice described in Exhibit B of this License must be
|
||||||
|
attached.
|
||||||
|
|
||||||
|
Exhibit A - Source Code Form License Notice
|
||||||
|
|
||||||
|
This Source Code Form is subject to the
|
||||||
|
terms of the Mozilla Public License, v.
|
||||||
|
2.0. If a copy of the MPL was not
|
||||||
|
distributed with this file, You can
|
||||||
|
obtain one at
|
||||||
|
http://mozilla.org/MPL/2.0/.
|
||||||
|
|
||||||
|
If it is not possible or desirable to put the notice in a particular file,
|
||||||
|
then You may include the notice in a location (such as a LICENSE file in a
|
||||||
|
relevant directory) where a recipient would be likely to look for such a
|
||||||
|
notice.
|
||||||
|
|
||||||
|
You may add additional accurate notices of copyright ownership.
|
||||||
|
|
||||||
|
Exhibit B - "Incompatible With Secondary Licenses" Notice
|
||||||
|
|
||||||
|
This Source Code Form is "Incompatible
|
||||||
|
With Secondary Licenses", as defined by
|
||||||
|
the Mozilla Public License, v. 2.0.
|
|
@ -0,0 +1,25 @@
|
||||||
|
golang-lru
|
||||||
|
==========
|
||||||
|
|
||||||
|
This provides the `lru` package which implements a fixed-size
|
||||||
|
thread safe LRU cache. It is based on the cache in Groupcache.
|
||||||
|
|
||||||
|
Documentation
|
||||||
|
=============
|
||||||
|
|
||||||
|
Full docs are available on [Godoc](http://godoc.org/github.com/hashicorp/golang-lru)
|
||||||
|
|
||||||
|
Example
|
||||||
|
=======
|
||||||
|
|
||||||
|
Using the LRU is very simple:
|
||||||
|
|
||||||
|
```go
|
||||||
|
l, _ := New(128)
|
||||||
|
for i := 0; i < 256; i++ {
|
||||||
|
l.Add(i, nil)
|
||||||
|
}
|
||||||
|
if l.Len() != 128 {
|
||||||
|
panic(fmt.Sprintf("bad len: %v", l.Len()))
|
||||||
|
}
|
||||||
|
```
|
|
@ -0,0 +1,257 @@
|
||||||
|
package lru
|
||||||
|
|
||||||
|
import (
|
||||||
|
"sync"
|
||||||
|
|
||||||
|
"github.com/hashicorp/golang-lru/simplelru"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ARCCache is a thread-safe fixed size Adaptive Replacement Cache (ARC).
|
||||||
|
// ARC is an enhancement over the standard LRU cache in that tracks both
|
||||||
|
// frequency and recency of use. This avoids a burst in access to new
|
||||||
|
// entries from evicting the frequently used older entries. It adds some
|
||||||
|
// additional tracking overhead to a standard LRU cache, computationally
|
||||||
|
// it is roughly 2x the cost, and the extra memory overhead is linear
|
||||||
|
// with the size of the cache. ARC has been patented by IBM, but is
|
||||||
|
// similar to the TwoQueueCache (2Q) which requires setting parameters.
|
||||||
|
type ARCCache struct {
|
||||||
|
size int // Size is the total capacity of the cache
|
||||||
|
p int // P is the dynamic preference towards T1 or T2
|
||||||
|
|
||||||
|
t1 *simplelru.LRU // T1 is the LRU for recently accessed items
|
||||||
|
b1 *simplelru.LRU // B1 is the LRU for evictions from t1
|
||||||
|
|
||||||
|
t2 *simplelru.LRU // T2 is the LRU for frequently accessed items
|
||||||
|
b2 *simplelru.LRU // B2 is the LRU for evictions from t2
|
||||||
|
|
||||||
|
lock sync.RWMutex
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewARC creates an ARC of the given size
|
||||||
|
func NewARC(size int) (*ARCCache, error) {
|
||||||
|
// Create the sub LRUs
|
||||||
|
b1, err := simplelru.NewLRU(size, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
b2, err := simplelru.NewLRU(size, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
t1, err := simplelru.NewLRU(size, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
t2, err := simplelru.NewLRU(size, nil)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
|
||||||
|
// Initialize the ARC
|
||||||
|
c := &ARCCache{
|
||||||
|
size: size,
|
||||||
|
p: 0,
|
||||||
|
t1: t1,
|
||||||
|
b1: b1,
|
||||||
|
t2: t2,
|
||||||
|
b2: b2,
|
||||||
|
}
|
||||||
|
return c, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get looks up a key's value from the cache.
|
||||||
|
func (c *ARCCache) Get(key interface{}) (interface{}, bool) {
|
||||||
|
c.lock.Lock()
|
||||||
|
defer c.lock.Unlock()
|
||||||
|
|
||||||
|
// Ff the value is contained in T1 (recent), then
|
||||||
|
// promote it to T2 (frequent)
|
||||||
|
if val, ok := c.t1.Peek(key); ok {
|
||||||
|
c.t1.Remove(key)
|
||||||
|
c.t2.Add(key, val)
|
||||||
|
return val, ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if the value is contained in T2 (frequent)
|
||||||
|
if val, ok := c.t2.Get(key); ok {
|
||||||
|
return val, ok
|
||||||
|
}
|
||||||
|
|
||||||
|
// No hit
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add adds a value to the cache.
|
||||||
|
func (c *ARCCache) Add(key, value interface{}) {
|
||||||
|
c.lock.Lock()
|
||||||
|
defer c.lock.Unlock()
|
||||||
|
|
||||||
|
// Check if the value is contained in T1 (recent), and potentially
|
||||||
|
// promote it to frequent T2
|
||||||
|
if c.t1.Contains(key) {
|
||||||
|
c.t1.Remove(key)
|
||||||
|
c.t2.Add(key, value)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if the value is already in T2 (frequent) and update it
|
||||||
|
if c.t2.Contains(key) {
|
||||||
|
c.t2.Add(key, value)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if this value was recently evicted as part of the
|
||||||
|
// recently used list
|
||||||
|
if c.b1.Contains(key) {
|
||||||
|
// T1 set is too small, increase P appropriately
|
||||||
|
delta := 1
|
||||||
|
b1Len := c.b1.Len()
|
||||||
|
b2Len := c.b2.Len()
|
||||||
|
if b2Len > b1Len {
|
||||||
|
delta = b2Len / b1Len
|
||||||
|
}
|
||||||
|
if c.p+delta >= c.size {
|
||||||
|
c.p = c.size
|
||||||
|
} else {
|
||||||
|
c.p += delta
|
||||||
|
}
|
||||||
|
|
||||||
|
// Potentially need to make room in the cache
|
||||||
|
if c.t1.Len()+c.t2.Len() >= c.size {
|
||||||
|
c.replace(false)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove from B1
|
||||||
|
c.b1.Remove(key)
|
||||||
|
|
||||||
|
// Add the key to the frequently used list
|
||||||
|
c.t2.Add(key, value)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Check if this value was recently evicted as part of the
|
||||||
|
// frequently used list
|
||||||
|
if c.b2.Contains(key) {
|
||||||
|
// T2 set is too small, decrease P appropriately
|
||||||
|
delta := 1
|
||||||
|
b1Len := c.b1.Len()
|
||||||
|
b2Len := c.b2.Len()
|
||||||
|
if b1Len > b2Len {
|
||||||
|
delta = b1Len / b2Len
|
||||||
|
}
|
||||||
|
if delta >= c.p {
|
||||||
|
c.p = 0
|
||||||
|
} else {
|
||||||
|
c.p -= delta
|
||||||
|
}
|
||||||
|
|
||||||
|
// Potentially need to make room in the cache
|
||||||
|
if c.t1.Len()+c.t2.Len() >= c.size {
|
||||||
|
c.replace(true)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove from B2
|
||||||
|
c.b2.Remove(key)
|
||||||
|
|
||||||
|
// Add the key to the frequntly used list
|
||||||
|
c.t2.Add(key, value)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// Potentially need to make room in the cache
|
||||||
|
if c.t1.Len()+c.t2.Len() >= c.size {
|
||||||
|
c.replace(false)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Keep the size of the ghost buffers trim
|
||||||
|
if c.b1.Len() > c.size-c.p {
|
||||||
|
c.b1.RemoveOldest()
|
||||||
|
}
|
||||||
|
if c.b2.Len() > c.p {
|
||||||
|
c.b2.RemoveOldest()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add to the recently seen list
|
||||||
|
c.t1.Add(key, value)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// replace is used to adaptively evict from either T1 or T2
|
||||||
|
// based on the current learned value of P
|
||||||
|
func (c *ARCCache) replace(b2ContainsKey bool) {
|
||||||
|
t1Len := c.t1.Len()
|
||||||
|
if t1Len > 0 && (t1Len > c.p || (t1Len == c.p && b2ContainsKey)) {
|
||||||
|
k, _, ok := c.t1.RemoveOldest()
|
||||||
|
if ok {
|
||||||
|
c.b1.Add(k, nil)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
k, _, ok := c.t2.RemoveOldest()
|
||||||
|
if ok {
|
||||||
|
c.b2.Add(k, nil)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Len returns the number of cached entries
|
||||||
|
func (c *ARCCache) Len() int {
|
||||||
|
c.lock.RLock()
|
||||||
|
defer c.lock.RUnlock()
|
||||||
|
return c.t1.Len() + c.t2.Len()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Keys returns all the cached keys
|
||||||
|
func (c *ARCCache) Keys() []interface{} {
|
||||||
|
c.lock.RLock()
|
||||||
|
defer c.lock.RUnlock()
|
||||||
|
k1 := c.t1.Keys()
|
||||||
|
k2 := c.t2.Keys()
|
||||||
|
return append(k1, k2...)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove is used to purge a key from the cache
|
||||||
|
func (c *ARCCache) Remove(key interface{}) {
|
||||||
|
c.lock.Lock()
|
||||||
|
defer c.lock.Unlock()
|
||||||
|
if c.t1.Remove(key) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if c.t2.Remove(key) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if c.b1.Remove(key) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if c.b2.Remove(key) {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Purge is used to clear the cache
|
||||||
|
func (c *ARCCache) Purge() {
|
||||||
|
c.lock.Lock()
|
||||||
|
defer c.lock.Unlock()
|
||||||
|
c.t1.Purge()
|
||||||
|
c.t2.Purge()
|
||||||
|
c.b1.Purge()
|
||||||
|
c.b2.Purge()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Contains is used to check if the cache contains a key
|
||||||
|
// without updating recency or frequency.
|
||||||
|
func (c *ARCCache) Contains(key interface{}) bool {
|
||||||
|
c.lock.RLock()
|
||||||
|
defer c.lock.RUnlock()
|
||||||
|
return c.t1.Contains(key) || c.t2.Contains(key)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Peek is used to inspect the cache value of a key
|
||||||
|
// without updating recency or frequency.
|
||||||
|
func (c *ARCCache) Peek(key interface{}) (interface{}, bool) {
|
||||||
|
c.lock.RLock()
|
||||||
|
defer c.lock.RUnlock()
|
||||||
|
if val, ok := c.t1.Peek(key); ok {
|
||||||
|
return val, ok
|
||||||
|
}
|
||||||
|
return c.t2.Peek(key)
|
||||||
|
}
|
|
@ -0,0 +1,377 @@
|
||||||
|
package lru
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math/rand"
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
rand.Seed(time.Now().Unix())
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkARC_Rand(b *testing.B) {
|
||||||
|
l, err := NewARC(8192)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
trace := make([]int64, b.N*2)
|
||||||
|
for i := 0; i < b.N*2; i++ {
|
||||||
|
trace[i] = rand.Int63() % 32768
|
||||||
|
}
|
||||||
|
|
||||||
|
b.ResetTimer()
|
||||||
|
|
||||||
|
var hit, miss int
|
||||||
|
for i := 0; i < 2*b.N; i++ {
|
||||||
|
if i%2 == 0 {
|
||||||
|
l.Add(trace[i], trace[i])
|
||||||
|
} else {
|
||||||
|
_, ok := l.Get(trace[i])
|
||||||
|
if ok {
|
||||||
|
hit++
|
||||||
|
} else {
|
||||||
|
miss++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(miss))
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkARC_Freq(b *testing.B) {
|
||||||
|
l, err := NewARC(8192)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
trace := make([]int64, b.N*2)
|
||||||
|
for i := 0; i < b.N*2; i++ {
|
||||||
|
if i%2 == 0 {
|
||||||
|
trace[i] = rand.Int63() % 16384
|
||||||
|
} else {
|
||||||
|
trace[i] = rand.Int63() % 32768
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
b.ResetTimer()
|
||||||
|
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
l.Add(trace[i], trace[i])
|
||||||
|
}
|
||||||
|
var hit, miss int
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
_, ok := l.Get(trace[i])
|
||||||
|
if ok {
|
||||||
|
hit++
|
||||||
|
} else {
|
||||||
|
miss++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(miss))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestARC_RandomOps(t *testing.T) {
|
||||||
|
size := 128
|
||||||
|
l, err := NewARC(128)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
n := 200000
|
||||||
|
for i := 0; i < n; i++ {
|
||||||
|
key := rand.Int63() % 512
|
||||||
|
r := rand.Int63()
|
||||||
|
switch r % 3 {
|
||||||
|
case 0:
|
||||||
|
l.Add(key, key)
|
||||||
|
case 1:
|
||||||
|
l.Get(key)
|
||||||
|
case 2:
|
||||||
|
l.Remove(key)
|
||||||
|
}
|
||||||
|
|
||||||
|
if l.t1.Len()+l.t2.Len() > size {
|
||||||
|
t.Fatalf("bad: t1: %d t2: %d b1: %d b2: %d p: %d",
|
||||||
|
l.t1.Len(), l.t2.Len(), l.b1.Len(), l.b2.Len(), l.p)
|
||||||
|
}
|
||||||
|
if l.b1.Len()+l.b2.Len() > size {
|
||||||
|
t.Fatalf("bad: t1: %d t2: %d b1: %d b2: %d p: %d",
|
||||||
|
l.t1.Len(), l.t2.Len(), l.b1.Len(), l.b2.Len(), l.p)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestARC_Get_RecentToFrequent(t *testing.T) {
|
||||||
|
l, err := NewARC(128)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Touch all the entries, should be in t1
|
||||||
|
for i := 0; i < 128; i++ {
|
||||||
|
l.Add(i, i)
|
||||||
|
}
|
||||||
|
if n := l.t1.Len(); n != 128 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
if n := l.t2.Len(); n != 0 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get should upgrade to t2
|
||||||
|
for i := 0; i < 128; i++ {
|
||||||
|
_, ok := l.Get(i)
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("missing: %d", i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if n := l.t1.Len(); n != 0 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
if n := l.t2.Len(); n != 128 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Get be from t2
|
||||||
|
for i := 0; i < 128; i++ {
|
||||||
|
_, ok := l.Get(i)
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("missing: %d", i)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if n := l.t1.Len(); n != 0 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
if n := l.t2.Len(); n != 128 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestARC_Add_RecentToFrequent(t *testing.T) {
|
||||||
|
l, err := NewARC(128)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add initially to t1
|
||||||
|
l.Add(1, 1)
|
||||||
|
if n := l.t1.Len(); n != 1 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
if n := l.t2.Len(); n != 0 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add should upgrade to t2
|
||||||
|
l.Add(1, 1)
|
||||||
|
if n := l.t1.Len(); n != 0 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
if n := l.t2.Len(); n != 1 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Add should remain in t2
|
||||||
|
l.Add(1, 1)
|
||||||
|
if n := l.t1.Len(); n != 0 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
if n := l.t2.Len(); n != 1 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestARC_Adaptive(t *testing.T) {
|
||||||
|
l, err := NewARC(4)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fill t1
|
||||||
|
for i := 0; i < 4; i++ {
|
||||||
|
l.Add(i, i)
|
||||||
|
}
|
||||||
|
if n := l.t1.Len(); n != 4 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Move to t2
|
||||||
|
l.Get(0)
|
||||||
|
l.Get(1)
|
||||||
|
if n := l.t2.Len(); n != 2 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Evict from t1
|
||||||
|
l.Add(4, 4)
|
||||||
|
if n := l.b1.Len(); n != 1 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Current state
|
||||||
|
// t1 : (MRU) [4, 3] (LRU)
|
||||||
|
// t2 : (MRU) [1, 0] (LRU)
|
||||||
|
// b1 : (MRU) [2] (LRU)
|
||||||
|
// b2 : (MRU) [] (LRU)
|
||||||
|
|
||||||
|
// Add 2, should cause hit on b1
|
||||||
|
l.Add(2, 2)
|
||||||
|
if n := l.b1.Len(); n != 1 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
if l.p != 1 {
|
||||||
|
t.Fatalf("bad: %d", l.p)
|
||||||
|
}
|
||||||
|
if n := l.t2.Len(); n != 3 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Current state
|
||||||
|
// t1 : (MRU) [4] (LRU)
|
||||||
|
// t2 : (MRU) [2, 1, 0] (LRU)
|
||||||
|
// b1 : (MRU) [3] (LRU)
|
||||||
|
// b2 : (MRU) [] (LRU)
|
||||||
|
|
||||||
|
// Add 4, should migrate to t2
|
||||||
|
l.Add(4, 4)
|
||||||
|
if n := l.t1.Len(); n != 0 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
if n := l.t2.Len(); n != 4 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Current state
|
||||||
|
// t1 : (MRU) [] (LRU)
|
||||||
|
// t2 : (MRU) [4, 2, 1, 0] (LRU)
|
||||||
|
// b1 : (MRU) [3] (LRU)
|
||||||
|
// b2 : (MRU) [] (LRU)
|
||||||
|
|
||||||
|
// Add 4, should evict to b2
|
||||||
|
l.Add(5, 5)
|
||||||
|
if n := l.t1.Len(); n != 1 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
if n := l.t2.Len(); n != 3 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
if n := l.b2.Len(); n != 1 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Current state
|
||||||
|
// t1 : (MRU) [5] (LRU)
|
||||||
|
// t2 : (MRU) [4, 2, 1] (LRU)
|
||||||
|
// b1 : (MRU) [3] (LRU)
|
||||||
|
// b2 : (MRU) [0] (LRU)
|
||||||
|
|
||||||
|
// Add 0, should decrease p
|
||||||
|
l.Add(0, 0)
|
||||||
|
if n := l.t1.Len(); n != 0 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
if n := l.t2.Len(); n != 4 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
if n := l.b1.Len(); n != 2 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
if n := l.b2.Len(); n != 0 {
|
||||||
|
t.Fatalf("bad: %d", n)
|
||||||
|
}
|
||||||
|
if l.p != 0 {
|
||||||
|
t.Fatalf("bad: %d", l.p)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Current state
|
||||||
|
// t1 : (MRU) [] (LRU)
|
||||||
|
// t2 : (MRU) [0, 4, 2, 1] (LRU)
|
||||||
|
// b1 : (MRU) [5, 3] (LRU)
|
||||||
|
// b2 : (MRU) [0] (LRU)
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestARC(t *testing.T) {
|
||||||
|
l, err := NewARC(128)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < 256; i++ {
|
||||||
|
l.Add(i, i)
|
||||||
|
}
|
||||||
|
if l.Len() != 128 {
|
||||||
|
t.Fatalf("bad len: %v", l.Len())
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, k := range l.Keys() {
|
||||||
|
if v, ok := l.Get(k); !ok || v != k || v != i+128 {
|
||||||
|
t.Fatalf("bad key: %v", k)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for i := 0; i < 128; i++ {
|
||||||
|
_, ok := l.Get(i)
|
||||||
|
if ok {
|
||||||
|
t.Fatalf("should be evicted")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for i := 128; i < 256; i++ {
|
||||||
|
_, ok := l.Get(i)
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("should not be evicted")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for i := 128; i < 192; i++ {
|
||||||
|
l.Remove(i)
|
||||||
|
_, ok := l.Get(i)
|
||||||
|
if ok {
|
||||||
|
t.Fatalf("should be deleted")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
l.Purge()
|
||||||
|
if l.Len() != 0 {
|
||||||
|
t.Fatalf("bad len: %v", l.Len())
|
||||||
|
}
|
||||||
|
if _, ok := l.Get(200); ok {
|
||||||
|
t.Fatalf("should contain nothing")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test that Contains doesn't update recent-ness
|
||||||
|
func TestARC_Contains(t *testing.T) {
|
||||||
|
l, err := NewARC(2)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
l.Add(1, 1)
|
||||||
|
l.Add(2, 2)
|
||||||
|
if !l.Contains(1) {
|
||||||
|
t.Errorf("1 should be contained")
|
||||||
|
}
|
||||||
|
|
||||||
|
l.Add(3, 3)
|
||||||
|
if l.Contains(1) {
|
||||||
|
t.Errorf("Contains should not have updated recent-ness of 1")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test that Peek doesn't update recent-ness
|
||||||
|
func TestARC_Peek(t *testing.T) {
|
||||||
|
l, err := NewARC(2)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
l.Add(1, 1)
|
||||||
|
l.Add(2, 2)
|
||||||
|
if v, ok := l.Peek(1); !ok || v != 1 {
|
||||||
|
t.Errorf("1 should be set to 1: %v, %v", v, ok)
|
||||||
|
}
|
||||||
|
|
||||||
|
l.Add(3, 3)
|
||||||
|
if l.Contains(1) {
|
||||||
|
t.Errorf("should not have updated recent-ness of 1")
|
||||||
|
}
|
||||||
|
}
|
|
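The ARC walkthrough above is easier to follow with the public API in front of you. The following is a minimal usage sketch assembled only from the calls exercised in these tests (NewARC, Add, Get, Contains, Len); it assumes the ARC cache is exported from the root github.com/hashicorp/golang-lru package, and it is illustrative only, not part of the diff.

```go
package main

import (
    "fmt"

    lru "github.com/hashicorp/golang-lru"
)

func main() {
    // Size 128 mirrors the tests above; the t1/t2/b1/b2 bookkeeping is internal.
    cache, err := lru.NewARC(128)
    if err != nil {
        panic(err)
    }

    for i := 0; i < 256; i++ {
        cache.Add(i, i) // once full, the adaptive policy decides what to drop
    }

    fmt.Println(cache.Len())        // 128
    fmt.Println(cache.Contains(10)) // false: early keys were evicted
    if v, ok := cache.Get(200); ok {
        fmt.Println("still cached:", v)
    }
}
```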
@@ -0,0 +1,114 @@
// This package provides a simple LRU cache. It is based on the
// LRU implementation in groupcache:
// https://github.com/golang/groupcache/tree/master/lru
package lru

import (
    "sync"

    "github.com/hashicorp/golang-lru/simplelru"
)

// Cache is a thread-safe fixed size LRU cache.
type Cache struct {
    lru  *simplelru.LRU
    lock sync.RWMutex
}

// New creates an LRU of the given size
func New(size int) (*Cache, error) {
    return NewWithEvict(size, nil)
}

// NewWithEvict constructs a fixed size cache with the given eviction
// callback.
func NewWithEvict(size int, onEvicted func(key interface{}, value interface{})) (*Cache, error) {
    lru, err := simplelru.NewLRU(size, simplelru.EvictCallback(onEvicted))
    if err != nil {
        return nil, err
    }
    c := &Cache{
        lru: lru,
    }
    return c, nil
}

// Purge is used to completely clear the cache
func (c *Cache) Purge() {
    c.lock.Lock()
    c.lru.Purge()
    c.lock.Unlock()
}

// Add adds a value to the cache. Returns true if an eviction occurred.
func (c *Cache) Add(key, value interface{}) bool {
    c.lock.Lock()
    defer c.lock.Unlock()
    return c.lru.Add(key, value)
}

// Get looks up a key's value from the cache.
func (c *Cache) Get(key interface{}) (interface{}, bool) {
    c.lock.Lock()
    defer c.lock.Unlock()
    return c.lru.Get(key)
}

// Check if a key is in the cache, without updating the recent-ness
// or deleting it for being stale.
func (c *Cache) Contains(key interface{}) bool {
    c.lock.RLock()
    defer c.lock.RUnlock()
    return c.lru.Contains(key)
}

// Returns the key value (or undefined if not found) without updating
// the "recently used"-ness of the key.
func (c *Cache) Peek(key interface{}) (interface{}, bool) {
    c.lock.RLock()
    defer c.lock.RUnlock()
    return c.lru.Peek(key)
}

// ContainsOrAdd checks if a key is in the cache without updating the
// recent-ness or deleting it for being stale, and if not, adds the value.
// Returns whether found and whether an eviction occurred.
func (c *Cache) ContainsOrAdd(key, value interface{}) (ok, evict bool) {
    c.lock.Lock()
    defer c.lock.Unlock()

    if c.lru.Contains(key) {
        return true, false
    } else {
        evict := c.lru.Add(key, value)
        return false, evict
    }
}

// Remove removes the provided key from the cache.
func (c *Cache) Remove(key interface{}) {
    c.lock.Lock()
    c.lru.Remove(key)
    c.lock.Unlock()
}

// RemoveOldest removes the oldest item from the cache.
func (c *Cache) RemoveOldest() {
    c.lock.Lock()
    c.lru.RemoveOldest()
    c.lock.Unlock()
}

// Keys returns a slice of the keys in the cache, from oldest to newest.
func (c *Cache) Keys() []interface{} {
    c.lock.RLock()
    defer c.lock.RUnlock()
    return c.lru.Keys()
}

// Len returns the number of items in the cache.
func (c *Cache) Len() int {
    c.lock.RLock()
    defer c.lock.RUnlock()
    return c.lru.Len()
}
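Since this wrapper is the entry point most callers use, here is a short, hedged sketch of the API defined above (NewWithEvict, Add, Peek, ContainsOrAdd, Keys). The tiny size of 2 is only there to make the eviction callback fire immediately; keys and values are placeholders.

```go
package main

import (
    "fmt"

    lru "github.com/hashicorp/golang-lru"
)

func main() {
    // NewWithEvict plugs an eviction callback into the underlying simplelru.LRU.
    cache, err := lru.NewWithEvict(2, func(key, value interface{}) {
        fmt.Printf("evicted %v=%v\n", key, value)
    })
    if err != nil {
        panic(err)
    }

    cache.Add("a", 1)
    cache.Add("b", 2)
    cache.Add("c", 3) // cache is full: "a" (least recently used) is evicted

    cache.Peek("b") // Peek reads without refreshing recent-ness

    found, evicted := cache.ContainsOrAdd("d", 4) // not present, so it is added
    fmt.Println(found, evicted)                   // false true ("b" was evicted)
    fmt.Println(cache.Keys())                     // oldest to newest: [c d]
}
```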
@ -0,0 +1,221 @@
|
||||||
|
package lru
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math/rand"
|
||||||
|
"testing"
|
||||||
|
)
|
||||||
|
|
||||||
|
func BenchmarkLRU_Rand(b *testing.B) {
|
||||||
|
l, err := New(8192)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
trace := make([]int64, b.N*2)
|
||||||
|
for i := 0; i < b.N*2; i++ {
|
||||||
|
trace[i] = rand.Int63() % 32768
|
||||||
|
}
|
||||||
|
|
||||||
|
b.ResetTimer()
|
||||||
|
|
||||||
|
var hit, miss int
|
||||||
|
for i := 0; i < 2*b.N; i++ {
|
||||||
|
if i%2 == 0 {
|
||||||
|
l.Add(trace[i], trace[i])
|
||||||
|
} else {
|
||||||
|
_, ok := l.Get(trace[i])
|
||||||
|
if ok {
|
||||||
|
hit++
|
||||||
|
} else {
|
||||||
|
miss++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(miss))
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkLRU_Freq(b *testing.B) {
|
||||||
|
l, err := New(8192)
|
||||||
|
if err != nil {
|
||||||
|
b.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
trace := make([]int64, b.N*2)
|
||||||
|
for i := 0; i < b.N*2; i++ {
|
||||||
|
if i%2 == 0 {
|
||||||
|
trace[i] = rand.Int63() % 16384
|
||||||
|
} else {
|
||||||
|
trace[i] = rand.Int63() % 32768
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
b.ResetTimer()
|
||||||
|
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
l.Add(trace[i], trace[i])
|
||||||
|
}
|
||||||
|
var hit, miss int
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
_, ok := l.Get(trace[i])
|
||||||
|
if ok {
|
||||||
|
hit++
|
||||||
|
} else {
|
||||||
|
miss++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
b.Logf("hit: %d miss: %d ratio: %f", hit, miss, float64(hit)/float64(miss))
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLRU(t *testing.T) {
|
||||||
|
evictCounter := 0
|
||||||
|
onEvicted := func(k interface{}, v interface{}) {
|
||||||
|
if k != v {
|
||||||
|
t.Fatalf("Evict values not equal (%v!=%v)", k, v)
|
||||||
|
}
|
||||||
|
evictCounter += 1
|
||||||
|
}
|
||||||
|
l, err := NewWithEvict(128, onEvicted)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < 256; i++ {
|
||||||
|
l.Add(i, i)
|
||||||
|
}
|
||||||
|
if l.Len() != 128 {
|
||||||
|
t.Fatalf("bad len: %v", l.Len())
|
||||||
|
}
|
||||||
|
|
||||||
|
if evictCounter != 128 {
|
||||||
|
t.Fatalf("bad evict count: %v", evictCounter)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, k := range l.Keys() {
|
||||||
|
if v, ok := l.Get(k); !ok || v != k || v != i+128 {
|
||||||
|
t.Fatalf("bad key: %v", k)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for i := 0; i < 128; i++ {
|
||||||
|
_, ok := l.Get(i)
|
||||||
|
if ok {
|
||||||
|
t.Fatalf("should be evicted")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for i := 128; i < 256; i++ {
|
||||||
|
_, ok := l.Get(i)
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("should not be evicted")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for i := 128; i < 192; i++ {
|
||||||
|
l.Remove(i)
|
||||||
|
_, ok := l.Get(i)
|
||||||
|
if ok {
|
||||||
|
t.Fatalf("should be deleted")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
l.Get(192) // expect 192 to be last key in l.Keys()
|
||||||
|
|
||||||
|
for i, k := range l.Keys() {
|
||||||
|
if (i < 63 && k != i+193) || (i == 63 && k != 192) {
|
||||||
|
t.Fatalf("out of order key: %v", k)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
l.Purge()
|
||||||
|
if l.Len() != 0 {
|
||||||
|
t.Fatalf("bad len: %v", l.Len())
|
||||||
|
}
|
||||||
|
if _, ok := l.Get(200); ok {
|
||||||
|
t.Fatalf("should contain nothing")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// test that Add returns true/false if an eviction occurred
|
||||||
|
func TestLRUAdd(t *testing.T) {
|
||||||
|
evictCounter := 0
|
||||||
|
onEvicted := func(k interface{}, v interface{}) {
|
||||||
|
evictCounter += 1
|
||||||
|
}
|
||||||
|
|
||||||
|
l, err := NewWithEvict(1, onEvicted)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if l.Add(1, 1) == true || evictCounter != 0 {
|
||||||
|
t.Errorf("should not have an eviction")
|
||||||
|
}
|
||||||
|
if l.Add(2, 2) == false || evictCounter != 1 {
|
||||||
|
t.Errorf("should have an eviction")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// test that Contains doesn't update recent-ness
|
||||||
|
func TestLRUContains(t *testing.T) {
|
||||||
|
l, err := New(2)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
l.Add(1, 1)
|
||||||
|
l.Add(2, 2)
|
||||||
|
if !l.Contains(1) {
|
||||||
|
t.Errorf("1 should be contained")
|
||||||
|
}
|
||||||
|
|
||||||
|
l.Add(3, 3)
|
||||||
|
if l.Contains(1) {
|
||||||
|
t.Errorf("Contains should not have updated recent-ness of 1")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// test that ContainsOrAdd doesn't update recent-ness, and adds the value when missing
|
||||||
|
func TestLRUContainsOrAdd(t *testing.T) {
|
||||||
|
l, err := New(2)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
l.Add(1, 1)
|
||||||
|
l.Add(2, 2)
|
||||||
|
contains, evict := l.ContainsOrAdd(1, 1)
|
||||||
|
if !contains {
|
||||||
|
t.Errorf("1 should be contained")
|
||||||
|
}
|
||||||
|
if evict {
|
||||||
|
t.Errorf("nothing should be evicted here")
|
||||||
|
}
|
||||||
|
|
||||||
|
l.Add(3, 3)
|
||||||
|
contains, evict = l.ContainsOrAdd(1, 1)
|
||||||
|
if contains {
|
||||||
|
t.Errorf("1 should not have been contained")
|
||||||
|
}
|
||||||
|
if !evict {
|
||||||
|
t.Errorf("an eviction should have occurred")
|
||||||
|
}
|
||||||
|
if !l.Contains(1) {
|
||||||
|
t.Errorf("now 1 should be contained")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// test that Peek doesn't update recent-ness
|
||||||
|
func TestLRUPeek(t *testing.T) {
|
||||||
|
l, err := New(2)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
l.Add(1, 1)
|
||||||
|
l.Add(2, 2)
|
||||||
|
if v, ok := l.Peek(1); !ok || v != 1 {
|
||||||
|
t.Errorf("1 should be set to 1: %v, %v", v, ok)
|
||||||
|
}
|
||||||
|
|
||||||
|
l.Add(3, 3)
|
||||||
|
if l.Contains(1) {
|
||||||
|
t.Errorf("should not have updated recent-ness of 1")
|
||||||
|
}
|
||||||
|
}
|
|
@@ -0,0 +1,160 @@
package simplelru

import (
    "container/list"
    "errors"
)

// EvictCallback is used to get a callback when a cache entry is evicted
type EvictCallback func(key interface{}, value interface{})

// LRU implements a non-thread safe fixed size LRU cache
type LRU struct {
    size      int
    evictList *list.List
    items     map[interface{}]*list.Element
    onEvict   EvictCallback
}

// entry is used to hold a value in the evictList
type entry struct {
    key   interface{}
    value interface{}
}

// NewLRU constructs an LRU of the given size
func NewLRU(size int, onEvict EvictCallback) (*LRU, error) {
    if size <= 0 {
        return nil, errors.New("Must provide a positive size")
    }
    c := &LRU{
        size:      size,
        evictList: list.New(),
        items:     make(map[interface{}]*list.Element),
        onEvict:   onEvict,
    }
    return c, nil
}

// Purge is used to completely clear the cache
func (c *LRU) Purge() {
    for k, v := range c.items {
        if c.onEvict != nil {
            c.onEvict(k, v.Value.(*entry).value)
        }
        delete(c.items, k)
    }
    c.evictList.Init()
}

// Add adds a value to the cache. Returns true if an eviction occurred.
func (c *LRU) Add(key, value interface{}) bool {
    // Check for existing item
    if ent, ok := c.items[key]; ok {
        c.evictList.MoveToFront(ent)
        ent.Value.(*entry).value = value
        return false
    }

    // Add new item
    ent := &entry{key, value}
    entry := c.evictList.PushFront(ent)
    c.items[key] = entry

    evict := c.evictList.Len() > c.size
    // Verify size not exceeded
    if evict {
        c.removeOldest()
    }
    return evict
}

// Get looks up a key's value from the cache.
func (c *LRU) Get(key interface{}) (value interface{}, ok bool) {
    if ent, ok := c.items[key]; ok {
        c.evictList.MoveToFront(ent)
        return ent.Value.(*entry).value, true
    }
    return
}

// Check if a key is in the cache, without updating the recent-ness
// or deleting it for being stale.
func (c *LRU) Contains(key interface{}) (ok bool) {
    _, ok = c.items[key]
    return ok
}

// Returns the key value (or undefined if not found) without updating
// the "recently used"-ness of the key.
func (c *LRU) Peek(key interface{}) (value interface{}, ok bool) {
    if ent, ok := c.items[key]; ok {
        return ent.Value.(*entry).value, true
    }
    return nil, ok
}

// Remove removes the provided key from the cache, returning if the
// key was contained.
func (c *LRU) Remove(key interface{}) bool {
    if ent, ok := c.items[key]; ok {
        c.removeElement(ent)
        return true
    }
    return false
}

// RemoveOldest removes the oldest item from the cache.
func (c *LRU) RemoveOldest() (interface{}, interface{}, bool) {
    ent := c.evictList.Back()
    if ent != nil {
        c.removeElement(ent)
        kv := ent.Value.(*entry)
        return kv.key, kv.value, true
    }
    return nil, nil, false
}

// GetOldest returns the oldest entry
func (c *LRU) GetOldest() (interface{}, interface{}, bool) {
    ent := c.evictList.Back()
    if ent != nil {
        kv := ent.Value.(*entry)
        return kv.key, kv.value, true
    }
    return nil, nil, false
}

// Keys returns a slice of the keys in the cache, from oldest to newest.
func (c *LRU) Keys() []interface{} {
    keys := make([]interface{}, len(c.items))
    i := 0
    for ent := c.evictList.Back(); ent != nil; ent = ent.Prev() {
        keys[i] = ent.Value.(*entry).key
        i++
    }
    return keys
}

// Len returns the number of items in the cache.
func (c *LRU) Len() int {
    return c.evictList.Len()
}

// removeOldest removes the oldest item from the cache.
func (c *LRU) removeOldest() {
    ent := c.evictList.Back()
    if ent != nil {
        c.removeElement(ent)
    }
}

// removeElement is used to remove a given list element from the cache
func (c *LRU) removeElement(e *list.Element) {
    c.evictList.Remove(e)
    kv := e.Value.(*entry)
    delete(c.items, kv.key)
    if c.onEvict != nil {
        c.onEvict(kv.key, kv.value)
    }
}
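The design above is the classic pairing of a map (O(1) key lookup) with a container/list doubly linked list (use order): MoveToFront records a use, and Back() is always the eviction candidate. A small sketch using simplelru directly, without the locking wrapper; the size of 3 and the string keys are arbitrary.

```go
package main

import (
    "fmt"

    "github.com/hashicorp/golang-lru/simplelru"
)

func main() {
    onEvict := func(key, value interface{}) { fmt.Println("evicted", key) }
    l, err := simplelru.NewLRU(3, onEvict)
    if err != nil {
        panic(err)
    }

    for _, k := range []string{"a", "b", "c"} {
        l.Add(k, k)
    }
    l.Get("a")      // MoveToFront: "a" is now the most recently used
    l.Add("d", "d") // over capacity: Back() of the list ("b") is evicted

    fmt.Println(l.Keys()) // oldest to newest: [c a d]
    if k, _, ok := l.GetOldest(); ok {
        fmt.Println("next to be evicted:", k) // c
    }
}
```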
@ -0,0 +1,167 @@
|
||||||
|
package simplelru
|
||||||
|
|
||||||
|
import "testing"
|
||||||
|
|
||||||
|
func TestLRU(t *testing.T) {
|
||||||
|
evictCounter := 0
|
||||||
|
onEvicted := func(k interface{}, v interface{}) {
|
||||||
|
if k != v {
|
||||||
|
t.Fatalf("Evict values not equal (%v!=%v)", k, v)
|
||||||
|
}
|
||||||
|
evictCounter += 1
|
||||||
|
}
|
||||||
|
l, err := NewLRU(128, onEvicted)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < 256; i++ {
|
||||||
|
l.Add(i, i)
|
||||||
|
}
|
||||||
|
if l.Len() != 128 {
|
||||||
|
t.Fatalf("bad len: %v", l.Len())
|
||||||
|
}
|
||||||
|
|
||||||
|
if evictCounter != 128 {
|
||||||
|
t.Fatalf("bad evict count: %v", evictCounter)
|
||||||
|
}
|
||||||
|
|
||||||
|
for i, k := range l.Keys() {
|
||||||
|
if v, ok := l.Get(k); !ok || v != k || v != i+128 {
|
||||||
|
t.Fatalf("bad key: %v", k)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for i := 0; i < 128; i++ {
|
||||||
|
_, ok := l.Get(i)
|
||||||
|
if ok {
|
||||||
|
t.Fatalf("should be evicted")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for i := 128; i < 256; i++ {
|
||||||
|
_, ok := l.Get(i)
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("should not be evicted")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
for i := 128; i < 192; i++ {
|
||||||
|
ok := l.Remove(i)
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("should be contained")
|
||||||
|
}
|
||||||
|
ok = l.Remove(i)
|
||||||
|
if ok {
|
||||||
|
t.Fatalf("should not be contained")
|
||||||
|
}
|
||||||
|
_, ok = l.Get(i)
|
||||||
|
if ok {
|
||||||
|
t.Fatalf("should be deleted")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
l.Get(192) // expect 192 to be last key in l.Keys()
|
||||||
|
|
||||||
|
for i, k := range l.Keys() {
|
||||||
|
if (i < 63 && k != i+193) || (i == 63 && k != 192) {
|
||||||
|
t.Fatalf("out of order key: %v", k)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
l.Purge()
|
||||||
|
if l.Len() != 0 {
|
||||||
|
t.Fatalf("bad len: %v", l.Len())
|
||||||
|
}
|
||||||
|
if _, ok := l.Get(200); ok {
|
||||||
|
t.Fatalf("should contain nothing")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestLRU_GetOldest_RemoveOldest(t *testing.T) {
|
||||||
|
l, err := NewLRU(128, nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
for i := 0; i < 256; i++ {
|
||||||
|
l.Add(i, i)
|
||||||
|
}
|
||||||
|
k, _, ok := l.GetOldest()
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("missing")
|
||||||
|
}
|
||||||
|
if k.(int) != 128 {
|
||||||
|
t.Fatalf("bad: %v", k)
|
||||||
|
}
|
||||||
|
|
||||||
|
k, _, ok = l.RemoveOldest()
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("missing")
|
||||||
|
}
|
||||||
|
if k.(int) != 128 {
|
||||||
|
t.Fatalf("bad: %v", k)
|
||||||
|
}
|
||||||
|
|
||||||
|
k, _, ok = l.RemoveOldest()
|
||||||
|
if !ok {
|
||||||
|
t.Fatalf("missing")
|
||||||
|
}
|
||||||
|
if k.(int) != 129 {
|
||||||
|
t.Fatalf("bad: %v", k)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test that Add returns true/false if an eviction occurred
|
||||||
|
func TestLRU_Add(t *testing.T) {
|
||||||
|
evictCounter := 0
|
||||||
|
onEvicted := func(k interface{}, v interface{}) {
|
||||||
|
evictCounter += 1
|
||||||
|
}
|
||||||
|
|
||||||
|
l, err := NewLRU(1, onEvicted)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
if l.Add(1, 1) == true || evictCounter != 0 {
|
||||||
|
t.Errorf("should not have an eviction")
|
||||||
|
}
|
||||||
|
if l.Add(2, 2) == false || evictCounter != 1 {
|
||||||
|
t.Errorf("should have an eviction")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test that Contains doesn't update recent-ness
|
||||||
|
func TestLRU_Contains(t *testing.T) {
|
||||||
|
l, err := NewLRU(2, nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
l.Add(1, 1)
|
||||||
|
l.Add(2, 2)
|
||||||
|
if !l.Contains(1) {
|
||||||
|
t.Errorf("1 should be contained")
|
||||||
|
}
|
||||||
|
|
||||||
|
l.Add(3, 3)
|
||||||
|
if l.Contains(1) {
|
||||||
|
t.Errorf("Contains should not have updated recent-ness of 1")
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Test that Peek doesn't update recent-ness
|
||||||
|
func TestLRU_Peek(t *testing.T) {
|
||||||
|
l, err := NewLRU(2, nil)
|
||||||
|
if err != nil {
|
||||||
|
t.Fatalf("err: %v", err)
|
||||||
|
}
|
||||||
|
|
||||||
|
l.Add(1, 1)
|
||||||
|
l.Add(2, 2)
|
||||||
|
if v, ok := l.Peek(1); !ok || v != 1 {
|
||||||
|
t.Errorf("1 should be set to 1: %v, %v", v, ok)
|
||||||
|
}
|
||||||
|
|
||||||
|
l.Add(3, 3)
|
||||||
|
if l.Contains(1) {
|
||||||
|
t.Errorf("should not have updated recent-ness of 1")
|
||||||
|
}
|
||||||
|
}
|
|
@@ -0,0 +1,21 @@
MIT License

Copyright (c) 2016 Lucas Clemente

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@@ -0,0 +1,28 @@
# aes12

This package modifies the AES-GCM implementation from Go's standard library to use 12 byte tag sizes. It is not intended for a general audience and is used in [quic-go](https://github.com/lucas-clemente/quic-go).

To make use of the in-place encryption / decryption feature, the `dst` parameter to `Seal` and `Open` should be 16 bytes longer than plaintext, not 12.

Command for testing:

```
go test . --bench=. && GOARCH=386 go test . --bench=.
```

The output (on my machine):

```
BenchmarkAESGCMSeal1K-8    3000000       467 ns/op   2192.37 MB/s
BenchmarkAESGCMOpen1K-8    3000000       416 ns/op   2456.72 MB/s
BenchmarkAESGCMSeal8K-8     500000      2742 ns/op   2986.53 MB/s
BenchmarkAESGCMOpen8K-8     500000      2791 ns/op   2934.65 MB/s
PASS
ok      github.com/lucas-clemente/aes12 6.383s
BenchmarkAESGCMSeal1K-8      50000     35233 ns/op     29.06 MB/s
BenchmarkAESGCMOpen1K-8      50000     34529 ns/op     29.66 MB/s
BenchmarkAESGCMSeal8K-8       5000    262678 ns/op     31.19 MB/s
BenchmarkAESGCMOpen8K-8       5000    267296 ns/op     30.65 MB/s
PASS
ok      github.com/lucas-clemente/aes12 6.972s
```
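For orientation, a hedged sketch of how the package is consumed, pieced together from the README note above and the benchmark file further down (aes12.NewCipher and aes12.NewGCM): the only observable difference from crypto/cipher's GCM is the 12-byte tag. The all-zero key/nonce and the sample plaintext are placeholders, not values from the diff.

```go
package main

import (
    "fmt"

    "github.com/lucas-clemente/aes12"
)

func main() {
    key := make([]byte, 16)   // AES-128; all-zero placeholder key
    nonce := make([]byte, 12) // standard 12-byte GCM nonce (placeholder)

    block, err := aes12.NewCipher(key)
    if err != nil {
        panic(err)
    }
    aead, err := aes12.NewGCM(block)
    if err != nil {
        panic(err)
    }

    plaintext := []byte("hello quic")
    sealed := aead.Seal(nil, nonce, plaintext, nil)
    fmt.Println(len(sealed) - len(plaintext)) // 12: the truncated tag, not the usual 16

    opened, err := aead.Open(nil, nonce, sealed, nil)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(opened))
}
```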
@ -0,0 +1,148 @@
|
||||||
|
// Copyright 2015 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// +build amd64
|
||||||
|
|
||||||
|
package aes12
|
||||||
|
|
||||||
|
import "crypto/subtle"
|
||||||
|
|
||||||
|
// The following functions are defined in gcm_amd64.s.
|
||||||
|
func hasGCMAsm() bool
|
||||||
|
|
||||||
|
//go:noescape
|
||||||
|
func aesEncBlock(dst, src *[16]byte, ks []uint32)
|
||||||
|
|
||||||
|
//go:noescape
|
||||||
|
func gcmAesInit(productTable *[256]byte, ks []uint32)
|
||||||
|
|
||||||
|
//go:noescape
|
||||||
|
func gcmAesData(productTable *[256]byte, data []byte, T *[16]byte)
|
||||||
|
|
||||||
|
//go:noescape
|
||||||
|
func gcmAesEnc(productTable *[256]byte, dst, src []byte, ctr, T *[16]byte, ks []uint32)
|
||||||
|
|
||||||
|
//go:noescape
|
||||||
|
func gcmAesDec(productTable *[256]byte, dst, src []byte, ctr, T *[16]byte, ks []uint32)
|
||||||
|
|
||||||
|
//go:noescape
|
||||||
|
func gcmAesFinish(productTable *[256]byte, tagMask, T *[16]byte, pLen, dLen uint64)
|
||||||
|
|
||||||
|
// aesCipherGCM implements crypto/cipher.gcmAble so that crypto/cipher.NewGCM
|
||||||
|
// will use the optimised implementation in this file when possible. Instances
|
||||||
|
// of this type only exist when hasGCMAsm returns true.
|
||||||
|
type aesCipherGCM struct {
|
||||||
|
aesCipherAsm
|
||||||
|
}
|
||||||
|
|
||||||
|
// Assert that aesCipherGCM implements the gcmAble interface.
|
||||||
|
var _ gcmAble = (*aesCipherGCM)(nil)
|
||||||
|
|
||||||
|
// NewGCM returns the AES cipher wrapped in Galois Counter Mode. This is only
|
||||||
|
// called by crypto/cipher.NewGCM via the gcmAble interface.
|
||||||
|
func (c *aesCipherGCM) NewGCM(nonceSize int) (AEAD, error) {
|
||||||
|
g := &gcmAsm{ks: c.enc, nonceSize: nonceSize}
|
||||||
|
gcmAesInit(&g.productTable, g.ks)
|
||||||
|
return g, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
type gcmAsm struct {
|
||||||
|
// ks is the key schedule, the length of which depends on the size of
|
||||||
|
// the AES key.
|
||||||
|
ks []uint32
|
||||||
|
// productTable contains pre-computed multiples of the binary-field
|
||||||
|
// element used in GHASH.
|
||||||
|
productTable [256]byte
|
||||||
|
// nonceSize contains the expected size of the nonce, in bytes.
|
||||||
|
nonceSize int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (g *gcmAsm) NonceSize() int {
|
||||||
|
return g.nonceSize
|
||||||
|
}
|
||||||
|
|
||||||
|
func (*gcmAsm) Overhead() int {
|
||||||
|
return gcmTagSize
|
||||||
|
}
|
||||||
|
|
||||||
|
// Seal encrypts and authenticates plaintext. See the AEAD interface for
|
||||||
|
// details.
|
||||||
|
func (g *gcmAsm) Seal(dst, nonce, plaintext, data []byte) []byte {
|
||||||
|
if len(nonce) != g.nonceSize {
|
||||||
|
panic("cipher: incorrect nonce length given to GCM")
|
||||||
|
}
|
||||||
|
|
||||||
|
var counter, tagMask [gcmBlockSize]byte
|
||||||
|
|
||||||
|
if len(nonce) == gcmStandardNonceSize {
|
||||||
|
// Init counter to nonce||1
|
||||||
|
copy(counter[:], nonce)
|
||||||
|
counter[gcmBlockSize-1] = 1
|
||||||
|
} else {
|
||||||
|
// Otherwise counter = GHASH(nonce)
|
||||||
|
gcmAesData(&g.productTable, nonce, &counter)
|
||||||
|
gcmAesFinish(&g.productTable, &tagMask, &counter, uint64(len(nonce)), uint64(0))
|
||||||
|
}
|
||||||
|
|
||||||
|
aesEncBlock(&tagMask, &counter, g.ks)
|
||||||
|
|
||||||
|
var tagOut [16]byte
|
||||||
|
gcmAesData(&g.productTable, data, &tagOut)
|
||||||
|
|
||||||
|
ret, out := sliceForAppend(dst, len(plaintext)+gcmTagSize)
|
||||||
|
if len(plaintext) > 0 {
|
||||||
|
gcmAesEnc(&g.productTable, out, plaintext, &counter, &tagOut, g.ks)
|
||||||
|
}
|
||||||
|
gcmAesFinish(&g.productTable, &tagMask, &tagOut, uint64(len(plaintext)), uint64(len(data)))
|
||||||
|
copy(out[len(plaintext):], tagOut[:gcmTagSize])
|
||||||
|
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
// Open authenticates and decrypts ciphertext. See the AEAD interface
|
||||||
|
// for details.
|
||||||
|
func (g *gcmAsm) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) {
|
||||||
|
if len(nonce) != g.nonceSize {
|
||||||
|
panic("cipher: incorrect nonce length given to GCM")
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(ciphertext) < gcmTagSize {
|
||||||
|
return nil, errOpen
|
||||||
|
}
|
||||||
|
tag := ciphertext[len(ciphertext)-gcmTagSize:]
|
||||||
|
ciphertext = ciphertext[:len(ciphertext)-gcmTagSize]
|
||||||
|
|
||||||
|
// See GCM spec, section 7.1.
|
||||||
|
var counter, tagMask [gcmBlockSize]byte
|
||||||
|
|
||||||
|
if len(nonce) == gcmStandardNonceSize {
|
||||||
|
// Init counter to nonce||1
|
||||||
|
copy(counter[:], nonce)
|
||||||
|
counter[gcmBlockSize-1] = 1
|
||||||
|
} else {
|
||||||
|
// Otherwise counter = GHASH(nonce)
|
||||||
|
gcmAesData(&g.productTable, nonce, &counter)
|
||||||
|
gcmAesFinish(&g.productTable, &tagMask, &counter, uint64(len(nonce)), uint64(0))
|
||||||
|
}
|
||||||
|
|
||||||
|
aesEncBlock(&tagMask, &counter, g.ks)
|
||||||
|
|
||||||
|
var expectedTag [16]byte
|
||||||
|
gcmAesData(&g.productTable, data, &expectedTag)
|
||||||
|
|
||||||
|
ret, out := sliceForAppend(dst, len(ciphertext))
|
||||||
|
if len(ciphertext) > 0 {
|
||||||
|
gcmAesDec(&g.productTable, out, ciphertext, &counter, &expectedTag, g.ks)
|
||||||
|
}
|
||||||
|
gcmAesFinish(&g.productTable, &tagMask, &expectedTag, uint64(len(ciphertext)), uint64(len(data)))
|
||||||
|
|
||||||
|
if subtle.ConstantTimeCompare(expectedTag[:12], tag) != 1 {
|
||||||
|
for i := range out {
|
||||||
|
out[i] = 0
|
||||||
|
}
|
||||||
|
return nil, errOpen
|
||||||
|
}
|
||||||
|
|
||||||
|
return ret, nil
|
||||||
|
}
|
|
@ -0,0 +1,285 @@
|
||||||
|
// Copyright 2012 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
#include "textflag.h"
|
||||||
|
|
||||||
|
// func hasAsm() bool
|
||||||
|
// returns whether AES-NI is supported
|
||||||
|
TEXT ·hasAsm(SB),NOSPLIT,$0
|
||||||
|
XORQ AX, AX
|
||||||
|
INCL AX
|
||||||
|
CPUID
|
||||||
|
SHRQ $25, CX
|
||||||
|
ANDQ $1, CX
|
||||||
|
MOVB CX, ret+0(FP)
|
||||||
|
RET
|
||||||
|
|
||||||
|
// func encryptBlockAsm(nr int, xk *uint32, dst, src *byte)
|
||||||
|
TEXT ·encryptBlockAsm(SB),NOSPLIT,$0
|
||||||
|
MOVQ nr+0(FP), CX
|
||||||
|
MOVQ xk+8(FP), AX
|
||||||
|
MOVQ dst+16(FP), DX
|
||||||
|
MOVQ src+24(FP), BX
|
||||||
|
MOVUPS 0(AX), X1
|
||||||
|
MOVUPS 0(BX), X0
|
||||||
|
ADDQ $16, AX
|
||||||
|
PXOR X1, X0
|
||||||
|
SUBQ $12, CX
|
||||||
|
JE Lenc196
|
||||||
|
JB Lenc128
|
||||||
|
Lenc256:
|
||||||
|
MOVUPS 0(AX), X1
|
||||||
|
AESENC X1, X0
|
||||||
|
MOVUPS 16(AX), X1
|
||||||
|
AESENC X1, X0
|
||||||
|
ADDQ $32, AX
|
||||||
|
Lenc196:
|
||||||
|
MOVUPS 0(AX), X1
|
||||||
|
AESENC X1, X0
|
||||||
|
MOVUPS 16(AX), X1
|
||||||
|
AESENC X1, X0
|
||||||
|
ADDQ $32, AX
|
||||||
|
Lenc128:
|
||||||
|
MOVUPS 0(AX), X1
|
||||||
|
AESENC X1, X0
|
||||||
|
MOVUPS 16(AX), X1
|
||||||
|
AESENC X1, X0
|
||||||
|
MOVUPS 32(AX), X1
|
||||||
|
AESENC X1, X0
|
||||||
|
MOVUPS 48(AX), X1
|
||||||
|
AESENC X1, X0
|
||||||
|
MOVUPS 64(AX), X1
|
||||||
|
AESENC X1, X0
|
||||||
|
MOVUPS 80(AX), X1
|
||||||
|
AESENC X1, X0
|
||||||
|
MOVUPS 96(AX), X1
|
||||||
|
AESENC X1, X0
|
||||||
|
MOVUPS 112(AX), X1
|
||||||
|
AESENC X1, X0
|
||||||
|
MOVUPS 128(AX), X1
|
||||||
|
AESENC X1, X0
|
||||||
|
MOVUPS 144(AX), X1
|
||||||
|
AESENCLAST X1, X0
|
||||||
|
MOVUPS X0, 0(DX)
|
||||||
|
RET
|
||||||
|
|
||||||
|
// func decryptBlockAsm(nr int, xk *uint32, dst, src *byte)
|
||||||
|
TEXT ·decryptBlockAsm(SB),NOSPLIT,$0
|
||||||
|
MOVQ nr+0(FP), CX
|
||||||
|
MOVQ xk+8(FP), AX
|
||||||
|
MOVQ dst+16(FP), DX
|
||||||
|
MOVQ src+24(FP), BX
|
||||||
|
MOVUPS 0(AX), X1
|
||||||
|
MOVUPS 0(BX), X0
|
||||||
|
ADDQ $16, AX
|
||||||
|
PXOR X1, X0
|
||||||
|
SUBQ $12, CX
|
||||||
|
JE Ldec196
|
||||||
|
JB Ldec128
|
||||||
|
Ldec256:
|
||||||
|
MOVUPS 0(AX), X1
|
||||||
|
AESDEC X1, X0
|
||||||
|
MOVUPS 16(AX), X1
|
||||||
|
AESDEC X1, X0
|
||||||
|
ADDQ $32, AX
|
||||||
|
Ldec196:
|
||||||
|
MOVUPS 0(AX), X1
|
||||||
|
AESDEC X1, X0
|
||||||
|
MOVUPS 16(AX), X1
|
||||||
|
AESDEC X1, X0
|
||||||
|
ADDQ $32, AX
|
||||||
|
Ldec128:
|
||||||
|
MOVUPS 0(AX), X1
|
||||||
|
AESDEC X1, X0
|
||||||
|
MOVUPS 16(AX), X1
|
||||||
|
AESDEC X1, X0
|
||||||
|
MOVUPS 32(AX), X1
|
||||||
|
AESDEC X1, X0
|
||||||
|
MOVUPS 48(AX), X1
|
||||||
|
AESDEC X1, X0
|
||||||
|
MOVUPS 64(AX), X1
|
||||||
|
AESDEC X1, X0
|
||||||
|
MOVUPS 80(AX), X1
|
||||||
|
AESDEC X1, X0
|
||||||
|
MOVUPS 96(AX), X1
|
||||||
|
AESDEC X1, X0
|
||||||
|
MOVUPS 112(AX), X1
|
||||||
|
AESDEC X1, X0
|
||||||
|
MOVUPS 128(AX), X1
|
||||||
|
AESDEC X1, X0
|
||||||
|
MOVUPS 144(AX), X1
|
||||||
|
AESDECLAST X1, X0
|
||||||
|
MOVUPS X0, 0(DX)
|
||||||
|
RET
|
||||||
|
|
||||||
|
// func expandKeyAsm(nr int, key *byte, enc, dec *uint32)
|
||||||
|
// Note that round keys are stored in uint128 format, not uint32
|
||||||
|
TEXT ·expandKeyAsm(SB),NOSPLIT,$0
|
||||||
|
MOVQ nr+0(FP), CX
|
||||||
|
MOVQ key+8(FP), AX
|
||||||
|
MOVQ enc+16(FP), BX
|
||||||
|
MOVQ dec+24(FP), DX
|
||||||
|
MOVUPS (AX), X0
|
||||||
|
// enc
|
||||||
|
MOVUPS X0, (BX)
|
||||||
|
ADDQ $16, BX
|
||||||
|
PXOR X4, X4 // _expand_key_* expect X4 to be zero
|
||||||
|
CMPL CX, $12
|
||||||
|
JE Lexp_enc196
|
||||||
|
JB Lexp_enc128
|
||||||
|
Lexp_enc256:
|
||||||
|
MOVUPS 16(AX), X2
|
||||||
|
MOVUPS X2, (BX)
|
||||||
|
ADDQ $16, BX
|
||||||
|
AESKEYGENASSIST $0x01, X2, X1
|
||||||
|
CALL _expand_key_256a<>(SB)
|
||||||
|
AESKEYGENASSIST $0x01, X0, X1
|
||||||
|
CALL _expand_key_256b<>(SB)
|
||||||
|
AESKEYGENASSIST $0x02, X2, X1
|
||||||
|
CALL _expand_key_256a<>(SB)
|
||||||
|
AESKEYGENASSIST $0x02, X0, X1
|
||||||
|
CALL _expand_key_256b<>(SB)
|
||||||
|
AESKEYGENASSIST $0x04, X2, X1
|
||||||
|
CALL _expand_key_256a<>(SB)
|
||||||
|
AESKEYGENASSIST $0x04, X0, X1
|
||||||
|
CALL _expand_key_256b<>(SB)
|
||||||
|
AESKEYGENASSIST $0x08, X2, X1
|
||||||
|
CALL _expand_key_256a<>(SB)
|
||||||
|
AESKEYGENASSIST $0x08, X0, X1
|
||||||
|
CALL _expand_key_256b<>(SB)
|
||||||
|
AESKEYGENASSIST $0x10, X2, X1
|
||||||
|
CALL _expand_key_256a<>(SB)
|
||||||
|
AESKEYGENASSIST $0x10, X0, X1
|
||||||
|
CALL _expand_key_256b<>(SB)
|
||||||
|
AESKEYGENASSIST $0x20, X2, X1
|
||||||
|
CALL _expand_key_256a<>(SB)
|
||||||
|
AESKEYGENASSIST $0x20, X0, X1
|
||||||
|
CALL _expand_key_256b<>(SB)
|
||||||
|
AESKEYGENASSIST $0x40, X2, X1
|
||||||
|
CALL _expand_key_256a<>(SB)
|
||||||
|
JMP Lexp_dec
|
||||||
|
Lexp_enc196:
|
||||||
|
MOVQ 16(AX), X2
|
||||||
|
AESKEYGENASSIST $0x01, X2, X1
|
||||||
|
CALL _expand_key_192a<>(SB)
|
||||||
|
AESKEYGENASSIST $0x02, X2, X1
|
||||||
|
CALL _expand_key_192b<>(SB)
|
||||||
|
AESKEYGENASSIST $0x04, X2, X1
|
||||||
|
CALL _expand_key_192a<>(SB)
|
||||||
|
AESKEYGENASSIST $0x08, X2, X1
|
||||||
|
CALL _expand_key_192b<>(SB)
|
||||||
|
AESKEYGENASSIST $0x10, X2, X1
|
||||||
|
CALL _expand_key_192a<>(SB)
|
||||||
|
AESKEYGENASSIST $0x20, X2, X1
|
||||||
|
CALL _expand_key_192b<>(SB)
|
||||||
|
AESKEYGENASSIST $0x40, X2, X1
|
||||||
|
CALL _expand_key_192a<>(SB)
|
||||||
|
AESKEYGENASSIST $0x80, X2, X1
|
||||||
|
CALL _expand_key_192b<>(SB)
|
||||||
|
JMP Lexp_dec
|
||||||
|
Lexp_enc128:
|
||||||
|
AESKEYGENASSIST $0x01, X0, X1
|
||||||
|
CALL _expand_key_128<>(SB)
|
||||||
|
AESKEYGENASSIST $0x02, X0, X1
|
||||||
|
CALL _expand_key_128<>(SB)
|
||||||
|
AESKEYGENASSIST $0x04, X0, X1
|
||||||
|
CALL _expand_key_128<>(SB)
|
||||||
|
AESKEYGENASSIST $0x08, X0, X1
|
||||||
|
CALL _expand_key_128<>(SB)
|
||||||
|
AESKEYGENASSIST $0x10, X0, X1
|
||||||
|
CALL _expand_key_128<>(SB)
|
||||||
|
AESKEYGENASSIST $0x20, X0, X1
|
||||||
|
CALL _expand_key_128<>(SB)
|
||||||
|
AESKEYGENASSIST $0x40, X0, X1
|
||||||
|
CALL _expand_key_128<>(SB)
|
||||||
|
AESKEYGENASSIST $0x80, X0, X1
|
||||||
|
CALL _expand_key_128<>(SB)
|
||||||
|
AESKEYGENASSIST $0x1b, X0, X1
|
||||||
|
CALL _expand_key_128<>(SB)
|
||||||
|
AESKEYGENASSIST $0x36, X0, X1
|
||||||
|
CALL _expand_key_128<>(SB)
|
||||||
|
Lexp_dec:
|
||||||
|
// dec
|
||||||
|
SUBQ $16, BX
|
||||||
|
MOVUPS (BX), X1
|
||||||
|
MOVUPS X1, (DX)
|
||||||
|
DECQ CX
|
||||||
|
Lexp_dec_loop:
|
||||||
|
MOVUPS -16(BX), X1
|
||||||
|
AESIMC X1, X0
|
||||||
|
MOVUPS X0, 16(DX)
|
||||||
|
SUBQ $16, BX
|
||||||
|
ADDQ $16, DX
|
||||||
|
DECQ CX
|
||||||
|
JNZ Lexp_dec_loop
|
||||||
|
MOVUPS -16(BX), X0
|
||||||
|
MOVUPS X0, 16(DX)
|
||||||
|
RET
|
||||||
|
|
||||||
|
TEXT _expand_key_128<>(SB),NOSPLIT,$0
|
||||||
|
PSHUFD $0xff, X1, X1
|
||||||
|
SHUFPS $0x10, X0, X4
|
||||||
|
PXOR X4, X0
|
||||||
|
SHUFPS $0x8c, X0, X4
|
||||||
|
PXOR X4, X0
|
||||||
|
PXOR X1, X0
|
||||||
|
MOVUPS X0, (BX)
|
||||||
|
ADDQ $16, BX
|
||||||
|
RET
|
||||||
|
|
||||||
|
TEXT _expand_key_192a<>(SB),NOSPLIT,$0
|
||||||
|
PSHUFD $0x55, X1, X1
|
||||||
|
SHUFPS $0x10, X0, X4
|
||||||
|
PXOR X4, X0
|
||||||
|
SHUFPS $0x8c, X0, X4
|
||||||
|
PXOR X4, X0
|
||||||
|
PXOR X1, X0
|
||||||
|
|
||||||
|
MOVAPS X2, X5
|
||||||
|
MOVAPS X2, X6
|
||||||
|
PSLLDQ $0x4, X5
|
||||||
|
PSHUFD $0xff, X0, X3
|
||||||
|
PXOR X3, X2
|
||||||
|
PXOR X5, X2
|
||||||
|
|
||||||
|
MOVAPS X0, X1
|
||||||
|
SHUFPS $0x44, X0, X6
|
||||||
|
MOVUPS X6, (BX)
|
||||||
|
SHUFPS $0x4e, X2, X1
|
||||||
|
MOVUPS X1, 16(BX)
|
||||||
|
ADDQ $32, BX
|
||||||
|
RET
|
||||||
|
|
||||||
|
TEXT _expand_key_192b<>(SB),NOSPLIT,$0
|
||||||
|
PSHUFD $0x55, X1, X1
|
||||||
|
SHUFPS $0x10, X0, X4
|
||||||
|
PXOR X4, X0
|
||||||
|
SHUFPS $0x8c, X0, X4
|
||||||
|
PXOR X4, X0
|
||||||
|
PXOR X1, X0
|
||||||
|
|
||||||
|
MOVAPS X2, X5
|
||||||
|
PSLLDQ $0x4, X5
|
||||||
|
PSHUFD $0xff, X0, X3
|
||||||
|
PXOR X3, X2
|
||||||
|
PXOR X5, X2
|
||||||
|
|
||||||
|
MOVUPS X0, (BX)
|
||||||
|
ADDQ $16, BX
|
||||||
|
RET
|
||||||
|
|
||||||
|
TEXT _expand_key_256a<>(SB),NOSPLIT,$0
|
||||||
|
JMP _expand_key_128<>(SB)
|
||||||
|
|
||||||
|
TEXT _expand_key_256b<>(SB),NOSPLIT,$0
|
||||||
|
PSHUFD $0xaa, X1, X1
|
||||||
|
SHUFPS $0x10, X2, X4
|
||||||
|
PXOR X4, X2
|
||||||
|
SHUFPS $0x8c, X2, X4
|
||||||
|
PXOR X4, X2
|
||||||
|
PXOR X1, X2
|
||||||
|
|
||||||
|
MOVUPS X2, (BX)
|
||||||
|
ADDQ $16, BX
|
||||||
|
RET
|
|
@ -0,0 +1,63 @@
|
||||||
|
// Copyright 2013 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package aes12_test
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/lucas-clemente/aes12"
|
||||||
|
)
|
||||||
|
|
||||||
|
func benchmarkAESGCMSeal(b *testing.B, buf []byte) {
|
||||||
|
b.SetBytes(int64(len(buf)))
|
||||||
|
|
||||||
|
var key [16]byte
|
||||||
|
var nonce [12]byte
|
||||||
|
var ad [13]byte
|
||||||
|
aes, _ := aes12.NewCipher(key[:])
|
||||||
|
aesgcm, _ := aes12.NewGCM(aes)
|
||||||
|
var out []byte
|
||||||
|
|
||||||
|
b.ResetTimer()
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
out = aesgcm.Seal(out[:0], nonce[:], buf, ad[:])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func benchmarkAESGCMOpen(b *testing.B, buf []byte) {
|
||||||
|
b.SetBytes(int64(len(buf)))
|
||||||
|
|
||||||
|
var key [16]byte
|
||||||
|
var nonce [12]byte
|
||||||
|
var ad [13]byte
|
||||||
|
aes, _ := aes12.NewCipher(key[:])
|
||||||
|
aesgcm, _ := aes12.NewGCM(aes)
|
||||||
|
var out []byte
|
||||||
|
out = aesgcm.Seal(out[:0], nonce[:], buf, ad[:])
|
||||||
|
|
||||||
|
b.ResetTimer()
|
||||||
|
for i := 0; i < b.N; i++ {
|
||||||
|
_, err := aesgcm.Open(buf[:0], nonce[:], out, ad[:])
|
||||||
|
if err != nil {
|
||||||
|
b.Errorf("Open: %v", err)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkAESGCMSeal1K(b *testing.B) {
|
||||||
|
benchmarkAESGCMSeal(b, make([]byte, 1024))
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkAESGCMOpen1K(b *testing.B) {
|
||||||
|
benchmarkAESGCMOpen(b, make([]byte, 1024))
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkAESGCMSeal8K(b *testing.B) {
|
||||||
|
benchmarkAESGCMSeal(b, make([]byte, 8*1024))
|
||||||
|
}
|
||||||
|
|
||||||
|
func BenchmarkAESGCMOpen8K(b *testing.B) {
|
||||||
|
benchmarkAESGCMOpen(b, make([]byte, 8*1024))
|
||||||
|
}
|
|
@ -0,0 +1,176 @@
|
||||||
|
// Copyright 2009 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// This Go implementation is derived in part from the reference
|
||||||
|
// ANSI C implementation, which carries the following notice:
|
||||||
|
//
|
||||||
|
// rijndael-alg-fst.c
|
||||||
|
//
|
||||||
|
// @version 3.0 (December 2000)
|
||||||
|
//
|
||||||
|
// Optimised ANSI C code for the Rijndael cipher (now AES)
|
||||||
|
//
|
||||||
|
// @author Vincent Rijmen <vincent.rijmen@esat.kuleuven.ac.be>
|
||||||
|
// @author Antoon Bosselaers <antoon.bosselaers@esat.kuleuven.ac.be>
|
||||||
|
// @author Paulo Barreto <paulo.barreto@terra.com.br>
|
||||||
|
//
|
||||||
|
// This code is hereby placed in the public domain.
|
||||||
|
//
|
||||||
|
// THIS SOFTWARE IS PROVIDED BY THE AUTHORS ''AS IS'' AND ANY EXPRESS
|
||||||
|
// OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
|
||||||
|
// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
|
||||||
|
// ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE
|
||||||
|
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
|
||||||
|
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
|
||||||
|
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
|
||||||
|
// BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
|
||||||
|
// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
|
||||||
|
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
|
||||||
|
// EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||||
|
//
|
||||||
|
// See FIPS 197 for specification, and see Daemen and Rijmen's Rijndael submission
|
||||||
|
// for implementation details.
|
||||||
|
// http://www.csrc.nist.gov/publications/fips/fips197/fips-197.pdf
|
||||||
|
// http://csrc.nist.gov/archive/aes/rijndael/Rijndael-ammended.pdf
|
||||||
|
|
||||||
|
package aes12
|
||||||
|
|
||||||
|
// Encrypt one block from src into dst, using the expanded key xk.
|
||||||
|
func encryptBlockGo(xk []uint32, dst, src []byte) {
|
||||||
|
var s0, s1, s2, s3, t0, t1, t2, t3 uint32
|
||||||
|
|
||||||
|
s0 = uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
|
||||||
|
s1 = uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
|
||||||
|
s2 = uint32(src[8])<<24 | uint32(src[9])<<16 | uint32(src[10])<<8 | uint32(src[11])
|
||||||
|
s3 = uint32(src[12])<<24 | uint32(src[13])<<16 | uint32(src[14])<<8 | uint32(src[15])
|
||||||
|
|
||||||
|
// First round just XORs input with key.
|
||||||
|
s0 ^= xk[0]
|
||||||
|
s1 ^= xk[1]
|
||||||
|
s2 ^= xk[2]
|
||||||
|
s3 ^= xk[3]
|
||||||
|
|
||||||
|
// Middle rounds shuffle using tables.
|
||||||
|
// Number of rounds is set by length of expanded key.
|
||||||
|
nr := len(xk)/4 - 2 // - 2: one above, one more below
|
||||||
|
k := 4
|
||||||
|
for r := 0; r < nr; r++ {
|
||||||
|
t0 = xk[k+0] ^ te0[uint8(s0>>24)] ^ te1[uint8(s1>>16)] ^ te2[uint8(s2>>8)] ^ te3[uint8(s3)]
|
||||||
|
t1 = xk[k+1] ^ te0[uint8(s1>>24)] ^ te1[uint8(s2>>16)] ^ te2[uint8(s3>>8)] ^ te3[uint8(s0)]
|
||||||
|
t2 = xk[k+2] ^ te0[uint8(s2>>24)] ^ te1[uint8(s3>>16)] ^ te2[uint8(s0>>8)] ^ te3[uint8(s1)]
|
||||||
|
t3 = xk[k+3] ^ te0[uint8(s3>>24)] ^ te1[uint8(s0>>16)] ^ te2[uint8(s1>>8)] ^ te3[uint8(s2)]
|
||||||
|
k += 4
|
||||||
|
s0, s1, s2, s3 = t0, t1, t2, t3
|
||||||
|
}
|
||||||
|
|
||||||
|
// Last round uses s-box directly and XORs to produce output.
|
||||||
|
s0 = uint32(sbox0[t0>>24])<<24 | uint32(sbox0[t1>>16&0xff])<<16 | uint32(sbox0[t2>>8&0xff])<<8 | uint32(sbox0[t3&0xff])
|
||||||
|
s1 = uint32(sbox0[t1>>24])<<24 | uint32(sbox0[t2>>16&0xff])<<16 | uint32(sbox0[t3>>8&0xff])<<8 | uint32(sbox0[t0&0xff])
|
||||||
|
s2 = uint32(sbox0[t2>>24])<<24 | uint32(sbox0[t3>>16&0xff])<<16 | uint32(sbox0[t0>>8&0xff])<<8 | uint32(sbox0[t1&0xff])
|
||||||
|
s3 = uint32(sbox0[t3>>24])<<24 | uint32(sbox0[t0>>16&0xff])<<16 | uint32(sbox0[t1>>8&0xff])<<8 | uint32(sbox0[t2&0xff])
|
||||||
|
|
||||||
|
s0 ^= xk[k+0]
|
||||||
|
s1 ^= xk[k+1]
|
||||||
|
s2 ^= xk[k+2]
|
||||||
|
s3 ^= xk[k+3]
|
||||||
|
|
||||||
|
dst[0], dst[1], dst[2], dst[3] = byte(s0>>24), byte(s0>>16), byte(s0>>8), byte(s0)
|
||||||
|
dst[4], dst[5], dst[6], dst[7] = byte(s1>>24), byte(s1>>16), byte(s1>>8), byte(s1)
|
||||||
|
dst[8], dst[9], dst[10], dst[11] = byte(s2>>24), byte(s2>>16), byte(s2>>8), byte(s2)
|
||||||
|
dst[12], dst[13], dst[14], dst[15] = byte(s3>>24), byte(s3>>16), byte(s3>>8), byte(s3)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Decrypt one block from src into dst, using the expanded key xk.
|
||||||
|
func decryptBlockGo(xk []uint32, dst, src []byte) {
|
||||||
|
var s0, s1, s2, s3, t0, t1, t2, t3 uint32
|
||||||
|
|
||||||
|
s0 = uint32(src[0])<<24 | uint32(src[1])<<16 | uint32(src[2])<<8 | uint32(src[3])
|
||||||
|
s1 = uint32(src[4])<<24 | uint32(src[5])<<16 | uint32(src[6])<<8 | uint32(src[7])
|
||||||
|
s2 = uint32(src[8])<<24 | uint32(src[9])<<16 | uint32(src[10])<<8 | uint32(src[11])
|
||||||
|
s3 = uint32(src[12])<<24 | uint32(src[13])<<16 | uint32(src[14])<<8 | uint32(src[15])
|
||||||
|
|
||||||
|
// First round just XORs input with key.
|
||||||
|
s0 ^= xk[0]
|
||||||
|
s1 ^= xk[1]
|
||||||
|
s2 ^= xk[2]
|
||||||
|
s3 ^= xk[3]
|
||||||
|
|
||||||
|
// Middle rounds shuffle using tables.
|
||||||
|
// Number of rounds is set by length of expanded key.
|
||||||
|
nr := len(xk)/4 - 2 // - 2: one above, one more below
|
||||||
|
k := 4
|
||||||
|
for r := 0; r < nr; r++ {
|
||||||
|
t0 = xk[k+0] ^ td0[uint8(s0>>24)] ^ td1[uint8(s3>>16)] ^ td2[uint8(s2>>8)] ^ td3[uint8(s1)]
|
||||||
|
t1 = xk[k+1] ^ td0[uint8(s1>>24)] ^ td1[uint8(s0>>16)] ^ td2[uint8(s3>>8)] ^ td3[uint8(s2)]
|
||||||
|
t2 = xk[k+2] ^ td0[uint8(s2>>24)] ^ td1[uint8(s1>>16)] ^ td2[uint8(s0>>8)] ^ td3[uint8(s3)]
|
||||||
|
t3 = xk[k+3] ^ td0[uint8(s3>>24)] ^ td1[uint8(s2>>16)] ^ td2[uint8(s1>>8)] ^ td3[uint8(s0)]
|
||||||
|
k += 4
|
||||||
|
s0, s1, s2, s3 = t0, t1, t2, t3
|
||||||
|
}
|
||||||
|
|
||||||
|
// Last round uses s-box directly and XORs to produce output.
|
||||||
|
s0 = uint32(sbox1[t0>>24])<<24 | uint32(sbox1[t3>>16&0xff])<<16 | uint32(sbox1[t2>>8&0xff])<<8 | uint32(sbox1[t1&0xff])
|
||||||
|
s1 = uint32(sbox1[t1>>24])<<24 | uint32(sbox1[t0>>16&0xff])<<16 | uint32(sbox1[t3>>8&0xff])<<8 | uint32(sbox1[t2&0xff])
|
||||||
|
s2 = uint32(sbox1[t2>>24])<<24 | uint32(sbox1[t1>>16&0xff])<<16 | uint32(sbox1[t0>>8&0xff])<<8 | uint32(sbox1[t3&0xff])
|
||||||
|
s3 = uint32(sbox1[t3>>24])<<24 | uint32(sbox1[t2>>16&0xff])<<16 | uint32(sbox1[t1>>8&0xff])<<8 | uint32(sbox1[t0&0xff])
|
||||||
|
|
||||||
|
s0 ^= xk[k+0]
|
||||||
|
s1 ^= xk[k+1]
|
||||||
|
s2 ^= xk[k+2]
|
||||||
|
s3 ^= xk[k+3]
|
||||||
|
|
||||||
|
dst[0], dst[1], dst[2], dst[3] = byte(s0>>24), byte(s0>>16), byte(s0>>8), byte(s0)
|
||||||
|
dst[4], dst[5], dst[6], dst[7] = byte(s1>>24), byte(s1>>16), byte(s1>>8), byte(s1)
|
||||||
|
dst[8], dst[9], dst[10], dst[11] = byte(s2>>24), byte(s2>>16), byte(s2>>8), byte(s2)
|
||||||
|
dst[12], dst[13], dst[14], dst[15] = byte(s3>>24), byte(s3>>16), byte(s3>>8), byte(s3)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Apply sbox0 to each byte in w.
|
||||||
|
func subw(w uint32) uint32 {
|
||||||
|
return uint32(sbox0[w>>24])<<24 |
|
||||||
|
uint32(sbox0[w>>16&0xff])<<16 |
|
||||||
|
uint32(sbox0[w>>8&0xff])<<8 |
|
||||||
|
uint32(sbox0[w&0xff])
|
||||||
|
}
|
||||||
|
|
||||||
|
// Rotate
|
||||||
|
func rotw(w uint32) uint32 { return w<<8 | w>>24 }
|
||||||
|
|
||||||
|
// Key expansion algorithm. See FIPS-197, Figure 11.
|
||||||
|
// Their rcon[i] is our powx[i-1] << 24.
|
||||||
|
func expandKeyGo(key []byte, enc, dec []uint32) {
|
||||||
|
// Encryption key setup.
|
||||||
|
var i int
|
||||||
|
nk := len(key) / 4
|
||||||
|
for i = 0; i < nk; i++ {
|
||||||
|
enc[i] = uint32(key[4*i])<<24 | uint32(key[4*i+1])<<16 | uint32(key[4*i+2])<<8 | uint32(key[4*i+3])
|
||||||
|
}
|
||||||
|
for ; i < len(enc); i++ {
|
||||||
|
t := enc[i-1]
|
||||||
|
if i%nk == 0 {
|
||||||
|
t = subw(rotw(t)) ^ (uint32(powx[i/nk-1]) << 24)
|
||||||
|
} else if nk > 6 && i%nk == 4 {
|
||||||
|
t = subw(t)
|
||||||
|
}
|
||||||
|
enc[i] = enc[i-nk] ^ t
|
||||||
|
}
|
||||||
|
|
||||||
|
// Derive decryption key from encryption key.
|
||||||
|
// Reverse the 4-word round key sets from enc to produce dec.
|
||||||
|
// All sets but the first and last get the MixColumn transform applied.
|
||||||
|
if dec == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
n := len(enc)
|
||||||
|
for i := 0; i < n; i += 4 {
|
||||||
|
ei := n - i - 4
|
||||||
|
for j := 0; j < 4; j++ {
|
||||||
|
x := enc[ei+j]
|
||||||
|
if i > 0 && i+4 < n {
|
||||||
|
x = td0[sbox0[x>>24]] ^ td1[sbox0[x>>16&0xff]] ^ td2[sbox0[x>>8&0xff]] ^ td3[sbox0[x&0xff]]
|
||||||
|
}
|
||||||
|
dec[i+j] = x
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
|
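Two helpers in the key expansion above are easy to sanity-check by hand: rotw rotates a word left by one byte, and the round constants are powx[i-1] shifted into the top byte. A tiny standalone check; the sample word 0x01020304 is arbitrary and the powx prefix is copied from the constants file below.

```go
package main

import "fmt"

// rotw mirrors the helper in expandKeyGo above: rotate a 32-bit word left by 8 bits.
func rotw(w uint32) uint32 { return w<<8 | w>>24 }

func main() {
    fmt.Printf("rotw(0x01020304) = %08x\n", rotw(0x01020304)) // 02030401

    // First few AES round constants, formed as powx[i-1] << 24.
    powx := []byte{0x01, 0x02, 0x04, 0x08, 0x10, 0x20}
    for i, p := range powx {
        fmt.Printf("rcon[%d] = %08x\n", i+1, uint32(p)<<24)
    }
}
```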
@@ -0,0 +1,68 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package aes12

import "strconv"

// The AES block size in bytes.
const BlockSize = 16

// A cipher is an instance of AES encryption using a particular key.
type aesCipher struct {
    enc []uint32
    dec []uint32
}

type KeySizeError int

func (k KeySizeError) Error() string {
    return "crypto/aes: invalid key size " + strconv.Itoa(int(k))
}

// NewCipher creates and returns a new Block.
// The key argument should be the AES key,
// either 16, 24, or 32 bytes to select
// AES-128, AES-192, or AES-256.
func NewCipher(key []byte) (Block, error) {
    k := len(key)
    switch k {
    default:
        return nil, KeySizeError(k)
    case 16, 24, 32:
        break
    }
    return newCipher(key)
}

// newCipherGeneric creates and returns a new Block
// implemented in pure Go.
func newCipherGeneric(key []byte) (Block, error) {
    n := len(key) + 28
    c := aesCipher{make([]uint32, n), make([]uint32, n)}
    expandKeyGo(key, c.enc, c.dec)
    return &c, nil
}

func (c *aesCipher) BlockSize() int { return BlockSize }

func (c *aesCipher) Encrypt(dst, src []byte) {
    if len(src) < BlockSize {
        panic("crypto/aes: input not full block")
    }
    if len(dst) < BlockSize {
        panic("crypto/aes: output not full block")
    }
    encryptBlockGo(c.enc, dst, src)
}

func (c *aesCipher) Decrypt(dst, src []byte) {
    if len(src) < BlockSize {
        panic("crypto/aes: input not full block")
    }
    if len(dst) < BlockSize {
        panic("crypto/aes: output not full block")
    }
    decryptBlockGo(c.dec, dst, src)
}
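One line above that deserves a gloss is `n := len(key) + 28` in newCipherGeneric: the key schedule needs 4*(rounds+1) 32-bit words, and for 16-, 24- and 32-byte keys (10, 12 and 14 rounds) that works out to exactly len(key)+28. A throwaway check of that arithmetic, added here only as an explanatory aside:

```go
package main

import "fmt"

func main() {
    // AES round count by key length: Nr = Nk + 6, where Nk = len(key)/4 words.
    for _, keyLen := range []int{16, 24, 32} {
        rounds := keyLen/4 + 6
        words := 4 * (rounds + 1) // round-key words needed by the schedule
        fmt.Printf("%d-byte key: %d rounds, %d words (= len(key)+28 = %d)\n",
            keyLen, rounds, words, keyLen+28)
    }
}
```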
@@ -0,0 +1,56 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// package aes12 implements standard block cipher modes that can be wrapped
// around low-level block cipher implementations.
// See http://csrc.nist.gov/groups/ST/toolkit/BCM/current_modes.html
// and NIST Special Publication 800-38A.
package aes12

// A Block represents an implementation of block cipher
// using a given key. It provides the capability to encrypt
// or decrypt individual blocks. The mode implementations
// extend that capability to streams of blocks.
type Block interface {
    // BlockSize returns the cipher's block size.
    BlockSize() int

    // Encrypt encrypts the first block in src into dst.
    // Dst and src may point at the same memory.
    Encrypt(dst, src []byte)

    // Decrypt decrypts the first block in src into dst.
    // Dst and src may point at the same memory.
    Decrypt(dst, src []byte)
}

// A Stream represents a stream cipher.
type Stream interface {
    // XORKeyStream XORs each byte in the given slice with a byte from the
    // cipher's key stream. Dst and src may point to the same memory.
    // If len(dst) < len(src), XORKeyStream should panic. It is acceptable
    // to pass a dst bigger than src, and in that case, XORKeyStream will
    // only update dst[:len(src)] and will not touch the rest of dst.
    XORKeyStream(dst, src []byte)
}

// A BlockMode represents a block cipher running in a block-based mode (CBC,
// ECB etc).
type BlockMode interface {
    // BlockSize returns the mode's block size.
    BlockSize() int

    // CryptBlocks encrypts or decrypts a number of blocks. The length of
    // src must be a multiple of the block size. Dst and src may point to
    // the same memory.
    CryptBlocks(dst, src []byte)
}

// Utility routines

func dup(p []byte) []byte {
    q := make([]byte, len(p))
    copy(q, p)
    return q
}
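The Block interface above is deliberately the same shape as crypto/cipher.Block, so single-block use looks identical. A minimal round-trip sketch under that assumption; the all-zero key and block are placeholders.

```go
package main

import (
    "bytes"
    "fmt"

    "github.com/lucas-clemente/aes12"
)

func main() {
    key := make([]byte, 16) // placeholder AES-128 key
    block, err := aes12.NewCipher(key)
    if err != nil {
        panic(err)
    }

    src := make([]byte, block.BlockSize()) // exactly one 16-byte block
    dst := make([]byte, block.BlockSize())
    out := make([]byte, block.BlockSize())

    block.Encrypt(dst, src)
    block.Decrypt(out, dst)
    fmt.Println(bytes.Equal(src, out)) // true: Encrypt and Decrypt are inverses
}
```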
@ -0,0 +1,79 @@
|
||||||
|
// Copyright 2012 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
package aes12
|
||||||
|
|
||||||
|
// defined in asm_amd64.s
|
||||||
|
func hasAsm() bool
|
||||||
|
func encryptBlockAsm(nr int, xk *uint32, dst, src *byte)
|
||||||
|
func decryptBlockAsm(nr int, xk *uint32, dst, src *byte)
|
||||||
|
func expandKeyAsm(nr int, key *byte, enc *uint32, dec *uint32)
|
||||||
|
|
||||||
|
type aesCipherAsm struct {
|
||||||
|
aesCipher
|
||||||
|
}
|
||||||
|
|
||||||
|
var useAsm = hasAsm()
|
||||||
|
|
||||||
|
func newCipher(key []byte) (Block, error) {
|
||||||
|
if !useAsm {
|
||||||
|
return newCipherGeneric(key)
|
||||||
|
}
|
||||||
|
n := len(key) + 28
|
||||||
|
c := aesCipherAsm{aesCipher{make([]uint32, n), make([]uint32, n)}}
|
||||||
|
rounds := 10
|
||||||
|
switch len(key) {
|
||||||
|
case 128 / 8:
|
||||||
|
rounds = 10
|
||||||
|
case 192 / 8:
|
||||||
|
rounds = 12
|
||||||
|
case 256 / 8:
|
||||||
|
rounds = 14
|
||||||
|
}
|
||||||
|
expandKeyAsm(rounds, &key[0], &c.enc[0], &c.dec[0])
|
||||||
|
if hasGCMAsm() {
|
||||||
|
return &aesCipherGCM{c}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
return &c, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *aesCipherAsm) BlockSize() int { return BlockSize }
|
||||||
|
|
||||||
|
func (c *aesCipherAsm) Encrypt(dst, src []byte) {
|
||||||
|
if len(src) < BlockSize {
|
||||||
|
panic("crypto/aes: input not full block")
|
||||||
|
}
|
||||||
|
if len(dst) < BlockSize {
|
||||||
|
panic("crypto/aes: output not full block")
|
||||||
|
}
|
||||||
|
encryptBlockAsm(len(c.enc)/4-1, &c.enc[0], &dst[0], &src[0])
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *aesCipherAsm) Decrypt(dst, src []byte) {
|
||||||
|
if len(src) < BlockSize {
|
||||||
|
panic("crypto/aes: input not full block")
|
||||||
|
}
|
||||||
|
if len(dst) < BlockSize {
|
||||||
|
panic("crypto/aes: output not full block")
|
||||||
|
}
|
||||||
|
decryptBlockAsm(len(c.dec)/4-1, &c.dec[0], &dst[0], &src[0])
|
||||||
|
}
|
||||||
|
|
||||||
|
// expandKey is used by BenchmarkExpand to ensure that the asm implementation
|
||||||
|
// of key expansion is used for the benchmark when it is available.
|
||||||
|
func expandKey(key []byte, enc, dec []uint32) {
|
||||||
|
if useAsm {
|
||||||
|
rounds := 10 // rounds needed for AES128
|
||||||
|
switch len(key) {
|
||||||
|
case 192 / 8:
|
||||||
|
rounds = 12
|
||||||
|
case 256 / 8:
|
||||||
|
rounds = 14
|
||||||
|
}
|
||||||
|
expandKeyAsm(rounds, &key[0], &enc[0], &dec[0])
|
||||||
|
} else {
|
||||||
|
expandKeyGo(key, enc, dec)
|
||||||
|
}
|
||||||
|
}
|
|
@@ -0,0 +1,22 @@
// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build !amd64

package aes12

// newCipher calls the newCipherGeneric function
// directly. Platforms with hardware accelerated
// implementations of AES should implement their
// own version of newCipher (which may then call
// newCipherGeneric if needed).
func newCipher(key []byte) (Block, error) {
    return newCipherGeneric(key)
}

// expandKey is used by BenchmarkExpand and should
// call an assembly implementation if one is available.
func expandKey(key []byte, enc, dec []uint32) {
    expandKeyGo(key, enc, dec)
}
@@ -0,0 +1,358 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// package aes12 implements AES encryption (formerly Rijndael), as defined in
// U.S. Federal Information Processing Standards Publication 197.
package aes12

// This file contains AES constants - 8720 bytes of initialized data.

// http://www.csrc.nist.gov/publications/fips/fips197/fips-197.pdf

// AES is based on the mathematical behavior of binary polynomials
// (polynomials over GF(2)) modulo the irreducible polynomial x⁸ + x⁴ + x³ + x + 1.
// Addition of these binary polynomials corresponds to binary xor.
// Reducing mod poly corresponds to binary xor with poly every
// time a 0x100 bit appears.
const poly = 1<<8 | 1<<4 | 1<<3 | 1<<1 | 1<<0 // x⁸ + x⁴ + x³ + x + 1

// Powers of x mod poly in GF(2).
var powx = [16]byte{
	0x01,
	0x02,
	0x04,
	0x08,
	0x10,
	0x20,
	0x40,
	0x80,
	0x1b,
	0x36,
	0x6c,
	0xd8,
	0xab,
	0x4d,
	0x9a,
	0x2f,
}
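Reviewer note (not part of the diff): a small sketch of how the powx table above can be regenerated, using the poly constant defined in this file; each entry is the previous one multiplied by x in GF(2⁸), reduced whenever the 0x100 bit appears.

// Sketch: regenerate powx by repeated multiplication by x modulo poly.
func genPowx() [16]byte {
	var table [16]byte
	p := 1
	for i := range table {
		table[i] = byte(p)
		p <<= 1 // multiply by x
		if p&0x100 != 0 {
			p ^= poly // reduce mod x^8 + x^4 + x^3 + x + 1
		}
	}
	return table // 0x01, 0x02, ..., 0x80, 0x1b, 0x36, ...
}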
||||||
|
|
||||||
|
// FIPS-197 Figure 7. S-box substitution values in hexadecimal format.
|
||||||
|
var sbox0 = [256]byte{
|
||||||
|
0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5, 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76,
|
||||||
|
0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0, 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0,
|
||||||
|
0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc, 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15,
|
||||||
|
0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a, 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75,
|
||||||
|
0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0, 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84,
|
||||||
|
0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b, 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf,
|
||||||
|
0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85, 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8,
|
||||||
|
0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5, 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2,
|
||||||
|
0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17, 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73,
|
||||||
|
0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88, 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb,
|
||||||
|
0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c, 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79,
|
||||||
|
0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9, 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08,
|
||||||
|
0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6, 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a,
|
||||||
|
0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e, 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e,
|
||||||
|
0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94, 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf,
|
||||||
|
0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68, 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16,
|
||||||
|
}
|
||||||
|
|
||||||
|
// FIPS-197 Figure 14. Inverse S-box substitution values in hexadecimal format.
|
||||||
|
var sbox1 = [256]byte{
|
||||||
|
0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38, 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb,
|
||||||
|
0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87, 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb,
|
||||||
|
0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d, 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e,
|
||||||
|
0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2, 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25,
|
||||||
|
0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16, 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92,
|
||||||
|
0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda, 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84,
|
||||||
|
0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a, 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06,
|
||||||
|
0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02, 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b,
|
||||||
|
0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea, 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73,
|
||||||
|
0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85, 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e,
|
||||||
|
0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89, 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b,
|
||||||
|
0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20, 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4,
|
||||||
|
0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31, 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f,
|
||||||
|
0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d, 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef,
|
||||||
|
0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0, 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61,
|
||||||
|
0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26, 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Lookup tables for encryption.
|
||||||
|
// These can be recomputed by adapting the tests in aes_test.go.
|
||||||
|
|
||||||
|
var te0 = [256]uint32{
|
||||||
|
0xc66363a5, 0xf87c7c84, 0xee777799, 0xf67b7b8d, 0xfff2f20d, 0xd66b6bbd, 0xde6f6fb1, 0x91c5c554,
|
||||||
|
0x60303050, 0x02010103, 0xce6767a9, 0x562b2b7d, 0xe7fefe19, 0xb5d7d762, 0x4dababe6, 0xec76769a,
|
||||||
|
0x8fcaca45, 0x1f82829d, 0x89c9c940, 0xfa7d7d87, 0xeffafa15, 0xb25959eb, 0x8e4747c9, 0xfbf0f00b,
|
||||||
|
0x41adadec, 0xb3d4d467, 0x5fa2a2fd, 0x45afafea, 0x239c9cbf, 0x53a4a4f7, 0xe4727296, 0x9bc0c05b,
|
||||||
|
0x75b7b7c2, 0xe1fdfd1c, 0x3d9393ae, 0x4c26266a, 0x6c36365a, 0x7e3f3f41, 0xf5f7f702, 0x83cccc4f,
|
||||||
|
0x6834345c, 0x51a5a5f4, 0xd1e5e534, 0xf9f1f108, 0xe2717193, 0xabd8d873, 0x62313153, 0x2a15153f,
|
||||||
|
0x0804040c, 0x95c7c752, 0x46232365, 0x9dc3c35e, 0x30181828, 0x379696a1, 0x0a05050f, 0x2f9a9ab5,
|
||||||
|
0x0e070709, 0x24121236, 0x1b80809b, 0xdfe2e23d, 0xcdebeb26, 0x4e272769, 0x7fb2b2cd, 0xea75759f,
|
||||||
|
0x1209091b, 0x1d83839e, 0x582c2c74, 0x341a1a2e, 0x361b1b2d, 0xdc6e6eb2, 0xb45a5aee, 0x5ba0a0fb,
|
||||||
|
0xa45252f6, 0x763b3b4d, 0xb7d6d661, 0x7db3b3ce, 0x5229297b, 0xdde3e33e, 0x5e2f2f71, 0x13848497,
|
||||||
|
0xa65353f5, 0xb9d1d168, 0x00000000, 0xc1eded2c, 0x40202060, 0xe3fcfc1f, 0x79b1b1c8, 0xb65b5bed,
|
||||||
|
0xd46a6abe, 0x8dcbcb46, 0x67bebed9, 0x7239394b, 0x944a4ade, 0x984c4cd4, 0xb05858e8, 0x85cfcf4a,
|
||||||
|
0xbbd0d06b, 0xc5efef2a, 0x4faaaae5, 0xedfbfb16, 0x864343c5, 0x9a4d4dd7, 0x66333355, 0x11858594,
|
||||||
|
0x8a4545cf, 0xe9f9f910, 0x04020206, 0xfe7f7f81, 0xa05050f0, 0x783c3c44, 0x259f9fba, 0x4ba8a8e3,
|
||||||
|
0xa25151f3, 0x5da3a3fe, 0x804040c0, 0x058f8f8a, 0x3f9292ad, 0x219d9dbc, 0x70383848, 0xf1f5f504,
|
||||||
|
0x63bcbcdf, 0x77b6b6c1, 0xafdada75, 0x42212163, 0x20101030, 0xe5ffff1a, 0xfdf3f30e, 0xbfd2d26d,
|
||||||
|
0x81cdcd4c, 0x180c0c14, 0x26131335, 0xc3ecec2f, 0xbe5f5fe1, 0x359797a2, 0x884444cc, 0x2e171739,
|
||||||
|
0x93c4c457, 0x55a7a7f2, 0xfc7e7e82, 0x7a3d3d47, 0xc86464ac, 0xba5d5de7, 0x3219192b, 0xe6737395,
|
||||||
|
0xc06060a0, 0x19818198, 0x9e4f4fd1, 0xa3dcdc7f, 0x44222266, 0x542a2a7e, 0x3b9090ab, 0x0b888883,
|
||||||
|
0x8c4646ca, 0xc7eeee29, 0x6bb8b8d3, 0x2814143c, 0xa7dede79, 0xbc5e5ee2, 0x160b0b1d, 0xaddbdb76,
|
||||||
|
0xdbe0e03b, 0x64323256, 0x743a3a4e, 0x140a0a1e, 0x924949db, 0x0c06060a, 0x4824246c, 0xb85c5ce4,
|
||||||
|
0x9fc2c25d, 0xbdd3d36e, 0x43acacef, 0xc46262a6, 0x399191a8, 0x319595a4, 0xd3e4e437, 0xf279798b,
|
||||||
|
0xd5e7e732, 0x8bc8c843, 0x6e373759, 0xda6d6db7, 0x018d8d8c, 0xb1d5d564, 0x9c4e4ed2, 0x49a9a9e0,
|
||||||
|
0xd86c6cb4, 0xac5656fa, 0xf3f4f407, 0xcfeaea25, 0xca6565af, 0xf47a7a8e, 0x47aeaee9, 0x10080818,
|
||||||
|
0x6fbabad5, 0xf0787888, 0x4a25256f, 0x5c2e2e72, 0x381c1c24, 0x57a6a6f1, 0x73b4b4c7, 0x97c6c651,
|
||||||
|
0xcbe8e823, 0xa1dddd7c, 0xe874749c, 0x3e1f1f21, 0x964b4bdd, 0x61bdbddc, 0x0d8b8b86, 0x0f8a8a85,
|
||||||
|
0xe0707090, 0x7c3e3e42, 0x71b5b5c4, 0xcc6666aa, 0x904848d8, 0x06030305, 0xf7f6f601, 0x1c0e0e12,
|
||||||
|
0xc26161a3, 0x6a35355f, 0xae5757f9, 0x69b9b9d0, 0x17868691, 0x99c1c158, 0x3a1d1d27, 0x279e9eb9,
|
||||||
|
0xd9e1e138, 0xebf8f813, 0x2b9898b3, 0x22111133, 0xd26969bb, 0xa9d9d970, 0x078e8e89, 0x339494a7,
|
||||||
|
0x2d9b9bb6, 0x3c1e1e22, 0x15878792, 0xc9e9e920, 0x87cece49, 0xaa5555ff, 0x50282878, 0xa5dfdf7a,
|
||||||
|
0x038c8c8f, 0x59a1a1f8, 0x09898980, 0x1a0d0d17, 0x65bfbfda, 0xd7e6e631, 0x844242c6, 0xd06868b8,
|
||||||
|
0x824141c3, 0x299999b0, 0x5a2d2d77, 0x1e0f0f11, 0x7bb0b0cb, 0xa85454fc, 0x6dbbbbd6, 0x2c16163a,
|
||||||
|
}
|
||||||
|
var te1 = [256]uint32{
|
||||||
|
0xa5c66363, 0x84f87c7c, 0x99ee7777, 0x8df67b7b, 0x0dfff2f2, 0xbdd66b6b, 0xb1de6f6f, 0x5491c5c5,
|
||||||
|
0x50603030, 0x03020101, 0xa9ce6767, 0x7d562b2b, 0x19e7fefe, 0x62b5d7d7, 0xe64dabab, 0x9aec7676,
|
||||||
|
0x458fcaca, 0x9d1f8282, 0x4089c9c9, 0x87fa7d7d, 0x15effafa, 0xebb25959, 0xc98e4747, 0x0bfbf0f0,
|
||||||
|
0xec41adad, 0x67b3d4d4, 0xfd5fa2a2, 0xea45afaf, 0xbf239c9c, 0xf753a4a4, 0x96e47272, 0x5b9bc0c0,
|
||||||
|
0xc275b7b7, 0x1ce1fdfd, 0xae3d9393, 0x6a4c2626, 0x5a6c3636, 0x417e3f3f, 0x02f5f7f7, 0x4f83cccc,
|
||||||
|
0x5c683434, 0xf451a5a5, 0x34d1e5e5, 0x08f9f1f1, 0x93e27171, 0x73abd8d8, 0x53623131, 0x3f2a1515,
|
||||||
|
0x0c080404, 0x5295c7c7, 0x65462323, 0x5e9dc3c3, 0x28301818, 0xa1379696, 0x0f0a0505, 0xb52f9a9a,
|
||||||
|
0x090e0707, 0x36241212, 0x9b1b8080, 0x3ddfe2e2, 0x26cdebeb, 0x694e2727, 0xcd7fb2b2, 0x9fea7575,
|
||||||
|
0x1b120909, 0x9e1d8383, 0x74582c2c, 0x2e341a1a, 0x2d361b1b, 0xb2dc6e6e, 0xeeb45a5a, 0xfb5ba0a0,
|
||||||
|
0xf6a45252, 0x4d763b3b, 0x61b7d6d6, 0xce7db3b3, 0x7b522929, 0x3edde3e3, 0x715e2f2f, 0x97138484,
|
||||||
|
0xf5a65353, 0x68b9d1d1, 0x00000000, 0x2cc1eded, 0x60402020, 0x1fe3fcfc, 0xc879b1b1, 0xedb65b5b,
|
||||||
|
0xbed46a6a, 0x468dcbcb, 0xd967bebe, 0x4b723939, 0xde944a4a, 0xd4984c4c, 0xe8b05858, 0x4a85cfcf,
|
||||||
|
0x6bbbd0d0, 0x2ac5efef, 0xe54faaaa, 0x16edfbfb, 0xc5864343, 0xd79a4d4d, 0x55663333, 0x94118585,
|
||||||
|
0xcf8a4545, 0x10e9f9f9, 0x06040202, 0x81fe7f7f, 0xf0a05050, 0x44783c3c, 0xba259f9f, 0xe34ba8a8,
|
||||||
|
0xf3a25151, 0xfe5da3a3, 0xc0804040, 0x8a058f8f, 0xad3f9292, 0xbc219d9d, 0x48703838, 0x04f1f5f5,
|
||||||
|
0xdf63bcbc, 0xc177b6b6, 0x75afdada, 0x63422121, 0x30201010, 0x1ae5ffff, 0x0efdf3f3, 0x6dbfd2d2,
|
||||||
|
0x4c81cdcd, 0x14180c0c, 0x35261313, 0x2fc3ecec, 0xe1be5f5f, 0xa2359797, 0xcc884444, 0x392e1717,
|
||||||
|
0x5793c4c4, 0xf255a7a7, 0x82fc7e7e, 0x477a3d3d, 0xacc86464, 0xe7ba5d5d, 0x2b321919, 0x95e67373,
|
||||||
|
0xa0c06060, 0x98198181, 0xd19e4f4f, 0x7fa3dcdc, 0x66442222, 0x7e542a2a, 0xab3b9090, 0x830b8888,
|
||||||
|
0xca8c4646, 0x29c7eeee, 0xd36bb8b8, 0x3c281414, 0x79a7dede, 0xe2bc5e5e, 0x1d160b0b, 0x76addbdb,
|
||||||
|
0x3bdbe0e0, 0x56643232, 0x4e743a3a, 0x1e140a0a, 0xdb924949, 0x0a0c0606, 0x6c482424, 0xe4b85c5c,
|
||||||
|
0x5d9fc2c2, 0x6ebdd3d3, 0xef43acac, 0xa6c46262, 0xa8399191, 0xa4319595, 0x37d3e4e4, 0x8bf27979,
|
||||||
|
0x32d5e7e7, 0x438bc8c8, 0x596e3737, 0xb7da6d6d, 0x8c018d8d, 0x64b1d5d5, 0xd29c4e4e, 0xe049a9a9,
|
||||||
|
0xb4d86c6c, 0xfaac5656, 0x07f3f4f4, 0x25cfeaea, 0xafca6565, 0x8ef47a7a, 0xe947aeae, 0x18100808,
|
||||||
|
0xd56fbaba, 0x88f07878, 0x6f4a2525, 0x725c2e2e, 0x24381c1c, 0xf157a6a6, 0xc773b4b4, 0x5197c6c6,
|
||||||
|
0x23cbe8e8, 0x7ca1dddd, 0x9ce87474, 0x213e1f1f, 0xdd964b4b, 0xdc61bdbd, 0x860d8b8b, 0x850f8a8a,
|
||||||
|
0x90e07070, 0x427c3e3e, 0xc471b5b5, 0xaacc6666, 0xd8904848, 0x05060303, 0x01f7f6f6, 0x121c0e0e,
|
||||||
|
0xa3c26161, 0x5f6a3535, 0xf9ae5757, 0xd069b9b9, 0x91178686, 0x5899c1c1, 0x273a1d1d, 0xb9279e9e,
|
||||||
|
0x38d9e1e1, 0x13ebf8f8, 0xb32b9898, 0x33221111, 0xbbd26969, 0x70a9d9d9, 0x89078e8e, 0xa7339494,
|
||||||
|
0xb62d9b9b, 0x223c1e1e, 0x92158787, 0x20c9e9e9, 0x4987cece, 0xffaa5555, 0x78502828, 0x7aa5dfdf,
|
||||||
|
0x8f038c8c, 0xf859a1a1, 0x80098989, 0x171a0d0d, 0xda65bfbf, 0x31d7e6e6, 0xc6844242, 0xb8d06868,
|
||||||
|
0xc3824141, 0xb0299999, 0x775a2d2d, 0x111e0f0f, 0xcb7bb0b0, 0xfca85454, 0xd66dbbbb, 0x3a2c1616,
|
||||||
|
}
|
||||||
|
var te2 = [256]uint32{
|
||||||
|
0x63a5c663, 0x7c84f87c, 0x7799ee77, 0x7b8df67b, 0xf20dfff2, 0x6bbdd66b, 0x6fb1de6f, 0xc55491c5,
|
||||||
|
0x30506030, 0x01030201, 0x67a9ce67, 0x2b7d562b, 0xfe19e7fe, 0xd762b5d7, 0xabe64dab, 0x769aec76,
|
||||||
|
0xca458fca, 0x829d1f82, 0xc94089c9, 0x7d87fa7d, 0xfa15effa, 0x59ebb259, 0x47c98e47, 0xf00bfbf0,
|
||||||
|
0xadec41ad, 0xd467b3d4, 0xa2fd5fa2, 0xafea45af, 0x9cbf239c, 0xa4f753a4, 0x7296e472, 0xc05b9bc0,
|
||||||
|
0xb7c275b7, 0xfd1ce1fd, 0x93ae3d93, 0x266a4c26, 0x365a6c36, 0x3f417e3f, 0xf702f5f7, 0xcc4f83cc,
|
||||||
|
0x345c6834, 0xa5f451a5, 0xe534d1e5, 0xf108f9f1, 0x7193e271, 0xd873abd8, 0x31536231, 0x153f2a15,
|
||||||
|
0x040c0804, 0xc75295c7, 0x23654623, 0xc35e9dc3, 0x18283018, 0x96a13796, 0x050f0a05, 0x9ab52f9a,
|
||||||
|
0x07090e07, 0x12362412, 0x809b1b80, 0xe23ddfe2, 0xeb26cdeb, 0x27694e27, 0xb2cd7fb2, 0x759fea75,
|
||||||
|
0x091b1209, 0x839e1d83, 0x2c74582c, 0x1a2e341a, 0x1b2d361b, 0x6eb2dc6e, 0x5aeeb45a, 0xa0fb5ba0,
|
||||||
|
0x52f6a452, 0x3b4d763b, 0xd661b7d6, 0xb3ce7db3, 0x297b5229, 0xe33edde3, 0x2f715e2f, 0x84971384,
|
||||||
|
0x53f5a653, 0xd168b9d1, 0x00000000, 0xed2cc1ed, 0x20604020, 0xfc1fe3fc, 0xb1c879b1, 0x5bedb65b,
|
||||||
|
0x6abed46a, 0xcb468dcb, 0xbed967be, 0x394b7239, 0x4ade944a, 0x4cd4984c, 0x58e8b058, 0xcf4a85cf,
|
||||||
|
0xd06bbbd0, 0xef2ac5ef, 0xaae54faa, 0xfb16edfb, 0x43c58643, 0x4dd79a4d, 0x33556633, 0x85941185,
|
||||||
|
0x45cf8a45, 0xf910e9f9, 0x02060402, 0x7f81fe7f, 0x50f0a050, 0x3c44783c, 0x9fba259f, 0xa8e34ba8,
|
||||||
|
0x51f3a251, 0xa3fe5da3, 0x40c08040, 0x8f8a058f, 0x92ad3f92, 0x9dbc219d, 0x38487038, 0xf504f1f5,
|
||||||
|
0xbcdf63bc, 0xb6c177b6, 0xda75afda, 0x21634221, 0x10302010, 0xff1ae5ff, 0xf30efdf3, 0xd26dbfd2,
|
||||||
|
0xcd4c81cd, 0x0c14180c, 0x13352613, 0xec2fc3ec, 0x5fe1be5f, 0x97a23597, 0x44cc8844, 0x17392e17,
|
||||||
|
0xc45793c4, 0xa7f255a7, 0x7e82fc7e, 0x3d477a3d, 0x64acc864, 0x5de7ba5d, 0x192b3219, 0x7395e673,
|
||||||
|
0x60a0c060, 0x81981981, 0x4fd19e4f, 0xdc7fa3dc, 0x22664422, 0x2a7e542a, 0x90ab3b90, 0x88830b88,
|
||||||
|
0x46ca8c46, 0xee29c7ee, 0xb8d36bb8, 0x143c2814, 0xde79a7de, 0x5ee2bc5e, 0x0b1d160b, 0xdb76addb,
|
||||||
|
0xe03bdbe0, 0x32566432, 0x3a4e743a, 0x0a1e140a, 0x49db9249, 0x060a0c06, 0x246c4824, 0x5ce4b85c,
|
||||||
|
0xc25d9fc2, 0xd36ebdd3, 0xacef43ac, 0x62a6c462, 0x91a83991, 0x95a43195, 0xe437d3e4, 0x798bf279,
|
||||||
|
0xe732d5e7, 0xc8438bc8, 0x37596e37, 0x6db7da6d, 0x8d8c018d, 0xd564b1d5, 0x4ed29c4e, 0xa9e049a9,
|
||||||
|
0x6cb4d86c, 0x56faac56, 0xf407f3f4, 0xea25cfea, 0x65afca65, 0x7a8ef47a, 0xaee947ae, 0x08181008,
|
||||||
|
0xbad56fba, 0x7888f078, 0x256f4a25, 0x2e725c2e, 0x1c24381c, 0xa6f157a6, 0xb4c773b4, 0xc65197c6,
|
||||||
|
0xe823cbe8, 0xdd7ca1dd, 0x749ce874, 0x1f213e1f, 0x4bdd964b, 0xbddc61bd, 0x8b860d8b, 0x8a850f8a,
|
||||||
|
0x7090e070, 0x3e427c3e, 0xb5c471b5, 0x66aacc66, 0x48d89048, 0x03050603, 0xf601f7f6, 0x0e121c0e,
|
||||||
|
0x61a3c261, 0x355f6a35, 0x57f9ae57, 0xb9d069b9, 0x86911786, 0xc15899c1, 0x1d273a1d, 0x9eb9279e,
|
||||||
|
0xe138d9e1, 0xf813ebf8, 0x98b32b98, 0x11332211, 0x69bbd269, 0xd970a9d9, 0x8e89078e, 0x94a73394,
|
||||||
|
0x9bb62d9b, 0x1e223c1e, 0x87921587, 0xe920c9e9, 0xce4987ce, 0x55ffaa55, 0x28785028, 0xdf7aa5df,
|
||||||
|
0x8c8f038c, 0xa1f859a1, 0x89800989, 0x0d171a0d, 0xbfda65bf, 0xe631d7e6, 0x42c68442, 0x68b8d068,
|
||||||
|
0x41c38241, 0x99b02999, 0x2d775a2d, 0x0f111e0f, 0xb0cb7bb0, 0x54fca854, 0xbbd66dbb, 0x163a2c16,
|
||||||
|
}
|
||||||
|
var te3 = [256]uint32{
|
||||||
|
0x6363a5c6, 0x7c7c84f8, 0x777799ee, 0x7b7b8df6, 0xf2f20dff, 0x6b6bbdd6, 0x6f6fb1de, 0xc5c55491,
|
||||||
|
0x30305060, 0x01010302, 0x6767a9ce, 0x2b2b7d56, 0xfefe19e7, 0xd7d762b5, 0xababe64d, 0x76769aec,
|
||||||
|
0xcaca458f, 0x82829d1f, 0xc9c94089, 0x7d7d87fa, 0xfafa15ef, 0x5959ebb2, 0x4747c98e, 0xf0f00bfb,
|
||||||
|
0xadadec41, 0xd4d467b3, 0xa2a2fd5f, 0xafafea45, 0x9c9cbf23, 0xa4a4f753, 0x727296e4, 0xc0c05b9b,
|
||||||
|
0xb7b7c275, 0xfdfd1ce1, 0x9393ae3d, 0x26266a4c, 0x36365a6c, 0x3f3f417e, 0xf7f702f5, 0xcccc4f83,
|
||||||
|
0x34345c68, 0xa5a5f451, 0xe5e534d1, 0xf1f108f9, 0x717193e2, 0xd8d873ab, 0x31315362, 0x15153f2a,
|
||||||
|
0x04040c08, 0xc7c75295, 0x23236546, 0xc3c35e9d, 0x18182830, 0x9696a137, 0x05050f0a, 0x9a9ab52f,
|
||||||
|
0x0707090e, 0x12123624, 0x80809b1b, 0xe2e23ddf, 0xebeb26cd, 0x2727694e, 0xb2b2cd7f, 0x75759fea,
|
||||||
|
0x09091b12, 0x83839e1d, 0x2c2c7458, 0x1a1a2e34, 0x1b1b2d36, 0x6e6eb2dc, 0x5a5aeeb4, 0xa0a0fb5b,
|
||||||
|
0x5252f6a4, 0x3b3b4d76, 0xd6d661b7, 0xb3b3ce7d, 0x29297b52, 0xe3e33edd, 0x2f2f715e, 0x84849713,
|
||||||
|
0x5353f5a6, 0xd1d168b9, 0x00000000, 0xeded2cc1, 0x20206040, 0xfcfc1fe3, 0xb1b1c879, 0x5b5bedb6,
|
||||||
|
0x6a6abed4, 0xcbcb468d, 0xbebed967, 0x39394b72, 0x4a4ade94, 0x4c4cd498, 0x5858e8b0, 0xcfcf4a85,
|
||||||
|
0xd0d06bbb, 0xefef2ac5, 0xaaaae54f, 0xfbfb16ed, 0x4343c586, 0x4d4dd79a, 0x33335566, 0x85859411,
|
||||||
|
0x4545cf8a, 0xf9f910e9, 0x02020604, 0x7f7f81fe, 0x5050f0a0, 0x3c3c4478, 0x9f9fba25, 0xa8a8e34b,
|
||||||
|
0x5151f3a2, 0xa3a3fe5d, 0x4040c080, 0x8f8f8a05, 0x9292ad3f, 0x9d9dbc21, 0x38384870, 0xf5f504f1,
|
||||||
|
0xbcbcdf63, 0xb6b6c177, 0xdada75af, 0x21216342, 0x10103020, 0xffff1ae5, 0xf3f30efd, 0xd2d26dbf,
|
||||||
|
0xcdcd4c81, 0x0c0c1418, 0x13133526, 0xecec2fc3, 0x5f5fe1be, 0x9797a235, 0x4444cc88, 0x1717392e,
|
||||||
|
0xc4c45793, 0xa7a7f255, 0x7e7e82fc, 0x3d3d477a, 0x6464acc8, 0x5d5de7ba, 0x19192b32, 0x737395e6,
|
||||||
|
0x6060a0c0, 0x81819819, 0x4f4fd19e, 0xdcdc7fa3, 0x22226644, 0x2a2a7e54, 0x9090ab3b, 0x8888830b,
|
||||||
|
0x4646ca8c, 0xeeee29c7, 0xb8b8d36b, 0x14143c28, 0xdede79a7, 0x5e5ee2bc, 0x0b0b1d16, 0xdbdb76ad,
|
||||||
|
0xe0e03bdb, 0x32325664, 0x3a3a4e74, 0x0a0a1e14, 0x4949db92, 0x06060a0c, 0x24246c48, 0x5c5ce4b8,
|
||||||
|
0xc2c25d9f, 0xd3d36ebd, 0xacacef43, 0x6262a6c4, 0x9191a839, 0x9595a431, 0xe4e437d3, 0x79798bf2,
|
||||||
|
0xe7e732d5, 0xc8c8438b, 0x3737596e, 0x6d6db7da, 0x8d8d8c01, 0xd5d564b1, 0x4e4ed29c, 0xa9a9e049,
|
||||||
|
0x6c6cb4d8, 0x5656faac, 0xf4f407f3, 0xeaea25cf, 0x6565afca, 0x7a7a8ef4, 0xaeaee947, 0x08081810,
|
||||||
|
0xbabad56f, 0x787888f0, 0x25256f4a, 0x2e2e725c, 0x1c1c2438, 0xa6a6f157, 0xb4b4c773, 0xc6c65197,
|
||||||
|
0xe8e823cb, 0xdddd7ca1, 0x74749ce8, 0x1f1f213e, 0x4b4bdd96, 0xbdbddc61, 0x8b8b860d, 0x8a8a850f,
|
||||||
|
0x707090e0, 0x3e3e427c, 0xb5b5c471, 0x6666aacc, 0x4848d890, 0x03030506, 0xf6f601f7, 0x0e0e121c,
|
||||||
|
0x6161a3c2, 0x35355f6a, 0x5757f9ae, 0xb9b9d069, 0x86869117, 0xc1c15899, 0x1d1d273a, 0x9e9eb927,
|
||||||
|
0xe1e138d9, 0xf8f813eb, 0x9898b32b, 0x11113322, 0x6969bbd2, 0xd9d970a9, 0x8e8e8907, 0x9494a733,
|
||||||
|
0x9b9bb62d, 0x1e1e223c, 0x87879215, 0xe9e920c9, 0xcece4987, 0x5555ffaa, 0x28287850, 0xdfdf7aa5,
|
||||||
|
0x8c8c8f03, 0xa1a1f859, 0x89898009, 0x0d0d171a, 0xbfbfda65, 0xe6e631d7, 0x4242c684, 0x6868b8d0,
|
||||||
|
0x4141c382, 0x9999b029, 0x2d2d775a, 0x0f0f111e, 0xb0b0cb7b, 0x5454fca8, 0xbbbbd66d, 0x16163a2c,
|
||||||
|
}
|
||||||
|
|
||||||
|
// Lookup tables for decryption.
|
||||||
|
// These can be recomputed by adapting the tests in aes_test.go.
|
||||||
|
|
||||||
|
var td0 = [256]uint32{
|
||||||
|
0x51f4a750, 0x7e416553, 0x1a17a4c3, 0x3a275e96, 0x3bab6bcb, 0x1f9d45f1, 0xacfa58ab, 0x4be30393,
|
||||||
|
0x2030fa55, 0xad766df6, 0x88cc7691, 0xf5024c25, 0x4fe5d7fc, 0xc52acbd7, 0x26354480, 0xb562a38f,
|
||||||
|
0xdeb15a49, 0x25ba1b67, 0x45ea0e98, 0x5dfec0e1, 0xc32f7502, 0x814cf012, 0x8d4697a3, 0x6bd3f9c6,
|
||||||
|
0x038f5fe7, 0x15929c95, 0xbf6d7aeb, 0x955259da, 0xd4be832d, 0x587421d3, 0x49e06929, 0x8ec9c844,
|
||||||
|
0x75c2896a, 0xf48e7978, 0x99583e6b, 0x27b971dd, 0xbee14fb6, 0xf088ad17, 0xc920ac66, 0x7dce3ab4,
|
||||||
|
0x63df4a18, 0xe51a3182, 0x97513360, 0x62537f45, 0xb16477e0, 0xbb6bae84, 0xfe81a01c, 0xf9082b94,
|
||||||
|
0x70486858, 0x8f45fd19, 0x94de6c87, 0x527bf8b7, 0xab73d323, 0x724b02e2, 0xe31f8f57, 0x6655ab2a,
|
||||||
|
0xb2eb2807, 0x2fb5c203, 0x86c57b9a, 0xd33708a5, 0x302887f2, 0x23bfa5b2, 0x02036aba, 0xed16825c,
|
||||||
|
0x8acf1c2b, 0xa779b492, 0xf307f2f0, 0x4e69e2a1, 0x65daf4cd, 0x0605bed5, 0xd134621f, 0xc4a6fe8a,
|
||||||
|
0x342e539d, 0xa2f355a0, 0x058ae132, 0xa4f6eb75, 0x0b83ec39, 0x4060efaa, 0x5e719f06, 0xbd6e1051,
|
||||||
|
0x3e218af9, 0x96dd063d, 0xdd3e05ae, 0x4de6bd46, 0x91548db5, 0x71c45d05, 0x0406d46f, 0x605015ff,
|
||||||
|
0x1998fb24, 0xd6bde997, 0x894043cc, 0x67d99e77, 0xb0e842bd, 0x07898b88, 0xe7195b38, 0x79c8eedb,
|
||||||
|
0xa17c0a47, 0x7c420fe9, 0xf8841ec9, 0x00000000, 0x09808683, 0x322bed48, 0x1e1170ac, 0x6c5a724e,
|
||||||
|
0xfd0efffb, 0x0f853856, 0x3daed51e, 0x362d3927, 0x0a0fd964, 0x685ca621, 0x9b5b54d1, 0x24362e3a,
|
||||||
|
0x0c0a67b1, 0x9357e70f, 0xb4ee96d2, 0x1b9b919e, 0x80c0c54f, 0x61dc20a2, 0x5a774b69, 0x1c121a16,
|
||||||
|
0xe293ba0a, 0xc0a02ae5, 0x3c22e043, 0x121b171d, 0x0e090d0b, 0xf28bc7ad, 0x2db6a8b9, 0x141ea9c8,
|
||||||
|
0x57f11985, 0xaf75074c, 0xee99ddbb, 0xa37f60fd, 0xf701269f, 0x5c72f5bc, 0x44663bc5, 0x5bfb7e34,
|
||||||
|
0x8b432976, 0xcb23c6dc, 0xb6edfc68, 0xb8e4f163, 0xd731dcca, 0x42638510, 0x13972240, 0x84c61120,
|
||||||
|
0x854a247d, 0xd2bb3df8, 0xaef93211, 0xc729a16d, 0x1d9e2f4b, 0xdcb230f3, 0x0d8652ec, 0x77c1e3d0,
|
||||||
|
0x2bb3166c, 0xa970b999, 0x119448fa, 0x47e96422, 0xa8fc8cc4, 0xa0f03f1a, 0x567d2cd8, 0x223390ef,
|
||||||
|
0x87494ec7, 0xd938d1c1, 0x8ccaa2fe, 0x98d40b36, 0xa6f581cf, 0xa57ade28, 0xdab78e26, 0x3fadbfa4,
|
||||||
|
0x2c3a9de4, 0x5078920d, 0x6a5fcc9b, 0x547e4662, 0xf68d13c2, 0x90d8b8e8, 0x2e39f75e, 0x82c3aff5,
|
||||||
|
0x9f5d80be, 0x69d0937c, 0x6fd52da9, 0xcf2512b3, 0xc8ac993b, 0x10187da7, 0xe89c636e, 0xdb3bbb7b,
|
||||||
|
0xcd267809, 0x6e5918f4, 0xec9ab701, 0x834f9aa8, 0xe6956e65, 0xaaffe67e, 0x21bccf08, 0xef15e8e6,
|
||||||
|
0xbae79bd9, 0x4a6f36ce, 0xea9f09d4, 0x29b07cd6, 0x31a4b2af, 0x2a3f2331, 0xc6a59430, 0x35a266c0,
|
||||||
|
0x744ebc37, 0xfc82caa6, 0xe090d0b0, 0x33a7d815, 0xf104984a, 0x41ecdaf7, 0x7fcd500e, 0x1791f62f,
|
||||||
|
0x764dd68d, 0x43efb04d, 0xccaa4d54, 0xe49604df, 0x9ed1b5e3, 0x4c6a881b, 0xc12c1fb8, 0x4665517f,
|
||||||
|
0x9d5eea04, 0x018c355d, 0xfa877473, 0xfb0b412e, 0xb3671d5a, 0x92dbd252, 0xe9105633, 0x6dd64713,
|
||||||
|
0x9ad7618c, 0x37a10c7a, 0x59f8148e, 0xeb133c89, 0xcea927ee, 0xb761c935, 0xe11ce5ed, 0x7a47b13c,
|
||||||
|
0x9cd2df59, 0x55f2733f, 0x1814ce79, 0x73c737bf, 0x53f7cdea, 0x5ffdaa5b, 0xdf3d6f14, 0x7844db86,
|
||||||
|
0xcaaff381, 0xb968c43e, 0x3824342c, 0xc2a3405f, 0x161dc372, 0xbce2250c, 0x283c498b, 0xff0d9541,
|
||||||
|
0x39a80171, 0x080cb3de, 0xd8b4e49c, 0x6456c190, 0x7bcb8461, 0xd532b670, 0x486c5c74, 0xd0b85742,
|
||||||
|
}
|
||||||
|
var td1 = [256]uint32{
|
||||||
|
0x5051f4a7, 0x537e4165, 0xc31a17a4, 0x963a275e, 0xcb3bab6b, 0xf11f9d45, 0xabacfa58, 0x934be303,
|
||||||
|
0x552030fa, 0xf6ad766d, 0x9188cc76, 0x25f5024c, 0xfc4fe5d7, 0xd7c52acb, 0x80263544, 0x8fb562a3,
|
||||||
|
0x49deb15a, 0x6725ba1b, 0x9845ea0e, 0xe15dfec0, 0x02c32f75, 0x12814cf0, 0xa38d4697, 0xc66bd3f9,
|
||||||
|
0xe7038f5f, 0x9515929c, 0xebbf6d7a, 0xda955259, 0x2dd4be83, 0xd3587421, 0x2949e069, 0x448ec9c8,
|
||||||
|
0x6a75c289, 0x78f48e79, 0x6b99583e, 0xdd27b971, 0xb6bee14f, 0x17f088ad, 0x66c920ac, 0xb47dce3a,
|
||||||
|
0x1863df4a, 0x82e51a31, 0x60975133, 0x4562537f, 0xe0b16477, 0x84bb6bae, 0x1cfe81a0, 0x94f9082b,
|
||||||
|
0x58704868, 0x198f45fd, 0x8794de6c, 0xb7527bf8, 0x23ab73d3, 0xe2724b02, 0x57e31f8f, 0x2a6655ab,
|
||||||
|
0x07b2eb28, 0x032fb5c2, 0x9a86c57b, 0xa5d33708, 0xf2302887, 0xb223bfa5, 0xba02036a, 0x5ced1682,
|
||||||
|
0x2b8acf1c, 0x92a779b4, 0xf0f307f2, 0xa14e69e2, 0xcd65daf4, 0xd50605be, 0x1fd13462, 0x8ac4a6fe,
|
||||||
|
0x9d342e53, 0xa0a2f355, 0x32058ae1, 0x75a4f6eb, 0x390b83ec, 0xaa4060ef, 0x065e719f, 0x51bd6e10,
|
||||||
|
0xf93e218a, 0x3d96dd06, 0xaedd3e05, 0x464de6bd, 0xb591548d, 0x0571c45d, 0x6f0406d4, 0xff605015,
|
||||||
|
0x241998fb, 0x97d6bde9, 0xcc894043, 0x7767d99e, 0xbdb0e842, 0x8807898b, 0x38e7195b, 0xdb79c8ee,
|
||||||
|
0x47a17c0a, 0xe97c420f, 0xc9f8841e, 0x00000000, 0x83098086, 0x48322bed, 0xac1e1170, 0x4e6c5a72,
|
||||||
|
0xfbfd0eff, 0x560f8538, 0x1e3daed5, 0x27362d39, 0x640a0fd9, 0x21685ca6, 0xd19b5b54, 0x3a24362e,
|
||||||
|
0xb10c0a67, 0x0f9357e7, 0xd2b4ee96, 0x9e1b9b91, 0x4f80c0c5, 0xa261dc20, 0x695a774b, 0x161c121a,
|
||||||
|
0x0ae293ba, 0xe5c0a02a, 0x433c22e0, 0x1d121b17, 0x0b0e090d, 0xadf28bc7, 0xb92db6a8, 0xc8141ea9,
|
||||||
|
0x8557f119, 0x4caf7507, 0xbbee99dd, 0xfda37f60, 0x9ff70126, 0xbc5c72f5, 0xc544663b, 0x345bfb7e,
|
||||||
|
0x768b4329, 0xdccb23c6, 0x68b6edfc, 0x63b8e4f1, 0xcad731dc, 0x10426385, 0x40139722, 0x2084c611,
|
||||||
|
0x7d854a24, 0xf8d2bb3d, 0x11aef932, 0x6dc729a1, 0x4b1d9e2f, 0xf3dcb230, 0xec0d8652, 0xd077c1e3,
|
||||||
|
0x6c2bb316, 0x99a970b9, 0xfa119448, 0x2247e964, 0xc4a8fc8c, 0x1aa0f03f, 0xd8567d2c, 0xef223390,
|
||||||
|
0xc787494e, 0xc1d938d1, 0xfe8ccaa2, 0x3698d40b, 0xcfa6f581, 0x28a57ade, 0x26dab78e, 0xa43fadbf,
|
||||||
|
0xe42c3a9d, 0x0d507892, 0x9b6a5fcc, 0x62547e46, 0xc2f68d13, 0xe890d8b8, 0x5e2e39f7, 0xf582c3af,
|
||||||
|
0xbe9f5d80, 0x7c69d093, 0xa96fd52d, 0xb3cf2512, 0x3bc8ac99, 0xa710187d, 0x6ee89c63, 0x7bdb3bbb,
|
||||||
|
0x09cd2678, 0xf46e5918, 0x01ec9ab7, 0xa8834f9a, 0x65e6956e, 0x7eaaffe6, 0x0821bccf, 0xe6ef15e8,
|
||||||
|
0xd9bae79b, 0xce4a6f36, 0xd4ea9f09, 0xd629b07c, 0xaf31a4b2, 0x312a3f23, 0x30c6a594, 0xc035a266,
|
||||||
|
0x37744ebc, 0xa6fc82ca, 0xb0e090d0, 0x1533a7d8, 0x4af10498, 0xf741ecda, 0x0e7fcd50, 0x2f1791f6,
|
||||||
|
0x8d764dd6, 0x4d43efb0, 0x54ccaa4d, 0xdfe49604, 0xe39ed1b5, 0x1b4c6a88, 0xb8c12c1f, 0x7f466551,
|
||||||
|
0x049d5eea, 0x5d018c35, 0x73fa8774, 0x2efb0b41, 0x5ab3671d, 0x5292dbd2, 0x33e91056, 0x136dd647,
|
||||||
|
0x8c9ad761, 0x7a37a10c, 0x8e59f814, 0x89eb133c, 0xeecea927, 0x35b761c9, 0xede11ce5, 0x3c7a47b1,
|
||||||
|
0x599cd2df, 0x3f55f273, 0x791814ce, 0xbf73c737, 0xea53f7cd, 0x5b5ffdaa, 0x14df3d6f, 0x867844db,
|
||||||
|
0x81caaff3, 0x3eb968c4, 0x2c382434, 0x5fc2a340, 0x72161dc3, 0x0cbce225, 0x8b283c49, 0x41ff0d95,
|
||||||
|
0x7139a801, 0xde080cb3, 0x9cd8b4e4, 0x906456c1, 0x617bcb84, 0x70d532b6, 0x74486c5c, 0x42d0b857,
|
||||||
|
}
|
||||||
|
var td2 = [256]uint32{
|
||||||
|
0xa75051f4, 0x65537e41, 0xa4c31a17, 0x5e963a27, 0x6bcb3bab, 0x45f11f9d, 0x58abacfa, 0x03934be3,
|
||||||
|
0xfa552030, 0x6df6ad76, 0x769188cc, 0x4c25f502, 0xd7fc4fe5, 0xcbd7c52a, 0x44802635, 0xa38fb562,
|
||||||
|
0x5a49deb1, 0x1b6725ba, 0x0e9845ea, 0xc0e15dfe, 0x7502c32f, 0xf012814c, 0x97a38d46, 0xf9c66bd3,
|
||||||
|
0x5fe7038f, 0x9c951592, 0x7aebbf6d, 0x59da9552, 0x832dd4be, 0x21d35874, 0x692949e0, 0xc8448ec9,
|
||||||
|
0x896a75c2, 0x7978f48e, 0x3e6b9958, 0x71dd27b9, 0x4fb6bee1, 0xad17f088, 0xac66c920, 0x3ab47dce,
|
||||||
|
0x4a1863df, 0x3182e51a, 0x33609751, 0x7f456253, 0x77e0b164, 0xae84bb6b, 0xa01cfe81, 0x2b94f908,
|
||||||
|
0x68587048, 0xfd198f45, 0x6c8794de, 0xf8b7527b, 0xd323ab73, 0x02e2724b, 0x8f57e31f, 0xab2a6655,
|
||||||
|
0x2807b2eb, 0xc2032fb5, 0x7b9a86c5, 0x08a5d337, 0x87f23028, 0xa5b223bf, 0x6aba0203, 0x825ced16,
|
||||||
|
0x1c2b8acf, 0xb492a779, 0xf2f0f307, 0xe2a14e69, 0xf4cd65da, 0xbed50605, 0x621fd134, 0xfe8ac4a6,
|
||||||
|
0x539d342e, 0x55a0a2f3, 0xe132058a, 0xeb75a4f6, 0xec390b83, 0xefaa4060, 0x9f065e71, 0x1051bd6e,
|
||||||
|
0x8af93e21, 0x063d96dd, 0x05aedd3e, 0xbd464de6, 0x8db59154, 0x5d0571c4, 0xd46f0406, 0x15ff6050,
|
||||||
|
0xfb241998, 0xe997d6bd, 0x43cc8940, 0x9e7767d9, 0x42bdb0e8, 0x8b880789, 0x5b38e719, 0xeedb79c8,
|
||||||
|
0x0a47a17c, 0x0fe97c42, 0x1ec9f884, 0x00000000, 0x86830980, 0xed48322b, 0x70ac1e11, 0x724e6c5a,
|
||||||
|
0xfffbfd0e, 0x38560f85, 0xd51e3dae, 0x3927362d, 0xd9640a0f, 0xa621685c, 0x54d19b5b, 0x2e3a2436,
|
||||||
|
0x67b10c0a, 0xe70f9357, 0x96d2b4ee, 0x919e1b9b, 0xc54f80c0, 0x20a261dc, 0x4b695a77, 0x1a161c12,
|
||||||
|
0xba0ae293, 0x2ae5c0a0, 0xe0433c22, 0x171d121b, 0x0d0b0e09, 0xc7adf28b, 0xa8b92db6, 0xa9c8141e,
|
||||||
|
0x198557f1, 0x074caf75, 0xddbbee99, 0x60fda37f, 0x269ff701, 0xf5bc5c72, 0x3bc54466, 0x7e345bfb,
|
||||||
|
0x29768b43, 0xc6dccb23, 0xfc68b6ed, 0xf163b8e4, 0xdccad731, 0x85104263, 0x22401397, 0x112084c6,
|
||||||
|
0x247d854a, 0x3df8d2bb, 0x3211aef9, 0xa16dc729, 0x2f4b1d9e, 0x30f3dcb2, 0x52ec0d86, 0xe3d077c1,
|
||||||
|
0x166c2bb3, 0xb999a970, 0x48fa1194, 0x642247e9, 0x8cc4a8fc, 0x3f1aa0f0, 0x2cd8567d, 0x90ef2233,
|
||||||
|
0x4ec78749, 0xd1c1d938, 0xa2fe8cca, 0x0b3698d4, 0x81cfa6f5, 0xde28a57a, 0x8e26dab7, 0xbfa43fad,
|
||||||
|
0x9de42c3a, 0x920d5078, 0xcc9b6a5f, 0x4662547e, 0x13c2f68d, 0xb8e890d8, 0xf75e2e39, 0xaff582c3,
|
||||||
|
0x80be9f5d, 0x937c69d0, 0x2da96fd5, 0x12b3cf25, 0x993bc8ac, 0x7da71018, 0x636ee89c, 0xbb7bdb3b,
|
||||||
|
0x7809cd26, 0x18f46e59, 0xb701ec9a, 0x9aa8834f, 0x6e65e695, 0xe67eaaff, 0xcf0821bc, 0xe8e6ef15,
|
||||||
|
0x9bd9bae7, 0x36ce4a6f, 0x09d4ea9f, 0x7cd629b0, 0xb2af31a4, 0x23312a3f, 0x9430c6a5, 0x66c035a2,
|
||||||
|
0xbc37744e, 0xcaa6fc82, 0xd0b0e090, 0xd81533a7, 0x984af104, 0xdaf741ec, 0x500e7fcd, 0xf62f1791,
|
||||||
|
0xd68d764d, 0xb04d43ef, 0x4d54ccaa, 0x04dfe496, 0xb5e39ed1, 0x881b4c6a, 0x1fb8c12c, 0x517f4665,
|
||||||
|
0xea049d5e, 0x355d018c, 0x7473fa87, 0x412efb0b, 0x1d5ab367, 0xd25292db, 0x5633e910, 0x47136dd6,
|
||||||
|
0x618c9ad7, 0x0c7a37a1, 0x148e59f8, 0x3c89eb13, 0x27eecea9, 0xc935b761, 0xe5ede11c, 0xb13c7a47,
|
||||||
|
0xdf599cd2, 0x733f55f2, 0xce791814, 0x37bf73c7, 0xcdea53f7, 0xaa5b5ffd, 0x6f14df3d, 0xdb867844,
|
||||||
|
0xf381caaf, 0xc43eb968, 0x342c3824, 0x405fc2a3, 0xc372161d, 0x250cbce2, 0x498b283c, 0x9541ff0d,
|
||||||
|
0x017139a8, 0xb3de080c, 0xe49cd8b4, 0xc1906456, 0x84617bcb, 0xb670d532, 0x5c74486c, 0x5742d0b8,
|
||||||
|
}
|
||||||
|
var td3 = [256]uint32{
|
||||||
|
0xf4a75051, 0x4165537e, 0x17a4c31a, 0x275e963a, 0xab6bcb3b, 0x9d45f11f, 0xfa58abac, 0xe303934b,
|
||||||
|
0x30fa5520, 0x766df6ad, 0xcc769188, 0x024c25f5, 0xe5d7fc4f, 0x2acbd7c5, 0x35448026, 0x62a38fb5,
|
||||||
|
0xb15a49de, 0xba1b6725, 0xea0e9845, 0xfec0e15d, 0x2f7502c3, 0x4cf01281, 0x4697a38d, 0xd3f9c66b,
|
||||||
|
0x8f5fe703, 0x929c9515, 0x6d7aebbf, 0x5259da95, 0xbe832dd4, 0x7421d358, 0xe0692949, 0xc9c8448e,
|
||||||
|
0xc2896a75, 0x8e7978f4, 0x583e6b99, 0xb971dd27, 0xe14fb6be, 0x88ad17f0, 0x20ac66c9, 0xce3ab47d,
|
||||||
|
0xdf4a1863, 0x1a3182e5, 0x51336097, 0x537f4562, 0x6477e0b1, 0x6bae84bb, 0x81a01cfe, 0x082b94f9,
|
||||||
|
0x48685870, 0x45fd198f, 0xde6c8794, 0x7bf8b752, 0x73d323ab, 0x4b02e272, 0x1f8f57e3, 0x55ab2a66,
|
||||||
|
0xeb2807b2, 0xb5c2032f, 0xc57b9a86, 0x3708a5d3, 0x2887f230, 0xbfa5b223, 0x036aba02, 0x16825ced,
|
||||||
|
0xcf1c2b8a, 0x79b492a7, 0x07f2f0f3, 0x69e2a14e, 0xdaf4cd65, 0x05bed506, 0x34621fd1, 0xa6fe8ac4,
|
||||||
|
0x2e539d34, 0xf355a0a2, 0x8ae13205, 0xf6eb75a4, 0x83ec390b, 0x60efaa40, 0x719f065e, 0x6e1051bd,
|
||||||
|
0x218af93e, 0xdd063d96, 0x3e05aedd, 0xe6bd464d, 0x548db591, 0xc45d0571, 0x06d46f04, 0x5015ff60,
|
||||||
|
0x98fb2419, 0xbde997d6, 0x4043cc89, 0xd99e7767, 0xe842bdb0, 0x898b8807, 0x195b38e7, 0xc8eedb79,
|
||||||
|
0x7c0a47a1, 0x420fe97c, 0x841ec9f8, 0x00000000, 0x80868309, 0x2bed4832, 0x1170ac1e, 0x5a724e6c,
|
||||||
|
0x0efffbfd, 0x8538560f, 0xaed51e3d, 0x2d392736, 0x0fd9640a, 0x5ca62168, 0x5b54d19b, 0x362e3a24,
|
||||||
|
0x0a67b10c, 0x57e70f93, 0xee96d2b4, 0x9b919e1b, 0xc0c54f80, 0xdc20a261, 0x774b695a, 0x121a161c,
|
||||||
|
0x93ba0ae2, 0xa02ae5c0, 0x22e0433c, 0x1b171d12, 0x090d0b0e, 0x8bc7adf2, 0xb6a8b92d, 0x1ea9c814,
|
||||||
|
0xf1198557, 0x75074caf, 0x99ddbbee, 0x7f60fda3, 0x01269ff7, 0x72f5bc5c, 0x663bc544, 0xfb7e345b,
|
||||||
|
0x4329768b, 0x23c6dccb, 0xedfc68b6, 0xe4f163b8, 0x31dccad7, 0x63851042, 0x97224013, 0xc6112084,
|
||||||
|
0x4a247d85, 0xbb3df8d2, 0xf93211ae, 0x29a16dc7, 0x9e2f4b1d, 0xb230f3dc, 0x8652ec0d, 0xc1e3d077,
|
||||||
|
0xb3166c2b, 0x70b999a9, 0x9448fa11, 0xe9642247, 0xfc8cc4a8, 0xf03f1aa0, 0x7d2cd856, 0x3390ef22,
|
||||||
|
0x494ec787, 0x38d1c1d9, 0xcaa2fe8c, 0xd40b3698, 0xf581cfa6, 0x7ade28a5, 0xb78e26da, 0xadbfa43f,
|
||||||
|
0x3a9de42c, 0x78920d50, 0x5fcc9b6a, 0x7e466254, 0x8d13c2f6, 0xd8b8e890, 0x39f75e2e, 0xc3aff582,
|
||||||
|
0x5d80be9f, 0xd0937c69, 0xd52da96f, 0x2512b3cf, 0xac993bc8, 0x187da710, 0x9c636ee8, 0x3bbb7bdb,
|
||||||
|
0x267809cd, 0x5918f46e, 0x9ab701ec, 0x4f9aa883, 0x956e65e6, 0xffe67eaa, 0xbccf0821, 0x15e8e6ef,
|
||||||
|
0xe79bd9ba, 0x6f36ce4a, 0x9f09d4ea, 0xb07cd629, 0xa4b2af31, 0x3f23312a, 0xa59430c6, 0xa266c035,
|
||||||
|
0x4ebc3774, 0x82caa6fc, 0x90d0b0e0, 0xa7d81533, 0x04984af1, 0xecdaf741, 0xcd500e7f, 0x91f62f17,
|
||||||
|
0x4dd68d76, 0xefb04d43, 0xaa4d54cc, 0x9604dfe4, 0xd1b5e39e, 0x6a881b4c, 0x2c1fb8c1, 0x65517f46,
|
||||||
|
0x5eea049d, 0x8c355d01, 0x877473fa, 0x0b412efb, 0x671d5ab3, 0xdbd25292, 0x105633e9, 0xd647136d,
|
||||||
|
0xd7618c9a, 0xa10c7a37, 0xf8148e59, 0x133c89eb, 0xa927eece, 0x61c935b7, 0x1ce5ede1, 0x47b13c7a,
|
||||||
|
0xd2df599c, 0xf2733f55, 0x14ce7918, 0xc737bf73, 0xf7cdea53, 0xfdaa5b5f, 0x3d6f14df, 0x44db8678,
|
||||||
|
0xaff381ca, 0x68c43eb9, 0x24342c38, 0xa3405fc2, 0x1dc37216, 0xe2250cbc, 0x3c498b28, 0x0d9541ff,
|
||||||
|
0xa8017139, 0x0cb3de08, 0xb4e49cd8, 0x56c19064, 0xcb84617b, 0x32b670d5, 0x6c5c7448, 0xb85742d0,
|
||||||
|
}
|
|
@@ -0,0 +1,401 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package aes12

import (
	"crypto/subtle"
	"errors"
)

// AEAD is a cipher mode providing authenticated encryption with associated
// data. For a description of the methodology, see
// https://en.wikipedia.org/wiki/Authenticated_encryption
type AEAD interface {
	// NonceSize returns the size of the nonce that must be passed to Seal
	// and Open.
	NonceSize() int

	// Overhead returns the maximum difference between the lengths of a
	// plaintext and its ciphertext.
	Overhead() int

	// Seal encrypts and authenticates plaintext, authenticates the
	// additional data and appends the result to dst, returning the updated
	// slice. The nonce must be NonceSize() bytes long and unique for all
	// time, for a given key.
	//
	// The plaintext and dst may alias exactly or not at all. To reuse
	// plaintext's storage for the encrypted output, use plaintext[:0] as dst.
	Seal(dst, nonce, plaintext, additionalData []byte) []byte

	// Open decrypts and authenticates ciphertext, authenticates the
	// additional data and, if successful, appends the resulting plaintext
	// to dst, returning the updated slice. The nonce must be NonceSize()
	// bytes long and both it and the additional data must match the
	// value passed to Seal.
	//
	// The ciphertext and dst may alias exactly or not at all. To reuse
	// ciphertext's storage for the decrypted output, use ciphertext[:0] as dst.
	//
	// Even if the function fails, the contents of dst, up to its capacity,
	// may be overwritten.
	Open(dst, nonce, ciphertext, additionalData []byte) ([]byte, error)
}
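Reviewer note (not part of the diff): a minimal usage sketch of this AEAD, mirroring the package tests further down in this diff (which use aes12.NewCipher and aes12.NewGCM); Seal appends a 12-byte tag, so the sealed output is len(plaintext)+12 bytes.

// Sketch only; assumes `import "github.com/lucas-clemente/aes12"`.
func sealAndOpen(key, nonce, plaintext, aad []byte) ([]byte, error) {
	block, err := aes12.NewCipher(key) // 16-, 24- or 32-byte key
	if err != nil {
		return nil, err
	}
	aead, err := aes12.NewGCM(block)
	if err != nil {
		return nil, err
	}
	sealed := aead.Seal(nil, nonce, plaintext, aad) // len(plaintext)+12 bytes
	return aead.Open(nil, nonce, sealed, aad)       // verifies the 96-bit tag
}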

// gcmAble is an interface implemented by ciphers that have a specific optimized
// implementation of GCM, like crypto/aes. NewGCM will check for this interface
// and return the specific AEAD if found.
type gcmAble interface {
	NewGCM(int) (AEAD, error)
}

// gcmFieldElement represents a value in GF(2¹²⁸). In order to reflect the GCM
// standard and make getUint64 suitable for marshaling these values, the bits
// are stored backwards. For example:
//   the coefficient of x⁰ can be obtained by v.low >> 63.
//   the coefficient of x⁶³ can be obtained by v.low & 1.
//   the coefficient of x⁶⁴ can be obtained by v.high >> 63.
//   the coefficient of x¹²⁷ can be obtained by v.high & 1.
type gcmFieldElement struct {
	low, high uint64
}

// gcm represents a Galois Counter Mode with a specific key. See
// http://csrc.nist.gov/groups/ST/toolkit/BCM/documents/proposedmodes/gcm/gcm-revised-spec.pdf
type gcm struct {
	cipher    Block
	nonceSize int
	// productTable contains the first sixteen powers of the key, H.
	// However, they are in bit reversed order. See NewGCMWithNonceSize.
	productTable [16]gcmFieldElement
}

// NewGCM returns the given 128-bit, block cipher wrapped in Galois Counter Mode
// with the standard nonce length.
func NewGCM(cipher Block) (AEAD, error) {
	return NewGCMWithNonceSize(cipher, gcmStandardNonceSize)
}

// NewGCMWithNonceSize returns the given 128-bit, block cipher wrapped in Galois
// Counter Mode, which accepts nonces of the given length.
//
// Only use this function if you require compatibility with an existing
// cryptosystem that uses non-standard nonce lengths. All other users should use
// NewGCM, which is faster and more resistant to misuse.
func NewGCMWithNonceSize(cipher Block, size int) (AEAD, error) {
	if cipher, ok := cipher.(gcmAble); ok {
		return cipher.NewGCM(size)
	}

	if cipher.BlockSize() != gcmBlockSize {
		return nil, errors.New("cipher: NewGCM requires 128-bit block cipher")
	}

	var key [gcmBlockSize]byte
	cipher.Encrypt(key[:], key[:])

	g := &gcm{cipher: cipher, nonceSize: size}

	// We precompute 16 multiples of |key|. However, when we do lookups
	// into this table we'll be using bits from a field element and
	// therefore the bits will be in the reverse order. So normally one
	// would expect, say, 4*key to be in index 4 of the table but due to
	// this bit ordering it will actually be in index 0010 (base 2) = 2.
	x := gcmFieldElement{
		getUint64(key[:8]),
		getUint64(key[8:]),
	}
	g.productTable[reverseBits(1)] = x

	for i := 2; i < 16; i += 2 {
		g.productTable[reverseBits(i)] = gcmDouble(&g.productTable[reverseBits(i/2)])
		g.productTable[reverseBits(i+1)] = gcmAdd(&g.productTable[reverseBits(i)], &x)
	}

	return g, nil
}

const (
	gcmBlockSize         = 16
	gcmTagSize           = 12
	gcmStandardNonceSize = 12
)

func (g *gcm) NonceSize() int {
	return g.nonceSize
}

func (*gcm) Overhead() int {
	return gcmTagSize
}

func (g *gcm) Seal(dst, nonce, plaintext, data []byte) []byte {
	if len(nonce) != g.nonceSize {
		panic("cipher: incorrect nonce length given to GCM")
	}
	ret, out := sliceForAppend(dst, len(plaintext)+gcmTagSize)

	var counter, tagMask [gcmBlockSize]byte
	g.deriveCounter(&counter, nonce)

	g.cipher.Encrypt(tagMask[:], counter[:])
	gcmInc32(&counter)

	g.counterCrypt(out, plaintext, &counter)

	tag := make([]byte, 16)
	g.auth(tag, out[:len(plaintext)], data, &tagMask)
	copy(ret[len(ret)-12:], tag)

	return ret
}

var errOpen = errors.New("cipher: message authentication failed")

func (g *gcm) Open(dst, nonce, ciphertext, data []byte) ([]byte, error) {
	if len(nonce) != g.nonceSize {
		panic("cipher: incorrect nonce length given to GCM")
	}

	if len(ciphertext) < gcmTagSize {
		return nil, errOpen
	}
	tag := ciphertext[len(ciphertext)-gcmTagSize:]
	ciphertext = ciphertext[:len(ciphertext)-gcmTagSize]

	var counter, tagMask [gcmBlockSize]byte
	g.deriveCounter(&counter, nonce)

	g.cipher.Encrypt(tagMask[:], counter[:])
	gcmInc32(&counter)

	var expectedTag [gcmBlockSize]byte
	g.auth(expectedTag[:], ciphertext, data, &tagMask)

	ret, out := sliceForAppend(dst, len(ciphertext))

	if subtle.ConstantTimeCompare(expectedTag[:gcmTagSize], tag) != 1 {
		// The AESNI code decrypts and authenticates concurrently, and
		// so overwrites dst in the event of a tag mismatch. That
		// behaviour is mimicked here in order to be consistent across
		// platforms.
		for i := range out {
			out[i] = 0
		}
		return nil, errOpen
	}

	g.counterCrypt(out, ciphertext, &counter)

	return ret, nil
}

// reverseBits reverses the order of the bits of 4-bit number in i.
func reverseBits(i int) int {
	i = ((i << 2) & 0xc) | ((i >> 2) & 0x3)
	i = ((i << 1) & 0xa) | ((i >> 1) & 0x5)
	return i
}
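Reviewer note (not part of the diff): a worked check of the bit-reversed table indexing that reverseBits implements for NewGCMWithNonceSize.

// Sketch: the 4-bit reversal used for productTable indexing.
// reverseBits maps 0..15 to:
//   0, 8, 4, 12, 2, 10, 6, 14, 1, 9, 5, 13, 3, 11, 7, 15
// so H is stored at index reverseBits(1) == 8 and 4*H at
// reverseBits(4) == 2, matching the "index 0010 (base 2) = 2"
// comment in NewGCMWithNonceSize.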

// gcmAdd adds two elements of GF(2¹²⁸) and returns the sum.
func gcmAdd(x, y *gcmFieldElement) gcmFieldElement {
	// Addition in a characteristic 2 field is just XOR.
	return gcmFieldElement{x.low ^ y.low, x.high ^ y.high}
}

// gcmDouble returns the result of doubling an element of GF(2¹²⁸).
func gcmDouble(x *gcmFieldElement) (double gcmFieldElement) {
	msbSet := x.high&1 == 1

	// Because of the bit-ordering, doubling is actually a right shift.
	double.high = x.high >> 1
	double.high |= x.low << 63
	double.low = x.low >> 1

	// If the most-significant bit was set before shifting then it,
	// conceptually, becomes a term of x^128. This is greater than the
	// irreducible polynomial so the result has to be reduced. The
	// irreducible polynomial is 1+x+x^2+x^7+x^128. We can subtract that to
	// eliminate the term at x^128 which also means subtracting the other
	// four terms. In characteristic 2 fields, subtraction == addition ==
	// XOR.
	if msbSet {
		double.low ^= 0xe100000000000000
	}

	return
}
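Reviewer note (not part of the diff): where gcmDouble's magic constant comes from.

// Sketch: the GCM polynomial is 1 + x + x^2 + x^7 + x^128. In the
// bit-reversed layout of gcmFieldElement the coefficients of
// x^0, x^1, x^2 and x^7 sit in bits 63, 62, 61 and 56 of .low, so the
// reduction XORs 0xe1 << 56, which is exactly the 0xe100000000000000
// literal above (and the source of the 0xe100 entry in the
// gcmReductionTable that follows).
const gcmReductionConstant = uint64(0xe1) << 56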

var gcmReductionTable = []uint16{
	0x0000, 0x1c20, 0x3840, 0x2460, 0x7080, 0x6ca0, 0x48c0, 0x54e0,
	0xe100, 0xfd20, 0xd940, 0xc560, 0x9180, 0x8da0, 0xa9c0, 0xb5e0,
}

// mul sets y to y*H, where H is the GCM key, fixed during NewGCMWithNonceSize.
func (g *gcm) mul(y *gcmFieldElement) {
	var z gcmFieldElement

	for i := 0; i < 2; i++ {
		word := y.high
		if i == 1 {
			word = y.low
		}

		// Multiplication works by multiplying z by 16 and adding in
		// one of the precomputed multiples of H.
		for j := 0; j < 64; j += 4 {
			msw := z.high & 0xf
			z.high >>= 4
			z.high |= z.low << 60
			z.low >>= 4
			z.low ^= uint64(gcmReductionTable[msw]) << 48

			// the values in |table| are ordered for
			// little-endian bit positions. See the comment
			// in NewGCMWithNonceSize.
			t := &g.productTable[word&0xf]

			z.low ^= t.low
			z.high ^= t.high
			word >>= 4
		}
	}

	*y = z
}

// updateBlocks extends y with more polynomial terms from blocks, based on
// Horner's rule. There must be a multiple of gcmBlockSize bytes in blocks.
func (g *gcm) updateBlocks(y *gcmFieldElement, blocks []byte) {
	for len(blocks) > 0 {
		y.low ^= getUint64(blocks)
		y.high ^= getUint64(blocks[8:])
		g.mul(y)
		blocks = blocks[gcmBlockSize:]
	}
}

// update extends y with more polynomial terms from data. If data is not a
// multiple of gcmBlockSize bytes long then the remainder is zero padded.
func (g *gcm) update(y *gcmFieldElement, data []byte) {
	fullBlocks := (len(data) >> 4) << 4
	g.updateBlocks(y, data[:fullBlocks])

	if len(data) != fullBlocks {
		var partialBlock [gcmBlockSize]byte
		copy(partialBlock[:], data[fullBlocks:])
		g.updateBlocks(y, partialBlock[:])
	}
}

// gcmInc32 treats the final four bytes of counterBlock as a big-endian value
// and increments it.
func gcmInc32(counterBlock *[16]byte) {
	for i := gcmBlockSize - 1; i >= gcmBlockSize-4; i-- {
		counterBlock[i]++
		if counterBlock[i] != 0 {
			break
		}
	}
}

// sliceForAppend takes a slice and a requested number of bytes. It returns a
// slice with the contents of the given slice followed by that many bytes and a
// second slice that aliases into it and contains only the extra bytes. If the
// original slice has sufficient capacity then no allocation is performed.
func sliceForAppend(in []byte, n int) (head, tail []byte) {
	if total := len(in) + n; cap(in) >= total {
		head = in[:total]
	} else {
		head = make([]byte, total)
		copy(head, in)
	}
	tail = head[len(in):]
	return
}

// counterCrypt crypts in to out using g.cipher in counter mode.
func (g *gcm) counterCrypt(out, in []byte, counter *[gcmBlockSize]byte) {
	var mask [gcmBlockSize]byte

	for len(in) >= gcmBlockSize {
		g.cipher.Encrypt(mask[:], counter[:])
		gcmInc32(counter)

		xorWords(out, in, mask[:])
		out = out[gcmBlockSize:]
		in = in[gcmBlockSize:]
	}

	if len(in) > 0 {
		g.cipher.Encrypt(mask[:], counter[:])
		gcmInc32(counter)
		xorBytes(out, in, mask[:])
	}
}

// deriveCounter computes the initial GCM counter state from the given nonce.
// See NIST SP 800-38D, section 7.1. This assumes that counter is filled with
// zeros on entry.
func (g *gcm) deriveCounter(counter *[gcmBlockSize]byte, nonce []byte) {
	// GCM has two modes of operation with respect to the initial counter
	// state: a "fast path" for 96-bit (12-byte) nonces, and a "slow path"
	// for nonces of other lengths. For a 96-bit nonce, the nonce, along
	// with a four-byte big-endian counter starting at one, is used
	// directly as the starting counter. For other nonce sizes, the counter
	// is computed by passing it through the GHASH function.
	if len(nonce) == gcmStandardNonceSize {
		copy(counter[:], nonce)
		counter[gcmBlockSize-1] = 1
	} else {
		var y gcmFieldElement
		g.update(&y, nonce)
		y.high ^= uint64(len(nonce)) * 8
		g.mul(&y)
		putUint64(counter[:8], y.low)
		putUint64(counter[8:], y.high)
	}
}
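Reviewer note (not part of the diff): a worked illustration of the 96-bit fast path described above.

// Sketch: for the standard 12-byte nonce the initial counter block is
//   nonce[0..11] || 00 00 00 01
// Seal/Open encrypt this block for the tag mask, then call gcmInc32, so
// the first block of keystream uses nonce[0..11] || 00 00 00 02.
func fastPathCounter(nonce [12]byte) [16]byte {
	var counter [16]byte
	copy(counter[:], nonce[:])
	counter[15] = 1
	return counter
}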

// auth calculates GHASH(ciphertext, additionalData), masks the result with
// tagMask and writes the result to out.
func (g *gcm) auth(out, ciphertext, additionalData []byte, tagMask *[gcmBlockSize]byte) {
	var y gcmFieldElement
	g.update(&y, additionalData)
	g.update(&y, ciphertext)

	y.low ^= uint64(len(additionalData)) * 8
	y.high ^= uint64(len(ciphertext)) * 8

	g.mul(&y)

	putUint64(out, y.low)
	putUint64(out[8:], y.high)

	xorWords(out, out, tagMask[:])
}

func getUint64(data []byte) uint64 {
	r := uint64(data[0])<<56 |
		uint64(data[1])<<48 |
		uint64(data[2])<<40 |
		uint64(data[3])<<32 |
		uint64(data[4])<<24 |
		uint64(data[5])<<16 |
		uint64(data[6])<<8 |
		uint64(data[7])
	return r
}

func putUint64(out []byte, v uint64) {
	out[0] = byte(v >> 56)
	out[1] = byte(v >> 48)
	out[2] = byte(v >> 40)
	out[3] = byte(v >> 32)
	out[4] = byte(v >> 24)
	out[5] = byte(v >> 16)
	out[6] = byte(v >> 8)
	out[7] = byte(v)
}
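Reviewer note (not part of the diff): getUint64 and putUint64 are plain big-endian (de)serialization, equivalent to the standard library's encoding/binary helpers.

// Sketch: equivalences, e.g. getUint64([]byte{0, 0, 0, 0, 0, 0, 1, 2}) == 0x102.
//   getUint64(b)    == binary.BigEndian.Uint64(b)
//   putUint64(b, v) == binary.BigEndian.PutUint64(b, v)   (import "encoding/binary")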
File diff suppressed because it is too large
@@ -0,0 +1,120 @@
package aes12_test

import (
	"bytes"
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"testing"

	"github.com/lucas-clemente/aes12"
)

const plaintextLen = 1000

var (
	key       []byte
	nonce     []byte
	aad       []byte
	plaintext []byte
)

func init() {
	key = make([]byte, 32)
	rand.Read(key)
	nonce = make([]byte, 12)
	rand.Read(nonce)
	aad = make([]byte, 42)
	rand.Read(aad)
	plaintext = make([]byte, plaintextLen)
	rand.Read(plaintext)
}

func TestEncryption(t *testing.T) {
	c, err := aes12.NewCipher(key)
	if err != nil {
		t.Fatal(err)
	}

	gcm, err := aes12.NewGCM(c)
	if err != nil {
		t.Fatal(err)
	}

	ciphertext := gcm.Seal(nil, nonce, plaintext, aad)

	if len(ciphertext) != plaintextLen+12 {
		t.Fatal("expected ciphertext to have len(plaintext)+12")
	}

	// Test that it matches the stdlib
	stdC, err := aes.NewCipher(key)
	if err != nil {
		t.Fatal(err)
	}
	stdGcm, err := cipher.NewGCM(stdC)
	if err != nil {
		t.Fatal(err)
	}
	stdCiphertext := stdGcm.Seal(nil, nonce, plaintext, aad)
	if !bytes.Equal(ciphertext, stdCiphertext[:len(stdCiphertext)-4]) {
		t.Fatal("did not match stdlib's ciphertext")
	}

	decrypted, err := gcm.Open(nil, nonce, ciphertext, aad)
	if err != nil {
		t.Fatal(err)
	}

	if !bytes.Equal(plaintext, decrypted) {
		t.Fatal("decryption yielded unexpected result")
	}
}

func TestInplaceEncryption(t *testing.T) {
	c, err := aes12.NewCipher(key)
	if err != nil {
		t.Fatal(err)
	}

	gcm, err := aes12.NewGCM(c)
	if err != nil {
		t.Fatal(err)
	}

	buffer := make([]byte, len(plaintext), len(plaintext)+12)
	copy(buffer, plaintext)

	ciphertext := gcm.Seal(buffer[:0], nonce, buffer, aad)

	if len(ciphertext) != plaintextLen+12 {
		t.Fatal("expected ciphertext to have len(plaintext)+12")
	}
	buffer = buffer[:len(plaintext)+12]
	if !bytes.Equal(ciphertext, buffer) {
		t.Fatal("ciphertext != buffer")
	}

	// Test that it matches the stdlib
	stdC, err := aes.NewCipher(key)
	if err != nil {
		t.Fatal(err)
	}
	stdGcm, err := cipher.NewGCM(stdC)
	if err != nil {
		t.Fatal(err)
	}
	stdCiphertext := stdGcm.Seal(nil, nonce, plaintext, aad)
	if !bytes.Equal(ciphertext, stdCiphertext[:len(stdCiphertext)-4]) {
		t.Fatal("did not match stdlib's ciphertext")
	}

	decrypted, err := gcm.Open(buffer[:0], nonce, buffer, aad)
	if err != nil {
		t.Fatal(err)
	}

	if !bytes.Equal(plaintext, decrypted) {
		t.Fatal("decryption yielded unexpected result")
	}
}
@@ -0,0 +1,84 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package aes12

import (
	"runtime"
	"unsafe"
)

const wordSize = int(unsafe.Sizeof(uintptr(0)))
const supportsUnaligned = runtime.GOARCH == "386" || runtime.GOARCH == "amd64" || runtime.GOARCH == "ppc64" || runtime.GOARCH == "ppc64le" || runtime.GOARCH == "s390x"

// fastXORBytes xors in bulk. It only works on architectures that
// support unaligned read/writes.
func fastXORBytes(dst, a, b []byte) int {
	n := len(a)
	if len(b) < n {
		n = len(b)
	}

	w := n / wordSize
	if w > 0 {
		dw := *(*[]uintptr)(unsafe.Pointer(&dst))
		aw := *(*[]uintptr)(unsafe.Pointer(&a))
		bw := *(*[]uintptr)(unsafe.Pointer(&b))
		for i := 0; i < w; i++ {
			dw[i] = aw[i] ^ bw[i]
		}
	}

	for i := (n - n%wordSize); i < n; i++ {
		dst[i] = a[i] ^ b[i]
	}

	return n
}

func safeXORBytes(dst, a, b []byte) int {
	n := len(a)
	if len(b) < n {
		n = len(b)
	}
	for i := 0; i < n; i++ {
		dst[i] = a[i] ^ b[i]
	}
	return n
}

// xorBytes xors the bytes in a and b. The destination is assumed to have enough
// space. Returns the number of bytes xor'd.
func xorBytes(dst, a, b []byte) int {
	if supportsUnaligned {
		return fastXORBytes(dst, a, b)
	} else {
		// TODO(hanwen): if (dst, a, b) have common alignment
		// we could still try fastXORBytes. It is not clear
		// how often this happens, and it's only worth it if
		// the block encryption itself is hardware
		// accelerated.
		return safeXORBytes(dst, a, b)
	}
}

// fastXORWords XORs multiples of 4 or 8 bytes (depending on architecture.)
// The arguments are assumed to be of equal length.
func fastXORWords(dst, a, b []byte) {
	dw := *(*[]uintptr)(unsafe.Pointer(&dst))
	aw := *(*[]uintptr)(unsafe.Pointer(&a))
	bw := *(*[]uintptr)(unsafe.Pointer(&b))
	n := len(b) / wordSize
	for i := 0; i < n; i++ {
		dw[i] = aw[i] ^ bw[i]
	}
}

func xorWords(dst, a, b []byte) {
	if supportsUnaligned {
		fastXORWords(dst, a, b)
	} else {
		safeXORBytes(dst, a, b)
	}
}
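Reviewer note (not part of the diff): a tiny usage sketch of xorBytes; it XORs min(len(a), len(b)) bytes into dst and returns that count.

// Sketch only.
func xorBytesExample() {
	dst := make([]byte, 4)
	n := xorBytes(dst, []byte{0xff, 0x0f, 0x00, 0xaa}, []byte{0x0f, 0x0f, 0xff})
	_ = n // n == 3; dst == {0xf0, 0x00, 0xff, 0x00}, dst[3] untouched
}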
@@ -0,0 +1,21 @@
The MIT License (MIT)

Copyright (c) 2016 Lucas Clemente

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@ -0,0 +1,3 @@
# fnv128a

Implementation of the FNV-1a 128bit hash in go
@ -0,0 +1,87 @@
// Package fnv128a implements FNV-1 and FNV-1a, non-cryptographic hash functions
// created by Glenn Fowler, Landon Curt Noll, and Phong Vo.
// See https://en.wikipedia.org/wiki/Fowler-Noll-Vo_hash_function.
//
// Write() algorithm taken and modified from github.com/romain-jacotin/quic
package fnv128a

import "hash"

// Hash128 is the common interface implemented by all 128-bit hash functions.
type Hash128 interface {
	hash.Hash
	Sum128() (uint64, uint64)
}

type sum128a struct {
	v0, v1, v2, v3 uint64
}

var _ Hash128 = &sum128a{}

// New returns a new 128-bit FNV-1a hash.Hash.
func New() Hash128 {
	s := &sum128a{}
	s.Reset()
	return s
}

func (s *sum128a) Reset() {
	s.v0 = 0x6295C58D
	s.v1 = 0x62B82175
	s.v2 = 0x07BB0142
	s.v3 = 0x6C62272E
}

func (s *sum128a) Sum128() (uint64, uint64) {
	return s.v3<<32 | s.v2, s.v1<<32 | s.v0
}

func (s *sum128a) Write(data []byte) (int, error) {
	var t0, t1, t2, t3 uint64
	const fnv128PrimeLow = 0x0000013B
	const fnv128PrimeShift = 24

	for _, v := range data {
		// xor the bottom with the current octet
		s.v0 ^= uint64(v)

		// multiply by the 128 bit FNV magic prime mod 2^128
		// fnv_prime = 309485009821345068724781371 (decimal)
		//           = 0x0000000001000000000000000000013B (hexadecimal)
		//           = 0x00000000 0x01000000 0x00000000 0x0000013B (in 4*32 words)
		//           = 0x0 1<<fnv128PrimeShift 0x0 fnv128PrimeLow
		//
		// fnv128PrimeLow = 0x0000013B
		// fnv128PrimeShift = 24

		// multiply by the lowest order digit base 2^32 and by the other non-zero digit
		t0 = s.v0 * fnv128PrimeLow
		t1 = s.v1 * fnv128PrimeLow
		t2 = s.v2*fnv128PrimeLow + s.v0<<fnv128PrimeShift
		t3 = s.v3*fnv128PrimeLow + s.v1<<fnv128PrimeShift

		// propagate carries
		t1 += (t0 >> 32)
		t2 += (t1 >> 32)
		t3 += (t2 >> 32)

		s.v0 = t0 & 0xffffffff
		s.v1 = t1 & 0xffffffff
		s.v2 = t2 & 0xffffffff
		s.v3 = t3 // & 0xffffffff
		// Doing a s.v3 &= 0xffffffff is not really needed since it simply
		// removes multiples of 2^128. We can discard these excess bits
		// outside of the loop when writing the hash in Little Endian.
	}

	return len(data), nil
}

func (s *sum128a) Size() int { return 16 }

func (s *sum128a) BlockSize() int { return 1 }

func (s *sum128a) Sum(in []byte) []byte {
	panic("FNV: not supported")
}
@ -0,0 +1,24 @@
package fnv128a_test

import (
	"testing"

	"github.com/lucas-clemente/fnv128a"
)

func TestNullHash(t *testing.T) {
	hash := fnv128a.New()
	h, l := hash.Sum128()
	if h != 0x6c62272e07bb0142 || l != 0x62b821756295c58d {
		t.FailNow()
	}
}

func TestHash(t *testing.T) {
	hash := fnv128a.New()
	_, err := hash.Write([]byte("foobar"))
	h, l := hash.Sum128()
	if err != nil || h != 0x343e1662793c64bf || l != 0x6f0d3597ba446f18 {
		t.FailNow()
	}
}
@ -0,0 +1,21 @@
MIT License

Copyright (c) 2016 Lucas Clemente

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@ -0,0 +1,3 @@
# certsets

Common certificate sets for quic-go
5824 vendor/github.com/lucas-clemente/quic-go-certificates/cert_set_2.go generated vendored Normal file
File diff suppressed because it is too large
5456 vendor/github.com/lucas-clemente/quic-go-certificates/cert_set_3.go generated vendored Normal file
File diff suppressed because it is too large
34 vendor/github.com/lucas-clemente/quic-go-certificates/createCertSets.rb generated vendored Executable file
@ -0,0 +1,34 @@
#!/usr/bin/env ruby
#
# Extract the common certificate sets from the chromium source to go
#
# Usage:
# createCertSets.rb 1 ~/src/chromium/src/net/quic/crypto/common_cert_set_1*
# createCertSets.rb 2 ~/src/chromium/src/net/quic/crypto/common_cert_set_2*

n = ARGV.shift
mainFile = ARGV.shift
dataFiles = ARGV

data = "package certsets\n"
data += File.read(mainFile)
data += (dataFiles.map{|p| File.read(p)}).join

# Good enough
data.gsub!(/\/\*(.*?)\*\//m, '')
data.gsub!(/^#include.+/, '')
data.gsub!(/^#if 0(.*?)\n#endif/m, '')

data.gsub!(/^static const size_t kNumCerts.+/, '')
data.gsub!(/static const size_t kLens[^}]+};/m, '')

data.gsub!('static const unsigned char* const kCerts[] = {', "var CertSet#{n} = [][]byte{")
data.gsub!('static const uint64_t kHash = UINT64_C', "const CertSet#{n}Hash uint64 = ")

data.gsub!(/static const unsigned char kDERCert(\d+)\[\] = /, "var kDERCert\\1 = []byte")

data.gsub!(/kDERCert(\d+)/, "certSet#{n}Cert\\1")

File.write("cert_set_#{n}.go", data)

system("gofmt -w -s cert_set_#{n}.go")
@ -0,0 +1,5 @@
root = true

[*]
indent_style = tab
indent_size = 2
@ -0,0 +1,3 @@
debug
debug.test
main
@ -0,0 +1,39 @@
sudo: required

addons:
  hosts:
  - quic.clemente.io

language: go

services:
  - docker

go:
  - 1.7.5
  - 1.8

# first part of the GOARCH workaround
# setting the GOARCH directly doesn't work, since the value will be overwritten later
# so set it to a temporary environment variable first
env:
  - TRAVIS_GOARCH=amd64 TESTMODE=unit
  - TRAVIS_GOARCH=amd64 TESTMODE=integration
  - TRAVIS_GOARCH=386 TESTMODE=unit
  - TRAVIS_GOARCH=386 TESTMODE=integration

# second part of the GOARCH workaround
# now actually set the GOARCH env variable to the value of the temporary variable set earlier
before_install:
  - go get golang.org/x/tools/cmd/cover
  - go get github.com/onsi/ginkgo/ginkgo
  - go get github.com/onsi/gomega
  - export GOARCH=$TRAVIS_GOARCH
  - go env # for debugging

script:
  # Retry building up to 3 times as documented here: https://docs.travis-ci.com/user/common-build-problems/#travis_retry
  - travis_retry .travis/script.sh

after_success:
  - .travis/after_success.sh
@ -0,0 +1,21 @@
MIT License

Copyright (c) 2016 the quic-go authors & Google, Inc.

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
@ -0,0 +1,68 @@
# A QUIC implementation in pure Go

<img src="docs/quic.png" width=303 height=124>

[![Godoc Reference](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat-square)](https://godoc.org/github.com/lucas-clemente/quic-go)
[![Linux Build Status](https://img.shields.io/travis/lucas-clemente/quic-go/master.svg?style=flat-square&label=linux+build)](https://travis-ci.org/lucas-clemente/quic-go)
[![Windows Build Status](https://img.shields.io/appveyor/ci/lucas-clemente/quic-go/master.svg?style=flat-square&label=windows+build)](https://ci.appveyor.com/project/lucas-clemente/quic-go/branch/master)
[![Code Coverage](https://img.shields.io/codecov/c/github/lucas-clemente/quic-go/master.svg?style=flat-square)](https://codecov.io/gh/lucas-clemente/quic-go/)

quic-go is an implementation of the [QUIC](https://en.wikipedia.org/wiki/QUIC) protocol in Go.

## Roadmap

quic-go is compatible with the current version(s) of Google Chrome and QUIC as deployed on Google's servers. We're actively tracking the development of the Chrome code to ensure compatibility as the protocol evolves. In that process, we're dropping support for old QUIC versions.
As Google's QUIC versions are expected to converge towards the [IETF QUIC draft](https://github.com/quicwg/base-drafts), quic-go will eventually implement that draft.

## Guides

We currently support Go 1.7+.

Installing deps:

    go get -t

Running tests:

    go test ./...

### Running the example server

    go run example/main.go -www /var/www/

Using the `quic_client` from chromium:

    quic_client --host=127.0.0.1 --port=6121 --v=1 https://quic.clemente.io

Using Chrome:

    /Applications/Google\ Chrome.app/Contents/MacOS/Google\ Chrome --user-data-dir=/tmp/chrome --no-proxy-server --enable-quic --origin-to-force-quic-on=quic.clemente.io:443 --host-resolver-rules='MAP quic.clemente.io:443 127.0.0.1:6121' https://quic.clemente.io

### Using the example client

    go run example/client/main.go https://clemente.io

## Usage

### As a server

See the [example server](example/main.go) or try out [Caddy](https://github.com/mholt/caddy) (from version 0.9, [instructions here](https://github.com/mholt/caddy/wiki/QUIC)). Starting a QUIC server is very similar to the standard lib http in go:

```go
http.Handle("/", http.FileServer(http.Dir(wwwDir)))
h2quic.ListenAndServeQUIC("localhost:4242", "/path/to/cert/chain.pem", "/path/to/privkey.pem", nil)
```
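
For reference, a minimal self-contained sketch of the snippet above might look like this (the document root and certificate paths are placeholders, and it assumes `ListenAndServeQUIC` returns an error like its `net/http` counterpart):

```go
package main

import (
	"log"
	"net/http"

	"github.com/lucas-clemente/quic-go/h2quic"
)

func main() {
	wwwDir := "/var/www" // placeholder document root
	http.Handle("/", http.FileServer(http.Dir(wwwDir)))
	// Cert chain and private key paths are placeholders, as in the snippet above.
	log.Fatal(h2quic.ListenAndServeQUIC("localhost:4242", "/path/to/cert/chain.pem", "/path/to/privkey.pem", nil))
}
```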

### As a client

See the [example client](example/client/main.go). Use a `QuicRoundTripper` as a `Transport` in a `http.Client`.

```go
http.Client{
	Transport: &h2quic.QuicRoundTripper{},
}
```
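
A complete client along these lines might look like the following sketch (the URL is just the example host used above):

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"

	"github.com/lucas-clemente/quic-go/h2quic"
)

func main() {
	// Plug the QUIC round tripper into a standard http.Client, as described above.
	client := &http.Client{
		Transport: &h2quic.QuicRoundTripper{},
	}
	resp, err := client.Get("https://clemente.io")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.Status, len(body), "bytes")
}
```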

## Contributing

We are always happy to welcome new contributors! We have a number of self-contained issues that are suitable for first-time contributors; they are tagged with [want-help](https://github.com/lucas-clemente/quic-go/issues?q=is%3Aopen+is%3Aissue+label%3Awant-help). If you have any questions, please feel free to reach out by opening an issue or leaving a comment.
@ -0,0 +1,7 @@
package main

import (
	_ "github.com/clipperhouse/linkedlist"
	_ "github.com/clipperhouse/slice"
	_ "github.com/clipperhouse/stringer"
)
13 vendor/github.com/lucas-clemente/quic-go/ackhandler/ackhandler_suite_test.go generated vendored Normal file
@ -0,0 +1,13 @@
package ackhandler

import (
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	"testing"
)

func TestCrypto(t *testing.T) {
	RegisterFailHandler(Fail)
	RunSpecs(t, "AckHandler Suite")
}
@ -0,0 +1,30 @@
package ackhandler

import (
	"time"

	"github.com/lucas-clemente/quic-go/frames"
	"github.com/lucas-clemente/quic-go/protocol"
)

// SentPacketHandler handles ACKs received for outgoing packets
type SentPacketHandler interface {
	SentPacket(packet *Packet) error
	ReceivedAck(ackFrame *frames.AckFrame, withPacketNumber protocol.PacketNumber, recvTime time.Time) error

	SendingAllowed() bool
	GetStopWaitingFrame(force bool) *frames.StopWaitingFrame
	DequeuePacketForRetransmission() (packet *Packet)
	GetLeastUnacked() protocol.PacketNumber

	GetAlarmTimeout() time.Time
	OnAlarm()
}

// ReceivedPacketHandler handles ACKs needed to send for incoming packets
type ReceivedPacketHandler interface {
	ReceivedPacket(packetNumber protocol.PacketNumber, shouldInstigateAck bool) error
	ReceivedStopWaiting(*frames.StopWaitingFrame) error

	GetAckFrame() *frames.AckFrame
}
@ -0,0 +1,34 @@
package ackhandler

import (
	"time"

	"github.com/lucas-clemente/quic-go/frames"
	"github.com/lucas-clemente/quic-go/protocol"
)

// A Packet is a packet
// +gen linkedlist
type Packet struct {
	PacketNumber    protocol.PacketNumber
	Frames          []frames.Frame
	Length          protocol.ByteCount
	EncryptionLevel protocol.EncryptionLevel

	SendTime time.Time
}

// GetFramesForRetransmission gets all the frames for retransmission
func (p *Packet) GetFramesForRetransmission() []frames.Frame {
	var fs []frames.Frame
	for _, frame := range p.Frames {
		switch frame.(type) {
		case *frames.AckFrame:
			continue
		case *frames.StopWaitingFrame:
			continue
		}
		fs = append(fs, frame)
	}
	return fs
}
214 vendor/github.com/lucas-clemente/quic-go/ackhandler/packet_linkedlist.go generated vendored Normal file
@ -0,0 +1,214 @@
|
||||||
|
// Generated by: main
|
||||||
|
// TypeWriter: linkedlist
|
||||||
|
// Directive: +gen on Packet
|
||||||
|
|
||||||
|
package ackhandler
|
||||||
|
|
||||||
|
// List is a modification of http://golang.org/pkg/container/list/
|
||||||
|
// Copyright 2009 The Go Authors. All rights reserved.
|
||||||
|
// Use of this source code is governed by a BSD-style
|
||||||
|
// license that can be found in the LICENSE file.
|
||||||
|
|
||||||
|
// PacketElement is an element of a linked list.
|
||||||
|
type PacketElement struct {
|
||||||
|
// Next and previous pointers in the doubly-linked list of elements.
|
||||||
|
// To simplify the implementation, internally a list l is implemented
|
||||||
|
// as a ring, such that &l.root is both the next element of the last
|
||||||
|
// list element (l.Back()) and the previous element of the first list
|
||||||
|
// element (l.Front()).
|
||||||
|
next, prev *PacketElement
|
||||||
|
|
||||||
|
// The list to which this element belongs.
|
||||||
|
list *PacketList
|
||||||
|
|
||||||
|
// The value stored with this element.
|
||||||
|
Value Packet
|
||||||
|
}
|
||||||
|
|
||||||
|
// Next returns the next list element or nil.
|
||||||
|
func (e *PacketElement) Next() *PacketElement {
|
||||||
|
if p := e.next; e.list != nil && p != &e.list.root {
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// Prev returns the previous list element or nil.
|
||||||
|
func (e *PacketElement) Prev() *PacketElement {
|
||||||
|
if p := e.prev; e.list != nil && p != &e.list.root {
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// PacketList represents a doubly linked list.
|
||||||
|
// The zero value for PacketList is an empty list ready to use.
|
||||||
|
type PacketList struct {
|
||||||
|
root PacketElement // sentinel list element, only &root, root.prev, and root.next are used
|
||||||
|
len int // current list length excluding (this) sentinel element
|
||||||
|
}
|
||||||
|
|
||||||
|
// Init initializes or clears list l.
|
||||||
|
func (l *PacketList) Init() *PacketList {
|
||||||
|
l.root.next = &l.root
|
||||||
|
l.root.prev = &l.root
|
||||||
|
l.len = 0
|
||||||
|
return l
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewPacketList returns an initialized list.
|
||||||
|
func NewPacketList() *PacketList { return new(PacketList).Init() }
|
||||||
|
|
||||||
|
// Len returns the number of elements of list l.
|
||||||
|
// The complexity is O(1).
|
||||||
|
func (l *PacketList) Len() int { return l.len }
|
||||||
|
|
||||||
|
// Front returns the first element of list l or nil.
|
||||||
|
func (l *PacketList) Front() *PacketElement {
|
||||||
|
if l.len == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return l.root.next
|
||||||
|
}
|
||||||
|
|
||||||
|
// Back returns the last element of list l or nil.
|
||||||
|
func (l *PacketList) Back() *PacketElement {
|
||||||
|
if l.len == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return l.root.prev
|
||||||
|
}
|
||||||
|
|
||||||
|
// lazyInit lazily initializes a zero PacketList value.
|
||||||
|
func (l *PacketList) lazyInit() {
|
||||||
|
if l.root.next == nil {
|
||||||
|
l.Init()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// insert inserts e after at, increments l.len, and returns e.
|
||||||
|
func (l *PacketList) insert(e, at *PacketElement) *PacketElement {
|
||||||
|
n := at.next
|
||||||
|
at.next = e
|
||||||
|
e.prev = at
|
||||||
|
e.next = n
|
||||||
|
n.prev = e
|
||||||
|
e.list = l
|
||||||
|
l.len++
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
// insertValue is a convenience wrapper for insert(&PacketElement{Value: v}, at).
|
||||||
|
func (l *PacketList) insertValue(v Packet, at *PacketElement) *PacketElement {
|
||||||
|
return l.insert(&PacketElement{Value: v}, at)
|
||||||
|
}
|
||||||
|
|
||||||
|
// remove removes e from its list, decrements l.len, and returns e.
|
||||||
|
func (l *PacketList) remove(e *PacketElement) *PacketElement {
|
||||||
|
e.prev.next = e.next
|
||||||
|
e.next.prev = e.prev
|
||||||
|
e.next = nil // avoid memory leaks
|
||||||
|
e.prev = nil // avoid memory leaks
|
||||||
|
e.list = nil
|
||||||
|
l.len--
|
||||||
|
return e
|
||||||
|
}
|
||||||
|
|
||||||
|
// Remove removes e from l if e is an element of list l.
|
||||||
|
// It returns the element value e.Value.
|
||||||
|
func (l *PacketList) Remove(e *PacketElement) Packet {
|
||||||
|
if e.list == l {
|
||||||
|
// if e.list == l, l must have been initialized when e was inserted
|
||||||
|
// in l or l == nil (e is a zero PacketElement) and l.remove will crash
|
||||||
|
l.remove(e)
|
||||||
|
}
|
||||||
|
return e.Value
|
||||||
|
}
|
||||||
|
|
||||||
|
// PushFront inserts a new element e with value v at the front of list l and returns e.
|
||||||
|
func (l *PacketList) PushFront(v Packet) *PacketElement {
|
||||||
|
l.lazyInit()
|
||||||
|
return l.insertValue(v, &l.root)
|
||||||
|
}
|
||||||
|
|
||||||
|
// PushBack inserts a new element e with value v at the back of list l and returns e.
|
||||||
|
func (l *PacketList) PushBack(v Packet) *PacketElement {
|
||||||
|
l.lazyInit()
|
||||||
|
return l.insertValue(v, l.root.prev)
|
||||||
|
}
|
||||||
|
|
||||||
|
// InsertBefore inserts a new element e with value v immediately before mark and returns e.
|
||||||
|
// If mark is not an element of l, the list is not modified.
|
||||||
|
func (l *PacketList) InsertBefore(v Packet, mark *PacketElement) *PacketElement {
|
||||||
|
if mark.list != l {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// see comment in PacketList.Remove about initialization of l
|
||||||
|
return l.insertValue(v, mark.prev)
|
||||||
|
}
|
||||||
|
|
||||||
|
// InsertAfter inserts a new element e with value v immediately after mark and returns e.
|
||||||
|
// If mark is not an element of l, the list is not modified.
|
||||||
|
func (l *PacketList) InsertAfter(v Packet, mark *PacketElement) *PacketElement {
|
||||||
|
if mark.list != l {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
// see comment in PacketList.Remove about initialization of l
|
||||||
|
return l.insertValue(v, mark)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MoveToFront moves element e to the front of list l.
|
||||||
|
// If e is not an element of l, the list is not modified.
|
||||||
|
func (l *PacketList) MoveToFront(e *PacketElement) {
|
||||||
|
if e.list != l || l.root.next == e {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// see comment in PacketList.Remove about initialization of l
|
||||||
|
l.insert(l.remove(e), &l.root)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MoveToBack moves element e to the back of list l.
|
||||||
|
// If e is not an element of l, the list is not modified.
|
||||||
|
func (l *PacketList) MoveToBack(e *PacketElement) {
|
||||||
|
if e.list != l || l.root.prev == e {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
// see comment in PacketList.Remove about initialization of l
|
||||||
|
l.insert(l.remove(e), l.root.prev)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MoveBefore moves element e to its new position before mark.
|
||||||
|
// If e or mark is not an element of l, or e == mark, the list is not modified.
|
||||||
|
func (l *PacketList) MoveBefore(e, mark *PacketElement) {
|
||||||
|
if e.list != l || e == mark || mark.list != l {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
l.insert(l.remove(e), mark.prev)
|
||||||
|
}
|
||||||
|
|
||||||
|
// MoveAfter moves element e to its new position after mark.
|
||||||
|
// If e is not an element of l, or e == mark, the list is not modified.
|
||||||
|
func (l *PacketList) MoveAfter(e, mark *PacketElement) {
|
||||||
|
if e.list != l || e == mark || mark.list != l {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
l.insert(l.remove(e), mark)
|
||||||
|
}
|
||||||
|
|
||||||
|
// PushBackList inserts a copy of an other list at the back of list l.
|
||||||
|
// The lists l and other may be the same.
|
||||||
|
func (l *PacketList) PushBackList(other *PacketList) {
|
||||||
|
l.lazyInit()
|
||||||
|
for i, e := other.Len(), other.Front(); i > 0; i, e = i-1, e.Next() {
|
||||||
|
l.insertValue(e.Value, l.root.prev)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// PushFrontList inserts a copy of an other list at the front of list l.
|
||||||
|
// The lists l and other may be the same.
|
||||||
|
func (l *PacketList) PushFrontList(other *PacketList) {
|
||||||
|
l.lazyInit()
|
||||||
|
for i, e := other.Len(), other.Back(); i > 0; i, e = i-1, e.Prev() {
|
||||||
|
l.insertValue(e.Value, &l.root)
|
||||||
|
}
|
||||||
|
}
|
51 vendor/github.com/lucas-clemente/quic-go/ackhandler/packet_test.go generated vendored Normal file
@ -0,0 +1,51 @@
package ackhandler

import (
	"github.com/lucas-clemente/quic-go/frames"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var _ = Describe("Packet", func() {
	Context("getting frames for retransmission", func() {
		ackFrame := &frames.AckFrame{LargestAcked: 13}
		stopWaitingFrame := &frames.StopWaitingFrame{LeastUnacked: 7331}
		windowUpdateFrame := &frames.WindowUpdateFrame{StreamID: 999}

		streamFrame := &frames.StreamFrame{
			StreamID: 5,
			Data:     []byte{0x13, 0x37},
		}

		rstStreamFrame := &frames.RstStreamFrame{
			StreamID:  555,
			ErrorCode: 1337,
		}

		It("returns nil if there are no retransmittable frames", func() {
			packet := &Packet{
				Frames: []frames.Frame{ackFrame, stopWaitingFrame},
			}
			Expect(packet.GetFramesForRetransmission()).To(BeNil())
		})

		It("returns all retransmittable frames", func() {
			packet := &Packet{
				Frames: []frames.Frame{
					windowUpdateFrame,
					ackFrame,
					stopWaitingFrame,
					streamFrame,
					rstStreamFrame,
				},
			}
			fs := packet.GetFramesForRetransmission()
			Expect(fs).To(ContainElement(streamFrame))
			Expect(fs).To(ContainElement(rstStreamFrame))
			Expect(fs).To(ContainElement(windowUpdateFrame))
			Expect(fs).ToNot(ContainElement(stopWaitingFrame))
			Expect(fs).ToNot(ContainElement(ackFrame))
		})

	})
})
166 vendor/github.com/lucas-clemente/quic-go/ackhandler/received_packet_handler.go generated vendored Normal file
@ -0,0 +1,166 @@
package ackhandler

import (
	"errors"
	"time"

	"github.com/lucas-clemente/quic-go/frames"
	"github.com/lucas-clemente/quic-go/protocol"
)

var (
	// ErrDuplicatePacket occurs when a duplicate packet is received
	ErrDuplicatePacket = errors.New("ReceivedPacketHandler: Duplicate Packet")
	// ErrPacketSmallerThanLastStopWaiting occurs when a packet arrives with a packet number smaller than the largest LeastUnacked of a StopWaitingFrame. If this error occurs, the packet should be ignored
	ErrPacketSmallerThanLastStopWaiting = errors.New("ReceivedPacketHandler: Packet number smaller than highest StopWaiting")
)

var errInvalidPacketNumber = errors.New("ReceivedPacketHandler: Invalid packet number")

type receivedPacketHandler struct {
	largestObserved             protocol.PacketNumber
	ignorePacketsBelow          protocol.PacketNumber
	largestObservedReceivedTime time.Time

	packetHistory *receivedPacketHistory

	ackSendDelay time.Duration

	packetsReceivedSinceLastAck                int
	retransmittablePacketsReceivedSinceLastAck int
	ackQueued                                  bool
	ackAlarm                                   time.Time
	ackAlarmResetCallback                      func(time.Time)
	lastAck                                    *frames.AckFrame
}

// NewReceivedPacketHandler creates a new receivedPacketHandler
func NewReceivedPacketHandler(ackAlarmResetCallback func(time.Time)) ReceivedPacketHandler {
	// create a stopped timer, see https://github.com/golang/go/issues/12721#issuecomment-143010182
	timer := time.NewTimer(0)
	<-timer.C

	return &receivedPacketHandler{
		packetHistory:         newReceivedPacketHistory(),
		ackAlarmResetCallback: ackAlarmResetCallback,
		ackSendDelay:          protocol.AckSendDelay,
	}
}

func (h *receivedPacketHandler) ReceivedPacket(packetNumber protocol.PacketNumber, shouldInstigateAck bool) error {
	if packetNumber == 0 {
		return errInvalidPacketNumber
	}

	// if the packet number is smaller than the largest LeastUnacked value of a StopWaiting we received, we cannot detect if this packet has a duplicate number
	// the packet has to be ignored anyway
	if packetNumber <= h.ignorePacketsBelow {
		return ErrPacketSmallerThanLastStopWaiting
	}

	if h.packetHistory.IsDuplicate(packetNumber) {
		return ErrDuplicatePacket
	}

	err := h.packetHistory.ReceivedPacket(packetNumber)
	if err != nil {
		return err
	}

	if packetNumber > h.largestObserved {
		h.largestObserved = packetNumber
		h.largestObservedReceivedTime = time.Now()
	}

	h.maybeQueueAck(packetNumber, shouldInstigateAck)
	return nil
}

func (h *receivedPacketHandler) ReceivedStopWaiting(f *frames.StopWaitingFrame) error {
	// ignore if StopWaiting is unneeded, because we already received a StopWaiting with a higher LeastUnacked
	if h.ignorePacketsBelow >= f.LeastUnacked {
		return nil
	}

	h.ignorePacketsBelow = f.LeastUnacked - 1

	h.packetHistory.DeleteBelow(f.LeastUnacked)
	return nil
}

func (h *receivedPacketHandler) maybeQueueAck(packetNumber protocol.PacketNumber, shouldInstigateAck bool) {
	var ackAlarmSet bool
	h.packetsReceivedSinceLastAck++

	if shouldInstigateAck {
		h.retransmittablePacketsReceivedSinceLastAck++
	}

	// always ack the first packet
	if h.lastAck == nil {
		h.ackQueued = true
	}

	// Always send an ack every 20 packets in order to allow the peer to discard
	// information from the SentPacketManager and provide an RTT measurement.
	if h.packetsReceivedSinceLastAck >= protocol.MaxPacketsReceivedBeforeAckSend {
		h.ackQueued = true
	}

	// if the packet number is smaller than the largest acked packet, it must have been reported missing with the last ACK
	// note that it cannot be a duplicate because they're already filtered out by ReceivedPacket()
	if h.lastAck != nil && packetNumber < h.lastAck.LargestAcked {
		h.ackQueued = true
	}

	// check if a new missing range above the previously acked packets was created
	if h.lastAck != nil && h.packetHistory.GetHighestAckRange().FirstPacketNumber > h.lastAck.LargestAcked {
		h.ackQueued = true
	}

	if !h.ackQueued && shouldInstigateAck {
		if h.retransmittablePacketsReceivedSinceLastAck >= protocol.RetransmittablePacketsBeforeAck {
			h.ackQueued = true
		} else {
			if h.ackAlarm.IsZero() {
				h.ackAlarm = time.Now().Add(h.ackSendDelay)
				ackAlarmSet = true
			}
		}
	}

	if h.ackQueued {
		// cancel the ack alarm
		h.ackAlarm = time.Time{}
		ackAlarmSet = false
	}

	if ackAlarmSet {
		h.ackAlarmResetCallback(h.ackAlarm)
	}
}

func (h *receivedPacketHandler) GetAckFrame() *frames.AckFrame {
	if !h.ackQueued && (h.ackAlarm.IsZero() || h.ackAlarm.After(time.Now())) {
		return nil
	}

	ackRanges := h.packetHistory.GetAckRanges()
	ack := &frames.AckFrame{
		LargestAcked:       h.largestObserved,
		LowestAcked:        ackRanges[len(ackRanges)-1].FirstPacketNumber,
		PacketReceivedTime: h.largestObservedReceivedTime,
	}

	if len(ackRanges) > 1 {
		ack.AckRanges = ackRanges
	}

	h.lastAck = ack
	h.ackAlarm = time.Time{}
	h.ackQueued = false
	h.packetsReceivedSinceLastAck = 0
	h.retransmittablePacketsReceivedSinceLastAck = 0

	return ack
}
328 vendor/github.com/lucas-clemente/quic-go/ackhandler/received_packet_handler_test.go generated vendored Normal file
@ -0,0 +1,328 @@
|
||||||
|
package ackhandler
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/lucas-clemente/quic-go/frames"
|
||||||
|
"github.com/lucas-clemente/quic-go/protocol"
|
||||||
|
|
||||||
|
. "github.com/onsi/ginkgo"
|
||||||
|
. "github.com/onsi/gomega"
|
||||||
|
)
|
||||||
|
|
||||||
|
var _ = Describe("receivedPacketHandler", func() {
|
||||||
|
var (
|
||||||
|
handler *receivedPacketHandler
|
||||||
|
ackAlarmCallbackCalled bool
|
||||||
|
)
|
||||||
|
|
||||||
|
ackAlarmCallback := func(time.Time) {
|
||||||
|
ackAlarmCallbackCalled = true
|
||||||
|
}
|
||||||
|
|
||||||
|
BeforeEach(func() {
|
||||||
|
ackAlarmCallbackCalled = false
|
||||||
|
handler = NewReceivedPacketHandler(ackAlarmCallback).(*receivedPacketHandler)
|
||||||
|
})
|
||||||
|
|
||||||
|
Context("accepting packets", func() {
|
||||||
|
It("handles a packet that arrives late", func() {
|
||||||
|
err := handler.ReceivedPacket(protocol.PacketNumber(1), true)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
err = handler.ReceivedPacket(protocol.PacketNumber(3), true)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
err = handler.ReceivedPacket(protocol.PacketNumber(2), true)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
})
|
||||||
|
|
||||||
|
It("rejects packets with packet number 0", func() {
|
||||||
|
err := handler.ReceivedPacket(protocol.PacketNumber(0), true)
|
||||||
|
Expect(err).To(MatchError(errInvalidPacketNumber))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("rejects a duplicate package", func() {
|
||||||
|
for i := 1; i < 5; i++ {
|
||||||
|
err := handler.ReceivedPacket(protocol.PacketNumber(i), true)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
}
|
||||||
|
err := handler.ReceivedPacket(4, true)
|
||||||
|
Expect(err).To(MatchError(ErrDuplicatePacket))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("ignores a packet with PacketNumber less than the LeastUnacked of a previously received StopWaiting", func() {
|
||||||
|
err := handler.ReceivedPacket(5, true)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
err = handler.ReceivedStopWaiting(&frames.StopWaitingFrame{LeastUnacked: 10})
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
err = handler.ReceivedPacket(9, true)
|
||||||
|
Expect(err).To(MatchError(ErrPacketSmallerThanLastStopWaiting))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("does not ignore a packet with PacketNumber equal to LeastUnacked of a previously received StopWaiting", func() {
|
||||||
|
err := handler.ReceivedPacket(5, true)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
err = handler.ReceivedStopWaiting(&frames.StopWaitingFrame{LeastUnacked: 10})
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
err = handler.ReceivedPacket(10, true)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
})
|
||||||
|
|
||||||
|
It("saves the time when each packet arrived", func() {
|
||||||
|
err := handler.ReceivedPacket(protocol.PacketNumber(3), true)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
Expect(handler.largestObservedReceivedTime).To(BeTemporally("~", time.Now(), 10*time.Millisecond))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("updates the largestObserved and the largestObservedReceivedTime", func() {
|
||||||
|
handler.largestObserved = 3
|
||||||
|
handler.largestObservedReceivedTime = time.Now().Add(-1 * time.Second)
|
||||||
|
err := handler.ReceivedPacket(5, true)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
Expect(handler.largestObserved).To(Equal(protocol.PacketNumber(5)))
|
||||||
|
Expect(handler.largestObservedReceivedTime).To(BeTemporally("~", time.Now(), 10*time.Millisecond))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("doesn't update the largestObserved and the largestObservedReceivedTime for a belated packet", func() {
|
||||||
|
timestamp := time.Now().Add(-1 * time.Second)
|
||||||
|
handler.largestObserved = 5
|
||||||
|
handler.largestObservedReceivedTime = timestamp
|
||||||
|
err := handler.ReceivedPacket(4, true)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
Expect(handler.largestObserved).To(Equal(protocol.PacketNumber(5)))
|
||||||
|
Expect(handler.largestObservedReceivedTime).To(Equal(timestamp))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("doesn't store more than MaxTrackedReceivedPackets packets", func() {
|
||||||
|
err := handler.ReceivedPacket(1, true)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
for i := protocol.PacketNumber(3); i < 3+protocol.MaxTrackedReceivedPackets-1; i++ {
|
||||||
|
err := handler.ReceivedPacket(protocol.PacketNumber(i), true)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
}
|
||||||
|
err = handler.ReceivedPacket(protocol.PacketNumber(protocol.MaxTrackedReceivedPackets)+10, true)
|
||||||
|
Expect(err).To(MatchError(errTooManyOutstandingReceivedPackets))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("passes on errors from receivedPacketHistory", func() {
|
||||||
|
var err error
|
||||||
|
for i := protocol.PacketNumber(0); i < 5*protocol.MaxTrackedReceivedAckRanges; i++ {
|
||||||
|
err = handler.ReceivedPacket(2*i+1, true)
|
||||||
|
// this will eventually return an error
|
||||||
|
// details about when exactly the receivedPacketHistory errors are tested there
|
||||||
|
if err != nil {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Expect(err).To(MatchError(errTooManyOutstandingReceivedAckRanges))
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
Context("handling STOP_WAITING frames", func() {
|
||||||
|
It("increases the ignorePacketsBelow number", func() {
|
||||||
|
err := handler.ReceivedStopWaiting(&frames.StopWaitingFrame{LeastUnacked: protocol.PacketNumber(12)})
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
Expect(handler.ignorePacketsBelow).To(Equal(protocol.PacketNumber(11)))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("increase the ignorePacketsBelow number, even if all packets below the LeastUnacked were already acked", func() {
|
||||||
|
for i := 1; i < 20; i++ {
|
||||||
|
err := handler.ReceivedPacket(protocol.PacketNumber(i), true)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
}
|
||||||
|
err := handler.ReceivedStopWaiting(&frames.StopWaitingFrame{LeastUnacked: protocol.PacketNumber(12)})
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
Expect(handler.ignorePacketsBelow).To(Equal(protocol.PacketNumber(11)))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("does not decrease the ignorePacketsBelow number when an out-of-order StopWaiting arrives", func() {
|
||||||
|
err := handler.ReceivedStopWaiting(&frames.StopWaitingFrame{LeastUnacked: protocol.PacketNumber(12)})
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
Expect(handler.ignorePacketsBelow).To(Equal(protocol.PacketNumber(11)))
|
||||||
|
err = handler.ReceivedStopWaiting(&frames.StopWaitingFrame{LeastUnacked: protocol.PacketNumber(6)})
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
Expect(handler.ignorePacketsBelow).To(Equal(protocol.PacketNumber(11)))
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
Context("ACKs", func() {
|
||||||
|
Context("queueing ACKs", func() {
|
||||||
|
receiveAndAck10Packets := func() {
|
||||||
|
for i := 1; i <= 10; i++ {
|
||||||
|
err := handler.ReceivedPacket(protocol.PacketNumber(i), true)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
}
|
||||||
|
Expect(handler.GetAckFrame()).ToNot(BeNil())
|
||||||
|
Expect(handler.ackQueued).To(BeFalse())
|
||||||
|
ackAlarmCallbackCalled = false
|
||||||
|
}
|
||||||
|
|
||||||
|
It("always queues an ACK for the first packet", func() {
|
||||||
|
err := handler.ReceivedPacket(1, false)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
Expect(handler.ackQueued).To(BeTrue())
|
||||||
|
Expect(ackAlarmCallbackCalled).To(BeFalse())
|
||||||
|
})
|
||||||
|
|
||||||
|
It("only queues one ACK for many non-retransmittable packets", func() {
|
||||||
|
receiveAndAck10Packets()
|
||||||
|
for i := 11; i < 10+protocol.MaxPacketsReceivedBeforeAckSend; i++ {
|
||||||
|
err := handler.ReceivedPacket(protocol.PacketNumber(i), false)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
Expect(handler.ackQueued).To(BeFalse())
|
||||||
|
}
|
||||||
|
err := handler.ReceivedPacket(10+protocol.MaxPacketsReceivedBeforeAckSend, false)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
Expect(handler.ackQueued).To(BeTrue())
|
||||||
|
Expect(ackAlarmCallbackCalled).To(BeFalse())
|
||||||
|
})
|
||||||
|
|
||||||
|
It("queues an ACK for every second retransmittable packet, if they are arriving fast", func() {
|
||||||
|
receiveAndAck10Packets()
|
||||||
|
err := handler.ReceivedPacket(11, true)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
Expect(handler.ackQueued).To(BeFalse())
|
||||||
|
Expect(ackAlarmCallbackCalled).To(BeTrue())
|
||||||
|
ackAlarmCallbackCalled = false
|
||||||
|
err = handler.ReceivedPacket(12, true)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
Expect(handler.ackQueued).To(BeTrue())
|
||||||
|
Expect(ackAlarmCallbackCalled).To(BeFalse())
|
||||||
|
})
|
||||||
|
|
||||||
|
It("only sets the timer when receiving a retransmittable packets", func() {
|
||||||
|
receiveAndAck10Packets()
|
||||||
|
err := handler.ReceivedPacket(11, false)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
Expect(handler.ackQueued).To(BeFalse())
|
||||||
|
Expect(handler.ackAlarm).To(BeZero())
|
||||||
|
err = handler.ReceivedPacket(12, true)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
Expect(handler.ackQueued).To(BeFalse())
|
||||||
|
Expect(handler.ackAlarm).ToNot(BeZero())
|
||||||
|
Expect(ackAlarmCallbackCalled).To(BeTrue())
|
||||||
|
})
|
||||||
|
|
||||||
|
It("queues an ACK if it was reported missing before", func() {
|
||||||
|
receiveAndAck10Packets()
|
||||||
|
err := handler.ReceivedPacket(11, true)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
err = handler.ReceivedPacket(13, true)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
ack := handler.GetAckFrame() // ACK: 1 and 3, missing: 2
|
||||||
|
Expect(ack).ToNot(BeNil())
|
||||||
|
Expect(ack.HasMissingRanges()).To(BeTrue())
|
||||||
|
Expect(handler.ackQueued).To(BeFalse())
|
||||||
|
err = handler.ReceivedPacket(12, false)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
Expect(handler.ackQueued).To(BeTrue())
|
||||||
|
})
|
||||||
|
|
||||||
|
It("queues an ACK if it creates a new missing range", func() {
|
||||||
|
receiveAndAck10Packets()
|
||||||
|
for i := 11; i < 16; i++ {
|
||||||
|
err := handler.ReceivedPacket(protocol.PacketNumber(i), true)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
}
|
||||||
|
Expect(handler.GetAckFrame()).ToNot(BeNil())
|
||||||
|
handler.ReceivedPacket(20, true) // we now know that packets 16 to 19 are missing
|
||||||
|
Expect(handler.ackQueued).To(BeTrue())
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
Context("ACK generation", func() {
|
||||||
|
BeforeEach(func() {
|
||||||
|
handler.ackQueued = true
|
||||||
|
})
|
||||||
|
|
||||||
|
It("generates a simple ACK frame", func() {
|
||||||
|
err := handler.ReceivedPacket(1, true)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
err = handler.ReceivedPacket(2, true)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
ack := handler.GetAckFrame()
|
||||||
|
Expect(ack).ToNot(BeNil())
|
||||||
|
Expect(ack.LargestAcked).To(Equal(protocol.PacketNumber(2)))
|
||||||
|
Expect(ack.LowestAcked).To(Equal(protocol.PacketNumber(1)))
|
||||||
|
Expect(ack.AckRanges).To(BeEmpty())
|
||||||
|
})
|
||||||
|
|
||||||
|
It("saves the last sent ACK", func() {
|
||||||
|
err := handler.ReceivedPacket(1, true)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
ack := handler.GetAckFrame()
|
||||||
|
Expect(ack).ToNot(BeNil())
|
||||||
|
Expect(handler.lastAck).To(Equal(ack))
|
||||||
|
err = handler.ReceivedPacket(2, true)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
handler.ackQueued = true
|
||||||
|
ack = handler.GetAckFrame()
|
||||||
|
Expect(ack).ToNot(BeNil())
|
||||||
|
Expect(handler.lastAck).To(Equal(ack))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("generates an ACK frame with missing packets", func() {
|
||||||
|
err := handler.ReceivedPacket(1, true)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
err = handler.ReceivedPacket(4, true)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
ack := handler.GetAckFrame()
|
||||||
|
Expect(ack).ToNot(BeNil())
|
||||||
|
Expect(ack.LargestAcked).To(Equal(protocol.PacketNumber(4)))
|
||||||
|
Expect(ack.LowestAcked).To(Equal(protocol.PacketNumber(1)))
|
||||||
|
Expect(ack.AckRanges).To(HaveLen(2))
|
||||||
|
Expect(ack.AckRanges[0]).To(Equal(frames.AckRange{FirstPacketNumber: 4, LastPacketNumber: 4}))
|
||||||
|
Expect(ack.AckRanges[1]).To(Equal(frames.AckRange{FirstPacketNumber: 1, LastPacketNumber: 1}))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("deletes packets from the packetHistory after receiving a StopWaiting, after continuously received packets", func() {
|
||||||
|
for i := 1; i <= 12; i++ {
|
||||||
|
err := handler.ReceivedPacket(protocol.PacketNumber(i), true)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
}
|
||||||
|
err := handler.ReceivedStopWaiting(&frames.StopWaitingFrame{LeastUnacked: protocol.PacketNumber(6)})
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
// check that the packets were deleted from the receivedPacketHistory by checking the values in an ACK frame
|
||||||
|
ack := handler.GetAckFrame()
|
||||||
|
Expect(ack).ToNot(BeNil())
|
||||||
|
Expect(ack.LargestAcked).To(Equal(protocol.PacketNumber(12)))
|
||||||
|
Expect(ack.LowestAcked).To(Equal(protocol.PacketNumber(6)))
|
||||||
|
Expect(ack.HasMissingRanges()).To(BeFalse())
|
||||||
|
})
|
||||||
|
|
||||||
|
It("resets all counters needed for the ACK queueing decision when sending an ACK", func() {
|
||||||
|
err := handler.ReceivedPacket(1, true)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
handler.ackAlarm = time.Now().Add(-time.Minute)
|
||||||
|
Expect(handler.GetAckFrame()).ToNot(BeNil())
|
||||||
|
Expect(handler.packetsReceivedSinceLastAck).To(BeZero())
|
||||||
|
Expect(handler.ackAlarm).To(BeZero())
|
||||||
|
Expect(handler.retransmittablePacketsReceivedSinceLastAck).To(BeZero())
|
||||||
|
Expect(handler.ackQueued).To(BeFalse())
|
||||||
|
})
|
||||||
|
|
||||||
|
It("doesn't generate an ACK when none is queued and the timer is not set", func() {
|
||||||
|
err := handler.ReceivedPacket(1, true)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
handler.ackQueued = false
|
||||||
|
handler.ackAlarm = time.Time{}
|
||||||
|
Expect(handler.GetAckFrame()).To(BeNil())
|
||||||
|
})
|
||||||
|
|
||||||
|
It("doesn't generate an ACK when none is queued and the timer has not yet expired", func() {
|
||||||
|
err := handler.ReceivedPacket(1, true)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
handler.ackQueued = false
|
||||||
|
handler.ackAlarm = time.Now().Add(time.Minute)
|
||||||
|
Expect(handler.GetAckFrame()).To(BeNil())
|
||||||
|
})
|
||||||
|
|
||||||
|
It("generates an ACK when the timer has expired", func() {
|
||||||
|
err := handler.ReceivedPacket(1, true)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
handler.ackQueued = false
|
||||||
|
handler.ackAlarm = time.Now().Add(-time.Minute)
|
||||||
|
Expect(handler.GetAckFrame()).ToNot(BeNil())
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
145 vendor/github.com/lucas-clemente/quic-go/ackhandler/received_packet_history.go generated vendored Normal file
@ -0,0 +1,145 @@
|
||||||
|
package ackhandler
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/lucas-clemente/quic-go/frames"
|
||||||
|
"github.com/lucas-clemente/quic-go/protocol"
|
||||||
|
"github.com/lucas-clemente/quic-go/qerr"
|
||||||
|
"github.com/lucas-clemente/quic-go/utils"
|
||||||
|
)
|
||||||
|
|
||||||
|
type receivedPacketHistory struct {
|
||||||
|
ranges *utils.PacketIntervalList
|
||||||
|
|
||||||
|
// the map is used as a replacement for a set here. The bool is always supposed to be set to true
|
||||||
|
receivedPacketNumbers map[protocol.PacketNumber]bool
|
||||||
|
lowestInReceivedPacketNumbers protocol.PacketNumber
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
errTooManyOutstandingReceivedAckRanges = qerr.Error(qerr.TooManyOutstandingReceivedPackets, "Too many outstanding received ACK ranges")
|
||||||
|
errTooManyOutstandingReceivedPackets = qerr.Error(qerr.TooManyOutstandingReceivedPackets, "Too many outstanding received packets")
|
||||||
|
)
|
||||||
|
|
||||||
|
// newReceivedPacketHistory creates a new received packet history
|
||||||
|
func newReceivedPacketHistory() *receivedPacketHistory {
|
||||||
|
return &receivedPacketHistory{
|
||||||
|
ranges: utils.NewPacketIntervalList(),
|
||||||
|
receivedPacketNumbers: make(map[protocol.PacketNumber]bool),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// ReceivedPacket registers a packet with PacketNumber p and updates the ranges
|
||||||
|
func (h *receivedPacketHistory) ReceivedPacket(p protocol.PacketNumber) error {
|
||||||
|
if h.ranges.Len() >= protocol.MaxTrackedReceivedAckRanges {
|
||||||
|
return errTooManyOutstandingReceivedAckRanges
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(h.receivedPacketNumbers) >= protocol.MaxTrackedReceivedPackets {
|
||||||
|
return errTooManyOutstandingReceivedPackets
|
||||||
|
}
|
||||||
|
|
||||||
|
    h.receivedPacketNumbers[p] = true

    if h.ranges.Len() == 0 {
        h.ranges.PushBack(utils.PacketInterval{Start: p, End: p})
        return nil
    }

    for el := h.ranges.Back(); el != nil; el = el.Prev() {
        // p already included in an existing range. Nothing to do here
        if p >= el.Value.Start && p <= el.Value.End {
            return nil
        }

        var rangeExtended bool
        if el.Value.End == p-1 { // extend a range at the end
            rangeExtended = true
            el.Value.End = p
        } else if el.Value.Start == p+1 { // extend a range at the beginning
            rangeExtended = true
            el.Value.Start = p
        }

        // if a range was extended (either at the beginning or at the end), it may be possible to merge two ranges into one
        if rangeExtended {
            prev := el.Prev()
            if prev != nil && prev.Value.End+1 == el.Value.Start { // merge two ranges
                prev.Value.End = el.Value.End
                h.ranges.Remove(el)
                return nil
            }
            return nil // if the two ranges were not merged, we're done here
        }

        // create a new range at the end
        if p > el.Value.End {
            h.ranges.InsertAfter(utils.PacketInterval{Start: p, End: p}, el)
            return nil
        }
    }

    // create a new range at the beginning
    h.ranges.InsertBefore(utils.PacketInterval{Start: p, End: p}, h.ranges.Front())

    return nil
}

// DeleteBelow deletes all entries below the leastUnacked packet number
func (h *receivedPacketHistory) DeleteBelow(leastUnacked protocol.PacketNumber) {
    h.lowestInReceivedPacketNumbers = utils.MaxPacketNumber(h.lowestInReceivedPacketNumbers, leastUnacked)

    nextEl := h.ranges.Front()
    for el := h.ranges.Front(); nextEl != nil; el = nextEl {
        nextEl = el.Next()

        if leastUnacked > el.Value.Start && leastUnacked <= el.Value.End {
            for i := el.Value.Start; i < leastUnacked; i++ { // adjust start value of a range
                delete(h.receivedPacketNumbers, i)
            }
            el.Value.Start = leastUnacked
        } else if el.Value.End < leastUnacked { // delete a whole range
            for i := el.Value.Start; i <= el.Value.End; i++ {
                delete(h.receivedPacketNumbers, i)
            }
            h.ranges.Remove(el)
        } else { // no ranges affected. Nothing to do
            return
        }
    }
}

// IsDuplicate determines if a packet should be regarded as a duplicate packet
// note that after receiving a StopWaitingFrame, all packets below the LeastUnacked should be regarded as duplicates, even if the packet was just delayed
func (h *receivedPacketHistory) IsDuplicate(p protocol.PacketNumber) bool {
    if p < h.lowestInReceivedPacketNumbers {
        return true
    }

    _, ok := h.receivedPacketNumbers[p]
    return ok
}

// GetAckRanges gets a slice of all AckRanges that can be used in an AckFrame
func (h *receivedPacketHistory) GetAckRanges() []frames.AckRange {
    if h.ranges.Len() == 0 {
        return nil
    }

    var ackRanges []frames.AckRange

    for el := h.ranges.Back(); el != nil; el = el.Prev() {
        ackRanges = append(ackRanges, frames.AckRange{FirstPacketNumber: el.Value.Start, LastPacketNumber: el.Value.End})
    }

    return ackRanges
}

func (h *receivedPacketHistory) GetHighestAckRange() frames.AckRange {
    ackRange := frames.AckRange{}
    if h.ranges.Len() > 0 {
        r := h.ranges.Back().Value
        ackRange.FirstPacketNumber = r.Start
        ackRange.LastPacketNumber = r.End
    }
    return ackRange
}
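For readers skimming the vendored code, the interval bookkeeping in ReceivedPacket is easier to see in isolation. Below is a minimal, self-contained sketch of the same idea — collapsing received packet numbers into sorted, closed intervals — written against a plain slice instead of the linked PacketIntervalList. The interval type and the add helper are illustrative names only, not part of the quic-go package.

package main

import "fmt"

// interval is a closed range of packet numbers [Start, End].
type interval struct{ Start, End uint64 }

// add inserts p into a sorted, non-overlapping slice of intervals,
// extending or merging neighbouring intervals where possible.
func add(ranges []interval, p uint64) []interval {
    for i := range ranges {
        r := &ranges[i]
        if p >= r.Start && p <= r.End { // already covered, nothing to do
            return ranges
        }
        if p+1 == r.Start { // extend this range at the front
            r.Start = p
            if i > 0 && ranges[i-1].End+1 == r.Start { // merge with the predecessor
                ranges[i-1].End = r.End
                return append(ranges[:i], ranges[i+1:]...)
            }
            return ranges
        }
        if p == r.End+1 { // extend this range at the back
            r.End = p
            if i+1 < len(ranges) && r.End+1 == ranges[i+1].Start { // merge with the successor
                r.End = ranges[i+1].End
                return append(ranges[:i+1], ranges[i+2:]...)
            }
            return ranges
        }
        if p < r.Start { // p falls into a gap: insert a new range before this one
            ranges = append(ranges, interval{})
            copy(ranges[i+1:], ranges[i:])
            ranges[i] = interval{Start: p, End: p}
            return ranges
        }
    }
    return append(ranges, interval{Start: p, End: p}) // new range at the end
}

func main() {
    var ranges []interval
    for _, p := range []uint64{4, 6, 10, 5} {
        ranges = add(ranges, p)
    }
    fmt.Println(ranges) // [{4 6} {10 10}]
}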
337
vendor/github.com/lucas-clemente/quic-go/ackhandler/received_packet_history_test.go
generated
vendored
Normal file
@@ -0,0 +1,337 @@
|
||||||
|
package ackhandler
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/lucas-clemente/quic-go/frames"
|
||||||
|
"github.com/lucas-clemente/quic-go/protocol"
|
||||||
|
"github.com/lucas-clemente/quic-go/utils"
|
||||||
|
. "github.com/onsi/ginkgo"
|
||||||
|
. "github.com/onsi/gomega"
|
||||||
|
)
|
||||||
|
|
||||||
|
var _ = Describe("receivedPacketHistory", func() {
|
||||||
|
var (
|
||||||
|
hist *receivedPacketHistory
|
||||||
|
)
|
||||||
|
|
||||||
|
BeforeEach(func() {
|
||||||
|
hist = newReceivedPacketHistory()
|
||||||
|
})
|
||||||
|
|
||||||
|
// check if the ranges PacketIntervalList contains exactly the same packet numbers as the receivedPacketNumbers map
|
||||||
|
historiesConsistent := func() bool {
|
||||||
|
// check if a packet number is contained in any of the ranges
|
||||||
|
containedInRanges := func(p protocol.PacketNumber) bool {
|
||||||
|
for el := hist.ranges.Front(); el != nil; el = el.Next() {
|
||||||
|
if p >= el.Value.Start && p <= el.Value.End {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
// first check if all packets contained in the ranges are present in the map
|
||||||
|
for el := hist.ranges.Front(); el != nil; el = el.Next() {
|
||||||
|
for i := el.Value.Start; i <= el.Value.End; i++ {
|
||||||
|
_, ok := hist.receivedPacketNumbers[i]
|
||||||
|
if !ok {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// then check if all packets in the map are contained in any of the ranges
|
||||||
|
for i := range hist.receivedPacketNumbers {
|
||||||
|
if !containedInRanges(i) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
Context("ranges", func() {
|
||||||
|
It("adds the first packet", func() {
|
||||||
|
hist.ReceivedPacket(4)
|
||||||
|
Expect(hist.ranges.Len()).To(Equal(1))
|
||||||
|
Expect(hist.ranges.Front().Value).To(Equal(utils.PacketInterval{Start: 4, End: 4}))
|
||||||
|
Expect(historiesConsistent()).To(BeTrue())
|
||||||
|
})
|
||||||
|
|
||||||
|
It("doesn't care about duplicate packets", func() {
|
||||||
|
hist.ReceivedPacket(4)
|
||||||
|
Expect(hist.ranges.Len()).To(Equal(1))
|
||||||
|
Expect(hist.ranges.Front().Value).To(Equal(utils.PacketInterval{Start: 4, End: 4}))
|
||||||
|
Expect(historiesConsistent()).To(BeTrue())
|
||||||
|
})
|
||||||
|
|
||||||
|
It("adds a few consecutive packets", func() {
|
||||||
|
hist.ReceivedPacket(4)
|
||||||
|
hist.ReceivedPacket(5)
|
||||||
|
hist.ReceivedPacket(6)
|
||||||
|
Expect(hist.ranges.Len()).To(Equal(1))
|
||||||
|
Expect(hist.ranges.Front().Value).To(Equal(utils.PacketInterval{Start: 4, End: 6}))
|
||||||
|
Expect(historiesConsistent()).To(BeTrue())
|
||||||
|
})
|
||||||
|
|
||||||
|
It("doesn't care about a duplicate packet contained in an existing range", func() {
|
||||||
|
hist.ReceivedPacket(4)
|
||||||
|
hist.ReceivedPacket(5)
|
||||||
|
hist.ReceivedPacket(6)
|
||||||
|
hist.ReceivedPacket(5)
|
||||||
|
Expect(hist.ranges.Len()).To(Equal(1))
|
||||||
|
Expect(hist.ranges.Front().Value).To(Equal(utils.PacketInterval{Start: 4, End: 6}))
|
||||||
|
Expect(historiesConsistent()).To(BeTrue())
|
||||||
|
})
|
||||||
|
|
||||||
|
It("extends a range at the front", func() {
|
||||||
|
hist.ReceivedPacket(4)
|
||||||
|
hist.ReceivedPacket(3)
|
||||||
|
Expect(hist.ranges.Len()).To(Equal(1))
|
||||||
|
Expect(hist.ranges.Front().Value).To(Equal(utils.PacketInterval{Start: 3, End: 4}))
|
||||||
|
Expect(historiesConsistent()).To(BeTrue())
|
||||||
|
})
|
||||||
|
|
||||||
|
It("creates a new range when a packet is lost", func() {
|
||||||
|
hist.ReceivedPacket(4)
|
||||||
|
hist.ReceivedPacket(6)
|
||||||
|
Expect(hist.ranges.Len()).To(Equal(2))
|
||||||
|
Expect(hist.ranges.Front().Value).To(Equal(utils.PacketInterval{Start: 4, End: 4}))
|
||||||
|
Expect(hist.ranges.Back().Value).To(Equal(utils.PacketInterval{Start: 6, End: 6}))
|
||||||
|
Expect(historiesConsistent()).To(BeTrue())
|
||||||
|
})
|
||||||
|
|
||||||
|
It("creates a new range in between two ranges", func() {
|
||||||
|
hist.ReceivedPacket(4)
|
||||||
|
hist.ReceivedPacket(10)
|
||||||
|
Expect(hist.ranges.Len()).To(Equal(2))
|
||||||
|
hist.ReceivedPacket(7)
|
||||||
|
Expect(hist.ranges.Len()).To(Equal(3))
|
||||||
|
Expect(hist.ranges.Front().Value).To(Equal(utils.PacketInterval{Start: 4, End: 4}))
|
||||||
|
Expect(hist.ranges.Front().Next().Value).To(Equal(utils.PacketInterval{Start: 7, End: 7}))
|
||||||
|
Expect(hist.ranges.Back().Value).To(Equal(utils.PacketInterval{Start: 10, End: 10}))
|
||||||
|
Expect(historiesConsistent()).To(BeTrue())
|
||||||
|
})
|
||||||
|
|
||||||
|
It("creates a new range before an existing range for a belated packet", func() {
|
||||||
|
hist.ReceivedPacket(6)
|
||||||
|
hist.ReceivedPacket(4)
|
||||||
|
Expect(hist.ranges.Len()).To(Equal(2))
|
||||||
|
Expect(hist.ranges.Front().Value).To(Equal(utils.PacketInterval{Start: 4, End: 4}))
|
||||||
|
Expect(hist.ranges.Back().Value).To(Equal(utils.PacketInterval{Start: 6, End: 6}))
|
||||||
|
Expect(historiesConsistent()).To(BeTrue())
|
||||||
|
})
|
||||||
|
|
||||||
|
It("extends a previous range at the end", func() {
|
||||||
|
hist.ReceivedPacket(4)
|
||||||
|
hist.ReceivedPacket(7)
|
||||||
|
hist.ReceivedPacket(5)
|
||||||
|
Expect(hist.ranges.Len()).To(Equal(2))
|
||||||
|
Expect(hist.ranges.Front().Value).To(Equal(utils.PacketInterval{Start: 4, End: 5}))
|
||||||
|
Expect(hist.ranges.Back().Value).To(Equal(utils.PacketInterval{Start: 7, End: 7}))
|
||||||
|
Expect(historiesConsistent()).To(BeTrue())
|
||||||
|
})
|
||||||
|
|
||||||
|
It("extends a range at the front", func() {
|
||||||
|
hist.ReceivedPacket(4)
|
||||||
|
hist.ReceivedPacket(7)
|
||||||
|
hist.ReceivedPacket(6)
|
||||||
|
Expect(hist.ranges.Len()).To(Equal(2))
|
||||||
|
Expect(hist.ranges.Front().Value).To(Equal(utils.PacketInterval{Start: 4, End: 4}))
|
||||||
|
Expect(hist.ranges.Back().Value).To(Equal(utils.PacketInterval{Start: 6, End: 7}))
|
||||||
|
Expect(historiesConsistent()).To(BeTrue())
|
||||||
|
})
|
||||||
|
|
||||||
|
It("closes a range", func() {
|
||||||
|
hist.ReceivedPacket(6)
|
||||||
|
hist.ReceivedPacket(4)
|
||||||
|
Expect(hist.ranges.Len()).To(Equal(2))
|
||||||
|
hist.ReceivedPacket(5)
|
||||||
|
Expect(hist.ranges.Len()).To(Equal(1))
|
||||||
|
Expect(hist.ranges.Front().Value).To(Equal(utils.PacketInterval{Start: 4, End: 6}))
|
||||||
|
Expect(historiesConsistent()).To(BeTrue())
|
||||||
|
})
|
||||||
|
|
||||||
|
It("closes a range in the middle", func() {
|
||||||
|
hist.ReceivedPacket(1)
|
||||||
|
hist.ReceivedPacket(10)
|
||||||
|
hist.ReceivedPacket(4)
|
||||||
|
hist.ReceivedPacket(6)
|
||||||
|
Expect(hist.ranges.Len()).To(Equal(4))
|
||||||
|
hist.ReceivedPacket(5)
|
||||||
|
Expect(hist.ranges.Len()).To(Equal(3))
|
||||||
|
Expect(hist.ranges.Front().Value).To(Equal(utils.PacketInterval{Start: 1, End: 1}))
|
||||||
|
Expect(hist.ranges.Front().Next().Value).To(Equal(utils.PacketInterval{Start: 4, End: 6}))
|
||||||
|
Expect(hist.ranges.Back().Value).To(Equal(utils.PacketInterval{Start: 10, End: 10}))
|
||||||
|
Expect(historiesConsistent()).To(BeTrue())
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
Context("deleting", func() {
|
||||||
|
It("does nothing when the history is empty", func() {
|
||||||
|
hist.DeleteBelow(5)
|
||||||
|
Expect(hist.ranges.Len()).To(BeZero())
|
||||||
|
Expect(historiesConsistent()).To(BeTrue())
|
||||||
|
})
|
||||||
|
|
||||||
|
It("deletes a range", func() {
|
||||||
|
hist.ReceivedPacket(4)
|
||||||
|
hist.ReceivedPacket(5)
|
||||||
|
hist.ReceivedPacket(10)
|
||||||
|
hist.DeleteBelow(6)
|
||||||
|
Expect(hist.ranges.Len()).To(Equal(1))
|
||||||
|
Expect(hist.ranges.Front().Value).To(Equal(utils.PacketInterval{Start: 10, End: 10}))
|
||||||
|
Expect(historiesConsistent()).To(BeTrue())
|
||||||
|
})
|
||||||
|
|
||||||
|
It("deletes multiple ranges", func() {
|
||||||
|
hist.ReceivedPacket(1)
|
||||||
|
hist.ReceivedPacket(5)
|
||||||
|
hist.ReceivedPacket(10)
|
||||||
|
hist.DeleteBelow(8)
|
||||||
|
Expect(hist.ranges.Len()).To(Equal(1))
|
||||||
|
Expect(hist.ranges.Front().Value).To(Equal(utils.PacketInterval{Start: 10, End: 10}))
|
||||||
|
Expect(historiesConsistent()).To(BeTrue())
|
||||||
|
})
|
||||||
|
|
||||||
|
It("adjusts a range, if leastUnacked lies inside it", func() {
|
||||||
|
hist.ReceivedPacket(3)
|
||||||
|
hist.ReceivedPacket(4)
|
||||||
|
hist.ReceivedPacket(5)
|
||||||
|
hist.ReceivedPacket(6)
|
||||||
|
hist.DeleteBelow(4)
|
||||||
|
Expect(hist.ranges.Len()).To(Equal(1))
|
||||||
|
Expect(hist.ranges.Front().Value).To(Equal(utils.PacketInterval{Start: 4, End: 6}))
|
||||||
|
Expect(historiesConsistent()).To(BeTrue())
|
||||||
|
})
|
||||||
|
|
||||||
|
It("adjusts a range, if leastUnacked is the last of the range", func() {
|
||||||
|
hist.ReceivedPacket(4)
|
||||||
|
hist.ReceivedPacket(5)
|
||||||
|
hist.ReceivedPacket(10)
|
||||||
|
hist.DeleteBelow(5)
|
||||||
|
Expect(hist.ranges.Len()).To(Equal(2))
|
||||||
|
Expect(hist.ranges.Front().Value).To(Equal(utils.PacketInterval{Start: 5, End: 5}))
|
||||||
|
Expect(hist.ranges.Back().Value).To(Equal(utils.PacketInterval{Start: 10, End: 10}))
|
||||||
|
Expect(historiesConsistent()).To(BeTrue())
|
||||||
|
})
|
||||||
|
|
||||||
|
It("keeps a one-packet range, if leastUnacked is exactly that value", func() {
|
||||||
|
hist.ReceivedPacket(4)
|
||||||
|
hist.DeleteBelow(4)
|
||||||
|
Expect(hist.ranges.Len()).To(Equal(1))
|
||||||
|
Expect(hist.ranges.Front().Value).To(Equal(utils.PacketInterval{Start: 4, End: 4}))
|
||||||
|
Expect(historiesConsistent()).To(BeTrue())
|
||||||
|
})
|
||||||
|
|
||||||
|
Context("DoS protection", func() {
|
||||||
|
It("doesn't create more than MaxTrackedReceivedAckRanges ranges", func() {
|
||||||
|
for i := protocol.PacketNumber(1); i <= protocol.MaxTrackedReceivedAckRanges; i++ {
|
||||||
|
err := hist.ReceivedPacket(2 * i)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
}
|
||||||
|
err := hist.ReceivedPacket(2*protocol.MaxTrackedReceivedAckRanges + 2)
|
||||||
|
Expect(err).To(MatchError(errTooManyOutstandingReceivedAckRanges))
|
||||||
|
Expect(historiesConsistent()).To(BeTrue())
|
||||||
|
})
|
||||||
|
|
||||||
|
It("doesn't store more than MaxTrackedReceivedPackets packets", func() {
|
||||||
|
err := hist.ReceivedPacket(1)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
for i := protocol.PacketNumber(3); i < 3+protocol.MaxTrackedReceivedPackets-1; i++ {
|
||||||
|
err := hist.ReceivedPacket(protocol.PacketNumber(i))
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
}
|
||||||
|
err = hist.ReceivedPacket(protocol.PacketNumber(protocol.MaxTrackedReceivedPackets) + 10)
|
||||||
|
Expect(err).To(MatchError(errTooManyOutstandingReceivedPackets))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("doesn't consider already deleted ranges for MaxTrackedReceivedAckRanges", func() {
|
||||||
|
for i := protocol.PacketNumber(1); i <= protocol.MaxTrackedReceivedAckRanges; i++ {
|
||||||
|
err := hist.ReceivedPacket(2 * i)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
}
|
||||||
|
err := hist.ReceivedPacket(2*protocol.MaxTrackedReceivedAckRanges + 2)
|
||||||
|
Expect(err).To(MatchError(errTooManyOutstandingReceivedAckRanges))
|
||||||
|
hist.DeleteBelow(protocol.MaxTrackedReceivedAckRanges) // deletes about half of the ranges
|
||||||
|
err = hist.ReceivedPacket(2*protocol.MaxTrackedReceivedAckRanges + 4)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
Expect(historiesConsistent()).To(BeTrue())
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
Context("duplicate packet detection", func() {
|
||||||
|
It("detects duplicates for existing ranges", func() {
|
||||||
|
hist.ReceivedPacket(2)
|
||||||
|
hist.ReceivedPacket(4)
|
||||||
|
hist.ReceivedPacket(5)
|
||||||
|
Expect(hist.IsDuplicate(1)).To(BeFalse())
|
||||||
|
Expect(hist.IsDuplicate(2)).To(BeTrue())
|
||||||
|
Expect(hist.IsDuplicate(3)).To(BeFalse())
|
||||||
|
Expect(hist.IsDuplicate(4)).To(BeTrue())
|
||||||
|
Expect(hist.IsDuplicate(5)).To(BeTrue())
|
||||||
|
Expect(hist.IsDuplicate(6)).To(BeFalse())
|
||||||
|
})
|
||||||
|
|
||||||
|
It("detects duplicates after a range has been deleted", func() {
|
||||||
|
hist.ReceivedPacket(2)
|
||||||
|
hist.ReceivedPacket(3)
|
||||||
|
hist.ReceivedPacket(6)
|
||||||
|
hist.DeleteBelow(5)
|
||||||
|
for i := 1; i < 5; i++ {
|
||||||
|
Expect(hist.IsDuplicate(protocol.PacketNumber(i))).To(BeTrue())
|
||||||
|
}
|
||||||
|
Expect(hist.IsDuplicate(5)).To(BeFalse())
|
||||||
|
Expect(hist.IsDuplicate(6)).To(BeTrue())
|
||||||
|
Expect(hist.IsDuplicate(7)).To(BeFalse())
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
Context("ACK range export", func() {
|
||||||
|
It("returns nil if there are no ranges", func() {
|
||||||
|
Expect(hist.GetAckRanges()).To(BeNil())
|
||||||
|
})
|
||||||
|
|
||||||
|
It("gets a single ACK range", func() {
|
||||||
|
hist.ReceivedPacket(4)
|
||||||
|
hist.ReceivedPacket(5)
|
||||||
|
ackRanges := hist.GetAckRanges()
|
||||||
|
Expect(ackRanges).To(HaveLen(1))
|
||||||
|
Expect(ackRanges[0]).To(Equal(frames.AckRange{FirstPacketNumber: 4, LastPacketNumber: 5}))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("gets multiple ACK ranges", func() {
|
||||||
|
hist.ReceivedPacket(4)
|
||||||
|
hist.ReceivedPacket(5)
|
||||||
|
hist.ReceivedPacket(6)
|
||||||
|
hist.ReceivedPacket(1)
|
||||||
|
hist.ReceivedPacket(11)
|
||||||
|
hist.ReceivedPacket(10)
|
||||||
|
hist.ReceivedPacket(2)
|
||||||
|
ackRanges := hist.GetAckRanges()
|
||||||
|
Expect(ackRanges).To(HaveLen(3))
|
||||||
|
Expect(ackRanges[0]).To(Equal(frames.AckRange{FirstPacketNumber: 10, LastPacketNumber: 11}))
|
||||||
|
Expect(ackRanges[1]).To(Equal(frames.AckRange{FirstPacketNumber: 4, LastPacketNumber: 6}))
|
||||||
|
Expect(ackRanges[2]).To(Equal(frames.AckRange{FirstPacketNumber: 1, LastPacketNumber: 2}))
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
Context("Getting the highest ACK range", func() {
|
||||||
|
It("returns the zero value if there are no ranges", func() {
|
||||||
|
Expect(hist.GetHighestAckRange()).To(BeZero())
|
||||||
|
})
|
||||||
|
|
||||||
|
It("gets a single ACK range", func() {
|
||||||
|
hist.ReceivedPacket(4)
|
||||||
|
hist.ReceivedPacket(5)
|
||||||
|
Expect(hist.GetHighestAckRange()).To(Equal(frames.AckRange{FirstPacketNumber: 4, LastPacketNumber: 5}))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("gets the highest of multiple ACK ranges", func() {
|
||||||
|
hist.ReceivedPacket(3)
|
||||||
|
hist.ReceivedPacket(6)
|
||||||
|
hist.ReceivedPacket(7)
|
||||||
|
Expect(hist.GetHighestAckRange()).To(Equal(frames.AckRange{FirstPacketNumber: 6, LastPacketNumber: 7}))
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
392
vendor/github.com/lucas-clemente/quic-go/ackhandler/sent_packet_handler.go
generated
vendored
Normal file
@@ -0,0 +1,392 @@
|
||||||
|
package ackhandler
|
||||||
|
|
||||||
|
import (
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/lucas-clemente/quic-go/congestion"
|
||||||
|
"github.com/lucas-clemente/quic-go/frames"
|
||||||
|
"github.com/lucas-clemente/quic-go/protocol"
|
||||||
|
"github.com/lucas-clemente/quic-go/qerr"
|
||||||
|
"github.com/lucas-clemente/quic-go/utils"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
// Maximum reordering in time space before time based loss detection considers a packet lost.
|
||||||
|
// In fraction of an RTT.
|
||||||
|
timeReorderingFraction = 1.0 / 8
|
||||||
|
// defaultRTOTimeout is the RTO time on new connections
|
||||||
|
defaultRTOTimeout = 500 * time.Millisecond
|
||||||
|
// Minimum time in the future an RTO alarm may be set for.
|
||||||
|
minRTOTimeout = 200 * time.Millisecond
|
||||||
|
// maxRTOTimeout is the maximum RTO time
|
||||||
|
maxRTOTimeout = 60 * time.Second
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
// ErrDuplicateOrOutOfOrderAck occurs when a duplicate or an out-of-order ACK is received
|
||||||
|
ErrDuplicateOrOutOfOrderAck = errors.New("SentPacketHandler: Duplicate or out-of-order ACK")
|
||||||
|
// ErrTooManyTrackedSentPackets occurs when the sentPacketHandler has to keep track of too many packets
|
||||||
|
ErrTooManyTrackedSentPackets = errors.New("Too many outstanding non-acked and non-retransmitted packets")
|
||||||
|
// ErrAckForSkippedPacket occurs when the client sent an ACK for a packet number that we intentionally skipped
|
||||||
|
ErrAckForSkippedPacket = qerr.Error(qerr.InvalidAckData, "Received an ACK for a skipped packet number")
|
||||||
|
errAckForUnsentPacket = qerr.Error(qerr.InvalidAckData, "Received ACK for an unsent packet")
|
||||||
|
)
|
||||||
|
|
||||||
|
var errPacketNumberNotIncreasing = errors.New("Already sent a packet with a higher packet number")
|
||||||
|
|
||||||
|
type sentPacketHandler struct {
|
||||||
|
lastSentPacketNumber protocol.PacketNumber
|
||||||
|
skippedPackets []protocol.PacketNumber
|
||||||
|
|
||||||
|
LargestAcked protocol.PacketNumber
|
||||||
|
|
||||||
|
largestReceivedPacketWithAck protocol.PacketNumber
|
||||||
|
|
||||||
|
packetHistory *PacketList
|
||||||
|
stopWaitingManager stopWaitingManager
|
||||||
|
|
||||||
|
retransmissionQueue []*Packet
|
||||||
|
|
||||||
|
bytesInFlight protocol.ByteCount
|
||||||
|
|
||||||
|
congestion congestion.SendAlgorithm
|
||||||
|
rttStats *congestion.RTTStats
|
||||||
|
|
||||||
|
// The number of times an RTO has been sent without receiving an ack.
|
||||||
|
rtoCount uint32
|
||||||
|
|
||||||
|
// The time at which the next packet will be considered lost based on early transmit or exceeding the reordering window in time.
|
||||||
|
lossTime time.Time
|
||||||
|
|
||||||
|
// The alarm timeout
|
||||||
|
alarm time.Time
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewSentPacketHandler creates a new sentPacketHandler
|
||||||
|
func NewSentPacketHandler(rttStats *congestion.RTTStats) SentPacketHandler {
|
||||||
|
congestion := congestion.NewCubicSender(
|
||||||
|
congestion.DefaultClock{},
|
||||||
|
rttStats,
|
||||||
|
false, /* don't use reno since chromium doesn't (why?) */
|
||||||
|
protocol.InitialCongestionWindow,
|
||||||
|
protocol.DefaultMaxCongestionWindow,
|
||||||
|
)
|
||||||
|
|
||||||
|
return &sentPacketHandler{
|
||||||
|
packetHistory: NewPacketList(),
|
||||||
|
stopWaitingManager: stopWaitingManager{},
|
||||||
|
rttStats: rttStats,
|
||||||
|
congestion: congestion,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *sentPacketHandler) largestInOrderAcked() protocol.PacketNumber {
|
||||||
|
if f := h.packetHistory.Front(); f != nil {
|
||||||
|
return f.Value.PacketNumber - 1
|
||||||
|
}
|
||||||
|
return h.LargestAcked
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *sentPacketHandler) SentPacket(packet *Packet) error {
|
||||||
|
if packet.PacketNumber <= h.lastSentPacketNumber {
|
||||||
|
return errPacketNumberNotIncreasing
|
||||||
|
}
|
||||||
|
|
||||||
|
if protocol.PacketNumber(len(h.retransmissionQueue)+h.packetHistory.Len()+1) > protocol.MaxTrackedSentPackets {
|
||||||
|
return ErrTooManyTrackedSentPackets
|
||||||
|
}
|
||||||
|
|
||||||
|
for p := h.lastSentPacketNumber + 1; p < packet.PacketNumber; p++ {
|
||||||
|
h.skippedPackets = append(h.skippedPackets, p)
|
||||||
|
|
||||||
|
if len(h.skippedPackets) > protocol.MaxTrackedSkippedPackets {
|
||||||
|
h.skippedPackets = h.skippedPackets[1:]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
now := time.Now()
|
||||||
|
packet.SendTime = now
|
||||||
|
if packet.Length == 0 {
|
||||||
|
return errors.New("SentPacketHandler: packet cannot be empty")
|
||||||
|
}
|
||||||
|
h.bytesInFlight += packet.Length
|
||||||
|
|
||||||
|
h.lastSentPacketNumber = packet.PacketNumber
|
||||||
|
h.packetHistory.PushBack(*packet)
|
||||||
|
|
||||||
|
h.congestion.OnPacketSent(
|
||||||
|
now,
|
||||||
|
h.bytesInFlight,
|
||||||
|
packet.PacketNumber,
|
||||||
|
packet.Length,
|
||||||
|
true, /* TODO: is retransmittable */
|
||||||
|
)
|
||||||
|
|
||||||
|
h.updateLossDetectionAlarm()
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *sentPacketHandler) ReceivedAck(ackFrame *frames.AckFrame, withPacketNumber protocol.PacketNumber, rcvTime time.Time) error {
|
||||||
|
if ackFrame.LargestAcked > h.lastSentPacketNumber {
|
||||||
|
return errAckForUnsentPacket
|
||||||
|
}
|
||||||
|
|
||||||
|
// duplicate or out-of-order ACK
|
||||||
|
if withPacketNumber <= h.largestReceivedPacketWithAck {
|
||||||
|
return ErrDuplicateOrOutOfOrderAck
|
||||||
|
}
|
||||||
|
h.largestReceivedPacketWithAck = withPacketNumber
|
||||||
|
|
||||||
|
// ignore repeated ACK (ACKs that don't have a higher LargestAcked than the last ACK)
|
||||||
|
if ackFrame.LargestAcked <= h.largestInOrderAcked() {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
h.LargestAcked = ackFrame.LargestAcked
|
||||||
|
|
||||||
|
if h.skippedPacketsAcked(ackFrame) {
|
||||||
|
return ErrAckForSkippedPacket
|
||||||
|
}
|
||||||
|
|
||||||
|
rttUpdated := h.maybeUpdateRTT(ackFrame.LargestAcked, ackFrame.DelayTime, rcvTime)
|
||||||
|
|
||||||
|
if rttUpdated {
|
||||||
|
h.congestion.MaybeExitSlowStart()
|
||||||
|
}
|
||||||
|
|
||||||
|
ackedPackets, err := h.determineNewlyAckedPackets(ackFrame)
|
||||||
|
if err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(ackedPackets) > 0 {
|
||||||
|
for _, p := range ackedPackets {
|
||||||
|
h.onPacketAcked(p)
|
||||||
|
h.congestion.OnPacketAcked(p.Value.PacketNumber, p.Value.Length, h.bytesInFlight)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
h.detectLostPackets()
|
||||||
|
h.updateLossDetectionAlarm()
|
||||||
|
|
||||||
|
h.garbageCollectSkippedPackets()
|
||||||
|
h.stopWaitingManager.ReceivedAck(ackFrame)
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *sentPacketHandler) determineNewlyAckedPackets(ackFrame *frames.AckFrame) ([]*PacketElement, error) {
|
||||||
|
var ackedPackets []*PacketElement
|
||||||
|
ackRangeIndex := 0
|
||||||
|
for el := h.packetHistory.Front(); el != nil; el = el.Next() {
|
||||||
|
packet := el.Value
|
||||||
|
packetNumber := packet.PacketNumber
|
||||||
|
|
||||||
|
// Ignore packets below the LowestAcked
|
||||||
|
if packetNumber < ackFrame.LowestAcked {
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
// Break after LargestAcked is reached
|
||||||
|
if packetNumber > ackFrame.LargestAcked {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
if ackFrame.HasMissingRanges() {
|
||||||
|
ackRange := ackFrame.AckRanges[len(ackFrame.AckRanges)-1-ackRangeIndex]
|
||||||
|
|
||||||
|
for packetNumber > ackRange.LastPacketNumber && ackRangeIndex < len(ackFrame.AckRanges)-1 {
|
||||||
|
ackRangeIndex++
|
||||||
|
ackRange = ackFrame.AckRanges[len(ackFrame.AckRanges)-1-ackRangeIndex]
|
||||||
|
}
|
||||||
|
|
||||||
|
if packetNumber >= ackRange.FirstPacketNumber { // packet i contained in ACK range
|
||||||
|
if packetNumber > ackRange.LastPacketNumber {
|
||||||
|
return nil, fmt.Errorf("BUG: ackhandler would have acked wrong packet 0x%x, while evaluating range 0x%x -> 0x%x", packetNumber, ackRange.FirstPacketNumber, ackRange.LastPacketNumber)
|
||||||
|
}
|
||||||
|
ackedPackets = append(ackedPackets, el)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
ackedPackets = append(ackedPackets, el)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return ackedPackets, nil
|
||||||
|
}
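determineNewlyAckedPackets walks the ascending packet history exactly once while stepping through the ACK ranges from the lowest range upwards. A compact, self-contained sketch of that matching logic is shown below; ackRange and newlyAcked are illustrative names rather than the vendored types, and the example numbers mirror the "one missing packet range" test further down, where packets 4 and 5 are missing.

package main

import "fmt"

// ackRange is a closed interval of acknowledged packet numbers.
// Ranges are ordered highest-first, as in the handler above.
type ackRange struct{ First, Last uint64 }

// newlyAcked returns the subset of sent (ascending) packet numbers covered by
// the given ranges, walking both lists in a single pass.
func newlyAcked(sent []uint64, lowest, largest uint64, ranges []ackRange) []uint64 {
    var acked []uint64
    idx := 0 // index into ranges, counted from the last (lowest) range
    for _, pn := range sent {
        if pn < lowest {
            continue
        }
        if pn > largest {
            break
        }
        if len(ranges) == 0 { // no missing ranges: everything in [lowest, largest] is acked
            acked = append(acked, pn)
            continue
        }
        r := ranges[len(ranges)-1-idx]
        for pn > r.Last && idx < len(ranges)-1 { // advance to the range that could contain pn
            idx++
            r = ranges[len(ranges)-1-idx]
        }
        if pn >= r.First && pn <= r.Last {
            acked = append(acked, pn)
        }
    }
    return acked
}

func main() {
    sent := []uint64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 12}
    ranges := []ackRange{{First: 6, Last: 9}, {First: 2, Last: 3}} // packets 4 and 5 are missing
    fmt.Println(newlyAcked(sent, 2, 9, ranges))                    // [2 3 6 7 8 9]
}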
|
||||||
|
|
||||||
|
func (h *sentPacketHandler) maybeUpdateRTT(largestAcked protocol.PacketNumber, ackDelay time.Duration, rcvTime time.Time) bool {
|
||||||
|
for el := h.packetHistory.Front(); el != nil; el = el.Next() {
|
||||||
|
packet := el.Value
|
||||||
|
if packet.PacketNumber == largestAcked {
|
||||||
|
h.rttStats.UpdateRTT(rcvTime.Sub(packet.SendTime), ackDelay, time.Now())
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
// Packets are sorted by number, so we can stop searching
|
||||||
|
if packet.PacketNumber > largestAcked {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *sentPacketHandler) updateLossDetectionAlarm() {
|
||||||
|
// Cancel the alarm if no packets are outstanding
|
||||||
|
if h.packetHistory.Len() == 0 {
|
||||||
|
h.alarm = time.Time{}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO(#496): Handle handshake packets separately
|
||||||
|
// TODO(#497): TLP
|
||||||
|
if !h.lossTime.IsZero() {
|
||||||
|
// Early retransmit timer or time loss detection.
|
||||||
|
h.alarm = h.lossTime
|
||||||
|
} else {
|
||||||
|
// RTO
|
||||||
|
h.alarm = time.Now().Add(h.computeRTOTimeout())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *sentPacketHandler) detectLostPackets() {
|
||||||
|
h.lossTime = time.Time{}
|
||||||
|
now := time.Now()
|
||||||
|
|
||||||
|
maxRTT := float64(utils.MaxDuration(h.rttStats.LatestRTT(), h.rttStats.SmoothedRTT()))
|
||||||
|
delayUntilLost := time.Duration((1.0 + timeReorderingFraction) * maxRTT)
|
||||||
|
|
||||||
|
var lostPackets []*PacketElement
|
||||||
|
for el := h.packetHistory.Front(); el != nil; el = el.Next() {
|
||||||
|
packet := el.Value
|
||||||
|
|
||||||
|
if packet.PacketNumber > h.LargestAcked {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
timeSinceSent := now.Sub(packet.SendTime)
|
||||||
|
if timeSinceSent > delayUntilLost {
|
||||||
|
lostPackets = append(lostPackets, el)
|
||||||
|
} else if h.lossTime.IsZero() {
|
||||||
|
// Note: This conditional is only entered once per call
|
||||||
|
h.lossTime = now.Add(delayUntilLost - timeSinceSent)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(lostPackets) > 0 {
|
||||||
|
for _, p := range lostPackets {
|
||||||
|
h.queuePacketForRetransmission(p)
|
||||||
|
h.congestion.OnPacketLost(p.Value.PacketNumber, p.Value.Length, h.bytesInFlight)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
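detectLostPackets treats a packet as lost once it has been outstanding for longer than (1 + timeReorderingFraction) times the larger of the latest and smoothed RTT samples. A small standalone sketch of that threshold arithmetic follows; the delayUntilLost helper below is an illustrative re-statement, not something exported by the package.

package main

import (
    "fmt"
    "time"
)

// delayUntilLost mirrors the threshold used above: a packet is considered
// lost once it has been in flight longer than (1 + 1/8) of the larger of the
// latest and smoothed RTT samples.
func delayUntilLost(latestRTT, smoothedRTT time.Duration) time.Duration {
    maxRTT := latestRTT
    if smoothedRTT > maxRTT {
        maxRTT = smoothedRTT
    }
    return time.Duration((1.0 + 1.0/8.0) * float64(maxRTT))
}

func main() {
    // With a 100 ms RTT, anything unacknowledged after 112.5 ms is treated as lost.
    fmt.Println(delayUntilLost(100*time.Millisecond, 80*time.Millisecond)) // 112.5ms
}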
|
||||||
|
|
||||||
|
func (h *sentPacketHandler) OnAlarm() {
|
||||||
|
// TODO(#496): Handle handshake packets separately
|
||||||
|
// TODO(#497): TLP
|
||||||
|
if !h.lossTime.IsZero() {
|
||||||
|
// Early retransmit or time loss detection
|
||||||
|
h.detectLostPackets()
|
||||||
|
} else {
|
||||||
|
// RTO
|
||||||
|
h.retransmitOldestTwoPackets()
|
||||||
|
h.rtoCount++
|
||||||
|
}
|
||||||
|
|
||||||
|
h.updateLossDetectionAlarm()
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *sentPacketHandler) GetAlarmTimeout() time.Time {
|
||||||
|
return h.alarm
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *sentPacketHandler) onPacketAcked(packetElement *PacketElement) {
|
||||||
|
h.bytesInFlight -= packetElement.Value.Length
|
||||||
|
h.rtoCount = 0
|
||||||
|
// TODO(#497): h.tlpCount = 0
|
||||||
|
h.packetHistory.Remove(packetElement)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *sentPacketHandler) DequeuePacketForRetransmission() *Packet {
|
||||||
|
if len(h.retransmissionQueue) == 0 {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
queueLen := len(h.retransmissionQueue)
|
||||||
|
// packets are usually NACKed in descending order. So use the slice as a stack
|
||||||
|
packet := h.retransmissionQueue[queueLen-1]
|
||||||
|
h.retransmissionQueue = h.retransmissionQueue[:queueLen-1]
|
||||||
|
return packet
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *sentPacketHandler) GetLeastUnacked() protocol.PacketNumber {
|
||||||
|
return h.largestInOrderAcked() + 1
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *sentPacketHandler) GetStopWaitingFrame(force bool) *frames.StopWaitingFrame {
|
||||||
|
return h.stopWaitingManager.GetStopWaitingFrame(force)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *sentPacketHandler) SendingAllowed() bool {
|
||||||
|
congestionLimited := h.bytesInFlight > h.congestion.GetCongestionWindow()
|
||||||
|
maxTrackedLimited := protocol.PacketNumber(len(h.retransmissionQueue)+h.packetHistory.Len()) >= protocol.MaxTrackedSentPackets
|
||||||
|
return !(congestionLimited || maxTrackedLimited)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *sentPacketHandler) retransmitOldestTwoPackets() {
|
||||||
|
if p := h.packetHistory.Front(); p != nil {
|
||||||
|
h.queueRTO(p)
|
||||||
|
}
|
||||||
|
if p := h.packetHistory.Front(); p != nil {
|
||||||
|
h.queueRTO(p)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *sentPacketHandler) queueRTO(el *PacketElement) {
|
||||||
|
packet := &el.Value
|
||||||
|
utils.Debugf("\tQueueing packet 0x%x for retransmission (RTO)", packet.PacketNumber)
|
||||||
|
h.queuePacketForRetransmission(el)
|
||||||
|
h.congestion.OnPacketLost(packet.PacketNumber, packet.Length, h.bytesInFlight)
|
||||||
|
h.congestion.OnRetransmissionTimeout(true)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *sentPacketHandler) queuePacketForRetransmission(packetElement *PacketElement) {
|
||||||
|
packet := &packetElement.Value
|
||||||
|
h.bytesInFlight -= packet.Length
|
||||||
|
h.retransmissionQueue = append(h.retransmissionQueue, packet)
|
||||||
|
|
||||||
|
h.packetHistory.Remove(packetElement)
|
||||||
|
|
||||||
|
// strictly speaking, this is only necessary for RTO retransmissions
|
||||||
|
// this is because FastRetransmissions are triggered by missing ranges in ACKs, and then the LargestAcked will already be higher than the packet number of the retransmitted packet
|
||||||
|
h.stopWaitingManager.QueuedRetransmissionForPacketNumber(packet.PacketNumber)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (h *sentPacketHandler) computeRTOTimeout() time.Duration {
|
||||||
|
rto := h.congestion.RetransmissionDelay()
|
||||||
|
if rto == 0 {
|
||||||
|
rto = defaultRTOTimeout
|
||||||
|
}
|
||||||
|
rto = utils.MaxDuration(rto, minRTOTimeout)
|
||||||
|
// Exponential backoff
|
||||||
|
rto = rto << h.rtoCount
|
||||||
|
return utils.MinDuration(rto, maxRTOTimeout)
|
||||||
|
}
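computeRTOTimeout falls back to the 500 ms default when the congestion controller reports no retransmission delay, enforces the 200 ms floor, doubles once per consecutive RTO and caps the result at 60 s. Below is a standalone sketch of the same clamp-and-backoff arithmetic; rtoTimeout is an illustrative name and simply inlines the constants defined at the top of the file.

package main

import (
    "fmt"
    "time"
)

// rtoTimeout reproduces the clamp-and-backoff arithmetic above for a given
// base delay and number of consecutive RTOs.
func rtoTimeout(base time.Duration, rtoCount uint32) time.Duration {
    const (
        defaultRTO = 500 * time.Millisecond
        minRTO     = 200 * time.Millisecond
        maxRTO     = 60 * time.Second
    )
    rto := base
    if rto == 0 {
        rto = defaultRTO
    }
    if rto < minRTO {
        rto = minRTO
    }
    rto <<= rtoCount // exponential backoff
    if rto > maxRTO {
        rto = maxRTO
    }
    return rto
}

func main() {
    fmt.Println(rtoTimeout(0, 0)) // 500ms: default delay, no backoff
    fmt.Println(rtoTimeout(0, 3)) // 4s: 500ms doubled three times
    fmt.Println(rtoTimeout(0, 9)) // 1m0s: 256s, capped at the 60s maximum
}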
|
||||||
|
|
||||||
|
func (h *sentPacketHandler) skippedPacketsAcked(ackFrame *frames.AckFrame) bool {
|
||||||
|
for _, p := range h.skippedPackets {
|
||||||
|
if ackFrame.AcksPacket(p) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
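skippedPacketsAcked is what turns the intentionally skipped packet numbers recorded in SentPacket into a sanity check on incoming ACKs: an ACK that claims receipt of a deliberately unsent packet number is rejected with ErrAckForSkippedPacket. A self-contained sketch of that check is given below; acksPacket and ackForSkipped are illustrative stand-ins, and the numbers mirror the ACK-validation tests further down, where packet number 11 is skipped.

package main

import "fmt"

// acksPacket reports whether a packet number falls into any of the given
// closed ACK ranges, analogous to AckFrame.AcksPacket.
func acksPacket(ranges [][2]uint64, p uint64) bool {
    for _, r := range ranges {
        if p >= r[0] && p <= r[1] {
            return true
        }
    }
    return false
}

// ackForSkipped reports whether an ACK claims receipt of a packet number that
// was deliberately never sent — the condition behind ErrAckForSkippedPacket.
func ackForSkipped(skipped []uint64, ranges [][2]uint64) bool {
    for _, p := range skipped {
        if acksPacket(ranges, p) {
            return true
        }
    }
    return false
}

func main() {
    skipped := []uint64{11} // packet number 11 was skipped on purpose
    fmt.Println(ackForSkipped(skipped, [][2]uint64{{5, 12}}))           // true: bogus ACK
    fmt.Println(ackForSkipped(skipped, [][2]uint64{{12, 12}, {5, 10}})) // false: 11 correctly reported missing
}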
|
||||||
|
|
||||||
|
func (h *sentPacketHandler) garbageCollectSkippedPackets() {
|
||||||
|
lioa := h.largestInOrderAcked()
|
||||||
|
deleteIndex := 0
|
||||||
|
for i, p := range h.skippedPackets {
|
||||||
|
if p <= lioa {
|
||||||
|
deleteIndex = i + 1
|
||||||
|
}
|
||||||
|
}
|
||||||
|
h.skippedPackets = h.skippedPackets[deleteIndex:]
|
||||||
|
}
|
778
vendor/github.com/lucas-clemente/quic-go/ackhandler/sent_packet_handler_test.go
generated
vendored
Normal file
@@ -0,0 +1,778 @@
|
||||||
|
package ackhandler
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/lucas-clemente/quic-go/congestion"
|
||||||
|
"github.com/lucas-clemente/quic-go/frames"
|
||||||
|
"github.com/lucas-clemente/quic-go/protocol"
|
||||||
|
. "github.com/onsi/ginkgo"
|
||||||
|
. "github.com/onsi/gomega"
|
||||||
|
)
|
||||||
|
|
||||||
|
type mockCongestion struct {
|
||||||
|
argsOnPacketSent []interface{}
|
||||||
|
maybeExitSlowStart bool
|
||||||
|
onRetransmissionTimeout bool
|
||||||
|
getCongestionWindow bool
|
||||||
|
packetsAcked [][]interface{}
|
||||||
|
packetsLost [][]interface{}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *mockCongestion) TimeUntilSend(now time.Time, bytesInFlight protocol.ByteCount) time.Duration {
|
||||||
|
panic("not implemented")
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *mockCongestion) OnPacketSent(sentTime time.Time, bytesInFlight protocol.ByteCount, packetNumber protocol.PacketNumber, bytes protocol.ByteCount, isRetransmittable bool) bool {
|
||||||
|
m.argsOnPacketSent = []interface{}{sentTime, bytesInFlight, packetNumber, bytes, isRetransmittable}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *mockCongestion) GetCongestionWindow() protocol.ByteCount {
|
||||||
|
m.getCongestionWindow = true
|
||||||
|
return protocol.DefaultTCPMSS
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *mockCongestion) MaybeExitSlowStart() {
|
||||||
|
m.maybeExitSlowStart = true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *mockCongestion) OnRetransmissionTimeout(packetsRetransmitted bool) {
|
||||||
|
m.onRetransmissionTimeout = true
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *mockCongestion) RetransmissionDelay() time.Duration {
|
||||||
|
return defaultRTOTimeout
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *mockCongestion) SetNumEmulatedConnections(n int) { panic("not implemented") }
|
||||||
|
func (m *mockCongestion) OnConnectionMigration() { panic("not implemented") }
|
||||||
|
func (m *mockCongestion) SetSlowStartLargeReduction(enabled bool) { panic("not implemented") }
|
||||||
|
|
||||||
|
func (m *mockCongestion) OnPacketAcked(n protocol.PacketNumber, l protocol.ByteCount, bif protocol.ByteCount) {
|
||||||
|
m.packetsAcked = append(m.packetsAcked, []interface{}{n, l, bif})
|
||||||
|
}
|
||||||
|
|
||||||
|
func (m *mockCongestion) OnPacketLost(n protocol.PacketNumber, l protocol.ByteCount, bif protocol.ByteCount) {
|
||||||
|
m.packetsLost = append(m.packetsLost, []interface{}{n, l, bif})
|
||||||
|
}
|
||||||
|
|
||||||
|
var _ = Describe("SentPacketHandler", func() {
|
||||||
|
var (
|
||||||
|
handler *sentPacketHandler
|
||||||
|
streamFrame frames.StreamFrame
|
||||||
|
)
|
||||||
|
|
||||||
|
BeforeEach(func() {
|
||||||
|
rttStats := &congestion.RTTStats{}
|
||||||
|
handler = NewSentPacketHandler(rttStats).(*sentPacketHandler)
|
||||||
|
streamFrame = frames.StreamFrame{
|
||||||
|
StreamID: 5,
|
||||||
|
Data: []byte{0x13, 0x37},
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
getPacketElement := func(p protocol.PacketNumber) *PacketElement {
|
||||||
|
for el := handler.packetHistory.Front(); el != nil; el = el.Next() {
|
||||||
|
if el.Value.PacketNumber == p {
|
||||||
|
return el
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
|
It("gets the LeastUnacked packet number", func() {
|
||||||
|
handler.LargestAcked = 0x1337
|
||||||
|
Expect(handler.GetLeastUnacked()).To(Equal(protocol.PacketNumber(0x1337 + 1)))
|
||||||
|
})
|
||||||
|
|
||||||
|
Context("registering sent packets", func() {
|
||||||
|
It("accepts two consecutive packets", func() {
|
||||||
|
packet1 := Packet{PacketNumber: 1, Frames: []frames.Frame{&streamFrame}, Length: 1}
|
||||||
|
packet2 := Packet{PacketNumber: 2, Frames: []frames.Frame{&streamFrame}, Length: 2}
|
||||||
|
err := handler.SentPacket(&packet1)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
err = handler.SentPacket(&packet2)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
Expect(handler.lastSentPacketNumber).To(Equal(protocol.PacketNumber(2)))
|
||||||
|
Expect(handler.packetHistory.Front().Value.PacketNumber).To(Equal(protocol.PacketNumber(1)))
|
||||||
|
Expect(handler.packetHistory.Back().Value.PacketNumber).To(Equal(protocol.PacketNumber(2)))
|
||||||
|
Expect(handler.bytesInFlight).To(Equal(protocol.ByteCount(3)))
|
||||||
|
Expect(handler.skippedPackets).To(BeEmpty())
|
||||||
|
})
|
||||||
|
|
||||||
|
It("rejects packets with the same packet number", func() {
|
||||||
|
packet1 := Packet{PacketNumber: 1, Frames: []frames.Frame{&streamFrame}, Length: 1}
|
||||||
|
packet2 := Packet{PacketNumber: 1, Frames: []frames.Frame{&streamFrame}, Length: 2}
|
||||||
|
err := handler.SentPacket(&packet1)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
err = handler.SentPacket(&packet2)
|
||||||
|
Expect(err).To(MatchError(errPacketNumberNotIncreasing))
|
||||||
|
Expect(handler.lastSentPacketNumber).To(Equal(protocol.PacketNumber(1)))
|
||||||
|
Expect(handler.packetHistory.Front().Value.PacketNumber).To(Equal(protocol.PacketNumber(1)))
|
||||||
|
Expect(handler.bytesInFlight).To(Equal(protocol.ByteCount(1)))
|
||||||
|
Expect(handler.skippedPackets).To(BeEmpty())
|
||||||
|
})
|
||||||
|
|
||||||
|
It("rejects packets with decreasing packet number", func() {
|
||||||
|
packet1 := Packet{PacketNumber: 2, Frames: []frames.Frame{&streamFrame}, Length: 1}
|
||||||
|
packet2 := Packet{PacketNumber: 1, Frames: []frames.Frame{&streamFrame}, Length: 2}
|
||||||
|
err := handler.SentPacket(&packet1)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
err = handler.SentPacket(&packet2)
|
||||||
|
Expect(err).To(MatchError(errPacketNumberNotIncreasing))
|
||||||
|
Expect(handler.lastSentPacketNumber).To(Equal(protocol.PacketNumber(2)))
|
||||||
|
Expect(handler.packetHistory.Front().Value.PacketNumber).To(Equal(protocol.PacketNumber(2)))
|
||||||
|
Expect(handler.bytesInFlight).To(Equal(protocol.ByteCount(1)))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("stores the sent time", func() {
|
||||||
|
packet := Packet{PacketNumber: 1, Frames: []frames.Frame{&streamFrame}, Length: 1}
|
||||||
|
err := handler.SentPacket(&packet)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
Expect(handler.packetHistory.Front().Value.SendTime.Unix()).To(BeNumerically("~", time.Now().Unix(), 1))
|
||||||
|
})
|
||||||
|
|
||||||
|
Context("skipped packet numbers", func() {
|
||||||
|
It("works with non-consecutive packet numbers", func() {
|
||||||
|
packet1 := Packet{PacketNumber: 1, Frames: []frames.Frame{&streamFrame}, Length: 1}
|
||||||
|
packet2 := Packet{PacketNumber: 3, Frames: []frames.Frame{&streamFrame}, Length: 2}
|
||||||
|
err := handler.SentPacket(&packet1)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
err = handler.SentPacket(&packet2)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
Expect(handler.lastSentPacketNumber).To(Equal(protocol.PacketNumber(3)))
|
||||||
|
el := handler.packetHistory.Front()
|
||||||
|
Expect(el.Value.PacketNumber).To(Equal(protocol.PacketNumber(1)))
|
||||||
|
el = el.Next()
|
||||||
|
Expect(el.Value.PacketNumber).To(Equal(protocol.PacketNumber(3)))
|
||||||
|
Expect(handler.bytesInFlight).To(Equal(protocol.ByteCount(3)))
|
||||||
|
Expect(handler.skippedPackets).To(HaveLen(1))
|
||||||
|
Expect(handler.skippedPackets[0]).To(Equal(protocol.PacketNumber(2)))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("recognizes multiple skipped packets", func() {
|
||||||
|
packet1 := Packet{PacketNumber: 1, Frames: []frames.Frame{&streamFrame}, Length: 1}
|
||||||
|
packet2 := Packet{PacketNumber: 3, Frames: []frames.Frame{&streamFrame}, Length: 2}
|
||||||
|
packet3 := Packet{PacketNumber: 5, Frames: []frames.Frame{&streamFrame}, Length: 2}
|
||||||
|
err := handler.SentPacket(&packet1)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
err = handler.SentPacket(&packet2)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
err = handler.SentPacket(&packet3)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
Expect(handler.skippedPackets).To(HaveLen(2))
|
||||||
|
Expect(handler.skippedPackets).To(Equal([]protocol.PacketNumber{2, 4}))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("recognizes multiple consecutive skipped packets", func() {
|
||||||
|
packet1 := Packet{PacketNumber: 1, Frames: []frames.Frame{&streamFrame}, Length: 1}
|
||||||
|
packet2 := Packet{PacketNumber: 4, Frames: []frames.Frame{&streamFrame}, Length: 2}
|
||||||
|
err := handler.SentPacket(&packet1)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
err = handler.SentPacket(&packet2)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
Expect(handler.skippedPackets).To(HaveLen(2))
|
||||||
|
Expect(handler.skippedPackets).To(Equal([]protocol.PacketNumber{2, 3}))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("limits the lengths of the skipped packet slice", func() {
|
||||||
|
for i := 0; i < protocol.MaxTrackedSkippedPackets+5; i++ {
|
||||||
|
packet := Packet{PacketNumber: protocol.PacketNumber(2*i + 1), Frames: []frames.Frame{&streamFrame}, Length: 1}
|
||||||
|
err := handler.SentPacket(&packet)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
}
|
||||||
|
Expect(handler.skippedPackets).To(HaveLen(protocol.MaxUndecryptablePackets))
|
||||||
|
Expect(handler.skippedPackets[0]).To(Equal(protocol.PacketNumber(10)))
|
||||||
|
Expect(handler.skippedPackets[protocol.MaxTrackedSkippedPackets-1]).To(Equal(protocol.PacketNumber(10 + 2*(protocol.MaxTrackedSkippedPackets-1))))
|
||||||
|
})
|
||||||
|
|
||||||
|
Context("garbage collection", func() {
|
||||||
|
It("keeps all packet numbers above the LargestAcked", func() {
|
||||||
|
handler.skippedPackets = []protocol.PacketNumber{2, 5, 8, 10}
|
||||||
|
handler.LargestAcked = 1
|
||||||
|
handler.garbageCollectSkippedPackets()
|
||||||
|
Expect(handler.skippedPackets).To(Equal([]protocol.PacketNumber{2, 5, 8, 10}))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("doesn't keep packet numbers below the LargestAcked", func() {
|
||||||
|
handler.skippedPackets = []protocol.PacketNumber{1, 5, 8, 10}
|
||||||
|
handler.LargestAcked = 5
|
||||||
|
handler.garbageCollectSkippedPackets()
|
||||||
|
Expect(handler.skippedPackets).To(Equal([]protocol.PacketNumber{8, 10}))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("deletes all packet numbers if LargestAcked is sufficiently high", func() {
|
||||||
|
handler.skippedPackets = []protocol.PacketNumber{1, 5, 10}
|
||||||
|
handler.LargestAcked = 15
|
||||||
|
handler.garbageCollectSkippedPackets()
|
||||||
|
Expect(handler.skippedPackets).To(BeEmpty())
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
Context("DoS mitigation", func() {
|
||||||
|
It("checks the size of the packet history, for unacked packets", func() {
|
||||||
|
i := protocol.PacketNumber(1)
|
||||||
|
for ; i <= protocol.MaxTrackedSentPackets; i++ {
|
||||||
|
packet := Packet{PacketNumber: protocol.PacketNumber(i), Length: 1}
|
||||||
|
err := handler.SentPacket(&packet)
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
}
|
||||||
|
packet := Packet{PacketNumber: protocol.PacketNumber(i), Length: 1}
|
||||||
|
err := handler.SentPacket(&packet)
|
||||||
|
Expect(err).To(MatchError(ErrTooManyTrackedSentPackets))
|
||||||
|
})
|
||||||
|
|
||||||
|
// TODO: add a test that the length of the retransmission queue is considered, even if packets have already been ACKed. Relevant once we drop support for QUIC 33 and earlier
|
||||||
|
})
|
||||||
|
|
||||||
|
Context("ACK processing", func() {
|
||||||
|
var packets []*Packet
|
||||||
|
|
||||||
|
BeforeEach(func() {
|
||||||
|
packets = []*Packet{
|
||||||
|
{PacketNumber: 1, Frames: []frames.Frame{&streamFrame}, Length: 1},
|
||||||
|
{PacketNumber: 2, Frames: []frames.Frame{&streamFrame}, Length: 1},
|
||||||
|
{PacketNumber: 3, Frames: []frames.Frame{&streamFrame}, Length: 1},
|
||||||
|
{PacketNumber: 4, Frames: []frames.Frame{&streamFrame}, Length: 1},
|
||||||
|
{PacketNumber: 5, Frames: []frames.Frame{&streamFrame}, Length: 1},
|
||||||
|
{PacketNumber: 6, Frames: []frames.Frame{&streamFrame}, Length: 1},
|
||||||
|
{PacketNumber: 7, Frames: []frames.Frame{&streamFrame}, Length: 1},
|
||||||
|
{PacketNumber: 8, Frames: []frames.Frame{&streamFrame}, Length: 1},
|
||||||
|
{PacketNumber: 9, Frames: []frames.Frame{&streamFrame}, Length: 1},
|
||||||
|
{PacketNumber: 10, Frames: []frames.Frame{&streamFrame}, Length: 1},
|
||||||
|
{PacketNumber: 12, Frames: []frames.Frame{&streamFrame}, Length: 1},
|
||||||
|
}
|
||||||
|
for _, packet := range packets {
|
||||||
|
err := handler.SentPacket(packet)
|
||||||
|
Expect(err).NotTo(HaveOccurred())
|
||||||
|
}
|
||||||
|
// Increase RTT, because the tests would be flaky otherwise
|
||||||
|
handler.rttStats.UpdateRTT(time.Hour, 0, time.Now())
|
||||||
|
Expect(handler.bytesInFlight).To(Equal(protocol.ByteCount(len(packets))))
|
||||||
|
})
|
||||||
|
|
||||||
|
Context("ACK validation", func() {
|
||||||
|
It("rejects duplicate ACKs", func() {
|
||||||
|
largestAcked := 3
|
||||||
|
ack := frames.AckFrame{
|
||||||
|
LargestAcked: protocol.PacketNumber(largestAcked),
|
||||||
|
LowestAcked: 1,
|
||||||
|
}
|
||||||
|
err := handler.ReceivedAck(&ack, 1337, time.Now())
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
Expect(handler.bytesInFlight).To(Equal(protocol.ByteCount(len(packets) - 3)))
|
||||||
|
err = handler.ReceivedAck(&ack, 1337, time.Now())
|
||||||
|
Expect(err).To(MatchError(ErrDuplicateOrOutOfOrderAck))
|
||||||
|
Expect(handler.bytesInFlight).To(Equal(protocol.ByteCount(len(packets) - 3)))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("rejects out of order ACKs", func() {
|
||||||
|
ack := frames.AckFrame{
|
||||||
|
LargestAcked: 3,
|
||||||
|
}
|
||||||
|
err := handler.ReceivedAck(&ack, 1337, time.Now())
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
Expect(handler.bytesInFlight).To(Equal(protocol.ByteCount(len(packets) - 3)))
|
||||||
|
err = handler.ReceivedAck(&ack, 1337-1, time.Now())
|
||||||
|
Expect(err).To(MatchError(ErrDuplicateOrOutOfOrderAck))
|
||||||
|
Expect(handler.LargestAcked).To(Equal(protocol.PacketNumber(3)))
|
||||||
|
Expect(handler.bytesInFlight).To(Equal(protocol.ByteCount(len(packets) - 3)))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("rejects ACKs with a too high LargestAcked packet number", func() {
|
||||||
|
ack := frames.AckFrame{
|
||||||
|
LargestAcked: packets[len(packets)-1].PacketNumber + 1337,
|
||||||
|
}
|
||||||
|
err := handler.ReceivedAck(&ack, 1, time.Now())
|
||||||
|
Expect(err).To(MatchError(errAckForUnsentPacket))
|
||||||
|
Expect(handler.bytesInFlight).To(Equal(protocol.ByteCount(len(packets))))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("ignores repeated ACKs", func() {
|
||||||
|
ack := frames.AckFrame{
|
||||||
|
LargestAcked: 3,
|
||||||
|
LowestAcked: 1,
|
||||||
|
}
|
||||||
|
err := handler.ReceivedAck(&ack, 1337, time.Now())
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
Expect(handler.bytesInFlight).To(Equal(protocol.ByteCount(len(packets) - 3)))
|
||||||
|
err = handler.ReceivedAck(&ack, 1337+1, time.Now())
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
Expect(handler.LargestAcked).To(Equal(protocol.PacketNumber(3)))
|
||||||
|
Expect(handler.bytesInFlight).To(Equal(protocol.ByteCount(len(packets) - 3)))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("rejects ACKs for skipped packets", func() {
|
||||||
|
ack := frames.AckFrame{
|
||||||
|
LargestAcked: 12,
|
||||||
|
LowestAcked: 5,
|
||||||
|
}
|
||||||
|
err := handler.ReceivedAck(&ack, 1337, time.Now())
|
||||||
|
Expect(err).To(MatchError(ErrAckForSkippedPacket))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("accepts an ACK that correctly nacks a skipped packet", func() {
|
||||||
|
ack := frames.AckFrame{
|
||||||
|
LargestAcked: 12,
|
||||||
|
LowestAcked: 5,
|
||||||
|
AckRanges: []frames.AckRange{
|
||||||
|
{FirstPacketNumber: 12, LastPacketNumber: 12},
|
||||||
|
{FirstPacketNumber: 5, LastPacketNumber: 10},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
err := handler.ReceivedAck(&ack, 1337, time.Now())
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
Expect(handler.LargestAcked).ToNot(BeZero())
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
Context("acks and nacks the right packets", func() {
|
||||||
|
It("adjusts the LargestAcked", func() {
|
||||||
|
ack := frames.AckFrame{
|
||||||
|
LargestAcked: 5,
|
||||||
|
LowestAcked: 1,
|
||||||
|
}
|
||||||
|
err := handler.ReceivedAck(&ack, 1, time.Now())
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
Expect(handler.LargestAcked).To(Equal(protocol.PacketNumber(5)))
|
||||||
|
el := handler.packetHistory.Front()
|
||||||
|
for i := 6; i <= 10; i++ {
|
||||||
|
Expect(el.Value.PacketNumber).To(Equal(protocol.PacketNumber(i)))
|
||||||
|
el = el.Next()
|
||||||
|
}
|
||||||
|
Expect(el.Value.PacketNumber).To(Equal(protocol.PacketNumber(12)))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("ACKs all packets for an ACK frame with no missing packets", func() {
|
||||||
|
ack := frames.AckFrame{
|
||||||
|
LargestAcked: 8,
|
||||||
|
LowestAcked: 2,
|
||||||
|
}
|
||||||
|
err := handler.ReceivedAck(&ack, 1, time.Now())
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
el := handler.packetHistory.Front()
|
||||||
|
Expect(el.Value.PacketNumber).To(Equal(protocol.PacketNumber(1)))
|
||||||
|
el = el.Next()
|
||||||
|
Expect(el.Value.PacketNumber).To(Equal(protocol.PacketNumber(9)))
|
||||||
|
el = el.Next()
|
||||||
|
Expect(el.Value.PacketNumber).To(Equal(protocol.PacketNumber(10)))
|
||||||
|
Expect(el.Next().Value.PacketNumber).To(Equal(protocol.PacketNumber(12)))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("handles an ACK frame with one missing packet range", func() {
|
||||||
|
ack := frames.AckFrame{
|
||||||
|
LargestAcked: 9,
|
||||||
|
LowestAcked: 2,
|
||||||
|
AckRanges: []frames.AckRange{ // packets 4 and 5 were lost
|
||||||
|
{FirstPacketNumber: 6, LastPacketNumber: 9},
|
||||||
|
{FirstPacketNumber: 2, LastPacketNumber: 3},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
err := handler.ReceivedAck(&ack, 1, time.Now())
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
el := handler.packetHistory.Front()
|
||||||
|
Expect(el.Value.PacketNumber).To(Equal(protocol.PacketNumber(1)))
|
||||||
|
el = el.Next()
|
||||||
|
Expect(el.Value.PacketNumber).To(Equal(protocol.PacketNumber(4)))
|
||||||
|
el = el.Next()
|
||||||
|
Expect(el.Value.PacketNumber).To(Equal(protocol.PacketNumber(5)))
|
||||||
|
el = el.Next()
|
||||||
|
Expect(el.Value.PacketNumber).To(Equal(protocol.PacketNumber(10)))
|
||||||
|
Expect(el.Next().Value.PacketNumber).To(Equal(protocol.PacketNumber(12)))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("Does not ack packets below the LowestAcked", func() {
|
||||||
|
ack := frames.AckFrame{
|
||||||
|
LargestAcked: 8,
|
||||||
|
LowestAcked: 3,
|
||||||
|
}
|
||||||
|
err := handler.ReceivedAck(&ack, 1, time.Now())
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
el := handler.packetHistory.Front()
|
||||||
|
Expect(el.Value.PacketNumber).To(Equal(protocol.PacketNumber(1)))
|
||||||
|
el = el.Next()
|
||||||
|
Expect(el.Value.PacketNumber).To(Equal(protocol.PacketNumber(2)))
|
||||||
|
Expect(el.Next().Value.PacketNumber).To(Equal(protocol.PacketNumber(9)))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("handles an ACK with multiple missing packet ranges", func() {
|
||||||
|
ack := frames.AckFrame{
|
||||||
|
LargestAcked: 9,
|
||||||
|
LowestAcked: 1,
|
||||||
|
AckRanges: []frames.AckRange{ // packets 2, 4 and 5, and 8 were lost
|
||||||
|
{FirstPacketNumber: 9, LastPacketNumber: 9},
|
||||||
|
{FirstPacketNumber: 6, LastPacketNumber: 7},
|
||||||
|
{FirstPacketNumber: 3, LastPacketNumber: 3},
|
||||||
|
{FirstPacketNumber: 1, LastPacketNumber: 1},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
err := handler.ReceivedAck(&ack, 1, time.Now())
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
el := handler.packetHistory.Front()
|
||||||
|
Expect(el.Value.PacketNumber).To(Equal(protocol.PacketNumber(2)))
|
||||||
|
el = el.Next()
|
||||||
|
Expect(el.Value.PacketNumber).To(Equal(protocol.PacketNumber(4)))
|
||||||
|
el = el.Next()
|
||||||
|
Expect(el.Value.PacketNumber).To(Equal(protocol.PacketNumber(5)))
|
||||||
|
el = el.Next()
|
||||||
|
Expect(el.Value.PacketNumber).To(Equal(protocol.PacketNumber(8)))
|
||||||
|
el = el.Next()
|
||||||
|
Expect(el.Value.PacketNumber).To(Equal(protocol.PacketNumber(10)))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("processes an ACK frame that would be sent after a late arrival of a packet", func() {
|
||||||
|
largestObserved := 6
|
||||||
|
ack1 := frames.AckFrame{
|
||||||
|
LargestAcked: protocol.PacketNumber(largestObserved),
|
||||||
|
LowestAcked: 1,
|
||||||
|
AckRanges: []frames.AckRange{
|
||||||
|
{FirstPacketNumber: 4, LastPacketNumber: protocol.PacketNumber(largestObserved)},
|
||||||
|
{FirstPacketNumber: 1, LastPacketNumber: 2},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
err := handler.ReceivedAck(&ack1, 1, time.Now())
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
Expect(handler.bytesInFlight).To(Equal(protocol.ByteCount(len(packets) - 5)))
|
||||||
|
el := handler.packetHistory.Front()
|
||||||
|
Expect(el.Value.PacketNumber).To(Equal(protocol.PacketNumber(3)))
|
||||||
|
ack2 := frames.AckFrame{
|
||||||
|
LargestAcked: protocol.PacketNumber(largestObserved),
|
||||||
|
LowestAcked: 1,
|
||||||
|
}
|
||||||
|
err = handler.ReceivedAck(&ack2, 2, time.Now())
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
Expect(handler.bytesInFlight).To(Equal(protocol.ByteCount(len(packets) - 6)))
|
||||||
|
Expect(handler.packetHistory.Front().Value.PacketNumber).To(Equal(protocol.PacketNumber(7)))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("processes an ACK frame that would be sent after a late arrival of a packet and another packet", func() {
|
||||||
|
ack1 := frames.AckFrame{
|
||||||
|
LargestAcked: 6,
|
||||||
|
LowestAcked: 1,
|
||||||
|
AckRanges: []frames.AckRange{
|
||||||
|
{FirstPacketNumber: 4, LastPacketNumber: 6},
|
||||||
|
{FirstPacketNumber: 1, LastPacketNumber: 2},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
err := handler.ReceivedAck(&ack1, 1, time.Now())
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
Expect(handler.bytesInFlight).To(Equal(protocol.ByteCount(len(packets) - 5)))
|
||||||
|
el := handler.packetHistory.Front()
|
||||||
|
Expect(el.Value.PacketNumber).To(Equal(protocol.PacketNumber(3)))
|
||||||
|
ack2 := frames.AckFrame{
|
||||||
|
LargestAcked: 7,
|
||||||
|
LowestAcked: 1,
|
||||||
|
}
|
||||||
|
err = handler.ReceivedAck(&ack2, 2, time.Now())
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
Expect(handler.bytesInFlight).To(Equal(protocol.ByteCount(len(packets) - 7)))
|
||||||
|
Expect(handler.packetHistory.Front().Value.PacketNumber).To(Equal(protocol.PacketNumber(8)))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("processes an ACK that contains old ACK ranges", func() {
|
||||||
|
ack1 := frames.AckFrame{
|
||||||
|
LargestAcked: 6,
|
||||||
|
LowestAcked: 1,
|
||||||
|
}
|
||||||
|
err := handler.ReceivedAck(&ack1, 1, time.Now())
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
Expect(handler.packetHistory.Front().Value.PacketNumber).To(Equal(protocol.PacketNumber(7)))
|
||||||
|
Expect(handler.bytesInFlight).To(Equal(protocol.ByteCount(len(packets) - 6)))
|
||||||
|
ack2 := frames.AckFrame{
|
||||||
|
LargestAcked: 10,
|
||||||
|
LowestAcked: 1,
|
||||||
|
AckRanges: []frames.AckRange{
|
||||||
|
{FirstPacketNumber: 8, LastPacketNumber: 10},
|
||||||
|
{FirstPacketNumber: 3, LastPacketNumber: 3},
|
||||||
|
{FirstPacketNumber: 1, LastPacketNumber: 1},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
err = handler.ReceivedAck(&ack2, 2, time.Now())
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
Expect(handler.bytesInFlight).To(Equal(protocol.ByteCount(len(packets) - 6 - 3)))
|
||||||
|
Expect(handler.packetHistory.Front().Value.PacketNumber).To(Equal(protocol.PacketNumber(7)))
|
||||||
|
Expect(handler.packetHistory.Back().Value.PacketNumber).To(Equal(protocol.PacketNumber(12)))
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
Context("calculating RTT", func() {
|
||||||
|
It("computes the RTT", func() {
|
||||||
|
now := time.Now()
|
||||||
|
// First, fake the sent times of the first, second and last packet
|
||||||
|
getPacketElement(1).Value.SendTime = now.Add(-10 * time.Minute)
|
||||||
|
getPacketElement(2).Value.SendTime = now.Add(-5 * time.Minute)
|
||||||
|
getPacketElement(6).Value.SendTime = now.Add(-1 * time.Minute)
|
||||||
|
// Now, check that the proper times are used when calculating the deltas
|
||||||
|
err := handler.ReceivedAck(&frames.AckFrame{LargestAcked: 1}, 1, time.Now())
|
||||||
|
Expect(err).NotTo(HaveOccurred())
|
||||||
|
Expect(handler.rttStats.LatestRTT()).To(BeNumerically("~", 10*time.Minute, 1*time.Second))
|
||||||
|
err = handler.ReceivedAck(&frames.AckFrame{LargestAcked: 2}, 2, time.Now())
|
||||||
|
Expect(err).NotTo(HaveOccurred())
|
||||||
|
Expect(handler.rttStats.LatestRTT()).To(BeNumerically("~", 5*time.Minute, 1*time.Second))
|
||||||
|
err = handler.ReceivedAck(&frames.AckFrame{LargestAcked: 6}, 3, time.Now())
|
||||||
|
Expect(err).NotTo(HaveOccurred())
|
||||||
|
Expect(handler.rttStats.LatestRTT()).To(BeNumerically("~", 1*time.Minute, 1*time.Second))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("uses the DelayTime in the ack frame", func() {
|
||||||
|
now := time.Now()
|
||||||
|
getPacketElement(1).Value.SendTime = now.Add(-10 * time.Minute)
|
||||||
|
err := handler.ReceivedAck(&frames.AckFrame{LargestAcked: 1, DelayTime: 5 * time.Minute}, 1, time.Now())
|
||||||
|
Expect(err).NotTo(HaveOccurred())
|
||||||
|
Expect(handler.rttStats.LatestRTT()).To(BeNumerically("~", 5*time.Minute, 1*time.Second))
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
Context("Retransmission handling", func() {
|
||||||
|
var packets []*Packet
|
||||||
|
|
||||||
|
BeforeEach(func() {
|
||||||
|
packets = []*Packet{
|
||||||
|
{PacketNumber: 1, Frames: []frames.Frame{&streamFrame}, Length: 1},
|
||||||
|
{PacketNumber: 2, Frames: []frames.Frame{&streamFrame}, Length: 1},
|
||||||
|
{PacketNumber: 3, Frames: []frames.Frame{&streamFrame}, Length: 1},
|
||||||
|
{PacketNumber: 4, Frames: []frames.Frame{&streamFrame}, Length: 1},
|
||||||
|
{PacketNumber: 5, Frames: []frames.Frame{&streamFrame}, Length: 1},
|
||||||
|
{PacketNumber: 6, Frames: []frames.Frame{&streamFrame}, Length: 1},
|
||||||
|
{PacketNumber: 7, Frames: []frames.Frame{&streamFrame}, Length: 1},
|
||||||
|
}
|
||||||
|
for _, packet := range packets {
|
||||||
|
handler.SentPacket(packet)
|
||||||
|
}
|
||||||
|
// Increase RTT, because the tests would be flaky otherwise
|
||||||
|
handler.rttStats.UpdateRTT(time.Minute, 0, time.Now())
|
||||||
|
// Ack a single packet so that we have non-RTO timings
|
||||||
|
handler.ReceivedAck(&frames.AckFrame{LargestAcked: 2, LowestAcked: 2}, 1, time.Now())
|
||||||
|
Expect(handler.bytesInFlight).To(Equal(protocol.ByteCount(6)))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("does not dequeue a packet if no ack has been received", func() {
|
||||||
|
Expect(handler.DequeuePacketForRetransmission()).To(BeNil())
|
||||||
|
})
|
||||||
|
|
||||||
|
It("dequeues a packet for retransmission", func() {
|
||||||
|
getPacketElement(1).Value.SendTime = time.Now().Add(-time.Hour)
|
||||||
|
handler.OnAlarm()
|
||||||
|
Expect(getPacketElement(1)).To(BeNil())
|
||||||
|
Expect(handler.retransmissionQueue).To(HaveLen(1))
|
||||||
|
Expect(handler.retransmissionQueue[0].PacketNumber).To(Equal(protocol.PacketNumber(1)))
|
||||||
|
packet := handler.DequeuePacketForRetransmission()
|
||||||
|
Expect(packet).ToNot(BeNil())
|
||||||
|
Expect(packet.PacketNumber).To(Equal(protocol.PacketNumber(1)))
|
||||||
|
Expect(handler.DequeuePacketForRetransmission()).To(BeNil())
|
||||||
|
})
|
||||||
|
|
||||||
|
Context("StopWaitings", func() {
|
||||||
|
It("gets a StopWaitingFrame", func() {
|
||||||
|
ack := frames.AckFrame{LargestAcked: 5, LowestAcked: 5}
|
||||||
|
err := handler.ReceivedAck(&ack, 2, time.Now())
|
||||||
|
Expect(err).ToNot(HaveOccurred())
|
||||||
|
Expect(handler.GetStopWaitingFrame(false)).To(Equal(&frames.StopWaitingFrame{LeastUnacked: 6}))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("gets a StopWaitingFrame after queueing a retransmission", func() {
|
||||||
|
handler.queuePacketForRetransmission(getPacketElement(5))
|
||||||
|
Expect(handler.GetStopWaitingFrame(false)).To(Equal(&frames.StopWaitingFrame{LeastUnacked: 6}))
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
It("calculates bytes in flight", func() {
|
||||||
|
packet1 := Packet{PacketNumber: 1, Frames: []frames.Frame{&streamFrame}, Length: 1}
|
||||||
|
packet2 := Packet{PacketNumber: 2, Frames: []frames.Frame{&streamFrame}, Length: 2}
|
||||||
|
packet3 := Packet{PacketNumber: 3, Frames: []frames.Frame{&streamFrame}, Length: 3}
|
||||||
|
err := handler.SentPacket(&packet1)
|
||||||
|
Expect(err).NotTo(HaveOccurred())
|
||||||
|
Expect(handler.bytesInFlight).To(Equal(protocol.ByteCount(1)))
|
||||||
|
err = handler.SentPacket(&packet2)
|
||||||
|
Expect(err).NotTo(HaveOccurred())
|
||||||
|
Expect(handler.bytesInFlight).To(Equal(protocol.ByteCount(1 + 2)))
|
||||||
|
err = handler.SentPacket(&packet3)
|
||||||
|
Expect(err).NotTo(HaveOccurred())
|
||||||
|
Expect(handler.bytesInFlight).To(Equal(protocol.ByteCount(1 + 2 + 3)))
|
||||||
|
|
||||||
|
// Increase RTT, because the tests would be flaky otherwise
|
||||||
|
handler.rttStats.UpdateRTT(time.Minute, 0, time.Now())
|
||||||
|
|
||||||
|
// ACK 1 and 3, NACK 2
|
||||||
|
ack := frames.AckFrame{
|
||||||
|
LargestAcked: 3,
|
||||||
|
LowestAcked: 1,
|
||||||
|
AckRanges: []frames.AckRange{
|
||||||
|
{FirstPacketNumber: 3, LastPacketNumber: 3},
|
||||||
|
{FirstPacketNumber: 1, LastPacketNumber: 1},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
err = handler.ReceivedAck(&ack, 1, time.Now())
|
||||||
|
Expect(err).NotTo(HaveOccurred())
|
||||||
|
Expect(handler.bytesInFlight).To(Equal(protocol.ByteCount(2)))
|
||||||
|
|
||||||
|
handler.packetHistory.Front().Value.SendTime = time.Now().Add(-time.Hour)
|
||||||
|
handler.OnAlarm()
|
||||||
|
|
||||||
|
Expect(handler.bytesInFlight).To(Equal(protocol.ByteCount(0)))
|
||||||
|
})
|
||||||
|
|
||||||
|
Context("congestion", func() {
|
||||||
|
var (
|
||||||
|
cong *mockCongestion
|
||||||
|
)
|
||||||
|
|
||||||
|
BeforeEach(func() {
|
||||||
|
cong = &mockCongestion{}
|
||||||
|
handler.congestion = cong
|
||||||
|
})
|
||||||
|
|
||||||
|
It("should call OnSent", func() {
|
||||||
|
p := &Packet{
|
||||||
|
PacketNumber: 1,
|
||||||
|
Length: 42,
|
||||||
|
}
|
||||||
|
err := handler.SentPacket(p)
|
||||||
|
Expect(err).NotTo(HaveOccurred())
|
||||||
|
Expect(cong.argsOnPacketSent[1]).To(Equal(protocol.ByteCount(42)))
|
||||||
|
Expect(cong.argsOnPacketSent[2]).To(Equal(protocol.PacketNumber(1)))
|
||||||
|
Expect(cong.argsOnPacketSent[3]).To(Equal(protocol.ByteCount(42)))
|
||||||
|
Expect(cong.argsOnPacketSent[4]).To(BeTrue())
|
||||||
|
})
|
||||||
|
|
||||||
|
It("should call MaybeExitSlowStart and OnPacketAcked", func() {
|
||||||
|
handler.SentPacket(&Packet{PacketNumber: 1, Frames: []frames.Frame{}, Length: 1})
|
||||||
|
handler.SentPacket(&Packet{PacketNumber: 2, Frames: []frames.Frame{}, Length: 1})
|
||||||
|
err := handler.ReceivedAck(&frames.AckFrame{LargestAcked: 1, LowestAcked: 1}, 1, time.Now())
|
||||||
|
Expect(err).NotTo(HaveOccurred())
|
||||||
|
Expect(cong.maybeExitSlowStart).To(BeTrue())
|
||||||
|
Expect(cong.packetsAcked).To(BeEquivalentTo([][]interface{}{
|
||||||
|
{protocol.PacketNumber(1), protocol.ByteCount(1), protocol.ByteCount(1)},
|
||||||
|
}))
|
||||||
|
Expect(cong.packetsLost).To(BeEmpty())
|
||||||
|
})
|
||||||
|
|
||||||
|
It("should call MaybeExitSlowStart and OnPacketLost", func() {
|
||||||
|
handler.SentPacket(&Packet{PacketNumber: 1, Frames: []frames.Frame{}, Length: 1})
|
||||||
|
handler.SentPacket(&Packet{PacketNumber: 2, Frames: []frames.Frame{}, Length: 1})
|
||||||
|
handler.SentPacket(&Packet{PacketNumber: 3, Frames: []frames.Frame{}, Length: 1})
|
||||||
|
handler.OnAlarm() // RTO, meaning 2 lost packets
|
||||||
|
Expect(cong.maybeExitSlowStart).To(BeFalse())
|
||||||
|
Expect(cong.onRetransmissionTimeout).To(BeTrue())
|
||||||
|
Expect(cong.packetsAcked).To(BeEmpty())
|
||||||
|
Expect(cong.packetsLost).To(BeEquivalentTo([][]interface{}{
|
||||||
|
{protocol.PacketNumber(1), protocol.ByteCount(1), protocol.ByteCount(2)},
|
||||||
|
{protocol.PacketNumber(2), protocol.ByteCount(1), protocol.ByteCount(1)},
|
||||||
|
}))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("allows or denies sending based on congestion", func() {
|
||||||
|
Expect(handler.SendingAllowed()).To(BeTrue())
|
||||||
|
err := handler.SentPacket(&Packet{PacketNumber: 1, Frames: []frames.Frame{}, Length: protocol.DefaultTCPMSS + 1})
|
||||||
|
Expect(err).NotTo(HaveOccurred())
|
||||||
|
Expect(handler.SendingAllowed()).To(BeFalse())
|
||||||
|
})
|
||||||
|
|
||||||
|
It("allows or denies sending based on the number of tracked packets", func() {
|
||||||
|
Expect(handler.SendingAllowed()).To(BeTrue())
|
||||||
|
handler.retransmissionQueue = make([]*Packet, protocol.MaxTrackedSentPackets)
|
||||||
|
Expect(handler.SendingAllowed()).To(BeFalse())
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
Context("calculating RTO", func() {
|
||||||
|
It("uses default RTO", func() {
|
||||||
|
Expect(handler.computeRTOTimeout()).To(Equal(defaultRTOTimeout))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("uses RTO from rttStats", func() {
|
||||||
|
rtt := time.Second
|
||||||
|
expected := rtt + rtt/2*4
|
||||||
|
handler.rttStats.UpdateRTT(rtt, 0, time.Now())
|
||||||
|
Expect(handler.computeRTOTimeout()).To(Equal(expected))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("limits RTO min", func() {
|
||||||
|
rtt := time.Millisecond
|
||||||
|
handler.rttStats.UpdateRTT(rtt, 0, time.Now())
|
||||||
|
Expect(handler.computeRTOTimeout()).To(Equal(minRTOTimeout))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("limits RTO max", func() {
|
||||||
|
rtt := time.Hour
|
||||||
|
handler.rttStats.UpdateRTT(rtt, 0, time.Now())
|
||||||
|
Expect(handler.computeRTOTimeout()).To(Equal(maxRTOTimeout))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("implements exponential backoff", func() {
|
||||||
|
handler.rtoCount = 0
|
||||||
|
Expect(handler.computeRTOTimeout()).To(Equal(defaultRTOTimeout))
|
||||||
|
handler.rtoCount = 1
|
||||||
|
Expect(handler.computeRTOTimeout()).To(Equal(2 * defaultRTOTimeout))
|
||||||
|
handler.rtoCount = 2
|
||||||
|
Expect(handler.computeRTOTimeout()).To(Equal(4 * defaultRTOTimeout))
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
Context("Delay-based loss detection", func() {
|
||||||
|
It("detects a packet as lost", func() {
|
||||||
|
err := handler.SentPacket(&Packet{PacketNumber: 1, Length: 1})
|
||||||
|
Expect(err).NotTo(HaveOccurred())
|
||||||
|
err = handler.SentPacket(&Packet{PacketNumber: 2, Length: 1})
|
||||||
|
Expect(err).NotTo(HaveOccurred())
|
||||||
|
Expect(handler.lossTime.IsZero()).To(BeTrue())
|
||||||
|
|
||||||
|
err = handler.ReceivedAck(&frames.AckFrame{LargestAcked: 2, LowestAcked: 2}, 1, time.Now().Add(time.Hour))
|
||||||
|
Expect(err).NotTo(HaveOccurred())
|
||||||
|
Expect(handler.lossTime.IsZero()).To(BeFalse())
|
||||||
|
|
||||||
|
// RTT is around 1h now.
|
||||||
|
// The formula is (1+1/8) * RTT, so this should be around that number
|
||||||
|
Expect(handler.lossTime.Sub(time.Now())).To(BeNumerically("~", time.Hour*9/8, time.Minute))
|
||||||
|
Expect(handler.GetAlarmTimeout().Sub(time.Now())).To(BeNumerically("~", time.Hour*9/8, time.Minute))
|
||||||
|
|
||||||
|
handler.packetHistory.Front().Value.SendTime = time.Now().Add(-2 * time.Hour)
|
||||||
|
handler.OnAlarm()
|
||||||
|
Expect(handler.DequeuePacketForRetransmission()).NotTo(BeNil())
|
||||||
|
})
|
||||||
|
|
||||||
|
It("does not detect packets as lost without ACKs", func() {
|
||||||
|
err := handler.SentPacket(&Packet{PacketNumber: 1, Length: 1})
|
||||||
|
Expect(err).NotTo(HaveOccurred())
|
||||||
|
err = handler.SentPacket(&Packet{PacketNumber: 2, Length: 1})
|
||||||
|
Expect(err).NotTo(HaveOccurred())
|
||||||
|
err = handler.SentPacket(&Packet{PacketNumber: 3, Length: 1})
|
||||||
|
Expect(err).NotTo(HaveOccurred())
|
||||||
|
Expect(handler.lossTime.IsZero()).To(BeTrue())
|
||||||
|
|
||||||
|
err = handler.ReceivedAck(&frames.AckFrame{LargestAcked: 1, LowestAcked: 1}, 1, time.Now().Add(time.Hour))
|
||||||
|
Expect(err).NotTo(HaveOccurred())
|
||||||
|
Expect(handler.lossTime.IsZero()).To(BeTrue())
|
||||||
|
Expect(handler.GetAlarmTimeout().Sub(time.Now())).To(BeNumerically("~", handler.computeRTOTimeout(), time.Minute))
|
||||||
|
|
||||||
|
// This means RTO, so both packets should be lost
|
||||||
|
handler.OnAlarm()
|
||||||
|
Expect(handler.DequeuePacketForRetransmission()).ToNot(BeNil())
|
||||||
|
Expect(handler.DequeuePacketForRetransmission()).ToNot(BeNil())
|
||||||
|
})
|
||||||
|
})
|
||||||
|
|
||||||
|
Context("RTO retransmission", func() {
|
||||||
|
It("queues two packets if RTO expires", func() {
|
||||||
|
err := handler.SentPacket(&Packet{PacketNumber: 1, Length: 1})
|
||||||
|
Expect(err).NotTo(HaveOccurred())
|
||||||
|
err = handler.SentPacket(&Packet{PacketNumber: 2, Length: 1})
|
||||||
|
Expect(err).NotTo(HaveOccurred())
|
||||||
|
|
||||||
|
handler.rttStats.UpdateRTT(time.Hour, 0, time.Now())
|
||||||
|
Expect(handler.lossTime.IsZero()).To(BeTrue())
|
||||||
|
Expect(handler.GetAlarmTimeout().Sub(time.Now())).To(BeNumerically("~", handler.computeRTOTimeout(), time.Minute))
|
||||||
|
|
||||||
|
handler.OnAlarm()
|
||||||
|
Expect(handler.DequeuePacketForRetransmission()).ToNot(BeNil())
|
||||||
|
Expect(handler.DequeuePacketForRetransmission()).ToNot(BeNil())
|
||||||
|
|
||||||
|
Expect(handler.rtoCount).To(BeEquivalentTo(1))
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
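The RTO specs above pin down the timeout calculation without showing it. Below is a minimal standalone sketch of that behaviour under the assumption that the handler clamps first and then applies the per-RTO backoff; the constant values are placeholders, not the package's real defaultRTOTimeout/minRTOTimeout/maxRTOTimeout.

package main

import (
	"fmt"
	"time"
)

// Illustrative constants; the real values live in the ackhandler package.
const (
	defaultRTO = 500 * time.Millisecond
	minRTO     = 200 * time.Millisecond
	maxRTO     = 60 * time.Second
)

// rtoTimeout mirrors what the specs above check: fall back to a default when
// there is no RTT sample, otherwise use smoothed RTT + 4*rttvar, clamp to
// [minRTO, maxRTO], and double the result for every consecutive RTO.
func rtoTimeout(srtt, rttvar time.Duration, rtoCount uint) time.Duration {
	rto := defaultRTO
	if srtt != 0 {
		rto = srtt + 4*rttvar
	}
	if rto < minRTO {
		rto = minRTO
	}
	if rto > maxRTO {
		rto = maxRTO
	}
	return rto << rtoCount // 2^rtoCount exponential backoff
}

func main() {
	fmt.Println(rtoTimeout(time.Second, time.Second/2, 0)) // 3s, matching rtt + rtt/2*4
	fmt.Println(rtoTimeout(0, 0, 2))                       // 2s, i.e. 4 * default
}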
42 vendor/github.com/lucas-clemente/quic-go/ackhandler/stop_waiting_manager.go generated vendored Normal file
@ -0,0 +1,42 @@
package ackhandler

import (
"github.com/lucas-clemente/quic-go/frames"
"github.com/lucas-clemente/quic-go/protocol"
)

// This stopWaitingManager is not supposed to satisfy the StopWaitingManager interface, which is a remnant of the legacy AckHandler, and should be removed once we drop support for QUIC 33
type stopWaitingManager struct {
largestLeastUnackedSent protocol.PacketNumber
nextLeastUnacked protocol.PacketNumber

lastStopWaitingFrame *frames.StopWaitingFrame
}

func (s *stopWaitingManager) GetStopWaitingFrame(force bool) *frames.StopWaitingFrame {
if s.nextLeastUnacked <= s.largestLeastUnackedSent {
if force {
return s.lastStopWaitingFrame
}
return nil
}

s.largestLeastUnackedSent = s.nextLeastUnacked
swf := &frames.StopWaitingFrame{
LeastUnacked: s.nextLeastUnacked,
}
s.lastStopWaitingFrame = swf
return swf
}

func (s *stopWaitingManager) ReceivedAck(ack *frames.AckFrame) {
if ack.LargestAcked >= s.nextLeastUnacked {
s.nextLeastUnacked = ack.LargestAcked + 1
}
}

func (s *stopWaitingManager) QueuedRetransmissionForPacketNumber(p protocol.PacketNumber) {
if p >= s.nextLeastUnacked {
s.nextLeastUnacked = p + 1
}
}
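For orientation, a hedged sketch of how a caller inside package ackhandler could drive this type; the helper name nextStopWaiting is hypothetical and not part of this diff, it only shows the call order the tests below verify.

// Hypothetical helper inside package ackhandler (illustration only): feed an ACK
// in, then ask for the StopWaitingFrame to bundle with the next outgoing packet.
func nextStopWaiting(m *stopWaitingManager, ack *frames.AckFrame) *frames.StopWaitingFrame {
	m.ReceivedAck(ack)                  // advances nextLeastUnacked to LargestAcked+1
	return m.GetStopWaitingFrame(false) // nil if nothing new needs to be announced
}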
55 vendor/github.com/lucas-clemente/quic-go/ackhandler/stop_waiting_manager_test.go generated vendored Normal file
@ -0,0 +1,55 @@
package ackhandler

import (
"github.com/lucas-clemente/quic-go/frames"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)

var _ = Describe("StopWaitingManager", func() {
var manager *stopWaitingManager
BeforeEach(func() {
manager = &stopWaitingManager{}
})

It("returns nil in the beginning", func() {
Expect(manager.GetStopWaitingFrame(false)).To(BeNil())
Expect(manager.GetStopWaitingFrame(true)).To(BeNil())
})

It("returns a StopWaitingFrame, when a new ACK arrives", func() {
manager.ReceivedAck(&frames.AckFrame{LargestAcked: 10})
Expect(manager.GetStopWaitingFrame(false)).To(Equal(&frames.StopWaitingFrame{LeastUnacked: 11}))
})

It("does not decrease the LeastUnacked", func() {
manager.ReceivedAck(&frames.AckFrame{LargestAcked: 10})
manager.ReceivedAck(&frames.AckFrame{LargestAcked: 9})
Expect(manager.GetStopWaitingFrame(false)).To(Equal(&frames.StopWaitingFrame{LeastUnacked: 11}))
})

It("does not send the same StopWaitingFrame twice", func() {
manager.ReceivedAck(&frames.AckFrame{LargestAcked: 10})
Expect(manager.GetStopWaitingFrame(false)).ToNot(BeNil())
Expect(manager.GetStopWaitingFrame(false)).To(BeNil())
})

It("gets the same StopWaitingFrame twice, if forced", func() {
manager.ReceivedAck(&frames.AckFrame{LargestAcked: 10})
Expect(manager.GetStopWaitingFrame(false)).ToNot(BeNil())
Expect(manager.GetStopWaitingFrame(true)).ToNot(BeNil())
Expect(manager.GetStopWaitingFrame(true)).ToNot(BeNil())
})

It("increases the LeastUnacked when a retransmission is queued", func() {
manager.ReceivedAck(&frames.AckFrame{LargestAcked: 10})
manager.QueuedRetransmissionForPacketNumber(20)
Expect(manager.GetStopWaitingFrame(false)).To(Equal(&frames.StopWaitingFrame{LeastUnacked: 21}))
})

It("does not decrease the LeastUnacked when a retransmission is queued", func() {
manager.ReceivedAck(&frames.AckFrame{LargestAcked: 10})
manager.QueuedRetransmissionForPacketNumber(9)
Expect(manager.GetStopWaitingFrame(false)).To(Equal(&frames.StopWaitingFrame{LeastUnacked: 11}))
})
})
@ -0,0 +1,34 @@
version: "{build}"

os: Windows Server 2012 R2

environment:
  GOPATH: c:\gopath
  CGO_ENABLED: 0
  matrix:
    - GOARCH: 386
    - GOARCH: amd64

clone_folder: c:\gopath\src\github.com\lucas-clemente\quic-go

install:
  - rmdir c:\go /s /q
  - appveyor DownloadFile https://storage.googleapis.com/golang/go1.8.windows-amd64.zip
  - 7z x go1.8.windows-amd64.zip -y -oC:\ > NUL
  - set PATH=%PATH%;%GOPATH%\bin\windows_%GOARCH%;%GOPATH%\bin
  - echo %PATH%
  - echo %GOPATH%
  - git submodule update --init --recursive
  - go get github.com/onsi/ginkgo/ginkgo
  - go get github.com/onsi/gomega
  - go version
  - go env
  - go get -v -t ./...

build_script:
  - rm -r integrationtests
  - ginkgo -r --randomizeAllSpecs --randomizeSuites --trace --progress

test: off

deploy: off
@ -0,0 +1,78 @@
package quic

import (
"bytes"
"crypto/tls"
"fmt"
"io"
"math/rand"
"time"

"github.com/lucas-clemente/quic-go/protocol"
"github.com/lucas-clemente/quic-go/testdata"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)

var _ = Describe("Benchmarks", func() {
dataLen := 50 /* MB */ * (1 << 20)
data := make([]byte, dataLen)

rand.Seed(time.Now().UnixNano())

for i := range protocol.SupportedVersions {
version := protocol.SupportedVersions[i]

Context(fmt.Sprintf("with version %d", version), func() {
Measure("transferring a file", func(b Benchmarker) {
rand.Read(data) // no need to check for an error. math.Rand.Read never errors

// start the server
sconf := &Config{
TLSConfig: testdata.GetTLSConfig(),
ConnState: func(sess Session, cs ConnState) {
if cs != ConnStateForwardSecure {
return
}

defer GinkgoRecover()
str, err := sess.OpenStream()
Expect(err).ToNot(HaveOccurred())
_, err = str.Write(data)
Expect(err).ToNot(HaveOccurred())
err = str.Close()
Expect(err).ToNot(HaveOccurred())
},
}
ln, err := ListenAddr("localhost:0", sconf)
Expect(err).ToNot(HaveOccurred())
// Serve will error as soon as ln is closed. Ignore all errors here
go ln.Serve()

// start the client
cconf := &Config{
TLSConfig: &tls.Config{InsecureSkipVerify: true},
}
sess, err := DialAddr(ln.Addr().String(), cconf)
Expect(err).ToNot(HaveOccurred())
str, err := sess.AcceptStream()
Expect(err).ToNot(HaveOccurred())

buf := &bytes.Buffer{}
// measure the time it takes to download the dataLen bytes
// note we're measuring the time for the transfer, i.e. excluding the handshake
runtime := b.Time("transfer time", func() {
_, err := io.Copy(buf, str)
Expect(err).NotTo(HaveOccurred())
})
// this is *a lot* faster than Expect(buf.Bytes()).To(Equal(data))
Expect(bytes.Equal(buf.Bytes(), data)).To(BeTrue())

b.RecordValue("transfer rate [MB/s]", float64(dataLen)/1e6/runtime.Seconds())

ln.Close()
sess.Close(nil)
}, 6)
})
}
})
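The recorded transfer rate is plain arithmetic over the measured time, reported in decimal megabytes per second; a quick standalone check of the units (the 0.5 s figure is an assumed example, not a measurement from this benchmark):

package main

import "fmt"

func main() {
	dataLen := 50 * (1 << 20)            // 52428800 bytes, the benchmark payload
	rate := float64(dataLen) / 1e6 / 0.5 // assume the transfer took 0.5 s
	fmt.Printf("%.1f MB/s\n", rate)      // ≈ 104.9 MB/s
}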
@ -0,0 +1,26 @@
package quic

import (
"sync"

"github.com/lucas-clemente/quic-go/protocol"
)

var bufferPool sync.Pool

func getPacketBuffer() []byte {
return bufferPool.Get().([]byte)
}

func putPacketBuffer(buf []byte) {
if cap(buf) != int(protocol.MaxReceivePacketSize) {
panic("putPacketBuffer called with packet of wrong size!")
}
bufferPool.Put(buf[:0])
}

func init() {
bufferPool.New = func() interface{} {
return make([]byte, 0, protocol.MaxReceivePacketSize)
}
}
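The pool hands out zero-length slices with a fixed capacity; the client's read loop later in this diff reslices them to full capacity before reading and returns them afterwards. A self-contained sketch of the same pattern follows; maxReceivePacketSize here is a stand-in value, not the real protocol constant.

package main

import (
	"fmt"
	"sync"
)

const maxReceivePacketSize = 1452 // assumed value, for illustration only

var pool = sync.Pool{New: func() interface{} { return make([]byte, 0, maxReceivePacketSize) }}

func main() {
	buf := pool.Get().([]byte)
	buf = buf[:maxReceivePacketSize] // reslice to full capacity before a read
	n := copy(buf, []byte("payload")) // stand-in for filling buf[:n] from the network
	fmt.Println(n, cap(buf))
	pool.Put(buf[:0]) // return with length 0 but capacity intact, as putPacketBuffer does
}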
@ -0,0 +1,32 @@
package quic

import (
"github.com/lucas-clemente/quic-go/protocol"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)

var _ = Describe("Buffer Pool", func() {
It("returns buffers of correct len and cap", func() {
buf := getPacketBuffer()
Expect(buf).To(HaveLen(0))
Expect(buf).To(HaveCap(int(protocol.MaxReceivePacketSize)))
})

It("zeroes put buffers' length", func() {
for i := 0; i < 1000; i++ {
buf := getPacketBuffer()
putPacketBuffer(buf[0:10])
buf = getPacketBuffer()
Expect(buf).To(HaveLen(0))
Expect(buf).To(HaveCap(int(protocol.MaxReceivePacketSize)))
}
})

It("panics if wrong-sized buffers are passed", func() {
Expect(func() {
putPacketBuffer([]byte{0})
}).To(Panic())
})
})
@ -0,0 +1,261 @@
package quic

import (
"bytes"
"errors"
"net"
"strings"
"sync"
"time"

"github.com/lucas-clemente/quic-go/protocol"
"github.com/lucas-clemente/quic-go/qerr"
"github.com/lucas-clemente/quic-go/utils"
)

type client struct {
mutex sync.Mutex
connStateChangeOrErrCond sync.Cond
listenErr error

conn connection
hostname string

config *Config
connState ConnState

connectionID protocol.ConnectionID
version protocol.VersionNumber

session packetHandler
}

var (
errCloseSessionForNewVersion = errors.New("closing session in order to recreate it with a new version")
)

// Dial establishes a new QUIC connection to a server using a net.PacketConn.
// The host parameter is used for SNI.
func Dial(pconn net.PacketConn, remoteAddr net.Addr, host string, config *Config) (Session, error) {
connID, err := utils.GenerateConnectionID()
if err != nil {
return nil, err
}

hostname, _, err := net.SplitHostPort(host)
if err != nil {
return nil, err
}

c := &client{
conn: &conn{pconn: pconn, currentAddr: remoteAddr},
connectionID: connID,
hostname: hostname,
config: config,
version: protocol.SupportedVersions[len(protocol.SupportedVersions)-1], // use the highest supported version by default
}

c.connStateChangeOrErrCond.L = &c.mutex

err = c.createNewSession(nil)
if err != nil {
return nil, err
}

utils.Infof("Starting new connection to %s (%s), connectionID %x, version %d", hostname, c.conn.RemoteAddr().String(), c.connectionID, c.version)

return c.establishConnection()
}

// DialAddr establishes a new QUIC connection to a server.
// The hostname for SNI is taken from the given address.
func DialAddr(addr string, config *Config) (Session, error) {
udpAddr, err := net.ResolveUDPAddr("udp", addr)
if err != nil {
return nil, err
}

udpConn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4zero, Port: 0})
if err != nil {
return nil, err
}

return Dial(udpConn, udpAddr, addr, config)
}

func (c *client) establishConnection() (Session, error) {
go c.listen()

c.mutex.Lock()
defer c.mutex.Unlock()

for {
if c.listenErr != nil {
return nil, c.listenErr
}
if c.config.ConnState != nil && c.connState >= ConnStateVersionNegotiated {
break
}
if c.config.ConnState == nil && c.connState == ConnStateForwardSecure {
break
}
c.connStateChangeOrErrCond.Wait()
}

return c.session, nil
}

// listen reads packets from the connection until an error occurs
func (c *client) listen() {
var err error

for {
var n int
var addr net.Addr
data := getPacketBuffer()
data = data[:protocol.MaxReceivePacketSize]
// The packet size should not exceed protocol.MaxReceivePacketSize bytes
// If it does, we only read a truncated packet, which will then end up undecryptable
n, addr, err = c.conn.Read(data)
if err != nil {
if !strings.HasSuffix(err.Error(), "use of closed network connection") {
c.session.Close(err)
}
break
}
data = data[:n]

err = c.handlePacket(addr, data)
if err != nil {
utils.Errorf("error handling packet: %s", err.Error())
c.session.Close(err)
break
}
}

c.mutex.Lock()
c.listenErr = err
c.connStateChangeOrErrCond.Signal()
c.mutex.Unlock()
}

func (c *client) handlePacket(remoteAddr net.Addr, packet []byte) error {
rcvTime := time.Now()

r := bytes.NewReader(packet)
hdr, err := ParsePublicHeader(r, protocol.PerspectiveServer)
if err != nil {
return qerr.Error(qerr.InvalidPacketHeader, err.Error())
}
hdr.Raw = packet[:len(packet)-r.Len()]

c.mutex.Lock()
defer c.mutex.Unlock()

// ignore delayed / duplicated version negotiation packets
if c.connState >= ConnStateVersionNegotiated && hdr.VersionFlag {
return nil
}

// this is the first packet after the client sent a packet with the VersionFlag set
// if the server doesn't send a version negotiation packet, it supports the suggested version
if !hdr.VersionFlag && c.connState == ConnStateInitial {
c.connState = ConnStateVersionNegotiated
c.connStateChangeOrErrCond.Signal()
if c.config.ConnState != nil {
go c.config.ConnState(c.session, ConnStateVersionNegotiated)
}
}

if hdr.VersionFlag {
err = c.handlePacketWithVersionFlag(hdr)
if err != nil {
return err
}
}

c.session.handlePacket(&receivedPacket{
remoteAddr: remoteAddr,
publicHeader: hdr,
data: packet[len(packet)-r.Len():],
rcvTime: rcvTime,
})
return nil
}

func (c *client) handlePacketWithVersionFlag(hdr *PublicHeader) error {
for _, v := range hdr.SupportedVersions {
// check if the server sent the offered version in supported versions
if v == c.version {
return qerr.Error(qerr.InvalidVersionNegotiationPacket, "Server already supports client's version and should have accepted the connection.")
}
}

ok, highestSupportedVersion := protocol.HighestSupportedVersion(hdr.SupportedVersions)
if !ok {
return qerr.InvalidVersion
}

// switch to negotiated version
c.version = highestSupportedVersion
c.connState = ConnStateVersionNegotiated
var err error
c.connectionID, err = utils.GenerateConnectionID()
if err != nil {
return err
}
utils.Infof("Switching to QUIC version %d. New connection ID: %x", highestSupportedVersion, c.connectionID)

c.session.Close(errCloseSessionForNewVersion)
err = c.createNewSession(hdr.SupportedVersions)
if err != nil {
return err
}
if c.config.ConnState != nil {
go c.config.ConnState(c.session, ConnStateVersionNegotiated)
}

return nil // version negotiation packets have no payload
}

func (c *client) cryptoChangeCallback(_ Session, isForwardSecure bool) {
var state ConnState
if isForwardSecure {
state = ConnStateForwardSecure
} else {
state = ConnStateSecure
}

c.mutex.Lock()
c.connState = state
c.connStateChangeOrErrCond.Signal()
c.mutex.Unlock()

if c.config.ConnState != nil {
go c.config.ConnState(c.session, state)
}
}

func (c *client) createNewSession(negotiatedVersions []protocol.VersionNumber) error {
var err error
c.session, err = newClientSession(
c.conn,
c.hostname,
c.version,
c.connectionID,
c.config.TLSConfig,
c.closeCallback,
c.cryptoChangeCallback,
negotiatedVersions)
if err != nil {
return err
}

go c.session.run()
return nil
}

func (c *client) closeCallback(_ protocol.ConnectionID) {
utils.Infof("Connection %x closed.", c.connectionID)
c.conn.Close()
}
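Since this client is what the routed command builds on, here is a short sketch of the exported API as vendored in this diff (v0.5.0): DialAddr resolves and dials, and the returned Session opens streams. The address and the InsecureSkipVerify TLS config below are placeholders for illustration only.

package main

import (
	"crypto/tls"
	"log"

	quic "github.com/lucas-clemente/quic-go"
)

func main() {
	// Dial a QUIC server; the hostname for SNI is taken from the address.
	sess, err := quic.DialAddr("quic.example.com:4433", &quic.Config{
		TLSConfig: &tls.Config{InsecureSkipVerify: true}, // placeholder TLS config
	})
	if err != nil {
		log.Fatal(err)
	}
	str, err := sess.OpenStream()
	if err != nil {
		log.Fatal(err)
	}
	if _, err := str.Write([]byte("hello over QUIC")); err != nil {
		log.Fatal(err)
	}
	str.Close()
	sess.Close(nil)
}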
@ -0,0 +1,270 @@
package quic

import (
"bytes"
"encoding/binary"
"errors"
"net"
"reflect"
"unsafe"

"github.com/lucas-clemente/quic-go/protocol"
"github.com/lucas-clemente/quic-go/qerr"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)

var _ = Describe("Client", func() {
var (
cl *client
config *Config
sess *mockSession
packetConn *mockPacketConn
addr net.Addr
versionNegotiateConnStateCalled bool
)

BeforeEach(func() {
Eventually(areSessionsRunning).Should(BeFalse())
versionNegotiateConnStateCalled = false
packetConn = &mockPacketConn{}
config = &Config{
ConnState: func(_ Session, state ConnState) {
if state == ConnStateVersionNegotiated {
versionNegotiateConnStateCalled = true
}
},
}
addr = &net.UDPAddr{IP: net.IPv4(192, 168, 100, 200), Port: 1337}
sess = &mockSession{connectionID: 0x1337}
cl = &client{
config: config,
connectionID: 0x1337,
session: sess,
version: protocol.Version36,
conn: &conn{pconn: packetConn, currentAddr: addr},
}
})

AfterEach(func() {
if s, ok := cl.session.(*session); ok {
s.Close(nil)
}
Eventually(areSessionsRunning).Should(BeFalse())
})

Context("Dialing", func() {
It("creates a new client", func() {
packetConn.dataToRead = []byte{0x0, 0x1, 0x0}
var err error
sess, err := Dial(packetConn, addr, "quic.clemente.io:1337", config)
Expect(err).ToNot(HaveOccurred())
Expect(*(*[]protocol.VersionNumber)(unsafe.Pointer(reflect.ValueOf(sess.(*session).cryptoSetup).Elem().FieldByName("negotiatedVersions").UnsafeAddr()))).To(BeNil())
Expect(*(*string)(unsafe.Pointer(reflect.ValueOf(sess.(*session).cryptoSetup).Elem().FieldByName("hostname").UnsafeAddr()))).To(Equal("quic.clemente.io"))
sess.Close(nil)
})

It("errors when receiving an invalid first packet from the server", func() {
packetConn.dataToRead = []byte{0xff}
sess, err := Dial(packetConn, addr, "quic.clemente.io:1337", config)
Expect(err).To(HaveOccurred())
Expect(sess).To(BeNil())
})

It("errors when receiving an error from the connection", func() {
testErr := errors.New("connection error")
packetConn.readErr = testErr
_, err := Dial(packetConn, addr, "quic.clemente.io:1337", config)
Expect(err).To(MatchError(testErr))
})

// now we're only testing that Dial doesn't return directly after version negotiation
PIt("doesn't return after version negotiation is established if no ConnState is defined", func() {
// TODO(#506): Fix test
packetConn.dataToRead = []byte{0x0, 0x1, 0x0}
config.ConnState = nil
var dialReturned bool
go func() {
defer GinkgoRecover()
_, err := Dial(packetConn, addr, "quic.clemente.io:1337", config)
Expect(err).ToNot(HaveOccurred())
dialReturned = true
}()
Consistently(func() bool { return dialReturned }).Should(BeFalse())
})

It("only establishes a connection once it is forward-secure if no ConnState is defined", func() {
config.ConnState = nil
client := &client{conn: &conn{pconn: packetConn, currentAddr: addr}, config: config}
client.connStateChangeOrErrCond.L = &client.mutex
var returned bool
go func() {
defer GinkgoRecover()
_, err := client.establishConnection()
Expect(err).ToNot(HaveOccurred())
returned = true
}()
Consistently(func() bool { return returned }).Should(BeFalse())
// switch to a secure connection
client.cryptoChangeCallback(nil, false)
Consistently(func() bool { return returned }).Should(BeFalse())
// switch to a forward-secure connection
client.cryptoChangeCallback(nil, true)
Eventually(func() bool { return returned }).Should(BeTrue())
})
})

It("errors on invalid public header", func() {
err := cl.handlePacket(nil, nil)
Expect(err.(*qerr.QuicError).ErrorCode).To(Equal(qerr.InvalidPacketHeader))
})

// this test requires a real session (because it calls the close callback) and a real UDP conn (because it unblocks and errors when it is closed)
It("properly closes", func(done Done) {
Eventually(areSessionsRunning).Should(BeFalse())
udpConn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1)})
Expect(err).ToNot(HaveOccurred())
cl.conn = &conn{pconn: udpConn}
err = cl.createNewSession(nil)
Expect(err).NotTo(HaveOccurred())
testErr := errors.New("test error")
Eventually(areSessionsRunning).Should(BeTrue())

var stoppedListening bool
go func() {
cl.listen()
stoppedListening = true
}()

err = cl.session.Close(testErr)
Expect(err).ToNot(HaveOccurred())
Eventually(func() bool { return stoppedListening }).Should(BeTrue())
Eventually(areSessionsRunning).Should(BeFalse())
close(done)
}, 10)

It("creates new sessions with the right parameters", func() {
cl.session = nil
cl.hostname = "hostname"
err := cl.createNewSession(nil)
Expect(err).ToNot(HaveOccurred())
Expect(cl.session).ToNot(BeNil())
Expect(cl.session.(*session).connectionID).To(Equal(cl.connectionID))
Expect(cl.session.(*session).version).To(Equal(cl.version))
})

Context("handling packets", func() {
It("handles packets", func() {
ph := PublicHeader{
PacketNumber: 1,
PacketNumberLen: protocol.PacketNumberLen2,
ConnectionID: 0x1337,
}
b := &bytes.Buffer{}
err := ph.Write(b, protocol.Version36, protocol.PerspectiveServer)
Expect(err).ToNot(HaveOccurred())
packetConn.dataToRead = b.Bytes()

Expect(sess.packetCount).To(BeZero())
var stoppedListening bool
go func() {
cl.listen()
// it should continue listening when receiving valid packets
stoppedListening = true
}()

Eventually(func() int { return sess.packetCount }).Should(Equal(1))
Expect(sess.closed).To(BeFalse())
Consistently(func() bool { return stoppedListening }).Should(BeFalse())
})

It("closes the session when encountering an error while handling a packet", func() {
Expect(sess.closeReason).ToNot(HaveOccurred())
packetConn.dataToRead = bytes.Repeat([]byte{0xff}, 100)
cl.listen()
Expect(sess.closed).To(BeTrue())
Expect(sess.closeReason).To(HaveOccurred())
})

It("closes the session when encountering an error while reading from the connection", func() {
testErr := errors.New("test error")
packetConn.readErr = testErr
cl.listen()
Expect(sess.closed).To(BeTrue())
Expect(sess.closeReason).To(MatchError(testErr))
})
})

Context("version negotiation", func() {
getVersionNegotiation := func(versions []protocol.VersionNumber) []byte {
oldVersionNegotiationPacket := composeVersionNegotiation(0x1337)
oldSupportVersionTags := protocol.SupportedVersionsAsTags
var b bytes.Buffer
for _, v := range versions {
s := make([]byte, 4)
binary.LittleEndian.PutUint32(s, protocol.VersionNumberToTag(v))
b.Write(s)
}
protocol.SupportedVersionsAsTags = b.Bytes()
packet := composeVersionNegotiation(cl.connectionID)
protocol.SupportedVersionsAsTags = oldSupportVersionTags
Expect(composeVersionNegotiation(0x1337)).To(Equal(oldVersionNegotiationPacket))
return packet
}

It("recognizes that a packet without VersionFlag means that the server accepted the suggested version", func() {
ph := PublicHeader{
PacketNumber: 1,
PacketNumberLen: protocol.PacketNumberLen2,
ConnectionID: 0x1337,
}
b := &bytes.Buffer{}
err := ph.Write(b, protocol.VersionWhatever, protocol.PerspectiveServer)
Expect(err).ToNot(HaveOccurred())
err = cl.handlePacket(nil, b.Bytes())
Expect(err).ToNot(HaveOccurred())
Expect(cl.connState).To(Equal(ConnStateVersionNegotiated))
Eventually(func() bool { return versionNegotiateConnStateCalled }).Should(BeTrue())
})

It("changes the version after receiving a version negotiation packet", func() {
newVersion := protocol.Version35
Expect(newVersion).ToNot(Equal(cl.version))
Expect(sess.packetCount).To(BeZero())
cl.connectionID = 0x1337
err := cl.handlePacket(nil, getVersionNegotiation([]protocol.VersionNumber{newVersion}))
Expect(cl.version).To(Equal(newVersion))
Expect(cl.connState).To(Equal(ConnStateVersionNegotiated))
Eventually(func() bool { return versionNegotiateConnStateCalled }).Should(BeTrue())
// it swapped the sessions
Expect(cl.session).ToNot(Equal(sess))
Expect(cl.connectionID).ToNot(Equal(0x1337)) // it generated a new connection ID
Expect(err).ToNot(HaveOccurred())
// it didn't pass the version negotiation packet to the session (since it has no payload)
Expect(sess.packetCount).To(BeZero())
Expect(*(*[]protocol.VersionNumber)(unsafe.Pointer(reflect.ValueOf(cl.session.(*session).cryptoSetup).Elem().FieldByName("negotiatedVersions").UnsafeAddr()))).To(Equal([]protocol.VersionNumber{35}))
})

It("errors if no matching version is found", func() {
err := cl.handlePacket(nil, getVersionNegotiation([]protocol.VersionNumber{1}))
Expect(err).To(MatchError(qerr.InvalidVersion))
})

It("ignores delayed version negotiation packets", func() {
// if the version was not yet negotiated, handlePacket would return a VersionNegotiationMismatch error, see above test
cl.connState = ConnStateVersionNegotiated
Expect(sess.packetCount).To(BeZero())
err := cl.handlePacket(nil, getVersionNegotiation([]protocol.VersionNumber{1}))
Expect(err).ToNot(HaveOccurred())
Expect(cl.connState).To(Equal(ConnStateVersionNegotiated))
Expect(sess.packetCount).To(BeZero())
Consistently(func() bool { return versionNegotiateConnStateCalled }).Should(BeFalse())
})

It("errors if the server should have accepted the offered version", func() {
err := cl.handlePacket(nil, getVersionNegotiation([]protocol.VersionNumber{cl.version}))
Expect(err).To(MatchError(qerr.Error(qerr.InvalidVersionNegotiationPacket, "Server already supports client's version and should have accepted the connection.")))
})
})
})
@ -0,0 +1,13 @@
coverage:
  round: nearest
  ignore:
    - ackhandler/packet_linkedlist.go
    - h2quic/gzipreader.go
    - h2quic/response.go
    - utils/byteinterval_linkedlist.go
    - utils/packetinterval_linkedlist.go
  status:
    project:
      default:
        threshold: 0.5
    patch: false
@ -0,0 +1,22 @@
package congestion

import (
"time"

"github.com/lucas-clemente/quic-go/protocol"
)

// Bandwidth of a connection
type Bandwidth uint64

const (
// BitsPerSecond is 1 bit per second
BitsPerSecond Bandwidth = 1
// BytesPerSecond is 1 byte per second
BytesPerSecond = 8 * BitsPerSecond
)

// BandwidthFromDelta calculates the bandwidth from a number of bytes and a time delta
func BandwidthFromDelta(bytes protocol.ByteCount, delta time.Duration) Bandwidth {
return Bandwidth(bytes) * Bandwidth(time.Second) / Bandwidth(delta) * BytesPerSecond
}
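BandwidthFromDelta scales the byte count up to a full second and then converts to bits. Checking the arithmetic for the 1-byte-per-millisecond case exercised by the test below, with plain integers instead of the Bandwidth type:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Mirror of BandwidthFromDelta(1, time.Millisecond):
	// 1 byte * (1e9 ns / 1e6 ns) * 8 bits/byte = 8000 bits/s, i.e. 1000 bytes/s.
	bits := uint64(1) * uint64(time.Second) / uint64(time.Millisecond) * 8
	fmt.Println(bits, "bits/s") // 8000
}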
14 vendor/github.com/lucas-clemente/quic-go/congestion/bandwidth_test.go generated vendored Normal file
@ -0,0 +1,14 @@
package congestion

import (
"time"

. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
)

var _ = Describe("Bandwidth", func() {
It("converts from time delta", func() {
Expect(BandwidthFromDelta(1, time.Millisecond)).To(Equal(1000 * BytesPerSecond))
})
})
@ -0,0 +1,18 @@
package congestion

import "time"

// A Clock returns the current time
type Clock interface {
Now() time.Time
}

// DefaultClock implements the Clock interface using the Go stdlib clock.
type DefaultClock struct{}

var _ Clock = DefaultClock{}

// Now gets the current time
func (DefaultClock) Now() time.Time {
return time.Now()
}
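The Clock interface exists so the congestion code can be driven by an injected time source instead of the wall clock. A minimal fake along these lines is the usual way to make such code deterministic in tests; the mockClock type below is illustrative and not the one used elsewhere in the suite.

package main

import (
	"fmt"
	"time"
)

// Clock mirrors the interface above.
type Clock interface{ Now() time.Time }

// mockClock is a controllable time source for tests.
type mockClock struct{ now time.Time }

func (c *mockClock) Now() time.Time { return c.now }

// Advance moves the fake clock forward by d.
func (c *mockClock) Advance(d time.Duration) { c.now = c.now.Add(d) }

func main() {
	c := &mockClock{now: time.Unix(0, 0)}
	c.Advance(30 * time.Millisecond)
	fmt.Println(c.Now().UnixNano()) // 30000000, independent of the wall clock
}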
13 vendor/github.com/lucas-clemente/quic-go/congestion/congestion_suite_test.go generated vendored Normal file
@ -0,0 +1,13 @@
package congestion

import (
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"

"testing"
)

func TestCongestion(t *testing.T) {
RegisterFailHandler(Fail)
RunSpecs(t, "Congestion Suite")
}
|
@ -0,0 +1,228 @@
|
||||||
|
package congestion
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/lucas-clemente/quic-go/protocol"
|
||||||
|
"github.com/lucas-clemente/quic-go/utils"
|
||||||
|
)
|
||||||
|
|
||||||
|
// This cubic implementation is based on the one found in Chromiums's QUIC
|
||||||
|
// implementation, in the files net/quic/congestion_control/cubic.{hh,cc}.
|
||||||
|
|
||||||
|
// Constants based on TCP defaults.
|
||||||
|
// The following constants are in 2^10 fractions of a second instead of ms to
|
||||||
|
// allow a 10 shift right to divide.
|
||||||
|
|
||||||
|
// 1024*1024^3 (first 1024 is from 0.100^3)
|
||||||
|
// where 0.100 is 100 ms which is the scaling
|
||||||
|
// round trip time.
|
||||||
|
const cubeScale = 40
|
||||||
|
const cubeCongestionWindowScale = 410
|
||||||
|
const cubeFactor protocol.PacketNumber = 1 << cubeScale / cubeCongestionWindowScale
|
||||||
|
|
||||||
|
const defaultNumConnections = 2
|
||||||
|
|
||||||
|
// Default Cubic backoff factor
|
||||||
|
const beta float32 = 0.7
|
||||||
|
|
||||||
|
// Additional backoff factor when loss occurs in the concave part of the Cubic
|
||||||
|
// curve. This additional backoff factor is expected to give up bandwidth to
|
||||||
|
// new concurrent flows and speed up convergence.
|
||||||
|
const betaLastMax float32 = 0.85
|
||||||
|
|
||||||
|
// If true, Cubic's epoch is shifted when the sender is application-limited.
|
||||||
|
const shiftQuicCubicEpochWhenAppLimited = true
|
||||||
|
|
||||||
|
const maxCubicTimeInterval = 30 * time.Millisecond
|
||||||
|
|
||||||
|
// Cubic implements the cubic algorithm from TCP
|
||||||
|
type Cubic struct {
|
||||||
|
clock Clock
|
||||||
|
// Number of connections to simulate.
|
||||||
|
numConnections int
|
||||||
|
// Time when this cycle started, after last loss event.
|
||||||
|
epoch time.Time
|
||||||
|
// Time when sender went into application-limited period. Zero if not in
|
||||||
|
// application-limited period.
|
||||||
|
appLimitedStartTime time.Time
|
||||||
|
// Time when we updated last_congestion_window.
|
||||||
|
lastUpdateTime time.Time
|
||||||
|
// Last congestion window (in packets) used.
|
||||||
|
lastCongestionWindow protocol.PacketNumber
|
||||||
|
// Max congestion window (in packets) used just before last loss event.
|
||||||
|
// Note: to improve fairness to other streams an additional back off is
|
||||||
|
// applied to this value if the new value is below our latest value.
|
||||||
|
lastMaxCongestionWindow protocol.PacketNumber
|
||||||
|
// Number of acked packets since the cycle started (epoch).
|
||||||
|
ackedPacketsCount protocol.PacketNumber
|
||||||
|
// TCP Reno equivalent congestion window in packets.
|
||||||
|
estimatedTCPcongestionWindow protocol.PacketNumber
|
||||||
|
// Origin point of cubic function.
|
||||||
|
originPointCongestionWindow protocol.PacketNumber
|
||||||
|
// Time to origin point of cubic function in 2^10 fractions of a second.
|
||||||
|
timeToOriginPoint uint32
|
||||||
|
// Last congestion window in packets computed by cubic function.
|
||||||
|
lastTargetCongestionWindow protocol.PacketNumber
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewCubic returns a new Cubic instance
|
||||||
|
func NewCubic(clock Clock) *Cubic {
|
||||||
|
c := &Cubic{
|
||||||
|
clock: clock,
|
||||||
|
numConnections: defaultNumConnections,
|
||||||
|
}
|
||||||
|
c.Reset()
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// Reset is called after a timeout to reset the cubic state
|
||||||
|
func (c *Cubic) Reset() {
|
||||||
|
c.epoch = time.Time{}
|
||||||
|
c.appLimitedStartTime = time.Time{}
|
||||||
|
c.lastUpdateTime = time.Time{}
|
||||||
|
c.lastCongestionWindow = 0
|
||||||
|
c.lastMaxCongestionWindow = 0
|
||||||
|
c.ackedPacketsCount = 0
|
||||||
|
c.estimatedTCPcongestionWindow = 0
|
||||||
|
c.originPointCongestionWindow = 0
|
||||||
|
c.timeToOriginPoint = 0
|
||||||
|
c.lastTargetCongestionWindow = 0
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cubic) alpha() float32 {
|
||||||
|
// TCPFriendly alpha is described in Section 3.3 of the CUBIC paper. Note that
|
||||||
|
// beta here is a cwnd multiplier, and is equal to 1-beta from the paper.
|
||||||
|
// We derive the equivalent alpha for an N-connection emulation as:
|
||||||
|
b := c.beta()
|
||||||
|
return 3 * float32(c.numConnections) * float32(c.numConnections) * (1 - b) / (1 + b)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (c *Cubic) beta() float32 {
|
||||||
|
// kNConnectionBeta is the backoff factor after loss for our N-connection
|
||||||
|
// emulation, which emulates the effective backoff of an ensemble of N
|
||||||
|
// TCP-Reno connections on a single loss event. The effective multiplier is
|
||||||
|
// computed as:
|
||||||
|
return (float32(c.numConnections) - 1 + beta) / float32(c.numConnections)
|
||||||
|
}
|
||||||
|
|
||||||
|
// OnApplicationLimited is called on ack arrival when sender is unable to use
|
||||||
|
// the available congestion window. Resets Cubic state during quiescence.
|
||||||
|
func (c *Cubic) OnApplicationLimited() {
|
||||||
|
if shiftQuicCubicEpochWhenAppLimited {
|
||||||
|
// When sender is not using the available congestion window, Cubic's epoch
|
||||||
|
// should not continue growing. Record the time when sender goes into an
|
||||||
|
// app-limited period here, to compensate later when cwnd growth happens.
|
||||||
|
if c.appLimitedStartTime.IsZero() {
|
||||||
|
c.appLimitedStartTime = c.clock.Now()
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// When sender is not using the available congestion window, Cubic's epoch
|
||||||
|
// should not continue growing. Reset the epoch when in such a period.
|
||||||
|
c.epoch = time.Time{}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// CongestionWindowAfterPacketLoss computes a new congestion window to use after
|
||||||
|
// a loss event. Returns the new congestion window in packets. The new
|
||||||
|
// congestion window is a multiplicative decrease of our current window.
|
||||||
|
func (c *Cubic) CongestionWindowAfterPacketLoss(currentCongestionWindow protocol.PacketNumber) protocol.PacketNumber {
|
||||||
|
	if currentCongestionWindow < c.lastMaxCongestionWindow {
		// We never reached the old max, so assume we are competing with another
		// flow. Use our extra back off factor to allow the other flow to go up.
		c.lastMaxCongestionWindow = protocol.PacketNumber(betaLastMax * float32(currentCongestionWindow))
	} else {
		c.lastMaxCongestionWindow = currentCongestionWindow
	}
	c.epoch = time.Time{} // Reset time.
	return protocol.PacketNumber(float32(currentCongestionWindow) * c.beta())
}

// CongestionWindowAfterAck computes a new congestion window to use after a received ACK.
// Returns the new congestion window in packets. The new congestion window
// follows a cubic function that depends on the time passed since last
// packet loss.
func (c *Cubic) CongestionWindowAfterAck(currentCongestionWindow protocol.PacketNumber, delayMin time.Duration) protocol.PacketNumber {
	c.ackedPacketsCount++ // Packets acked.
	currentTime := c.clock.Now()

	// Cubic is "independent" of RTT, the update is limited by the time elapsed.
	if c.lastCongestionWindow == currentCongestionWindow && (currentTime.Sub(c.lastUpdateTime) <= maxCubicTimeInterval) {
		return utils.MaxPacketNumber(c.lastTargetCongestionWindow, c.estimatedTCPcongestionWindow)
	}
	c.lastCongestionWindow = currentCongestionWindow
	c.lastUpdateTime = currentTime

	if c.epoch.IsZero() {
		// First ACK after a loss event.
		c.epoch = currentTime   // Start of epoch.
		c.ackedPacketsCount = 1 // Reset count.
		// Reset estimated_tcp_congestion_window_ to be in sync with cubic.
		c.estimatedTCPcongestionWindow = currentCongestionWindow
		if c.lastMaxCongestionWindow <= currentCongestionWindow {
			c.timeToOriginPoint = 0
			c.originPointCongestionWindow = currentCongestionWindow
		} else {
			c.timeToOriginPoint = uint32(math.Cbrt(float64(cubeFactor * (c.lastMaxCongestionWindow - currentCongestionWindow))))
			c.originPointCongestionWindow = c.lastMaxCongestionWindow
		}
	} else {
		// If sender was app-limited, then freeze congestion window growth during
		// app-limited period. Continue growth now by shifting the epoch-start
		// through the app-limited period.
		if shiftQuicCubicEpochWhenAppLimited && !c.appLimitedStartTime.IsZero() {
			shift := currentTime.Sub(c.appLimitedStartTime)
			c.epoch = c.epoch.Add(shift)
			c.appLimitedStartTime = time.Time{}
		}
	}

	// Change the time unit from microseconds to 2^10 fractions per second. Take
	// the round trip time in account. This is done to allow us to use shift as a
	// divide operator.
	elapsedTime := int64((currentTime.Add(delayMin).Sub(c.epoch)/time.Microsecond)<<10) / 1000000

	offset := int64(c.timeToOriginPoint) - elapsedTime
	// Right-shifts of negative, signed numbers have
	// implementation-dependent behavior. Force the offset to be
	// positive, similar to the kernel implementation.
	if offset < 0 {
		offset = -offset
	}
	deltaCongestionWindow := protocol.PacketNumber((cubeCongestionWindowScale * offset * offset * offset) >> cubeScale)
	var targetCongestionWindow protocol.PacketNumber
	if elapsedTime > int64(c.timeToOriginPoint) {
		targetCongestionWindow = c.originPointCongestionWindow + deltaCongestionWindow
	} else {
		targetCongestionWindow = c.originPointCongestionWindow - deltaCongestionWindow
	}
	// With dynamic beta/alpha based on number of active streams, it is possible
	// for the required_ack_count to become much lower than acked_packets_count_
	// suddenly, leading to more than one iteration through the following loop.
	for {
		// Update estimated TCP congestion_window.
		requiredAckCount := protocol.PacketNumber(float32(c.estimatedTCPcongestionWindow) / c.alpha())
		if c.ackedPacketsCount < requiredAckCount {
			break
		}
		c.ackedPacketsCount -= requiredAckCount
		c.estimatedTCPcongestionWindow++
	}

	// We have a new cubic congestion window.
	c.lastTargetCongestionWindow = targetCongestionWindow

	// Compute target congestion_window based on cubic target and estimated TCP
	// congestion_window, use highest (fastest).
	if targetCongestionWindow < c.estimatedTCPcongestionWindow {
		targetCongestionWindow = c.estimatedTCPcongestionWindow
	}

	return targetCongestionWindow
}

// SetNumConnections sets the number of emulated connections
func (c *Cubic) SetNumConnections(n int) {
	c.numConnections = n
}
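For reference, the fixed-point arithmetic in CongestionWindowAfterAck above is an integer approximation of the CUBIC window function. The following is only a sketch of the intended math, not text from the vendored file: C is inferred as roughly 0.4 from the 410/1024 scaling used in the tests further below, W_max is originPointCongestionWindow (the lastMaxCongestionWindow recorded at the last loss), K is timeToOriginPoint, and t is the time since the epoch start plus delayMin.

    W_{\mathrm{cubic}}(t) = C\,(t - K)^3 + W_{\max},
    \qquad K = \sqrt[3]{\frac{W_{\max} - W_{\mathrm{loss}}}{C}}

Here W_loss is the congestion window immediately after the loss, which is approximately beta times W_max, so the window first grows concavely back toward W_max and then convexly past it, exactly the two branches selected by the elapsedTime comparison above.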
298 vendor/github.com/lucas-clemente/quic-go/congestion/cubic_sender.go generated vendored Normal file
@@ -0,0 +1,298 @@
package congestion

import (
	"time"

	"github.com/lucas-clemente/quic-go/protocol"
	"github.com/lucas-clemente/quic-go/utils"
)

const (
	maxBurstBytes                                        = 3 * protocol.DefaultTCPMSS
	defaultMinimumCongestionWindow protocol.PacketNumber = 2
	renoBeta                       float32               = 0.7 // Reno backoff factor.
)

type cubicSender struct {
	hybridSlowStart HybridSlowStart
	prr             PrrSender
	rttStats        *RTTStats
	stats           connectionStats
	cubic           *Cubic

	reno bool

	// Track the largest packet that has been sent.
	largestSentPacketNumber protocol.PacketNumber

	// Track the largest packet that has been acked.
	largestAckedPacketNumber protocol.PacketNumber

	// Track the largest packet number outstanding when a CWND cutback occurs.
	largestSentAtLastCutback protocol.PacketNumber

	// Congestion window in packets.
	congestionWindow protocol.PacketNumber

	// Slow start congestion window in packets, aka ssthresh.
	slowstartThreshold protocol.PacketNumber

	// Whether the last loss event caused us to exit slowstart.
	// Used for stats collection of slowstartPacketsLost
	lastCutbackExitedSlowstart bool

	// When true, exit slow start with large cutback of congestion window.
	slowStartLargeReduction bool

	// Minimum congestion window in packets.
	minCongestionWindow protocol.PacketNumber

	// Maximum number of outstanding packets for tcp.
	maxTCPCongestionWindow protocol.PacketNumber

	// Number of connections to simulate.
	numConnections int

	// ACK counter for the Reno implementation.
	congestionWindowCount protocol.ByteCount

	initialCongestionWindow    protocol.PacketNumber
	initialMaxCongestionWindow protocol.PacketNumber
}

// NewCubicSender makes a new cubic sender
func NewCubicSender(clock Clock, rttStats *RTTStats, reno bool, initialCongestionWindow, initialMaxCongestionWindow protocol.PacketNumber) SendAlgorithmWithDebugInfo {
	return &cubicSender{
		rttStats:                   rttStats,
		initialCongestionWindow:    initialCongestionWindow,
		initialMaxCongestionWindow: initialMaxCongestionWindow,
		congestionWindow:           initialCongestionWindow,
		minCongestionWindow:        defaultMinimumCongestionWindow,
		slowstartThreshold:         initialMaxCongestionWindow,
		maxTCPCongestionWindow:     initialMaxCongestionWindow,
		numConnections:             defaultNumConnections,
		cubic:                      NewCubic(clock),
		reno:                       reno,
	}
}

func (c *cubicSender) TimeUntilSend(now time.Time, bytesInFlight protocol.ByteCount) time.Duration {
	if c.InRecovery() {
		// PRR is used when in recovery.
		return c.prr.TimeUntilSend(c.GetCongestionWindow(), bytesInFlight, c.GetSlowStartThreshold())
	}
	if c.GetCongestionWindow() > bytesInFlight {
		return 0
	}
	return utils.InfDuration
}

func (c *cubicSender) OnPacketSent(sentTime time.Time, bytesInFlight protocol.ByteCount, packetNumber protocol.PacketNumber, bytes protocol.ByteCount, isRetransmittable bool) bool {
	// Only update bytesInFlight for data packets.
	if !isRetransmittable {
		return false
	}
	if c.InRecovery() {
		// PRR is used when in recovery.
		c.prr.OnPacketSent(bytes)
	}
	c.largestSentPacketNumber = packetNumber
	c.hybridSlowStart.OnPacketSent(packetNumber)
	return true
}

func (c *cubicSender) InRecovery() bool {
	return c.largestAckedPacketNumber <= c.largestSentAtLastCutback && c.largestAckedPacketNumber != 0
}

func (c *cubicSender) InSlowStart() bool {
	return c.GetCongestionWindow() < c.GetSlowStartThreshold()
}

func (c *cubicSender) GetCongestionWindow() protocol.ByteCount {
	return protocol.ByteCount(c.congestionWindow) * protocol.DefaultTCPMSS
}

func (c *cubicSender) GetSlowStartThreshold() protocol.ByteCount {
	return protocol.ByteCount(c.slowstartThreshold) * protocol.DefaultTCPMSS
}

func (c *cubicSender) ExitSlowstart() {
	c.slowstartThreshold = c.congestionWindow
}

func (c *cubicSender) SlowstartThreshold() protocol.PacketNumber {
	return c.slowstartThreshold
}

func (c *cubicSender) MaybeExitSlowStart() {
	if c.InSlowStart() && c.hybridSlowStart.ShouldExitSlowStart(c.rttStats.LatestRTT(), c.rttStats.MinRTT(), c.GetCongestionWindow()/protocol.DefaultTCPMSS) {
		c.ExitSlowstart()
	}
}

func (c *cubicSender) OnPacketAcked(ackedPacketNumber protocol.PacketNumber, ackedBytes protocol.ByteCount, bytesInFlight protocol.ByteCount) {
	c.largestAckedPacketNumber = utils.MaxPacketNumber(ackedPacketNumber, c.largestAckedPacketNumber)
	if c.InRecovery() {
		// PRR is used when in recovery.
		c.prr.OnPacketAcked(ackedBytes)
		return
	}
	c.maybeIncreaseCwnd(ackedPacketNumber, ackedBytes, bytesInFlight)
	if c.InSlowStart() {
		c.hybridSlowStart.OnPacketAcked(ackedPacketNumber)
	}
}

func (c *cubicSender) OnPacketLost(packetNumber protocol.PacketNumber, lostBytes protocol.ByteCount, bytesInFlight protocol.ByteCount) {
	// TCP NewReno (RFC6582) says that once a loss occurs, any losses in packets
	// already sent should be treated as a single loss event, since it's expected.
	if packetNumber <= c.largestSentAtLastCutback {
		if c.lastCutbackExitedSlowstart {
			c.stats.slowstartPacketsLost++
			c.stats.slowstartBytesLost += lostBytes
			if c.slowStartLargeReduction {
				if c.stats.slowstartPacketsLost == 1 || (c.stats.slowstartBytesLost/protocol.DefaultTCPMSS) > (c.stats.slowstartBytesLost-lostBytes)/protocol.DefaultTCPMSS {
					// Reduce congestion window by 1 for every mss of bytes lost.
					c.congestionWindow = utils.MaxPacketNumber(c.congestionWindow-1, c.minCongestionWindow)
				}
				c.slowstartThreshold = c.congestionWindow
			}
		}
		return
	}
	c.lastCutbackExitedSlowstart = c.InSlowStart()
	if c.InSlowStart() {
		c.stats.slowstartPacketsLost++
	}

	c.prr.OnPacketLost(bytesInFlight)

	// TODO(chromium): Separate out all of slow start into a separate class.
	if c.slowStartLargeReduction && c.InSlowStart() {
		c.congestionWindow = c.congestionWindow - 1
	} else if c.reno {
		c.congestionWindow = protocol.PacketNumber(float32(c.congestionWindow) * c.RenoBeta())
	} else {
		c.congestionWindow = c.cubic.CongestionWindowAfterPacketLoss(c.congestionWindow)
	}
	// Enforce a minimum congestion window.
	if c.congestionWindow < c.minCongestionWindow {
		c.congestionWindow = c.minCongestionWindow
	}
	c.slowstartThreshold = c.congestionWindow
	c.largestSentAtLastCutback = c.largestSentPacketNumber
	// reset packet count from congestion avoidance mode. We start
	// counting again when we're out of recovery.
	c.congestionWindowCount = 0
}

func (c *cubicSender) RenoBeta() float32 {
	// kNConnectionBeta is the backoff factor after loss for our N-connection
	// emulation, which emulates the effective backoff of an ensemble of N
	// TCP-Reno connections on a single loss event. The effective multiplier is
	// computed as:
	return (float32(c.numConnections) - 1. + renoBeta) / float32(c.numConnections)
}

// Called when we receive an ack. Normal TCP tracks how many packets one ack
// represents, but quic has a separate ack for each packet.
func (c *cubicSender) maybeIncreaseCwnd(ackedPacketNumber protocol.PacketNumber, ackedBytes protocol.ByteCount, bytesInFlight protocol.ByteCount) {
	// Do not increase the congestion window unless the sender is close to using
	// the current window.
	if !c.isCwndLimited(bytesInFlight) {
		c.cubic.OnApplicationLimited()
		return
	}
	if c.congestionWindow >= c.maxTCPCongestionWindow {
		return
	}
	if c.InSlowStart() {
		// TCP slow start, exponential growth, increase by one for each ACK.
		c.congestionWindow++
		return
	}
	if c.reno {
		// Classic Reno congestion avoidance.
		c.congestionWindowCount++
		// Divide by num_connections to smoothly increase the CWND at a faster
		// rate than conventional Reno.
		if protocol.PacketNumber(c.congestionWindowCount*protocol.ByteCount(c.numConnections)) >= c.congestionWindow {
			c.congestionWindow++
			c.congestionWindowCount = 0
		}
	} else {
		c.congestionWindow = utils.MinPacketNumber(c.maxTCPCongestionWindow, c.cubic.CongestionWindowAfterAck(c.congestionWindow, c.rttStats.MinRTT()))
	}
}

func (c *cubicSender) isCwndLimited(bytesInFlight protocol.ByteCount) bool {
	congestionWindow := c.GetCongestionWindow()
	if bytesInFlight >= congestionWindow {
		return true
	}
	availableBytes := congestionWindow - bytesInFlight
	slowStartLimited := c.InSlowStart() && bytesInFlight > congestionWindow/2
	return slowStartLimited || availableBytes <= maxBurstBytes
}

// BandwidthEstimate returns the current bandwidth estimate
func (c *cubicSender) BandwidthEstimate() Bandwidth {
	srtt := c.rttStats.SmoothedRTT()
	if srtt == 0 {
		// If we haven't measured an rtt, the bandwidth estimate is unknown.
		return 0
	}
	return BandwidthFromDelta(c.GetCongestionWindow(), srtt)
}

// HybridSlowStart returns the hybrid slow start instance for testing
func (c *cubicSender) HybridSlowStart() *HybridSlowStart {
	return &c.hybridSlowStart
}

// SetNumEmulatedConnections sets the number of emulated connections
func (c *cubicSender) SetNumEmulatedConnections(n int) {
	c.numConnections = utils.Max(n, 1)
	c.cubic.SetNumConnections(c.numConnections)
}

// OnRetransmissionTimeout is called on an retransmission timeout
func (c *cubicSender) OnRetransmissionTimeout(packetsRetransmitted bool) {
	c.largestSentAtLastCutback = 0
	if !packetsRetransmitted {
		return
	}
	c.hybridSlowStart.Restart()
	c.cubic.Reset()
	c.slowstartThreshold = c.congestionWindow / 2
	c.congestionWindow = c.minCongestionWindow
}

// OnConnectionMigration is called when the connection is migrated (?)
func (c *cubicSender) OnConnectionMigration() {
	c.hybridSlowStart.Restart()
	c.prr = PrrSender{}
	c.largestSentPacketNumber = 0
	c.largestAckedPacketNumber = 0
	c.largestSentAtLastCutback = 0
	c.lastCutbackExitedSlowstart = false
	c.cubic.Reset()
	c.congestionWindowCount = 0
	c.congestionWindow = c.initialCongestionWindow
	c.slowstartThreshold = c.initialMaxCongestionWindow
	c.maxTCPCongestionWindow = c.initialMaxCongestionWindow
}

// SetSlowStartLargeReduction allows enabling the SSLR experiment
func (c *cubicSender) SetSlowStartLargeReduction(enabled bool) {
	c.slowStartLargeReduction = enabled
}

// RetransmissionDelay gives the time to retransmission
func (c *cubicSender) RetransmissionDelay() time.Duration {
	if c.rttStats.SmoothedRTT() == 0 {
		return 0
	}
	return c.rttStats.SmoothedRTT() + c.rttStats.MeanDeviation()*4
}
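As a usage sketch only: the sender above is driven by the connection through OnPacketSent, OnPacketAcked and OnPacketLost, with TimeUntilSend gating transmission. The helper name driveSender and the window sizes below are invented for illustration, and the snippet assumes package-internal access (as the tests further below have); it is not part of the vendored code.

package congestion

import (
	"time"

	"github.com/lucas-clemente/quic-go/protocol"
)

// driveSender shows the expected call sequence for one round of sending and acking.
func driveSender(clock Clock, rttStats *RTTStats) protocol.ByteCount {
	sender := NewCubicSender(clock, rttStats, false /* use cubic, not reno */, 10, 200)

	var bytesInFlight protocol.ByteCount
	pn := protocol.PacketNumber(1)

	// Send full-sized packets while the congestion controller allows it.
	for sender.TimeUntilSend(clock.Now(), bytesInFlight) == 0 {
		sender.OnPacketSent(clock.Now(), bytesInFlight, pn, protocol.DefaultTCPMSS, true)
		bytesInFlight += protocol.DefaultTCPMSS
		pn++
	}

	// Feed an RTT sample, then report an ack; in slow start this grows the window.
	rttStats.UpdateRTT(60*time.Millisecond, 0, clock.Now())
	sender.MaybeExitSlowStart()
	sender.OnPacketAcked(1, protocol.DefaultTCPMSS, bytesInFlight)
	bytesInFlight -= protocol.DefaultTCPMSS

	// A loss would instead shrink the window and enter PRR-paced recovery:
	// sender.OnPacketLost(2, protocol.DefaultTCPMSS, bytesInFlight)

	return sender.GetCongestionWindow()
}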
814 vendor/github.com/lucas-clemente/quic-go/congestion/cubic_sender_test.go generated vendored Normal file
@@ -0,0 +1,814 @@
package congestion

import (
	"time"

	"github.com/lucas-clemente/quic-go/protocol"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

const initialCongestionWindowPackets protocol.PacketNumber = 10
const defaultWindowTCP = protocol.ByteCount(initialCongestionWindowPackets) * protocol.DefaultTCPMSS

type mockClock time.Time

func (c *mockClock) Now() time.Time {
	return time.Time(*c)
}

func (c *mockClock) Advance(d time.Duration) {
	*c = mockClock(time.Time(*c).Add(d))
}

const MaxCongestionWindow = protocol.PacketNumber(200)

var _ = Describe("Cubic Sender", func() {
	var (
		sender            SendAlgorithmWithDebugInfo
		clock             mockClock
		bytesInFlight     protocol.ByteCount
		packetNumber      protocol.PacketNumber
		ackedPacketNumber protocol.PacketNumber
		rttStats          *RTTStats
	)

	BeforeEach(func() {
		bytesInFlight = 0
		packetNumber = 1
		ackedPacketNumber = 0
		clock = mockClock{}
		rttStats = NewRTTStats()
		sender = NewCubicSender(&clock, rttStats, true /*reno*/, initialCongestionWindowPackets, MaxCongestionWindow)
	})

	SendAvailableSendWindowLen := func(packetLength protocol.ByteCount) int {
		// Send as long as TimeUntilSend returns Zero.
		packets_sent := 0
		can_send := sender.TimeUntilSend(clock.Now(), bytesInFlight) == 0
		for can_send {
			sender.OnPacketSent(clock.Now(), bytesInFlight, packetNumber, packetLength, true)
			packetNumber++
			packets_sent++
			bytesInFlight += packetLength
			can_send = sender.TimeUntilSend(clock.Now(), bytesInFlight) == 0
		}
		return packets_sent
	}

	// Normal is that TCP acks every other segment.
	AckNPacketsLen := func(n int, packetLength protocol.ByteCount) {
		rttStats.UpdateRTT(60*time.Millisecond, 0, clock.Now())
		sender.MaybeExitSlowStart()
		for i := 0; i < n; i++ {
			ackedPacketNumber++
			sender.OnPacketAcked(ackedPacketNumber, packetLength, bytesInFlight)
		}
		bytesInFlight -= protocol.ByteCount(n) * packetLength
		clock.Advance(time.Millisecond)
	}

	LoseNPacketsLen := func(n int, packetLength protocol.ByteCount) {
		for i := 0; i < n; i++ {
			ackedPacketNumber++
			sender.OnPacketLost(ackedPacketNumber, packetLength, bytesInFlight)
		}
		bytesInFlight -= protocol.ByteCount(n) * packetLength
	}

	// Does not increment acked_packet_number_.
	LosePacket := func(number protocol.PacketNumber) {
		sender.OnPacketLost(number, protocol.DefaultTCPMSS, bytesInFlight)
		bytesInFlight -= protocol.DefaultTCPMSS
	}

	SendAvailableSendWindow := func() int { return SendAvailableSendWindowLen(protocol.DefaultTCPMSS) }
	AckNPackets := func(n int) { AckNPacketsLen(n, protocol.DefaultTCPMSS) }
	LoseNPackets := func(n int) { LoseNPacketsLen(n, protocol.DefaultTCPMSS) }

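The test cases that follow lean on these helpers: AckNPackets acks two packets per call to mirror TCP's every-other-segment acking, and in slow start each acked packet grows the window by one MSS, so after kNumberOfAcks iterations the expected window is defaultWindowTCP + 2*kNumberOfAcks*protocol.DefaultTCPMSS, which is the expression the slow-start tests assert.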
It("simpler sender", func() {
|
||||||
|
// At startup make sure we are at the default.
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(defaultWindowTCP))
|
||||||
|
// At startup make sure we can send.
|
||||||
|
Expect(sender.TimeUntilSend(clock.Now(), 0)).To(BeZero())
|
||||||
|
// Make sure we can send.
|
||||||
|
Expect(sender.TimeUntilSend(clock.Now(), 0)).To(BeZero())
|
||||||
|
// And that window is un-affected.
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(defaultWindowTCP))
|
||||||
|
|
||||||
|
// Fill the send window with data, then verify that we can't send.
|
||||||
|
SendAvailableSendWindow()
|
||||||
|
Expect(sender.TimeUntilSend(clock.Now(), sender.GetCongestionWindow())).ToNot(BeZero())
|
||||||
|
})
|
||||||
|
|
||||||
|
It("application limited slow start", func() {
|
||||||
|
// Send exactly 10 packets and ensure the CWND ends at 14 packets.
|
||||||
|
const kNumberOfAcks = 5
|
||||||
|
// At startup make sure we can send.
|
||||||
|
Expect(sender.TimeUntilSend(clock.Now(), 0)).To(BeZero())
|
||||||
|
// Make sure we can send.
|
||||||
|
Expect(sender.TimeUntilSend(clock.Now(), 0)).To(BeZero())
|
||||||
|
|
||||||
|
SendAvailableSendWindow()
|
||||||
|
for i := 0; i < kNumberOfAcks; i++ {
|
||||||
|
AckNPackets(2)
|
||||||
|
}
|
||||||
|
bytesToSend := sender.GetCongestionWindow()
|
||||||
|
// It's expected 2 acks will arrive when the bytes_in_flight are greater than
|
||||||
|
// half the CWND.
|
||||||
|
Expect(bytesToSend).To(Equal(defaultWindowTCP + protocol.DefaultTCPMSS*2*2))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("exponential slow start", func() {
|
||||||
|
const kNumberOfAcks = 20
|
||||||
|
// At startup make sure we can send.
|
||||||
|
Expect(sender.TimeUntilSend(clock.Now(), 0)).To(BeZero())
|
||||||
|
Expect(sender.BandwidthEstimate()).To(BeZero())
|
||||||
|
// Make sure we can send.
|
||||||
|
Expect(sender.TimeUntilSend(clock.Now(), 0)).To(BeZero())
|
||||||
|
|
||||||
|
for i := 0; i < kNumberOfAcks; i++ {
|
||||||
|
// Send our full send window.
|
||||||
|
SendAvailableSendWindow()
|
||||||
|
AckNPackets(2)
|
||||||
|
}
|
||||||
|
cwnd := sender.GetCongestionWindow()
|
||||||
|
Expect(cwnd).To(Equal(defaultWindowTCP + protocol.DefaultTCPMSS*2*kNumberOfAcks))
|
||||||
|
Expect(sender.BandwidthEstimate()).To(Equal(BandwidthFromDelta(cwnd, rttStats.SmoothedRTT())))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("slow start packet loss", func() {
|
||||||
|
sender.SetNumEmulatedConnections(1)
|
||||||
|
const kNumberOfAcks = 10
|
||||||
|
for i := 0; i < kNumberOfAcks; i++ {
|
||||||
|
// Send our full send window.
|
||||||
|
SendAvailableSendWindow()
|
||||||
|
AckNPackets(2)
|
||||||
|
}
|
||||||
|
SendAvailableSendWindow()
|
||||||
|
expected_send_window := defaultWindowTCP + (protocol.DefaultTCPMSS * 2 * kNumberOfAcks)
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
|
||||||
|
|
||||||
|
// Lose a packet to exit slow start.
|
||||||
|
LoseNPackets(1)
|
||||||
|
packets_in_recovery_window := expected_send_window / protocol.DefaultTCPMSS
|
||||||
|
|
||||||
|
// We should now have fallen out of slow start with a reduced window.
|
||||||
|
expected_send_window = protocol.ByteCount(float32(expected_send_window) * renoBeta)
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
|
||||||
|
|
||||||
|
// Recovery phase. We need to ack every packet in the recovery window before
|
||||||
|
// we exit recovery.
|
||||||
|
number_of_packets_in_window := expected_send_window / protocol.DefaultTCPMSS
|
||||||
|
AckNPackets(int(packets_in_recovery_window))
|
||||||
|
SendAvailableSendWindow()
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
|
||||||
|
|
||||||
|
// We need to ack an entire window before we increase CWND by 1.
|
||||||
|
AckNPackets(int(number_of_packets_in_window) - 2)
|
||||||
|
SendAvailableSendWindow()
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
|
||||||
|
|
||||||
|
// Next ack should increase cwnd by 1.
|
||||||
|
AckNPackets(1)
|
||||||
|
expected_send_window += protocol.DefaultTCPMSS
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
|
||||||
|
|
||||||
|
// Now RTO and ensure slow start gets reset.
|
||||||
|
Expect(sender.HybridSlowStart().Started()).To(BeTrue())
|
||||||
|
sender.OnRetransmissionTimeout(true)
|
||||||
|
Expect(sender.HybridSlowStart().Started()).To(BeFalse())
|
||||||
|
})
|
||||||
|
|
||||||
|
It("slow start packet loss with large reduction", func() {
|
||||||
|
sender.SetSlowStartLargeReduction(true)
|
||||||
|
|
||||||
|
sender.SetNumEmulatedConnections(1)
|
||||||
|
const kNumberOfAcks = 10
|
||||||
|
for i := 0; i < kNumberOfAcks; i++ {
|
||||||
|
// Send our full send window.
|
||||||
|
SendAvailableSendWindow()
|
||||||
|
AckNPackets(2)
|
||||||
|
}
|
||||||
|
SendAvailableSendWindow()
|
||||||
|
expected_send_window := defaultWindowTCP + (protocol.DefaultTCPMSS * 2 * kNumberOfAcks)
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
|
||||||
|
|
||||||
|
// Lose a packet to exit slow start. We should now have fallen out of
|
||||||
|
// slow start with a window reduced by 1.
|
||||||
|
LoseNPackets(1)
|
||||||
|
expected_send_window -= protocol.DefaultTCPMSS
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
|
||||||
|
|
||||||
|
// Lose 5 packets in recovery and verify that congestion window is reduced
|
||||||
|
// further.
|
||||||
|
LoseNPackets(5)
|
||||||
|
expected_send_window -= 5 * protocol.DefaultTCPMSS
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
|
||||||
|
|
||||||
|
packets_in_recovery_window := expected_send_window / protocol.DefaultTCPMSS
|
||||||
|
|
||||||
|
// Recovery phase. We need to ack every packet in the recovery window before
|
||||||
|
// we exit recovery.
|
||||||
|
number_of_packets_in_window := expected_send_window / protocol.DefaultTCPMSS
|
||||||
|
AckNPackets(int(packets_in_recovery_window))
|
||||||
|
SendAvailableSendWindow()
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
|
||||||
|
|
||||||
|
// We need to ack the rest of the window before cwnd increases by 1.
|
||||||
|
AckNPackets(int(number_of_packets_in_window - 1))
|
||||||
|
SendAvailableSendWindow()
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
|
||||||
|
|
||||||
|
// Next ack should increase cwnd by 1.
|
||||||
|
AckNPackets(1)
|
||||||
|
expected_send_window += protocol.DefaultTCPMSS
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
|
||||||
|
|
||||||
|
// Now RTO and ensure slow start gets reset.
|
||||||
|
Expect(sender.HybridSlowStart().Started()).To(BeTrue())
|
||||||
|
sender.OnRetransmissionTimeout(true)
|
||||||
|
Expect(sender.HybridSlowStart().Started()).To(BeFalse())
|
||||||
|
})
|
||||||
|
|
||||||
|
It("slow start half packet loss with large reduction", func() {
|
||||||
|
sender.SetSlowStartLargeReduction(true)
|
||||||
|
|
||||||
|
sender.SetNumEmulatedConnections(1)
|
||||||
|
const kNumberOfAcks = 10
|
||||||
|
for i := 0; i < kNumberOfAcks; i++ {
|
||||||
|
// Send our full send window in half sized packets.
|
||||||
|
SendAvailableSendWindowLen(protocol.DefaultTCPMSS / 2)
|
||||||
|
AckNPackets(2)
|
||||||
|
}
|
||||||
|
SendAvailableSendWindowLen(protocol.DefaultTCPMSS / 2)
|
||||||
|
expected_send_window := defaultWindowTCP + (protocol.DefaultTCPMSS * 2 * kNumberOfAcks)
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
|
||||||
|
|
||||||
|
// Lose a packet to exit slow start. We should now have fallen out of
|
||||||
|
// slow start with a window reduced by 1.
|
||||||
|
LoseNPackets(1)
|
||||||
|
expected_send_window -= protocol.DefaultTCPMSS
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
|
||||||
|
|
||||||
|
// Lose 10 packets in recovery and verify that congestion window is reduced
|
||||||
|
// by 5 packets.
|
||||||
|
LoseNPacketsLen(10, protocol.DefaultTCPMSS/2)
|
||||||
|
expected_send_window -= 5 * protocol.DefaultTCPMSS
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("no PRR when less than one packet in flight", func() {
|
||||||
|
SendAvailableSendWindow()
|
||||||
|
LoseNPackets(int(initialCongestionWindowPackets) - 1)
|
||||||
|
AckNPackets(1)
|
||||||
|
// PRR will allow 2 packets for every ack during recovery.
|
||||||
|
Expect(SendAvailableSendWindow()).To(Equal(2))
|
||||||
|
// Simulate abandoning all packets by supplying a bytes_in_flight of 0.
|
||||||
|
// PRR should now allow a packet to be sent, even though prr's state
|
||||||
|
// variables believe it has sent enough packets.
|
||||||
|
Expect(sender.TimeUntilSend(clock.Now(), 0)).To(BeZero())
|
||||||
|
})
|
||||||
|
|
||||||
|
It("slow start packet loss PRR", func() {
|
||||||
|
sender.SetNumEmulatedConnections(1)
|
||||||
|
// Test based on the first example in RFC6937.
|
||||||
|
// Ack 10 packets in 5 acks to raise the CWND to 20, as in the example.
|
||||||
|
const kNumberOfAcks = 5
|
||||||
|
for i := 0; i < kNumberOfAcks; i++ {
|
||||||
|
// Send our full send window.
|
||||||
|
SendAvailableSendWindow()
|
||||||
|
AckNPackets(2)
|
||||||
|
}
|
||||||
|
SendAvailableSendWindow()
|
||||||
|
expected_send_window := defaultWindowTCP + (protocol.DefaultTCPMSS * 2 * kNumberOfAcks)
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
|
||||||
|
|
||||||
|
LoseNPackets(1)
|
||||||
|
|
||||||
|
// We should now have fallen out of slow start with a reduced window.
|
||||||
|
send_window_before_loss := expected_send_window
|
||||||
|
expected_send_window = protocol.ByteCount(float32(expected_send_window) * renoBeta)
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
|
||||||
|
|
||||||
|
// Testing TCP proportional rate reduction.
|
||||||
|
// We should send packets paced over the received acks for the remaining
|
||||||
|
// outstanding packets. The number of packets before we exit recovery is the
|
||||||
|
// original CWND minus the packet that has been lost and the one which
|
||||||
|
// triggered the loss.
|
||||||
|
remaining_packets_in_recovery := send_window_before_loss/protocol.DefaultTCPMSS - 2
|
||||||
|
|
||||||
|
for i := protocol.ByteCount(0); i < remaining_packets_in_recovery; i++ {
|
||||||
|
AckNPackets(1)
|
||||||
|
SendAvailableSendWindow()
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
|
||||||
|
}
|
||||||
|
|
||||||
|
// We need to ack another window before we increase CWND by 1.
|
||||||
|
number_of_packets_in_window := expected_send_window / protocol.DefaultTCPMSS
|
||||||
|
for i := protocol.ByteCount(0); i < number_of_packets_in_window; i++ {
|
||||||
|
AckNPackets(1)
|
||||||
|
Expect(SendAvailableSendWindow()).To(Equal(1))
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
|
||||||
|
}
|
||||||
|
|
||||||
|
AckNPackets(1)
|
||||||
|
expected_send_window += protocol.DefaultTCPMSS
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("slow start burst packet loss PRR", func() {
|
||||||
|
sender.SetNumEmulatedConnections(1)
|
||||||
|
// Test based on the second example in RFC6937, though we also implement
|
||||||
|
// forward acknowledgements, so the first two incoming acks will trigger
|
||||||
|
// PRR immediately.
|
||||||
|
// Ack 20 packets in 10 acks to raise the CWND to 30.
|
||||||
|
const kNumberOfAcks = 10
|
||||||
|
for i := 0; i < kNumberOfAcks; i++ {
|
||||||
|
// Send our full send window.
|
||||||
|
SendAvailableSendWindow()
|
||||||
|
AckNPackets(2)
|
||||||
|
}
|
||||||
|
SendAvailableSendWindow()
|
||||||
|
expected_send_window := defaultWindowTCP + (protocol.DefaultTCPMSS * 2 * kNumberOfAcks)
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
|
||||||
|
|
||||||
|
// Lose one more than the congestion window reduction, so that after loss,
|
||||||
|
// bytes_in_flight is lesser than the congestion window.
|
||||||
|
send_window_after_loss := protocol.ByteCount(renoBeta * float32(expected_send_window))
|
||||||
|
num_packets_to_lose := (expected_send_window-send_window_after_loss)/protocol.DefaultTCPMSS + 1
|
||||||
|
LoseNPackets(int(num_packets_to_lose))
|
||||||
|
// Immediately after the loss, ensure at least one packet can be sent.
|
||||||
|
// Losses without subsequent acks can occur with timer based loss detection.
|
||||||
|
Expect(sender.TimeUntilSend(clock.Now(), bytesInFlight)).To(BeZero())
|
||||||
|
AckNPackets(1)
|
||||||
|
|
||||||
|
// We should now have fallen out of slow start with a reduced window.
|
||||||
|
expected_send_window = protocol.ByteCount(float32(expected_send_window) * renoBeta)
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
|
||||||
|
|
||||||
|
// Only 2 packets should be allowed to be sent, per PRR-SSRB
|
||||||
|
Expect(SendAvailableSendWindow()).To(Equal(2))
|
||||||
|
|
||||||
|
// Ack the next packet, which triggers another loss.
|
||||||
|
LoseNPackets(1)
|
||||||
|
AckNPackets(1)
|
||||||
|
|
||||||
|
// Send 2 packets to simulate PRR-SSRB.
|
||||||
|
Expect(SendAvailableSendWindow()).To(Equal(2))
|
||||||
|
|
||||||
|
// Ack the next packet, which triggers another loss.
|
||||||
|
LoseNPackets(1)
|
||||||
|
AckNPackets(1)
|
||||||
|
|
||||||
|
// Send 2 packets to simulate PRR-SSRB.
|
||||||
|
Expect(SendAvailableSendWindow()).To(Equal(2))
|
||||||
|
|
||||||
|
// Exit recovery and return to sending at the new rate.
|
||||||
|
for i := 0; i < kNumberOfAcks; i++ {
|
||||||
|
AckNPackets(1)
|
||||||
|
Expect(SendAvailableSendWindow()).To(Equal(1))
|
||||||
|
}
|
||||||
|
})
|
||||||
|
|
||||||
|
It("RTO congestion window", func() {
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(defaultWindowTCP))
|
||||||
|
Expect(sender.SlowstartThreshold()).To(Equal(MaxCongestionWindow))
|
||||||
|
|
||||||
|
// Expect the window to decrease to the minimum once the RTO fires
|
||||||
|
// and slow start threshold to be set to 1/2 of the CWND.
|
||||||
|
sender.OnRetransmissionTimeout(true)
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(protocol.ByteCount(2 * protocol.DefaultTCPMSS)))
|
||||||
|
Expect(sender.SlowstartThreshold()).To(Equal(protocol.PacketNumber(5)))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("RTO congestion window no retransmission", func() {
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(defaultWindowTCP))
|
||||||
|
|
||||||
|
// Expect the window to remain unchanged if the RTO fires but no
|
||||||
|
// packets are retransmitted.
|
||||||
|
sender.OnRetransmissionTimeout(false)
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(defaultWindowTCP))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("retransmission delay", func() {
|
||||||
|
const kRttMs = 10 * time.Millisecond
|
||||||
|
const kDeviationMs = 3 * time.Millisecond
|
||||||
|
Expect(sender.RetransmissionDelay()).To(BeZero())
|
||||||
|
|
||||||
|
rttStats.UpdateRTT(kRttMs, 0, clock.Now())
|
||||||
|
|
||||||
|
// Initial value is to set the median deviation to half of the initial
|
||||||
|
// rtt, the median in then multiplied by a factor of 4 and finally the
|
||||||
|
// smoothed rtt is added which is the initial rtt.
|
||||||
|
expected_delay := kRttMs + kRttMs/2*4
|
||||||
|
Expect(sender.RetransmissionDelay()).To(Equal(expected_delay))
|
||||||
|
|
||||||
|
for i := 0; i < 100; i++ {
|
||||||
|
// run to make sure that we converge.
|
||||||
|
rttStats.UpdateRTT(kRttMs+kDeviationMs, 0, clock.Now())
|
||||||
|
rttStats.UpdateRTT(kRttMs-kDeviationMs, 0, clock.Now())
|
||||||
|
}
|
||||||
|
expected_delay = kRttMs + kDeviationMs*4
|
||||||
|
|
||||||
|
Expect(rttStats.SmoothedRTT()).To(BeNumerically("~", kRttMs, time.Millisecond))
|
||||||
|
Expect(sender.RetransmissionDelay()).To(BeNumerically("~", expected_delay, time.Millisecond))
|
||||||
|
Expect(sender.BandwidthEstimate() / BytesPerSecond).To(Equal(Bandwidth(
|
||||||
|
sender.GetCongestionWindow() * protocol.ByteCount(time.Second) / protocol.ByteCount(rttStats.SmoothedRTT()),
|
||||||
|
)))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("slow start max send window", func() {
|
||||||
|
const kMaxCongestionWindowTCP = 50
|
||||||
|
const kNumberOfAcks = 100
|
||||||
|
sender = NewCubicSender(&clock, rttStats, false, initialCongestionWindowPackets, kMaxCongestionWindowTCP)
|
||||||
|
|
||||||
|
for i := 0; i < kNumberOfAcks; i++ {
|
||||||
|
// Send our full send window.
|
||||||
|
SendAvailableSendWindow()
|
||||||
|
AckNPackets(2)
|
||||||
|
}
|
||||||
|
expected_send_window := kMaxCongestionWindowTCP * protocol.DefaultTCPMSS
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(protocol.ByteCount(expected_send_window)))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("tcp reno max congestion window", func() {
|
||||||
|
const kMaxCongestionWindowTCP = 50
|
||||||
|
const kNumberOfAcks = 1000
|
||||||
|
sender = NewCubicSender(&clock, rttStats, false, initialCongestionWindowPackets, kMaxCongestionWindowTCP)
|
||||||
|
|
||||||
|
SendAvailableSendWindow()
|
||||||
|
AckNPackets(2)
|
||||||
|
// Make sure we fall out of slow start.
|
||||||
|
LoseNPackets(1)
|
||||||
|
|
||||||
|
for i := 0; i < kNumberOfAcks; i++ {
|
||||||
|
// Send our full send window.
|
||||||
|
SendAvailableSendWindow()
|
||||||
|
AckNPackets(2)
|
||||||
|
}
|
||||||
|
|
||||||
|
expected_send_window := kMaxCongestionWindowTCP * protocol.DefaultTCPMSS
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(protocol.ByteCount(expected_send_window)))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("tcp cubic max congestion window", func() {
|
||||||
|
const kMaxCongestionWindowTCP = 50
|
||||||
|
// Set to 10000 to compensate for small cubic alpha.
|
||||||
|
const kNumberOfAcks = 10000
|
||||||
|
|
||||||
|
sender = NewCubicSender(&clock, rttStats, false, initialCongestionWindowPackets, kMaxCongestionWindowTCP)
|
||||||
|
|
||||||
|
SendAvailableSendWindow()
|
||||||
|
AckNPackets(2)
|
||||||
|
// Make sure we fall out of slow start.
|
||||||
|
LoseNPackets(1)
|
||||||
|
|
||||||
|
for i := 0; i < kNumberOfAcks; i++ {
|
||||||
|
// Send our full send window.
|
||||||
|
SendAvailableSendWindow()
|
||||||
|
AckNPackets(2)
|
||||||
|
}
|
||||||
|
|
||||||
|
expected_send_window := kMaxCongestionWindowTCP * protocol.DefaultTCPMSS
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(protocol.ByteCount(expected_send_window)))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("tcp cubic reset epoch on quiescence", func() {
|
||||||
|
const kMaxCongestionWindow = 50
|
||||||
|
const kMaxCongestionWindowBytes = kMaxCongestionWindow * protocol.DefaultTCPMSS
|
||||||
|
sender = NewCubicSender(&clock, rttStats, false, initialCongestionWindowPackets, kMaxCongestionWindow)
|
||||||
|
|
||||||
|
num_sent := SendAvailableSendWindow()
|
||||||
|
|
||||||
|
// Make sure we fall out of slow start.
|
||||||
|
saved_cwnd := sender.GetCongestionWindow()
|
||||||
|
LoseNPackets(1)
|
||||||
|
Expect(saved_cwnd).To(BeNumerically(">", sender.GetCongestionWindow()))
|
||||||
|
|
||||||
|
// Ack the rest of the outstanding packets to get out of recovery.
|
||||||
|
for i := 1; i < num_sent; i++ {
|
||||||
|
AckNPackets(1)
|
||||||
|
}
|
||||||
|
Expect(bytesInFlight).To(BeZero())
|
||||||
|
|
||||||
|
// Send a new window of data and ack all; cubic growth should occur.
|
||||||
|
saved_cwnd = sender.GetCongestionWindow()
|
||||||
|
num_sent = SendAvailableSendWindow()
|
||||||
|
for i := 0; i < num_sent; i++ {
|
||||||
|
AckNPackets(1)
|
||||||
|
}
|
||||||
|
Expect(saved_cwnd).To(BeNumerically("<", sender.GetCongestionWindow()))
|
||||||
|
Expect(kMaxCongestionWindowBytes).To(BeNumerically(">", sender.GetCongestionWindow()))
|
||||||
|
Expect(bytesInFlight).To(BeZero())
|
||||||
|
|
||||||
|
// Quiescent time of 100 seconds
|
||||||
|
clock.Advance(100 * time.Second)
|
||||||
|
|
||||||
|
// Send new window of data and ack one packet. Cubic epoch should have
|
||||||
|
// been reset; ensure cwnd increase is not dramatic.
|
||||||
|
saved_cwnd = sender.GetCongestionWindow()
|
||||||
|
SendAvailableSendWindow()
|
||||||
|
AckNPackets(1)
|
||||||
|
Expect(saved_cwnd).To(BeNumerically("~", sender.GetCongestionWindow(), protocol.DefaultTCPMSS))
|
||||||
|
Expect(kMaxCongestionWindowBytes).To(BeNumerically(">", sender.GetCongestionWindow()))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("tcp cubic shifted epoch on quiescence", func() {
|
||||||
|
const kMaxCongestionWindow = 50
|
||||||
|
const kMaxCongestionWindowBytes = kMaxCongestionWindow * protocol.DefaultTCPMSS
|
||||||
|
sender = NewCubicSender(&clock, rttStats, false, initialCongestionWindowPackets, kMaxCongestionWindow)
|
||||||
|
|
||||||
|
num_sent := SendAvailableSendWindow()
|
||||||
|
|
||||||
|
// Make sure we fall out of slow start.
|
||||||
|
saved_cwnd := sender.GetCongestionWindow()
|
||||||
|
LoseNPackets(1)
|
||||||
|
Expect(saved_cwnd).To(BeNumerically(">", sender.GetCongestionWindow()))
|
||||||
|
|
||||||
|
// Ack the rest of the outstanding packets to get out of recovery.
|
||||||
|
for i := 1; i < num_sent; i++ {
|
||||||
|
AckNPackets(1)
|
||||||
|
}
|
||||||
|
Expect(bytesInFlight).To(BeZero())
|
||||||
|
|
||||||
|
// Send a new window of data and ack all; cubic growth should occur.
|
||||||
|
saved_cwnd = sender.GetCongestionWindow()
|
||||||
|
num_sent = SendAvailableSendWindow()
|
||||||
|
for i := 0; i < num_sent; i++ {
|
||||||
|
AckNPackets(1)
|
||||||
|
}
|
||||||
|
Expect(saved_cwnd).To(BeNumerically("<", sender.GetCongestionWindow()))
|
||||||
|
Expect(kMaxCongestionWindowBytes).To(BeNumerically(">", sender.GetCongestionWindow()))
|
||||||
|
Expect(bytesInFlight).To(BeZero())
|
||||||
|
|
||||||
|
// Quiescent time of 100 seconds
|
||||||
|
clock.Advance(100 * time.Second)
|
||||||
|
|
||||||
|
// Send new window of data and ack one packet. Cubic epoch should have
|
||||||
|
// been reset; ensure cwnd increase is not dramatic.
|
||||||
|
saved_cwnd = sender.GetCongestionWindow()
|
||||||
|
SendAvailableSendWindow()
|
||||||
|
AckNPackets(1)
|
||||||
|
Expect(saved_cwnd).To(BeNumerically("~", sender.GetCongestionWindow(), protocol.DefaultTCPMSS))
|
||||||
|
Expect(kMaxCongestionWindowBytes).To(BeNumerically(">", sender.GetCongestionWindow()))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("multiple losses in one window", func() {
|
||||||
|
SendAvailableSendWindow()
|
||||||
|
initial_window := sender.GetCongestionWindow()
|
||||||
|
LosePacket(ackedPacketNumber + 1)
|
||||||
|
post_loss_window := sender.GetCongestionWindow()
|
||||||
|
Expect(initial_window).To(BeNumerically(">", post_loss_window))
|
||||||
|
LosePacket(ackedPacketNumber + 3)
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(post_loss_window))
|
||||||
|
LosePacket(packetNumber - 1)
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(post_loss_window))
|
||||||
|
|
||||||
|
// Lose a later packet and ensure the window decreases.
|
||||||
|
LosePacket(packetNumber)
|
||||||
|
Expect(post_loss_window).To(BeNumerically(">", sender.GetCongestionWindow()))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("don't track ack packets", func() {
|
||||||
|
// Send a packet with no retransmittable data, and ensure it's not tracked.
|
||||||
|
Expect(sender.OnPacketSent(clock.Now(), bytesInFlight, packetNumber, protocol.DefaultTCPMSS, false)).To(BeFalse())
|
||||||
|
packetNumber++
|
||||||
|
|
||||||
|
// Send a data packet with retransmittable data, and ensure it is tracked.
|
||||||
|
Expect(sender.OnPacketSent(clock.Now(), bytesInFlight, packetNumber, protocol.DefaultTCPMSS, true)).To(BeTrue())
|
||||||
|
})
|
||||||
|
|
||||||
|
// TEST_F(TcpCubicSenderPacketsTest, ConfigureInitialWindow) {
|
||||||
|
// QuicConfig config;
|
||||||
|
//
|
||||||
|
// QuicTagVector options;
|
||||||
|
// options.push_back(kIW03);
|
||||||
|
// QuicConfigPeer::SetReceivedConnectionOptions(&config, options);
|
||||||
|
// sender.SetFromConfig(config, Perspective::IS_SERVER);
|
||||||
|
// Expect( sender.congestion_window()).To(Equal(3u))
|
||||||
|
//
|
||||||
|
// options.clear();
|
||||||
|
// options.push_back(kIW10);
|
||||||
|
// QuicConfigPeer::SetReceivedConnectionOptions(&config, options);
|
||||||
|
// sender.SetFromConfig(config, Perspective::IS_SERVER);
|
||||||
|
// Expect( sender.congestion_window()).To(Equal(10u))
|
||||||
|
//
|
||||||
|
// options.clear();
|
||||||
|
// options.push_back(kIW20);
|
||||||
|
// QuicConfigPeer::SetReceivedConnectionOptions(&config, options);
|
||||||
|
// sender.SetFromConfig(config, Perspective::IS_SERVER);
|
||||||
|
// Expect( sender.congestion_window()).To(Equal(20u))
|
||||||
|
//
|
||||||
|
// options.clear();
|
||||||
|
// options.push_back(kIW50);
|
||||||
|
// QuicConfigPeer::SetReceivedConnectionOptions(&config, options);
|
||||||
|
// sender.SetFromConfig(config, Perspective::IS_SERVER);
|
||||||
|
// Expect( sender.congestion_window()).To(Equal(50u))
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// TEST_F(TcpCubicSenderPacketsTest, ConfigureMinimumWindow) {
|
||||||
|
// QuicConfig config;
|
||||||
|
//
|
||||||
|
// // Verify that kCOPT: kMIN1 forces the min CWND to 1 packet.
|
||||||
|
// QuicTagVector options;
|
||||||
|
// options.push_back(kMIN1);
|
||||||
|
// QuicConfigPeer::SetReceivedConnectionOptions(&config, options);
|
||||||
|
// sender.SetFromConfig(config, Perspective::IS_SERVER);
|
||||||
|
// sender.OnRetransmissionTimeout(true);
|
||||||
|
// Expect( sender.congestion_window()).To(Equal(1u))
|
||||||
|
// }
|
||||||
|
|
||||||
|
It("2 connection congestion avoidance at end of recovery", func() {
|
||||||
|
sender.SetNumEmulatedConnections(2)
|
||||||
|
// Ack 10 packets in 5 acks to raise the CWND to 20.
|
||||||
|
const kNumberOfAcks = 5
|
||||||
|
for i := 0; i < kNumberOfAcks; i++ {
|
||||||
|
// Send our full send window.
|
||||||
|
SendAvailableSendWindow()
|
||||||
|
AckNPackets(2)
|
||||||
|
}
|
||||||
|
SendAvailableSendWindow()
|
||||||
|
expected_send_window := defaultWindowTCP + (protocol.DefaultTCPMSS * 2 * kNumberOfAcks)
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
|
||||||
|
|
||||||
|
LoseNPackets(1)
|
||||||
|
|
||||||
|
// We should now have fallen out of slow start with a reduced window.
|
||||||
|
expected_send_window = protocol.ByteCount(float32(expected_send_window) * sender.RenoBeta())
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
|
||||||
|
|
||||||
|
// No congestion window growth should occur in recovery phase, i.e., until the
|
||||||
|
// currently outstanding 20 packets are acked.
|
||||||
|
for i := 0; i < 10; i++ {
|
||||||
|
// Send our full send window.
|
||||||
|
SendAvailableSendWindow()
|
||||||
|
Expect(sender.InRecovery()).To(BeTrue())
|
||||||
|
AckNPackets(2)
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
|
||||||
|
}
|
||||||
|
Expect(sender.InRecovery()).To(BeFalse())
|
||||||
|
|
||||||
|
// Out of recovery now. Congestion window should not grow for half an RTT.
|
||||||
|
packets_in_send_window := expected_send_window / protocol.DefaultTCPMSS
|
||||||
|
SendAvailableSendWindow()
|
||||||
|
AckNPackets(int(packets_in_send_window/2 - 2))
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
|
||||||
|
|
||||||
|
// Next ack should increase congestion window by 1MSS.
|
||||||
|
SendAvailableSendWindow()
|
||||||
|
AckNPackets(2)
|
||||||
|
expected_send_window += protocol.DefaultTCPMSS
|
||||||
|
packets_in_send_window += 1
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
|
||||||
|
|
||||||
|
// Congestion window should remain steady again for half an RTT.
|
||||||
|
SendAvailableSendWindow()
|
||||||
|
AckNPackets(int(packets_in_send_window/2 - 1))
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
|
||||||
|
|
||||||
|
// Next ack should cause congestion window to grow by 1MSS.
|
||||||
|
SendAvailableSendWindow()
|
||||||
|
AckNPackets(2)
|
||||||
|
expected_send_window += protocol.DefaultTCPMSS
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("1 connection congestion avoidance at end of recovery", func() {
|
||||||
|
sender.SetNumEmulatedConnections(1)
|
||||||
|
// Ack 10 packets in 5 acks to raise the CWND to 20.
|
||||||
|
const kNumberOfAcks = 5
|
||||||
|
for i := 0; i < kNumberOfAcks; i++ {
|
||||||
|
// Send our full send window.
|
||||||
|
SendAvailableSendWindow()
|
||||||
|
AckNPackets(2)
|
||||||
|
}
|
||||||
|
SendAvailableSendWindow()
|
||||||
|
expected_send_window := defaultWindowTCP + (protocol.DefaultTCPMSS * 2 * kNumberOfAcks)
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
|
||||||
|
|
||||||
|
LoseNPackets(1)
|
||||||
|
|
||||||
|
// We should now have fallen out of slow start with a reduced window.
|
||||||
|
expected_send_window = protocol.ByteCount(float32(expected_send_window) * renoBeta)
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
|
||||||
|
|
||||||
|
// No congestion window growth should occur in recovery phase, i.e., until the
|
||||||
|
// currently outstanding 20 packets are acked.
|
||||||
|
for i := 0; i < 10; i++ {
|
||||||
|
// Send our full send window.
|
||||||
|
SendAvailableSendWindow()
|
||||||
|
Expect(sender.InRecovery()).To(BeTrue())
|
||||||
|
AckNPackets(2)
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
|
||||||
|
}
|
||||||
|
Expect(sender.InRecovery()).To(BeFalse())
|
||||||
|
|
||||||
|
// Out of recovery now. Congestion window should not grow during RTT.
|
||||||
|
for i := protocol.ByteCount(0); i < expected_send_window/protocol.DefaultTCPMSS-2; i += 2 {
|
||||||
|
// Send our full send window.
|
||||||
|
SendAvailableSendWindow()
|
||||||
|
AckNPackets(2)
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Next ack should cause congestion window to grow by 1MSS.
|
||||||
|
SendAvailableSendWindow()
|
||||||
|
AckNPackets(2)
|
||||||
|
expected_send_window += protocol.DefaultTCPMSS
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
|
||||||
|
})
|
||||||
|
|
||||||
|
// TEST_F(TcpCubicSenderPacketsTest, BandwidthResumption) {
|
||||||
|
// // Test that when provided with CachedNetworkParameters and opted in to the
|
||||||
|
// // bandwidth resumption experiment, that the TcpCubicSenderPackets sets
|
||||||
|
// // initial CWND appropriately.
|
||||||
|
//
|
||||||
|
// // Set some common values.
|
||||||
|
// CachedNetworkParameters cached_network_params;
|
||||||
|
// const QuicPacketCount kNumberOfPackets = 123;
|
||||||
|
// const int kBandwidthEstimateBytesPerSecond =
|
||||||
|
// kNumberOfPackets * protocol.DefaultTCPMSS;
|
||||||
|
// cached_network_params.set_bandwidth_estimate_bytes_per_second(
|
||||||
|
// kBandwidthEstimateBytesPerSecond);
|
||||||
|
// cached_network_params.set_min_rtt_ms(1000);
|
||||||
|
//
|
||||||
|
// // Make sure that a bandwidth estimate results in a changed CWND.
|
||||||
|
// cached_network_params.set_timestamp(clock.WallNow().ToUNIXSeconds() -
|
||||||
|
// (kNumSecondsPerHour - 1));
|
||||||
|
// sender.ResumeConnectionState(cached_network_params, false);
|
||||||
|
// Expect( sender.congestion_window()).To(Equal(kNumberOfPackets))
|
||||||
|
//
|
||||||
|
// // Resumed CWND is limited to be in a sensible range.
|
||||||
|
// cached_network_params.set_bandwidth_estimate_bytes_per_second(
|
||||||
|
// (kMaxCongestionWindow + 1) * protocol.DefaultTCPMSS);
|
||||||
|
// sender.ResumeConnectionState(cached_network_params, false);
|
||||||
|
// Expect( sender.congestion_window()).To(Equal(kMaxCongestionWindow))
|
||||||
|
//
|
||||||
|
// cached_network_params.set_bandwidth_estimate_bytes_per_second(
|
||||||
|
// (kMinCongestionWindowForBandwidthResumption - 1) * protocol.DefaultTCPMSS);
|
||||||
|
// sender.ResumeConnectionState(cached_network_params, false);
|
||||||
|
// EXPECT_EQ(kMinCongestionWindowForBandwidthResumption,
|
||||||
|
// sender.congestion_window());
|
||||||
|
//
|
||||||
|
// // Resume to the max value.
|
||||||
|
// cached_network_params.set_max_bandwidth_estimate_bytes_per_second(
|
||||||
|
// (kMinCongestionWindowForBandwidthResumption + 10) * protocol.DefaultTCPMSS);
|
||||||
|
// sender.ResumeConnectionState(cached_network_params, true);
|
||||||
|
// EXPECT_EQ((kMinCongestionWindowForBandwidthResumption + 10) * protocol.DefaultTCPMSS,
|
||||||
|
// sender.GetCongestionWindow());
|
||||||
|
// }
|
||||||
|
//
|
||||||
|
// TEST_F(TcpCubicSenderPacketsTest, PaceBelowCWND) {
|
||||||
|
// QuicConfig config;
|
||||||
|
//
|
||||||
|
// // Verify that kCOPT: kMIN4 forces the min CWND to 1 packet, but allows up
|
||||||
|
// // to 4 to be sent.
|
||||||
|
// QuicTagVector options;
|
||||||
|
// options.push_back(kMIN4);
|
||||||
|
// QuicConfigPeer::SetReceivedConnectionOptions(&config, options);
|
||||||
|
// sender.SetFromConfig(config, Perspective::IS_SERVER);
|
||||||
|
// sender.OnRetransmissionTimeout(true);
|
||||||
|
// Expect( sender.congestion_window()).To(Equal(1u))
|
||||||
|
// EXPECT_TRUE(
|
||||||
|
// sender.TimeUntilSend(QuicTime::Zero(), protocol.DefaultTCPMSS).IsZero());
|
||||||
|
// EXPECT_TRUE(
|
||||||
|
// sender.TimeUntilSend(QuicTime::Zero(), 2 * protocol.DefaultTCPMSS).IsZero());
|
||||||
|
// EXPECT_TRUE(
|
||||||
|
// sender.TimeUntilSend(QuicTime::Zero(), 3 * protocol.DefaultTCPMSS).IsZero());
|
||||||
|
// EXPECT_FALSE(
|
||||||
|
// sender.TimeUntilSend(QuicTime::Zero(), 4 * protocol.DefaultTCPMSS).IsZero());
|
||||||
|
// }
|
||||||
|
|
||||||
|
It("reset after connection migration", func() {
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(defaultWindowTCP))
|
||||||
|
Expect(sender.SlowstartThreshold()).To(Equal(MaxCongestionWindow))
|
||||||
|
|
||||||
|
// Starts with slow start.
|
||||||
|
sender.SetNumEmulatedConnections(1)
|
||||||
|
const kNumberOfAcks = 10
|
||||||
|
for i := 0; i < kNumberOfAcks; i++ {
|
||||||
|
// Send our full send window.
|
||||||
|
SendAvailableSendWindow()
|
||||||
|
AckNPackets(2)
|
||||||
|
}
|
||||||
|
SendAvailableSendWindow()
|
||||||
|
expected_send_window := defaultWindowTCP + (protocol.DefaultTCPMSS * 2 * kNumberOfAcks)
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
|
||||||
|
|
||||||
|
// Loses a packet to exit slow start.
|
||||||
|
LoseNPackets(1)
|
||||||
|
|
||||||
|
// We should now have fallen out of slow start with a reduced window. Slow
|
||||||
|
// start threshold is also updated.
|
||||||
|
expected_send_window = protocol.ByteCount(float32(expected_send_window) * renoBeta)
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(expected_send_window))
|
||||||
|
Expect(sender.SlowstartThreshold()).To(Equal(protocol.PacketNumber(expected_send_window / protocol.DefaultTCPMSS)))
|
||||||
|
|
||||||
|
// Resets cwnd and slow start threshold on connection migrations.
|
||||||
|
sender.OnConnectionMigration()
|
||||||
|
Expect(sender.GetCongestionWindow()).To(Equal(defaultWindowTCP))
|
||||||
|
Expect(sender.SlowstartThreshold()).To(Equal(MaxCongestionWindow))
|
||||||
|
Expect(sender.HybridSlowStart().Started()).To(BeFalse())
|
||||||
|
})
|
||||||
|
})
|
112 vendor/github.com/lucas-clemente/quic-go/congestion/cubic_test.go generated vendored Normal file
@@ -0,0 +1,112 @@
package congestion
|
||||||
|
|
||||||
|
import (
|
||||||
|
"math"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/lucas-clemente/quic-go/protocol"
|
||||||
|
. "github.com/onsi/ginkgo"
|
||||||
|
. "github.com/onsi/gomega"
|
||||||
|
)
|
||||||
|
|
||||||
|
const kBeta float32 = 0.7 // Default Cubic backoff factor.
|
||||||
|
const kNumConnections uint32 = 2
|
||||||
|
const kNConnectionBeta float32 = (float32(kNumConnections) - 1 + kBeta) / float32(kNumConnections)
|
||||||
|
const kNConnectionAlpha float32 = 3 * float32(kNumConnections) * float32(kNumConnections) * (1 - kNConnectionBeta) / (1 + kNConnectionBeta)
|
||||||
|
|
||||||
|
var _ = Describe("Cubic", func() {
|
||||||
|
var (
|
||||||
|
clock mockClock
|
||||||
|
cubic *Cubic
|
||||||
|
)
|
||||||
|
|
||||||
|
BeforeEach(func() {
|
||||||
|
clock = mockClock{}
|
||||||
|
cubic = NewCubic(&clock)
|
||||||
|
})
|
||||||
|
|
||||||
|
It("works above origin", func() {
|
||||||
|
// Convex growth.
|
||||||
|
const rtt_min = 100 * time.Millisecond
|
||||||
|
const rtt_min_s = float32(rtt_min/time.Millisecond) / 1000.0
|
||||||
|
current_cwnd := protocol.PacketNumber(10)
|
||||||
|
// Without the signed-integer, cubic-convex fix, we mistakenly
|
||||||
|
// increment cwnd after only one_ms_ and a single ack.
|
||||||
|
expected_cwnd := current_cwnd
|
||||||
|
// Initialize the state.
|
||||||
|
clock.Advance(time.Millisecond)
|
||||||
|
initial_time := clock.Now()
|
||||||
|
current_cwnd = cubic.CongestionWindowAfterAck(current_cwnd, rtt_min)
|
||||||
|
Expect(current_cwnd).To(Equal(expected_cwnd))
|
||||||
|
current_cwnd = expected_cwnd
|
||||||
|
initial_cwnd := current_cwnd
|
||||||
|
// Normal TCP phase.
|
||||||
|
// The maximum number of expected reno RTTs can be calculated by
|
||||||
|
// finding the point where the cubic curve and the reno curve meet.
|
||||||
|
max_reno_rtts := int(math.Sqrt(float64(kNConnectionAlpha/(0.4*rtt_min_s*rtt_min_s*rtt_min_s))) - 1)
|
||||||
|
for i := 0; i < max_reno_rtts; i++ {
|
||||||
|
max_per_ack_cwnd := current_cwnd
|
||||||
|
for n := uint64(1); n < uint64(float32(max_per_ack_cwnd)/kNConnectionAlpha); n++ {
|
||||||
|
// Call once per ACK.
|
||||||
|
next_cwnd := cubic.CongestionWindowAfterAck(current_cwnd, rtt_min)
|
||||||
|
Expect(next_cwnd).To(Equal(current_cwnd))
|
||||||
|
}
|
||||||
|
clock.Advance(100 * time.Millisecond)
|
||||||
|
current_cwnd = cubic.CongestionWindowAfterAck(current_cwnd, rtt_min)
|
||||||
|
// When we fix convex mode and the uint64 arithmetic, we
|
||||||
|
// increase the expected_cwnd only after after the first 100ms,
|
||||||
|
// rather than after the initial 1ms.
|
||||||
|
expected_cwnd++
|
||||||
|
Expect(current_cwnd).To(Equal(expected_cwnd))
|
||||||
|
}
|
||||||
|
// Cubic phase.
|
||||||
|
for i := 0; i < 52; i++ {
|
||||||
|
for n := protocol.PacketNumber(1); n < current_cwnd; n++ {
|
||||||
|
// Call once per ACK.
|
||||||
|
Expect(cubic.CongestionWindowAfterAck(current_cwnd, rtt_min)).To(Equal(current_cwnd))
|
||||||
|
}
|
||||||
|
clock.Advance(100 * time.Millisecond)
|
||||||
|
current_cwnd = cubic.CongestionWindowAfterAck(current_cwnd, rtt_min)
|
||||||
|
}
|
||||||
|
// Total time elapsed so far; add min_rtt (0.1s) here as well.
|
||||||
|
elapsed_time_s := float32(clock.Now().Sub(initial_time)+rtt_min) / float32(time.Second)
|
||||||
|
// |expected_cwnd| is initial value of cwnd + K * t^3, where K = 0.4.
|
||||||
|
expected_cwnd = initial_cwnd + protocol.PacketNumber((elapsed_time_s*elapsed_time_s*elapsed_time_s*410)/1024)
|
||||||
|
Expect(current_cwnd).To(Equal(expected_cwnd))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("manages loss events", func() {
|
||||||
|
rtt_min := 100 * time.Millisecond
|
||||||
|
current_cwnd := protocol.PacketNumber(422)
|
||||||
|
expected_cwnd := current_cwnd
|
||||||
|
// Initialize the state.
|
||||||
|
clock.Advance(time.Millisecond)
|
||||||
|
Expect(cubic.CongestionWindowAfterAck(current_cwnd, rtt_min)).To(Equal(expected_cwnd))
|
||||||
|
expected_cwnd = protocol.PacketNumber(float32(current_cwnd) * kNConnectionBeta)
|
||||||
|
Expect(cubic.CongestionWindowAfterPacketLoss(current_cwnd)).To(Equal(expected_cwnd))
|
||||||
|
expected_cwnd = protocol.PacketNumber(float32(current_cwnd) * kNConnectionBeta)
|
||||||
|
Expect(cubic.CongestionWindowAfterPacketLoss(current_cwnd)).To(Equal(expected_cwnd))
|
||||||
|
})
|
||||||
|
|
||||||
|
It("works below origin", func() {
|
||||||
|
// Concave growth.
|
||||||
|
rtt_min := 100 * time.Millisecond
|
||||||
|
current_cwnd := protocol.PacketNumber(422)
|
||||||
|
expected_cwnd := current_cwnd
|
||||||
|
// Initialize the state.
|
||||||
|
clock.Advance(time.Millisecond)
|
||||||
|
Expect(cubic.CongestionWindowAfterAck(current_cwnd, rtt_min)).To(Equal(expected_cwnd))
|
||||||
|
expected_cwnd = protocol.PacketNumber(float32(current_cwnd) * kNConnectionBeta)
|
||||||
|
Expect(cubic.CongestionWindowAfterPacketLoss(current_cwnd)).To(Equal(expected_cwnd))
|
||||||
|
current_cwnd = expected_cwnd
|
||||||
|
// First update after loss to initialize the epoch.
|
||||||
|
current_cwnd = cubic.CongestionWindowAfterAck(current_cwnd, rtt_min)
|
||||||
|
// Cubic phase.
|
||||||
|
for i := 0; i < 40; i++ {
|
||||||
|
clock.Advance(100 * time.Millisecond)
|
||||||
|
current_cwnd = cubic.CongestionWindowAfterAck(current_cwnd, rtt_min)
|
||||||
|
}
|
||||||
|
expected_cwnd = 422
|
||||||
|
Expect(current_cwnd).To(Equal(expected_cwnd))
|
||||||
|
})
|
||||||
|
})
|
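For the constants above, kNumConnections = 2 gives an emulated-connection backoff of about 0.85 and an additive-increase factor of roughly 0.97. A standalone arithmetic check (my own sketch, not part of the vendored test):

package main

import "fmt"

func main() {
	const kBeta float32 = 0.7
	const kNumConnections uint32 = 2
	// Same formulas as the test constants above.
	beta := (float32(kNumConnections) - 1 + kBeta) / float32(kNumConnections)
	alpha := 3 * float32(kNumConnections) * float32(kNumConnections) * (1 - beta) / (1 + beta)
	fmt.Println(beta, alpha) // 0.85 and roughly 0.973: the backoff and additive-increase factors for the 2-connection emulation
}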
111 vendor/github.com/lucas-clemente/quic-go/congestion/hybrid_slow_start.go generated vendored Normal file
@@ -0,0 +1,111 @@
package congestion

import (
	"time"

	"github.com/lucas-clemente/quic-go/protocol"
	"github.com/lucas-clemente/quic-go/utils"
)

// Note(pwestin): the magic clamping numbers come from the original code in
// tcp_cubic.c.
const hybridStartLowWindow = protocol.ByteCount(16)

// Number of delay samples for detecting the increase of delay.
const hybridStartMinSamples = uint32(8)

// Exit slow start if the min rtt has increased by more than 1/8th.
const hybridStartDelayFactorExp = 3 // 2^3 = 8
// The original paper specifies 2 and 8ms, but those have changed over time.
const hybridStartDelayMinThresholdUs = int64(4000)
const hybridStartDelayMaxThresholdUs = int64(16000)

// HybridSlowStart implements the TCP hybrid slow start algorithm
type HybridSlowStart struct {
	endPacketNumber      protocol.PacketNumber
	lastSentPacketNumber protocol.PacketNumber
	started              bool
	currentMinRTT        time.Duration
	rttSampleCount       uint32
	hystartFound         bool
}

// StartReceiveRound is called for the start of each receive round (burst) in the slow start phase.
func (s *HybridSlowStart) StartReceiveRound(lastSent protocol.PacketNumber) {
	s.endPacketNumber = lastSent
	s.currentMinRTT = 0
	s.rttSampleCount = 0
	s.started = true
}

// IsEndOfRound returns true if this ack is the last packet number of our current slow start round.
func (s *HybridSlowStart) IsEndOfRound(ack protocol.PacketNumber) bool {
	return s.endPacketNumber < ack
}

// ShouldExitSlowStart should be called on every new ack frame, since a new
// RTT measurement can be made then.
// latestRTT: the RTT for this ack packet.
// minRTT: the lowest delay (RTT) we have seen during the session.
// congestionWindow: the congestion window in packets.
func (s *HybridSlowStart) ShouldExitSlowStart(latestRTT time.Duration, minRTT time.Duration, congestionWindow protocol.ByteCount) bool {
	if !s.started {
		// Time to start the hybrid slow start.
		s.StartReceiveRound(s.lastSentPacketNumber)
	}
	if s.hystartFound {
		return true
	}
	// Second detection parameter - delay increase detection.
	// Compare the minimum delay (s.currentMinRTT) of the current
	// burst of packets relative to the minimum delay during the session.
	// Note: we only look at the first few (8) packets in each burst, since we
	// only want to compare the lowest RTT of the burst relative to previous
	// bursts.
	s.rttSampleCount++
	if s.rttSampleCount <= hybridStartMinSamples {
		if s.currentMinRTT == 0 || s.currentMinRTT > latestRTT {
			s.currentMinRTT = latestRTT
		}
	}
	// We only need to check this once per round.
	if s.rttSampleCount == hybridStartMinSamples {
		// Divide minRTT by 8 to get a rtt increase threshold for exiting.
		minRTTincreaseThresholdUs := int64(minRTT / time.Microsecond >> hybridStartDelayFactorExp)
		// Ensure the rtt threshold is never less than 4ms or more than 16ms.
		minRTTincreaseThresholdUs = utils.MinInt64(minRTTincreaseThresholdUs, hybridStartDelayMaxThresholdUs)
		minRTTincreaseThreshold := time.Duration(utils.MaxInt64(minRTTincreaseThresholdUs, hybridStartDelayMinThresholdUs)) * time.Microsecond

		if s.currentMinRTT > (minRTT + minRTTincreaseThreshold) {
			s.hystartFound = true
		}
	}
	// Exit from slow start if the cwnd is greater than 16 and
	// increasing delay is found.
	return congestionWindow >= hybridStartLowWindow && s.hystartFound
}

// OnPacketSent is called when a packet was sent
func (s *HybridSlowStart) OnPacketSent(packetNumber protocol.PacketNumber) {
	s.lastSentPacketNumber = packetNumber
}

// OnPacketAcked gets invoked after ShouldExitSlowStart, so it's best to end
// the round when the final packet of the burst is received and start it on
// the next incoming ack.
func (s *HybridSlowStart) OnPacketAcked(ackedPacketNumber protocol.PacketNumber) {
	if s.IsEndOfRound(ackedPacketNumber) {
		s.started = false
	}
}

// Started returns true if started
func (s *HybridSlowStart) Started() bool {
	return s.started
}

// Restart the slow start phase
func (s *HybridSlowStart) Restart() {
	s.started = false
	s.hystartFound = false
}
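As a rough illustration of the intended call pattern (OnPacketSent per sent packet, then ShouldExitSlowStart and OnPacketAcked per ack), here is a standalone driver sketch; the burst size, the RTT ramp, and the 32-packet window are made-up values, and the import paths assume the vendored package layout above:

package main

import (
	"fmt"
	"time"

	"github.com/lucas-clemente/quic-go/congestion"
	"github.com/lucas-clemente/quic-go/protocol"
)

func main() {
	var hss congestion.HybridSlowStart
	minRTT := 50 * time.Millisecond
	cwnd := protocol.ByteCount(32) // in packets, above the 16-packet low-window gate

	var pn protocol.PacketNumber
	for round := 0; round < 10; round++ {
		// Send a burst and remember the packet numbers of the round.
		for i := 0; i < 8; i++ {
			pn++
			hss.OnPacketSent(pn)
		}
		// Pretend the whole burst is acked with a steadily growing RTT.
		latest := minRTT + time.Duration(round)*10*time.Millisecond
		for ack := pn - 7; ack <= pn; ack++ {
			if hss.ShouldExitSlowStart(latest, minRTT, cwnd) {
				fmt.Println("leave slow start in round", round)
				return
			}
			hss.OnPacketAcked(ack)
		}
	}
}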
75 vendor/github.com/lucas-clemente/quic-go/congestion/hybrid_slow_start_test.go generated vendored Normal file
@@ -0,0 +1,75 @@
package congestion

import (
	"time"

	"github.com/lucas-clemente/quic-go/protocol"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var _ = Describe("Hybrid slow start", func() {
	var (
		slowStart HybridSlowStart
	)

	BeforeEach(func() {
		slowStart = HybridSlowStart{}
	})

	It("works in a simple case", func() {
		packet_number := protocol.PacketNumber(1)
		end_packet_number := protocol.PacketNumber(3)
		slowStart.StartReceiveRound(end_packet_number)

		packet_number++
		Expect(slowStart.IsEndOfRound(packet_number)).To(BeFalse())

		// Test duplicates.
		Expect(slowStart.IsEndOfRound(packet_number)).To(BeFalse())

		packet_number++
		Expect(slowStart.IsEndOfRound(packet_number)).To(BeFalse())
		packet_number++
		Expect(slowStart.IsEndOfRound(packet_number)).To(BeTrue())

		// Test without a new registered end_packet_number.
		packet_number++
		Expect(slowStart.IsEndOfRound(packet_number)).To(BeTrue())

		end_packet_number = 20
		slowStart.StartReceiveRound(end_packet_number)
		for packet_number < end_packet_number {
			packet_number++
			Expect(slowStart.IsEndOfRound(packet_number)).To(BeFalse())
		}
		packet_number++
		Expect(slowStart.IsEndOfRound(packet_number)).To(BeTrue())
	})

	It("works with delay", func() {
		rtt := 60 * time.Millisecond
		// We expect to detect the increase at +1/8 of the RTT; hence at a typical
		// RTT of 60ms the detection will happen at 67.5 ms.
		const kHybridStartMinSamples = 8 // Number of acks required to trigger.

		end_packet_number := protocol.PacketNumber(1)
		end_packet_number++
		slowStart.StartReceiveRound(end_packet_number)

		// Will not trigger since our lowest RTT in our burst is the same as the long
		// term RTT provided.
		for n := 0; n < kHybridStartMinSamples; n++ {
			Expect(slowStart.ShouldExitSlowStart(rtt+time.Duration(n)*time.Millisecond, rtt, 100)).To(BeFalse())
		}
		end_packet_number++
		slowStart.StartReceiveRound(end_packet_number)
		for n := 1; n < kHybridStartMinSamples; n++ {
			Expect(slowStart.ShouldExitSlowStart(rtt+(time.Duration(n)+10)*time.Millisecond, rtt, 100)).To(BeFalse())
		}
		// Expect to trigger since all packets in this burst were above the long term
		// RTT provided.
		Expect(slowStart.ShouldExitSlowStart(rtt+10*time.Millisecond, rtt, 100)).To(BeTrue())
	})

})
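The 67.5ms figure in the comment above follows directly from the clamped threshold in hybrid_slow_start.go: 60ms shifted right by 3 is 7.5ms, which already lies between the 4ms and 16ms clamps. A standalone sketch of that arithmetic (the values are the ones from the test; the little program itself is mine):

package main

import "fmt"

func main() {
	minRTTus := int64(60000)   // the 60ms session minimum RTT used in the test
	threshold := minRTTus >> 3 // 1/8th of the min RTT: 7500us
	if threshold > 16000 {     // clamp to hybridStartDelayMaxThresholdUs
		threshold = 16000
	}
	if threshold < 4000 { // clamp to hybridStartDelayMinThresholdUs
		threshold = 4000
	}
	fmt.Println(threshold) // 7500 -> the delay increase is detected once a burst's min RTT exceeds 67.5ms
}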
@@ -0,0 +1,37 @@
package congestion

import (
	"time"

	"github.com/lucas-clemente/quic-go/protocol"
)

// A SendAlgorithm performs congestion control and calculates the congestion window
type SendAlgorithm interface {
	TimeUntilSend(now time.Time, bytesInFlight protocol.ByteCount) time.Duration
	OnPacketSent(sentTime time.Time, bytesInFlight protocol.ByteCount, packetNumber protocol.PacketNumber, bytes protocol.ByteCount, isRetransmittable bool) bool
	GetCongestionWindow() protocol.ByteCount
	MaybeExitSlowStart()
	OnPacketAcked(number protocol.PacketNumber, ackedBytes protocol.ByteCount, bytesInFlight protocol.ByteCount)
	OnPacketLost(number protocol.PacketNumber, lostBytes protocol.ByteCount, bytesInFlight protocol.ByteCount)
	SetNumEmulatedConnections(n int)
	OnRetransmissionTimeout(packetsRetransmitted bool)
	OnConnectionMigration()
	RetransmissionDelay() time.Duration

	// Experiments
	SetSlowStartLargeReduction(enabled bool)
}

// SendAlgorithmWithDebugInfo adds some debug functions to SendAlgorithm
type SendAlgorithmWithDebugInfo interface {
	SendAlgorithm
	BandwidthEstimate() Bandwidth

	// Stuff only used in testing

	HybridSlowStart() *HybridSlowStart
	SlowstartThreshold() protocol.PacketNumber
	RenoBeta() float32
	InRecovery() bool
}
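For code that only needs a SendAlgorithm, a do-nothing implementation can be handy as a test double. The sketch below is my own (the fixedCwndSender name, the constant window, and the fixed retransmission delay are all assumptions), shown only to make the interface surface above concrete:

package congestion

import (
	"time"

	"github.com/lucas-clemente/quic-go/protocol"
)

// fixedCwndSender is a no-op congestion controller with a constant window.
type fixedCwndSender struct {
	cwnd protocol.ByteCount
}

var _ SendAlgorithm = &fixedCwndSender{}

func (f *fixedCwndSender) TimeUntilSend(now time.Time, bytesInFlight protocol.ByteCount) time.Duration {
	if bytesInFlight < f.cwnd {
		return 0 // window open: send immediately
	}
	return time.Hour // window full: effectively block
}

func (f *fixedCwndSender) OnPacketSent(sentTime time.Time, bytesInFlight protocol.ByteCount, packetNumber protocol.PacketNumber, bytes protocol.ByteCount, isRetransmittable bool) bool {
	return isRetransmittable
}
func (f *fixedCwndSender) GetCongestionWindow() protocol.ByteCount { return f.cwnd }
func (f *fixedCwndSender) MaybeExitSlowStart()                     {}
func (f *fixedCwndSender) OnPacketAcked(number protocol.PacketNumber, ackedBytes protocol.ByteCount, bytesInFlight protocol.ByteCount) {
}
func (f *fixedCwndSender) OnPacketLost(number protocol.PacketNumber, lostBytes protocol.ByteCount, bytesInFlight protocol.ByteCount) {
}
func (f *fixedCwndSender) SetNumEmulatedConnections(n int)                   {}
func (f *fixedCwndSender) OnRetransmissionTimeout(packetsRetransmitted bool) {}
func (f *fixedCwndSender) OnConnectionMigration()                            {}
func (f *fixedCwndSender) RetransmissionDelay() time.Duration                { return 500 * time.Millisecond }
func (f *fixedCwndSender) SetSlowStartLargeReduction(enabled bool)           {}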
@@ -0,0 +1,63 @@
package congestion

import (
	"time"

	"github.com/lucas-clemente/quic-go/protocol"
	"github.com/lucas-clemente/quic-go/utils"
)

// PrrSender implements the Proportional Rate Reduction (PRR) per RFC 6937
type PrrSender struct {
	bytesSentSinceLoss      protocol.ByteCount
	bytesDeliveredSinceLoss protocol.ByteCount
	ackCountSinceLoss       protocol.ByteCount
	bytesInFlightBeforeLoss protocol.ByteCount
}

// OnPacketSent should be called after a packet was sent
func (p *PrrSender) OnPacketSent(sentBytes protocol.ByteCount) {
	p.bytesSentSinceLoss += sentBytes
}

// OnPacketLost should be called on the first loss that triggers a recovery
// period and all other methods in this class should only be called when in
// recovery.
func (p *PrrSender) OnPacketLost(bytesInFlight protocol.ByteCount) {
	p.bytesSentSinceLoss = 0
	p.bytesInFlightBeforeLoss = bytesInFlight
	p.bytesDeliveredSinceLoss = 0
	p.ackCountSinceLoss = 0
}

// OnPacketAcked should be called after a packet was acked
func (p *PrrSender) OnPacketAcked(ackedBytes protocol.ByteCount) {
	p.bytesDeliveredSinceLoss += ackedBytes
	p.ackCountSinceLoss++
}

// TimeUntilSend calculates the time until a packet can be sent
func (p *PrrSender) TimeUntilSend(congestionWindow, bytesInFlight, slowstartThreshold protocol.ByteCount) time.Duration {
	// Return QuicTime::Zero in order to ensure limited transmit always works.
	if p.bytesSentSinceLoss == 0 || bytesInFlight < protocol.DefaultTCPMSS {
		return 0
	}
	if congestionWindow > bytesInFlight {
		// During PRR-SSRB, limit outgoing packets to 1 extra MSS per ack, instead
		// of sending the entire available window. This prevents burst retransmits
		// when more packets are lost than the CWND reduction.
		//   limit = MAX(prr_delivered - prr_out, DeliveredData) + MSS
		if p.bytesDeliveredSinceLoss+p.ackCountSinceLoss*protocol.DefaultTCPMSS <= p.bytesSentSinceLoss {
			return utils.InfDuration
		}
		return 0
	}
	// Implement Proportional Rate Reduction (RFC 6937).
	// Checks a simplified version of the PRR formula that doesn't use division:
	//   AvailableSendWindow =
	//     CEIL(prr_delivered * ssthresh / BytesInFlightAtLoss) - prr_sent
	if p.bytesDeliveredSinceLoss*slowstartThreshold > p.bytesSentSinceLoss*p.bytesInFlightBeforeLoss {
		return 0
	}
	return utils.InfDuration
}
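A sketch of how a sender might consult PrrSender during a recovery period; the surrounding loop and the chosen byte counts are illustrative, only the PrrSender and protocol calls mirror the vendored API above:

package main

import (
	"fmt"

	"github.com/lucas-clemente/quic-go/congestion"
	"github.com/lucas-clemente/quic-go/protocol"
)

func main() {
	mss := protocol.DefaultTCPMSS
	bytesInFlight := 50 * mss
	ssthresh := 25 * mss
	cwnd := ssthresh

	var prr congestion.PrrSender
	prr.OnPacketLost(bytesInFlight) // the first loss opens the recovery period

	// Each ack delivers one MSS; transmit only when PRR says the window is open.
	sent := 0
	for i := 0; i < 10; i++ {
		prr.OnPacketAcked(mss)
		bytesInFlight -= mss
		if prr.TimeUntilSend(cwnd, bytesInFlight, ssthresh) == 0 {
			prr.OnPacketSent(mss)
			bytesInFlight += mss
			sent++
		}
	}
	fmt.Println("packets sent during recovery:", sent) // roughly every other ack, as in the test below
}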
108 vendor/github.com/lucas-clemente/quic-go/congestion/prr_sender_test.go generated vendored Normal file
@@ -0,0 +1,108 @@
package congestion

import (
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"

	"github.com/lucas-clemente/quic-go/protocol"
	"github.com/lucas-clemente/quic-go/utils"
)

var _ = Describe("PRR sender", func() {
	var (
		prr PrrSender
	)

	BeforeEach(func() {
		prr = PrrSender{}
	})

	It("single loss results in send on every other ack", func() {
		num_packets_in_flight := protocol.ByteCount(50)
		bytes_in_flight := num_packets_in_flight * protocol.DefaultTCPMSS
		ssthresh_after_loss := num_packets_in_flight / 2
		congestion_window := ssthresh_after_loss * protocol.DefaultTCPMSS

		prr.OnPacketLost(bytes_in_flight)
		// Ack a packet. PRR allows one packet to leave immediately.
		prr.OnPacketAcked(protocol.DefaultTCPMSS)
		bytes_in_flight -= protocol.DefaultTCPMSS
		Expect(prr.TimeUntilSend(congestion_window, bytes_in_flight, ssthresh_after_loss*protocol.DefaultTCPMSS)).To(BeZero())
		// Send retransmission.
		prr.OnPacketSent(protocol.DefaultTCPMSS)
		// PRR shouldn't allow sending any more packets.
		Expect(prr.TimeUntilSend(congestion_window, bytes_in_flight, ssthresh_after_loss*protocol.DefaultTCPMSS)).To(Equal(utils.InfDuration))

		// One packet is lost, and one ack was consumed above. PRR now paces
		// transmissions through the remaining 48 acks. PRR will alternatively
		// disallow and allow a packet to be sent in response to an ack.
		for i := protocol.ByteCount(0); i < ssthresh_after_loss-1; i++ {
			// Ack a packet. PRR shouldn't allow sending a packet in response.
			prr.OnPacketAcked(protocol.DefaultTCPMSS)
			bytes_in_flight -= protocol.DefaultTCPMSS
			Expect(prr.TimeUntilSend(congestion_window, bytes_in_flight, ssthresh_after_loss*protocol.DefaultTCPMSS)).To(Equal(utils.InfDuration))
			// Ack another packet. PRR should now allow sending a packet in response.
			prr.OnPacketAcked(protocol.DefaultTCPMSS)
			bytes_in_flight -= protocol.DefaultTCPMSS
			Expect(prr.TimeUntilSend(congestion_window, bytes_in_flight, ssthresh_after_loss*protocol.DefaultTCPMSS)).To(BeZero())
			// Send a packet in response.
			prr.OnPacketSent(protocol.DefaultTCPMSS)
			bytes_in_flight += protocol.DefaultTCPMSS
		}

		// Since bytes_in_flight is now equal to congestion_window, PRR now maintains
		// packet conservation, allowing one packet to be sent in response to an ack.
		Expect(bytes_in_flight).To(Equal(congestion_window))
		for i := 0; i < 10; i++ {
			// Ack a packet.
			prr.OnPacketAcked(protocol.DefaultTCPMSS)
			bytes_in_flight -= protocol.DefaultTCPMSS
			Expect(prr.TimeUntilSend(congestion_window, bytes_in_flight, ssthresh_after_loss*protocol.DefaultTCPMSS)).To(BeZero())
			// Send a packet in response, since PRR allows it.
			prr.OnPacketSent(protocol.DefaultTCPMSS)
			bytes_in_flight += protocol.DefaultTCPMSS

			// Since bytes_in_flight is equal to the congestion_window,
			// PRR disallows sending.
			Expect(bytes_in_flight).To(Equal(congestion_window))
			Expect(prr.TimeUntilSend(congestion_window, bytes_in_flight, ssthresh_after_loss*protocol.DefaultTCPMSS)).To(Equal(utils.InfDuration))
		}

	})

	It("burst loss results in slow start", func() {
		bytes_in_flight := protocol.ByteCount(20 * protocol.DefaultTCPMSS)
		const num_packets_lost = 13
		const ssthresh_after_loss = 10
		const congestion_window = ssthresh_after_loss * protocol.DefaultTCPMSS

		// Lose 13 packets.
		bytes_in_flight -= num_packets_lost * protocol.DefaultTCPMSS
		prr.OnPacketLost(bytes_in_flight)

		// PRR-SSRB will allow the following 3 acks to send up to 2 packets.
		for i := 0; i < 3; i++ {
			prr.OnPacketAcked(protocol.DefaultTCPMSS)
			bytes_in_flight -= protocol.DefaultTCPMSS
			// PRR-SSRB should allow two packets to be sent.
			for j := 0; j < 2; j++ {
				Expect(prr.TimeUntilSend(congestion_window, bytes_in_flight, ssthresh_after_loss*protocol.DefaultTCPMSS)).To(BeZero())
				// Send a packet in response.
				prr.OnPacketSent(protocol.DefaultTCPMSS)
				bytes_in_flight += protocol.DefaultTCPMSS
			}
			// PRR should allow no more than 2 packets in response to an ack.
			Expect(prr.TimeUntilSend(congestion_window, bytes_in_flight, ssthresh_after_loss*protocol.DefaultTCPMSS)).To(Equal(utils.InfDuration))
		}

		// Out of SSRB mode, PRR allows one send in response to each ack.
		for i := 0; i < 10; i++ {
			prr.OnPacketAcked(protocol.DefaultTCPMSS)
			bytes_in_flight -= protocol.DefaultTCPMSS
			Expect(prr.TimeUntilSend(congestion_window, bytes_in_flight, ssthresh_after_loss*protocol.DefaultTCPMSS)).To(BeZero())
			// Send a packet in response.
			prr.OnPacketSent(protocol.DefaultTCPMSS)
			bytes_in_flight += protocol.DefaultTCPMSS
		}
	})
})
@@ -0,0 +1,182 @@
package congestion

import (
	"time"

	"github.com/lucas-clemente/quic-go/utils"
)

const (
	initialRTTus          = 100 * 1000
	rttAlpha      float32 = 0.125
	oneMinusAlpha float32 = (1 - rttAlpha)
	rttBeta       float32 = 0.25
	oneMinusBeta  float32 = (1 - rttBeta)
	halfWindow    float32 = 0.5
	quarterWindow float32 = 0.25
)

type rttSample struct {
	rtt  time.Duration
	time time.Time
}

// RTTStats provides round-trip statistics
type RTTStats struct {
	initialRTTus int64

	recentMinRTTwindow time.Duration
	minRTT             time.Duration
	latestRTT          time.Duration
	smoothedRTT        time.Duration
	meanDeviation      time.Duration

	numMinRTTsamplesRemaining uint32

	newMinRTT        rttSample
	recentMinRTT     rttSample
	halfWindowRTT    rttSample
	quarterWindowRTT rttSample
}

// NewRTTStats makes a properly initialized RTTStats object
func NewRTTStats() *RTTStats {
	return &RTTStats{
		initialRTTus:       initialRTTus,
		recentMinRTTwindow: utils.InfDuration,
	}
}

// InitialRTTus is the initial RTT in us
func (r *RTTStats) InitialRTTus() int64 { return r.initialRTTus }

// MinRTT returns the minRTT for the entire connection.
// May return Zero if no valid updates have occurred.
func (r *RTTStats) MinRTT() time.Duration { return r.minRTT }

// LatestRTT returns the most recent rtt measurement.
// May return Zero if no valid updates have occurred.
func (r *RTTStats) LatestRTT() time.Duration { return r.latestRTT }

// RecentMinRTT returns the minRTT since SampleNewRecentMinRTT has been called, or the
// minRTT for the entire connection if SampleNewRecentMinRTT was never called.
func (r *RTTStats) RecentMinRTT() time.Duration { return r.recentMinRTT.rtt }

// SmoothedRTT returns the EWMA smoothed RTT for the connection.
// May return Zero if no valid updates have occurred.
func (r *RTTStats) SmoothedRTT() time.Duration { return r.smoothedRTT }

// GetQuarterWindowRTT gets the quarter window RTT
func (r *RTTStats) GetQuarterWindowRTT() time.Duration { return r.quarterWindowRTT.rtt }

// GetHalfWindowRTT gets the half window RTT
func (r *RTTStats) GetHalfWindowRTT() time.Duration { return r.halfWindowRTT.rtt }

// MeanDeviation gets the mean deviation
func (r *RTTStats) MeanDeviation() time.Duration { return r.meanDeviation }

// SetRecentMinRTTwindow sets how old a recent min rtt sample can be.
func (r *RTTStats) SetRecentMinRTTwindow(recentMinRTTwindow time.Duration) {
	r.recentMinRTTwindow = recentMinRTTwindow
}

// UpdateRTT updates the RTT based on a new sample.
func (r *RTTStats) UpdateRTT(sendDelta, ackDelay time.Duration, now time.Time) {
	if sendDelta == utils.InfDuration || sendDelta <= 0 {
		utils.Debugf("Ignoring measured sendDelta, because it is either infinite, zero, or negative: %d", sendDelta/time.Microsecond)
		return
	}

	// Update r.minRTT first. r.minRTT does not use an rttSample corrected for
	// ackDelay but the raw observed sendDelta, since poor clock granularity at
	// the client may cause a high ackDelay to result in underestimation of the
	// r.minRTT.
	if r.minRTT == 0 || r.minRTT > sendDelta {
		r.minRTT = sendDelta
	}
	r.updateRecentMinRTT(sendDelta, now)

	// Correct for ackDelay if information received from the peer results in a
	// positive RTT sample. Otherwise, we use the sendDelta as a reasonable
	// measure for smoothedRTT.
	sample := sendDelta
	if sample > ackDelay {
		sample -= ackDelay
	}
	r.latestRTT = sample
	// First time call.
	if r.smoothedRTT == 0 {
		r.smoothedRTT = sample
		r.meanDeviation = sample / 2
	} else {
		r.meanDeviation = time.Duration(oneMinusBeta*float32(r.meanDeviation/time.Microsecond)+rttBeta*float32(utils.AbsDuration(r.smoothedRTT-sample)/time.Microsecond)) * time.Microsecond
		r.smoothedRTT = time.Duration((float32(r.smoothedRTT/time.Microsecond)*oneMinusAlpha)+(float32(sample/time.Microsecond)*rttAlpha)) * time.Microsecond
	}
}

func (r *RTTStats) updateRecentMinRTT(sample time.Duration, now time.Time) { // Recent minRTT update.
	if r.numMinRTTsamplesRemaining > 0 {
		r.numMinRTTsamplesRemaining--
		if r.newMinRTT.rtt == 0 || sample <= r.newMinRTT.rtt {
			r.newMinRTT = rttSample{rtt: sample, time: now}
		}
		if r.numMinRTTsamplesRemaining == 0 {
			r.recentMinRTT = r.newMinRTT
			r.halfWindowRTT = r.newMinRTT
			r.quarterWindowRTT = r.newMinRTT
		}
	}

	// Update the three recent rtt samples.
	if r.recentMinRTT.rtt == 0 || sample <= r.recentMinRTT.rtt {
		r.recentMinRTT = rttSample{rtt: sample, time: now}
		r.halfWindowRTT = r.recentMinRTT
		r.quarterWindowRTT = r.recentMinRTT
	} else if sample <= r.halfWindowRTT.rtt {
		r.halfWindowRTT = rttSample{rtt: sample, time: now}
		r.quarterWindowRTT = r.halfWindowRTT
	} else if sample <= r.quarterWindowRTT.rtt {
		r.quarterWindowRTT = rttSample{rtt: sample, time: now}
	}

	// Expire old min rtt samples.
	if r.recentMinRTT.time.Before(now.Add(-r.recentMinRTTwindow)) {
		r.recentMinRTT = r.halfWindowRTT
		r.halfWindowRTT = r.quarterWindowRTT
		r.quarterWindowRTT = rttSample{rtt: sample, time: now}
	} else if r.halfWindowRTT.time.Before(now.Add(-time.Duration(float32(r.recentMinRTTwindow/time.Microsecond)*halfWindow) * time.Microsecond)) {
		r.halfWindowRTT = r.quarterWindowRTT
		r.quarterWindowRTT = rttSample{rtt: sample, time: now}
	} else if r.quarterWindowRTT.time.Before(now.Add(-time.Duration(float32(r.recentMinRTTwindow/time.Microsecond)*quarterWindow) * time.Microsecond)) {
		r.quarterWindowRTT = rttSample{rtt: sample, time: now}
	}
}

// SampleNewRecentMinRTT forces RTTStats to sample a new recent min rtt within the next
// |numSamples| UpdateRTT calls.
func (r *RTTStats) SampleNewRecentMinRTT(numSamples uint32) {
	r.numMinRTTsamplesRemaining = numSamples
	r.newMinRTT = rttSample{}
}

// OnConnectionMigration is called when connection migrates and rtt measurement needs to be reset.
func (r *RTTStats) OnConnectionMigration() {
	r.latestRTT = 0
	r.minRTT = 0
	r.smoothedRTT = 0
	r.meanDeviation = 0
	r.initialRTTus = initialRTTus
	r.numMinRTTsamplesRemaining = 0
	r.recentMinRTTwindow = utils.InfDuration
	r.recentMinRTT = rttSample{}
	r.halfWindowRTT = rttSample{}
	r.quarterWindowRTT = rttSample{}
}

// ExpireSmoothedMetrics causes the smoothed_rtt to be increased to the latest_rtt if the latest_rtt
// is larger. The mean deviation is increased to the most recent deviation if
// it's larger.
func (r *RTTStats) ExpireSmoothedMetrics() {
	r.meanDeviation = utils.MaxDuration(r.meanDeviation, utils.AbsDuration(r.smoothedRTT-r.latestRTT))
	r.smoothedRTT = utils.MaxDuration(r.smoothedRTT, r.latestRTT)
}
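A quick numeric check of the EWMA above (rttAlpha = 0.125, rttBeta = 0.25), matching the expectations in the test file that follows; this is a standalone sketch, not part of the vendored code:

package main

import (
	"fmt"
	"time"

	"github.com/lucas-clemente/quic-go/congestion"
)

func main() {
	rtt := congestion.NewRTTStats()

	// First sample: smoothedRTT = sample, meanDeviation = sample/2.
	rtt.UpdateRTT(10*time.Millisecond, 0, time.Time{})
	fmt.Println(rtt.SmoothedRTT(), rtt.MeanDeviation()) // 10ms, 5ms

	// Second sample of 20ms:
	//   smoothedRTT   = 0.875*10ms + 0.125*20ms        = 11.25ms
	//   meanDeviation = 0.75*5ms   + 0.25*|10ms - 20ms| = 6.25ms
	rtt.UpdateRTT(20*time.Millisecond, 0, time.Time{})
	fmt.Println(rtt.SmoothedRTT(), rtt.MeanDeviation()) // 11.25ms, 6.25ms
}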
214 vendor/github.com/lucas-clemente/quic-go/congestion/rtt_stats_test.go generated vendored Normal file
@@ -0,0 +1,214 @@
package congestion

import (
	"time"

	"github.com/lucas-clemente/quic-go/utils"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

var _ = Describe("RTT stats", func() {
	var (
		rttStats *RTTStats
	)

	BeforeEach(func() {
		rttStats = NewRTTStats()
	})

	It("DefaultsBeforeUpdate", func() {
		Expect(rttStats.InitialRTTus()).To(BeNumerically(">", 0))
		Expect(rttStats.MinRTT()).To(Equal(time.Duration(0)))
		Expect(rttStats.SmoothedRTT()).To(Equal(time.Duration(0)))
	})

	It("SmoothedRTT", func() {
		// Verify that ack_delay is corrected for in Smoothed RTT.
		rttStats.UpdateRTT((300 * time.Millisecond), (100 * time.Millisecond), time.Time{})
		Expect(rttStats.LatestRTT()).To(Equal((200 * time.Millisecond)))
		Expect(rttStats.SmoothedRTT()).To(Equal((200 * time.Millisecond)))
		// Verify that effective RTT of zero does not change Smoothed RTT.
		rttStats.UpdateRTT((200 * time.Millisecond), (200 * time.Millisecond), time.Time{})
		Expect(rttStats.LatestRTT()).To(Equal((200 * time.Millisecond)))
		Expect(rttStats.SmoothedRTT()).To(Equal((200 * time.Millisecond)))
		// Verify that large erroneous ack_delay does not change Smoothed RTT.
		rttStats.UpdateRTT((200 * time.Millisecond), (300 * time.Millisecond), time.Time{})
		Expect(rttStats.LatestRTT()).To(Equal((200 * time.Millisecond)))
		Expect(rttStats.SmoothedRTT()).To(Equal((200 * time.Millisecond)))
	})

	It("MinRTT", func() {
		rttStats.UpdateRTT((200 * time.Millisecond), 0, time.Time{})
		Expect(rttStats.MinRTT()).To(Equal((200 * time.Millisecond)))
		Expect(rttStats.RecentMinRTT()).To(Equal((200 * time.Millisecond)))
		rttStats.UpdateRTT((10 * time.Millisecond), 0, time.Time{}.Add((10 * time.Millisecond)))
		Expect(rttStats.MinRTT()).To(Equal((10 * time.Millisecond)))
		Expect(rttStats.RecentMinRTT()).To(Equal((10 * time.Millisecond)))
		rttStats.UpdateRTT((50 * time.Millisecond), 0, time.Time{}.Add((20 * time.Millisecond)))
		Expect(rttStats.MinRTT()).To(Equal((10 * time.Millisecond)))
		Expect(rttStats.RecentMinRTT()).To(Equal((10 * time.Millisecond)))
		rttStats.UpdateRTT((50 * time.Millisecond), 0, time.Time{}.Add((30 * time.Millisecond)))
		Expect(rttStats.MinRTT()).To(Equal((10 * time.Millisecond)))
		Expect(rttStats.RecentMinRTT()).To(Equal((10 * time.Millisecond)))
		rttStats.UpdateRTT((50 * time.Millisecond), 0, time.Time{}.Add((40 * time.Millisecond)))
		Expect(rttStats.MinRTT()).To(Equal((10 * time.Millisecond)))
		Expect(rttStats.RecentMinRTT()).To(Equal((10 * time.Millisecond)))
		// Verify that ack_delay does not go into recording of MinRTT_.
		rttStats.UpdateRTT((7 * time.Millisecond), (2 * time.Millisecond), time.Time{}.Add((50 * time.Millisecond)))
		Expect(rttStats.MinRTT()).To(Equal((7 * time.Millisecond)))
		Expect(rttStats.RecentMinRTT()).To(Equal((7 * time.Millisecond)))
	})

	It("RecentMinRTT", func() {
		rttStats.UpdateRTT((10 * time.Millisecond), 0, time.Time{})
		Expect(rttStats.MinRTT()).To(Equal((10 * time.Millisecond)))
		Expect(rttStats.RecentMinRTT()).To(Equal((10 * time.Millisecond)))

		rttStats.SampleNewRecentMinRTT(4)
		for i := 0; i < 3; i++ {
			rttStats.UpdateRTT((50 * time.Millisecond), 0, time.Time{})
			Expect(rttStats.MinRTT()).To(Equal((10 * time.Millisecond)))
			Expect(rttStats.RecentMinRTT()).To(Equal((10 * time.Millisecond)))
		}
		rttStats.UpdateRTT((50 * time.Millisecond),
			0, time.Time{})
		Expect(rttStats.MinRTT()).To(Equal((10 * time.Millisecond)))
		Expect(rttStats.RecentMinRTT()).To(Equal((50 * time.Millisecond)))
	})

	It("WindowedRecentMinRTT", func() {
		// Set the window to 99ms, so 25ms is more than a quarter rtt.
		rttStats.SetRecentMinRTTwindow((99 * time.Millisecond))

		now := time.Time{}
		rtt_sample := (10 * time.Millisecond)
		rttStats.UpdateRTT(rtt_sample, 0, now)
		Expect(rttStats.MinRTT()).To(Equal((10 * time.Millisecond)))
		Expect(rttStats.RecentMinRTT()).To(Equal((10 * time.Millisecond)))

		// Gradually increase the rtt samples and ensure the RecentMinRTT starts
		// rising.
		for i := 0; i < 8; i++ {
			now = now.Add((25 * time.Millisecond))
			rtt_sample += (10 * time.Millisecond)
			rttStats.UpdateRTT(rtt_sample, 0, now)
			Expect(rttStats.MinRTT()).To(Equal((10 * time.Millisecond)))
			Expect(rttStats.GetQuarterWindowRTT()).To(Equal(rtt_sample))
			Expect(rttStats.GetHalfWindowRTT()).To(Equal(rtt_sample - (10 * time.Millisecond)))
			if i < 3 {
				Expect(rttStats.RecentMinRTT()).To(Equal(10 * time.Millisecond))
			} else if i < 5 {
				Expect(rttStats.RecentMinRTT()).To(Equal(30 * time.Millisecond))
			} else if i < 7 {
				Expect(rttStats.RecentMinRTT()).To(Equal(50 * time.Millisecond))
			} else {
				Expect(rttStats.RecentMinRTT()).To(Equal(70 * time.Millisecond))
			}
		}

		// A new quarter rtt low sets that, but nothing else.
		rtt_sample -= (5 * time.Millisecond)
		rttStats.UpdateRTT(rtt_sample, 0, now)
		Expect(rttStats.MinRTT()).To(Equal((10 * time.Millisecond)))
		Expect(rttStats.GetQuarterWindowRTT()).To(Equal(rtt_sample))
		Expect(rttStats.GetHalfWindowRTT()).To(Equal(rtt_sample - (5 * time.Millisecond)))
		Expect(rttStats.RecentMinRTT()).To(Equal((70 * time.Millisecond)))

		// A new half rtt low sets that and the quarter rtt low.
		rtt_sample -= (15 * time.Millisecond)
		rttStats.UpdateRTT(rtt_sample, 0, now)
		Expect(rttStats.MinRTT()).To(Equal((10 * time.Millisecond)))
		Expect(rttStats.GetQuarterWindowRTT()).To(Equal(rtt_sample))
		Expect(rttStats.GetHalfWindowRTT()).To(Equal(rtt_sample))
		Expect(rttStats.RecentMinRTT()).To(Equal((70 * time.Millisecond)))

		// A new full window loss sets the RecentMinRTT, but not MinRTT.
		rtt_sample = (65 * time.Millisecond)
		rttStats.UpdateRTT(rtt_sample, 0, now)
		Expect(rttStats.MinRTT()).To(Equal((10 * time.Millisecond)))
		Expect(rttStats.GetQuarterWindowRTT()).To(Equal(rtt_sample))
		Expect(rttStats.GetHalfWindowRTT()).To(Equal(rtt_sample))
		Expect(rttStats.RecentMinRTT()).To(Equal(rtt_sample))

		// A new all time low sets both the MinRTT and the RecentMinRTT.
		rtt_sample = (5 * time.Millisecond)
		rttStats.UpdateRTT(rtt_sample, 0, now)

		Expect(rttStats.MinRTT()).To(Equal(rtt_sample))
		Expect(rttStats.GetQuarterWindowRTT()).To(Equal(rtt_sample))
		Expect(rttStats.GetHalfWindowRTT()).To(Equal(rtt_sample))
		Expect(rttStats.RecentMinRTT()).To(Equal(rtt_sample))
	})

	It("ExpireSmoothedMetrics", func() {
		initial_rtt := (10 * time.Millisecond)
		rttStats.UpdateRTT(initial_rtt, 0, time.Time{})
		Expect(rttStats.MinRTT()).To(Equal(initial_rtt))
		Expect(rttStats.RecentMinRTT()).To(Equal(initial_rtt))
		Expect(rttStats.SmoothedRTT()).To(Equal(initial_rtt))

		Expect(rttStats.MeanDeviation()).To(Equal(initial_rtt / 2))

		// Update once with a 20ms RTT.
		doubled_rtt := initial_rtt * (2)
		rttStats.UpdateRTT(doubled_rtt, 0, time.Time{})
		Expect(rttStats.SmoothedRTT()).To(Equal(time.Duration(float32(initial_rtt) * 1.125)))

		// Expire the smoothed metrics, increasing smoothed rtt and mean deviation.
		rttStats.ExpireSmoothedMetrics()
		Expect(rttStats.SmoothedRTT()).To(Equal(doubled_rtt))
		Expect(rttStats.MeanDeviation()).To(Equal(time.Duration(float32(initial_rtt) * 0.875)))

		// Now go back down to 5ms and expire the smoothed metrics, and ensure the
		// mean deviation increases to 15ms.
		half_rtt := initial_rtt / 2
		rttStats.UpdateRTT(half_rtt, 0, time.Time{})
		Expect(doubled_rtt).To(BeNumerically(">", rttStats.SmoothedRTT()))
		Expect(initial_rtt).To(BeNumerically("<", rttStats.MeanDeviation()))
	})

	It("UpdateRTTWithBadSendDeltas", func() {
		// Make sure we ignore bad RTTs.
		// base::test::MockLog log;

		initial_rtt := (10 * time.Millisecond)
		rttStats.UpdateRTT(initial_rtt, 0, time.Time{})
		Expect(rttStats.MinRTT()).To(Equal(initial_rtt))
		Expect(rttStats.RecentMinRTT()).To(Equal(initial_rtt))
		Expect(rttStats.SmoothedRTT()).To(Equal(initial_rtt))

		bad_send_deltas := []time.Duration{
			0,
			utils.InfDuration,
			-1000 * time.Microsecond,
		}
		// log.StartCapturingLogs();

		for _, bad_send_delta := range bad_send_deltas {
			// SCOPED_TRACE(Message() << "bad_send_delta = "
			//  << bad_send_delta.ToMicroseconds());
			// EXPECT_CALL(log, Log(LOG_WARNING, _, _, _, HasSubstr("Ignoring")));
			rttStats.UpdateRTT(bad_send_delta, 0, time.Time{})
			Expect(rttStats.MinRTT()).To(Equal(initial_rtt))
			Expect(rttStats.RecentMinRTT()).To(Equal(initial_rtt))
			Expect(rttStats.SmoothedRTT()).To(Equal(initial_rtt))
		}
	})

	It("ResetAfterConnectionMigrations", func() {
		rttStats.UpdateRTT((300 * time.Millisecond), (100 * time.Millisecond), time.Time{})
		Expect(rttStats.LatestRTT()).To(Equal((200 * time.Millisecond)))
		Expect(rttStats.SmoothedRTT()).To(Equal((200 * time.Millisecond)))
		Expect(rttStats.MinRTT()).To(Equal((300 * time.Millisecond)))
		Expect(rttStats.RecentMinRTT()).To(Equal(300 * time.Millisecond))

		// Reset rtt stats on connection migrations.
		rttStats.OnConnectionMigration()
		Expect(rttStats.LatestRTT()).To(Equal(time.Duration(0)))
		Expect(rttStats.SmoothedRTT()).To(Equal(time.Duration(0)))
		Expect(rttStats.MinRTT()).To(Equal(time.Duration(0)))
		Expect(rttStats.RecentMinRTT()).To(Equal(time.Duration(0)))
	})

})
@@ -0,0 +1,8 @@
package congestion

import "github.com/lucas-clemente/quic-go/protocol"

type connectionStats struct {
	slowstartPacketsLost protocol.PacketNumber
	slowstartBytesLost   protocol.ByteCount
}
@@ -0,0 +1,54 @@
package quic

import (
	"net"
	"sync"
)

type connection interface {
	Write([]byte) error
	Read([]byte) (int, net.Addr, error)
	Close() error
	LocalAddr() net.Addr
	RemoteAddr() net.Addr
	SetCurrentRemoteAddr(net.Addr)
}

type conn struct {
	mutex sync.RWMutex

	pconn       net.PacketConn
	currentAddr net.Addr
}

var _ connection = &conn{}

func (c *conn) Write(p []byte) error {
	_, err := c.pconn.WriteTo(p, c.currentAddr)
	return err
}

func (c *conn) Read(p []byte) (int, net.Addr, error) {
	return c.pconn.ReadFrom(p)
}

func (c *conn) SetCurrentRemoteAddr(addr net.Addr) {
	c.mutex.Lock()
	c.currentAddr = addr
	c.mutex.Unlock()
}

func (c *conn) LocalAddr() net.Addr {
	return c.pconn.LocalAddr()
}

func (c *conn) RemoteAddr() net.Addr {
	c.mutex.RLock()
	addr := c.currentAddr
	c.mutex.RUnlock()
	return addr
}

func (c *conn) Close() error {
	return c.pconn.Close()
}
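For orientation, the same pattern conn implements, sketched directly on a UDP PacketConn; conn itself is unexported, so this standalone example uses only the net package, and the addresses and ports are arbitrary:

package main

import (
	"fmt"
	"net"
	"time"
)

func main() {
	pconn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 0})
	if err != nil {
		panic(err)
	}
	defer pconn.Close()

	// The remembered remote address, i.e. what conn.currentAddr holds.
	remote := &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 4433} // assumed peer

	// conn.Write: always send to the current remote address.
	if _, err := pconn.WriteTo([]byte("hello"), remote); err != nil {
		fmt.Println("write failed:", err)
	}

	// conn.Read: ReadFrom also reports the sender, which is what lets
	// SetCurrentRemoteAddr follow a peer whose address changed.
	buf := make([]byte, 1500)
	pconn.SetReadDeadline(time.Now().Add(100 * time.Millisecond))
	if n, from, err := pconn.ReadFrom(buf); err == nil {
		fmt.Printf("got %d bytes from %s\n", n, from)
		remote = from.(*net.UDPAddr) // the SetCurrentRemoteAddr step
	}
}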
@@ -0,0 +1,108 @@
package quic

import (
	"bytes"
	"io"
	"net"
	"time"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

type mockPacketConn struct {
	addr          net.Addr
	dataToRead    []byte
	dataReadFrom  net.Addr
	readErr       error
	dataWritten   bytes.Buffer
	dataWrittenTo net.Addr
	closed        bool
}

func (c *mockPacketConn) ReadFrom(b []byte) (int, net.Addr, error) {
	if c.readErr != nil {
		return 0, nil, c.readErr
	}
	if c.dataToRead == nil { // block if there's no data
		time.Sleep(time.Hour)
		return 0, nil, io.EOF
	}
	n := copy(b, c.dataToRead)
	c.dataToRead = nil
	return n, c.dataReadFrom, nil
}
func (c *mockPacketConn) WriteTo(b []byte, addr net.Addr) (n int, err error) {
	c.dataWrittenTo = addr
	return c.dataWritten.Write(b)
}
func (c *mockPacketConn) Close() error                       { c.closed = true; return nil }
func (c *mockPacketConn) LocalAddr() net.Addr                { return c.addr }
func (c *mockPacketConn) SetDeadline(t time.Time) error      { panic("not implemented") }
func (c *mockPacketConn) SetReadDeadline(t time.Time) error  { panic("not implemented") }
func (c *mockPacketConn) SetWriteDeadline(t time.Time) error { panic("not implemented") }

var _ net.PacketConn = &mockPacketConn{}

var _ = Describe("Connection", func() {
	var c *conn
	var packetConn *mockPacketConn

	BeforeEach(func() {
		addr := &net.UDPAddr{
			IP:   net.IPv4(192, 168, 100, 200),
			Port: 1337,
		}
		packetConn = &mockPacketConn{}
		c = &conn{
			currentAddr: addr,
			pconn:       packetConn,
		}
	})

	It("writes", func() {
		err := c.Write([]byte("foobar"))
		Expect(err).ToNot(HaveOccurred())
		Expect(packetConn.dataWritten.Bytes()).To(Equal([]byte("foobar")))
		Expect(packetConn.dataWrittenTo.String()).To(Equal("192.168.100.200:1337"))
	})

	It("reads", func() {
		packetConn.dataToRead = []byte("foo")
		packetConn.dataReadFrom = &net.UDPAddr{IP: net.IPv4(127, 0, 0, 1), Port: 1336}
		p := make([]byte, 10)
		n, raddr, err := c.Read(p)
		Expect(err).ToNot(HaveOccurred())
		Expect(raddr.String()).To(Equal("127.0.0.1:1336"))
		Expect(n).To(Equal(3))
		Expect(p[0:3]).To(Equal([]byte("foo")))
	})

	It("gets the remote address", func() {
		Expect(c.RemoteAddr().String()).To(Equal("192.168.100.200:1337"))
	})

	It("gets the local address", func() {
		addr := &net.UDPAddr{
			IP:   net.IPv4(192, 168, 0, 1),
			Port: 1234,
		}
		packetConn.addr = addr
		Expect(c.LocalAddr()).To(Equal(addr))
	})

	It("changes the remote address", func() {
		addr := &net.UDPAddr{
			IP:   net.IPv4(127, 0, 0, 1),
			Port: 7331,
		}
		c.SetCurrentRemoteAddr(addr)
		Expect(c.RemoteAddr().String()).To(Equal(addr.String()))
	})

	It("closes", func() {
		err := c.Close()
		Expect(err).ToNot(HaveOccurred())
		Expect(packetConn.closed).To(BeTrue())
	})
})
@@ -0,0 +1,9 @@
package crypto

import "github.com/lucas-clemente/quic-go/protocol"

// An AEAD implements QUIC's authenticated encryption and associated data
type AEAD interface {
	Open(dst, src []byte, packetNumber protocol.PacketNumber, associatedData []byte) ([]byte, error)
	Seal(dst, src []byte, packetNumber protocol.PacketNumber, associatedData []byte) []byte
}
@@ -0,0 +1,58 @@
package crypto

import (
	"crypto/cipher"
	"errors"

	"github.com/lucas-clemente/aes12"

	"github.com/lucas-clemente/quic-go/protocol"
)

type aeadAESGCM struct {
	otherIV   []byte
	myIV      []byte
	encrypter cipher.AEAD
	decrypter cipher.AEAD
}

// NewAEADAESGCM creates an AEAD using AES-GCM with 12 bytes tag size
//
// AES-GCM support is a bit hacky, since the go stdlib does not support 12 byte
// tag size, and couples the cipher and aes packages closely.
// See https://github.com/lucas-clemente/aes12.
func NewAEADAESGCM(otherKey []byte, myKey []byte, otherIV []byte, myIV []byte) (AEAD, error) {
	if len(myKey) != 16 || len(otherKey) != 16 || len(myIV) != 4 || len(otherIV) != 4 {
		return nil, errors.New("AES-GCM: expected 16-byte keys and 4-byte IVs")
	}
	encrypterCipher, err := aes12.NewCipher(myKey)
	if err != nil {
		return nil, err
	}
	encrypter, err := aes12.NewGCM(encrypterCipher)
	if err != nil {
		return nil, err
	}
	decrypterCipher, err := aes12.NewCipher(otherKey)
	if err != nil {
		return nil, err
	}
	decrypter, err := aes12.NewGCM(decrypterCipher)
	if err != nil {
		return nil, err
	}
	return &aeadAESGCM{
		otherIV:   otherIV,
		myIV:      myIV,
		encrypter: encrypter,
		decrypter: decrypter,
	}, nil
}

func (aead *aeadAESGCM) Open(dst, src []byte, packetNumber protocol.PacketNumber, associatedData []byte) ([]byte, error) {
	return aead.decrypter.Open(dst, makeNonce(aead.otherIV, packetNumber), src, associatedData)
}

func (aead *aeadAESGCM) Seal(dst, src []byte, packetNumber protocol.PacketNumber, associatedData []byte) []byte {
	return aead.encrypter.Seal(dst, makeNonce(aead.myIV, packetNumber), src, associatedData)
}
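To make the key/IV symmetry concrete, a rough roundtrip sketch built on NewAEADAESGCM from both endpoints' perspectives; the key and IV bytes are arbitrary placeholders rather than handshake output, and the example assumes the nonce derivation is symmetric for a given IV and packet number, as the Seal/Open pair above suggests:

package main

import (
	"fmt"

	"github.com/lucas-clemente/quic-go/crypto"
	"github.com/lucas-clemente/quic-go/protocol"
)

func main() {
	// Arbitrary 16-byte keys and 4-byte IVs, one pair per direction.
	clientKey := []byte("0123456789abcdef")
	serverKey := []byte("fedcba9876543210")
	clientIV := []byte{1, 2, 3, 4}
	serverIV := []byte{5, 6, 7, 8}

	// Each side passes (otherKey, myKey, otherIV, myIV), so the arguments mirror.
	clientAEAD, err := crypto.NewAEADAESGCM(serverKey, clientKey, serverIV, clientIV)
	if err != nil {
		panic(err)
	}
	serverAEAD, err := crypto.NewAEADAESGCM(clientKey, serverKey, clientIV, serverIV)
	if err != nil {
		panic(err)
	}

	pn := protocol.PacketNumber(42)
	ad := []byte("header") // associated data: authenticated, but not encrypted
	sealed := clientAEAD.Seal(nil, []byte("hello quic"), pn, ad)

	opened, err := serverAEAD.Open(nil, sealed, pn, ad)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(opened)) // hello quic
}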
Some files were not shown because too many files have changed in this diff.