package kcp

import (
	"encoding/binary"
	"sync/atomic"

	"github.com/klauspost/reedsolomon"
)

const (
	fecHeaderSize      = 6
	fecHeaderSizePlus2 = fecHeaderSize + 2 // plus 2B data size
	typeData           = 0xf1
	typeFEC            = 0xf2
)
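
// Wire layout implied by decodeBytes/encode below (a descriptive sketch, not
// original documentation): every FEC packet starts with a 6-byte header,
//
//	|<- 4B seqid (little endian) ->|<- 2B flag ->| payload ... |
//
// where flag is typeData or typeFEC. For typeData packets the first 2 bytes of
// the payload carry the packet size, which is why callers reserve
// fecHeaderSizePlus2 bytes in front of their data.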

type (
	// fecPacket is a decoded FEC packet
	fecPacket struct {
		seqid uint32
		flag  uint16
		data  []byte
	}

	// fecDecoder for decoding incoming packets
	fecDecoder struct {
		rxlimit      int // queue size limit
		dataShards   int
		parityShards int
		shardSize    int
		rx           []fecPacket // ordered receive queue

		// caches
		decodeCache [][]byte
		flagCache   []bool

		// zeros
		zeros []byte

		// RS decoder
		codec reedsolomon.Encoder
	}
)
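
// Note (descriptive, added for clarity): decodeCache and flagCache are sized
// to one full shard group and reused on every decode call, so steady-state
// decoding does not allocate per group; zeros is a scratch block of zero bytes
// used to pad shorter shards up to the longest one in a group.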

func newFECDecoder(rxlimit, dataShards, parityShards int) *fecDecoder {
	if dataShards <= 0 || parityShards <= 0 {
		return nil
	}
	if rxlimit < dataShards+parityShards {
		return nil
	}

	dec := new(fecDecoder)
	dec.rxlimit = rxlimit
	dec.dataShards = dataShards
	dec.parityShards = parityShards
	dec.shardSize = dataShards + parityShards
	codec, err := reedsolomon.New(dataShards, parityShards)
	if err != nil {
		return nil
	}
	dec.codec = codec
	dec.decodeCache = make([][]byte, dec.shardSize)
	dec.flagCache = make([]bool, dec.shardSize)
	dec.zeros = make([]byte, mtuLimit)
	return dec
}
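
// Illustrative construction (the numbers are assumptions, not from the
// original code): with 10 data shards and 3 parity shards, the receive-queue
// limit must cover at least one full shard group, e.g.
//
//	dec := newFECDecoder(3*(10+3), 10, 3) // rxlimit = 39 >= 13
//
// Any smaller rxlimit, or non-positive shard counts, makes the constructor
// return nil.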

// decodeBytes decodes a raw FEC packet: the 6-byte header is parsed into
// seqid/flag and the remainder is copied into a buffer taken from xmitBuf
func (dec *fecDecoder) decodeBytes(data []byte) fecPacket {
	var pkt fecPacket
	pkt.seqid = binary.LittleEndian.Uint32(data)
	pkt.flag = binary.LittleEndian.Uint16(data[4:])
	// allocate memory & copy
	buf := xmitBuf.Get().([]byte)[:len(data)-fecHeaderSize]
	copy(buf, data[fecHeaderSize:])
	pkt.data = buf
	return pkt
}
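
// Note (descriptive, added for clarity): the buffer placed in pkt.data is
// owned by the decoder from here on; decode() recycles it back into xmitBuf
// either when the packet turns out to be a duplicate or when freeRange
// releases its shard group.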

// decode a FEC packet; any data shards recovered from its shard group are returned
func (dec *fecDecoder) decode(pkt fecPacket) (recovered [][]byte) {
	// insertion
	n := len(dec.rx) - 1
	insertIdx := 0
	for i := n; i >= 0; i-- {
		if pkt.seqid == dec.rx[i].seqid { // de-duplicate
			xmitBuf.Put(pkt.data)
			return nil
		} else if _itimediff(pkt.seqid, dec.rx[i].seqid) > 0 { // insertion
			insertIdx = i + 1
			break
		}
	}

	// insert into ordered rx queue
	if insertIdx == n+1 {
		dec.rx = append(dec.rx, pkt)
	} else {
		dec.rx = append(dec.rx, fecPacket{})
		copy(dec.rx[insertIdx+1:], dec.rx[insertIdx:]) // shift right
		dec.rx[insertIdx] = pkt
	}

	// shard range for current packet
	shardBegin := pkt.seqid - pkt.seqid%uint32(dec.shardSize)
	shardEnd := shardBegin + uint32(dec.shardSize) - 1

	// max search range in ordered queue for current shard
	searchBegin := insertIdx - int(pkt.seqid%uint32(dec.shardSize))
	if searchBegin < 0 {
		searchBegin = 0
	}
	searchEnd := searchBegin + dec.shardSize - 1
	if searchEnd >= len(dec.rx) {
		searchEnd = len(dec.rx) - 1
	}
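
	// Worked example (illustrative numbers, not from the original source):
	// with dataShards=3, parityShards=2, shardSize=5, a packet with seqid=17
	// belongs to the group [15..19], i.e. shardBegin = 17-17%5 = 15 and
	// shardEnd = 15+5-1 = 19; searchBegin backs up by 17%5 = 2 slots from the
	// position where the packet was just inserted.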

	// re-construct datashards
	if searchEnd-searchBegin+1 >= dec.dataShards {
		var numshard, numDataShard, first, maxlen int

		// zero cache
		shards := dec.decodeCache
		shardsflag := dec.flagCache
		for k := range dec.decodeCache {
			shards[k] = nil
			shardsflag[k] = false
		}

		// shard assembly
		for i := searchBegin; i <= searchEnd; i++ {
			seqid := dec.rx[i].seqid
			if _itimediff(seqid, shardEnd) > 0 {
				break
			} else if _itimediff(seqid, shardBegin) >= 0 {
				shards[seqid%uint32(dec.shardSize)] = dec.rx[i].data
				shardsflag[seqid%uint32(dec.shardSize)] = true
				numshard++
				if dec.rx[i].flag == typeData {
					numDataShard++
				}
				if numshard == 1 {
					first = i
				}
				if len(dec.rx[i].data) > maxlen {
					maxlen = len(dec.rx[i].data)
				}
			}
		}

		if numDataShard == dec.dataShards {
			// case 1: no data shards lost
			dec.rx = dec.freeRange(first, numshard, dec.rx)
		} else if numshard >= dec.dataShards {
			// case 2: data shards lost, but recoverable from parity shards;
			// pad every present shard with zeros up to maxlen so all shards
			// handed to the codec have equal length
			for k := range shards {
				if shards[k] != nil {
					dlen := len(shards[k])
					shards[k] = shards[k][:maxlen]
					copy(shards[k][dlen:], dec.zeros)
				}
			}
			if err := dec.codec.ReconstructData(shards); err == nil {
				for k := range shards[:dec.dataShards] {
					if !shardsflag[k] {
						recovered = append(recovered, shards[k])
					}
				}
			}
			dec.rx = dec.freeRange(first, numshard, dec.rx)
		}
	}

	// keep rxlimit
	if len(dec.rx) > dec.rxlimit {
		if dec.rx[0].flag == typeData { // record unrecoverable data
			atomic.AddUint64(&DefaultSnmp.FECShortShards, 1)
		}
		dec.rx = dec.freeRange(0, 1, dec.rx)
	}
	return
}
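
// To summarize decode's behavior (descriptive note, not original
// documentation): a duplicate seqid is dropped immediately; a group whose data
// shards all arrived is simply released; a group with at least dataShards
// packets of any kind is handed to reedsolomon.ReconstructData and the missing
// data shards are returned to the caller; anything else waits in dec.rx until
// the rxlimit check evicts it from the front of the queue.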

// free a range of fecPacket, and zero for GC recycling
func (dec *fecDecoder) freeRange(first, n int, q []fecPacket) []fecPacket {
	for i := first; i < first+n; i++ { // free
		xmitBuf.Put(q[i].data)
	}
	copy(q[first:], q[first+n:])
	for i := 0; i < n; i++ { // dereference data
		q[len(q)-1-i].data = nil
	}
	return q[:len(q)-n]
}
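
// Why freeRange nils the tail (descriptive note): after the copy, the last n
// slots of the backing array would still reference buffers that were just
// returned to xmitBuf; clearing those stale references keeps the shortened
// slice from pinning recycled buffers.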

type (
	// fecEncoder for encoding outgoing packets
	fecEncoder struct {
		dataShards   int
		parityShards int
		shardSize    int
		paws         uint32 // Protect Against Wrapped Sequence numbers
		next         uint32 // next seqid

		shardCount int // count the number of datashards collected
		maxSize    int // record maximum data length in datashard

		headerOffset  int // FEC header offset
		payloadOffset int // FEC payload offset

		// caches
		shardCache  [][]byte
		encodeCache [][]byte

		// zeros
		zeros []byte

		// RS encoder
		codec reedsolomon.Encoder
	}
)
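
// Note on paws (descriptive, numbers illustrative): paws is rounded down to a
// multiple of shardSize so that a sequence-number wrap never splits a shard
// group. For example, with dataShards=10 and parityShards=3 (shardSize=13),
//
//	paws = (0xffffffff/13 - 1) * 13 = (330382099 - 1) * 13 = 4294967274
//
// which is the largest multiple of 13 that still leaves room for one full
// group below 2^32.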

func newFECEncoder(dataShards, parityShards, offset int) *fecEncoder {
	if dataShards <= 0 || parityShards <= 0 {
		return nil
	}
	enc := new(fecEncoder)
	enc.dataShards = dataShards
	enc.parityShards = parityShards
	enc.shardSize = dataShards + parityShards
	enc.paws = (0xffffffff/uint32(enc.shardSize) - 1) * uint32(enc.shardSize)
	enc.headerOffset = offset
	enc.payloadOffset = enc.headerOffset + fecHeaderSize

	codec, err := reedsolomon.New(dataShards, parityShards)
	if err != nil {
		return nil
	}
	enc.codec = codec

	// caches
	enc.encodeCache = make([][]byte, enc.shardSize)
	enc.shardCache = make([][]byte, enc.shardSize)
	for k := range enc.shardCache {
		enc.shardCache[k] = make([]byte, mtuLimit)
	}
	enc.zeros = make([]byte, mtuLimit)
	return enc
}
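
// Illustrative construction (assumed values, not part of the original code):
// an encoder that emits 3 parity shards for every 10 data shards, with no
// extra bytes reserved in front of the FEC header:
//
//	enc := newFECEncoder(10, 3, 0)
//
// A non-zero offset shifts the whole FEC header right, leaving room for a
// caller-owned prefix at the start of each packet buffer.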

// encode the packet, output parity shards if we have enough datashards;
// the content of the returned parity shards will change in the next encode
func (enc *fecEncoder) encode(b []byte) (ps [][]byte) {
	enc.markData(b[enc.headerOffset:])
	binary.LittleEndian.PutUint16(b[enc.payloadOffset:], uint16(len(b[enc.payloadOffset:])))

	// copy data to fec datashards
	sz := len(b)
	enc.shardCache[enc.shardCount] = enc.shardCache[enc.shardCount][:sz]
	copy(enc.shardCache[enc.shardCount], b)
	enc.shardCount++

	// record max datashard length
	if sz > enc.maxSize {
		enc.maxSize = sz
	}

	// calculate Reed-Solomon Erasure Code
	if enc.shardCount == enc.dataShards {
		// bzero each datashard's tail
		for i := 0; i < enc.dataShards; i++ {
			shard := enc.shardCache[i]
			slen := len(shard)
			copy(shard[slen:enc.maxSize], enc.zeros)
		}

		// construct equal-sized slices with the header stripped
		cache := enc.encodeCache
		for k := range cache {
			cache[k] = enc.shardCache[k][enc.payloadOffset:enc.maxSize]
		}

		// rs encode
		if err := enc.codec.Encode(cache); err == nil {
			ps = enc.shardCache[enc.dataShards:]
			for k := range ps {
				enc.markFEC(ps[k][enc.headerOffset:])
				ps[k] = ps[k][:enc.maxSize]
			}
		}

		// reset counters to zero
		enc.shardCount = 0
		enc.maxSize = 0
	}

	return
}
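
// A minimal caller sketch (assumed buffer handling, not the library's actual
// transmit path; it presumes the encoder was created with offset 0 and that
// send is a hypothetical helper): reserve room for the FEC header plus the
// 2-byte size field, let encode stamp the header, send the data packet, then
// send any returned parity shards before calling encode again, because the
// returned slices are reused:
//
//	buf := make([]byte, fecHeaderSizePlus2+len(payload))
//	copy(buf[fecHeaderSizePlus2:], payload)
//	ps := enc.encode(buf) // stamps the FEC header into buf
//	send(buf)
//	for _, parity := range ps {
//		send(parity)
//	}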

func (enc *fecEncoder) markData(data []byte) {
	binary.LittleEndian.PutUint32(data, enc.next)
	binary.LittleEndian.PutUint16(data[4:], typeData)
	enc.next++
}

func (enc *fecEncoder) markFEC(data []byte) {
	binary.LittleEndian.PutUint32(data, enc.next)
	binary.LittleEndian.PutUint16(data[4:], typeFEC)
	enc.next = (enc.next + 1) % enc.paws
}
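
// Note (an observation, not original documentation): markData increments next
// without a modulo while markFEC wraps it at paws. Because a shard group always
// finishes with parity shards and paws is a multiple of shardSize, the wrap
// only has to be applied at a group boundary, and next cannot overflow uint32
// mid-group since paws leaves at least one group of headroom below 2^32.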