@@ -3,8 +3,11 @@ package mux
 import (
 	"errors"
 	"io"
+	"math"
 	"net"
+	"runtime"
 	"sync"
+	"sync/atomic"
 	"time"

 	"github.com/cnlh/nps/lib/common"
@@ -15,34 +18,36 @@ type conn struct {
 	getStatusCh      chan struct{}
 	connStatusOkCh   chan struct{}
 	connStatusFailCh chan struct{}
-	readTimeOut      time.Time
-	writeTimeOut     time.Time
 	connId           int32
 	isClose          bool
 	closeFlag        bool // close conn flag
-	receiveWindow    *window
-	sendWindow       *window
-	readCh           waitingCh
-	writeCh          waitingCh
-	mux              *Mux
+	receiveWindow    *ReceiveWindow
+	sendWindow       *SendWindow
 	once             sync.Once
+	//label          string
 }

-func NewConn(connId int32, mux *Mux) *conn {
+func NewConn(connId int32, mux *Mux, label ...string) *conn {
 	c := &conn{
 		getStatusCh:      make(chan struct{}),
 		connStatusOkCh:   make(chan struct{}),
 		connStatusFailCh: make(chan struct{}),
 		connId:           connId,
-		receiveWindow:    new(window),
-		sendWindow:       new(window),
-		mux:              mux,
+		receiveWindow:    new(ReceiveWindow),
+		sendWindow:       new(SendWindow),
 		once:             sync.Once{},
 	}
-	c.receiveWindow.NewReceive()
-	c.sendWindow.NewSend()
-	c.readCh.new()
-	c.writeCh.new()
+	//if len(label) > 0 {
+	//	c.label = label[0]
+	//}
+	c.receiveWindow.New(mux)
+	c.sendWindow.New(mux)
+	//logm := &connLog{
+	//	startTime: time.Now(),
+	//	isClose:   false,
+	//	logs:      []string{c.label + "new conn success"},
+	//}
+	//setM(label[0], int(connId), logm)
 	return c
 }
@@ -50,39 +55,28 @@ func (s *conn) Read(buf []byte) (n int, err error) {
 	if s.isClose || buf == nil {
 		return 0, errors.New("the conn has closed")
 	}
-	// waiting for takeout from receive window finish or timeout
-	go s.readWindow(buf, s.readCh.nCh, s.readCh.errCh)
-	if t := s.readTimeOut.Sub(time.Now()); t > 0 {
-		timer := time.NewTimer(t)
-		defer timer.Stop()
-		select {
-		case <-timer.C:
-			return 0, errors.New("read timeout")
-		case n = <-s.readCh.nCh:
-			err = <-s.readCh.errCh
-		}
-	} else {
-		n = <-s.readCh.nCh
-		err = <-s.readCh.errCh
+	if len(buf) == 0 {
+		return 0, nil
 	}
+	// wait until the takeout from the receive window finishes or times out
+	//now := time.Now()
+	n, err = s.receiveWindow.Read(buf, s.connId)
+	//t := time.Now().Sub(now)
+	//if t.Seconds() > 0.5 {
+	//	logs.Warn("conn read long", n, t.Seconds())
+	//}
+	//var errstr string
+	//if err == nil {
+	//	errstr = "err:nil"
+	//} else {
+	//	errstr = err.Error()
+	//}
+	//d := getM(s.label, int(s.connId))
+	//d.logs = append(d.logs, s.label+"read "+strconv.Itoa(n)+" "+errstr+" "+string(buf[:100]))
+	//setM(s.label, int(s.connId), d)
 	return
 }

-func (s *conn) readWindow(buf []byte, nCh chan int, errCh chan error) {
-	n, err := s.receiveWindow.Read(buf)
-	if s.receiveWindow.WindowFull {
-		if s.receiveWindow.Size() > 0 {
-			// window.Read may be invoked before window.Write, and WindowFull flag change to true
-			// so make sure that receiveWindow is free some space
-			s.receiveWindow.WindowFull = false
-			s.mux.sendInfo(common.MUX_MSG_SEND_OK, s.connId, s.receiveWindow.Size())
-			// acknowledge other side, have empty some receive window space
-		}
-	}
-	nCh <- n
-	errCh <- err
-}
-
 func (s *conn) Write(buf []byte) (n int, err error) {
 	if s.isClose {
 		return 0, errors.New("the conn has closed")
@@ -91,45 +85,18 @@ func (s *conn) Write(buf []byte) (n int, err error) {
 		//s.Close()
 		return 0, errors.New("io: write on closed conn")
 	}
-	s.sendWindow.SetSendBuf(buf) // set the buf to send window
-	go s.write(s.writeCh.nCh, s.writeCh.errCh)
-	// waiting for send to other side or timeout
-	if t := s.writeTimeOut.Sub(time.Now()); t > 0 {
-		timer := time.NewTimer(t)
-		defer timer.Stop()
-		select {
-		case <-timer.C:
-			return 0, errors.New("write timeout")
-		case n = <-s.writeCh.nCh:
-			err = <-s.writeCh.errCh
-		}
-	} else {
-		n = <-s.writeCh.nCh
-		err = <-s.writeCh.errCh
+	if len(buf) == 0 {
+		return 0, nil
 	}
+	//logs.Warn("write buf", len(buf))
+	//now := time.Now()
+	n, err = s.sendWindow.WriteFull(buf, s.connId)
+	//t := time.Now().Sub(now)
+	//if t.Seconds() > 0.5 {
+	//	logs.Warn("conn write long", n, t.Seconds())
+	//}
 	return
 }
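
For orientation: the rewritten Read and Write above no longer spawn a goroutine plus a channel handoff per call; they block directly inside the receive and send windows. A minimal, hypothetical caller (relay and its deadline policy are illustrative and not part of this patch) can therefore drive such a conn through the ordinary net.Conn interface:

package main

import (
	"fmt"
	"io"
	"net"
	"strings"
	"time"
)

// relay copies src into dst until EOF, refreshing the read deadline before
// every Read; on a mux conn the deadline is forwarded to the receive window.
func relay(dst io.Writer, src net.Conn) error {
	buf := make([]byte, 32<<10)
	for {
		_ = src.SetReadDeadline(time.Now().Add(30 * time.Second))
		n, err := src.Read(buf)
		if n > 0 {
			if _, werr := dst.Write(buf[:n]); werr != nil {
				return werr
			}
		}
		if err != nil {
			return err // io.EOF once the peer closes and the window drains
		}
	}
}

func main() {
	// net.Pipe stands in for a mux conn here; both satisfy net.Conn.
	a, b := net.Pipe()
	go func() {
		b.Write([]byte("hello"))
		b.Close()
	}()
	var sb strings.Builder
	_ = relay(&sb, a) // returns io.EOF after the write side closes
	fmt.Println(sb.String())
}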
-func (s *conn) write(nCh chan int, errCh chan error) {
-	var n int
-	var err error
-	for {
-		buf, err := s.sendWindow.WriteTo()
-		// get the usable window size buf from send window
-		if buf == nil && err == io.EOF {
-			// send window is drain, break the loop
-			err = nil
-			break
-		}
-		if err != nil {
-			break
-		}
-		n += len(buf)
-		s.mux.sendInfo(common.MUX_NEW_MSG, s.connId, buf)
-		// send to other side, not send nil data to other side
-	}
-	nCh <- n
-	errCh <- err
-}

 func (s *conn) Close() (err error) {
 	s.once.Do(s.closeProcess)
@@ -138,288 +105,531 @@ func (s *conn) Close() (err error) {

 func (s *conn) closeProcess() {
 	s.isClose = true
-	s.mux.connMap.Delete(s.connId)
-	if !s.mux.IsClose {
+	s.receiveWindow.mux.connMap.Delete(s.connId)
+	if !s.receiveWindow.mux.IsClose {
 		// if the server or user closes the conn while reading, Read returns io.EOF
 		// and this Close method is invoked; send this signal to close the other side
-		s.mux.sendInfo(common.MUX_CONN_CLOSE, s.connId, nil)
+		s.receiveWindow.mux.sendInfo(common.MUX_CONN_CLOSE, s.connId, nil)
 	}
 	s.sendWindow.CloseWindow()
 	s.receiveWindow.CloseWindow()
+	//d := getM(s.label, int(s.connId))
+	//d.isClose = true
+	//d.logs = append(d.logs, s.label+"close "+time.Now().String())
+	//setM(s.label, int(s.connId), d)
 	return
 }

 func (s *conn) LocalAddr() net.Addr {
-	return s.mux.conn.LocalAddr()
+	return s.receiveWindow.mux.conn.LocalAddr()
 }

 func (s *conn) RemoteAddr() net.Addr {
-	return s.mux.conn.RemoteAddr()
+	return s.receiveWindow.mux.conn.RemoteAddr()
 }

 func (s *conn) SetDeadline(t time.Time) error {
-	s.readTimeOut = t
-	s.writeTimeOut = t
+	_ = s.SetReadDeadline(t)
+	_ = s.SetWriteDeadline(t)
 	return nil
 }

 func (s *conn) SetReadDeadline(t time.Time) error {
-	s.readTimeOut = t
+	s.receiveWindow.SetTimeOut(t)
 	return nil
 }

 func (s *conn) SetWriteDeadline(t time.Time) error {
-	s.writeTimeOut = t
+	s.sendWindow.SetTimeOut(t)
 	return nil
 }

 type window struct {
-	windowBuff          []byte
-	off                 uint16
-	readOp              chan struct{}
-	readWait            bool
-	WindowFull          bool
-	usableReceiveWindow chan uint16
-	WriteWg             sync.WaitGroup
-	closeOp             bool
-	closeOpCh           chan struct{}
-	WriteEndOp          chan struct{}
-	mutex               sync.Mutex
-}
-
-func (Self *window) NewReceive() {
-	// initial a window for receive
-	Self.windowBuff = common.WindowBuff.Get()
-	Self.readOp = make(chan struct{})
-	Self.WriteEndOp = make(chan struct{})
-	Self.closeOpCh = make(chan struct{}, 3)
+	remainingWait uint64 // 64bit alignment
+	off           uint32
+	maxSize       uint32
+	closeOp       bool
+	closeOpCh     chan struct{}
+	mux           *Mux
+}
+
+func (Self *window) unpack(ptrs uint64) (remaining, wait uint32) {
+	const mask = 1<<dequeueBits - 1
+	remaining = uint32((ptrs >> dequeueBits) & mask)
+	wait = uint32(ptrs & mask)
+	return
 }

-func (Self *window) NewSend() {
-	// initial a window for send
-	Self.usableReceiveWindow = make(chan uint16)
-	Self.closeOpCh = make(chan struct{}, 3)
+func (Self *window) pack(remaining, wait uint32) uint64 {
+	const mask = 1<<dequeueBits - 1
+	return (uint64(remaining) << dequeueBits) |
+		uint64(wait&mask)
 }
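
unpack and pack keep two 32-bit counters, the remaining window size and a wait flag, in a single uint64 so both can be read and CAS-updated atomically. A standalone sketch of the round trip, assuming dequeueBits = 32 (the constant is defined elsewhere in this package; the value here is an assumption inferred from the 32-bit halves):

package main

import "fmt"

const dequeueBits = 32 // assumed value, defined elsewhere in the mux package

func pack(remaining, wait uint32) uint64 {
	const mask = 1<<dequeueBits - 1
	return (uint64(remaining) << dequeueBits) | uint64(wait&mask)
}

func unpack(ptrs uint64) (remaining, wait uint32) {
	const mask = 1<<dequeueBits - 1
	remaining = uint32((ptrs >> dequeueBits) & mask)
	wait = uint32(ptrs & mask)
	return
}

func main() {
	ptrs := pack(65536, 1) // remaining = 64KiB, wait flag set
	r, w := unpack(ptrs)   // the round trip is lossless
	fmt.Printf("%#x -> %d %d\n", ptrs, r, w) // 0x1000000000001 -> 65536 1
}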

-func (Self *window) SetSendBuf(buf []byte) {
-	// send window buff from conn write method, set it to send window
-	Self.mutex.Lock()
-	Self.windowBuff = buf
-	Self.off = 0
-	Self.mutex.Unlock()
+func (Self *window) New() {
+	Self.closeOpCh = make(chan struct{}, 2)
 }

-func (Self *window) fullSlide() {
-	// slide by allocate
-	newBuf := common.WindowBuff.Get()
-	Self.liteSlide()
-	n := copy(newBuf[:Self.len()], Self.windowBuff)
-	common.WindowBuff.Put(Self.windowBuff)
-	Self.windowBuff = newBuf[:n]
-	return
-}
-
-func (Self *window) liteSlide() {
-	// slide by re slice
-	Self.windowBuff = Self.windowBuff[Self.off:]
-	Self.off = 0
-	return
+func (Self *window) CloseWindow() {
+	if !Self.closeOp {
+		Self.closeOp = true
+		// two buffered signals: one for a blocked reader, one for a blocked writer
+		Self.closeOpCh <- struct{}{}
+		Self.closeOpCh <- struct{}{}
+	}
 }
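
CloseWindow pushes exactly two signals into a channel buffered to capacity 2: at most one goroutine can be blocked on the read path and one on the write path, and the buffering lets both sends complete even when nobody is waiting yet. A runnable toy illustrating that rationale (the reader/writer labels are illustrative):

package main

import (
	"fmt"
	"sync"
)

func main() {
	closeOpCh := make(chan struct{}, 2) // one slot per potential waiter
	var wg sync.WaitGroup
	for _, side := range []string{"reader", "writer"} {
		wg.Add(1)
		go func(side string) {
			defer wg.Done()
			<-closeOpCh // each blocked side consumes exactly one signal
			fmt.Println(side, "unblocked")
		}(side)
	}
	closeOpCh <- struct{}{} // as in CloseWindow: never blocks, thanks to the buffer
	closeOpCh <- struct{}{}
	wg.Wait()
}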

-func (Self *window) Size() (n int) {
-	// receive Window remaining
-	n = common.PoolSizeWindow - Self.len()
-	return
+type ReceiveWindow struct {
+	window
+	bufQueue ReceiveWindowQueue
+	element  *common.ListElement
+	count    int8
+	once     sync.Once
 }

-func (Self *window) len() (n int) {
-	n = len(Self.windowBuff[Self.off:])
+func (Self *ReceiveWindow) New(mux *Mux) {
+	// initialize a window for receiving
+	Self.bufQueue.New()
+	Self.element = common.ListElementPool.Get()
+	Self.maxSize = common.MAXIMUM_SEGMENT_SIZE * 10
+	Self.mux = mux
+	Self.window.New()
+}
+
+func (Self *ReceiveWindow) remainingSize(delta uint16) (n uint32) {
+	// remaining receive window size
+	l := int64(atomic.LoadUint32(&Self.maxSize)) - int64(Self.bufQueue.Len())
+	l -= int64(delta)
+	if l > 0 {
+		n = uint32(l)
+	}
 	return
 }

-func (Self *window) cap() (n int) {
-	n = cap(Self.windowBuff[Self.off:])
+func (Self *ReceiveWindow) calcSize() {
+	// calculate the maximum receive window size
+	if Self.count == 0 {
+		//logs.Warn("ping, bw", Self.mux.latency, Self.bw.Get())
+		conns := Self.mux.connMap.Size()
+		n := uint32(math.Float64frombits(atomic.LoadUint64(&Self.mux.latency)) *
+			Self.mux.bw.Get() / float64(conns))
+		if n < common.MAXIMUM_SEGMENT_SIZE*10 {
+			n = common.MAXIMUM_SEGMENT_SIZE * 10
+		}
+		bufLen := Self.bufQueue.Len()
+		if n < bufLen {
+			n = bufLen
+		}
+		if n < Self.maxSize/2 {
+			n = Self.maxSize / 2
+		}
+		// the clamps above enforce the minimum size
+		if n > 2*Self.maxSize {
+			n = 2 * Self.maxSize
+		}
+		if n > (common.MAXIMUM_WINDOW_SIZE / uint32(conns)) {
+			n = common.MAXIMUM_WINDOW_SIZE / uint32(conns)
+		}
+		// the clamps above enforce the maximum size
+		//logs.Warn("n", n)
+		atomic.StoreUint32(&Self.maxSize, n)
+		Self.count = -10
+	}
+	Self.count += 1
 	return
 }
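
calcSize recomputes the receive window every tenth call as a per-connection bandwidth-delay product, latency times measured bandwidth divided by the number of conns, then clamps the result so it can at most halve or double per adjustment and never exceeds the per-connection share of the global maximum. A worked example with assumed constants (the real MAXIMUM_SEGMENT_SIZE and MAXIMUM_WINDOW_SIZE live in lib/common; the values below are placeholders):

package main

import "fmt"

const (
	mss       = 4096             // assumed MAXIMUM_SEGMENT_SIZE
	maxWindow = 16 * 1024 * 1024 // assumed MAXIMUM_WINDOW_SIZE
)

// calc mirrors the clamping order of ReceiveWindow.calcSize.
func calc(latencySec, bwBytesPerSec float64, conns, bufLen, curMax uint32) uint32 {
	n := uint32(latencySec * bwBytesPerSec / float64(conns)) // per-conn BDP
	if n < mss*10 {
		n = mss * 10
	}
	if n < bufLen {
		n = bufLen
	}
	if n < curMax/2 {
		n = curMax / 2
	}
	if n > 2*curMax {
		n = 2 * curMax
	}
	if n > maxWindow/conns {
		n = maxWindow / conns
	}
	return n
}

func main() {
	// 40ms latency at ~12.5MB/s shared by 4 conns gives a BDP of 125000
	// bytes, which the 2*curMax clamp then caps at 81920.
	fmt.Println(calc(0.040, 12.5e6, 4, 0, 40960)) // 81920
}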

-func (Self *window) grow(n int) {
-	Self.windowBuff = Self.windowBuff[:Self.len()+n]
-}
-
-func (Self *window) Write(p []byte) (n int, err error) {
+func (Self *ReceiveWindow) Write(buf []byte, l uint16, part bool, id int32) (err error) {
 	if Self.closeOp {
-		return 0, errors.New("conn.receiveWindow: write on closed window")
+		return errors.New("conn.receiveWindow: write on closed window")
 	}
-	if len(p) > Self.Size() {
-		return 0, errors.New("conn.receiveWindow: write too large")
+	element, err := NewListElement(buf, l, part)
+	//logs.Warn("push the buf", len(buf), l, (&element).l)
+	if err != nil {
+		return
 	}
-	Self.mutex.Lock()
-	// slide the offset
-	if len(p) > Self.cap()-Self.len() {
-		// not enough space, need to allocate
-		Self.fullSlide()
-	} else {
-		// have enough space, re slice
-		Self.liteSlide()
+	Self.calcSize() // calculate the max window size
+	var wait uint32
+start:
+	ptrs := atomic.LoadUint64(&Self.remainingWait)
+	_, wait = Self.unpack(ptrs)
+	newRemaining := Self.remainingSize(l)
+	// the remaining window size after accounting for the element we are about to push
+	if newRemaining == 0 {
+		//logs.Warn("window full true", remaining)
+		wait = 1
 	}
-	length := Self.len()                  // length before grow
-	Self.grow(len(p))                     // grow for copy
-	n = copy(Self.windowBuff[length:], p) // must copy data before allow Read
-	if Self.readWait {
-		// if there condition is length == 0 and
-		// Read method just take away all the windowBuff,
-		// this method will block until windowBuff is empty again
-
-		// allow continue read
-		defer Self.allowRead()
+	if !atomic.CompareAndSwapUint64(&Self.remainingWait, ptrs, Self.pack(0, wait)) {
+		goto start
+		// another goroutine changed the status; retry to re-evaluate whether we need to wait
 	}
-	Self.mutex.Unlock()
-	return n, nil
+	Self.bufQueue.Push(element)
+	// status check finished, now push the element into the queue
+	if wait == 0 {
+		Self.mux.sendInfo(common.MUX_MSG_SEND_OK, id, Self.maxSize, newRemaining)
+		// advertise the remaining window size, unless it is zero
+	}
+	return nil
 }
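
The CAS loop in Write is one half of a lock-free flow-control handshake: when the queue fills, the writer records wait = 1 and deliberately skips the acknowledgement, leaving it to the reader's sendStatus to send the window update once space frees up. A self-contained simulation of the two sides, again assuming dequeueBits = 32:

package main

import (
	"fmt"
	"sync/atomic"
)

const dequeueBits = 32 // assumed

func pack(remaining, wait uint32) uint64 { return uint64(remaining)<<dequeueBits | uint64(wait) }
func unpack(p uint64) (uint32, uint32)   { return uint32(p >> dequeueBits), uint32(p) }

func main() {
	var remainingWait uint64 // the shared word, as in the window struct

	// write side: the window just filled up, so record wait=1 and skip the ack
	for {
		ptrs := atomic.LoadUint64(&remainingWait)
		if atomic.CompareAndSwapUint64(&remainingWait, ptrs, pack(0, 1)) {
			break
		}
	}

	// read side (sendStatus): free l bytes and clear wait; because the
	// observed wait flag was 1, this side now owes the peer a window update
	const l = 4096
	for {
		ptrs := atomic.LoadUint64(&remainingWait)
		remaining, wait := unpack(ptrs)
		if atomic.CompareAndSwapUint64(&remainingWait, ptrs, pack(remaining+l, 0)) {
			if wait == 1 {
				fmt.Println("ack peer with remaining =", remaining+l)
			}
			break
		}
	}
}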

-func (Self *window) allowRead() (closed bool) {
+func (Self *ReceiveWindow) Read(p []byte, id int32) (n int, err error) {
 	if Self.closeOp {
-		close(Self.readOp)
-		return true
+		return 0, io.EOF // received the close signal, return eof
 	}
-	Self.mutex.Lock()
-	Self.readWait = false
-	Self.mutex.Unlock()
-	select {
-	case <-Self.closeOpCh:
-		close(Self.readOp)
-		return true
-	case Self.readOp <- struct{}{}:
-		return false
+	pOff := 0
+	l := 0
+	//logs.Warn("receive window read off, element.l", Self.off, Self.element.l)
+copyData:
+	if Self.off == uint32(Self.element.L) {
+		// on the first Read invocation, Self.off and Self.element.L are both zero
+		common.ListElementPool.Put(Self.element)
+		if Self.closeOp {
+			return 0, io.EOF
+		}
+		Self.element, err = Self.bufQueue.Pop()
+		// if the queue is empty, Pop blocks until an element is pushed
+		// successfully, or until the timeout expires; the timer starts only
+		// when a timeout is set, and is reset to 60s if data is still arriving
+		Self.off = 0
+		if err != nil {
+			return // the queue was stopped or timed out; return
+		}
+		//logs.Warn("pop element", Self.element.l, Self.element.part)
+	}
+	l = copy(p[pOff:], Self.element.Buf[Self.off:Self.element.L])
+	pOff += l
+	Self.off += uint32(l)
+	//logs.Warn("window read length buf len", Self.readLength, Self.bufQueue.Len())
+	n += l
+	l = 0
+	if Self.off == uint32(Self.element.L) {
+		//logs.Warn("put the element end ", string(Self.element.buf[:15]))
+		common.WindowBuff.Put(Self.element.Buf)
+		Self.sendStatus(id, Self.element.L)
+		// report the freed space; sendStatus checks the window-full status
+	}
+	if pOff < len(p) && Self.element.Part {
+		// this element is one part of a larger write; keep filling buf p
+		goto copyData
 	}
+	return // p is full, or a complete logical segment has been copied
 }
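
Read drains pooled list elements and keeps looping (the copyData label) while the current element is flagged as a partial segment and p still has room, so a single caller-visible Read can reassemble several MUX_NEW_MSG_PART frames. A simplified standalone sketch of that loop; element and read are illustrative stand-ins for common.ListElement and the real method:

package main

import "fmt"

// element mirrors common.ListElement: a buffer, its length, and whether it
// is one part of a larger logical write.
type element struct {
	buf  []byte
	l    int
	part bool
}

// read mirrors the copyData loop: keep consuming elements while p has room
// and the current element is marked as a partial segment.
func read(p []byte, queue []element) (n int) {
	for len(queue) > 0 {
		e := queue[0]
		c := copy(p[n:], e.buf[:e.l])
		n += c
		if c < e.l {
			return // p is full; the remainder stays queued in the real code
		}
		queue = queue[1:]
		if n == len(p) || !e.part {
			return // p is full, or the element closed a logical segment
		}
	}
	return
}

func main() {
	q := []element{
		{[]byte("hel"), 3, true}, // first part of one sender-side Write
		{[]byte("lo"), 2, false}, // final piece of that Write
	}
	p := make([]byte, 16)
	n := read(p, q)
	fmt.Println(n, string(p[:n])) // 5 hello
}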

-func (Self *window) Read(p []byte) (n int, err error) {
-	if Self.closeOp {
-		return 0, io.EOF // Write method receive close signal, returns eof
-	}
-	Self.mutex.Lock()
-	length := Self.len() // protect the length data, it invokes
-	// before Write lock and after Write unlock
-	if length == 0 {
-		// window is empty, waiting for Write method send a success readOp signal
-		// or get timeout or close
-		Self.readWait = true
-		Self.mutex.Unlock()
-		ticker := time.NewTicker(2 * time.Minute)
-		defer ticker.Stop()
-		select {
-		case _, ok := <-Self.readOp:
-			if !ok {
-				return 0, errors.New("conn.receiveWindow: window closed")
-			}
-		case <-Self.WriteEndOp:
-			return 0, io.EOF // receive eof signal, returns eof
-		case <-ticker.C:
-			return 0, errors.New("conn.receiveWindow: read time out")
-		case <-Self.closeOpCh:
-			close(Self.readOp)
-			return 0, io.EOF // receive close signal, returns eof
-		}
-	} else {
-		Self.mutex.Unlock()
-	}
-	minCopy := 512
+func (Self *ReceiveWindow) sendStatus(id int32, l uint16) {
+	var remaining, wait uint32
 	for {
-		Self.mutex.Lock()
-		if len(p) == n || Self.len() == 0 {
-			Self.mutex.Unlock()
+		ptrs := atomic.LoadUint64(&Self.remainingWait)
+		remaining, wait = Self.unpack(ptrs)
+		remaining += uint32(l)
+		if atomic.CompareAndSwapUint64(&Self.remainingWait, ptrs, Self.pack(remaining, 0)) {
 			break
 		}
-		if n+minCopy > len(p) {
-			minCopy = len(p) - n
-		}
-		i := copy(p[n:n+minCopy], Self.windowBuff[Self.off:])
-		Self.off += uint16(i)
-		n += i
-		Self.mutex.Unlock()
+		runtime.Gosched()
+		// another goroutine changed the remaining or wait status; retry so the
+		// acknowledgement is based on the latest state
 	}
+	// the current window status was read successfully
+	if wait == 1 {
+		//logs.Warn("send the wait status", remaining)
+		Self.mux.sendInfo(common.MUX_MSG_SEND_OK, id, atomic.LoadUint32(&Self.maxSize), remaining)
+	}
-	p = p[:n]
 	return
 }

-func (Self *window) WriteTo() (p []byte, err error) {
-	if Self.closeOp {
-		return nil, errors.New("conn.writeWindow: window closed")
-	}
-	if Self.len() == 0 {
-		return nil, io.EOF
-		// send window buff is drain, return eof and get another one
-	}
-	var windowSize uint16
-	var ok bool
-waiting:
-	ticker := time.NewTicker(2 * time.Minute)
-	defer ticker.Stop()
-	// waiting for receive usable window size, or timeout
-	select {
-	case windowSize, ok = <-Self.usableReceiveWindow:
-		if !ok {
-			return nil, errors.New("conn.writeWindow: window closed")
+func (Self *ReceiveWindow) SetTimeOut(t time.Time) {
+	// forwarded to the FIFO queue's Pop method
+	Self.bufQueue.SetTimeOut(t)
+}
+
+func (Self *ReceiveWindow) Stop() {
+	// no more data will be pushed, so unblock the Pop method
+	Self.once.Do(Self.bufQueue.Stop)
+}
+
+func (Self *ReceiveWindow) CloseWindow() {
+	Self.window.CloseWindow()
+	Self.Stop()
+	Self.release()
+}
+
+func (Self *ReceiveWindow) release() {
+	//if Self.element != nil {
+	//	if Self.element.Buf != nil {
+	//		common.WindowBuff.Put(Self.element.Buf)
+	//	}
+	//	common.ListElementPool.Put(Self.element)
+	//}
+	for {
+		Self.element = Self.bufQueue.TryPop()
+		if Self.element == nil {
+			return
 		}
-	case <-ticker.C:
-		return nil, errors.New("conn.writeWindow: write to time out")
-	case <-Self.closeOpCh:
-		return nil, errors.New("conn.writeWindow: window closed")
-	}
-	if windowSize == 0 {
-		goto waiting // waiting for another usable window size
-	}
-	Self.mutex.Lock()
-	if windowSize > uint16(Self.len()) {
-		// usable window size is bigger than window buff size, send the full buff
-		windowSize = uint16(Self.len())
-	}
-	p = Self.windowBuff[Self.off : windowSize+Self.off]
-	Self.off += windowSize
-	Self.mutex.Unlock()
-	return
+		if Self.element.Buf != nil {
+			common.WindowBuff.Put(Self.element.Buf)
+		}
+		common.ListElementPool.Put(Self.element)
+	} // release resources
+}
+
+type SendWindow struct {
+	window
+	buf       []byte
+	setSizeCh chan struct{}
+	timeout   time.Time
+}
+
+func (Self *SendWindow) New(mux *Mux) {
+	Self.setSizeCh = make(chan struct{})
+	Self.maxSize = common.MAXIMUM_SEGMENT_SIZE * 10
+	atomic.AddUint64(&Self.remainingWait, uint64(common.MAXIMUM_SEGMENT_SIZE*10)<<dequeueBits)
+	Self.mux = mux
+	Self.window.New()
+}
+
+func (Self *SendWindow) SetSendBuf(buf []byte) {
+	// take the buf from the conn Write method and set it as the send window
+	Self.buf = buf
+	Self.off = 0
 }

-func (Self *window) SetAllowSize(value uint16) (closed bool) {
+func (Self *SendWindow) SetSize(windowSize, newRemaining uint32) (closed bool) {
+	// set the window size received from the receive window
 	defer func() {
 		if recover() != nil {
 			closed = true
 		}
 	}()
 	if Self.closeOp {
-		close(Self.usableReceiveWindow)
+		close(Self.setSizeCh)
 		return true
 	}
+	//logs.Warn("set send window size to ", windowSize, newRemaining)
+	var remaining, wait, newWait uint32
+	for {
+		ptrs := atomic.LoadUint64(&Self.remainingWait)
+		remaining, wait = Self.unpack(ptrs)
+		if remaining == newRemaining {
+			//logs.Warn("waiting for another window size")
+			return false // waiting to receive another usable window size
+		}
+		if newRemaining == 0 && wait == 1 {
+			newWait = 1 // keep the wait status;
+			// when newRemaining is not zero, newWait stays 0
+		}
+		if atomic.CompareAndSwapUint64(&Self.remainingWait, ptrs, Self.pack(newRemaining, newWait)) {
+			break
+		}
+		// another goroutine changed the wait status or window size; retry
+	}
+	if wait == 1 {
+		// the send window was in the wait status; wake it via the channel
+		//logs.Warn("send window remaining size is 0")
+		Self.allow()
+	}
+	// the send window was not in the wait status, nothing to wake up
+	return false
+}
+
+func (Self *SendWindow) allow() {
 	select {
-	case Self.usableReceiveWindow <- value:
-		return false
+	case Self.setSizeCh <- struct{}{}:
+		//logs.Warn("send window remaining size is 0 finish")
+		return
 	case <-Self.closeOpCh:
-		close(Self.usableReceiveWindow)
-		return true
+		close(Self.setSizeCh)
+		return
 	}
 }

-func (Self *window) CloseWindow() {
-	Self.closeOp = true
-	Self.closeOpCh <- struct{}{}
-	Self.closeOpCh <- struct{}{}
-	Self.closeOpCh <- struct{}{}
-	close(Self.closeOpCh)
-	return
+func (Self *SendWindow) sent(sentSize uint32) {
+	// subtract sentSize from the high (remaining) word in one atomic add
+	atomic.AddUint64(&Self.remainingWait, ^(uint64(sentSize)<<dequeueBits - 1))
 }
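
sent subtracts sentSize from the high (remaining) half of remainingWait with a single atomic add: in two's complement arithmetic ^x equals -x-1, so adding ^(k-1) is the same as adding -k. A runnable check of that identity, once more assuming dequeueBits = 32:

package main

import (
	"fmt"
	"sync/atomic"
)

const dequeueBits = 32 // assumed

func main() {
	// remaining = 10000 in the high word, wait = 0 in the low word
	v := uint64(10000) << dequeueBits

	// sent(4096): adding ^(k-1) adds -k modulo 2^64, so this subtracts
	// 4096 from the high word without disturbing the low (wait) bits
	sent := uint64(4096)
	atomic.AddUint64(&v, ^(sent<<dequeueBits - 1))

	fmt.Println(v >> dequeueBits) // 5904
}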

-type waitingCh struct {
-	nCh   chan int
-	errCh chan error
+func (Self *SendWindow) WriteTo() (p []byte, sendSize uint32, part bool, err error) {
+	// returns one buf segment at a time; loop outside until err == io.EOF
+	if Self.closeOp {
+		return nil, 0, false, errors.New("conn.writeWindow: window closed")
+	}
+	if Self.off == uint32(len(Self.buf)) {
+		return nil, 0, false, io.EOF
+		// the send window buf is drained; return eof and wait for another one
+	}
+	var remaining uint32
+start:
+	ptrs := atomic.LoadUint64(&Self.remainingWait)
+	remaining, _ = Self.unpack(ptrs)
+	if remaining == 0 {
+		if !atomic.CompareAndSwapUint64(&Self.remainingWait, ptrs, Self.pack(0, 1)) {
+			goto start // another goroutine changed the window, try again
+		}
+		// enter the wait status
+		//logs.Warn("send window into wait status")
+		err = Self.waitReceiveWindow()
+		if err != nil {
+			return nil, 0, false, err
+		}
+		//logs.Warn("rem into wait finish")
+		goto start
+	}
+	// there is still remaining window space
+	//logs.Warn("rem", remaining)
+	if len(Self.buf[Self.off:]) > common.MAXIMUM_SEGMENT_SIZE {
+		sendSize = common.MAXIMUM_SEGMENT_SIZE
+		//logs.Warn("cut buf by mss")
+	} else {
+		sendSize = uint32(len(Self.buf[Self.off:]))
+	}
+	if remaining < sendSize {
+		// the usable window size is smaller than MAXIMUM_SEGMENT_SIZE
+		// or than what is left of the send buf
+		sendSize = remaining
+		//logs.Warn("cut buf by remainingsize", sendSize, len(Self.buf[Self.off:]))
+	}
+	//logs.Warn("send size", sendSize)
+	if sendSize < uint32(len(Self.buf[Self.off:])) {
+		part = true
+	}
+	p = Self.buf[Self.off : sendSize+Self.off]
+	Self.off += sendSize
+	Self.sent(sendSize)
+	return
 }
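
WriteTo therefore cuts every segment by two limits, MAXIMUM_SEGMENT_SIZE and the peer's advertised remaining window, and flags it part = true whenever bytes stay behind in the buffer. A standalone sketch of just that cutting rule; segment and the mss value are illustrative:

package main

import "fmt"

const mss = 4096 // assumed MAXIMUM_SEGMENT_SIZE

// segment yields the next slice to send from buf at offset off, bounded by
// the MSS and by the peer's remaining window, and reports whether data is
// left over (the part flag).
func segment(buf []byte, off, remaining int) (p []byte, part bool) {
	send := len(buf) - off
	if send > mss {
		send = mss
	}
	if send > remaining {
		send = remaining
	}
	part = send < len(buf)-off
	return buf[off : off+send], part
}

func main() {
	buf := make([]byte, 10000)
	p, part := segment(buf, 0, 1<<20) // large window: cut by the MSS
	fmt.Println(len(p), part)         // 4096 true
	p, part = segment(buf, 8192, 1<<20) // the tail fits in one segment
	fmt.Println(len(p), part)           // 1808 false
	p, part = segment(buf, 0, 1000) // small window: cut by remaining
	fmt.Println(len(p), part)       // 1000 true
}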

-func (Self *waitingCh) new() {
-	Self.nCh = make(chan int)
-	Self.errCh = make(chan error)
+func (Self *SendWindow) waitReceiveWindow() (err error) {
+	t := Self.timeout.Sub(time.Now())
+	if t < 0 {
+		t = time.Minute * 5 // no deadline set; fall back to a five-minute cap
+	}
+	timer := time.NewTimer(t)
+	defer timer.Stop()
+	// wait to receive a usable window size, or time out
+	select {
+	case _, ok := <-Self.setSizeCh:
+		if !ok {
+			return errors.New("conn.writeWindow: window closed")
+		}
+		return nil
+	case <-timer.C:
+		return errors.New("conn.writeWindow: write to time out")
+	case <-Self.closeOpCh:
+		return errors.New("conn.writeWindow: window closed")
+	}
 }
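
waitReceiveWindow turns the conn's write deadline into a timer budget, and a zero (or already passed) deadline falls back to a five-minute cap rather than waiting forever. A small demonstration of that fallback:

package main

import (
	"fmt"
	"time"
)

// waitBudget mirrors the timer setup in waitReceiveWindow.
func waitBudget(deadline time.Time) time.Duration {
	t := time.Until(deadline) // same as deadline.Sub(time.Now())
	if t < 0 {
		t = 5 * time.Minute
	}
	return t
}

func main() {
	fmt.Println(waitBudget(time.Time{}))                     // 5m0s: no deadline set
	fmt.Println(waitBudget(time.Now().Add(time.Second)) > 0) // true: real deadlines pass through
}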

-func (Self *waitingCh) close() {
-	close(Self.nCh)
-	close(Self.errCh)
+func (Self *SendWindow) WriteFull(buf []byte, id int32) (n int, err error) {
+	Self.SetSendBuf(buf) // set the buf as the send window
+	//logs.Warn("set the buf to send window")
+	var bufSeg []byte
+	var part bool
+	var l uint32
+	for {
+		bufSeg, l, part, err = Self.WriteTo()
+		//logs.Warn("buf seg", len(bufSeg), part, err)
+		// get the next buf segment from the send window
+		if bufSeg == nil && part == false && err == io.EOF {
+			// the send window is drained, break the loop
+			err = nil
+			break
+		}
+		if err != nil {
+			break
+		}
+		n += int(l)
+		l = 0
+		if part {
+			Self.mux.sendInfo(common.MUX_NEW_MSG_PART, id, bufSeg)
+		} else {
+			Self.mux.sendInfo(common.MUX_NEW_MSG, id, bufSeg)
+			//logs.Warn("buf seg sent", len(bufSeg), part, err)
+		}
+		// send to the other side; never send nil data
+	}
+	//logs.Warn("buf seg write success")
+	return
 }
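
WriteFull is the producer loop that conn.Write delegates to: it keeps pulling segments until WriteTo reports io.EOF, framing each one as MUX_NEW_MSG_PART or MUX_NEW_MSG. A runnable sketch of the same contract against a fake segment source; source stands in for the send window, and the mux send calls are reduced to prints:

package main

import (
	"errors"
	"fmt"
	"io"
)

// source fakes SendWindow.WriteTo: it yields queued segments one at a time
// and then reports io.EOF, which is exactly the contract WriteFull loops on.
type source struct{ segs [][]byte }

func (s *source) WriteTo() (p []byte, l uint32, part bool, err error) {
	if len(s.segs) == 0 {
		return nil, 0, false, io.EOF
	}
	p = s.segs[0]
	s.segs = s.segs[1:]
	return p, uint32(len(p)), len(s.segs) > 0, nil
}

func main() {
	src := &source{segs: [][]byte{[]byte("abcd"), []byte("ef")}}
	n := 0
	for {
		seg, l, part, err := src.WriteTo()
		if seg == nil && !part && errors.Is(err, io.EOF) {
			break // window drained: WriteFull returns n with a nil error
		}
		if err != nil {
			panic(err)
		}
		n += int(l)
		if part {
			fmt.Println("send MUX_NEW_MSG_PART", l) // more of this Write follows
		} else {
			fmt.Println("send MUX_NEW_MSG", l) // final frame of this Write
		}
	}
	fmt.Println("wrote", n) // wrote 6
}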
+
+func (Self *SendWindow) SetTimeOut(t time.Time) {
+	// used while waiting to receive a window size update
+	Self.timeout = t
+}
+
+//type bandwidth struct {
+//	readStart     time.Time
+//	lastReadStart time.Time
+//	readEnd       time.Time
+//	lastReadEnd   time.Time
+//	bufLength     int
+//	lastBufLength int
+//	count         int8
+//	readBW        float64
+//	writeBW       float64
+//	readBandwidth float64
+//}
+//
+//func (Self *bandwidth) StartRead() {
+//	Self.lastReadStart, Self.readStart = Self.readStart, time.Now()
+//	if !Self.lastReadStart.IsZero() {
+//		if Self.count == -5 {
+//			Self.calcBandWidth()
+//		}
+//	}
+//}
+//
+//func (Self *bandwidth) EndRead() {
+//	Self.lastReadEnd, Self.readEnd = Self.readEnd, time.Now()
+//	if Self.count == -5 {
+//		Self.calcWriteBandwidth()
+//	}
+//	if Self.count == 0 {
+//		Self.calcReadBandwidth()
+//		Self.count = -6
+//	}
+//	Self.count += 1
+//}
+//
+//func (Self *bandwidth) SetCopySize(n int) {
+//	// must be invoked between StartRead and EndRead
+//	Self.lastBufLength, Self.bufLength = Self.bufLength, n
+//}
+//// calculating
+////   start        end        start        end
+////         read        read
+////              write
+//
+//func (Self *bandwidth) calcBandWidth() {
+//	t := Self.readStart.Sub(Self.lastReadStart)
+//	if Self.lastBufLength >= 32768 {
+//		Self.readBandwidth = float64(Self.lastBufLength) / t.Seconds()
+//	}
+//}
+//
+//func (Self *bandwidth) calcReadBandwidth() {
+//	// bandwidth between nps and npc
+//	readTime := Self.readEnd.Sub(Self.readStart)
+//	Self.readBW = float64(Self.bufLength) / readTime.Seconds()
+//	//logs.Warn("calc read bw", Self.readBW, Self.bufLength, readTime.Seconds())
+//}
+//
+//func (Self *bandwidth) calcWriteBandwidth() {
+//	// bandwidth between nps and user, npc and application
+//	writeTime := Self.readStart.Sub(Self.lastReadEnd)
+//	Self.writeBW = float64(Self.lastBufLength) / writeTime.Seconds()
+//	//logs.Warn("calc write bw", Self.writeBW, Self.bufLength, writeTime.Seconds())
+//}
+//
+//func (Self *bandwidth) Get() (bw float64) {
+//	// the zero value, 0 for numeric types
+//	if Self.writeBW == 0 && Self.readBW == 0 {
+//		//logs.Warn("bw both 0")
+//		return 100
+//	}
+//	if Self.writeBW == 0 && Self.readBW != 0 {
+//		return Self.readBW
+//	}
+//	if Self.readBW == 0 && Self.writeBW != 0 {
+//		return Self.writeBW
+//	}
+//	return Self.readBandwidth
+//}