queue.go

package mux

import (
	"errors"
	"github.com/cnlh/nps/lib/common"
	"io"
	"math"
	"runtime"
	"sync"
	"sync/atomic"
	"time"
	"unsafe"
)

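// PriorityQueue schedules outgoing mux packets across three lock-free chains:
// ping frames on the highest chain, connection-management frames on the middle
// chain, and everything else (stream data) on the lowest chain. A starvation
// counter keeps the middle chain from starving the lowest one.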
type PriorityQueue struct {
	highestChain *bufChain
	middleChain  *bufChain
	lowestChain  *bufChain
	starving     uint8
	stop         bool
	cond         *sync.Cond
}

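// New allocates the three chains with initial ring sizes of 4, 32 and 256
// slots, plus the condition variable used to park blocked consumers.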
func (Self *PriorityQueue) New() {
	Self.highestChain = new(bufChain)
	Self.highestChain.new(4)
	Self.middleChain = new(bufChain)
	Self.middleChain.new(32)
	Self.lowestChain = new(bufChain)
	Self.lowestChain.new(256)
	locker := new(sync.Mutex)
	Self.cond = sync.NewCond(locker)
}

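// Push enqueues a packet on the chain matching its flag and wakes any
// goroutine blocked in Pop.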
func (Self *PriorityQueue) Push(packager *common.MuxPackager) {
	//logs.Warn("push start")
	Self.push(packager)
	Self.cond.Broadcast()
	//logs.Warn("push finish")
	return
}

func (Self *PriorityQueue) push(packager *common.MuxPackager) {
	switch packager.Flag {
	case common.MUX_PING_FLAG, common.MUX_PING_RETURN:
		// ping packets need the highest priority,
		// otherwise the ping (RTT) calculation would be skewed
		Self.highestChain.pushHead(unsafe.Pointer(packager))
	case common.MUX_NEW_CONN, common.MUX_NEW_CONN_OK, common.MUX_NEW_CONN_Fail:
		// new-connection packets need some priority too
		Self.middleChain.pushHead(unsafe.Pointer(packager))
	default:
		Self.lowestChain.pushHead(unsafe.Pointer(packager))
	}
}

const maxStarving uint8 = 8

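// Pop returns the next packet according to chain priority. It spins briefly,
// then blocks on the condition variable until a packet arrives or Stop is
// called, in which case it may return nil. A minimal sketch of the intended
// consumer loop (the "queue" and "writePackager" names are illustrative, not
// part of this package):
//
//	for {
//		packager := queue.Pop()
//		if packager == nil {
//			break // the queue was stopped
//		}
//		writePackager(packager) // serialize to the underlying connection
//	}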
func (Self *PriorityQueue) Pop() (packager *common.MuxPackager) {
	var iter bool
	for {
		packager = Self.pop()
		if packager != nil {
			return
		}
		if Self.stop {
			return
		}
		if iter {
			// already tried to pop twice; fall through to the blocking path
			break
		}
		iter = true
		runtime.Gosched()
	}
	Self.cond.L.Lock()
	defer Self.cond.L.Unlock()
	for packager = Self.pop(); packager == nil; {
		if Self.stop {
			return
		}
		//logs.Warn("queue into wait")
		Self.cond.Wait()
		// woken up; try to pop again instead of spinning
		packager = Self.pop()
		//logs.Warn("queue wait finish", packager)
	}
	return
}

func (Self *PriorityQueue) pop() (packager *common.MuxPackager) {
	ptr, ok := Self.highestChain.popTail()
	if ok {
		packager = (*common.MuxPackager)(ptr)
		return
	}
	if Self.starving < maxStarving {
		// don't pop the middle chain too often, or the lowest chain would wait too long
		ptr, ok = Self.middleChain.popTail()
		if ok {
			packager = (*common.MuxPackager)(ptr)
			Self.starving++
			return
		}
	}
	ptr, ok = Self.lowestChain.popTail()
	if ok {
		packager = (*common.MuxPackager)(ptr)
		if Self.starving > 0 {
			Self.starving = uint8(Self.starving / 2)
		}
		return
	}
	if Self.starving > 0 {
		ptr, ok = Self.middleChain.popTail()
		if ok {
			packager = (*common.MuxPackager)(ptr)
			Self.starving++
			return
		}
	}
	return
}

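// Stop marks the queue as stopped and wakes every goroutine blocked in Pop;
// after Stop, Pop may return nil.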
func (Self *PriorityQueue) Stop() {
	Self.stop = true
	Self.cond.Broadcast()
}

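// ConnQueue is an unbounded FIFO of *conn values, built on the same lock-free
// bufChain and the same spin-then-wait Pop pattern as PriorityQueue, but with
// a single chain and no priorities.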
type ConnQueue struct {
	chain    *bufChain
	starving uint8
	stop     bool
	cond     *sync.Cond
}

func (Self *ConnQueue) New() {
	Self.chain = new(bufChain)
	Self.chain.new(32)
	locker := new(sync.Mutex)
	Self.cond = sync.NewCond(locker)
}

func (Self *ConnQueue) Push(connection *conn) {
	Self.chain.pushHead(unsafe.Pointer(connection))
	Self.cond.Broadcast()
	return
}

func (Self *ConnQueue) Pop() (connection *conn) {
	var iter bool
	for {
		connection = Self.pop()
		if connection != nil {
			return
		}
		if Self.stop {
			return
		}
		if iter {
			// already tried to pop twice; fall through to the blocking path
			break
		}
		iter = true
		runtime.Gosched()
	}
	Self.cond.L.Lock()
	defer Self.cond.L.Unlock()
	for connection = Self.pop(); connection == nil; {
		if Self.stop {
			return
		}
		//logs.Warn("queue into wait")
		Self.cond.Wait()
		// woken up; try to pop again instead of spinning
		connection = Self.pop()
		//logs.Warn("queue wait finish", connection)
	}
	return
}

func (Self *ConnQueue) pop() (connection *conn) {
	ptr, ok := Self.chain.popTail()
	if ok {
		connection = (*conn)(ptr)
		return
	}
	return
}

func (Self *ConnQueue) Stop() {
	Self.stop = true
	Self.cond.Broadcast()
}

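// NewListElement takes a ListElement from the shared pool and fills it with
// buf, its length l, and the part flag. It returns an error if l does not
// match len(buf).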
func NewListElement(buf []byte, l uint16, part bool) (element *common.ListElement, err error) {
	if uint16(len(buf)) != l {
		err = errors.New("ListElement: buf length not match")
		return
	}
	//if l == 0 {
	//	logs.Warn("push zero")
	//}
	element = common.ListElementPool.Get()
	element.Buf = buf
	element.L = l
	element.Part = part
	return
}

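// ReceiveWindowQueue buffers the ListElements received for a single stream.
// lengthWait packs the queued byte count (high 32 bits) and a "reader is
// waiting" flag (low 32 bits) into one uint64 so Push and Pop can coordinate
// with atomics instead of a lock.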
type ReceiveWindowQueue struct {
	// lengthWait is accessed with 64-bit atomics, so keep it first in the
	// struct to guarantee 8-byte alignment on 32-bit platforms.
	lengthWait uint64
	chain      *bufChain
	stopOp     chan struct{}
	readOp     chan struct{}
	timeout    time.Time
}

func (Self *ReceiveWindowQueue) New() {
	Self.readOp = make(chan struct{})
	Self.chain = new(bufChain)
	Self.chain.new(64)
	Self.stopOp = make(chan struct{}, 2)
}

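// Push appends element to the queue, atomically adds element.L to the queued
// byte count, and signals a Pop that has registered itself as waiting.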
func (Self *ReceiveWindowQueue) Push(element *common.ListElement) {
	var length, wait uint32
	for {
		ptrs := atomic.LoadUint64(&Self.lengthWait)
		length, wait = Self.chain.head.unpack(ptrs)
		length += uint32(element.L)
		if atomic.CompareAndSwapUint64(&Self.lengthWait, ptrs, Self.chain.head.pack(length, 0)) {
			break
		}
		// another goroutine changed the length or entered the wait state; retry
	}
	//logs.Warn("window push before", Self.Len(), uint32(element.L), len(element.Buf))
	Self.chain.pushHead(unsafe.Pointer(element))
	//logs.Warn("window push", Self.Len())
	if wait == 1 {
		Self.allowPop()
	}
	return
}

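// Pop removes the oldest element from the queue. When the queue is empty it
// marks itself as waiting and blocks in waitPush until data arrives, the
// queue is stopped (io.EOF), or the read deadline expires.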
func (Self *ReceiveWindowQueue) Pop() (element *common.ListElement, err error) {
	var length uint32
startPop:
	ptrs := atomic.LoadUint64(&Self.lengthWait)
	length, _ = Self.chain.head.unpack(ptrs)
	if length == 0 {
		if !atomic.CompareAndSwapUint64(&Self.lengthWait, ptrs, Self.chain.head.pack(0, 1)) {
			goto startPop // another goroutine is pushing
		}
		// the queue is empty; we are now marked as waiting, so block until a push arrives
		err = Self.waitPush()
		if err != nil {
			return
		}
		goto startPop // the wait finished, re-read the length/wait word
	}
	// length is not zero, so try to pop
	for {
		ptr, ok := Self.chain.popTail()
		if ok {
			//logs.Warn("window pop before", Self.Len())
			element = (*common.ListElement)(ptr)
			atomic.AddUint64(&Self.lengthWait, ^(uint64(element.L)<<dequeueBits - 1))
			//logs.Warn("window pop", Self.Len(), uint32(element.L))
			return
		}
		runtime.Gosched() // another goroutine is still pushing
	}
}

func (Self *ReceiveWindowQueue) allowPop() (closed bool) {
	//logs.Warn("allow pop", Self.Len())
	select {
	case Self.readOp <- struct{}{}:
		return false
	case <-Self.stopOp:
		return true
	}
}

func (Self *ReceiveWindowQueue) waitPush() (err error) {
	//logs.Warn("wait push")
	//defer logs.Warn("wait push finish")
	t := time.Until(Self.timeout)
	if t <= 0 {
		t = time.Minute * 5
	}
	timer := time.NewTimer(t)
	defer timer.Stop()
	//logs.Warn("queue into wait")
	select {
	case <-Self.readOp:
		//logs.Warn("queue wait finish")
		return nil
	case <-Self.stopOp:
		err = io.EOF
		return
	case <-timer.C:
		err = errors.New("mux.queue: read time out")
		return
	}
}

func (Self *ReceiveWindowQueue) Len() (n uint32) {
	ptrs := atomic.LoadUint64(&Self.lengthWait)
	n, _ = Self.chain.head.unpack(ptrs)
	return
}

func (Self *ReceiveWindowQueue) Stop() {
	Self.stopOp <- struct{}{}
	Self.stopOp <- struct{}{}
}

func (Self *ReceiveWindowQueue) SetTimeOut(t time.Time) {
	Self.timeout = t
}

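// A minimal sketch of the read side, assuming a caller that copies one element
// into a buffer p (the "window" name and the deadline value are illustrative,
// not part of this package):
//
//	window.SetTimeOut(time.Now().Add(30 * time.Second))
//	element, err := window.Pop()
//	if err != nil { // io.EOF after Stop, or a timeout error
//		return 0, err
//	}
//	n := copy(p, element.Buf[:element.L])
//	return n, nil
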
// Adapted from https://golang.org/src/sync/poolqueue.go
type bufDequeue struct {
	// headTail packs together a 32-bit head index and a 32-bit
	// tail index. Both are indexes into vals modulo len(vals)-1.
	//
	// tail = index of oldest data in queue
	// head = index of next slot to fill
	//
	// Slots in the range [tail, head) are owned by consumers.
	// A consumer continues to own a slot outside this range until
	// it nils the slot, at which point ownership passes to the
	// producer.
	//
	// The head index is stored in the most-significant bits so
	// that we can atomically add to it and the overflow is
	// harmless.
	headTail uint64

	// vals is a ring buffer of unsafe.Pointer values stored in this
	// dequeue. The size of this must be a power of 2.
	//
	// A slot is still in use until *both* the tail index has moved
	// beyond it and the slot has been set to nil. The slot is set to
	// nil atomically by the consumer and read atomically by the
	// producer.
	vals []unsafe.Pointer

	// starving is set when producers repeatedly lose the headTail CAS,
	// asking later producers to back off with runtime.Gosched.
	starving uint32
}

const dequeueBits = 32

// dequeueLimit is the maximum size of a bufDequeue.
//
// This must be at most (1<<dequeueBits)/2 because detecting fullness
// depends on wrapping around the ring buffer without wrapping around
// the index. We divide by 4 so this fits in an int on 32-bit.
const dequeueLimit = (1 << dequeueBits) / 4

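// unpack splits a packed headTail word into its 32-bit head and tail indexes;
// pack is the inverse.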
func (d *bufDequeue) unpack(ptrs uint64) (head, tail uint32) {
	const mask = 1<<dequeueBits - 1
	head = uint32((ptrs >> dequeueBits) & mask)
	tail = uint32(ptrs & mask)
	return
}

func (d *bufDequeue) pack(head, tail uint32) uint64 {
	const mask = 1<<dequeueBits - 1
	return (uint64(head) << dequeueBits) |
		uint64(tail&mask)
}

// pushHead adds val at the head of the queue. It returns false if the
// queue is full.
func (d *bufDequeue) pushHead(val unsafe.Pointer) bool {
	var slot *unsafe.Pointer
	var starve uint8
	if atomic.LoadUint32(&d.starving) > 0 {
		runtime.Gosched()
	}
	for {
		ptrs := atomic.LoadUint64(&d.headTail)
		head, tail := d.unpack(ptrs)
		if (tail+uint32(len(d.vals)))&(1<<dequeueBits-1) == head {
			// Queue is full.
			return false
		}
		ptrs2 := d.pack(head+1, tail)
		if atomic.CompareAndSwapUint64(&d.headTail, ptrs, ptrs2) {
			slot = &d.vals[head&uint32(len(d.vals)-1)]
			if starve >= 3 && atomic.LoadUint32(&d.starving) > 0 {
				atomic.StoreUint32(&d.starving, 0)
			}
			break
		}
		starve++
		if starve >= 3 {
			atomic.StoreUint32(&d.starving, 1)
		}
	}
	// The head slot is free, so we own it.
	*slot = val
	return true
}

// popTail removes and returns the element at the tail of the queue.
// It returns false if the queue is empty. It may be called by any
// number of consumers.
func (d *bufDequeue) popTail() (unsafe.Pointer, bool) {
	ptrs := atomic.LoadUint64(&d.headTail)
	head, tail := d.unpack(ptrs)
	if tail == head {
		// Queue is empty.
		return nil, false
	}
	slot := &d.vals[tail&uint32(len(d.vals)-1)]
	var val unsafe.Pointer
	for {
		val = atomic.LoadPointer(slot)
		if val != nil {
			// We now own slot.
			break
		}
		// Another goroutine is still pushing data on the tail.
	}

	// Tell pushHead that we're done with this slot. Zeroing the
	// slot is also important so we don't leave behind references
	// that could keep this object live longer than necessary.
	//
	// We read val first and then publish that we're done with the
	// slot by storing nil into it.
	atomic.StorePointer(slot, nil)
	// At this point pushHead owns the slot. Advance the tail index,
	// letting it wrap back to zero instead of spilling into the head bits.
	if tail < math.MaxUint32 {
		atomic.AddUint64(&d.headTail, 1)
	} else {
		atomic.AddUint64(&d.headTail, ^uint64(math.MaxUint32-1))
	}
	return val, true
}

// bufChain is a dynamically-sized version of bufDequeue.
//
// This is implemented as a doubly-linked list queue of bufDequeues
// where each dequeue is double the size of the previous one. Once a
// dequeue fills up, this allocates a new one and only ever pushes to
// the latest dequeue. Pops happen from the other end of the list and
// once a dequeue is exhausted, it gets removed from the list.
type bufChain struct {
	// head is the bufDequeue to push to. Multiple producers may push
	// concurrently, so it is read and written atomically via
	// loadPoolChainElt/storePoolChainElt.
	head *bufChainElt

	// tail is the bufDequeue to popTail from. This is accessed
	// by consumers, so reads and writes must be atomic.
	tail *bufChainElt

	// newChain is set to 1 while a producer is allocating and linking
	// in a new, larger dequeue; other producers spin in pushHead until
	// it drops back to 0.
	newChain uint32
}

type bufChainElt struct {
	bufDequeue

	// next and prev link to the adjacent bufChainElts in this
	// bufChain.
	//
	// next is written atomically by the producer and read
	// atomically by the consumer. It only transitions from nil to
	// non-nil.
	//
	// prev is written atomically by the consumer and read
	// atomically by the producer. It only transitions from
	// non-nil to nil.
	next, prev *bufChainElt
}

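// storePoolChainElt and loadPoolChainElt give atomic store/load access to a
// *bufChainElt link through an unsafe.Pointer.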
func storePoolChainElt(pp **bufChainElt, v *bufChainElt) {
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(pp)), unsafe.Pointer(v))
}

func loadPoolChainElt(pp **bufChainElt) *bufChainElt {
	return (*bufChainElt)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(pp))))
}

func (c *bufChain) new(initSize int) {
	// Initialize the chain.
	// initSize must be a power of 2.
	d := new(bufChainElt)
	d.vals = make([]unsafe.Pointer, initSize)
	storePoolChainElt(&c.head, d)
	storePoolChainElt(&c.tail, d)
}

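// pushHead adds val to the head dequeue of the chain. When that dequeue is
// full, the producer that wins the newChain flag allocates a dequeue of twice
// the size (capped at dequeueLimit), links it in, and pushes there; other
// producers spin until the new dequeue is published.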
func (c *bufChain) pushHead(val unsafe.Pointer) {
startPush:
	for {
		if atomic.LoadUint32(&c.newChain) > 0 {
			runtime.Gosched()
		} else {
			break
		}
	}

	d := loadPoolChainElt(&c.head)

	if d.pushHead(val) {
		return
	}

	// The current dequeue is full. Allocate a new one of twice
	// the size.
	if atomic.CompareAndSwapUint32(&c.newChain, 0, 1) {
		newSize := len(d.vals) * 2
		if newSize >= dequeueLimit {
			// Can't make it any bigger.
			newSize = dequeueLimit
		}

		d2 := &bufChainElt{prev: d}
		d2.vals = make([]unsafe.Pointer, newSize)
		d2.pushHead(val)
		storePoolChainElt(&c.head, d2)
		storePoolChainElt(&d.next, d2)
		atomic.StoreUint32(&c.newChain, 0)
		return
	}
	goto startPush
}

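// popTail removes and returns the element at the tail of the chain, dropping
// fully drained dequeues as it walks toward the head. It returns (nil, false)
// when no element is available.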
func (c *bufChain) popTail() (unsafe.Pointer, bool) {
	d := loadPoolChainElt(&c.tail)
	if d == nil {
		return nil, false
	}

	for {
		// It's important that we load the next pointer
		// *before* popping the tail. In general, d may be
		// transiently empty, but if next is non-nil before
		// the pop and the pop fails, then d is permanently
		// empty, which is the only condition under which it's
		// safe to drop d from the chain.
		d2 := loadPoolChainElt(&d.next)

		if val, ok := d.popTail(); ok {
			return val, ok
		}

		if d2 == nil {
			// This is the only dequeue. It's empty right
			// now, but could be pushed to in the future.
			return nil, false
		}

		// The tail of the chain has been drained, so move on
		// to the next dequeue. Try to drop it from the chain
		// so the next pop doesn't have to look at the empty
		// dequeue again.
		if atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&c.tail)), unsafe.Pointer(d), unsafe.Pointer(d2)) {
			// We won the race. Clear the prev pointer so
			// the garbage collector can collect the empty
			// dequeue.
			storePoolChainElt(&d2.prev, nil)
		}
		d = d2
	}
}