// queue.go — priority queues and lock-free buffer chains for the mux package.
  1. package mux
  2. import (
  3. "errors"
  4. "github.com/cnlh/nps/lib/common"
  5. "io"
  6. "math"
  7. "runtime"
  8. "sync"
  9. "sync/atomic"
  10. "time"
  11. "unsafe"
  12. )
// PriorityQueue schedules MuxPackagers across three priority levels:
// highestChain carries ping packages, middleChain carries connection
// control packages, and lowestChain carries ordinary data. starving
// counts consecutive middle-chain pops so lowestChain cannot be starved
// forever (see TryPop).
type PriorityQueue struct {
	highestChain *bufChain
	middleChain  *bufChain
	lowestChain  *bufChain
	starving     uint8 // consecutive pops served from middleChain
	stop         bool  // set by Stop; NOTE(review): read in Pop's fast path without the cond lock — possible data race, confirm
	cond         *sync.Cond
}
  21. func (Self *PriorityQueue) New() {
  22. Self.highestChain = new(bufChain)
  23. Self.highestChain.new(4)
  24. Self.middleChain = new(bufChain)
  25. Self.middleChain.new(32)
  26. Self.lowestChain = new(bufChain)
  27. Self.lowestChain.new(256)
  28. locker := new(sync.Mutex)
  29. Self.cond = sync.NewCond(locker)
  30. }
  31. func (Self *PriorityQueue) Push(packager *common.MuxPackager) {
  32. //logs.Warn("push start")
  33. Self.push(packager)
  34. Self.cond.Broadcast()
  35. //logs.Warn("push finish")
  36. return
  37. }
  38. func (Self *PriorityQueue) push(packager *common.MuxPackager) {
  39. switch packager.Flag {
  40. case common.MUX_PING_FLAG, common.MUX_PING_RETURN:
  41. Self.highestChain.pushHead(unsafe.Pointer(packager))
  42. // the ping package need highest priority
  43. // prevent ping calculation error
  44. case common.MUX_NEW_CONN, common.MUX_NEW_CONN_OK, common.MUX_NEW_CONN_Fail:
  45. // the new conn package need some priority too
  46. Self.middleChain.pushHead(unsafe.Pointer(packager))
  47. default:
  48. Self.lowestChain.pushHead(unsafe.Pointer(packager))
  49. }
  50. }
  51. const maxStarving uint8 = 8
// Pop returns the next packager by priority, blocking until one is
// available or the queue is stopped (then it returns nil). It first
// tries twice without the lock (with a Gosched between the attempts) to
// avoid lock traffic on a busy queue, then falls back to waiting on the
// condition variable.
func (Self *PriorityQueue) Pop() (packager *common.MuxPackager) {
	var iter bool
	for {
		packager = Self.TryPop()
		if packager != nil {
			return
		}
		if Self.stop {
			// NOTE(review): stop is read here without cond.L — confirm under -race
			return
		}
		if iter {
			break
			// trying to pop twice
		}
		iter = true
		runtime.Gosched()
	}
	Self.cond.L.Lock()
	defer Self.cond.L.Unlock()
	for packager = Self.TryPop(); packager == nil; {
		if Self.stop {
			return
		}
		// park until Push broadcasts, then retry TryPop
		Self.cond.Wait()
		// wait for it with no more iter
		packager = Self.TryPop()
	}
	return
}
// TryPop returns a packager without blocking, or nil if every chain is
// empty. Priority order: highestChain always wins; middleChain is tried
// before lowestChain only while starving < maxStarving, and each middle
// pop increments starving; a successful lowest pop halves starving.
// If lowestChain turns out empty, middleChain gets a final chance.
func (Self *PriorityQueue) TryPop() (packager *common.MuxPackager) {
	ptr, ok := Self.highestChain.popTail()
	if ok {
		packager = (*common.MuxPackager)(ptr)
		return
	}
	if Self.starving < maxStarving {
		// not pop too much, lowestChain will wait too long
		ptr, ok = Self.middleChain.popTail()
		if ok {
			packager = (*common.MuxPackager)(ptr)
			Self.starving++
			return
		}
	}
	ptr, ok = Self.lowestChain.popTail()
	if ok {
		packager = (*common.MuxPackager)(ptr)
		if Self.starving > 0 {
			// low-priority data was served; decay the starvation counter
			Self.starving = uint8(Self.starving / 2)
		}
		return
	}
	if Self.starving > 0 {
		// lowestChain was empty: fall back to middleChain even though
		// it may have been skipped (or already found empty) above
		ptr, ok = Self.middleChain.popTail()
		if ok {
			packager = (*common.MuxPackager)(ptr)
			Self.starving++
			return
		}
	}
	return
}
  116. func (Self *PriorityQueue) Stop() {
  117. Self.stop = true
  118. Self.cond.Broadcast()
  119. }
// ConnQueue is a blocking FIFO of *conn built on a single bufChain.
type ConnQueue struct {
	chain    *bufChain
	starving uint8 // NOTE(review): never referenced by ConnQueue's methods in this file — confirm it is needed
	stop     bool  // set by Stop; NOTE(review): read in Pop's fast path without the cond lock — confirm
	cond     *sync.Cond
}
  126. func (Self *ConnQueue) New() {
  127. Self.chain = new(bufChain)
  128. Self.chain.new(32)
  129. locker := new(sync.Mutex)
  130. Self.cond = sync.NewCond(locker)
  131. }
  132. func (Self *ConnQueue) Push(connection *conn) {
  133. Self.chain.pushHead(unsafe.Pointer(connection))
  134. Self.cond.Broadcast()
  135. return
  136. }
// Pop returns the next queued connection, blocking until one is
// available or the queue is stopped (then it returns nil). Like
// PriorityQueue.Pop it tries twice lock-free before waiting on cond.
func (Self *ConnQueue) Pop() (connection *conn) {
	var iter bool
	for {
		connection = Self.TryPop()
		if connection != nil {
			return
		}
		if Self.stop {
			// NOTE(review): stop is read here without cond.L — confirm under -race
			return
		}
		if iter {
			break
			// trying to pop twice
		}
		iter = true
		runtime.Gosched()
	}
	Self.cond.L.Lock()
	defer Self.cond.L.Unlock()
	for connection = Self.TryPop(); connection == nil; {
		if Self.stop {
			return
		}
		// park until Push broadcasts, then retry TryPop
		Self.cond.Wait()
		// wait for it with no more iter
		connection = Self.TryPop()
	}
	return
}
  168. func (Self *ConnQueue) TryPop() (connection *conn) {
  169. ptr, ok := Self.chain.popTail()
  170. if ok {
  171. connection = (*conn)(ptr)
  172. return
  173. }
  174. return
  175. }
  176. func (Self *ConnQueue) Stop() {
  177. Self.stop = true
  178. Self.cond.Broadcast()
  179. }
  180. func NewListElement(buf []byte, l uint16, part bool) (element *common.ListElement, err error) {
  181. if uint16(len(buf)) != l {
  182. err = errors.New("ListElement: buf length not match")
  183. return
  184. }
  185. //if l == 0 {
  186. // logs.Warn("push zero")
  187. //}
  188. element = common.ListElementPool.Get()
  189. element.Buf = buf
  190. element.L = l
  191. element.Part = part
  192. return
  193. }
// ReceiveWindowQueue is the receive-side element queue of a mux window.
// lengthWait packs the queued byte length (high 32 bits) together with
// a reader-waiting flag (low 32 bits) into one word so both can be
// updated by a single CAS; keep it first in the struct so it stays
// 64-bit aligned for sync/atomic on 32-bit platforms.
type ReceiveWindowQueue struct {
	lengthWait uint64 // high 32 bits: queued bytes; low 32 bits: wait flag
	chain      *bufChain
	stopOp     chan struct{} // closed-queue signal, buffered (cap 2) by New
	readOp     chan struct{} // Push -> waitPush wakeup, unbuffered
	timeout    time.Time     // absolute read deadline used by waitPush
}
  201. func (Self *ReceiveWindowQueue) New() {
  202. Self.readOp = make(chan struct{})
  203. Self.chain = new(bufChain)
  204. Self.chain.new(64)
  205. Self.stopOp = make(chan struct{}, 2)
  206. }
// Push appends element to the queue and credits element.L to the packed
// length counter. The CAS loop atomically adds the length and clears
// the wait flag; if a reader had set the flag (wait == 1) it is parked
// in waitPush and must be woken via allowPop after the element is
// actually published to the chain.
func (Self *ReceiveWindowQueue) Push(element *common.ListElement) {
	var length, wait uint32
	for {
		ptrs := atomic.LoadUint64(&Self.lengthWait)
		length, wait = Self.chain.head.unpack(ptrs)
		length += uint32(element.L)
		if atomic.CompareAndSwapUint64(&Self.lengthWait, ptrs, Self.chain.head.pack(length, 0)) {
			break
		}
		// another goroutine changed the length or entered wait; retry
	}
	Self.chain.pushHead(unsafe.Pointer(element))
	if wait == 1 {
		// a reader observed length == 0 and parked before our CAS;
		// hand it the wakeup token
		Self.allowPop()
	}
	return
}
// Pop blocks until at least one element is queued, then dequeues and
// returns it. When the packed length is zero it publishes a wait flag
// via CAS and parks in waitPush; Push clears the flag and signals
// readOp. Returns io.EOF after Stop, or a timeout error when the
// deadline set via SetTimeOut passes.
func (Self *ReceiveWindowQueue) Pop() (element *common.ListElement, err error) {
	var length uint32
startPop:
	ptrs := atomic.LoadUint64(&Self.lengthWait)
	length, _ = Self.chain.head.unpack(ptrs)
	if length == 0 {
		if !atomic.CompareAndSwapUint64(&Self.lengthWait, ptrs, Self.chain.head.pack(0, 1)) {
			goto startPop // another goroutine is pushing
		}
		err = Self.waitPush()
		// there is no more data in queue, wait for it
		if err != nil {
			return
		}
		goto startPop // wait finish, trying to get the new status
	}
	// length is not zero, so try to pop
	for {
		element = Self.TryPop()
		if element != nil {
			return
		}
		runtime.Gosched() // another goroutine is still pushing
	}
}
// TryPop dequeues one element without blocking, or returns nil when the
// chain is empty. On success it debits element.L from the length half
// of lengthWait: adding ^((L<<32)-1) is the two's complement of L<<32,
// i.e. it subtracts L from the high 32 bits while leaving the wait flag
// in the low 32 bits untouched (no borrow can occur).
func (Self *ReceiveWindowQueue) TryPop() (element *common.ListElement) {
	ptr, ok := Self.chain.popTail()
	if ok {
		element = (*common.ListElement)(ptr)
		atomic.AddUint64(&Self.lengthWait, ^(uint64(element.L)<<dequeueBits - 1))
		return
	}
	return nil
}
// allowPop hands one wakeup token to a reader blocked in waitPush.
// readOp is unbuffered, so this blocks until the reader receives or the
// queue is stopped; it reports true in the stopped case.
func (Self *ReceiveWindowQueue) allowPop() (closed bool) {
	select {
	case Self.readOp <- struct{}{}:
		return false
	case <-Self.stopOp:
		return true
	}
}
  271. func (Self *ReceiveWindowQueue) waitPush() (err error) {
  272. //logs.Warn("wait push")
  273. //defer logs.Warn("wait push finish")
  274. t := Self.timeout.Sub(time.Now())
  275. if t <= 0 {
  276. t = time.Minute * 5
  277. }
  278. timer := time.NewTimer(t)
  279. defer timer.Stop()
  280. //logs.Warn("queue into wait")
  281. select {
  282. case <-Self.readOp:
  283. //logs.Warn("queue wait finish")
  284. return nil
  285. case <-Self.stopOp:
  286. err = io.EOF
  287. return
  288. case <-timer.C:
  289. err = errors.New("mux.queue: read time out")
  290. return
  291. }
  292. }
  293. func (Self *ReceiveWindowQueue) Len() (n uint32) {
  294. ptrs := atomic.LoadUint64(&Self.lengthWait)
  295. n, _ = Self.chain.head.unpack(ptrs)
  296. return
  297. }
// Stop releases both parties that may be blocked on this queue: a
// reader in waitPush and a pusher in allowPop. stopOp is buffered with
// capacity 2 (see New), so both sends complete even with no receiver.
// NOTE(review): a second Stop call would block once the buffer is full
// and unconsumed — confirm callers invoke Stop at most once.
func (Self *ReceiveWindowQueue) Stop() {
	Self.stopOp <- struct{}{}
	Self.stopOp <- struct{}{}
}
// SetTimeOut sets the absolute deadline used by waitPush when Pop has
// to block waiting for data.
func (Self *ReceiveWindowQueue) SetTimeOut(t time.Time) {
	Self.timeout = t
}
// bufDequeue is a lock-free ring buffer of unsafe.Pointer values,
// adapted from https://golang.org/src/sync/poolqueue.go.
type bufDequeue struct {
	// headTail packs together a 32-bit head index and a 32-bit
	// tail index. Both are indexes into vals modulo len(vals)-1.
	//
	// tail = index of oldest data in queue
	// head = index of next slot to fill
	//
	// Slots in the range [tail, head) are owned by consumers.
	// A consumer continues to own a slot outside this range until
	// it nils the slot, at which point ownership passes to the
	// producer.
	//
	// The head index is stored in the most-significant bits so
	// that we can atomically add to it and the overflow is
	// harmless.
	headTail uint64
	// vals is a ring buffer of unsafe.Pointer values stored in this
	// dequeue. The size of this must be a power of 2.
	//
	// A slot is still in use until *both* the tail index has moved
	// beyond it and the slot has been set to nil. The slot is set to
	// nil atomically by the consumer and read atomically by the
	// producer.
	vals []unsafe.Pointer
	// starving is raised by a producer that loses the headTail CAS
	// several times in a row; other producers yield (Gosched) while it
	// is non-zero to reduce contention. See pushHead.
	starving uint32
}
// dequeueBits is the width of each packed index inside headTail.
const dequeueBits = 32

// dequeueLimit is the maximum size of a bufDequeue.
//
// This must be at most (1<<dequeueBits)/2 because detecting fullness
// depends on wrapping around the ring buffer without wrapping around
// the index. We divide by 4 so this fits in an int on 32-bit.
const dequeueLimit = (1 << dequeueBits) / 4
  339. func (d *bufDequeue) unpack(ptrs uint64) (head, tail uint32) {
  340. const mask = 1<<dequeueBits - 1
  341. head = uint32((ptrs >> dequeueBits) & mask)
  342. tail = uint32(ptrs & mask)
  343. return
  344. }
  345. func (d *bufDequeue) pack(head, tail uint32) uint64 {
  346. const mask = 1<<dequeueBits - 1
  347. return (uint64(head) << dequeueBits) |
  348. uint64(tail&mask)
  349. }
  350. // pushHead adds val at the head of the queue. It returns false if the
  351. // queue is full.
  352. func (d *bufDequeue) pushHead(val unsafe.Pointer) bool {
  353. var slot *unsafe.Pointer
  354. var starve uint8
  355. if atomic.LoadUint32(&d.starving) > 0 {
  356. runtime.Gosched()
  357. }
  358. for {
  359. ptrs := atomic.LoadUint64(&d.headTail)
  360. head, tail := d.unpack(ptrs)
  361. if (tail+uint32(len(d.vals)))&(1<<dequeueBits-1) == head {
  362. // Queue is full.
  363. return false
  364. }
  365. ptrs2 := d.pack(head+1, tail)
  366. if atomic.CompareAndSwapUint64(&d.headTail, ptrs, ptrs2) {
  367. slot = &d.vals[head&uint32(len(d.vals)-1)]
  368. if starve >= 3 && atomic.LoadUint32(&d.starving) > 0 {
  369. atomic.StoreUint32(&d.starving, 0)
  370. }
  371. break
  372. }
  373. starve++
  374. if starve >= 3 {
  375. atomic.StoreUint32(&d.starving, 1)
  376. }
  377. }
  378. // The head slot is free, so we own it.
  379. *slot = val
  380. return true
  381. }
  382. // popTail removes and returns the element at the tail of the queue.
  383. // It returns false if the queue is empty. It may be called by any
  384. // number of consumers.
  385. func (d *bufDequeue) popTail() (unsafe.Pointer, bool) {
  386. ptrs := atomic.LoadUint64(&d.headTail)
  387. head, tail := d.unpack(ptrs)
  388. if tail == head {
  389. // Queue is empty.
  390. return nil, false
  391. }
  392. slot := &d.vals[tail&uint32(len(d.vals)-1)]
  393. var val unsafe.Pointer
  394. for {
  395. val = atomic.LoadPointer(slot)
  396. if val != nil {
  397. // We now own slot.
  398. break
  399. }
  400. // Another goroutine is still pushing data on the tail.
  401. }
  402. // Tell pushHead that we're done with this slot. Zeroing the
  403. // slot is also important so we don't leave behind references
  404. // that could keep this object live longer than necessary.
  405. //
  406. // We write to val first and then publish that we're done with
  407. atomic.StorePointer(slot, nil)
  408. // At this point pushHead owns the slot.
  409. if tail < math.MaxUint32 {
  410. atomic.AddUint64(&d.headTail, 1)
  411. } else {
  412. atomic.AddUint64(&d.headTail, ^uint64(math.MaxUint32-1))
  413. }
  414. return val, true
  415. }
// bufChain is a dynamically-sized version of bufDequeue.
//
// This is implemented as a doubly-linked list queue of bufDequeues
// where each dequeue is double the size of the previous one. Once a
// dequeue fills up, this allocates a new one and only ever pushes to
// the latest dequeue. Pops happen from the other end of the list and
// once a dequeue is exhausted, it gets removed from the list.
type bufChain struct {
	// head is the bufDequeue to push to. This is only accessed
	// by the producer, so doesn't need to be synchronized.
	head *bufChainElt
	// tail is the bufDequeue to popTail from. This is accessed
	// by consumers, so reads and writes must be atomic.
	tail *bufChainElt
	// newChain is a CAS-guarded flag: the producer that wins it grows
	// the chain while all other producers spin. See pushHead.
	newChain uint32
}
// bufChainElt is one link in a bufChain: a bufDequeue plus its list
// neighbours.
type bufChainElt struct {
	bufDequeue
	// next and prev link to the adjacent bufChainElts in this
	// bufChain.
	//
	// next is written atomically by the producer and read
	// atomically by the consumer. It only transitions from nil to
	// non-nil.
	//
	// prev is written atomically by the consumer and read
	// atomically by the producer. It only transitions from
	// non-nil to nil.
	next, prev *bufChainElt
}
// storePoolChainElt atomically publishes v into the pointer slot *pp.
func storePoolChainElt(pp **bufChainElt, v *bufChainElt) {
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(pp)), unsafe.Pointer(v))
}
// loadPoolChainElt atomically reads the pointer slot *pp.
func loadPoolChainElt(pp **bufChainElt) *bufChainElt {
	return (*bufChainElt)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(pp))))
}
  452. func (c *bufChain) new(initSize int) {
  453. // Initialize the chain.
  454. // initSize must be a power of 2
  455. d := new(bufChainElt)
  456. d.vals = make([]unsafe.Pointer, initSize)
  457. storePoolChainElt(&c.head, d)
  458. storePoolChainElt(&c.tail, d)
  459. }
// pushHead adds val to the chain, growing it when the current head
// dequeue is full. Exactly one producer may grow at a time: the CAS on
// newChain elects the grower, and every other producer spins at
// startPush until newChain drops back to 0 and the new head is visible.
func (c *bufChain) pushHead(val unsafe.Pointer) {
startPush:
	for {
		if atomic.LoadUint32(&c.newChain) > 0 {
			// a new dequeue is being installed; wait for it
			runtime.Gosched()
		} else {
			break
		}
	}
	d := loadPoolChainElt(&c.head)
	if d.pushHead(val) {
		return
	}
	// The current dequeue is full. Allocate a new one of twice
	// the size.
	if atomic.CompareAndSwapUint32(&c.newChain, 0, 1) {
		newSize := len(d.vals) * 2
		if newSize >= dequeueLimit {
			// Can't make it any bigger.
			newSize = dequeueLimit
		}
		d2 := &bufChainElt{prev: d}
		d2.vals = make([]unsafe.Pointer, newSize)
		// the new dequeue is empty and unpublished, so this cannot fail
		d2.pushHead(val)
		storePoolChainElt(&c.head, d2)
		storePoolChainElt(&d.next, d2)
		atomic.StoreUint32(&c.newChain, 0)
		return
	}
	// lost the growth election; wait for the winner, then retry on the
	// freshly installed head
	goto startPush
}
// popTail removes and returns the oldest element in the chain, walking
// past (and unlinking) exhausted dequeues. It returns false only when
// the whole chain is empty.
func (c *bufChain) popTail() (unsafe.Pointer, bool) {
	d := loadPoolChainElt(&c.tail)
	if d == nil {
		// chain not initialized yet
		return nil, false
	}
	for {
		// It's important that we load the next pointer
		// *before* popping the tail. In general, d may be
		// transiently empty, but if next is non-nil before
		// the pop and the pop fails, then d is permanently
		// empty, which is the only condition under which it's
		// safe to drop d from the chain.
		d2 := loadPoolChainElt(&d.next)
		if val, ok := d.popTail(); ok {
			return val, ok
		}
		if d2 == nil {
			// This is the only dequeue. It's empty right
			// now, but could be pushed to in the future.
			return nil, false
		}
		// The tail of the chain has been drained, so move on
		// to the next dequeue. Try to drop it from the chain
		// so the next pop doesn't have to look at the empty
		// dequeue again.
		if atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&c.tail)), unsafe.Pointer(d), unsafe.Pointer(d2)) {
			// We won the race. Clear the prev pointer so
			// the garbage collector can collect the empty
			// dequeue and so popHead doesn't back up
			// further than necessary.
			storePoolChainElt(&d2.prev, nil)
		}
		d = d2
	}
}
  525. }