queue.go

package mux

import (
	"errors"
	"io"
	"math"
	"runtime"
	"sync"
	"sync/atomic"
	"time"
	"unsafe"

	"github.com/cnlh/nps/lib/common"
)

// PriorityQueue dispatches outgoing mux packagers in three priority
// classes: ping traffic first, connection control second, data last.
type PriorityQueue struct {
	highestChain *bufChain
	middleChain  *bufChain
	lowestChain  *bufChain
	starving     uint8
	stop         bool
	cond         *sync.Cond
}

func (Self *PriorityQueue) New() {
	Self.highestChain = new(bufChain)
	Self.highestChain.new(4)
	Self.middleChain = new(bufChain)
	Self.middleChain.new(32)
	Self.lowestChain = new(bufChain)
	Self.lowestChain.new(256)
	locker := new(sync.Mutex)
	Self.cond = sync.NewCond(locker)
}

func (Self *PriorityQueue) Push(packager *common.MuxPackager) {
	//logs.Warn("push start")
	Self.push(packager)
	Self.cond.Broadcast()
	//logs.Warn("push finish")
}

func (Self *PriorityQueue) push(packager *common.MuxPackager) {
	switch packager.Flag {
	case common.MUX_PING_FLAG, common.MUX_PING_RETURN:
		// Ping packets need the highest priority so that queued
		// data does not skew the ping time calculation.
		Self.highestChain.pushHead(unsafe.Pointer(packager))
	case common.MUX_NEW_CONN, common.MUX_NEW_CONN_OK, common.MUX_NEW_CONN_Fail:
		// New-connection control packets need some priority too.
		Self.middleChain.pushHead(unsafe.Pointer(packager))
	default:
		Self.lowestChain.pushHead(unsafe.Pointer(packager))
	}
}

const maxStarving uint8 = 8

func (Self *PriorityQueue) Pop() (packager *common.MuxPackager) {
	var iter bool
	for {
		packager = Self.TryPop()
		if packager != nil {
			return
		}
		if Self.stop {
			return
		}
		if iter {
			// Already tried to pop twice; fall through to the
			// blocking wait.
			break
		}
		iter = true
		runtime.Gosched()
	}
	Self.cond.L.Lock()
	defer Self.cond.L.Unlock()
	for packager = Self.TryPop(); packager == nil; {
		if Self.stop {
			return
		}
		//logs.Warn("queue into wait")
		Self.cond.Wait()
		// Woken by a Push broadcast; try again without more spinning.
		packager = Self.TryPop()
		//logs.Warn("queue wait finish", packager)
	}
	return
}

// TryPop returns the next packager without blocking, or nil if all
// chains are empty. The starving counter bounds how many middle-chain
// pops can happen in a row before the lowest chain gets a turn.
func (Self *PriorityQueue) TryPop() (packager *common.MuxPackager) {
	ptr, ok := Self.highestChain.popTail()
	if ok {
		packager = (*common.MuxPackager)(ptr)
		return
	}
	if Self.starving < maxStarving {
		// Don't pop the middle chain too often, or the lowest
		// chain would wait too long.
		ptr, ok = Self.middleChain.popTail()
		if ok {
			packager = (*common.MuxPackager)(ptr)
			Self.starving++
			return
		}
	}
	ptr, ok = Self.lowestChain.popTail()
	if ok {
		packager = (*common.MuxPackager)(ptr)
		if Self.starving > 0 {
			Self.starving = Self.starving / 2
		}
		return
	}
	if Self.starving > 0 {
		ptr, ok = Self.middleChain.popTail()
		if ok {
			packager = (*common.MuxPackager)(ptr)
			Self.starving++
			return
		}
	}
	return
}

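// Worked example of the starving counter: eight middle-chain pops in a
// row drive starving up to maxStarving (8), after which the middle
// chain is skipped until a lowest-chain pop halves the counter
// (8 -> 4), letting up to four more middle-chain pops through again.
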
func (Self *PriorityQueue) Stop() {
	Self.stop = true
	Self.cond.Broadcast()
}

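// A minimal usage sketch (illustrative only; in this repo the mux
// session code drives these calls):
//
//	queue := new(PriorityQueue)
//	queue.New()
//	go queue.Push(pkg) // pkg is a *common.MuxPackager; Flag picks the chain
//	for {
//		pkg := queue.Pop() // blocks until a packager arrives or Stop is called
//		if pkg == nil {
//			break // queue was stopped
//		}
//		// ... write pkg to the underlying connection ...
//	}
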
type ConnQueue struct {
	chain    *bufChain
	starving uint8
	stop     bool
	cond     *sync.Cond
}

func (Self *ConnQueue) New() {
	Self.chain = new(bufChain)
	Self.chain.new(32)
	locker := new(sync.Mutex)
	Self.cond = sync.NewCond(locker)
}

func (Self *ConnQueue) Push(connection *conn) {
	Self.chain.pushHead(unsafe.Pointer(connection))
	Self.cond.Broadcast()
}

func (Self *ConnQueue) Pop() (connection *conn) {
	var iter bool
	for {
		connection = Self.TryPop()
		if connection != nil {
			return
		}
		if Self.stop {
			return
		}
		if iter {
			// Already tried to pop twice; fall through to the
			// blocking wait.
			break
		}
		iter = true
		runtime.Gosched()
	}
	Self.cond.L.Lock()
	defer Self.cond.L.Unlock()
	for connection = Self.TryPop(); connection == nil; {
		if Self.stop {
			return
		}
		//logs.Warn("queue into wait")
		Self.cond.Wait()
		// Woken by a Push broadcast; try again without more spinning.
		connection = Self.TryPop()
		//logs.Warn("queue wait finish", connection)
	}
	return
}

func (Self *ConnQueue) TryPop() (connection *conn) {
	ptr, ok := Self.chain.popTail()
	if ok {
		connection = (*conn)(ptr)
	}
	return
}

func (Self *ConnQueue) Stop() {
	Self.stop = true
	Self.cond.Broadcast()
}

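// ConnQueue follows the same Push/Pop/Stop pattern as PriorityQueue but
// carries *conn values in a single chain (illustrative):
//
//	var cq ConnQueue
//	cq.New()
//	cq.Push(c)       // c is a *conn accepted elsewhere
//	next := cq.Pop() // blocks; returns nil once Stop is called
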
// NewListElement wraps buf in a pooled ListElement. l must equal the
// buffer length; part marks the element as part of a larger read.
func NewListElement(buf []byte, l uint16, part bool) (element *common.ListElement, err error) {
	if uint16(len(buf)) != l {
		err = errors.New("ListElement: buf length does not match")
		return
	}
	//if l == 0 {
	//	logs.Warn("push zero")
	//}
	element = common.ListElementPool.Get()
	element.Buf = buf
	element.L = l
	element.Part = part
	return
}

type ReceiveWindowQueue struct {
	// lengthWait packs the queued byte length (high 32 bits) together
	// with a reader-waiting flag (low 32 bits) and is accessed with
	// 64-bit atomics, so it must stay 64-bit aligned. Keeping it as
	// the first field guarantees that on 32-bit platforms; see
	// https://golang.org/pkg/sync/atomic/#pkg-note-BUG:
	// on ARM, x86-32, and 32-bit MIPS it is the caller's
	// responsibility to arrange for 64-bit alignment of 64-bit words
	// accessed atomically, and the first word in an allocated struct
	// can be relied upon to be 64-bit aligned.
	lengthWait uint64
	chain      *bufChain
	stopOp     chan struct{}
	readOp     chan struct{}
	timeout    time.Time
}

func (Self *ReceiveWindowQueue) New() {
	Self.readOp = make(chan struct{})
	Self.chain = new(bufChain)
	Self.chain.new(64)
	Self.stopOp = make(chan struct{}, 2)
}

// Push appends element and adds its length to the packed counter,
// clearing the wait flag. If a reader was waiting, it is woken up.
func (Self *ReceiveWindowQueue) Push(element *common.ListElement) {
	var length, wait uint32
	for {
		ptrs := atomic.LoadUint64(&Self.lengthWait)
		length, wait = Self.chain.head.unpack(ptrs)
		length += uint32(element.L)
		if atomic.CompareAndSwapUint64(&Self.lengthWait, ptrs, Self.chain.head.pack(length, 0)) {
			break
		}
		// Another goroutine changed the length or set the wait
		// flag; reload and retry the CAS.
	}
	//logs.Warn("window push before", Self.Len(), uint32(element.L), len(element.Buf))
	Self.chain.pushHead(unsafe.Pointer(element))
	//logs.Warn("window push", Self.Len())
	if wait == 1 {
		Self.allowPop()
	}
}

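// Illustration of the lengthWait packing used above (length in the high
// 32 bits, wait flag in the low 32 bits):
//
//	ptrs := uint64(4096)<<dequeueBits | 1       // length=4096, wait=1
//	length := uint32(ptrs >> dequeueBits)       // 4096
//	wait := uint32(ptrs & (1<<dequeueBits - 1)) // 1
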
// Pop blocks until an element is available, the queue is stopped
// (io.EOF), or the deadline set by SetTimeOut expires.
func (Self *ReceiveWindowQueue) Pop() (element *common.ListElement, err error) {
	var length uint32
startPop:
	ptrs := atomic.LoadUint64(&Self.lengthWait)
	length, _ = Self.chain.head.unpack(ptrs)
	if length == 0 {
		if !atomic.CompareAndSwapUint64(&Self.lengthWait, ptrs, Self.chain.head.pack(0, 1)) {
			goto startPop // another goroutine is pushing
		}
		// No data in the queue; the wait flag is now set, so block
		// until a Push wakes us up.
		err = Self.waitPush()
		if err != nil {
			return
		}
		goto startPop // wait finished; re-read the new state
	}
	// Length is non-zero, so an element is, or will shortly be, linked in.
	for {
		element = Self.TryPop()
		if element != nil {
			return
		}
		// The pusher has updated the count but not linked the
		// element yet; yield and retry.
		runtime.Gosched()
	}
}

func (Self *ReceiveWindowQueue) TryPop() (element *common.ListElement) {
	ptr, ok := Self.chain.popTail()
	if ok {
		//logs.Warn("window pop before", Self.Len())
		element = (*common.ListElement)(ptr)
		// Subtract element.L from the high 32 bits: in two's
		// complement, adding ^(x<<32 - 1) is adding -(x<<32).
		atomic.AddUint64(&Self.lengthWait, ^(uint64(element.L)<<dequeueBits - 1))
		//logs.Warn("window pop", Self.Len(), uint32(element.L))
		return
	}
	return nil
}

func (Self *ReceiveWindowQueue) allowPop() (closed bool) {
	//logs.Warn("allow pop", Self.Len())
	select {
	case Self.readOp <- struct{}{}:
		return false
	case <-Self.stopOp:
		return true
	}
}

func (Self *ReceiveWindowQueue) waitPush() (err error) {
	//logs.Warn("wait push")
	//defer logs.Warn("wait push finish")
	t := Self.timeout.Sub(time.Now())
	if t <= 0 {
		// No deadline was set (or it has already passed), so wait
		// without a timeout, just like a plain TCP connection.
		select {
		case <-Self.readOp:
			return nil
		case <-Self.stopOp:
			err = io.EOF
			return
		}
	}
	timer := time.NewTimer(t)
	defer timer.Stop()
	//logs.Warn("queue into wait")
	select {
	case <-Self.readOp:
		//logs.Warn("queue wait finish")
		return nil
	case <-Self.stopOp:
		err = io.EOF
		return
	case <-timer.C:
		err = errors.New("mux.queue: read time out")
		return
	}
}

func (Self *ReceiveWindowQueue) Len() (n uint32) {
	ptrs := atomic.LoadUint64(&Self.lengthWait)
	n, _ = Self.chain.head.unpack(ptrs)
	return
}

// Stop sends twice because both a blocked waitPush and a blocked
// allowPop may need to be released.
func (Self *ReceiveWindowQueue) Stop() {
	Self.stopOp <- struct{}{}
	Self.stopOp <- struct{}{}
}

func (Self *ReceiveWindowQueue) SetTimeOut(t time.Time) {
	Self.timeout = t
}

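// A minimal read-side sketch (illustrative; buffers and error handling
// come from the surrounding session code):
//
//	q := new(ReceiveWindowQueue)
//	q.New()
//	q.SetTimeOut(time.Now().Add(30 * time.Second))
//	el, err := NewListElement(data, uint16(len(data)), false)
//	if err == nil {
//		q.Push(el)
//	}
//	out, err := q.Pop() // blocks until Push, Stop (io.EOF), or the deadline
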
// Adapted from https://golang.org/src/sync/poolqueue.go
type bufDequeue struct {
	// headTail packs together a 32-bit head index and a 32-bit
	// tail index. Both are indexes into vals modulo len(vals)-1.
	//
	// tail = index of oldest data in queue
	// head = index of next slot to fill
	//
	// Slots in the range [tail, head) are owned by consumers.
	// A consumer continues to own a slot outside this range until
	// it nils the slot, at which point ownership passes to the
	// producer.
	//
	// The head index is stored in the most-significant bits so
	// that we can atomically add to it and the overflow is
	// harmless.
	headTail uint64
	// vals is a ring buffer of unsafe.Pointer values stored in this
	// dequeue. The size of this must be a power of 2.
	//
	// A slot is still in use until *both* the tail index has moved
	// beyond it and the slot has been set to nil. The slot is set
	// to nil atomically by the consumer and read atomically by the
	// producer.
	vals     []unsafe.Pointer
	starving uint32
}

const dequeueBits = 32

// dequeueLimit is the maximum size of a bufDequeue.
//
// This must be at most (1<<dequeueBits)/2 because detecting fullness
// depends on wrapping around the ring buffer without wrapping around
// the index. We divide by 4 so this fits in an int on 32-bit.
const dequeueLimit = (1 << dequeueBits) / 4

func (d *bufDequeue) unpack(ptrs uint64) (head, tail uint32) {
	const mask = 1<<dequeueBits - 1
	head = uint32((ptrs >> dequeueBits) & mask)
	tail = uint32(ptrs & mask)
	return
}

func (d *bufDequeue) pack(head, tail uint32) uint64 {
	const mask = 1<<dequeueBits - 1
	return (uint64(head) << dequeueBits) |
		uint64(tail&mask)
}

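// Round-trip example (illustrative):
//
//	p := d.pack(3, 1)   // 0x0000000300000001
//	h, t := d.unpack(p) // h == 3, t == 1
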
// pushHead adds val at the head of the queue. It returns false if the
// queue is full. The CAS loop allows multiple concurrent producers;
// the starving flag makes winners yield when another producer keeps
// losing the race.
func (d *bufDequeue) pushHead(val unsafe.Pointer) bool {
	var slot *unsafe.Pointer
	var starve uint8
	if atomic.LoadUint32(&d.starving) > 0 {
		// A producer is repeatedly losing the CAS race; yield so
		// it can make progress.
		runtime.Gosched()
	}
	for {
		ptrs := atomic.LoadUint64(&d.headTail)
		head, tail := d.unpack(ptrs)
		if (tail+uint32(len(d.vals)))&(1<<dequeueBits-1) == head {
			// Queue is full.
			return false
		}
		ptrs2 := d.pack(head+1, tail)
		if atomic.CompareAndSwapUint64(&d.headTail, ptrs, ptrs2) {
			slot = &d.vals[head&uint32(len(d.vals)-1)]
			if starve >= 3 && atomic.LoadUint32(&d.starving) > 0 {
				atomic.StoreUint32(&d.starving, 0)
			}
			break
		}
		starve++
		if starve >= 3 {
			atomic.StoreUint32(&d.starving, 1)
		}
	}
	// We won the head slot; publish the value.
	*slot = val
	return true
}

// popTail removes and returns the element at the tail of the queue.
// It returns false if the queue is empty. It may be called by any
// number of consumers.
func (d *bufDequeue) popTail() (unsafe.Pointer, bool) {
	ptrs := atomic.LoadUint64(&d.headTail)
	head, tail := d.unpack(ptrs)
	if tail == head {
		// Queue is empty.
		return nil, false
	}
	slot := &d.vals[tail&uint32(len(d.vals)-1)]
	var val unsafe.Pointer
	for {
		val = atomic.LoadPointer(slot)
		if val != nil {
			// We now own slot.
			break
		}
		// Another goroutine is still pushing data on the tail.
	}
	// Tell pushHead that we're done with this slot. Zeroing the
	// slot is also important so we don't leave behind references
	// that could keep this object live longer than necessary.
	//
	// We read val first and then publish that we're done with the
	// slot by atomically storing nil.
	atomic.StorePointer(slot, nil)
	// At this point pushHead owns the slot. Advance tail by one,
	// wrapping the 32-bit index without carrying into head.
	if tail < math.MaxUint32 {
		atomic.AddUint64(&d.headTail, 1)
	} else {
		// tail is MaxUint32, so adding 1 would carry into head.
		// Adding ^uint64(math.MaxUint32-1) (i.e. -MaxUint32 mod
		// 2^64) wraps tail back to 0 and leaves head unchanged.
		atomic.AddUint64(&d.headTail, ^uint64(math.MaxUint32-1))
	}
	return val, true
}

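// Push/pop sketch on a bare dequeue (illustrative):
//
//	d := &bufDequeue{vals: make([]unsafe.Pointer, 8)} // length must be a power of 2
//	v := new(conn)
//	if d.pushHead(unsafe.Pointer(v)) {
//		if p, ok := d.popTail(); ok {
//			_ = (*conn)(p)
//		}
//	}
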
// bufChain is a dynamically-sized version of bufDequeue.
//
// This is implemented as a doubly-linked list queue of bufDequeues
// where each dequeue is double the size of the previous one. Once a
// dequeue fills up, this allocates a new one and only ever pushes to
// the latest dequeue. Pops happen from the other end of the list and
// once a dequeue is exhausted, it gets removed from the list.
type bufChain struct {
	// head is the bufDequeue to push to. Multiple producers may
	// push, so it is read and written atomically.
	head *bufChainElt
	// tail is the bufDequeue to popTail from. This is accessed
	// by consumers, so reads and writes must be atomic.
	tail *bufChainElt
	// newChain is set to 1 while a producer allocates the next,
	// larger dequeue; other producers spin until it returns to 0.
	newChain uint32
}

type bufChainElt struct {
	bufDequeue
	// next and prev link to the adjacent bufChainElts in this
	// bufChain.
	//
	// next is written atomically by the producer and read
	// atomically by the consumer. It only transitions from nil to
	// non-nil.
	//
	// prev is written atomically by the consumer and read
	// atomically by the producer. It only transitions from
	// non-nil to nil.
	next, prev *bufChainElt
}

func storePoolChainElt(pp **bufChainElt, v *bufChainElt) {
	atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(pp)), unsafe.Pointer(v))
}

func loadPoolChainElt(pp **bufChainElt) *bufChainElt {
	return (*bufChainElt)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(pp))))
}

func (c *bufChain) new(initSize int) {
	// Initialize the chain. initSize must be a power of 2, since
	// bufDequeue masks ring indexes with len(vals)-1.
	d := new(bufChainElt)
	d.vals = make([]unsafe.Pointer, initSize)
	storePoolChainElt(&c.head, d)
	storePoolChainElt(&c.tail, d)
}

func (c *bufChain) pushHead(val unsafe.Pointer) {
startPush:
	for {
		if atomic.LoadUint32(&c.newChain) > 0 {
			// Another producer is allocating the next dequeue;
			// yield until it finishes.
			runtime.Gosched()
		} else {
			break
		}
	}
	d := loadPoolChainElt(&c.head)
	if d.pushHead(val) {
		return
	}
	// The current dequeue is full. Allocate a new one of twice
	// the size.
	if atomic.CompareAndSwapUint32(&c.newChain, 0, 1) {
		newSize := len(d.vals) * 2
		if newSize >= dequeueLimit {
			// Can't make it any bigger.
			newSize = dequeueLimit
		}
		d2 := &bufChainElt{prev: d}
		d2.vals = make([]unsafe.Pointer, newSize)
		d2.pushHead(val)
		storePoolChainElt(&c.head, d2)
		storePoolChainElt(&d.next, d2)
		atomic.StoreUint32(&c.newChain, 0)
		return
	}
	// Lost the race to allocate; retry from the top.
	goto startPush
}

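// Capacity growth sketch: a chain started with new(4) doubles through
// 8, 16, 32, ... as each dequeue fills, capped at dequeueLimit
// (illustrative):
//
//	var c bufChain
//	c.new(4)
//	for i := range elems {
//		c.pushHead(unsafe.Pointer(&elems[i])) // never fails; the chain grows instead
//	}
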
func (c *bufChain) popTail() (unsafe.Pointer, bool) {
	d := loadPoolChainElt(&c.tail)
	if d == nil {
		return nil, false
	}
	for {
		// It's important that we load the next pointer
		// *before* popping the tail. In general, d may be
		// transiently empty, but if next is non-nil before
		// the pop and the pop fails, then d is permanently
		// empty, which is the only condition under which it's
		// safe to drop d from the chain.
		d2 := loadPoolChainElt(&d.next)
		if val, ok := d.popTail(); ok {
			return val, ok
		}
		if d2 == nil {
			// This is the only dequeue. It's empty right
			// now, but could be pushed to in the future.
			return nil, false
		}
		// The tail of the chain has been drained, so move on
		// to the next dequeue. Try to drop it from the chain
		// so the next pop doesn't have to look at the empty
		// dequeue again.
		if atomic.CompareAndSwapPointer((*unsafe.Pointer)(unsafe.Pointer(&c.tail)), unsafe.Pointer(d), unsafe.Pointer(d2)) {
			// We won the race. Clear the prev pointer so
			// the garbage collector can collect the empty
			// dequeue.
			storePoolChainElt(&d2.prev, nil)
		}
		d = d2
	}
}