ImplabNet: annotate Implab/Parallels/AsyncQueue.cs @ 237:f2150c16b476 (branch v2)

description: missing files
author:      cin
date:        Wed, 22 Nov 2017 16:54:58 +0300
parents:     8dd666e6b6bf
children:
using System.Threading;
using System.Collections.Generic;
using System;
using System.Collections;
using System.Diagnostics;
using System.Runtime.CompilerServices;

namespace Implab.Parallels {
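    /// <summary>
    /// A thread-safe unbounded queue built from a linked list of fixed-size array
    /// chunks; items are appended to the last chunk and consumed from the first one,
    /// and producers and consumers coordinate with interlocked operations and spin
    /// waits rather than locks.
    /// </summary>
    /// <example>
    /// An illustrative usage sketch (not part of the original source), assuming
    /// producer and consumer code running on arbitrary threads:
    /// <code>
    /// var queue = new AsyncQueue&lt;int&gt;();
    /// queue.Enqueue(1);
    /// queue.EnqueueRange(new[] { 2, 3, 4 }, 0, 3);
    ///
    /// int item;
    /// while (queue.TryDequeue(out item))
    ///     Console.WriteLine(item);
    /// </code>
    /// </example>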
    public class AsyncQueue<T> : IEnumerable<T> {
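        /// <summary>
        /// A single fixed-size segment of the queue. <c>m_alloc</c> tracks slots
        /// reserved by writers, <c>m_hi</c> marks the data already published and
        /// <c>m_low</c> marks the data already consumed; a chunk whose low index
        /// reaches its size is exhausted and can be unlinked (recycled).
        /// </summary>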
        class Chunk {
            public volatile Chunk next;

            volatile int m_low;
            volatile int m_hi;
            volatile int m_alloc;
            readonly int m_size;
            readonly T[] m_data;

            public Chunk(int size) {
                m_size = size;
                m_data = new T[size];
            }

            public Chunk(int size, T value) {
                m_size = size;
                m_hi = 1;
                m_alloc = 1;
                m_data = new T[size];
                m_data[0] = value;
            }

            public Chunk(int size, int allocated) {
                m_size = size;
                m_hi = allocated;
                m_alloc = allocated;
                m_data = new T[size];
            }

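            /// <summary>
            /// Copies a block of data directly into the chunk storage; the caller
            /// is expected to have reserved the destination slots beforehand
            /// (this is used by <c>EnqueueRange</c> after preallocating the chunk).
            /// </summary>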
            public void WriteData(T[] data, int offset, int dest, int length) {
                Array.Copy(data, offset, m_data, dest, length);
            }

            public int Low {
                get { return m_low; }
            }

            public int Hi {
                get { return m_hi; }
            }

            public int Size {
                get { return m_size; }
            }

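            /// <summary>
            /// Spins until <c>m_hi</c> reaches <paramref name="mark"/>, that is,
            /// until all writers that reserved slots below the mark have published
            /// their values.
            /// </summary>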
            [MethodImpl(MethodImplOptions.AggressiveInlining)]
            void AwaitWrites(int mark) {
                if (m_hi != mark) {
                    SpinWait spin = new SpinWait();
                    do {
                        spin.SpinOnce();
                    } while (m_hi != mark);
                }
            }

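            /// <summary>
            /// Tries to append a single value to the chunk; returns <c>false</c>
            /// when the chunk has no free slots left.
            /// </summary>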
            public bool TryEnqueue(T value) {
                int alloc;
                do {
                    alloc = m_alloc;
                    if (alloc >= m_size)
                        return false;
                } while (alloc != Interlocked.CompareExchange(ref m_alloc, alloc + 1, alloc));

                m_data[alloc] = value;

                AwaitWrites(alloc);
                m_hi = alloc + 1;

                return true;
            }

            /// <summary>
            /// Prevents any further allocation of space in the chunk and waits for all pending write operations to complete.
            /// </summary>
            public void Seal() {
                var actual = Math.Min(Interlocked.Exchange(ref m_alloc, m_size), m_size);
                AwaitWrites(actual);
            }

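            /// <summary>
            /// Tries to take the next value from the chunk. <paramref name="recycle"/>
            /// is set to <c>true</c> once the read position reaches the chunk size,
            /// which means the chunk is exhausted and can be unlinked.
            /// </summary>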
            public bool TryDequeue(out T value, out bool recycle) {
                int low;
                do {
                    low = m_low;
                    if (low >= m_hi) {
                        value = default(T);
                        recycle = (low == m_size);
                        return false;
                    }
                } while (low != Interlocked.CompareExchange(ref m_low, low + 1, low));

                recycle = (low + 1 == m_size);
                value = m_data[low];

                return true;
            }

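            /// <summary>
            /// Tries to append as many items from the batch as the chunk can hold;
            /// <paramref name="enqueued"/> receives the number of items actually written.
            /// </summary>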
            public bool TryEnqueueBatch(T[] batch, int offset, int length, out int enqueued) {
                int alloc;
                do {
                    alloc = m_alloc;
                    if (alloc >= m_size) {
                        enqueued = 0;
                        return false;
                    } else {
                        enqueued = Math.Min(length, m_size - alloc);
                    }
                } while (alloc != Interlocked.CompareExchange(ref m_alloc, alloc + enqueued, alloc));

                Array.Copy(batch, offset, m_data, alloc, enqueued);

                AwaitWrites(alloc);
                m_hi = alloc + enqueued;
                return true;
            }

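            /// <summary>
            /// Tries to copy up to <paramref name="length"/> items from the chunk into
            /// the buffer; <paramref name="dequeued"/> receives the number of items read
            /// and <paramref name="recycle"/> is set when the chunk becomes exhausted.
            /// </summary>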
            public bool TryDequeueBatch(T[] buffer, int offset, int length, out int dequeued, out bool recycle) {
                int low, hi, batchSize;

                do {
                    low = m_low;
                    hi = m_hi;
                    if (low >= hi) {
                        dequeued = 0;
                        recycle = (low == m_size);
                        return false;
                    }
                    batchSize = Math.Min(hi - low, length);
                } while (low != Interlocked.CompareExchange(ref m_low, low + batchSize, low));

                dequeued = batchSize;
                recycle = (low + batchSize == m_size);
                Array.Copy(m_data, low, buffer, offset, batchSize);

                return true;
            }

            public T GetAt(int pos) {
                return m_data[pos];
            }
        }

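        // Enqueue extends the queue with DEFAULT_CHUNK_SIZE chunks; EnqueueRange may
        // create chunks of up to MAX_CHUNK_SIZE items when writing large batches.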
        public const int DEFAULT_CHUNK_SIZE = 32;
        public const int MAX_CHUNK_SIZE = 256;

        Chunk m_first;
        Chunk m_last;

        public AsyncQueue() {
            m_first = m_last = new Chunk(DEFAULT_CHUNK_SIZE);
        }

        /// <summary>
        /// Adds the specified value to the queue.
        /// </summary>
        /// <param name="value">The value which will be added to the queue.</param>
        public void Enqueue(T value) {
            var last = m_last;
            SpinWait spin = new SpinWait();
            while (!last.TryEnqueue(value)) {
                // try to extend the queue
                var chunk = new Chunk(DEFAULT_CHUNK_SIZE, value);
                var t = Interlocked.CompareExchange(ref m_last, chunk, last);
                if (t == last) {
                    last.next = chunk;
                    break;
                } else {
                    last = t;
                }
                spin.SpinOnce();
            }
        }

        /// <summary>
        /// Adds the specified data to the queue.
        /// </summary>
        /// <param name="data">The buffer which contains the data to be enqueued.</param>
        /// <param name="offset">The offset of the data in the buffer.</param>
        /// <param name="length">The size of the data to read from the buffer.</param>
        public void EnqueueRange(T[] data, int offset, int length) {
            if (data == null)
                throw new ArgumentNullException("data");
            if (offset < 0)
                throw new ArgumentOutOfRangeException("offset");
            if (length < 1 || offset + length > data.Length)
                throw new ArgumentOutOfRangeException("length");

            while (length > 0) {
                var last = m_last;
                int enqueued;

                if (last.TryEnqueueBatch(data, offset, length, out enqueued)) {
                    length -= enqueued;
                    offset += enqueued;
                }

                if (length > 0) {
                    // we still have something to enqueue

                    var tail = length % MAX_CHUNK_SIZE;

                    var chunk = new Chunk(Math.Max(tail, DEFAULT_CHUNK_SIZE), tail);

                    if (last != Interlocked.CompareExchange(ref m_last, chunk, last))
                        continue; // we weren't able to catch the writer, retry

                    // we are lucky: we can write our batch exclusively,
                    // the other writers will continue their work

                    length -= tail;

                    for (var i = 0; i < length; i += MAX_CHUNK_SIZE) {
                        var node = new Chunk(MAX_CHUNK_SIZE, MAX_CHUNK_SIZE);
                        node.WriteData(data, offset, 0, MAX_CHUNK_SIZE);
                        offset += MAX_CHUNK_SIZE;
                        // fence: last.next is volatile
                        last.next = node;
                        last = node;
                    }

                    if (tail > 0)
                        chunk.WriteData(data, offset, 0, tail);

                    // fence: last.next is volatile
                    last.next = chunk;
                    return;
                }
            }
        }

        /// <summary>
        /// Tries to retrieve the first element from the queue.
        /// </summary>
        /// <returns><c>true</c>, if an element was dequeued, <c>false</c> otherwise.</returns>
        /// <param name="value">The value of the dequeued element.</param>
        public bool TryDequeue(out T value) {
            var chunk = m_first;
            do {
                bool recycle;

                var result = chunk.TryDequeue(out value, out recycle);

                if (recycle && chunk.next != null) {
                    // this chunk is waste
                    chunk = Interlocked.CompareExchange(ref m_first, chunk.next, chunk);
                } else {
                    return result; // this chunk is usable and returned the actual result
                }

                if (result) // this chunk is waste but a true result is always valid
                    return true;
            } while (true);
        }

        /// <summary>
        /// Tries to dequeue the specified amount of data from the queue.
        /// </summary>
        /// <returns><c>true</c>, if data was dequeued, <c>false</c> otherwise.</returns>
        /// <param name="buffer">The buffer to which the data will be written.</param>
        /// <param name="offset">The offset in the buffer at which the data will be written.</param>
        /// <param name="length">The maximum amount of data to be retrieved.</param>
        /// <param name="dequeued">The actual amount of the retrieved data.</param>
        public bool TryDequeueRange(T[] buffer, int offset, int length, out int dequeued) {
            if (buffer == null)
                throw new ArgumentNullException("buffer");
            if (offset < 0)
                throw new ArgumentOutOfRangeException("offset");
            if (length < 1 || offset + length > buffer.Length)
                throw new ArgumentOutOfRangeException("length");

            var chunk = m_first;
            dequeued = 0;
            do {
                bool recycle;
                int actual;
                if (chunk.TryDequeueBatch(buffer, offset, length, out actual, out recycle)) {
                    offset += actual;
                    length -= actual;
                    dequeued += actual;
                }

                if (recycle && chunk.next != null) {
                    // this chunk is waste
                    chunk = Interlocked.CompareExchange(ref m_first, chunk.next, chunk);
                } else {
                    chunk = null;
                }

                if (length == 0)
                    return true;
            } while (chunk != null);

            return dequeued != 0;
        }

        /// <summary>
        /// Tries to dequeue all remaining data in the first chunk.
        /// </summary>
        /// <returns><c>true</c>, if data was dequeued, <c>false</c> otherwise.</returns>
        /// <param name="buffer">The buffer to which the data will be written.</param>
        /// <param name="offset">The offset in the buffer at which the data will be written.</param>
        /// <param name="length">The maximum amount of the data to be dequeued.</param>
        /// <param name="dequeued">The actual amount of the dequeued data.</param>
        public bool TryDequeueChunk(T[] buffer, int offset, int length, out int dequeued) {
            if (buffer == null)
                throw new ArgumentNullException("buffer");
            if (offset < 0)
                throw new ArgumentOutOfRangeException("offset");
            if (length < 1 || offset + length > buffer.Length)
                throw new ArgumentOutOfRangeException("length");

            var chunk = m_first;
            do {
                bool recycle;
                chunk.TryDequeueBatch(buffer, offset, length, out dequeued, out recycle);

                if (recycle && chunk.next != null) {
                    // this chunk is waste
                    chunk = Interlocked.CompareExchange(ref m_first, chunk.next, chunk);
                } else {
                    chunk = null;
                }

                // if we have dequeued any data, then return
                if (dequeued != 0)
                    return true;

            } while (chunk != null);

            return false;
        }

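        /// <summary>
        /// Removes all items from the queue by atomically replacing the chunk list
        /// with a fresh empty chunk.
        /// </summary>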
        public void Clear() {
            // start the new queue
            var chunk = new Chunk(DEFAULT_CHUNK_SIZE);
            do {
                var first = m_first;
                if (first.next == null && first != m_last) {
                    continue;
                }

                // here we create an inconsistency which forces the other threads to spin
                // and prevents them from fetching: chunk.next == null
                if (first != Interlocked.CompareExchange(ref m_first, chunk, first))
                    continue; // inconsistent

                m_last = chunk;
                return;
            } while (true);
        }

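        /// <summary>
        /// Atomically detaches the current contents of the queue and returns them
        /// as a list; the queue keeps working on a fresh empty chunk. An illustrative
        /// use (not part of the original source) is periodically flushing buffered
        /// items, e.g. <c>foreach (var item in queue.Drain()) Process(item);</c>
        /// where <c>Process</c> is a hypothetical consumer.
        /// </summary>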
        public List<T> Drain() {
            Chunk chunk = null;
            do {
                var first = m_first;
                // first.next is volatile
                if (first.next == null) {
                    if (first != m_last)
                        continue;
                    else if (first.Hi == first.Low)
                        return new List<T>();
                }

                // start the new queue
                if (chunk == null)
                    chunk = new Chunk(DEFAULT_CHUNK_SIZE);

                // here we create an inconsistency which forces the other threads to spin
                // and prevents them from fetching: chunk.next == null
                if (first != Interlocked.CompareExchange(ref m_first, chunk, first))
                    continue; // inconsistent

                var last = Interlocked.Exchange(ref m_last, chunk);

                return ReadChunks(first, last);

            } while (true);
        }

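        /// <summary>
        /// Collects the items of a detached chunk list, from <paramref name="chunk"/>
        /// up to and including <paramref name="last"/>, sealing each chunk so that
        /// in-flight writers are awaited before it is read.
        /// </summary>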
        static List<T> ReadChunks(Chunk chunk, object last) {
            var result = new List<T>();
            var buffer = new T[MAX_CHUNK_SIZE];
            int actual;
            bool recycle;
            SpinWait spin = new SpinWait();
            while (chunk != null) {
                // ensure all write operations on the chunk are complete
                chunk.Seal();

                // we need to read the chunk this way since some clients may still
                // be completing their dequeue operations; such clients most likely
                // won't get any results
                while (chunk.TryDequeueBatch(buffer, 0, buffer.Length, out actual, out recycle))
                    result.AddRange(new ArraySegmentCollection(buffer, 0, actual));

                if (chunk == last) {
                    chunk = null;
                } else {
                    while (chunk.next == null)
                        spin.SpinOnce();
                    chunk = chunk.next;
                }
            }

            return result;
        }

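        /// <summary>
        /// A minimal read-only <see cref="ICollection{T}"/> view over a segment of an
        /// array; it lets <c>ReadChunks</c> pass a buffer slice to
        /// <c>List&lt;T&gt;.AddRange</c>, which can use <c>Count</c> and <c>CopyTo</c>
        /// to copy the items in one step.
        /// </summary>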
        struct ArraySegmentCollection : ICollection<T> {
            readonly T[] m_data;
            readonly int m_offset;
            readonly int m_length;

            public ArraySegmentCollection(T[] data, int offset, int length) {
                m_data = data;
                m_offset = offset;
                m_length = length;
            }

            #region ICollection implementation

            public void Add(T item) {
                throw new NotSupportedException();
            }

            public void Clear() {
                throw new NotSupportedException();
            }

            public bool Contains(T item) {
                return false;
            }

            public void CopyTo(T[] array, int arrayIndex) {
                Array.Copy(m_data, m_offset, array, arrayIndex, m_length);
            }

            public bool Remove(T item) {
                throw new NotSupportedException();
            }

            public int Count {
                get {
                    return m_length;
                }
            }

            public bool IsReadOnly {
                get {
                    return true;
                }
            }

            #endregion

            #region IEnumerable implementation

            public IEnumerator<T> GetEnumerator() {
                for (int i = m_offset; i < m_length + m_offset; i++)
                    yield return m_data[i];
            }

            #endregion

            #region IEnumerable implementation

            IEnumerator IEnumerable.GetEnumerator() {
                return GetEnumerator();
            }

            #endregion
        }

        #region IEnumerable implementation

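        /// <summary>
        /// Enumerates the items of the queue by walking the chunk list; the
        /// enumerator is not synchronized with concurrent writers and readers,
        /// so it provides only a best-effort view of the queue contents.
        /// </summary>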
        class Enumerator : IEnumerator<T> {
            Chunk m_current;
            int m_pos = -1;

            public Enumerator(Chunk first) {
                m_current = first;
            }

            #region IEnumerator implementation

            public bool MoveNext() {
                if (m_current == null)
                    return false;

                if (m_pos == -1)
                    m_pos = m_current.Low;
                else
                    m_pos++;

                if (m_pos == m_current.Hi) {
                    m_current = m_pos == m_current.Size ? m_current.next : null;
                    m_pos = 0;

                    if (m_current == null)
                        return false;
                }

                return true;
            }

            public void Reset() {
                throw new NotSupportedException();
            }

            object IEnumerator.Current {
                get {
                    return Current;
                }
            }

            #endregion

            #region IDisposable implementation

            public void Dispose() {
            }

            #endregion

            #region IEnumerator implementation

            public T Current {
                get {
                    if (m_pos == -1 || m_current == null)
                        throw new InvalidOperationException();
                    return m_current.GetAt(m_pos);
                }
            }

            #endregion
        }

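        /// <summary>
        /// Returns an enumerator over the items currently stored in the queue,
        /// starting from the first chunk; see <see cref="Enumerator"/> for notes
        /// about concurrent modifications.
        /// </summary>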
        public IEnumerator<T> GetEnumerator() {
            return new Enumerator(m_first);
        }

        #endregion

        #region IEnumerable implementation

        IEnumerator IEnumerable.GetEnumerator() {
            return GetEnumerator();
        }

        #endregion
    }
}