//! An unbounded set of futures.
//!
//! This module is only available when the `std` or `alloc` feature of this
//! library is activated, and it is activated by default.
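//!
//! A minimal usage sketch (the executor and `StreamExt` come from the
//! `futures` facade crate):
//!
//! ```
//! use futures::executor::block_on;
//! use futures::future;
//! use futures::stream::{FuturesUnordered, StreamExt};
//!
//! block_on(async {
//!     let mut set = FuturesUnordered::new();
//!     set.push(future::ready(1));
//!     set.push(future::ready(2));
//!
//!     // Results arrive in completion order, not insertion order.
//!     let mut results = Vec::new();
//!     while let Some(n) = set.next().await {
//!         results.push(n);
//!     }
//!     results.sort();
//!     assert_eq!(results, [1, 2]);
//! });
//! ```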

use crate::task::AtomicWaker;
use alloc::sync::{Arc, Weak};
use core::cell::UnsafeCell;
use core::fmt::{self, Debug};
use core::iter::FromIterator;
use core::marker::PhantomData;
use core::mem;
use core::pin::Pin;
use core::ptr;
use core::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release, SeqCst};
use core::sync::atomic::{AtomicBool, AtomicPtr};
use futures_core::future::Future;
use futures_core::stream::{FusedStream, Stream};
use futures_core::task::{Context, Poll};
use futures_task::{FutureObj, LocalFutureObj, LocalSpawn, Spawn, SpawnError};

mod abort;

mod iter;
#[allow(unreachable_pub)] // https://github.com/rust-lang/rust/issues/102352
pub use self::iter::{IntoIter, Iter, IterMut, IterPinMut, IterPinRef};

mod task;
use self::task::Task;

mod ready_to_run_queue;
use self::ready_to_run_queue::{Dequeue, ReadyToRunQueue};

/// A set of futures which may complete in any order.
///
/// See [`FuturesOrdered`](crate::stream::FuturesOrdered) for a version of this
/// type that preserves a FIFO order.
///
/// This structure is optimized to manage a large number of futures.
/// Futures managed by [`FuturesUnordered`] will only be polled when they
/// generate wake-up notifications. This reduces the required amount of work
/// needed to poll large numbers of futures.
///
/// [`FuturesUnordered`] can be filled by [`collect`](Iterator::collect)ing an
/// iterator of futures into a [`FuturesUnordered`], or by
/// [`push`](FuturesUnordered::push)ing futures onto an existing
/// [`FuturesUnordered`]. When new futures are added,
/// [`poll_next`](Stream::poll_next) must be called in order to begin receiving
/// wake-ups for new futures.
///
/// Note that you can create a ready-made [`FuturesUnordered`] via the
/// [`collect`](Iterator::collect) method, or you can start with an empty set
/// with the [`FuturesUnordered::new`] constructor.
///
/// This type is only available when the `std` or `alloc` feature of this
/// library is activated, and it is activated by default.
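///
/// # Examples
///
/// A minimal sketch of collecting futures into a set and draining it (the
/// helpers come from the `futures` facade crate):
///
/// ```
/// use futures::executor::block_on;
/// use futures::future;
/// use futures::stream::{FuturesUnordered, StreamExt};
///
/// block_on(async {
///     // Build a ready-made set from an iterator of futures...
///     let mut set: FuturesUnordered<_> = (0..3).map(future::ready).collect();
///     // ...and drain the results as they complete, in any order.
///     let mut total = 0;
///     while let Some(n) = set.next().await {
///         total += n;
///     }
///     assert_eq!(total, 3);
/// });
/// ```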
#[must_use = "streams do nothing unless polled"]
pub struct FuturesUnordered<Fut> {
    ready_to_run_queue: Arc<ReadyToRunQueue<Fut>>,
    head_all: AtomicPtr<Task<Fut>>,
    is_terminated: AtomicBool,
}

unsafe impl<Fut: Send> Send for FuturesUnordered<Fut> {}
unsafe impl<Fut: Send + Sync> Sync for FuturesUnordered<Fut> {}
impl<Fut> Unpin for FuturesUnordered<Fut> {}

impl Spawn for FuturesUnordered<FutureObj<'_, ()>> {
    fn spawn_obj(&self, future_obj: FutureObj<'static, ()>) -> Result<(), SpawnError> {
        self.push(future_obj);
        Ok(())
    }
}

impl LocalSpawn for FuturesUnordered<LocalFutureObj<'_, ()>> {
    fn spawn_local_obj(&self, future_obj: LocalFutureObj<'static, ()>) -> Result<(), SpawnError> {
        self.push(future_obj);
        Ok(())
    }
}

// FuturesUnordered is implemented using two linked lists. One which links all
// futures managed by a `FuturesUnordered` and one that tracks futures that have
// been scheduled for polling. The first linked list allows for thread safe
// insertion of nodes at the head as well as forward iteration, but is otherwise
// not thread safe and is only accessed by the thread that owns the
// `FuturesUnordered` value for any other operations. The second linked list is
// an implementation of the intrusive MPSC queue algorithm described by
// 1024cores.net.
//
// When a future is submitted to the set, a task is allocated and inserted in
// both linked lists. The next call to `poll_next` will (eventually) see this
// task and call `poll` on the future.
//
// Before a managed future is polled, the current context's waker is replaced
// with one that is aware of the specific future being run. This ensures that
// wake-up notifications generated by that specific future are visible to
// `FuturesUnordered`. When a wake-up notification is received, the task is
// inserted into the ready to run queue, so that its future can be polled later.
//
// Each task is wrapped in an `Arc` and thereby atomically reference counted.
// Also, each task contains an `AtomicBool` which acts as a flag that indicates
// whether the task is currently inserted in the atomic queue. When a wake-up
// notification is received, the task will only be inserted into the ready to
// run queue if it isn't inserted already.
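//
// A simplified sketch of that wake-up path (names abbreviated; the actual
// logic lives in `task.rs`):
//
//     fn wake<Fut>(task: &Arc<Task<Fut>>) {
//         // Only the false -> true transition enqueues the task; a task
//         // that is already queued is not enqueued a second time.
//         if !task.queued.swap(true, SeqCst) {
//             if let Some(queue) = task.ready_to_run_queue.upgrade() {
//                 queue.enqueue(Arc::as_ptr(task));
//             }
//         }
//     }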

impl<Fut> Default for FuturesUnordered<Fut> {
    fn default() -> Self {
        Self::new()
    }
}

impl<Fut> FuturesUnordered<Fut> {
    /// Constructs a new, empty [`FuturesUnordered`].
    ///
    /// The returned [`FuturesUnordered`] does not contain any futures.
    /// In this state, [`FuturesUnordered::poll_next`](Stream::poll_next) will
    /// return [`Poll::Ready(None)`](Poll::Ready).
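    ///
    /// # Examples
    ///
    /// A small sketch: an empty set immediately yields `None`.
    ///
    /// ```
    /// use futures::executor::block_on;
    /// use futures::future::Ready;
    /// use futures::stream::{FuturesUnordered, StreamExt};
    ///
    /// block_on(async {
    ///     let mut set = FuturesUnordered::<Ready<()>>::new();
    ///     assert_eq!(set.next().await, None);
    /// });
    /// ```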
    pub fn new() -> Self {
        let stub = Arc::new(Task {
            future: UnsafeCell::new(None),
            next_all: AtomicPtr::new(ptr::null_mut()),
            prev_all: UnsafeCell::new(ptr::null()),
            len_all: UnsafeCell::new(0),
            next_ready_to_run: AtomicPtr::new(ptr::null_mut()),
            queued: AtomicBool::new(true),
            ready_to_run_queue: Weak::new(),
            woken: AtomicBool::new(false),
        });
        let stub_ptr = Arc::as_ptr(&stub);
        let ready_to_run_queue = Arc::new(ReadyToRunQueue {
            waker: AtomicWaker::new(),
            head: AtomicPtr::new(stub_ptr as *mut _),
            tail: UnsafeCell::new(stub_ptr),
            stub,
        });

        Self {
            head_all: AtomicPtr::new(ptr::null_mut()),
            ready_to_run_queue,
            is_terminated: AtomicBool::new(false),
        }
    }

    /// Returns the number of futures contained in the set.
    ///
    /// This represents the total number of in-flight futures.
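    ///
    /// # Examples
    ///
    /// A small sketch using ready futures from the `futures` facade crate:
    ///
    /// ```
    /// use futures::future;
    /// use futures::stream::FuturesUnordered;
    ///
    /// let set: FuturesUnordered<_> = (0..2).map(future::ready).collect();
    /// assert_eq!(set.len(), 2);
    /// assert!(!set.is_empty());
    /// ```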
    pub fn len(&self) -> usize {
        let (_, len) = self.atomic_load_head_and_len_all();
        len
    }

    /// Returns `true` if the set contains no futures.
    pub fn is_empty(&self) -> bool {
        // Relaxed ordering can be used here since we don't need to read from
        // the head pointer, only check whether it is null.
        self.head_all.load(Relaxed).is_null()
    }

    /// Push a future into the set.
    ///
    /// This method adds the given future to the set. It will not call
    /// [`poll`](core::future::Future::poll) on the submitted future. The caller must
    /// ensure that [`FuturesUnordered::poll_next`](Stream::poll_next) is called
    /// in order to receive wake-up notifications for the given future.
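    ///
    /// # Examples
    ///
    /// A small sketch; note that `push` takes `&self`, so the set itself does
    /// not need to be mutable:
    ///
    /// ```
    /// use futures::future;
    /// use futures::stream::FuturesUnordered;
    ///
    /// let set = FuturesUnordered::new();
    /// set.push(future::ready("hello"));
    /// set.push(future::ready("world"));
    /// assert_eq!(set.len(), 2);
    /// ```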
    pub fn push(&self, future: Fut) {
        let task = Arc::new(Task {
            future: UnsafeCell::new(Some(future)),
            next_all: AtomicPtr::new(self.pending_next_all()),
            prev_all: UnsafeCell::new(ptr::null_mut()),
            len_all: UnsafeCell::new(0),
            next_ready_to_run: AtomicPtr::new(ptr::null_mut()),
            queued: AtomicBool::new(true),
            ready_to_run_queue: Arc::downgrade(&self.ready_to_run_queue),
            woken: AtomicBool::new(false),
        });

        // Reset the `is_terminated` flag if we've previously marked ourselves
        // as terminated.
        self.is_terminated.store(false, Relaxed);

        // Right now our task has a strong reference count of 1. We transfer
        // ownership of this reference count to our internal linked list
        // and we'll reclaim ownership through the `unlink` method below.
        let ptr = self.link(task);

        // We'll need to get the future "into the system" to start tracking it,
        // e.g. routing its wake-up notifications to us so we can track which
        // futures are ready. To do that we unconditionally enqueue it for
        // polling here.
        self.ready_to_run_queue.enqueue(ptr);
    }

    /// Returns an iterator that allows inspecting each future in the set.
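    ///
    /// # Examples
    ///
    /// A small sketch: the iterator visits the pending futures without
    /// polling them.
    ///
    /// ```
    /// use futures::future::{self, Ready};
    /// use futures::stream::FuturesUnordered;
    ///
    /// let set: FuturesUnordered<Ready<i32>> = (0..3).map(future::ready).collect();
    /// assert_eq!(set.iter().count(), 3);
    /// ```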
    pub fn iter(&self) -> Iter<'_, Fut>
    where
        Fut: Unpin,
    {
        Iter(Pin::new(self).iter_pin_ref())
    }

    /// Returns an iterator that allows inspecting each future in the set.
    pub fn iter_pin_ref(self: Pin<&Self>) -> IterPinRef<'_, Fut> {
        let (task, len) = self.atomic_load_head_and_len_all();
        let pending_next_all = self.pending_next_all();

        IterPinRef { task, len, pending_next_all, _marker: PhantomData }
    }

    /// Returns an iterator that allows modifying each future in the set.
    pub fn iter_mut(&mut self) -> IterMut<'_, Fut>
    where
        Fut: Unpin,
    {
        IterMut(Pin::new(self).iter_pin_mut())
    }

    /// Returns an iterator that allows modifying each future in the set.
    pub fn iter_pin_mut(mut self: Pin<&mut Self>) -> IterPinMut<'_, Fut> {
        // `head_all` can be accessed directly and we don't need to spin on
        // `Task::next_all` since we have exclusive access to the set.
        let task = *self.head_all.get_mut();
        let len = if task.is_null() { 0 } else { unsafe { *(*task).len_all.get() } };

        IterPinMut { task, len, _marker: PhantomData }
    }

    /// Returns the current head node and number of futures in the list of all
    /// futures within a context where access is shared with other threads
    /// (mostly for use with the `len` and `iter_pin_ref` methods).
    fn atomic_load_head_and_len_all(&self) -> (*const Task<Fut>, usize) {
        let task = self.head_all.load(Acquire);
        let len = if task.is_null() {
            0
        } else {
            unsafe {
                (*task).spin_next_all(self.pending_next_all(), Acquire);
                *(*task).len_all.get()
            }
        };

        (task, len)
    }

    /// Releases the task. It destroys the future inside and either drops
    /// the `Arc<Task>` or transfers ownership to the ready to run queue.
    /// The task this method is called on must have been unlinked before.
    fn release_task(&mut self, task: Arc<Task<Fut>>) {
        // `release_task` must only be called on unlinked tasks
        debug_assert_eq!(task.next_all.load(Relaxed), self.pending_next_all());
        unsafe {
            debug_assert!((*task.prev_all.get()).is_null());
        }

        // The future is done, so set the `queued` flag. This prevents any
        // subsequent `wake` from doing actual work.
        let prev = task.queued.swap(true, SeqCst);

        // Drop the future, even if it hasn't finished yet. This is safe
        // because we're dropping the future on the thread that owns
        // `FuturesUnordered`, which correctly tracks `Fut`'s lifetimes and
        // such.
        unsafe {
            // Set to `None` rather than `take()`ing to prevent moving the
            // future.
            *task.future.get() = None;
        }

        // If the queued flag was previously set, then it means that this task
        // is still in our internal ready to run queue. We then transfer
        // ownership of our reference count to the ready to run queue, and it'll
        // come along and free it later, noticing that the future is `None`.
        //
        // If, however, the queued flag was *not* set then we're safe to
        // release our reference count on the task. The queued flag was set
        // above so all future `enqueue` operations will not actually
        // enqueue the task, so our task will never see the ready to run queue
        // again. The task itself will be deallocated once all reference counts
        // have been dropped elsewhere by the various wakers that contain it.
        if prev {
            mem::forget(task);
        }
    }

    /// Insert a new task into the internal linked list.
    fn link(&self, task: Arc<Task<Fut>>) -> *const Task<Fut> {
        // `next_all` should already be reset to the pending state before this
        // function is called.
        debug_assert_eq!(task.next_all.load(Relaxed), self.pending_next_all());
        let ptr = Arc::into_raw(task);

        // Atomically swap out the old head node to get the node that should be
        // assigned to `next_all`.
        let next = self.head_all.swap(ptr as *mut _, AcqRel);

        unsafe {
            // Store the new list length in the new node.
            let new_len = if next.is_null() {
                1
            } else {
                // Make sure `next_all` has been written to signal that it is
                // safe to read `len_all`.
                (*next).spin_next_all(self.pending_next_all(), Acquire);
                *(*next).len_all.get() + 1
            };
            *(*ptr).len_all.get() = new_len;

            // Write the old head as the next node pointer, signaling to other
            // threads that `len_all` and `next_all` are ready to read.
            (*ptr).next_all.store(next, Release);

            // `prev_all` updates don't need to be synchronized, as the field is
            // only ever used after exclusive access has been acquired.
            if !next.is_null() {
                *(*next).prev_all.get() = ptr;
            }
        }

        ptr
    }

    /// Remove the task from the linked list tracking all tasks currently
    /// managed by `FuturesUnordered`.
    /// This method is unsafe because the caller must guarantee that `task` is
    /// a valid pointer.
    unsafe fn unlink(&mut self, task: *const Task<Fut>) -> Arc<Task<Fut>> {
        // Compute the new list length now in case we're removing the head node
        // and won't be able to retrieve the correct length later.
        let head = *self.head_all.get_mut();
        debug_assert!(!head.is_null());
        let new_len = *(*head).len_all.get() - 1;

        let task = Arc::from_raw(task);
        let next = task.next_all.load(Relaxed);
        let prev = *task.prev_all.get();
        task.next_all.store(self.pending_next_all(), Relaxed);
        *task.prev_all.get() = ptr::null_mut();

        if !next.is_null() {
            *(*next).prev_all.get() = prev;
        }

        if !prev.is_null() {
            (*prev).next_all.store(next, Relaxed);
        } else {
            *self.head_all.get_mut() = next;
        }

        // Store the new list length in the head node.
        let head = *self.head_all.get_mut();
        if !head.is_null() {
            *(*head).len_all.get() = new_len;
        }

        task
    }

    /// Returns the reserved value for `Task::next_all` to indicate a pending
    /// assignment from the thread that inserted the task.
    ///
    /// `FuturesUnordered::link` needs to update `Task` pointers in an order
    /// that ensures any iterators created on other threads can correctly
    /// traverse the entire `Task` list using the chain of `next_all` pointers.
    /// This could be solved with a compare-exchange loop that stores the
    /// current `head_all` in `next_all` and swaps out `head_all` with the new
    /// `Task` pointer if the head hasn't already changed. Under heavy thread
    /// contention, this compare-exchange loop could become costly.
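    ///
    /// A sketch of that compare-exchange approach (hypothetical; not what this
    /// implementation does), with `new_task` being the freshly inserted node:
    ///
    /// ```ignore
    /// // Hypothetical sketch; this crate uses the reserved-pointer scheme
    /// // described below instead.
    /// let mut head = self.head_all.load(Acquire);
    /// loop {
    ///     unsafe { (*new_task).next_all.store(head, Relaxed) };
    ///     match self.head_all.compare_exchange_weak(head, new_task, AcqRel, Acquire) {
    ///         Ok(_) => break,
    ///         Err(actual) => head = actual, // head changed; retry
    ///     }
    /// }
    /// ```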
    ///
    /// An alternative is to initialize `next_all` to a reserved pending state
    /// first, perform an atomic swap on `head_all`, and finally update
    /// `next_all` with the old head node. Iterators will then either see the
    /// pending state value or the correct next node pointer, and can reload
    /// `next_all` as needed until the correct value is loaded. The number of
    /// retries needed (if any) would be small and will always be finite, so
    /// this should generally perform better than the compare-exchange loop.
    ///
    /// A valid `Task` pointer in the `head_all` list is guaranteed to never be
    /// this value, so it is safe to use as a reserved value until the correct
    /// value can be written.
    fn pending_next_all(&self) -> *mut Task<Fut> {
        // The `ReadyToRunQueue` stub is never inserted into the `head_all`
        // list, and its pointer value will remain valid for the lifetime of
        // this `FuturesUnordered`, so we can make use of its value here.
        Arc::as_ptr(&self.ready_to_run_queue.stub) as *mut _
    }
}

impl<Fut: Future> Stream for FuturesUnordered<Fut> {
    type Item = Fut::Output;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let len = self.len();

        // Keep track of how many child futures we have polled,
        // in case we want to forcibly yield.
        let mut polled = 0;
        let mut yielded = 0;

        // Register the current context's waker so that wake-up notifications
        // from the managed futures reach this `poll_next` call.
        self.ready_to_run_queue.waker.register(cx.waker());

        loop {
            // Safety: &mut self guarantees the mutual exclusion `dequeue`
            // expects
            let task = match unsafe { self.ready_to_run_queue.dequeue() } {
                Dequeue::Empty => {
                    if self.is_empty() {
                        // We can only consider ourselves terminated once we
                        // have yielded a `None`
                        *self.is_terminated.get_mut() = true;
                        return Poll::Ready(None);
                    } else {
                        return Poll::Pending;
                    }
                }
                Dequeue::Inconsistent => {
                    // At this point, it may be worth yielding the thread &
                    // spinning a few times... but for now, just yield using the
                    // task system.
                    cx.waker().wake_by_ref();
                    return Poll::Pending;
                }
                Dequeue::Data(task) => task,
            };

            debug_assert!(task != self.ready_to_run_queue.stub());

            // Safety:
            // - `task` is a valid pointer.
            // - We are the only thread that accesses the `UnsafeCell` that
            //   contains the future
            let future = match unsafe { &mut *(*task).future.get() } {
                Some(future) => future,

                // If the future has already gone away then we're just
                // cleaning out this task. See the comment in
                // `release_task` for more information, but we're basically
                // just taking ownership of our reference count here.
                None => {
                    // This case only happens when `release_task` was called
                    // for this task before and couldn't drop the task
                    // because it was already enqueued in the ready to run
                    // queue.

                    // Safety: `task` is a valid pointer
                    let task = unsafe { Arc::from_raw(task) };

                    // Double check that the call to `release_task` really
                    // happened. Calling it required the task to be unlinked.
                    debug_assert_eq!(task.next_all.load(Relaxed), self.pending_next_all());
                    unsafe {
                        debug_assert!((*task.prev_all.get()).is_null());
                    }
                    continue;
                }
            };

            // Safety: `task` is a valid pointer
            let task = unsafe { self.unlink(task) };

            // Unset queued flag: This must be done before polling to ensure
            // that the future's task gets rescheduled if it sends a wake-up
            // notification **during** the call to `poll`.
            let prev = task.queued.swap(false, SeqCst);
            assert!(prev);

            // We're going to need to be very careful if the `poll`
            // method below panics. We need to (a) not leak memory and
            // (b) ensure that we still don't have any use-after-frees. To
            // manage this we do a few things:
            //
            // * A "bomb" is created which if dropped abnormally will call
            //   `release_task`. That way we'll be sure the memory management
            //   of the `task` is managed correctly. In particular
            //   `release_task` will drop the future. This ensures that it is
            //   dropped on this thread and not accidentally on a different
            //   thread (bad).
            // * We unlink the task from our internal queue to preemptively
            //   assume it'll panic, in which case we'll want to discard it
            //   regardless.
            struct Bomb<'a, Fut> {
                queue: &'a mut FuturesUnordered<Fut>,
                task: Option<Arc<Task<Fut>>>,
            }

            impl<Fut> Drop for Bomb<'_, Fut> {
                fn drop(&mut self) {
                    if let Some(task) = self.task.take() {
                        self.queue.release_task(task);
                    }
                }
            }

            let mut bomb = Bomb { task: Some(task), queue: &mut *self };

            // Poll the underlying future with the appropriate waker
            // implementation. This is where a large bit of the unsafety
            // starts to stem from internally. The waker is basically just
            // our `Arc<Task<Fut>>` and can schedule the future for polling by
            // enqueuing itself in the ready to run queue.
            //
            // Critically though `Task<Fut>` won't actually access `Fut`, the
            // future, while it's floating around inside of wakers.
            // These structs will basically just use `Fut` to size
            // the internal allocation, appropriately accessing fields and
            // deallocating the task if need be.
            let res = {
                let task = bomb.task.as_ref().unwrap();
                // We are only interested in whether the future is awoken before it
                // finishes polling, so reset the flag here.
                task.woken.store(false, Relaxed);
                let waker = Task::waker_ref(task);
                let mut cx = Context::from_waker(&waker);

                // Safety: We won't move the future ever again
                let future = unsafe { Pin::new_unchecked(future) };

                future.poll(&mut cx)
            };
            polled += 1;

            match res {
                Poll::Pending => {
                    let task = bomb.task.take().unwrap();
                    // If the future was awoken during polling, we assume
                    // the future wanted to explicitly yield.
                    yielded += task.woken.load(Relaxed) as usize;
                    bomb.queue.link(task);

                    // If a future yields, we respect it and yield here.
                    // If all futures have been polled, we also yield here to
                    // avoid starving other tasks waiting on the executor.
                    // (polling the same future twice per iteration may cause
                    // the problem: https://github.com/rust-lang/futures-rs/pull/2333)
                    if yielded >= 2 || polled == len {
                        cx.waker().wake_by_ref();
                        return Poll::Pending;
                    }
                    continue;
                }
                Poll::Ready(output) => return Poll::Ready(Some(output)),
            }
        }
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        let len = self.len();
        (len, Some(len))
    }
}

impl<Fut> Debug for FuturesUnordered<Fut> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "FuturesUnordered {{ ... }}")
    }
}

impl<Fut> FuturesUnordered<Fut> {
    /// Clears the set, removing all futures.
    pub fn clear(&mut self) {
        *self = Self::new();
    }
}

impl<Fut> Drop for FuturesUnordered<Fut> {
    fn drop(&mut self) {
        // When a `FuturesUnordered` is dropped we want to drop all futures
        // associated with it. At the same time though there may be tons of
        // wakers flying around which contain `Task<Fut>` references
        // inside them. We'll let those naturally get deallocated.
        while !self.head_all.get_mut().is_null() {
            let head = *self.head_all.get_mut();
            let task = unsafe { self.unlink(head) };
            self.release_task(task);
        }

        // Note that at this point we could still have a bunch of tasks in the
        // ready to run queue. None of those tasks, however, have futures
        // associated with them so they're safe to destroy on any thread. At
        // this point the `FuturesUnordered` struct, the owner of the one
        // strong reference to the ready to run queue, will drop that strong
        // reference. At that point whichever thread releases the strong
        // refcount last (be it this thread or some other thread as part of an
        // `upgrade`) will clear out the ready to run queue and free all
        // remaining tasks.
        //
        // While that freeing operation isn't guaranteed to happen here, it's
        // guaranteed to happen "promptly" as no more "blocking work" will
        // happen while there's a strong refcount held.
    }
}

impl<'a, Fut: Unpin> IntoIterator for &'a FuturesUnordered<Fut> {
    type Item = &'a Fut;
    type IntoIter = Iter<'a, Fut>;

    fn into_iter(self) -> Self::IntoIter {
        self.iter()
    }
}

impl<'a, Fut: Unpin> IntoIterator for &'a mut FuturesUnordered<Fut> {
    type Item = &'a mut Fut;
    type IntoIter = IterMut<'a, Fut>;

    fn into_iter(self) -> Self::IntoIter {
        self.iter_mut()
    }
}

impl<Fut: Unpin> IntoIterator for FuturesUnordered<Fut> {
    type Item = Fut;
    type IntoIter = IntoIter<Fut>;

    fn into_iter(mut self) -> Self::IntoIter {
        // `head_all` can be accessed directly and we don't need to spin on
        // `Task::next_all` since we have exclusive access to the set.
        let task = *self.head_all.get_mut();
        let len = if task.is_null() { 0 } else { unsafe { *(*task).len_all.get() } };

        IntoIter { len, inner: self }
    }
}

impl<Fut> FromIterator<Fut> for FuturesUnordered<Fut> {
    fn from_iter<I>(iter: I) -> Self
    where
        I: IntoIterator<Item = Fut>,
    {
        let acc = Self::new();
        iter.into_iter().fold(acc, |acc, item| {
            acc.push(item);
            acc
        })
    }
}

impl<Fut: Future> FusedStream for FuturesUnordered<Fut> {
    fn is_terminated(&self) -> bool {
        self.is_terminated.load(Relaxed)
    }
}

impl<Fut> Extend<Fut> for FuturesUnordered<Fut> {
    fn extend<I>(&mut self, iter: I)
    where
        I: IntoIterator<Item = Fut>,
    {
        for item in iter {
            self.push(item);
        }
    }
}