futures_util/stream/futures_unordered/mod.rs

//! An unbounded set of futures.
//!
//! This module is only available when the `std` or `alloc` feature of this
//! library is activated, and it is activated by default.

use crate::task::AtomicWaker;
use alloc::sync::{Arc, Weak};
use core::cell::UnsafeCell;
use core::fmt::{self, Debug};
use core::iter::FromIterator;
use core::marker::PhantomData;
use core::mem;
use core::pin::Pin;
use core::ptr;
use core::sync::atomic::Ordering::{AcqRel, Acquire, Relaxed, Release, SeqCst};
use core::sync::atomic::{AtomicBool, AtomicPtr};
use futures_core::future::Future;
use futures_core::stream::{FusedStream, Stream};
use futures_core::task::{Context, Poll};
use futures_task::{FutureObj, LocalFutureObj, LocalSpawn, Spawn, SpawnError};

mod abort;

mod iter;
pub use self::iter::{IntoIter, Iter, IterMut, IterPinMut, IterPinRef};

mod task;
use self::task::Task;

mod ready_to_run_queue;
use self::ready_to_run_queue::{Dequeue, ReadyToRunQueue};

/// A set of futures which may complete in any order.
///
/// This structure is optimized to manage a large number of futures.
/// Futures managed by [`FuturesUnordered`] will only be polled when they
/// generate wake-up notifications. This reduces the required amount of work
/// needed to poll large numbers of futures.
///
/// [`FuturesUnordered`] can be filled by [`collect`](Iterator::collect)ing an
/// iterator of futures into a [`FuturesUnordered`], or by
/// [`push`](FuturesUnordered::push)ing futures onto an existing
/// [`FuturesUnordered`]. When new futures are added,
/// [`poll_next`](Stream::poll_next) must be called in order to begin receiving
/// wake-ups for new futures.
///
/// Note that you can create a ready-made [`FuturesUnordered`] via the
/// [`collect`](Iterator::collect) method, or you can start with an empty set
/// with the [`FuturesUnordered::new`] constructor.
///
/// This type is only available when the `std` or `alloc` feature of this
/// library is activated, and it is activated by default.
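///
/// # Examples
///
/// A minimal usage sketch (assuming the `futures` facade crate with its
/// default `executor` feature; `StreamExt::next` drives the set):
///
/// ```
/// use futures::executor::block_on;
/// use futures::stream::{FuturesUnordered, StreamExt};
///
/// block_on(async {
///     // All futures come from the same closure, so they share one type.
///     let mut set: FuturesUnordered<_> = (1..=3).map(|n| async move { n }).collect();
///
///     let mut sum = 0;
///     while let Some(n) = set.next().await {
///         sum += n;
///     }
///     assert_eq!(sum, 6);
/// });
/// ```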
#[must_use = "streams do nothing unless polled"]
pub struct FuturesUnordered<Fut> {
    ready_to_run_queue: Arc<ReadyToRunQueue<Fut>>,
    head_all: AtomicPtr<Task<Fut>>,
    is_terminated: AtomicBool,
}

unsafe impl<Fut: Send> Send for FuturesUnordered<Fut> {}
unsafe impl<Fut: Sync> Sync for FuturesUnordered<Fut> {}
impl<Fut> Unpin for FuturesUnordered<Fut> {}

impl Spawn for FuturesUnordered<FutureObj<'_, ()>> {
    fn spawn_obj(&self, future_obj: FutureObj<'static, ()>) -> Result<(), SpawnError> {
        self.push(future_obj);
        Ok(())
    }
}

impl LocalSpawn for FuturesUnordered<LocalFutureObj<'_, ()>> {
    fn spawn_local_obj(&self, future_obj: LocalFutureObj<'static, ()>) -> Result<(), SpawnError> {
        self.push(future_obj);
        Ok(())
    }
}

// FuturesUnordered is implemented using two linked lists. One which links all
// futures managed by a `FuturesUnordered` and one that tracks futures that have
// been scheduled for polling. The first linked list allows for thread safe
// insertion of nodes at the head as well as forward iteration, but is otherwise
// not thread safe and is only accessed by the thread that owns the
// `FuturesUnordered` value for any other operations. The second linked list is
// an implementation of the intrusive MPSC queue algorithm described by
// 1024cores.net.
//
// When a future is submitted to the set, a task is allocated and inserted in
// both linked lists. The next call to `poll_next` will (eventually) see this
// task and call `poll` on the future.
//
// Before a managed future is polled, the current context's waker is replaced
// with one that is aware of the specific future being run. This ensures that
// wake-up notifications generated by that specific future are visible to
// `FuturesUnordered`. When a wake-up notification is received, the task is
// inserted into the ready to run queue, so that its future can be polled later.
//
// Each task is wrapped in an `Arc` and thereby atomically reference counted.
// Also, each task contains an `AtomicBool` which acts as a flag that indicates
// whether the task is currently inserted in the atomic queue. When a wake-up
// notification is received, the task will only be inserted into the ready to
// run queue if it isn't inserted already.
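//
// For orientation, a rough sketch of the 1024cores-style intrusive enqueue
// (the real implementation lives in `ready_to_run_queue.rs`; this is
// illustrative, not the exact code):
//
//     fn enqueue(&self, task: *const Task<Fut>) {
//         unsafe {
//             // Producers may race here, so only atomics are used: make the
//             // new node the queue's head...
//             (*task).next_ready_to_run.store(ptr::null_mut(), Relaxed);
//             let prev = self.head.swap(task as *mut _, AcqRel);
//             // ...then link the old head to it. Between these two steps the
//             // queue is momentarily "inconsistent", which the single
//             // consumer observes as `Dequeue::Inconsistent` and retries.
//             (*prev).next_ready_to_run.store(task as *mut _, Release);
//         }
//     }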

impl<Fut> Default for FuturesUnordered<Fut> {
    fn default() -> Self {
        Self::new()
    }
}

impl<Fut> FuturesUnordered<Fut> {
    /// Constructs a new, empty [`FuturesUnordered`].
    ///
    /// The returned [`FuturesUnordered`] does not contain any futures.
    /// In this state, [`FuturesUnordered::poll_next`](Stream::poll_next) will
    /// return [`Poll::Ready(None)`](Poll::Ready).
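    ///
    /// # Examples
    ///
    /// A small sketch (assuming the `futures` facade crate for the executor):
    ///
    /// ```
    /// use futures::executor::block_on;
    /// use futures::stream::{FuturesUnordered, StreamExt};
    ///
    /// block_on(async {
    ///     let mut set = FuturesUnordered::<core::future::Ready<i32>>::new();
    ///     // An empty set is immediately terminated.
    ///     assert!(set.next().await.is_none());
    /// });
    /// ```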
    pub fn new() -> Self {
        let stub = Arc::new(Task {
            future: UnsafeCell::new(None),
            next_all: AtomicPtr::new(ptr::null_mut()),
            prev_all: UnsafeCell::new(ptr::null()),
            len_all: UnsafeCell::new(0),
            next_ready_to_run: AtomicPtr::new(ptr::null_mut()),
            queued: AtomicBool::new(true),
            ready_to_run_queue: Weak::new(),
            woken: AtomicBool::new(false),
        });
        let stub_ptr = Arc::as_ptr(&stub);
        let ready_to_run_queue = Arc::new(ReadyToRunQueue {
            waker: AtomicWaker::new(),
            head: AtomicPtr::new(stub_ptr as *mut _),
            tail: UnsafeCell::new(stub_ptr),
            stub,
        });

        Self {
            head_all: AtomicPtr::new(ptr::null_mut()),
            ready_to_run_queue,
            is_terminated: AtomicBool::new(false),
        }
    }

    /// Returns the number of futures contained in the set.
    ///
    /// This represents the total number of in-flight futures.
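    ///
    /// # Examples
    ///
    /// A small sketch (assuming the `futures` facade crate):
    ///
    /// ```
    /// use futures::stream::FuturesUnordered;
    /// use core::future::ready;
    ///
    /// let set: FuturesUnordered<_> = (0..5).map(ready).collect();
    /// assert_eq!(set.len(), 5);
    /// assert!(!set.is_empty());
    /// ```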
    pub fn len(&self) -> usize {
        let (_, len) = self.atomic_load_head_and_len_all();
        len
    }

    /// Returns `true` if the set contains no futures.
    pub fn is_empty(&self) -> bool {
        // Relaxed ordering can be used here since we don't need to read from
        // the head pointer, only check whether it is null.
        self.head_all.load(Relaxed).is_null()
    }

    /// Push a future into the set.
    ///
    /// This method adds the given future to the set. It will not call
    /// [`poll`](core::future::Future::poll) on the submitted future. The
    /// caller must ensure that [`FuturesUnordered::poll_next`](Stream::poll_next)
    /// is called in order to receive wake-up notifications for the given
    /// future.
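    ///
    /// # Examples
    ///
    /// A small sketch (assuming the `futures` facade crate):
    ///
    /// ```
    /// use futures::stream::FuturesUnordered;
    ///
    /// let set = FuturesUnordered::new();
    /// set.push(async { 42 });
    /// assert_eq!(set.len(), 1);
    /// ```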
    pub fn push(&self, future: Fut) {
        let task = Arc::new(Task {
            future: UnsafeCell::new(Some(future)),
            next_all: AtomicPtr::new(self.pending_next_all()),
            prev_all: UnsafeCell::new(ptr::null_mut()),
            len_all: UnsafeCell::new(0),
            next_ready_to_run: AtomicPtr::new(ptr::null_mut()),
            queued: AtomicBool::new(true),
            ready_to_run_queue: Arc::downgrade(&self.ready_to_run_queue),
            woken: AtomicBool::new(false),
        });

        // Reset the `is_terminated` flag if we've previously marked ourselves
        // as terminated.
        self.is_terminated.store(false, Relaxed);

        // Right now our task has a strong reference count of 1. We transfer
        // ownership of this reference count to our internal linked list
        // and we'll reclaim ownership through the `unlink` method below.
        let ptr = self.link(task);

        // We'll need to get the future "into the system" to start tracking it,
        // e.g. routing its wake-up notifications to us so we can track which
        // futures are ready. To do that we unconditionally enqueue it for
        // polling here.
        self.ready_to_run_queue.enqueue(ptr);
    }

    /// Returns an iterator that allows inspecting each future in the set.
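    ///
    /// # Examples
    ///
    /// A small sketch (assuming the `futures` facade crate):
    ///
    /// ```
    /// use futures::stream::FuturesUnordered;
    /// use core::future::ready;
    ///
    /// let set: FuturesUnordered<_> = (0..3).map(ready).collect();
    /// assert_eq!(set.iter().count(), 3);
    /// ```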
    pub fn iter(&self) -> Iter<'_, Fut>
    where
        Fut: Unpin,
    {
        Iter(Pin::new(self).iter_pin_ref())
    }

    /// Returns an iterator that allows inspecting each future in the set.
    pub fn iter_pin_ref(self: Pin<&Self>) -> IterPinRef<'_, Fut> {
        let (task, len) = self.atomic_load_head_and_len_all();
        let pending_next_all = self.pending_next_all();

        IterPinRef { task, len, pending_next_all, _marker: PhantomData }
    }

    /// Returns an iterator that allows modifying each future in the set.
    pub fn iter_mut(&mut self) -> IterMut<'_, Fut>
    where
        Fut: Unpin,
    {
        IterMut(Pin::new(self).iter_pin_mut())
    }

    /// Returns an iterator that allows modifying each future in the set.
    pub fn iter_pin_mut(mut self: Pin<&mut Self>) -> IterPinMut<'_, Fut> {
        // `head_all` can be accessed directly and we don't need to spin on
        // `Task::next_all` since we have exclusive access to the set.
        let task = *self.head_all.get_mut();
        let len = if task.is_null() { 0 } else { unsafe { *(*task).len_all.get() } };

        IterPinMut { task, len, _marker: PhantomData }
    }

    /// Returns the current head node and number of futures in the list of all
    /// futures within a context where access is shared with other threads
    /// (mostly for use with the `len` and `iter_pin_ref` methods).
    fn atomic_load_head_and_len_all(&self) -> (*const Task<Fut>, usize) {
        let task = self.head_all.load(Acquire);
        let len = if task.is_null() {
            0
        } else {
            unsafe {
                (*task).spin_next_all(self.pending_next_all(), Acquire);
                *(*task).len_all.get()
            }
        };

        (task, len)
    }

    /// Releases the task. It destroys the future inside and either drops
    /// the `Arc<Task>` or transfers ownership to the ready to run queue.
    /// The task this method is called on must have been unlinked beforehand.
    fn release_task(&mut self, task: Arc<Task<Fut>>) {
        // `release_task` must only be called on unlinked tasks
        debug_assert_eq!(task.next_all.load(Relaxed), self.pending_next_all());
        unsafe {
            debug_assert!((*task.prev_all.get()).is_null());
        }

        // The future is done, so mark the task as queued. This will prevent
        // `wake` from doing any work in the future.
        let prev = task.queued.swap(true, SeqCst);

        // Drop the future, even if it hasn't finished yet. This is safe
        // because we're dropping the future on the thread that owns
        // `FuturesUnordered`, which correctly tracks `Fut`'s lifetimes and
        // such.
        unsafe {
            // Set to `None` rather than `take()`ing to prevent moving the
            // future.
            *task.future.get() = None;
        }

        // If the queued flag was previously set, then it means that this task
        // is still in our internal ready to run queue. We then transfer
        // ownership of our reference count to the ready to run queue, and it'll
        // come along and free it later, noticing that the future is `None`.
        //
        // If, however, the queued flag was *not* set then we're safe to
        // release our reference count on the task. The queued flag was set
        // above, so no future `enqueue` operation will actually enqueue the
        // task, and our task will never see the ready to run queue again. The
        // task itself will be deallocated once all reference counts have been
        // dropped elsewhere by the various wakers that contain it.
        if prev {
            mem::forget(task);
        }
    }

    /// Insert a new task into the internal linked list.
    fn link(&self, task: Arc<Task<Fut>>) -> *const Task<Fut> {
        // `next_all` should already be reset to the pending state before this
        // function is called.
        debug_assert_eq!(task.next_all.load(Relaxed), self.pending_next_all());
        let ptr = Arc::into_raw(task);

        // Atomically swap out the old head node to get the node that should be
        // assigned to `next_all`.
        let next = self.head_all.swap(ptr as *mut _, AcqRel);

        unsafe {
            // Store the new list length in the new node.
            let new_len = if next.is_null() {
                1
            } else {
                // Make sure `next_all` has been written to signal that it is
                // safe to read `len_all`.
                (*next).spin_next_all(self.pending_next_all(), Acquire);
                *(*next).len_all.get() + 1
            };
            *(*ptr).len_all.get() = new_len;

            // Write the old head as the next node pointer, signaling to other
            // threads that `len_all` and `next_all` are ready to read.
            (*ptr).next_all.store(next, Release);

            // `prev_all` updates don't need to be synchronized, as the field is
            // only ever used after exclusive access has been acquired.
            if !next.is_null() {
                *(*next).prev_all.get() = ptr;
            }
        }

        ptr
    }

    /// Remove the task from the linked list tracking all tasks currently
    /// managed by `FuturesUnordered`.
    /// This method is unsafe because the caller must guarantee that `task`
    /// is a valid pointer.
    unsafe fn unlink(&mut self, task: *const Task<Fut>) -> Arc<Task<Fut>> {
        // Compute the new list length now in case we're removing the head node
        // and won't be able to retrieve the correct length later.
        let head = *self.head_all.get_mut();
        debug_assert!(!head.is_null());
        let new_len = *(*head).len_all.get() - 1;

        let task = Arc::from_raw(task);
        let next = task.next_all.load(Relaxed);
        let prev = *task.prev_all.get();
        task.next_all.store(self.pending_next_all(), Relaxed);
        *task.prev_all.get() = ptr::null_mut();

        if !next.is_null() {
            *(*next).prev_all.get() = prev;
        }

        if !prev.is_null() {
            (*prev).next_all.store(next, Relaxed);
        } else {
            *self.head_all.get_mut() = next;
        }

        // Store the new list length in the head node.
        let head = *self.head_all.get_mut();
        if !head.is_null() {
            *(*head).len_all.get() = new_len;
        }

        task
    }

    /// Returns the reserved value for `Task::next_all` to indicate a pending
    /// assignment from the thread that inserted the task.
    ///
    /// `FuturesUnordered::link` needs to update `Task` pointers in an order
    /// that ensures any iterators created on other threads can correctly
    /// traverse the entire `Task` list using the chain of `next_all` pointers.
    /// This could be solved with a compare-exchange loop that stores the
    /// current `head_all` in `next_all` and swaps out `head_all` with the new
    /// `Task` pointer if the head hasn't already changed. Under heavy thread
    /// contention, this compare-exchange loop could become costly.
    ///
    /// An alternative is to initialize `next_all` to a reserved pending state
    /// first, perform an atomic swap on `head_all`, and finally update
    /// `next_all` with the old head node. Iterators will then either see the
    /// pending state value or the correct next node pointer, and can reload
    /// `next_all` as needed until the correct value is loaded. The number of
    /// retries needed (if any) would be small and will always be finite, so
    /// this should generally perform better than the compare-exchange loop.
    ///
    /// A valid `Task` pointer in the `head_all` list is guaranteed to never be
    /// this value, so it is safe to use as a reserved value until the correct
    /// value can be written.
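    ///
    /// A rough sketch of the rejected compare-exchange approach described
    /// above (illustrative only, not code that exists in this crate):
    ///
    /// ```ignore
    /// // Retry until `head_all` is unchanged between the load and the swap.
    /// let mut head = self.head_all.load(Acquire);
    /// loop {
    ///     task.next_all.store(head, Relaxed);
    ///     match self.head_all.compare_exchange_weak(head, ptr, AcqRel, Acquire) {
    ///         Ok(_) => break,
    ///         Err(actual) => head = actual, // contended: retry with new head
    ///     }
    /// }
    /// ```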
    fn pending_next_all(&self) -> *mut Task<Fut> {
        // The `ReadyToRunQueue` stub is never inserted into the `head_all`
        // list, and its pointer value will remain valid for the lifetime of
        // this `FuturesUnordered`, so we can make use of its value here.
        Arc::as_ptr(&self.ready_to_run_queue.stub) as *mut _
    }
}

impl<Fut: Future> Stream for FuturesUnordered<Fut> {
    type Item = Fut::Output;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        let len = self.len();

        // Keep track of how many child futures we have polled,
        // in case we want to forcibly yield.
        let mut polled = 0;
        let mut yielded = 0;

        // Register the current context's waker so that wake-up notifications
        // reach this `FuturesUnordered`.
        self.ready_to_run_queue.waker.register(cx.waker());

        loop {
            // Safety: `&mut self` guarantees the mutual exclusion `dequeue`
            // expects.
            let task = match unsafe { self.ready_to_run_queue.dequeue() } {
                Dequeue::Empty => {
                    if self.is_empty() {
                        // We can only consider ourselves terminated once we
                        // have yielded a `None`.
                        *self.is_terminated.get_mut() = true;
                        return Poll::Ready(None);
                    } else {
                        return Poll::Pending;
                    }
                }
                Dequeue::Inconsistent => {
                    // At this point, it may be worth yielding the thread &
                    // spinning a few times... but for now, just yield using the
                    // task system.
                    cx.waker().wake_by_ref();
                    return Poll::Pending;
                }
                Dequeue::Data(task) => task,
            };

            debug_assert!(task != self.ready_to_run_queue.stub());

            // Safety:
            // - `task` is a valid pointer.
            // - We are the only thread that accesses the `UnsafeCell` that
            //   contains the future
            let future = match unsafe { &mut *(*task).future.get() } {
                Some(future) => future,

                // If the future has already gone away then we're just
                // cleaning out this task. See the comment in
                // `release_task` for more information, but we're basically
                // just taking ownership of our reference count here.
                None => {
                    // This case only happens when `release_task` was called
                    // for this task before and couldn't drop the task
                    // because it was already enqueued in the ready to run
                    // queue.

                    // Safety: `task` is a valid pointer
                    let task = unsafe { Arc::from_raw(task) };

                    // Double check that the call to `release_task` really
                    // happened. Calling it required the task to be unlinked.
                    debug_assert_eq!(task.next_all.load(Relaxed), self.pending_next_all());
                    unsafe {
                        debug_assert!((*task.prev_all.get()).is_null());
                    }
                    continue;
                }
            };

            // Safety: `task` is a valid pointer
            let task = unsafe { self.unlink(task) };

            // Unset queued flag: This must be done before polling to ensure
            // that the future's task gets rescheduled if it sends a wake-up
            // notification **during** the call to `poll`.
            let prev = task.queued.swap(false, SeqCst);
            assert!(prev);

            // We're going to need to be very careful if the `poll`
            // method below panics. We need to (a) not leak memory and
            // (b) ensure that we still don't have any use-after-frees. To
            // manage this we do a few things:
            //
            // * A "bomb" is created which if dropped abnormally will call
            //   `release_task`. That way we'll be sure the memory management
            //   of the `task` is managed correctly. In particular
            //   `release_task` will drop the future. This ensures that it is
            //   dropped on this thread and not accidentally on a different
            //   thread (bad).
            // * We unlink the task from our internal queue, preemptively
            //   assuming `poll` will panic, in which case we'll want to
            //   discard it regardless.
            struct Bomb<'a, Fut> {
                queue: &'a mut FuturesUnordered<Fut>,
                task: Option<Arc<Task<Fut>>>,
            }

            impl<Fut> Drop for Bomb<'_, Fut> {
                fn drop(&mut self) {
                    if let Some(task) = self.task.take() {
                        self.queue.release_task(task);
                    }
                }
            }

            let mut bomb = Bomb { task: Some(task), queue: &mut *self };

            // Poll the underlying future with the appropriate waker
            // implementation. This is where a large bit of the unsafety
            // starts to stem from internally. The waker is basically just
            // our `Arc<Task<Fut>>` and can schedule the future for polling by
            // enqueuing itself in the ready to run queue.
            //
            // Critically though `Task<Fut>` won't actually access `Fut`, the
            // future, while it's floating around inside of wakers.
            // These structs will basically just use `Fut` to size
            // the internal allocation, appropriately accessing fields and
            // deallocating the task if need be.
            let res = {
                let task = bomb.task.as_ref().unwrap();
                // We are only interested in whether the future is awoken before it
                // finishes polling, so reset the flag here.
                task.woken.store(false, Relaxed);
                let waker = Task::waker_ref(task);
                let mut cx = Context::from_waker(&waker);

                // Safety: We won't move the future ever again
                let future = unsafe { Pin::new_unchecked(future) };

                future.poll(&mut cx)
            };
            polled += 1;

            match res {
                Poll::Pending => {
                    let task = bomb.task.take().unwrap();
                    // If the future was awoken during polling, we assume
                    // the future wanted to explicitly yield.
                    yielded += task.woken.load(Relaxed) as usize;
                    bomb.queue.link(task);

                    // If a future yields, we respect it and yield here.
                    // If all futures have been polled, we also yield here to
                    // avoid starving other tasks waiting on the executor.
                    // (Polling the same future twice per iteration may cause
                    // the problem described in
                    // https://github.com/rust-lang/futures-rs/pull/2333.)
                    if yielded >= 2 || polled == len {
                        cx.waker().wake_by_ref();
                        return Poll::Pending;
                    }
                    continue;
                }
                Poll::Ready(output) => return Poll::Ready(Some(output)),
            }
        }
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        let len = self.len();
        (len, Some(len))
    }
}

impl<Fut> Debug for FuturesUnordered<Fut> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "FuturesUnordered {{ ... }}")
    }
}

impl<Fut> FuturesUnordered<Fut> {
    /// Clears the set, removing all futures.
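    ///
    /// # Examples
    ///
    /// A small sketch (assuming the `futures` facade crate):
    ///
    /// ```
    /// use futures::stream::FuturesUnordered;
    /// use core::future::ready;
    ///
    /// let mut set: FuturesUnordered<_> = (0..3).map(ready).collect();
    /// set.clear();
    /// assert!(set.is_empty());
    /// ```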
    pub fn clear(&mut self) {
        self.clear_head_all();

        // We just cleared all the tasks, and we have `&mut self`, so this is
        // safe.
        unsafe { self.ready_to_run_queue.clear() };

        self.is_terminated.store(false, Relaxed);
    }

    fn clear_head_all(&mut self) {
        while !self.head_all.get_mut().is_null() {
            let head = *self.head_all.get_mut();
            let task = unsafe { self.unlink(head) };
            self.release_task(task);
        }
    }
}

impl<Fut> Drop for FuturesUnordered<Fut> {
    fn drop(&mut self) {
        // When a `FuturesUnordered` is dropped we want to drop all futures
        // associated with it. At the same time though there may be tons of
        // wakers flying around which contain `Task<Fut>` references
        // inside them. We'll let those naturally get deallocated.
        self.clear_head_all();

        // Note that at this point we could still have a bunch of tasks in the
        // ready to run queue. None of those tasks, however, have futures
        // associated with them so they're safe to destroy on any thread. At
        // this point the `FuturesUnordered` struct, the owner of the one
        // strong reference to the ready to run queue, will drop that strong
        // reference. At that point whichever thread releases the strong
        // refcount last (be it this thread or some other thread as part of an
        // `upgrade`) will clear out the ready to run queue and free all
        // remaining tasks.
        //
        // While that freeing operation isn't guaranteed to happen here, it's
        // guaranteed to happen "promptly" as no more "blocking work" will
        // happen while there's a strong refcount held.
    }
}

impl<'a, Fut: Unpin> IntoIterator for &'a FuturesUnordered<Fut> {
    type Item = &'a Fut;
    type IntoIter = Iter<'a, Fut>;

    fn into_iter(self) -> Self::IntoIter {
        self.iter()
    }
}

impl<'a, Fut: Unpin> IntoIterator for &'a mut FuturesUnordered<Fut> {
    type Item = &'a mut Fut;
    type IntoIter = IterMut<'a, Fut>;

    fn into_iter(self) -> Self::IntoIter {
        self.iter_mut()
    }
}

impl<Fut: Unpin> IntoIterator for FuturesUnordered<Fut> {
    type Item = Fut;
    type IntoIter = IntoIter<Fut>;

    fn into_iter(mut self) -> Self::IntoIter {
        // `head_all` can be accessed directly and we don't need to spin on
        // `Task::next_all` since we have exclusive access to the set.
        let task = *self.head_all.get_mut();
        let len = if task.is_null() { 0 } else { unsafe { *(*task).len_all.get() } };

        IntoIter { len, inner: self }
    }
}

impl<Fut> FromIterator<Fut> for FuturesUnordered<Fut> {
    fn from_iter<I>(iter: I) -> Self
    where
        I: IntoIterator<Item = Fut>,
    {
        let acc = Self::new();
        iter.into_iter().fold(acc, |acc, item| {
            acc.push(item);
            acc
        })
    }
}

impl<Fut: Future> FusedStream for FuturesUnordered<Fut> {
    fn is_terminated(&self) -> bool {
        self.is_terminated.load(Relaxed)
    }
}

impl<Fut> Extend<Fut> for FuturesUnordered<Fut> {
    fn extend<I>(&mut self, iter: I)
    where
        I: IntoIterator<Item = Fut>,
    {
        for item in iter {
            self.push(item);
        }
    }
}