//! Benchmark implementation details of the current-thread scheduler. These
//! benches are intended to be used as a form of regression testing and not as
//! a general purpose benchmark demonstrating real-world performance.

use tokio::runtime::{self, Runtime};

use criterion::{criterion_group, criterion_main, Criterion};

const NUM_SPAWN: usize = 1_000;

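// Spawn tasks from inside the runtime (`tokio::spawn` within `block_on`) and await them all.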
fn spawn_many_local(c: &mut Criterion) {
    let rt = rt();
    let mut handles = Vec::with_capacity(NUM_SPAWN);

    c.bench_function("spawn_many_local", |b| {
        b.iter(|| {
            rt.block_on(async {
                for _ in 0..NUM_SPAWN {
                    handles.push(tokio::spawn(async move {}));
                }

                for handle in handles.drain(..) {
                    handle.await.unwrap();
                }
            });
        })
    });
}

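// Spawn tasks from outside the runtime via its handle while the runtime is otherwise idle.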
fn spawn_many_remote_idle(c: &mut Criterion) {
    let rt = rt();
    let rt_handle = rt.handle();
    let mut handles = Vec::with_capacity(NUM_SPAWN);

    c.bench_function("spawn_many_remote_idle", |b| {
        b.iter(|| {
            for _ in 0..NUM_SPAWN {
                handles.push(rt_handle.spawn(async {}));
            }

            rt.block_on(async {
                for handle in handles.drain(..) {
                    handle.await.unwrap();
                }
            });
        })
    });
}

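// Spawn tasks from outside the runtime while it also has its own work queued.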
fn spawn_many_remote_busy(c: &mut Criterion) {
    let rt = rt();
    let rt_handle = rt.handle();
    let mut handles = Vec::with_capacity(NUM_SPAWN);

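    // Keep the runtime busy: each spawned task immediately spawns a successor,
    // so there is always other work pending while the benchmark spawns tasks.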
    rt.spawn(async {
        fn iter() {
            tokio::spawn(async { iter() });
        }

        iter()
    });

    c.bench_function("spawn_many_remote_busy", |b| {
        b.iter(|| {
            for _ in 0..NUM_SPAWN {
                handles.push(rt_handle.spawn(async {}));
            }

            rt.block_on(async {
                for handle in handles.drain(..) {
                    handle.await.unwrap();
                }
            });
        })
    });
}

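// All benchmarks above run on a current-thread (single-threaded) runtime.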
fn rt() -> Runtime {
    runtime::Builder::new_current_thread().build().unwrap()
}

criterion_group!(
    scheduler,
    spawn_many_local,
    spawn_many_remote_idle,
    spawn_many_remote_busy
);

criterion_main!(scheduler);