//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef TEST_BENCHMARKS_CONTAINERS_ASSOCIATIVE_CONTAINER_BENCHMARKS_H
#define TEST_BENCHMARKS_CONTAINERS_ASSOCIATIVE_CONTAINER_BENCHMARKS_H

#include <algorithm>
#include <cstddef>
#include <iterator>
#include <new>
#include <random>
#include <set>
#include <string>
#include <type_traits>
#include <utility>
#include <vector>

#include "benchmark/benchmark.h"
#include "../../GenerateInput.h"

namespace support {

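// This header provides generic benchmarks for the common operations of associative containers:
// construction, assignment, insertion, erasure and lookup, each registered at 32, 1024 and 8192
// elements. To benchmark a container, provide a specialization of adapt_operations<Container>
// implementing the interface sketched out below and then call
// associative_container_benchmarks<Container>(...); the registered benchmarks are then run by
// the usual Google Benchmark machinery (e.g. benchmark::RunSpecifiedBenchmarks()).
//
// adapt_operations is the customization point that tells the benchmarks how to create a value
// from a key, how to recover the key from a value, and how to obtain an iterator from the
// result of insert().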
template <class Container>
struct adapt_operations {
  // using ValueType = ...;
  // using KeyType = ...;
  // static ValueType value_from_key(KeyType const& k);
  // static KeyType key_from_value(ValueType const& value);

  // using InsertionResult = ...;
  // static Container::iterator get_iterator(InsertionResult const&);
};
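
// As a purely illustrative sketch (the actual specializations are provided by the code that
// calls associative_container_benchmarks()), a specialization for std::map<int, int> could look
// roughly like this:
//
//   template <>
//   struct adapt_operations<std::map<int, int>> {
//     using ValueType = std::map<int, int>::value_type; // std::pair<const int, int>
//     using KeyType   = std::map<int, int>::key_type;   // int
//
//     static ValueType value_from_key(KeyType const& k) { return {k, 0}; }
//     static KeyType key_from_value(ValueType const& value) { return value.first; }
//
//     using InsertionResult = std::pair<std::map<int, int>::iterator, bool>;
//     static std::map<int, int>::iterator get_iterator(InsertionResult const& result) { return result.first; }
//   };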

template <class Container>
void associative_container_benchmarks(std::string container) {
  using Key = typename Container::key_type;
  using Value = typename Container::value_type;

  auto generate_unique_keys = [=](std::size_t n) {
    std::set<Key> keys;
    while (keys.size() < n) {
      Key k = Generate<Key>::random();
      keys.insert(k);
    }
    return std::vector<Key>(keys.begin(), keys.end());
  };

  auto make_value_types = [](std::vector<Key> const& keys) {
    std::vector<Value> kv;
    for (Key const& k : keys)
      kv.push_back(adapt_operations<Container>::value_from_key(k));
    return kv;
  };

  auto get_key = [](Value const& v) { return adapt_operations<Container>::key_from_value(v); };

  auto bench = [&](std::string operation, auto f) {
    benchmark::RegisterBenchmark(container + "::" + operation, f)->Arg(32)->Arg(1024)->Arg(8192);
  };

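  // Unique-key containers' insert(value) returns std::pair<iterator, bool>, whereas multi-key
  // containers (multimap, multiset and friends) return just an iterator, so the insertion result
  // type tells us whether the container supports duplicate keys.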
  static constexpr bool is_multi_key_container =
      !std::is_same_v<typename adapt_operations<Container>::InsertionResult,
                      std::pair<typename Container::iterator, bool>>;

  static constexpr bool is_ordered_container = requires(Container c, Key k) { c.lower_bound(k); };

  // These benchmarks are structured to perform the operation under test a small number of times
  // in each iteration, in order to amortize the cost of PauseTiming() and ResumeTiming().
  static constexpr std::size_t BatchSize = 32;

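  // Raw storage with the size and alignment of a Container. The constructor benchmarks
  // placement-new containers into this storage and destroy them by hand, so that only the
  // construction itself is measured.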
  struct alignas(Container) ScratchSpace {
    char storage[sizeof(Container)];
  };

  /////////////////////////
  // Constructors
  /////////////////////////
  bench("ctor(const&)", [=](auto& st) {
    const std::size_t size = st.range(0);
    std::vector<Value> in = make_value_types(generate_unique_keys(size));
    Container src(in.begin(), in.end());
    ScratchSpace c[BatchSize];

    while (st.KeepRunningBatch(BatchSize)) {
      for (std::size_t i = 0; i != BatchSize; ++i) {
        new (c + i) Container(src);
        benchmark::DoNotOptimize(c + i);
        benchmark::ClobberMemory();
      }

      st.PauseTiming();
      for (std::size_t i = 0; i != BatchSize; ++i) {
        reinterpret_cast<Container*>(c + i)->~Container();
      }
      st.ResumeTiming();
    }
  });

  bench("ctor(iterator, iterator) (unsorted sequence)", [=](auto& st) {
    const std::size_t size = st.range(0);
    std::mt19937 randomness;
    std::vector<Key> keys = generate_unique_keys(size);
    std::shuffle(keys.begin(), keys.end(), randomness);
    std::vector<Value> in = make_value_types(keys);
    ScratchSpace c[BatchSize];

    while (st.KeepRunningBatch(BatchSize)) {
      for (std::size_t i = 0; i != BatchSize; ++i) {
        new (c + i) Container(in.begin(), in.end());
        benchmark::DoNotOptimize(c + i);
        benchmark::ClobberMemory();
      }

      st.PauseTiming();
      for (std::size_t i = 0; i != BatchSize; ++i) {
        reinterpret_cast<Container*>(c + i)->~Container();
      }
      st.ResumeTiming();
    }
  });

  bench("ctor(iterator, iterator) (sorted sequence)", [=](auto& st) {
    const std::size_t size = st.range(0);
    std::vector<Key> keys = generate_unique_keys(size);
    std::sort(keys.begin(), keys.end());
    std::vector<Value> in = make_value_types(keys);
    ScratchSpace c[BatchSize];

    while (st.KeepRunningBatch(BatchSize)) {
      for (std::size_t i = 0; i != BatchSize; ++i) {
        new (c + i) Container(in.begin(), in.end());
        benchmark::DoNotOptimize(c + i);
        benchmark::ClobberMemory();
      }

      st.PauseTiming();
      for (std::size_t i = 0; i != BatchSize; ++i) {
        reinterpret_cast<Container*>(c + i)->~Container();
      }
      st.ResumeTiming();
    }
  });

  /////////////////////////
  // Assignment
  /////////////////////////
  bench("operator=(const&)", [=](auto& st) {
    const std::size_t size = st.range(0);
    std::vector<Value> in = make_value_types(generate_unique_keys(size));
    Container src(in.begin(), in.end());
    Container c[BatchSize];

    while (st.KeepRunningBatch(BatchSize)) {
      for (std::size_t i = 0; i != BatchSize; ++i) {
        c[i] = src;
        benchmark::DoNotOptimize(c[i]);
        benchmark::ClobberMemory();
      }

      st.PauseTiming();
      for (std::size_t i = 0; i != BatchSize; ++i) {
        c[i].clear();
      }
      st.ResumeTiming();
    }
  });

  /////////////////////////
  // Insertion
  /////////////////////////
  bench("insert(value) (already present)", [=](auto& st) {
    const std::size_t size = st.range(0);
    std::vector<Value> in = make_value_types(generate_unique_keys(size));
    Value to_insert = in[in.size() / 2]; // pick any existing value
    std::vector<Container> c(BatchSize, Container(in.begin(), in.end()));
    typename Container::iterator inserted[BatchSize];

    while (st.KeepRunningBatch(BatchSize)) {
      for (std::size_t i = 0; i != BatchSize; ++i) {
        inserted[i] = adapt_operations<Container>::get_iterator(c[i].insert(to_insert));
        benchmark::DoNotOptimize(inserted[i]);
        benchmark::DoNotOptimize(c[i]);
        benchmark::ClobberMemory();
      }

      if constexpr (is_multi_key_container) {
        st.PauseTiming();
        for (std::size_t i = 0; i != BatchSize; ++i) {
          c[i].erase(inserted[i]);
        }
        st.ResumeTiming();
      }
    }
  });

  bench("insert(value) (new value)", [=](auto& st) {
    const std::size_t size = st.range(0);
    std::vector<Value> in = make_value_types(generate_unique_keys(size + 1));
    Value to_insert = in.back();
    in.pop_back();
    std::vector<Container> c(BatchSize, Container(in.begin(), in.end()));

    while (st.KeepRunningBatch(BatchSize)) {
      for (std::size_t i = 0; i != BatchSize; ++i) {
        auto result = c[i].insert(to_insert);
        benchmark::DoNotOptimize(result);
        benchmark::DoNotOptimize(c[i]);
        benchmark::ClobberMemory();
      }

      st.PauseTiming();
      for (std::size_t i = 0; i != BatchSize; ++i) {
        c[i].erase(get_key(to_insert));
      }
      st.ResumeTiming();
    }
  });

  // The insert(hint, ...) methods are only benchmarked on ordered containers because we have no
  // good way of computing a meaningful hint for unordered ones.
  if constexpr (is_ordered_container) {
    bench("insert(hint, value) (good hint)", [=](auto& st) {
      const std::size_t size = st.range(0);
      std::vector<Value> in = make_value_types(generate_unique_keys(size + 1));
      Value to_insert = in.back();
      in.pop_back();

      std::vector<Container> c(BatchSize, Container(in.begin(), in.end()));
      typename Container::iterator hints[BatchSize];
      for (std::size_t i = 0; i != BatchSize; ++i) {
        hints[i] = c[i].lower_bound(get_key(to_insert));
      }

      while (st.KeepRunningBatch(BatchSize)) {
        for (std::size_t i = 0; i != BatchSize; ++i) {
          auto result = c[i].insert(hints[i], to_insert);
          benchmark::DoNotOptimize(result);
          benchmark::DoNotOptimize(c[i]);
          benchmark::ClobberMemory();
        }

        st.PauseTiming();
        for (std::size_t i = 0; i != BatchSize; ++i) {
          c[i].erase(get_key(to_insert));
          hints[i] = c[i].lower_bound(get_key(to_insert)); // refresh hints in case of invalidation
        }
        st.ResumeTiming();
      }
    });

    bench("insert(hint, value) (bad hint)", [=](auto& st) {
      const std::size_t size = st.range(0);
      std::vector<Value> in = make_value_types(generate_unique_keys(size + 1));
      Value to_insert = in.back();
      in.pop_back();
      std::vector<Container> c(BatchSize, Container(in.begin(), in.end()));

      while (st.KeepRunningBatch(BatchSize)) {
        for (std::size_t i = 0; i != BatchSize; ++i) {
          auto result = c[i].insert(c[i].begin(), to_insert);
          benchmark::DoNotOptimize(result);
          benchmark::DoNotOptimize(c[i]);
          benchmark::ClobberMemory();
        }

        st.PauseTiming();
        for (std::size_t i = 0; i != BatchSize; ++i) {
          c[i].erase(get_key(to_insert));
        }
        st.ResumeTiming();
      }
    });
  }

  bench("insert(iterator, iterator) (all new keys)", [=](auto& st) {
    const std::size_t size = st.range(0);
    std::vector<Value> in = make_value_types(generate_unique_keys(size + (size / 10)));

    // Populate a container with a small number of elements; that's what the container will start
    // with before the bulk insertion.
    std::vector<Value> small;
    for (std::size_t i = 0; i != (size / 10); ++i) {
      small.push_back(in.back());
      in.pop_back();
    }
    Container c(small.begin(), small.end());

    for ([[maybe_unused]] auto _ : st) {
      c.insert(in.begin(), in.end());
      benchmark::DoNotOptimize(c);
      benchmark::ClobberMemory();

      st.PauseTiming();
      c = Container(small.begin(), small.end());
      st.ResumeTiming();
    }
  });

  bench("insert(iterator, iterator) (half new keys)", [=](auto& st) {
    const std::size_t size = st.range(0);
    std::vector<Value> in = make_value_types(generate_unique_keys(size));

    // Populate a container that already contains half of the elements we'll be inserting; that's
    // what the container will start with before the bulk insertion.
    std::vector<Value> small;
    for (std::size_t i = 0; i != size / 2; ++i) {
      small.push_back(in.at(i * 2));
    }
    Container c(small.begin(), small.end());

    for ([[maybe_unused]] auto _ : st) {
      c.insert(in.begin(), in.end());
      benchmark::DoNotOptimize(c);
      benchmark::ClobberMemory();

      st.PauseTiming();
      c = Container(small.begin(), small.end());
      st.ResumeTiming();
    }
  });

  /////////////////////////
  // Erasure
  /////////////////////////
  bench("erase(key) (existent)", [=](auto& st) {
    const std::size_t size = st.range(0);
    std::vector<Value> in = make_value_types(generate_unique_keys(size));
    Value element = in[in.size() / 2]; // pick any element
    std::vector<Container> c(BatchSize, Container(in.begin(), in.end()));

    while (st.KeepRunningBatch(BatchSize)) {
      for (std::size_t i = 0; i != BatchSize; ++i) {
        auto result = c[i].erase(get_key(element));
        benchmark::DoNotOptimize(result);
        benchmark::DoNotOptimize(c[i]);
        benchmark::ClobberMemory();
      }

      st.PauseTiming();
      for (std::size_t i = 0; i != BatchSize; ++i) {
        c[i].insert(element);
      }
      st.ResumeTiming();
    }
  });

  bench("erase(key) (non-existent)", [=](auto& st) {
    const std::size_t size = st.range(0);
    std::vector<Value> in = make_value_types(generate_unique_keys(size + BatchSize));
    std::vector<Key> keys;
    for (std::size_t i = 0; i != BatchSize; ++i) {
      keys.push_back(get_key(in.back()));
      in.pop_back();
    }
    Container c(in.begin(), in.end());

    while (st.KeepRunningBatch(BatchSize)) {
      for (std::size_t i = 0; i != BatchSize; ++i) {
        auto result = c.erase(keys[i]);
        benchmark::DoNotOptimize(result);
        benchmark::DoNotOptimize(c);
        benchmark::ClobberMemory();
      }

      // no cleanup required because we only tried to erase non-existent elements
    }
  });

  bench("erase(iterator)", [=](auto& st) {
    const std::size_t size = st.range(0);
    std::vector<Value> in = make_value_types(generate_unique_keys(size));
    Value element = in[in.size() / 2]; // pick any element

    std::vector<Container> c;
    std::vector<typename Container::iterator> iterators;
    for (std::size_t i = 0; i != BatchSize; ++i) {
      c.push_back(Container(in.begin(), in.end()));
      iterators.push_back(c[i].find(get_key(element)));
    }

    while (st.KeepRunningBatch(BatchSize)) {
      for (std::size_t i = 0; i != BatchSize; ++i) {
        auto result = c[i].erase(iterators[i]);
        benchmark::DoNotOptimize(result);
        benchmark::DoNotOptimize(c[i]);
        benchmark::ClobberMemory();
      }

      st.PauseTiming();
      for (std::size_t i = 0; i != BatchSize; ++i) {
        iterators[i] = adapt_operations<Container>::get_iterator(c[i].insert(element));
      }
      st.ResumeTiming();
    }
  });

  bench("erase(iterator, iterator) (erase half the container)", [=](auto& st) {
    const std::size_t size = st.range(0);
    std::vector<Value> in = make_value_types(generate_unique_keys(size));
    Container c(in.begin(), in.end());

    auto first = std::next(c.begin(), c.size() / 4);
    auto last = std::next(c.begin(), 3 * (c.size() / 4));
    for ([[maybe_unused]] auto _ : st) {
      auto result = c.erase(first, last);
      benchmark::DoNotOptimize(result);
      benchmark::DoNotOptimize(c);
      benchmark::ClobberMemory();

      st.PauseTiming();
      c = Container(in.begin(), in.end());
      first = std::next(c.begin(), c.size() / 4);
      last = std::next(c.begin(), 3 * (c.size() / 4));
      st.ResumeTiming();
    }
  });

  bench("clear()", [=](auto& st) {
    const std::size_t size = st.range(0);
    std::vector<Value> in = make_value_types(generate_unique_keys(size));
    Container c(in.begin(), in.end());

    for ([[maybe_unused]] auto _ : st) {
      c.clear();
      benchmark::DoNotOptimize(c);
      benchmark::ClobberMemory();

      st.PauseTiming();
      c = Container(in.begin(), in.end());
      st.ResumeTiming();
    }
  });

  /////////////////////////
  // Query
  /////////////////////////
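  // These helpers wrap a lookup operation into a benchmark function: with_existent_key calls the
  // operation with keys that are present in the container, and with_nonexistent_key calls it with
  // keys that are not.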
  auto with_existent_key = [=](auto func) {
    return [=](auto& st) {
      const std::size_t size = st.range(0);
      std::vector<Value> in = make_value_types(generate_unique_keys(size));
      // Pick BatchSize existing elements, spread roughly evenly across the container.
      std::vector<Key> keys;
      for (std::size_t i = 0; i < in.size(); i += (in.size() / BatchSize)) {
        keys.push_back(get_key(in.at(i)));
      }
      Container c(in.begin(), in.end());

      while (st.KeepRunningBatch(BatchSize)) {
        for (std::size_t i = 0; i != BatchSize; ++i) {
          auto result = func(c, keys[i]);
          benchmark::DoNotOptimize(c);
          benchmark::DoNotOptimize(result);
          benchmark::ClobberMemory();
        }
      }
    };
  };

  auto with_nonexistent_key = [=](auto func) {
    return [=](auto& st) {
      const std::size_t size = st.range(0);
      std::vector<Value> in = make_value_types(generate_unique_keys(size + BatchSize));
      std::vector<Key> keys;
      for (std::size_t i = 0; i != BatchSize; ++i) {
        keys.push_back(get_key(in.back()));
        in.pop_back();
      }
      Container c(in.begin(), in.end());

      while (st.KeepRunningBatch(BatchSize)) {
        for (std::size_t i = 0; i != BatchSize; ++i) {
          auto result = func(c, keys[i]);
          benchmark::DoNotOptimize(c);
          benchmark::DoNotOptimize(result);
          benchmark::ClobberMemory();
        }
      }
    };
  };

  auto find = [](Container const& c, Key const& key) { return c.find(key); };
  bench("find(key) (existent)", with_existent_key(find));
  bench("find(key) (non-existent)", with_nonexistent_key(find));

  auto count = [](Container const& c, Key const& key) { return c.count(key); };
  bench("count(key) (existent)", with_existent_key(count));
  bench("count(key) (non-existent)", with_nonexistent_key(count));

  auto contains = [](Container const& c, Key const& key) { return c.contains(key); };
  bench("contains(key) (existent)", with_existent_key(contains));
  bench("contains(key) (non-existent)", with_nonexistent_key(contains));

  if constexpr (is_ordered_container) {
    auto lower_bound = [](Container const& c, Key const& key) { return c.lower_bound(key); };
    bench("lower_bound(key) (existent)", with_existent_key(lower_bound));
    bench("lower_bound(key) (non-existent)", with_nonexistent_key(lower_bound));

    auto upper_bound = [](Container const& c, Key const& key) { return c.upper_bound(key); };
    bench("upper_bound(key) (existent)", with_existent_key(upper_bound));
    bench("upper_bound(key) (non-existent)", with_nonexistent_key(upper_bound));

    auto equal_range = [](Container const& c, Key const& key) { return c.equal_range(key); };
    bench("equal_range(key) (existent)", with_existent_key(equal_range));
    bench("equal_range(key) (non-existent)", with_nonexistent_key(equal_range));
  }
}

} // namespace support

#endif // TEST_BENCHMARKS_CONTAINERS_ASSOCIATIVE_CONTAINER_BENCHMARKS_H