1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* |
3 | * Budget Fair Queueing (BFQ) I/O scheduler. |
4 | * |
5 | * Based on ideas and code from CFQ: |
6 | * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk> |
7 | * |
8 | * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it> |
9 | * Paolo Valente <paolo.valente@unimore.it> |
10 | * |
11 | * Copyright (C) 2010 Paolo Valente <paolo.valente@unimore.it> |
12 | * Arianna Avanzini <avanzini@google.com> |
13 | * |
14 | * Copyright (C) 2017 Paolo Valente <paolo.valente@linaro.org> |
15 | * |
16 | * BFQ is a proportional-share I/O scheduler, with some extra |
17 | * low-latency capabilities. BFQ also supports full hierarchical |
 * scheduling through cgroups. The next paragraphs provide an
 * introduction to BFQ's inner workings. Details on BFQ benefits, usage
 * and limitations can be found in Documentation/block/bfq-iosched.rst.
21 | * |
22 | * BFQ is a proportional-share storage-I/O scheduling algorithm based |
23 | * on the slice-by-slice service scheme of CFQ. But BFQ assigns |
24 | * budgets, measured in number of sectors, to processes instead of |
25 | * time slices. The device is not granted to the in-service process |
26 | * for a given time slice, but until it has exhausted its assigned |
27 | * budget. This change from the time to the service domain enables BFQ |
28 | * to distribute the device throughput among processes as desired, |
29 | * without any distortion due to throughput fluctuations, or to device |
30 | * internal queueing. BFQ uses an ad hoc internal scheduler, called |
31 | * B-WF2Q+, to schedule processes according to their budgets. More |
32 | * precisely, BFQ schedules queues associated with processes. Each |
33 | * process/queue is assigned a user-configurable weight, and B-WF2Q+ |
34 | * guarantees that each queue receives a fraction of the throughput |
35 | * proportional to its weight. Thanks to the accurate policy of |
36 | * B-WF2Q+, BFQ can afford to assign high budgets to I/O-bound |
37 | * processes issuing sequential requests (to boost the throughput), |
38 | * and yet guarantee a low latency to interactive and soft real-time |
39 | * applications. |
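 *
 * For instance, if two continuously-backlogged queues have weights
 * 100 and 300, then B-WF2Q+ lets them receive about 25% and 75% of
 * the device throughput, respectively, independently of the request
 * sizes or of fluctuations in the device service rate.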
40 | * |
41 | * In particular, to provide these low-latency guarantees, BFQ |
42 | * explicitly privileges the I/O of two classes of time-sensitive |
43 | * applications: interactive and soft real-time. In more detail, BFQ |
44 | * behaves this way if the low_latency parameter is set (default |
45 | * configuration). This feature enables BFQ to provide applications in |
46 | * these classes with a very low latency. |
47 | * |
48 | * To implement this feature, BFQ constantly tries to detect whether |
49 | * the I/O requests in a bfq_queue come from an interactive or a soft |
50 | * real-time application. For brevity, in these cases, the queue is |
51 | * said to be interactive or soft real-time. In both cases, BFQ |
52 | * privileges the service of the queue, over that of non-interactive |
53 | * and non-soft-real-time queues. This privileging is performed, |
54 | * mainly, by raising the weight of the queue. So, for brevity, we |
55 | * call just weight-raising periods the time periods during which a |
56 | * queue is privileged, because deemed interactive or soft real-time. |
57 | * |
58 | * The detection of soft real-time queues/applications is described in |
59 | * detail in the comments on the function |
60 | * bfq_bfqq_softrt_next_start. On the other hand, the detection of an |
61 | * interactive queue works as follows: a queue is deemed interactive |
 * if it is constantly non-empty only for a limited time interval,
 * after which it does become empty. The queue may be deemed
 * interactive again (for a limited time) if it restarts being
 * constantly non-empty, provided that this happens only after the
 * queue has remained empty for a given minimum idle time.
67 | * |
68 | * By default, BFQ computes automatically the above maximum time |
69 | * interval, i.e., the time interval after which a constantly |
70 | * non-empty queue stops being deemed interactive. Since a queue is |
71 | * weight-raised while it is deemed interactive, this maximum time |
72 | * interval happens to coincide with the (maximum) duration of the |
73 | * weight-raising for interactive queues. |
74 | * |
75 | * Finally, BFQ also features additional heuristics for |
76 | * preserving both a low latency and a high throughput on NCQ-capable, |
 * rotational or flash-based devices, and for getting the job done
 * quickly for applications consisting of many I/O-bound processes.
79 | * |
80 | * NOTE: if the main or only goal, with a given device, is to achieve |
81 | * the maximum-possible throughput at all times, then do switch off |
82 | * all low-latency heuristics for that device, by setting low_latency |
83 | * to 0. |
84 | * |
 * BFQ is described in [1], where a reference to the initial, more
 * theoretical paper on BFQ can also be found. In the latter paper, the
 * interested reader can find full details on the main algorithm, as
88 | * well as formulas of the guarantees and formal proofs of all the |
89 | * properties. With respect to the version of BFQ presented in these |
90 | * papers, this implementation adds a few more heuristics, such as the |
91 | * ones that guarantee a low latency to interactive and soft real-time |
92 | * applications, and a hierarchical extension based on H-WF2Q+. |
93 | * |
94 | * B-WF2Q+ is based on WF2Q+, which is described in [2], together with |
95 | * H-WF2Q+, while the augmented tree used here to implement B-WF2Q+ |
96 | * with O(log N) complexity derives from the one introduced with EEVDF |
97 | * in [3]. |
98 | * |
99 | * [1] P. Valente, A. Avanzini, "Evolution of the BFQ Storage I/O |
100 | * Scheduler", Proceedings of the First Workshop on Mobile System |
101 | * Technologies (MST-2015), May 2015. |
102 | * http://algogroup.unimore.it/people/paolo/disk_sched/mst-2015.pdf |
103 | * |
104 | * [2] Jon C.R. Bennett and H. Zhang, "Hierarchical Packet Fair Queueing |
105 | * Algorithms", IEEE/ACM Transactions on Networking, 5(5):675-689, |
106 | * Oct 1997. |
107 | * |
108 | * http://www.cs.cmu.edu/~hzhang/papers/TON-97-Oct.ps.gz |
109 | * |
110 | * [3] I. Stoica and H. Abdel-Wahab, "Earliest Eligible Virtual Deadline |
111 | * First: A Flexible and Accurate Mechanism for Proportional Share |
112 | * Resource Allocation", technical report. |
113 | * |
114 | * http://www.cs.berkeley.edu/~istoica/papers/eevdf-tr-95.pdf |
115 | */ |
116 | #include <linux/module.h> |
117 | #include <linux/slab.h> |
118 | #include <linux/blkdev.h> |
119 | #include <linux/cgroup.h> |
120 | #include <linux/ktime.h> |
121 | #include <linux/rbtree.h> |
122 | #include <linux/ioprio.h> |
123 | #include <linux/sbitmap.h> |
124 | #include <linux/delay.h> |
125 | #include <linux/backing-dev.h> |
126 | |
127 | #include <trace/events/block.h> |
128 | |
129 | #include "elevator.h" |
130 | #include "blk.h" |
131 | #include "blk-mq.h" |
132 | #include "blk-mq-sched.h" |
133 | #include "bfq-iosched.h" |
134 | #include "blk-wbt.h" |
135 | |
136 | #define BFQ_BFQQ_FNS(name) \ |
137 | void bfq_mark_bfqq_##name(struct bfq_queue *bfqq) \ |
138 | { \ |
139 | __set_bit(BFQQF_##name, &(bfqq)->flags); \ |
140 | } \ |
141 | void bfq_clear_bfqq_##name(struct bfq_queue *bfqq) \ |
142 | { \ |
143 | __clear_bit(BFQQF_##name, &(bfqq)->flags); \ |
144 | } \ |
145 | int bfq_bfqq_##name(const struct bfq_queue *bfqq) \ |
146 | { \ |
147 | return test_bit(BFQQF_##name, &(bfqq)->flags); \ |
148 | } |
149 | |
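/*
 * For example, the invocation BFQ_BFQQ_FNS(busy) below generates the
 * three helpers bfq_mark_bfqq_busy(), bfq_clear_bfqq_busy() and
 * bfq_bfqq_busy(), which respectively set, clear and test the
 * BFQQF_busy flag of a bfq_queue.
 */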
150 | BFQ_BFQQ_FNS(just_created); |
151 | BFQ_BFQQ_FNS(busy); |
152 | BFQ_BFQQ_FNS(wait_request); |
153 | BFQ_BFQQ_FNS(non_blocking_wait_rq); |
154 | BFQ_BFQQ_FNS(fifo_expire); |
155 | BFQ_BFQQ_FNS(has_short_ttime); |
156 | BFQ_BFQQ_FNS(sync); |
157 | BFQ_BFQQ_FNS(IO_bound); |
158 | BFQ_BFQQ_FNS(in_large_burst); |
159 | BFQ_BFQQ_FNS(coop); |
160 | BFQ_BFQQ_FNS(split_coop); |
161 | BFQ_BFQQ_FNS(softrt_update); |
#undef BFQ_BFQQ_FNS
163 | |
164 | /* Expiration time of async (0) and sync (1) requests, in ns. */ |
165 | static const u64 bfq_fifo_expire[2] = { NSEC_PER_SEC / 4, NSEC_PER_SEC / 8 }; |
166 | |
167 | /* Maximum backwards seek (magic number lifted from CFQ), in KiB. */ |
168 | static const int bfq_back_max = 16 * 1024; |
169 | |
170 | /* Penalty of a backwards seek, in number of sectors. */ |
171 | static const int bfq_back_penalty = 2; |
172 | |
173 | /* Idling period duration, in ns. */ |
174 | static u64 bfq_slice_idle = NSEC_PER_SEC / 125; |
175 | |
176 | /* Minimum number of assigned budgets for which stats are safe to compute. */ |
177 | static const int bfq_stats_min_budgets = 194; |
178 | |
/* Default maximum budget value, in sectors. */
180 | static const int bfq_default_max_budget = 16 * 1024; |
181 | |
182 | /* |
183 | * When a sync request is dispatched, the queue that contains that |
184 | * request, and all the ancestor entities of that queue, are charged |
185 | * with the number of sectors of the request. In contrast, if the |
186 | * request is async, then the queue and its ancestor entities are |
187 | * charged with the number of sectors of the request, multiplied by |
188 | * the factor below. This throttles the bandwidth for async I/O, |
 * w.r.t. sync I/O, and it is done to counter the tendency of async
 * writes to steal I/O throughput from reads.
191 | * |
192 | * The current value of this parameter is the result of a tuning with |
193 | * several hardware and software configurations. We tried to find the |
194 | * lowest value for which writes do not cause noticeable problems to |
 * reads. In fact, the lower this parameter, the more stable the I/O
 * control, in the following respect. The lower this parameter is, the
 * less the bandwidth enjoyed by a group decreases
 * - when the group does writes, w.r.t. when it does reads;
 * - when other groups do reads, w.r.t. when they do writes.
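 *
 * As a concrete example, with the factor set to 3 below, an async
 * request of 8 sectors is normally charged as 24 sectors of service,
 * whereas a sync request of 8 sectors is charged exactly 8 (see
 * bfq_serv_to_charge() for the cases in which the factor is not
 * applied).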
200 | */ |
201 | static const int bfq_async_charge_factor = 3; |
202 | |
/* Default timeout value, in jiffies, approximating the CFQ default. */
204 | const int bfq_timeout = HZ / 8; |
205 | |
206 | /* |
207 | * Time limit for merging (see comments in bfq_setup_cooperator). Set |
208 | * to the slowest value that, in our tests, proved to be effective in |
209 | * removing false positives, while not causing true positives to miss |
210 | * queue merging. |
211 | * |
212 | * As can be deduced from the low time limit below, queue merging, if |
213 | * successful, happens at the very beginning of the I/O of the involved |
214 | * cooperating processes, as a consequence of the arrival of the very |
215 | * first requests from each cooperator. After that, there is very |
216 | * little chance to find cooperators. |
217 | */ |
218 | static const unsigned long bfq_merge_time_limit = HZ/10; |
219 | |
220 | static struct kmem_cache *bfq_pool; |
221 | |
222 | /* Below this threshold (in ns), we consider thinktime immediate. */ |
223 | #define BFQ_MIN_TT (2 * NSEC_PER_MSEC) |
224 | |
225 | /* hw_tag detection: parallel requests threshold and min samples needed. */ |
226 | #define BFQ_HW_QUEUE_THRESHOLD 3 |
227 | #define BFQ_HW_QUEUE_SAMPLES 32 |
228 | |
229 | #define BFQQ_SEEK_THR (sector_t)(8 * 100) |
230 | #define BFQQ_SECT_THR_NONROT (sector_t)(2 * 32) |
231 | #define BFQ_RQ_SEEKY(bfqd, last_pos, rq) \ |
232 | (get_sdist(last_pos, rq) > \ |
233 | BFQQ_SEEK_THR && \ |
234 | (!blk_queue_nonrot(bfqd->queue) || \ |
235 | blk_rq_sectors(rq) < BFQQ_SECT_THR_NONROT)) |
236 | #define BFQQ_CLOSE_THR (sector_t)(8 * 1024) |
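/*
 * bfqq->seek_history is a 32-bit sliding window, with one bit per
 * recent request (1 = the request was seeky with respect to the
 * previous one). A queue is regarded as seeky if more than 19 of its
 * last 32 requests were seeky.
 */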
237 | #define BFQQ_SEEKY(bfqq) (hweight32(bfqq->seek_history) > 19) |
238 | /* |
239 | * Sync random I/O is likely to be confused with soft real-time I/O, |
240 | * because it is characterized by limited throughput and apparently |
241 | * isochronous arrival pattern. To avoid false positives, queues |
242 | * containing only random (seeky) I/O are prevented from being tagged |
243 | * as soft real-time. |
244 | */ |
245 | #define BFQQ_TOTALLY_SEEKY(bfqq) (bfqq->seek_history == -1) |
246 | |
247 | /* Min number of samples required to perform peak-rate update */ |
248 | #define BFQ_RATE_MIN_SAMPLES 32 |
249 | /* Min observation time interval required to perform a peak-rate update (ns) */ |
250 | #define BFQ_RATE_MIN_INTERVAL (300*NSEC_PER_MSEC) |
251 | /* Target observation time interval for a peak-rate update (ns) */ |
252 | #define BFQ_RATE_REF_INTERVAL NSEC_PER_SEC |
253 | |
254 | /* |
255 | * Shift used for peak-rate fixed precision calculations. |
256 | * With |
257 | * - the current shift: 16 positions |
258 | * - the current type used to store rate: u32 |
259 | * - the current unit of measure for rate: [sectors/usec], or, more precisely, |
260 | * [(sectors/usec) / 2^BFQ_RATE_SHIFT] to take into account the shift, |
261 | * the range of rates that can be stored is |
262 | * [1 / 2^BFQ_RATE_SHIFT, 2^(32 - BFQ_RATE_SHIFT)] sectors/usec = |
263 | * [1 / 2^16, 2^16] sectors/usec = [15e-6, 65536] sectors/usec = |
264 | * [15, 65G] sectors/sec |
265 | * Which, assuming a sector size of 512B, corresponds to a range of |
266 | * [7.5K, 33T] B/sec |
267 | */ |
268 | #define BFQ_RATE_SHIFT 16 |
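/*
 * Purely illustrative sketch of the encoding above (this helper is
 * hypothetical and not used by the scheduler): given that a device
 * transferred @sectors sectors in @usecs microseconds, return the
 * corresponding rate in the fixed-point representation
 * [(sectors/usec) << BFQ_RATE_SHIFT].
 */
static inline u32 bfq_rate_fixed_point_example(u64 sectors, u32 usecs)
{
	u64 rate = sectors << BFQ_RATE_SHIFT; /* scale before dividing */

	do_div(rate, usecs);
	return rate; /* fits in 32 bits for the whole range listed above */
}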
269 | |
270 | /* |
271 | * When configured for computing the duration of the weight-raising |
272 | * for interactive queues automatically (see the comments at the |
273 | * beginning of this file), BFQ does it using the following formula: |
274 | * duration = (ref_rate / r) * ref_wr_duration, |
275 | * where r is the peak rate of the device, and ref_rate and |
276 | * ref_wr_duration are two reference parameters. In particular, |
277 | * ref_rate is the peak rate of the reference storage device (see |
278 | * below), and ref_wr_duration is about the maximum time needed, with |
279 | * BFQ and while reading two files in parallel, to load typical large |
280 | * applications on the reference device (see the comments on |
281 | * max_service_from_wr below, for more details on how ref_wr_duration |
282 | * is obtained). In practice, the slower/faster the device at hand |
283 | * is, the more/less it takes to load applications with respect to the |
284 | * reference device. Accordingly, the longer/shorter BFQ grants |
285 | * weight raising to interactive applications. |
286 | * |
287 | * BFQ uses two different reference pairs (ref_rate, ref_wr_duration), |
288 | * depending on whether the device is rotational or non-rotational. |
289 | * |
290 | * In the following definitions, ref_rate[0] and ref_wr_duration[0] |
291 | * are the reference values for a rotational device, whereas |
292 | * ref_rate[1] and ref_wr_duration[1] are the reference values for a |
293 | * non-rotational device. The reference rates are not the actual peak |
294 | * rates of the devices used as a reference, but slightly lower |
295 | * values. The reason for using slightly lower values is that the |
296 | * peak-rate estimator tends to yield slightly lower values than the |
297 | * actual peak rate (it can yield the actual peak rate only if there |
298 | * is only one process doing I/O, and the process does sequential |
299 | * I/O). |
300 | * |
301 | * The reference peak rates are measured in sectors/usec, left-shifted |
302 | * by BFQ_RATE_SHIFT. |
303 | */ |
304 | static int ref_rate[2] = {14000, 33000}; |
305 | /* |
306 | * To improve readability, a conversion function is used to initialize |
307 | * the following array, which entails that the array can be |
308 | * initialized only in a function. |
309 | */ |
310 | static int ref_wr_duration[2]; |
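/*
 * Purely illustrative sketch of the formula above (hypothetical
 * helper, not what BFQ actually executes; the real computation, in
 * bfq_wr_duration() further below, starts from a pre-computed
 * rate-duration product): duration = (ref_rate / r) * ref_wr_duration,
 * with r the estimated peak rate of the device.
 */
static inline u64 bfq_wr_duration_example(bool rotational, u32 peak_rate)
{
	int idx = rotational ? 0 : 1; /* index 0: rotational, 1: non-rot */
	u64 dur = (u64)ref_rate[idx] * ref_wr_duration[idx];

	do_div(dur, peak_rate);
	return dur; /* in the same time unit as ref_wr_duration */
}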
311 | |
312 | /* |
313 | * BFQ uses the above-detailed, time-based weight-raising mechanism to |
314 | * privilege interactive tasks. This mechanism is vulnerable to the |
315 | * following false positives: I/O-bound applications that will go on |
316 | * doing I/O for much longer than the duration of weight |
317 | * raising. These applications have basically no benefit from being |
318 | * weight-raised at the beginning of their I/O. On the opposite end, |
319 | * while being weight-raised, these applications |
 * a) unjustly steal throughput from applications that may actually need
321 | * low latency; |
322 | * b) make BFQ uselessly perform device idling; device idling results |
323 | * in loss of device throughput with most flash-based storage, and may |
324 | * increase latencies when used purposelessly. |
325 | * |
326 | * BFQ tries to reduce these problems, by adopting the following |
327 | * countermeasure. To introduce this countermeasure, we need first to |
328 | * finish explaining how the duration of weight-raising for |
329 | * interactive tasks is computed. |
330 | * |
331 | * For a bfq_queue deemed as interactive, the duration of weight |
332 | * raising is dynamically adjusted, as a function of the estimated |
333 | * peak rate of the device, so as to be equal to the time needed to |
334 | * execute the 'largest' interactive task we benchmarked so far. By |
335 | * largest task, we mean the task for which each involved process has |
336 | * to do more I/O than for any of the other tasks we benchmarked. This |
337 | * reference interactive task is the start-up of LibreOffice Writer, |
338 | * and in this task each process/bfq_queue needs to have at most ~110K |
339 | * sectors transferred. |
340 | * |
341 | * This last piece of information enables BFQ to reduce the actual |
342 | * duration of weight-raising for at least one class of I/O-bound |
343 | * applications: those doing sequential or quasi-sequential I/O. An |
344 | * example is file copy. In fact, once started, the main I/O-bound |
345 | * processes of these applications usually consume the above 110K |
346 | * sectors in much less time than the processes of an application that |
347 | * is starting, because these I/O-bound processes will greedily devote |
348 | * almost all their CPU cycles only to their target, |
349 | * throughput-friendly I/O operations. This is even more true if BFQ |
350 | * happens to be underestimating the device peak rate, and thus |
351 | * overestimating the duration of weight raising. But, according to |
 * our measurements, once they have transferred 110K sectors, these
 * processes have no right to be weight-raised any longer.
354 | * |
 * Based on the last consideration, BFQ ends weight-raising for a
356 | * bfq_queue if the latter happens to have received an amount of |
357 | * service at least equal to the following constant. The constant is |
358 | * set to slightly more than 110K, to have a minimum safety margin. |
359 | * |
360 | * This early ending of weight-raising reduces the amount of time |
361 | * during which interactive false positives cause the two problems |
362 | * described at the beginning of these comments. |
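 *
 * (With 512 B sectors, 120000 sectors correspond to about 59 MiB of
 * service.)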
363 | */ |
364 | static const unsigned long max_service_from_wr = 120000; |
365 | |
366 | /* |
367 | * Maximum time between the creation of two queues, for stable merge |
368 | * to be activated (in ms) |
369 | */ |
370 | static const unsigned long bfq_activation_stable_merging = 600; |
371 | /* |
372 | * Minimum time to be waited before evaluating delayed stable merge (in ms) |
373 | */ |
374 | static const unsigned long bfq_late_stable_merging = 600; |
375 | |
376 | #define RQ_BIC(rq) ((struct bfq_io_cq *)((rq)->elv.priv[0])) |
377 | #define RQ_BFQQ(rq) ((rq)->elv.priv[1]) |
378 | |
379 | struct bfq_queue *bic_to_bfqq(struct bfq_io_cq *bic, bool is_sync, |
380 | unsigned int actuator_idx) |
381 | { |
382 | if (is_sync) |
383 | return bic->bfqq[1][actuator_idx]; |
384 | |
385 | return bic->bfqq[0][actuator_idx]; |
386 | } |
387 | |
388 | static void bfq_put_stable_ref(struct bfq_queue *bfqq); |
389 | |
390 | void bic_set_bfqq(struct bfq_io_cq *bic, |
391 | struct bfq_queue *bfqq, |
392 | bool is_sync, |
393 | unsigned int actuator_idx) |
394 | { |
395 | struct bfq_queue *old_bfqq = bic->bfqq[is_sync][actuator_idx]; |
396 | |
397 | /* |
398 | * If bfqq != NULL, then a non-stable queue merge between |
399 | * bic->bfqq and bfqq is happening here. This causes troubles |
400 | * in the following case: bic->bfqq has also been scheduled |
401 | * for a possible stable merge with bic->stable_merge_bfqq, |
402 | * and bic->stable_merge_bfqq == bfqq happens to |
403 | * hold. Troubles occur because bfqq may then undergo a split, |
404 | * thereby becoming eligible for a stable merge. Yet, if |
405 | * bic->stable_merge_bfqq points exactly to bfqq, then bfqq |
406 | * would be stably merged with itself. To avoid this anomaly, |
407 | * we cancel the stable merge if |
408 | * bic->stable_merge_bfqq == bfqq. |
409 | */ |
410 | struct bfq_iocq_bfqq_data *bfqq_data = &bic->bfqq_data[actuator_idx]; |
411 | |
412 | /* Clear bic pointer if bfqq is detached from this bic */ |
413 | if (old_bfqq && old_bfqq->bic == bic) |
414 | old_bfqq->bic = NULL; |
415 | |
416 | if (is_sync) |
417 | bic->bfqq[1][actuator_idx] = bfqq; |
418 | else |
419 | bic->bfqq[0][actuator_idx] = bfqq; |
420 | |
421 | if (bfqq && bfqq_data->stable_merge_bfqq == bfqq) { |
422 | /* |
423 | * Actually, these same instructions are executed also |
424 | * in bfq_setup_cooperator, in case of abort or actual |
425 | * execution of a stable merge. We could avoid |
426 | * repeating these instructions there too, but if we |
427 | * did so, we would nest even more complexity in this |
428 | * function. |
429 | */ |
		bfq_put_stable_ref(bfqq_data->stable_merge_bfqq);
431 | |
432 | bfqq_data->stable_merge_bfqq = NULL; |
433 | } |
434 | } |
435 | |
436 | struct bfq_data *bic_to_bfqd(struct bfq_io_cq *bic) |
437 | { |
438 | return bic->icq.q->elevator->elevator_data; |
439 | } |
440 | |
441 | /** |
442 | * icq_to_bic - convert iocontext queue structure to bfq_io_cq. |
443 | * @icq: the iocontext queue. |
444 | */ |
445 | static struct bfq_io_cq *icq_to_bic(struct io_cq *icq) |
446 | { |
447 | /* bic->icq is the first member, %NULL will convert to %NULL */ |
448 | return container_of(icq, struct bfq_io_cq, icq); |
449 | } |
450 | |
451 | /** |
 * bfq_bic_lookup - look up the bfq_io_cq of the current task on @q.
453 | * @q: the request queue. |
454 | */ |
455 | static struct bfq_io_cq *bfq_bic_lookup(struct request_queue *q) |
456 | { |
457 | struct bfq_io_cq *icq; |
458 | unsigned long flags; |
459 | |
460 | if (!current->io_context) |
461 | return NULL; |
462 | |
463 | spin_lock_irqsave(&q->queue_lock, flags); |
	icq = icq_to_bic(ioc_lookup_icq(q));
	spin_unlock_irqrestore(&q->queue_lock, flags);
466 | |
467 | return icq; |
468 | } |
469 | |
470 | /* |
 * Schedule a run of the queue if there are requests pending and no one in
 * the driver will restart queueing.
473 | */ |
474 | void bfq_schedule_dispatch(struct bfq_data *bfqd) |
475 | { |
476 | lockdep_assert_held(&bfqd->lock); |
477 | |
478 | if (bfqd->queued != 0) { |
		bfq_log(bfqd, "schedule dispatch");
		blk_mq_run_hw_queues(bfqd->queue, true);
481 | } |
482 | } |
483 | |
484 | #define bfq_class_idle(bfqq) ((bfqq)->ioprio_class == IOPRIO_CLASS_IDLE) |
485 | |
486 | #define bfq_sample_valid(samples) ((samples) > 80) |
487 | |
488 | /* |
 * Lifted from AS - choose which of rq1 and rq2 is best served now.
490 | * We choose the request that is closer to the head right now. Distance |
491 | * behind the head is penalized and only allowed to a certain extent. |
492 | */ |
493 | static struct request *bfq_choose_req(struct bfq_data *bfqd, |
494 | struct request *rq1, |
495 | struct request *rq2, |
496 | sector_t last) |
497 | { |
498 | sector_t s1, s2, d1 = 0, d2 = 0; |
499 | unsigned long back_max; |
500 | #define BFQ_RQ1_WRAP 0x01 /* request 1 wraps */ |
501 | #define BFQ_RQ2_WRAP 0x02 /* request 2 wraps */ |
502 | unsigned int wrap = 0; /* bit mask: requests behind the disk head? */ |
503 | |
504 | if (!rq1 || rq1 == rq2) |
505 | return rq2; |
506 | if (!rq2) |
507 | return rq1; |
508 | |
	if (rq_is_sync(rq1) && !rq_is_sync(rq2))
		return rq1;
	else if (rq_is_sync(rq2) && !rq_is_sync(rq1))
512 | return rq2; |
513 | if ((rq1->cmd_flags & REQ_META) && !(rq2->cmd_flags & REQ_META)) |
514 | return rq1; |
515 | else if ((rq2->cmd_flags & REQ_META) && !(rq1->cmd_flags & REQ_META)) |
516 | return rq2; |
517 | |
	s1 = blk_rq_pos(rq1);
	s2 = blk_rq_pos(rq2);
520 | |
521 | /* |
522 | * By definition, 1KiB is 2 sectors. |
523 | */ |
524 | back_max = bfqd->bfq_back_max * 2; |
525 | |
526 | /* |
527 | * Strict one way elevator _except_ in the case where we allow |
528 | * short backward seeks which are biased as twice the cost of a |
529 | * similar forward seek. |
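	 *
	 * For example, with the default bfq_back_penalty of 2, a request
	 * lying 100 sectors behind the head is treated as if it were 200
	 * sectors ahead of it, and requests farther behind than back_max
	 * are regarded as wrapped.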
530 | */ |
531 | if (s1 >= last) |
532 | d1 = s1 - last; |
533 | else if (s1 + back_max >= last) |
534 | d1 = (last - s1) * bfqd->bfq_back_penalty; |
535 | else |
536 | wrap |= BFQ_RQ1_WRAP; |
537 | |
538 | if (s2 >= last) |
539 | d2 = s2 - last; |
540 | else if (s2 + back_max >= last) |
541 | d2 = (last - s2) * bfqd->bfq_back_penalty; |
542 | else |
543 | wrap |= BFQ_RQ2_WRAP; |
544 | |
545 | /* Found required data */ |
546 | |
547 | /* |
548 | * By doing switch() on the bit mask "wrap" we avoid having to |
549 | * check two variables for all permutations: --> faster! |
550 | */ |
551 | switch (wrap) { |
552 | case 0: /* common case for CFQ: rq1 and rq2 not wrapped */ |
553 | if (d1 < d2) |
554 | return rq1; |
555 | else if (d2 < d1) |
556 | return rq2; |
557 | |
558 | if (s1 >= s2) |
559 | return rq1; |
560 | else |
561 | return rq2; |
562 | |
563 | case BFQ_RQ2_WRAP: |
564 | return rq1; |
565 | case BFQ_RQ1_WRAP: |
566 | return rq2; |
567 | case BFQ_RQ1_WRAP|BFQ_RQ2_WRAP: /* both rqs wrapped */ |
568 | default: |
569 | /* |
570 | * Since both rqs are wrapped, |
571 | * start with the one that's further behind head |
572 | * (--> only *one* back seek required), |
573 | * since back seek takes more time than forward. |
574 | */ |
575 | if (s1 <= s2) |
576 | return rq1; |
577 | else |
578 | return rq2; |
579 | } |
580 | } |
581 | |
582 | #define BFQ_LIMIT_INLINE_DEPTH 16 |
583 | |
584 | #ifdef CONFIG_BFQ_GROUP_IOSCHED |
585 | static bool bfqq_request_over_limit(struct bfq_queue *bfqq, int limit) |
586 | { |
587 | struct bfq_data *bfqd = bfqq->bfqd; |
588 | struct bfq_entity *entity = &bfqq->entity; |
589 | struct bfq_entity *inline_entities[BFQ_LIMIT_INLINE_DEPTH]; |
590 | struct bfq_entity **entities = inline_entities; |
591 | int depth, level, alloc_depth = BFQ_LIMIT_INLINE_DEPTH; |
592 | int class_idx = bfqq->ioprio_class - 1; |
593 | struct bfq_sched_data *sched_data; |
594 | unsigned long wsum; |
595 | bool ret = false; |
596 | |
597 | if (!entity->on_st_or_in_serv) |
598 | return false; |
599 | |
600 | retry: |
	spin_lock_irq(&bfqd->lock);
	/* +1 for bfqq entity, root cgroup not included */
	depth = bfqg_to_blkg(bfqq_group(bfqq))->blkcg->css.cgroup->level + 1;
	if (depth > alloc_depth) {
		spin_unlock_irq(&bfqd->lock);
		if (entities != inline_entities)
			kfree(entities);
		entities = kmalloc_array(depth, sizeof(*entities), GFP_NOIO);
		if (!entities)
			return false;
		alloc_depth = depth;
		goto retry;
	}
614 | |
615 | sched_data = entity->sched_data; |
616 | /* Gather our ancestors as we need to traverse them in reverse order */ |
617 | level = 0; |
618 | for_each_entity(entity) { |
619 | /* |
620 | * If at some level entity is not even active, allow request |
621 | * queueing so that BFQ knows there's work to do and activate |
622 | * entities. |
623 | */ |
624 | if (!entity->on_st_or_in_serv) |
625 | goto out; |
626 | /* Uh, more parents than cgroup subsystem thinks? */ |
627 | if (WARN_ON_ONCE(level >= depth)) |
628 | break; |
629 | entities[level++] = entity; |
630 | } |
631 | WARN_ON_ONCE(level != depth); |
632 | for (level--; level >= 0; level--) { |
633 | entity = entities[level]; |
634 | if (level > 0) { |
635 | wsum = bfq_entity_service_tree(entity)->wsum; |
636 | } else { |
637 | int i; |
638 | /* |
639 | * For bfqq itself we take into account service trees |
640 | * of all higher priority classes and multiply their |
641 | * weights so that low prio queue from higher class |
642 | * gets more requests than high prio queue from lower |
643 | * class. |
644 | */ |
645 | wsum = 0; |
646 | for (i = 0; i <= class_idx; i++) { |
647 | wsum = wsum * IOPRIO_BE_NR + |
648 | sched_data->service_tree[i].wsum; |
649 | } |
650 | } |
651 | if (!wsum) |
652 | continue; |
653 | limit = DIV_ROUND_CLOSEST(limit * entity->weight, wsum); |
654 | if (entity->allocated >= limit) { |
			bfq_log_bfqq(bfqq->bfqd, bfqq,
				     "too many requests: allocated %d limit %d level %d",
				     entity->allocated, limit, level);
658 | ret = true; |
659 | break; |
660 | } |
661 | } |
662 | out: |
	spin_unlock_irq(&bfqd->lock);
	if (entities != inline_entities)
		kfree(entities);
666 | return ret; |
667 | } |
668 | #else |
669 | static bool bfqq_request_over_limit(struct bfq_queue *bfqq, int limit) |
670 | { |
671 | return false; |
672 | } |
673 | #endif |
674 | |
675 | /* |
676 | * Async I/O can easily starve sync I/O (both sync reads and sync |
677 | * writes), by consuming all tags. Similarly, storms of sync writes, |
678 | * such as those that sync(2) may trigger, can starve sync reads. |
679 | * Limit depths of async I/O and sync writes so as to counter both |
680 | * problems. |
681 | * |
 * Also, if a bfq queue or its parent cgroup consumes more tags than would
 * be appropriate for its weight, we trim the available tag depth to 1. This
684 | * avoids a situation where one cgroup can starve another cgroup from tags and |
685 | * thus block service differentiation among cgroups. Note that because the |
686 | * queue / cgroup already has many requests allocated and queued, this does not |
687 | * significantly affect service guarantees coming from the BFQ scheduling |
688 | * algorithm. |
689 | */ |
690 | static void bfq_limit_depth(blk_opf_t opf, struct blk_mq_alloc_data *data) |
691 | { |
692 | struct bfq_data *bfqd = data->q->elevator->elevator_data; |
	struct bfq_io_cq *bic = bfq_bic_lookup(data->q);
694 | int depth; |
695 | unsigned limit = data->q->nr_requests; |
696 | unsigned int act_idx; |
697 | |
698 | /* Sync reads have full depth available */ |
	if (op_is_sync(opf) && !op_is_write(opf)) {
		depth = 0;
	} else {
		depth = bfqd->word_depths[!!bfqd->wr_busy_queues][op_is_sync(opf)];
		limit = (limit * depth) >> bfqd->full_depth_shift;
	}
705 | |
706 | for (act_idx = 0; bic && act_idx < bfqd->num_actuators; act_idx++) { |
		struct bfq_queue *bfqq =
			bic_to_bfqq(bic, op_is_sync(opf), act_idx);
709 | |
710 | /* |
711 | * Does queue (or any parent entity) exceed number of |
712 | * requests that should be available to it? Heavily |
713 | * limit depth so that it cannot consume more |
714 | * available requests and thus starve other entities. |
715 | */ |
716 | if (bfqq && bfqq_request_over_limit(bfqq, limit)) { |
717 | depth = 1; |
718 | break; |
719 | } |
720 | } |
	bfq_log(bfqd, "[%s] wr_busy %d sync %d depth %u",
		__func__, bfqd->wr_busy_queues, op_is_sync(opf), depth);
723 | if (depth) |
724 | data->shallow_depth = depth; |
725 | } |
726 | |
727 | static struct bfq_queue * |
728 | bfq_rq_pos_tree_lookup(struct bfq_data *bfqd, struct rb_root *root, |
729 | sector_t sector, struct rb_node **ret_parent, |
730 | struct rb_node ***rb_link) |
731 | { |
732 | struct rb_node **p, *parent; |
733 | struct bfq_queue *bfqq = NULL; |
734 | |
735 | parent = NULL; |
736 | p = &root->rb_node; |
737 | while (*p) { |
738 | struct rb_node **n; |
739 | |
740 | parent = *p; |
741 | bfqq = rb_entry(parent, struct bfq_queue, pos_node); |
742 | |
743 | /* |
744 | * Sort strictly based on sector. Smallest to the left, |
745 | * largest to the right. |
746 | */ |
		if (sector > blk_rq_pos(bfqq->next_rq))
			n = &(*p)->rb_right;
		else if (sector < blk_rq_pos(bfqq->next_rq))
750 | n = &(*p)->rb_left; |
751 | else |
752 | break; |
753 | p = n; |
754 | bfqq = NULL; |
755 | } |
756 | |
757 | *ret_parent = parent; |
758 | if (rb_link) |
759 | *rb_link = p; |
760 | |
	bfq_log(bfqd, "rq_pos_tree_lookup %llu: returning %d",
762 | (unsigned long long)sector, |
763 | bfqq ? bfqq->pid : 0); |
764 | |
765 | return bfqq; |
766 | } |
767 | |
768 | static bool bfq_too_late_for_merging(struct bfq_queue *bfqq) |
769 | { |
770 | return bfqq->service_from_backlogged > 0 && |
771 | time_is_before_jiffies(bfqq->first_IO_time + |
772 | bfq_merge_time_limit); |
773 | } |
774 | |
775 | /* |
 * The following function is marked as __cold not because it is
 * actually cold, but for the same performance goal described in the
778 | * comments on the likely() at the beginning of |
779 | * bfq_setup_cooperator(). Unexpectedly, to reach an even lower |
780 | * execution time for the case where this function is not invoked, we |
781 | * had to add an unlikely() in each involved if(). |
782 | */ |
783 | void __cold |
784 | bfq_pos_tree_add_move(struct bfq_data *bfqd, struct bfq_queue *bfqq) |
785 | { |
786 | struct rb_node **p, *parent; |
787 | struct bfq_queue *__bfqq; |
788 | |
789 | if (bfqq->pos_root) { |
790 | rb_erase(&bfqq->pos_node, bfqq->pos_root); |
791 | bfqq->pos_root = NULL; |
792 | } |
793 | |
794 | /* oom_bfqq does not participate in queue merging */ |
795 | if (bfqq == &bfqd->oom_bfqq) |
796 | return; |
797 | |
798 | /* |
799 | * bfqq cannot be merged any longer (see comments in |
800 | * bfq_setup_cooperator): no point in adding bfqq into the |
801 | * position tree. |
802 | */ |
803 | if (bfq_too_late_for_merging(bfqq)) |
804 | return; |
805 | |
806 | if (bfq_class_idle(bfqq)) |
807 | return; |
808 | if (!bfqq->next_rq) |
809 | return; |
810 | |
811 | bfqq->pos_root = &bfqq_group(bfqq)->rq_pos_tree; |
	__bfqq = bfq_rq_pos_tree_lookup(bfqd, bfqq->pos_root,
			blk_rq_pos(bfqq->next_rq), &parent, &p);
	if (!__bfqq) {
		rb_link_node(&bfqq->pos_node, parent, p);
816 | rb_insert_color(&bfqq->pos_node, bfqq->pos_root); |
817 | } else |
818 | bfqq->pos_root = NULL; |
819 | } |
820 | |
821 | /* |
822 | * The following function returns false either if every active queue |
823 | * must receive the same share of the throughput (symmetric scenario), |
824 | * or, as a special case, if bfqq must receive a share of the |
825 | * throughput lower than or equal to the share that every other active |
826 | * queue must receive. If bfqq does sync I/O, then these are the only |
827 | * two cases where bfqq happens to be guaranteed its share of the |
828 | * throughput even if I/O dispatching is not plugged when bfqq remains |
829 | * temporarily empty (for more details, see the comments in the |
830 | * function bfq_better_to_idle()). For this reason, the return value |
831 | * of this function is used to check whether I/O-dispatch plugging can |
832 | * be avoided. |
833 | * |
834 | * The above first case (symmetric scenario) occurs when: |
835 | * 1) all active queues have the same weight, |
836 | * 2) all active queues belong to the same I/O-priority class, |
837 | * 3) all active groups at the same level in the groups tree have the same |
838 | * weight, |
839 | * 4) all active groups at the same level in the groups tree have the same |
840 | * number of children. |
841 | * |
842 | * Unfortunately, keeping the necessary state for evaluating exactly |
843 | * the last two symmetry sub-conditions above would be quite complex |
844 | * and time consuming. Therefore this function evaluates, instead, |
845 | * only the following stronger three sub-conditions, for which it is |
846 | * much easier to maintain the needed state: |
847 | * 1) all active queues have the same weight, |
848 | * 2) all active queues belong to the same I/O-priority class, |
849 | * 3) there is at most one active group. |
850 | * In particular, the last condition is always true if hierarchical |
851 | * support or the cgroups interface are not enabled, thus no state |
852 | * needs to be maintained in this case. |
853 | */ |
854 | static bool bfq_asymmetric_scenario(struct bfq_data *bfqd, |
855 | struct bfq_queue *bfqq) |
856 | { |
857 | bool smallest_weight = bfqq && |
858 | bfqq->weight_counter && |
859 | bfqq->weight_counter == |
860 | container_of( |
861 | rb_first_cached(&bfqd->queue_weights_tree), |
862 | struct bfq_weight_counter, |
863 | weights_node); |
864 | |
865 | /* |
866 | * For queue weights to differ, queue_weights_tree must contain |
867 | * at least two nodes. |
868 | */ |
869 | bool varied_queue_weights = !smallest_weight && |
870 | !RB_EMPTY_ROOT(&bfqd->queue_weights_tree.rb_root) && |
871 | (bfqd->queue_weights_tree.rb_root.rb_node->rb_left || |
872 | bfqd->queue_weights_tree.rb_root.rb_node->rb_right); |
873 | |
874 | bool multiple_classes_busy = |
875 | (bfqd->busy_queues[0] && bfqd->busy_queues[1]) || |
876 | (bfqd->busy_queues[0] && bfqd->busy_queues[2]) || |
877 | (bfqd->busy_queues[1] && bfqd->busy_queues[2]); |
878 | |
879 | return varied_queue_weights || multiple_classes_busy |
880 | #ifdef CONFIG_BFQ_GROUP_IOSCHED |
881 | || bfqd->num_groups_with_pending_reqs > 1 |
882 | #endif |
883 | ; |
884 | } |
885 | |
886 | /* |
887 | * If the weight-counter tree passed as input contains no counter for |
888 | * the weight of the input queue, then add that counter; otherwise just |
889 | * increment the existing counter. |
890 | * |
891 | * Note that weight-counter trees contain few nodes in mostly symmetric |
892 | * scenarios. For example, if all queues have the same weight, then the |
893 | * weight-counter tree for the queues may contain at most one node. |
894 | * This holds even if low_latency is on, because weight-raised queues |
895 | * are not inserted in the tree. |
896 | * In most scenarios, the rate at which nodes are created/destroyed |
897 | * should be low too. |
898 | */ |
899 | void bfq_weights_tree_add(struct bfq_queue *bfqq) |
900 | { |
901 | struct rb_root_cached *root = &bfqq->bfqd->queue_weights_tree; |
902 | struct bfq_entity *entity = &bfqq->entity; |
903 | struct rb_node **new = &(root->rb_root.rb_node), *parent = NULL; |
904 | bool leftmost = true; |
905 | |
906 | /* |
907 | * Do not insert if the queue is already associated with a |
908 | * counter, which happens if: |
909 | * 1) a request arrival has caused the queue to become both |
910 | * non-weight-raised, and hence change its weight, and |
911 | * backlogged; in this respect, each of the two events |
912 | * causes an invocation of this function, |
913 | * 2) this is the invocation of this function caused by the |
914 | * second event. This second invocation is actually useless, |
915 | * and we handle this fact by exiting immediately. More |
916 | * efficient or clearer solutions might possibly be adopted. |
917 | */ |
918 | if (bfqq->weight_counter) |
919 | return; |
920 | |
921 | while (*new) { |
922 | struct bfq_weight_counter *__counter = container_of(*new, |
923 | struct bfq_weight_counter, |
924 | weights_node); |
925 | parent = *new; |
926 | |
927 | if (entity->weight == __counter->weight) { |
928 | bfqq->weight_counter = __counter; |
929 | goto inc_counter; |
930 | } |
931 | if (entity->weight < __counter->weight) |
932 | new = &((*new)->rb_left); |
933 | else { |
934 | new = &((*new)->rb_right); |
935 | leftmost = false; |
936 | } |
937 | } |
938 | |
	bfqq->weight_counter = kzalloc(sizeof(struct bfq_weight_counter),
				       GFP_ATOMIC);
941 | |
942 | /* |
943 | * In the unlucky event of an allocation failure, we just |
944 | * exit. This will cause the weight of queue to not be |
945 | * considered in bfq_asymmetric_scenario, which, in its turn, |
946 | * causes the scenario to be deemed wrongly symmetric in case |
947 | * bfqq's weight would have been the only weight making the |
948 | * scenario asymmetric. On the bright side, no unbalance will |
949 | * however occur when bfqq becomes inactive again (the |
950 | * invocation of this function is triggered by an activation |
	 * of the queue). In fact, bfq_weights_tree_remove does nothing
952 | * if !bfqq->weight_counter. |
953 | */ |
954 | if (unlikely(!bfqq->weight_counter)) |
955 | return; |
956 | |
957 | bfqq->weight_counter->weight = entity->weight; |
	rb_link_node(&bfqq->weight_counter->weights_node, parent, new);
	rb_insert_color_cached(&bfqq->weight_counter->weights_node, root,
			       leftmost);
961 | |
962 | inc_counter: |
963 | bfqq->weight_counter->num_active++; |
964 | bfqq->ref++; |
965 | } |
966 | |
967 | /* |
968 | * Decrement the weight counter associated with the queue, and, if the |
969 | * counter reaches 0, remove the counter from the tree. |
970 | * See the comments to the function bfq_weights_tree_add() for considerations |
971 | * about overhead. |
972 | */ |
973 | void bfq_weights_tree_remove(struct bfq_queue *bfqq) |
974 | { |
975 | struct rb_root_cached *root; |
976 | |
977 | if (!bfqq->weight_counter) |
978 | return; |
979 | |
980 | root = &bfqq->bfqd->queue_weights_tree; |
981 | bfqq->weight_counter->num_active--; |
982 | if (bfqq->weight_counter->num_active > 0) |
983 | goto reset_entity_pointer; |
984 | |
	rb_erase_cached(&bfqq->weight_counter->weights_node, root);
	kfree(bfqq->weight_counter);
987 | |
988 | reset_entity_pointer: |
989 | bfqq->weight_counter = NULL; |
990 | bfq_put_queue(bfqq); |
991 | } |
992 | |
993 | /* |
994 | * Return expired entry, or NULL to just start from scratch in rbtree. |
995 | */ |
996 | static struct request *bfq_check_fifo(struct bfq_queue *bfqq, |
997 | struct request *last) |
998 | { |
999 | struct request *rq; |
1000 | |
1001 | if (bfq_bfqq_fifo_expire(bfqq)) |
1002 | return NULL; |
1003 | |
1004 | bfq_mark_bfqq_fifo_expire(bfqq); |
1005 | |
1006 | rq = rq_entry_fifo(bfqq->fifo.next); |
1007 | |
1008 | if (rq == last || ktime_get_ns() < rq->fifo_time) |
1009 | return NULL; |
1010 | |
	bfq_log_bfqq(bfqq->bfqd, bfqq, "check_fifo: returned %p", rq);
1012 | return rq; |
1013 | } |
1014 | |
1015 | static struct request *bfq_find_next_rq(struct bfq_data *bfqd, |
1016 | struct bfq_queue *bfqq, |
1017 | struct request *last) |
1018 | { |
1019 | struct rb_node *rbnext = rb_next(&last->rb_node); |
1020 | struct rb_node *rbprev = rb_prev(&last->rb_node); |
1021 | struct request *next, *prev = NULL; |
1022 | |
1023 | /* Follow expired path, else get first next available. */ |
1024 | next = bfq_check_fifo(bfqq, last); |
1025 | if (next) |
1026 | return next; |
1027 | |
1028 | if (rbprev) |
1029 | prev = rb_entry_rq(rbprev); |
1030 | |
1031 | if (rbnext) |
1032 | next = rb_entry_rq(rbnext); |
1033 | else { |
1034 | rbnext = rb_first(&bfqq->sort_list); |
1035 | if (rbnext && rbnext != &last->rb_node) |
1036 | next = rb_entry_rq(rbnext); |
1037 | } |
1038 | |
	return bfq_choose_req(bfqd, next, prev, blk_rq_pos(last));
1040 | } |
1041 | |
1042 | /* see the definition of bfq_async_charge_factor for details */ |
1043 | static unsigned long bfq_serv_to_charge(struct request *rq, |
1044 | struct bfq_queue *bfqq) |
1045 | { |
1046 | if (bfq_bfqq_sync(bfqq) || bfqq->wr_coeff > 1 || |
	    bfq_asymmetric_scenario(bfqq->bfqd, bfqq))
1048 | return blk_rq_sectors(rq); |
1049 | |
1050 | return blk_rq_sectors(rq) * bfq_async_charge_factor; |
1051 | } |
1052 | |
1053 | /** |
1054 | * bfq_updated_next_req - update the queue after a new next_rq selection. |
1055 | * @bfqd: the device data the queue belongs to. |
1056 | * @bfqq: the queue to update. |
1057 | * |
1058 | * If the first request of a queue changes we make sure that the queue |
1059 | * has enough budget to serve at least its first request (if the |
 * request has grown). We do this because, if the queue does not have
 * enough budget for its first request, it has to go through two dispatch
1062 | * rounds to actually get it dispatched. |
1063 | */ |
1064 | static void bfq_updated_next_req(struct bfq_data *bfqd, |
1065 | struct bfq_queue *bfqq) |
1066 | { |
1067 | struct bfq_entity *entity = &bfqq->entity; |
1068 | struct request *next_rq = bfqq->next_rq; |
1069 | unsigned long new_budget; |
1070 | |
1071 | if (!next_rq) |
1072 | return; |
1073 | |
1074 | if (bfqq == bfqd->in_service_queue) |
1075 | /* |
1076 | * In order not to break guarantees, budgets cannot be |
1077 | * changed after an entity has been selected. |
1078 | */ |
1079 | return; |
1080 | |
1081 | new_budget = max_t(unsigned long, |
1082 | max_t(unsigned long, bfqq->max_budget, |
1083 | bfq_serv_to_charge(next_rq, bfqq)), |
1084 | entity->service); |
1085 | if (entity->budget != new_budget) { |
1086 | entity->budget = new_budget; |
		bfq_log_bfqq(bfqd, bfqq, "updated next rq: new budget %lu",
			     new_budget);
		bfq_requeue_bfqq(bfqd, bfqq, false);
1090 | } |
1091 | } |
1092 | |
1093 | static unsigned int bfq_wr_duration(struct bfq_data *bfqd) |
1094 | { |
1095 | u64 dur; |
1096 | |
1097 | dur = bfqd->rate_dur_prod; |
1098 | do_div(dur, bfqd->peak_rate); |
1099 | |
1100 | /* |
1101 | * Limit duration between 3 and 25 seconds. The upper limit |
1102 | * has been conservatively set after the following worst case: |
1103 | * on a QEMU/KVM virtual machine |
1104 | * - running in a slow PC |
1105 | * - with a virtual disk stacked on a slow low-end 5400rpm HDD |
1106 | * - serving a heavy I/O workload, such as the sequential reading |
1107 | * of several files |
1108 | * mplayer took 23 seconds to start, if constantly weight-raised. |
1109 | * |
	 * As for values higher than the one accommodating the above bad
	 * scenario, tests show that they would often yield
1112 | * the opposite of the desired result, i.e., would worsen |
1113 | * responsiveness by allowing non-interactive applications to |
1114 | * preserve weight raising for too long. |
1115 | * |
1116 | * On the other end, lower values than 3 seconds make it |
1117 | * difficult for most interactive tasks to complete their jobs |
1118 | * before weight-raising finishes. |
1119 | */ |
1120 | return clamp_val(dur, msecs_to_jiffies(3000), msecs_to_jiffies(25000)); |
1121 | } |
1122 | |
1123 | /* switch back from soft real-time to interactive weight raising */ |
1124 | static void switch_back_to_interactive_wr(struct bfq_queue *bfqq, |
1125 | struct bfq_data *bfqd) |
1126 | { |
1127 | bfqq->wr_coeff = bfqd->bfq_wr_coeff; |
1128 | bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); |
1129 | bfqq->last_wr_start_finish = bfqq->wr_start_at_switch_to_srt; |
1130 | } |
1131 | |
1132 | static void |
1133 | bfq_bfqq_resume_state(struct bfq_queue *bfqq, struct bfq_data *bfqd, |
1134 | struct bfq_io_cq *bic, bool bfq_already_existing) |
1135 | { |
1136 | unsigned int old_wr_coeff = 1; |
1137 | bool busy = bfq_already_existing && bfq_bfqq_busy(bfqq); |
1138 | unsigned int a_idx = bfqq->actuator_idx; |
1139 | struct bfq_iocq_bfqq_data *bfqq_data = &bic->bfqq_data[a_idx]; |
1140 | |
1141 | if (bfqq_data->saved_has_short_ttime) |
1142 | bfq_mark_bfqq_has_short_ttime(bfqq); |
1143 | else |
1144 | bfq_clear_bfqq_has_short_ttime(bfqq); |
1145 | |
1146 | if (bfqq_data->saved_IO_bound) |
1147 | bfq_mark_bfqq_IO_bound(bfqq); |
1148 | else |
1149 | bfq_clear_bfqq_IO_bound(bfqq); |
1150 | |
1151 | bfqq->last_serv_time_ns = bfqq_data->saved_last_serv_time_ns; |
1152 | bfqq->inject_limit = bfqq_data->saved_inject_limit; |
1153 | bfqq->decrease_time_jif = bfqq_data->saved_decrease_time_jif; |
1154 | |
1155 | bfqq->entity.new_weight = bfqq_data->saved_weight; |
1156 | bfqq->ttime = bfqq_data->saved_ttime; |
1157 | bfqq->io_start_time = bfqq_data->saved_io_start_time; |
1158 | bfqq->tot_idle_time = bfqq_data->saved_tot_idle_time; |
1159 | /* |
1160 | * Restore weight coefficient only if low_latency is on |
1161 | */ |
1162 | if (bfqd->low_latency) { |
1163 | old_wr_coeff = bfqq->wr_coeff; |
1164 | bfqq->wr_coeff = bfqq_data->saved_wr_coeff; |
1165 | } |
1166 | bfqq->service_from_wr = bfqq_data->saved_service_from_wr; |
1167 | bfqq->wr_start_at_switch_to_srt = |
1168 | bfqq_data->saved_wr_start_at_switch_to_srt; |
1169 | bfqq->last_wr_start_finish = bfqq_data->saved_last_wr_start_finish; |
1170 | bfqq->wr_cur_max_time = bfqq_data->saved_wr_cur_max_time; |
1171 | |
1172 | if (bfqq->wr_coeff > 1 && (bfq_bfqq_in_large_burst(bfqq) || |
1173 | time_is_before_jiffies(bfqq->last_wr_start_finish + |
1174 | bfqq->wr_cur_max_time))) { |
1175 | if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time && |
1176 | !bfq_bfqq_in_large_burst(bfqq) && |
1177 | time_is_after_eq_jiffies(bfqq->wr_start_at_switch_to_srt + |
1178 | bfq_wr_duration(bfqd))) { |
1179 | switch_back_to_interactive_wr(bfqq, bfqd); |
1180 | } else { |
1181 | bfqq->wr_coeff = 1; |
			bfq_log_bfqq(bfqq->bfqd, bfqq,
				     "resume state: switching off wr");
1184 | } |
1185 | } |
1186 | |
1187 | /* make sure weight will be updated, however we got here */ |
1188 | bfqq->entity.prio_changed = 1; |
1189 | |
1190 | if (likely(!busy)) |
1191 | return; |
1192 | |
1193 | if (old_wr_coeff == 1 && bfqq->wr_coeff > 1) |
1194 | bfqd->wr_busy_queues++; |
1195 | else if (old_wr_coeff > 1 && bfqq->wr_coeff == 1) |
1196 | bfqd->wr_busy_queues--; |
1197 | } |
1198 | |
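/*
 * Number of references to bfqq that are held by processes (i.e., by
 * I/O contexts), obtained by subtracting from the total reference
 * count the references that BFQ holds internally: one per allocated
 * request, one if the queue is on a service tree or in service, one
 * if a weight counter points to it, plus the stable-merge references.
 */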
1199 | static int bfqq_process_refs(struct bfq_queue *bfqq) |
1200 | { |
1201 | return bfqq->ref - bfqq->entity.allocated - |
1202 | bfqq->entity.on_st_or_in_serv - |
1203 | (bfqq->weight_counter != NULL) - bfqq->stable_ref; |
1204 | } |
1205 | |
1206 | /* Empty burst list and add just bfqq (see comments on bfq_handle_burst) */ |
1207 | static void bfq_reset_burst_list(struct bfq_data *bfqd, struct bfq_queue *bfqq) |
1208 | { |
1209 | struct bfq_queue *item; |
1210 | struct hlist_node *n; |
1211 | |
1212 | hlist_for_each_entry_safe(item, n, &bfqd->burst_list, burst_list_node) |
		hlist_del_init(&item->burst_list_node);
1214 | |
1215 | /* |
1216 | * Start the creation of a new burst list only if there is no |
1217 | * active queue. See comments on the conditional invocation of |
1218 | * bfq_handle_burst(). |
1219 | */ |
1220 | if (bfq_tot_busy_queues(bfqd) == 0) { |
		hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
1222 | bfqd->burst_size = 1; |
1223 | } else |
1224 | bfqd->burst_size = 0; |
1225 | |
1226 | bfqd->burst_parent_entity = bfqq->entity.parent; |
1227 | } |
1228 | |
1229 | /* Add bfqq to the list of queues in current burst (see bfq_handle_burst) */ |
1230 | static void bfq_add_to_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) |
1231 | { |
1232 | /* Increment burst size to take into account also bfqq */ |
1233 | bfqd->burst_size++; |
1234 | |
1235 | if (bfqd->burst_size == bfqd->bfq_large_burst_thresh) { |
1236 | struct bfq_queue *pos, *bfqq_item; |
1237 | struct hlist_node *n; |
1238 | |
1239 | /* |
1240 | * Enough queues have been activated shortly after each |
1241 | * other to consider this burst as large. |
1242 | */ |
1243 | bfqd->large_burst = true; |
1244 | |
1245 | /* |
1246 | * We can now mark all queues in the burst list as |
1247 | * belonging to a large burst. |
1248 | */ |
1249 | hlist_for_each_entry(bfqq_item, &bfqd->burst_list, |
1250 | burst_list_node) |
			bfq_mark_bfqq_in_large_burst(bfqq_item);
1252 | bfq_mark_bfqq_in_large_burst(bfqq); |
1253 | |
1254 | /* |
1255 | * From now on, and until the current burst finishes, any |
1256 | * new queue being activated shortly after the last queue |
1257 | * was inserted in the burst can be immediately marked as |
1258 | * belonging to a large burst. So the burst list is not |
1259 | * needed any more. Remove it. |
1260 | */ |
1261 | hlist_for_each_entry_safe(pos, n, &bfqd->burst_list, |
1262 | burst_list_node) |
			hlist_del_init(&pos->burst_list_node);
1264 | } else /* |
1265 | * Burst not yet large: add bfqq to the burst list. Do |
1266 | * not increment the ref counter for bfqq, because bfqq |
1267 | * is removed from the burst list before freeing bfqq |
1268 | * in put_queue. |
1269 | */ |
		hlist_add_head(&bfqq->burst_list_node, &bfqd->burst_list);
1271 | } |
1272 | |
1273 | /* |
1274 | * If many queues belonging to the same group happen to be created |
1275 | * shortly after each other, then the processes associated with these |
1276 | * queues have typically a common goal. In particular, bursts of queue |
1277 | * creations are usually caused by services or applications that spawn |
1278 | * many parallel threads/processes. Examples are systemd during boot, |
1279 | * or git grep. To help these processes get their job done as soon as |
1280 | * possible, it is usually better to not grant either weight-raising |
1281 | * or device idling to their queues, unless these queues must be |
1282 | * protected from the I/O flowing through other active queues. |
1283 | * |
1284 | * In this comment we describe, firstly, the reasons why this fact |
1285 | * holds, and, secondly, the next function, which implements the main |
1286 | * steps needed to properly mark these queues so that they can then be |
1287 | * treated in a different way. |
1288 | * |
1289 | * The above services or applications benefit mostly from a high |
1290 | * throughput: the quicker the requests of the activated queues are |
1291 | * cumulatively served, the sooner the target job of these queues gets |
1292 | * completed. As a consequence, weight-raising any of these queues, |
1293 | * which also implies idling the device for it, is almost always |
1294 | * counterproductive, unless there are other active queues to isolate |
 * these new queues from. If there are no other active queues, then
1296 | * weight-raising these new queues just lowers throughput in most |
1297 | * cases. |
1298 | * |
1299 | * On the other hand, a burst of queue creations may be caused also by |
1300 | * the start of an application that does not consist of a lot of |
1301 | * parallel I/O-bound threads. In fact, with a complex application, |
 * several short processes may need to be executed to start up the
1303 | * application. In this respect, to start an application as quickly as |
1304 | * possible, the best thing to do is in any case to privilege the I/O |
1305 | * related to the application with respect to all other |
1306 | * I/O. Therefore, the best strategy to start as quickly as possible |
1307 | * an application that causes a burst of queue creations is to |
1308 | * weight-raise all the queues created during the burst. This is the |
1309 | * exact opposite of the best strategy for the other type of bursts. |
1310 | * |
1311 | * In the end, to take the best action for each of the two cases, the |
1312 | * two types of bursts need to be distinguished. Fortunately, this |
1313 | * seems relatively easy, by looking at the sizes of the bursts. In |
 * particular, we found a threshold such that only bursts larger than
 * that threshold are apparently caused by services or commands such
 * as systemd or git grep. For brevity, hereafter we simply call these
 * bursts 'large'. BFQ *does not*
1318 | * weight-raise queues whose creation occurs in a large burst. In |
1319 | * addition, for each of these queues BFQ performs or does not perform |
1320 | * idling depending on which choice boosts the throughput more. The |
1321 | * exact choice depends on the device and request pattern at |
1322 | * hand. |
1323 | * |
1324 | * Unfortunately, false positives may occur while an interactive task |
1325 | * is starting (e.g., an application is being started). The |
1326 | * consequence is that the queues associated with the task do not |
1327 | * enjoy weight raising as expected. Fortunately these false positives |
1328 | * are very rare. They typically occur if some service happens to |
1329 | * start doing I/O exactly when the interactive task starts. |
1330 | * |
1331 | * Turning back to the next function, it is invoked only if there are |
1332 | * no active queues (apart from active queues that would belong to the |
1333 | * same, possible burst bfqq would belong to), and it implements all |
1334 | * the steps needed to detect the occurrence of a large burst and to |
1335 | * properly mark all the queues belonging to it (so that they can then |
1336 | * be treated in a different way). This goal is achieved by |
1337 | * maintaining a "burst list" that holds, temporarily, the queues that |
1338 | * belong to the burst in progress. The list is then used to mark |
1339 | * these queues as belonging to a large burst if the burst does become |
1340 | * large. The main steps are the following. |
1341 | * |
1342 | * . when the very first queue is created, the queue is inserted into the |
1343 | * list (as it could be the first queue in a possible burst) |
1344 | * |
1345 | * . if the current burst has not yet become large, and a queue Q that does |
1346 | * not yet belong to the burst is activated shortly after the last time |
1347 | * at which a new queue entered the burst list, then the function appends |
1348 | * Q to the burst list |
1349 | * |
1350 | * . if, as a consequence of the previous step, the burst size reaches |
1351 | * the large-burst threshold, then |
1352 | * |
1353 | * . all the queues in the burst list are marked as belonging to a |
1354 | * large burst |
1355 | * |
1356 | * . the burst list is deleted; in fact, the burst list already served |
1357 | * its purpose (keeping temporarily track of the queues in a burst, |
1358 | * so as to be able to mark them as belonging to a large burst in the |
1359 | * previous sub-step), and now is not needed any more |
1360 | * |
1361 | * . the device enters a large-burst mode |
1362 | * |
1363 | * . if a queue Q that does not belong to the burst is created while |
1364 | * the device is in large-burst mode and shortly after the last time |
1365 | * at which a queue either entered the burst list or was marked as |
1366 | * belonging to the current large burst, then Q is immediately marked |
1367 | * as belonging to a large burst. |
1368 | * |
1369 | * . if a queue Q that does not belong to the burst is created a while |
 *   later than, i.e., not shortly after, the last time at which a queue
1371 | * either entered the burst list or was marked as belonging to the |
1372 | * current large burst, then the current burst is deemed as finished and: |
1373 | * |
1374 | * . the large-burst mode is reset if set |
1375 | * |
1376 | * . the burst list is emptied |
1377 | * |
1378 | * . Q is inserted in the burst list, as Q may be the first queue |
1379 | * in a possible new burst (then the burst list contains just Q |
1380 | * after this step). |
1381 | */ |
1382 | static void bfq_handle_burst(struct bfq_data *bfqd, struct bfq_queue *bfqq) |
1383 | { |
1384 | /* |
1385 | * If bfqq is already in the burst list or is part of a large |
	 * burst, or, finally, has just been split, then there is
1387 | * nothing else to do. |
1388 | */ |
	if (!hlist_unhashed(&bfqq->burst_list_node) ||
1390 | bfq_bfqq_in_large_burst(bfqq) || |
1391 | time_is_after_eq_jiffies(bfqq->split_time + |
1392 | msecs_to_jiffies(10))) |
1393 | return; |
1394 | |
1395 | /* |
1396 | * If bfqq's creation happens late enough, or bfqq belongs to |
1397 | * a different group than the burst group, then the current |
1398 | * burst is finished, and related data structures must be |
1399 | * reset. |
1400 | * |
1401 | * In this respect, consider the special case where bfqq is |
1402 | * the very first queue created after BFQ is selected for this |
1403 | * device. In this case, last_ins_in_burst and |
1404 | * burst_parent_entity are not yet significant when we get |
1405 | * here. But it is easy to verify that, whether or not the |
1406 | * following condition is true, bfqq will end up being |
1407 | * inserted into the burst list. In particular the list will |
1408 | * happen to contain only bfqq. And this is exactly what has |
1409 | * to happen, as bfqq may be the first queue of the first |
1410 | * burst. |
1411 | */ |
1412 | if (time_is_before_jiffies(bfqd->last_ins_in_burst + |
1413 | bfqd->bfq_burst_interval) || |
1414 | bfqq->entity.parent != bfqd->burst_parent_entity) { |
1415 | bfqd->large_burst = false; |
1416 | bfq_reset_burst_list(bfqd, bfqq); |
1417 | goto end; |
1418 | } |
1419 | |
1420 | /* |
1421 | * If we get here, then bfqq is being activated shortly after the |
1422 | * last queue. So, if the current burst is also large, we can mark |
1423 | * bfqq as belonging to this large burst immediately. |
1424 | */ |
1425 | if (bfqd->large_burst) { |
1426 | bfq_mark_bfqq_in_large_burst(bfqq); |
1427 | goto end; |
1428 | } |
1429 | |
1430 | /* |
1431 | * If we get here, then a large-burst state has not yet been |
1432 | * reached, but bfqq is being activated shortly after the last |
1433 | * queue. Then we add bfqq to the burst. |
1434 | */ |
1435 | bfq_add_to_burst(bfqd, bfqq); |
1436 | end: |
1437 | /* |
1438 | * At this point, bfqq either has been added to the current |
1439 | * burst or has caused the current burst to terminate and a |
1440 | * possible new burst to start. In particular, in the second |
1441 | * case, bfqq has become the first queue in the possible new |
1442 | * burst. In both cases last_ins_in_burst needs to be moved |
1443 | * forward. |
1444 | */ |
1445 | bfqd->last_ins_in_burst = jiffies; |
1446 | } |
1447 | |
1448 | static int bfq_bfqq_budget_left(struct bfq_queue *bfqq) |
1449 | { |
1450 | struct bfq_entity *entity = &bfqq->entity; |
1451 | |
1452 | return entity->budget - entity->service; |
1453 | } |
1454 | |
1455 | /* |
1456 | * If enough samples have been computed, return the current max budget |
1457 | * stored in bfqd, which is dynamically updated according to the |
1458 | * estimated disk peak rate; otherwise return the default max budget |
1459 | */ |
1460 | static int bfq_max_budget(struct bfq_data *bfqd) |
1461 | { |
1462 | if (bfqd->budgets_assigned < bfq_stats_min_budgets) |
1463 | return bfq_default_max_budget; |
1464 | else |
1465 | return bfqd->bfq_max_budget; |
1466 | } |
1467 | |
1468 | /* |
1469 | * Return min budget, which is a fraction of the current or default |
1470 | * max budget (trying with 1/32) |
1471 | */ |
1472 | static int bfq_min_budget(struct bfq_data *bfqd) |
1473 | { |
1474 | if (bfqd->budgets_assigned < bfq_stats_min_budgets) |
1475 | return bfq_default_max_budget / 32; |
1476 | else |
1477 | return bfqd->bfq_max_budget / 32; |
1478 | } |
1479 | |
1480 | /* |
1481 | * The next function, invoked after the input queue bfqq switches from |
1482 | * idle to busy, updates the budget of bfqq. The function also tells |
1483 | * whether the in-service queue should be expired, by returning |
1484 | * true. The purpose of expiring the in-service queue is to give bfqq |
1485 | * the chance to possibly preempt the in-service queue, and the reason |
1486 | * for preempting the in-service queue is to achieve one of the two |
1487 | * goals below. |
1488 | * |
1489 | * 1. Guarantee to bfqq its reserved bandwidth even if bfqq has |
1490 | * expired because it has remained idle. In particular, bfqq may have |
1491 | * expired for one of the following two reasons: |
1492 | * |
1493 | * - BFQQE_NO_MORE_REQUESTS bfqq did not enjoy any device idling |
 *   and did not manage to issue a new request before its last
1495 | * request was served; |
1496 | * |
1497 | * - BFQQE_TOO_IDLE bfqq did enjoy device idling, but did not issue |
1498 | * a new request before the expiration of the idling-time. |
1499 | * |
1500 | * Even if bfqq has expired for one of the above reasons, the process |
 * associated with the queue may however be issuing requests greedily,
1502 | * and thus be sensitive to the bandwidth it receives (bfqq may have |
1503 | * remained idle for other reasons: CPU high load, bfqq not enjoying |
1504 | * idling, I/O throttling somewhere in the path from the process to |
1505 | * the I/O scheduler, ...). But if, after every expiration for one of |
1506 | * the above two reasons, bfqq has to wait for the service of at least |
1507 | * one full budget of another queue before being served again, then |
1508 | * bfqq is likely to get a much lower bandwidth or resource time than |
1509 | * its reserved ones. To address this issue, two countermeasures need |
1510 | * to be taken. |
1511 | * |
1512 | * First, the budget and the timestamps of bfqq need to be updated in |
1513 | * a special way on bfqq reactivation: they need to be updated as if |
1514 | * bfqq did not remain idle and did not expire. In fact, if they are |
1515 | * computed as if bfqq expired and remained idle until reactivation, |
1516 | * then the process associated with bfqq is treated as if, instead of |
1517 | * being greedy, it stopped issuing requests when bfqq remained idle, |
1518 | * and restarts issuing requests only on this reactivation. In other |
1519 | * words, the scheduler does not help the process recover the "service |
1520 | * hole" between bfqq expiration and reactivation. As a consequence, |
1521 | * the process receives a lower bandwidth than its reserved one. In |
1522 | * contrast, to recover this hole, the budget must be updated as if |
1523 | * bfqq was not expired at all before this reactivation, i.e., it must |
1524 | * be set to the value of the remaining budget when bfqq was |
1525 | * expired. Along the same line, timestamps need to be assigned the |
1526 | * value they had the last time bfqq was selected for service, i.e., |
1527 | * before last expiration. Thus timestamps need to be back-shifted |
1528 | * with respect to their normal computation (see [1] for more details |
1529 | * on this tricky aspect). |
1530 | * |
1531 | * Secondly, to allow the process to recover the hole, the in-service |
1532 | * queue must be expired too, to give bfqq the chance to preempt it |
1533 | * immediately. In fact, if bfqq has to wait for a full budget of the |
1534 | * in-service queue to be completed, then it may become impossible to |
1535 | * let the process recover the hole, even if the back-shifted |
1536 | * timestamps of bfqq are lower than those of the in-service queue. If |
1537 | * this happens for most or all of the holes, then the process may not |
1538 | * receive its reserved bandwidth. In this respect, it is worth noting |
 * that, since the service of outstanding requests is not preemptible,
 * a small fraction of the holes may however be unrecoverable, thereby
 * causing a small loss of bandwidth.
1542 | * |
1543 | * The last important point is detecting whether bfqq does need this |
1544 | * bandwidth recovery. In this respect, the next function deems the |
1545 | * process associated with bfqq greedy, and thus allows it to recover |
1546 | * the hole, if: 1) the process is waiting for the arrival of a new |
1547 | * request (which implies that bfqq expired for one of the above two |
1548 | * reasons), and 2) such a request has arrived soon. The first |
1549 | * condition is controlled through the flag non_blocking_wait_rq, |
1550 | * while the second through the flag arrived_in_time. If both |
1551 | * conditions hold, then the function computes the budget in the |
1552 | * above-described special way, and signals that the in-service queue |
1553 | * should be expired. Timestamp back-shifting is done later in |
1554 | * __bfq_activate_entity. |
1555 | * |
1556 | * 2. Reduce latency. Even if timestamps are not backshifted to let |
1557 | * the process associated with bfqq recover a service hole, bfqq may |
1558 | * however happen to have, after being (re)activated, a lower finish |
1559 | * timestamp than the in-service queue. That is, the next budget of |
1560 | * bfqq may have to be completed before the one of the in-service |
1561 | * queue. If this is the case, then preempting the in-service queue |
1562 | * allows this goal to be achieved, apart from the unpreemptible, |
1563 | * outstanding requests mentioned above. |
1564 | * |
1565 | * Unfortunately, regardless of which of the above two goals one wants |
1566 | * to achieve, service trees need first to be updated to know whether |
1567 | * the in-service queue must be preempted. To have service trees |
1568 | * correctly updated, the in-service queue must be expired and |
1569 | * rescheduled, and bfqq must be scheduled too. This is one of the |
1570 | * most costly operations (in future versions, the scheduling |
1571 | * mechanism may be re-designed in such a way to make it possible to |
1572 | * know whether preemption is needed without needing to update service |
1573 | * trees). In addition, queue preemptions almost always cause random |
1574 | * I/O, which may in turn cause loss of throughput. Finally, there may |
1575 | * even be no in-service queue when the next function is invoked (so, |
1576 | * no queue to compare timestamps with). Because of these facts, the |
1577 | * next function adopts the following simple scheme to avoid costly |
1578 | * operations, too frequent preemptions and too many dependencies on |
1579 | * the state of the scheduler: it requests the expiration of the |
1580 | * in-service queue (unconditionally) only for queues that need to |
1581 | * recover a hole. Then it delegates to other parts of the code the |
1582 | * responsibility of handling the above case 2. |
1583 | */ |
1584 | static bool bfq_bfqq_update_budg_for_activation(struct bfq_data *bfqd, |
1585 | struct bfq_queue *bfqq, |
1586 | bool arrived_in_time) |
1587 | { |
1588 | struct bfq_entity *entity = &bfqq->entity; |
1589 | |
1590 | /* |
1591 | * In the next compound condition, we check also whether there |
1592 | * is some budget left, because otherwise there is no point in |
1593 | * trying to go on serving bfqq with this same budget: bfqq |
1594 | * would be expired immediately after being selected for |
1595 | * service. This would only cause useless overhead. |
1596 | */ |
1597 | if (bfq_bfqq_non_blocking_wait_rq(bfqq) && arrived_in_time && |
1598 | bfq_bfqq_budget_left(bfqq) > 0) { |
1599 | /* |
1600 | * We do not clear the flag non_blocking_wait_rq here, as |
1601 | * the latter is used in bfq_activate_bfqq to signal |
1602 | * that timestamps need to be back-shifted (and is |
1603 | * cleared right after). |
1604 | */ |
1605 | |
1606 | /* |
		 * In the next assignment we rely on the fact that
		 * neither entity->service nor entity->budget is
		 * updated on expiration if bfqq is empty (see
		 * __bfq_bfqq_recalc_budget). Thus both quantities
1611 | * remain unchanged after such an expiration, and the |
1612 | * following statement therefore assigns to |
1613 | * entity->budget the remaining budget on such an |
1614 | * expiration. |
1615 | */ |
1616 | entity->budget = min_t(unsigned long, |
1617 | bfq_bfqq_budget_left(bfqq), |
1618 | bfqq->max_budget); |
1619 | |
1620 | /* |
1621 | * At this point, we have used entity->service to get |
1622 | * the budget left (needed for updating |
1623 | * entity->budget). Thus we finally can, and have to, |
1624 | * reset entity->service. The latter must be reset |
1625 | * because bfqq would otherwise be charged again for |
1626 | * the service it has received during its previous |
1627 | * service slot(s). |
1628 | */ |
1629 | entity->service = 0; |
1630 | |
1631 | return true; |
1632 | } |
1633 | |
1634 | /* |
1635 | * We can finally complete expiration, by setting service to 0. |
1636 | */ |
1637 | entity->service = 0; |
1638 | entity->budget = max_t(unsigned long, bfqq->max_budget, |
1639 | bfq_serv_to_charge(bfqq->next_rq, bfqq)); |
1640 | bfq_clear_bfqq_non_blocking_wait_rq(bfqq); |
1641 | return false; |
1642 | } |
1643 | |
1644 | /* |
1645 | * Return the farthest past time instant according to jiffies |
1646 | * macros. |
1647 | */ |
1648 | static unsigned long bfq_smallest_from_now(void) |
1649 | { |
1650 | return jiffies - MAX_JIFFY_OFFSET; |
1651 | } |
1652 | |
1653 | static void bfq_update_bfqq_wr_on_rq_arrival(struct bfq_data *bfqd, |
1654 | struct bfq_queue *bfqq, |
1655 | unsigned int old_wr_coeff, |
1656 | bool wr_or_deserves_wr, |
1657 | bool interactive, |
1658 | bool in_burst, |
1659 | bool soft_rt) |
1660 | { |
1661 | if (old_wr_coeff == 1 && wr_or_deserves_wr) { |
1662 | /* start a weight-raising period */ |
1663 | if (interactive) { |
1664 | bfqq->service_from_wr = 0; |
1665 | bfqq->wr_coeff = bfqd->bfq_wr_coeff; |
1666 | bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); |
1667 | } else { |
1668 | /* |
1669 | * No interactive weight raising in progress |
1670 | * here: assign minus infinity to |
1671 | * wr_start_at_switch_to_srt, to make sure |
1672 | * that, at the end of the soft-real-time |
			 * weight raising period that is starting
1674 | * now, no interactive weight-raising period |
1675 | * may be wrongly considered as still in |
1676 | * progress (and thus actually started by |
1677 | * mistake). |
1678 | */ |
1679 | bfqq->wr_start_at_switch_to_srt = |
1680 | bfq_smallest_from_now(); |
1681 | bfqq->wr_coeff = bfqd->bfq_wr_coeff * |
1682 | BFQ_SOFTRT_WEIGHT_FACTOR; |
1683 | bfqq->wr_cur_max_time = |
1684 | bfqd->bfq_wr_rt_max_time; |
1685 | } |
1686 | |
1687 | /* |
1688 | * If needed, further reduce budget to make sure it is |
1689 | * close to bfqq's backlog, so as to reduce the |
1690 | * scheduling-error component due to a too large |
1691 | * budget. Do not care about throughput consequences, |
1692 | * but only about latency. Finally, do not assign a |
1693 | * too small budget either, to avoid increasing |
1694 | * latency by causing too frequent expirations. |
1695 | */ |
1696 | bfqq->entity.budget = min_t(unsigned long, |
1697 | bfqq->entity.budget, |
1698 | 2 * bfq_min_budget(bfqd)); |
1699 | } else if (old_wr_coeff > 1) { |
1700 | if (interactive) { /* update wr coeff and duration */ |
1701 | bfqq->wr_coeff = bfqd->bfq_wr_coeff; |
1702 | bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); |
1703 | } else if (in_burst) |
1704 | bfqq->wr_coeff = 1; |
1705 | else if (soft_rt) { |
1706 | /* |
1707 | * The application is now or still meeting the |
1708 | * requirements for being deemed soft rt. We |
1709 | * can then correctly and safely (re)charge |
1710 | * the weight-raising duration for the |
1711 | * application with the weight-raising |
1712 | * duration for soft rt applications. |
1713 | * |
1714 | * In particular, doing this recharge now, i.e., |
1715 | * before the weight-raising period for the |
1716 | * application finishes, reduces the probability |
1717 | * of the following negative scenario: |
1718 | * 1) the weight of a soft rt application is |
1719 | * raised at startup (as for any newly |
1720 | * created application), |
1721 | * 2) since the application is not interactive, |
1722 | * at a certain time weight-raising is |
1723 | * stopped for the application, |
1724 | * 3) at that time the application happens to |
1725 | * still have pending requests, and hence |
1726 | * is destined to not have a chance to be |
1727 | * deemed soft rt before these requests are |
1728 | * completed (see the comments to the |
1729 | * function bfq_bfqq_softrt_next_start() |
1730 | * for details on soft rt detection), |
1731 | * 4) these pending requests experience a high |
1732 | * latency because the application is not |
1733 | * weight-raised while they are pending. |
1734 | */ |
1735 | if (bfqq->wr_cur_max_time != |
1736 | bfqd->bfq_wr_rt_max_time) { |
1737 | bfqq->wr_start_at_switch_to_srt = |
1738 | bfqq->last_wr_start_finish; |
1739 | |
1740 | bfqq->wr_cur_max_time = |
1741 | bfqd->bfq_wr_rt_max_time; |
1742 | bfqq->wr_coeff = bfqd->bfq_wr_coeff * |
1743 | BFQ_SOFTRT_WEIGHT_FACTOR; |
1744 | } |
1745 | bfqq->last_wr_start_finish = jiffies; |
1746 | } |
1747 | } |
1748 | } |
1749 | |
1750 | static bool bfq_bfqq_idle_for_long_time(struct bfq_data *bfqd, |
1751 | struct bfq_queue *bfqq) |
1752 | { |
1753 | return bfqq->dispatched == 0 && |
1754 | time_is_before_jiffies( |
1755 | bfqq->budget_timeout + |
1756 | bfqd->bfq_wr_min_idle_time); |
1757 | } |
1758 | |
1759 | |
1760 | /* |
1761 | * Return true if bfqq is in a higher priority class, or has a higher |
1762 | * weight than the in-service queue. |
1763 | */ |
1764 | static bool bfq_bfqq_higher_class_or_weight(struct bfq_queue *bfqq, |
1765 | struct bfq_queue *in_serv_bfqq) |
1766 | { |
1767 | int bfqq_weight, in_serv_weight; |
1768 | |
1769 | if (bfqq->ioprio_class < in_serv_bfqq->ioprio_class) |
1770 | return true; |
1771 | |
1772 | if (in_serv_bfqq->entity.parent == bfqq->entity.parent) { |
1773 | bfqq_weight = bfqq->entity.weight; |
1774 | in_serv_weight = in_serv_bfqq->entity.weight; |
1775 | } else { |
1776 | if (bfqq->entity.parent) |
1777 | bfqq_weight = bfqq->entity.parent->weight; |
1778 | else |
1779 | bfqq_weight = bfqq->entity.weight; |
1780 | if (in_serv_bfqq->entity.parent) |
1781 | in_serv_weight = in_serv_bfqq->entity.parent->weight; |
1782 | else |
1783 | in_serv_weight = in_serv_bfqq->entity.weight; |
1784 | } |
1785 | |
1786 | return bfqq_weight > in_serv_weight; |
1787 | } |
1788 | |
1789 | /* |
1790 | * Get the index of the actuator that will serve bio. |
1791 | */ |
1792 | static unsigned int bfq_actuator_index(struct bfq_data *bfqd, struct bio *bio) |
1793 | { |
1794 | unsigned int i; |
1795 | sector_t end; |
1796 | |
1797 | /* no search needed if one or zero ranges present */ |
1798 | if (bfqd->num_actuators == 1) |
1799 | return 0; |
1800 | |
1801 | /* bio_end_sector(bio) gives the sector after the last one */ |
1802 | end = bio_end_sector(bio) - 1; |
1803 | |
1804 | for (i = 0; i < bfqd->num_actuators; i++) { |
1805 | if (end >= bfqd->sector[i] && |
1806 | end < bfqd->sector[i] + bfqd->nr_sectors[i]) |
1807 | return i; |
1808 | } |
1809 | |
1810 | WARN_ONCE(true, |
1811 | "bfq_actuator_index: bio sector out of ranges: end=%llu\n" , |
1812 | end); |
1813 | return 0; |
1814 | } |
1815 | |
1816 | static bool bfq_better_to_idle(struct bfq_queue *bfqq); |
1817 | |
1818 | static void bfq_bfqq_handle_idle_busy_switch(struct bfq_data *bfqd, |
1819 | struct bfq_queue *bfqq, |
1820 | int old_wr_coeff, |
1821 | struct request *rq, |
1822 | bool *interactive) |
1823 | { |
1824 | bool soft_rt, in_burst, wr_or_deserves_wr, |
1825 | bfqq_wants_to_preempt, |
1826 | idle_for_long_time = bfq_bfqq_idle_for_long_time(bfqd, bfqq), |
1827 | /* |
1828 | * See the comments on |
1829 | * bfq_bfqq_update_budg_for_activation for |
1830 | * details on the usage of the next variable. |
1831 | */ |
1832 | arrived_in_time = ktime_get_ns() <= |
1833 | bfqq->ttime.last_end_request + |
1834 | bfqd->bfq_slice_idle * 3; |
	unsigned int act_idx = bfq_actuator_index(bfqd, rq->bio);
1836 | bool bfqq_non_merged_or_stably_merged = |
1837 | bfqq->bic || RQ_BIC(rq)->bfqq_data[act_idx].stably_merged; |
1838 | |
1839 | /* |
1840 | * bfqq deserves to be weight-raised if: |
1841 | * - it is sync, |
1842 | * - it does not belong to a large burst, |
1843 | * - it has been idle for enough time or is soft real-time, |
1844 | * - is linked to a bfq_io_cq (it is not shared in any sense), |
1845 | * - has a default weight (otherwise we assume the user wanted |
1846 | * to control its weight explicitly) |
1847 | */ |
1848 | in_burst = bfq_bfqq_in_large_burst(bfqq); |
1849 | soft_rt = bfqd->bfq_wr_max_softrt_rate > 0 && |
1850 | !BFQQ_TOTALLY_SEEKY(bfqq) && |
1851 | !in_burst && |
1852 | time_is_before_jiffies(bfqq->soft_rt_next_start) && |
1853 | bfqq->dispatched == 0 && |
1854 | bfqq->entity.new_weight == 40; |
1855 | *interactive = !in_burst && idle_for_long_time && |
1856 | bfqq->entity.new_weight == 40; |
1857 | /* |
1858 | * Merged bfq_queues are kept out of weight-raising |
1859 | * (low-latency) mechanisms. The reason is that these queues |
1860 | * are usually created for non-interactive and |
1861 | * non-soft-real-time tasks. Yet this is not the case for |
1862 | * stably-merged queues. These queues are merged just because |
1863 | * they are created shortly after each other. So they may |
1864 | * easily serve the I/O of an interactive or soft-real time |
1865 | * application, if the application happens to spawn multiple |
	 * processes. So let also stably-merged queues enjoy weight
1867 | * raising. |
1868 | */ |
1869 | wr_or_deserves_wr = bfqd->low_latency && |
1870 | (bfqq->wr_coeff > 1 || |
1871 | (bfq_bfqq_sync(bfqq) && bfqq_non_merged_or_stably_merged && |
1872 | (*interactive || soft_rt))); |
1873 | |
1874 | /* |
1875 | * Using the last flag, update budget and check whether bfqq |
1876 | * may want to preempt the in-service queue. |
1877 | */ |
1878 | bfqq_wants_to_preempt = |
1879 | bfq_bfqq_update_budg_for_activation(bfqd, bfqq, |
1880 | arrived_in_time); |
1881 | |
1882 | /* |
1883 | * If bfqq happened to be activated in a burst, but has been |
1884 | * idle for much more than an interactive queue, then we |
1885 | * assume that, in the overall I/O initiated in the burst, the |
1886 | * I/O associated with bfqq is finished. So bfqq does not need |
1887 | * to be treated as a queue belonging to a burst |
1888 | * anymore. Accordingly, we reset bfqq's in_large_burst flag |
1889 | * if set, and remove bfqq from the burst list if it's |
1890 | * there. We do not decrement burst_size, because the fact |
1891 | * that bfqq does not need to belong to the burst list any |
1892 | * more does not invalidate the fact that bfqq was created in |
1893 | * a burst. |
1894 | */ |
1895 | if (likely(!bfq_bfqq_just_created(bfqq)) && |
1896 | idle_for_long_time && |
1897 | time_is_before_jiffies( |
1898 | bfqq->budget_timeout + |
1899 | msecs_to_jiffies(10000))) { |
		hlist_del_init(&bfqq->burst_list_node);
1901 | bfq_clear_bfqq_in_large_burst(bfqq); |
1902 | } |
1903 | |
1904 | bfq_clear_bfqq_just_created(bfqq); |
1905 | |
1906 | if (bfqd->low_latency) { |
1907 | if (unlikely(time_is_after_jiffies(bfqq->split_time))) |
1908 | /* wraparound */ |
1909 | bfqq->split_time = |
1910 | jiffies - bfqd->bfq_wr_min_idle_time - 1; |
1911 | |
1912 | if (time_is_before_jiffies(bfqq->split_time + |
1913 | bfqd->bfq_wr_min_idle_time)) { |
1914 | bfq_update_bfqq_wr_on_rq_arrival(bfqd, bfqq, |
1915 | old_wr_coeff, |
1916 | wr_or_deserves_wr, |
							 *interactive,
1918 | in_burst, |
1919 | soft_rt); |
1920 | |
1921 | if (old_wr_coeff != bfqq->wr_coeff) |
1922 | bfqq->entity.prio_changed = 1; |
1923 | } |
1924 | } |
1925 | |
1926 | bfqq->last_idle_bklogged = jiffies; |
1927 | bfqq->service_from_backlogged = 0; |
1928 | bfq_clear_bfqq_softrt_update(bfqq); |
1929 | |
1930 | bfq_add_bfqq_busy(bfqq); |
1931 | |
1932 | /* |
1933 | * Expire in-service queue if preemption may be needed for |
1934 | * guarantees or throughput. As for guarantees, we care |
1935 | * explicitly about two cases. The first is that bfqq has to |
1936 | * recover a service hole, as explained in the comments on |
1937 | * bfq_bfqq_update_budg_for_activation(), i.e., that |
1938 | * bfqq_wants_to_preempt is true. However, if bfqq does not |
1939 | * carry time-critical I/O, then bfqq's bandwidth is less |
1940 | * important than that of queues that carry time-critical I/O. |
1941 | * So, as a further constraint, we consider this case only if |
1942 | * bfqq is at least as weight-raised, i.e., at least as time |
1943 | * critical, as the in-service queue. |
1944 | * |
1945 | * The second case is that bfqq is in a higher priority class, |
1946 | * or has a higher weight than the in-service queue. If this |
1947 | * condition does not hold, we don't care because, even if |
1948 | * bfqq does not start to be served immediately, the resulting |
1949 | * delay for bfqq's I/O is however lower or much lower than |
1950 | * the ideal completion time to be guaranteed to bfqq's I/O. |
1951 | * |
1952 | * In both cases, preemption is needed only if, according to |
1953 | * the timestamps of both bfqq and of the in-service queue, |
1954 | * bfqq actually is the next queue to serve. So, to reduce |
1955 | * useless preemptions, the return value of |
1956 | * next_queue_may_preempt() is considered in the next compound |
1957 | * condition too. Yet next_queue_may_preempt() just checks a |
1958 | * simple, necessary condition for bfqq to be the next queue |
1959 | * to serve. In fact, to evaluate a sufficient condition, the |
1960 | * timestamps of the in-service queue would need to be |
1961 | * updated, and this operation is quite costly (see the |
1962 | * comments on bfq_bfqq_update_budg_for_activation()). |
1963 | * |
1964 | * As for throughput, we ask bfq_better_to_idle() whether we |
1965 | * still need to plug I/O dispatching. If bfq_better_to_idle() |
1966 | * says no, then plugging is not needed any longer, either to |
	 * boost throughput or to preserve service guarantees. Then
1968 | * the best option is to stop plugging I/O, as not doing so |
1969 | * would certainly lower throughput. We may end up in this |
1970 | * case if: (1) upon a dispatch attempt, we detected that it |
1971 | * was better to plug I/O dispatch, and to wait for a new |
1972 | * request to arrive for the currently in-service queue, but |
1973 | * (2) this switch of bfqq to busy changes the scenario. |
1974 | */ |
1975 | if (bfqd->in_service_queue && |
1976 | ((bfqq_wants_to_preempt && |
1977 | bfqq->wr_coeff >= bfqd->in_service_queue->wr_coeff) || |
	     bfq_bfqq_higher_class_or_weight(bfqq, bfqd->in_service_queue) ||
	     !bfq_better_to_idle(bfqd->in_service_queue)) &&
	    next_queue_may_preempt(bfqd))
		bfq_bfqq_expire(bfqd, bfqd->in_service_queue,
				false, BFQQE_PREEMPTED);
1983 | } |
1984 | |
1985 | static void bfq_reset_inject_limit(struct bfq_data *bfqd, |
1986 | struct bfq_queue *bfqq) |
1987 | { |
1988 | /* invalidate baseline total service time */ |
1989 | bfqq->last_serv_time_ns = 0; |
1990 | |
1991 | /* |
1992 | * Reset pointer in case we are waiting for |
1993 | * some request completion. |
1994 | */ |
1995 | bfqd->waited_rq = NULL; |
1996 | |
1997 | /* |
1998 | * If bfqq has a short think time, then start by setting the |
1999 | * inject limit to 0 prudentially, because the service time of |
2000 | * an injected I/O request may be higher than the think time |
2001 | * of bfqq, and therefore, if one request was injected when |
2002 | * bfqq remains empty, this injected request might delay the |
2003 | * service of the next I/O request for bfqq significantly. In |
2004 | * case bfqq can actually tolerate some injection, then the |
2005 | * adaptive update will however raise the limit soon. This |
2006 | * lucky circumstance holds exactly because bfqq has a short |
2007 | * think time, and thus, after remaining empty, is likely to |
2008 | * get new I/O enqueued---and then completed---before being |
2009 | * expired. This is the very pattern that gives the |
2010 | * limit-update algorithm the chance to measure the effect of |
2011 | * injection on request service times, and then to update the |
2012 | * limit accordingly. |
2013 | * |
2014 | * However, in the following special case, the inject limit is |
2015 | * left to 1 even if the think time is short: bfqq's I/O is |
2016 | * synchronized with that of some other queue, i.e., bfqq may |
2017 | * receive new I/O only after the I/O of the other queue is |
2018 | * completed. Keeping the inject limit to 1 allows the |
2019 | * blocking I/O to be served while bfqq is in service. And |
2020 | * this is very convenient both for bfqq and for overall |
2021 | * throughput, as explained in detail in the comments in |
2022 | * bfq_update_has_short_ttime(). |
2023 | * |
2024 | * On the opposite end, if bfqq has a long think time, then |
	 * start directly with 1, because:
2026 | * a) on the bright side, keeping at most one request in |
2027 | * service in the drive is unlikely to cause any harm to the |
2028 | * latency of bfqq's requests, as the service time of a single |
2029 | * request is likely to be lower than the think time of bfqq; |
2030 | * b) on the downside, after becoming empty, bfqq is likely to |
2031 | * expire before getting its next request. With this request |
2032 | * arrival pattern, it is very hard to sample total service |
2033 | * times and update the inject limit accordingly (see comments |
2034 | * on bfq_update_inject_limit()). So the limit is likely to be |
2035 | * never, or at least seldom, updated. As a consequence, by |
2036 | * setting the limit to 1, we avoid that no injection ever |
2037 | * occurs with bfqq. On the downside, this proactive step |
2038 | * further reduces chances to actually compute the baseline |
2039 | * total service time. Thus it reduces chances to execute the |
2040 | * limit-update algorithm and possibly raise the limit to more |
2041 | * than 1. |
2042 | */ |
2043 | if (bfq_bfqq_has_short_ttime(bfqq)) |
2044 | bfqq->inject_limit = 0; |
2045 | else |
2046 | bfqq->inject_limit = 1; |
2047 | |
2048 | bfqq->decrease_time_jif = jiffies; |
2049 | } |
2050 | |
2051 | static void bfq_update_io_intensity(struct bfq_queue *bfqq, u64 now_ns) |
2052 | { |
2053 | u64 tot_io_time = now_ns - bfqq->io_start_time; |
2054 | |
2055 | if (RB_EMPTY_ROOT(&bfqq->sort_list) && bfqq->dispatched == 0) |
2056 | bfqq->tot_idle_time += |
2057 | now_ns - bfqq->ttime.last_end_request; |
2058 | |
2059 | if (unlikely(bfq_bfqq_just_created(bfqq))) |
2060 | return; |
2061 | |
2062 | /* |
2063 | * Must be busy for at least about 80% of the time to be |
2064 | * considered I/O bound. |
2065 | */ |
2066 | if (bfqq->tot_idle_time * 5 > tot_io_time) |
2067 | bfq_clear_bfqq_IO_bound(bfqq); |
2068 | else |
2069 | bfq_mark_bfqq_IO_bound(bfqq); |
2070 | |
2071 | /* |
2072 | * Keep an observation window of at most 200 ms in the past |
2073 | * from now. |
2074 | */ |
2075 | if (tot_io_time > 200 * NSEC_PER_MSEC) { |
2076 | bfqq->io_start_time = now_ns - (tot_io_time>>1); |
2077 | bfqq->tot_idle_time >>= 1; |
2078 | } |
2079 | } |
2080 | |
2081 | /* |
2082 | * Detect whether bfqq's I/O seems synchronized with that of some |
2083 | * other queue, i.e., whether bfqq, after remaining empty, happens to |
2084 | * receive new I/O only right after some I/O request of the other |
2085 | * queue has been completed. We call waker queue the other queue, and |
2086 | * we assume, for simplicity, that bfqq may have at most one waker |
2087 | * queue. |
2088 | * |
2089 | * A remarkable throughput boost can be reached by unconditionally |
2090 | * injecting the I/O of the waker queue, every time a new |
2091 | * bfq_dispatch_request happens to be invoked while I/O is being |
2092 | * plugged for bfqq. In addition to boosting throughput, this |
2093 | * unblocks bfqq's I/O, thereby improving bandwidth and latency for |
2094 | * bfqq. Note that these same results may be achieved with the general |
2095 | * injection mechanism, but less effectively. For details on this |
2096 | * aspect, see the comments on the choice of the queue for injection |
2097 | * in bfq_select_queue(). |
2098 | * |
2099 | * Turning back to the detection of a waker queue, a queue Q is deemed as a |
2100 | * waker queue for bfqq if, for three consecutive times, bfqq happens to become |
2101 | * non empty right after a request of Q has been completed within given |
2102 | * timeout. In this respect, even if bfqq is empty, we do not check for a waker |
2103 | * if it still has some in-flight I/O. In fact, in this case bfqq is actually |
2104 | * still being served by the drive, and may receive new I/O on the completion |
2105 | * of some of the in-flight requests. In particular, on the first time, Q is |
2106 | * tentatively set as a candidate waker queue, while on the third consecutive |
2107 | * time that Q is detected, the field waker_bfqq is set to Q, to confirm that Q |
2108 | * is a waker queue for bfqq. These detection steps are performed only if bfqq |
2109 | * has a long think time, so as to make it more likely that bfqq's I/O is |
2110 | * actually being blocked by a synchronization. This last filter, plus the |
2111 | * above three-times requirement and time limit for detection, make false |
2112 | * positives less likely. |
2113 | * |
2114 | * NOTE |
2115 | * |
2116 | * The sooner a waker queue is detected, the sooner throughput can be |
2117 | * boosted by injecting I/O from the waker queue. Fortunately, |
2118 | * detection is likely to be actually fast, for the following |
2119 | * reasons. While blocked by synchronization, bfqq has a long think |
2120 | * time. This implies that bfqq's inject limit is at least equal to 1 |
2121 | * (see the comments in bfq_update_inject_limit()). So, thanks to |
2122 | * injection, the waker queue is likely to be served during the very |
2123 | * first I/O-plugging time interval for bfqq. This triggers the first |
2124 | * step of the detection mechanism. Thanks again to injection, the |
2125 | * candidate waker queue is then likely to be confirmed no later than |
2126 | * during the next I/O-plugging interval for bfqq. |
2127 | * |
2128 | * ISSUE |
2129 | * |
2130 | * On queue merging all waker information is lost. |
2131 | */ |
2132 | static void bfq_check_waker(struct bfq_data *bfqd, struct bfq_queue *bfqq, |
2133 | u64 now_ns) |
2134 | { |
2135 | char waker_name[MAX_BFQQ_NAME_LENGTH]; |
2136 | |
2137 | if (!bfqd->last_completed_rq_bfqq || |
2138 | bfqd->last_completed_rq_bfqq == bfqq || |
2139 | bfq_bfqq_has_short_ttime(bfqq) || |
2140 | now_ns - bfqd->last_completion >= 4 * NSEC_PER_MSEC || |
2141 | bfqd->last_completed_rq_bfqq == &bfqd->oom_bfqq || |
2142 | bfqq == &bfqd->oom_bfqq) |
2143 | return; |
2144 | |
2145 | /* |
2146 | * We reset waker detection logic also if too much time has passed |
2147 | * since the first detection. If wakeups are rare, pointless idling |
2148 | * doesn't hurt throughput that much. The condition below makes sure |
	 * we do not uselessly idle waiting for a blocking waker in more
	 * than 1/64 of the cases.
2150 | */ |
2151 | if (bfqd->last_completed_rq_bfqq != |
2152 | bfqq->tentative_waker_bfqq || |
2153 | now_ns > bfqq->waker_detection_started + |
2154 | 128 * (u64)bfqd->bfq_slice_idle) { |
2155 | /* |
2156 | * First synchronization detected with a |
2157 | * candidate waker queue, or with a different |
2158 | * candidate waker queue from the current one. |
2159 | */ |
2160 | bfqq->tentative_waker_bfqq = |
2161 | bfqd->last_completed_rq_bfqq; |
2162 | bfqq->num_waker_detections = 1; |
2163 | bfqq->waker_detection_started = now_ns; |
		bfq_bfqq_name(bfqq->tentative_waker_bfqq, waker_name,
			      MAX_BFQQ_NAME_LENGTH);
		bfq_log_bfqq(bfqd, bfqq, "set tentative waker %s", waker_name);
2167 | } else /* Same tentative waker queue detected again */ |
2168 | bfqq->num_waker_detections++; |
2169 | |
2170 | if (bfqq->num_waker_detections == 3) { |
2171 | bfqq->waker_bfqq = bfqd->last_completed_rq_bfqq; |
2172 | bfqq->tentative_waker_bfqq = NULL; |
		bfq_bfqq_name(bfqq->waker_bfqq, waker_name,
			      MAX_BFQQ_NAME_LENGTH);
		bfq_log_bfqq(bfqd, bfqq, "set waker %s", waker_name);
2176 | |
2177 | /* |
2178 | * If the waker queue disappears, then |
2179 | * bfqq->waker_bfqq must be reset. To |
2180 | * this goal, we maintain in each |
2181 | * waker queue a list, woken_list, of |
2182 | * all the queues that reference the |
2183 | * waker queue through their |
2184 | * waker_bfqq pointer. When the waker |
2185 | * queue exits, the waker_bfqq pointer |
2186 | * of all the queues in the woken_list |
2187 | * is reset. |
2188 | * |
2189 | * In addition, if bfqq is already in |
2190 | * the woken_list of a waker queue, |
2191 | * then, before being inserted into |
2192 | * the woken_list of a new waker |
2193 | * queue, bfqq must be removed from |
2194 | * the woken_list of the old waker |
2195 | * queue. |
2196 | */ |
		if (!hlist_unhashed(&bfqq->woken_list_node))
			hlist_del_init(&bfqq->woken_list_node);
		hlist_add_head(&bfqq->woken_list_node,
			       &bfqd->last_completed_rq_bfqq->woken_list);
2201 | } |
2202 | } |
2203 | |
2204 | static void bfq_add_request(struct request *rq) |
2205 | { |
2206 | struct bfq_queue *bfqq = RQ_BFQQ(rq); |
2207 | struct bfq_data *bfqd = bfqq->bfqd; |
2208 | struct request *next_rq, *prev; |
2209 | unsigned int old_wr_coeff = bfqq->wr_coeff; |
2210 | bool interactive = false; |
2211 | u64 now_ns = ktime_get_ns(); |
2212 | |
2213 | bfq_log_bfqq(bfqd, bfqq, "add_request %d" , rq_is_sync(rq)); |
2214 | bfqq->queued[rq_is_sync(rq)]++; |
2215 | /* |
2216 | * Updating of 'bfqd->queued' is protected by 'bfqd->lock', however, it |
2217 | * may be read without holding the lock in bfq_has_work(). |
2218 | */ |
2219 | WRITE_ONCE(bfqd->queued, bfqd->queued + 1); |
2220 | |
2221 | if (bfq_bfqq_sync(bfqq) && RQ_BIC(rq)->requests <= 1) { |
2222 | bfq_check_waker(bfqd, bfqq, now_ns); |
2223 | |
2224 | /* |
2225 | * Periodically reset inject limit, to make sure that |
2226 | * the latter eventually drops in case workload |
2227 | * changes, see step (3) in the comments on |
2228 | * bfq_update_inject_limit(). |
2229 | */ |
2230 | if (time_is_before_eq_jiffies(bfqq->decrease_time_jif + |
2231 | msecs_to_jiffies(1000))) |
2232 | bfq_reset_inject_limit(bfqd, bfqq); |
2233 | |
2234 | /* |
2235 | * The following conditions must hold to setup a new |
2236 | * sampling of total service time, and then a new |
2237 | * update of the inject limit: |
2238 | * - bfqq is in service, because the total service |
2239 | * time is evaluated only for the I/O requests of |
2240 | * the queues in service; |
2241 | * - this is the right occasion to compute or to |
2242 | * lower the baseline total service time, because |
2243 | * there are actually no requests in the drive, |
2244 | * or |
2245 | * the baseline total service time is available, and |
2246 | * this is the right occasion to compute the other |
2247 | * quantity needed to update the inject limit, i.e., |
2248 | * the total service time caused by the amount of |
2249 | * injection allowed by the current value of the |
2250 | * limit. It is the right occasion because injection |
2251 | * has actually been performed during the service |
2252 | * hole, and there are still in-flight requests, |
2253 | * which are very likely to be exactly the injected |
2254 | * requests, or part of them; |
2255 | * - the minimum interval for sampling the total |
2256 | * service time and updating the inject limit has |
2257 | * elapsed. |
2258 | */ |
2259 | if (bfqq == bfqd->in_service_queue && |
2260 | (bfqd->tot_rq_in_driver == 0 || |
2261 | (bfqq->last_serv_time_ns > 0 && |
2262 | bfqd->rqs_injected && bfqd->tot_rq_in_driver > 0)) && |
2263 | time_is_before_eq_jiffies(bfqq->decrease_time_jif + |
2264 | msecs_to_jiffies(10))) { |
2265 | bfqd->last_empty_occupied_ns = ktime_get_ns(); |
2266 | /* |
2267 | * Start the state machine for measuring the |
2268 | * total service time of rq: setting |
2269 | * wait_dispatch will cause bfqd->waited_rq to |
2270 | * be set when rq will be dispatched. |
2271 | */ |
2272 | bfqd->wait_dispatch = true; |
2273 | /* |
2274 | * If there is no I/O in service in the drive, |
2275 | * then possible injection occurred before the |
2276 | * arrival of rq will not affect the total |
2277 | * service time of rq. So the injection limit |
2278 | * must not be updated as a function of such |
2279 | * total service time, unless new injection |
2280 | * occurs before rq is completed. To have the |
2281 | * injection limit updated only in the latter |
2282 | * case, reset rqs_injected here (rqs_injected |
2283 | * will be set in case injection is performed |
2284 | * on bfqq before rq is completed). |
2285 | */ |
2286 | if (bfqd->tot_rq_in_driver == 0) |
2287 | bfqd->rqs_injected = false; |
2288 | } |
2289 | } |
2290 | |
2291 | if (bfq_bfqq_sync(bfqq)) |
2292 | bfq_update_io_intensity(bfqq, now_ns); |
2293 | |
2294 | elv_rb_add(&bfqq->sort_list, rq); |
2295 | |
2296 | /* |
2297 | * Check if this request is a better next-serve candidate. |
2298 | */ |
2299 | prev = bfqq->next_rq; |
	next_rq = bfq_choose_req(bfqd, bfqq->next_rq, rq, bfqd->last_position);
2301 | bfqq->next_rq = next_rq; |
2302 | |
2303 | /* |
2304 | * Adjust priority tree position, if next_rq changes. |
2305 | * See comments on bfq_pos_tree_add_move() for the unlikely(). |
2306 | */ |
2307 | if (unlikely(!bfqd->nonrot_with_queueing && prev != bfqq->next_rq)) |
2308 | bfq_pos_tree_add_move(bfqd, bfqq); |
2309 | |
2310 | if (!bfq_bfqq_busy(bfqq)) /* switching to busy ... */ |
2311 | bfq_bfqq_handle_idle_busy_switch(bfqd, bfqq, old_wr_coeff, |
						 rq, &interactive);
2313 | else { |
2314 | if (bfqd->low_latency && old_wr_coeff == 1 && !rq_is_sync(rq) && |
2315 | time_is_before_jiffies( |
2316 | bfqq->last_wr_start_finish + |
2317 | bfqd->bfq_wr_min_inter_arr_async)) { |
2318 | bfqq->wr_coeff = bfqd->bfq_wr_coeff; |
2319 | bfqq->wr_cur_max_time = bfq_wr_duration(bfqd); |
2320 | |
2321 | bfqd->wr_busy_queues++; |
2322 | bfqq->entity.prio_changed = 1; |
2323 | } |
2324 | if (prev != bfqq->next_rq) |
2325 | bfq_updated_next_req(bfqd, bfqq); |
2326 | } |
2327 | |
2328 | /* |
2329 | * Assign jiffies to last_wr_start_finish in the following |
2330 | * cases: |
2331 | * |
2332 | * . if bfqq is not going to be weight-raised, because, for |
2333 | * non weight-raised queues, last_wr_start_finish stores the |
2334 | * arrival time of the last request; as of now, this piece |
2335 | * of information is used only for deciding whether to |
2336 | * weight-raise async queues |
2337 | * |
2338 | * . if bfqq is not weight-raised, because, if bfqq is now |
2339 | * switching to weight-raised, then last_wr_start_finish |
2340 | * stores the time when weight-raising starts |
2341 | * |
2342 | * . if bfqq is interactive, because, regardless of whether |
2343 | * bfqq is currently weight-raised, the weight-raising |
2344 | * period must start or restart (this case is considered |
2345 | * separately because it is not detected by the above |
2346 | * conditions, if bfqq is already weight-raised) |
2347 | * |
2348 | * last_wr_start_finish has to be updated also if bfqq is soft |
2349 | * real-time, because the weight-raising period is constantly |
2350 | * restarted on idle-to-busy transitions for these queues, but |
2351 | * this is already done in bfq_bfqq_handle_idle_busy_switch if |
2352 | * needed. |
2353 | */ |
2354 | if (bfqd->low_latency && |
2355 | (old_wr_coeff == 1 || bfqq->wr_coeff == 1 || interactive)) |
2356 | bfqq->last_wr_start_finish = jiffies; |
2357 | } |
2358 | |
2359 | static struct request *bfq_find_rq_fmerge(struct bfq_data *bfqd, |
2360 | struct bio *bio, |
2361 | struct request_queue *q) |
2362 | { |
2363 | struct bfq_queue *bfqq = bfqd->bio_bfqq; |
2364 | |
2365 | |
2366 | if (bfqq) |
2367 | return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio)); |
2368 | |
2369 | return NULL; |
2370 | } |
2371 | |
2372 | static sector_t get_sdist(sector_t last_pos, struct request *rq) |
2373 | { |
2374 | if (last_pos) |
2375 | return abs(blk_rq_pos(rq) - last_pos); |
2376 | |
2377 | return 0; |
2378 | } |
2379 | |
2380 | static void bfq_remove_request(struct request_queue *q, |
2381 | struct request *rq) |
2382 | { |
2383 | struct bfq_queue *bfqq = RQ_BFQQ(rq); |
2384 | struct bfq_data *bfqd = bfqq->bfqd; |
2385 | const int sync = rq_is_sync(rq); |
2386 | |
2387 | if (bfqq->next_rq == rq) { |
		bfqq->next_rq = bfq_find_next_rq(bfqd, bfqq, rq);
2389 | bfq_updated_next_req(bfqd, bfqq); |
2390 | } |
2391 | |
2392 | if (rq->queuelist.prev != &rq->queuelist) |
		list_del_init(&rq->queuelist);
2394 | bfqq->queued[sync]--; |
2395 | /* |
2396 | * Updating of 'bfqd->queued' is protected by 'bfqd->lock', however, it |
2397 | * may be read without holding the lock in bfq_has_work(). |
2398 | */ |
2399 | WRITE_ONCE(bfqd->queued, bfqd->queued - 1); |
2400 | elv_rb_del(&bfqq->sort_list, rq); |
2401 | |
2402 | elv_rqhash_del(q, rq); |
2403 | if (q->last_merge == rq) |
2404 | q->last_merge = NULL; |
2405 | |
2406 | if (RB_EMPTY_ROOT(&bfqq->sort_list)) { |
2407 | bfqq->next_rq = NULL; |
2408 | |
2409 | if (bfq_bfqq_busy(bfqq) && bfqq != bfqd->in_service_queue) { |
			bfq_del_bfqq_busy(bfqq, false);
2411 | /* |
2412 | * bfqq emptied. In normal operation, when |
2413 | * bfqq is empty, bfqq->entity.service and |
2414 | * bfqq->entity.budget must contain, |
2415 | * respectively, the service received and the |
2416 | * budget used last time bfqq emptied. These |
2417 | * facts do not hold in this case, as at least |
2418 | * this last removal occurred while bfqq is |
2419 | * not in service. To avoid inconsistencies, |
2420 | * reset both bfqq->entity.service and |
2421 | * bfqq->entity.budget, if bfqq has still a |
2422 | * process that may issue I/O requests to it. |
2423 | */ |
2424 | bfqq->entity.budget = bfqq->entity.service = 0; |
2425 | } |
2426 | |
2427 | /* |
2428 | * Remove queue from request-position tree as it is empty. |
2429 | */ |
2430 | if (bfqq->pos_root) { |
2431 | rb_erase(&bfqq->pos_node, bfqq->pos_root); |
2432 | bfqq->pos_root = NULL; |
2433 | } |
2434 | } else { |
2435 | /* see comments on bfq_pos_tree_add_move() for the unlikely() */ |
2436 | if (unlikely(!bfqd->nonrot_with_queueing)) |
2437 | bfq_pos_tree_add_move(bfqd, bfqq); |
2438 | } |
2439 | |
2440 | if (rq->cmd_flags & REQ_META) |
2441 | bfqq->meta_pending--; |
2442 | |
2443 | } |
2444 | |
2445 | static bool bfq_bio_merge(struct request_queue *q, struct bio *bio, |
2446 | unsigned int nr_segs) |
2447 | { |
2448 | struct bfq_data *bfqd = q->elevator->elevator_data; |
2449 | struct request *free = NULL; |
2450 | /* |
2451 | * bfq_bic_lookup grabs the queue_lock: invoke it now and |
2452 | * store its return value for later use, to avoid nesting |
2453 | * queue_lock inside the bfqd->lock. We assume that the bic |
2454 | * returned by bfq_bic_lookup does not go away before |
2455 | * bfqd->lock is taken. |
2456 | */ |
2457 | struct bfq_io_cq *bic = bfq_bic_lookup(q); |
2458 | bool ret; |
2459 | |
	spin_lock_irq(&bfqd->lock);
2461 | |
2462 | if (bic) { |
2463 | /* |
2464 | * Make sure cgroup info is uptodate for current process before |
2465 | * considering the merge. |
2466 | */ |
2467 | bfq_bic_update_cgroup(bic, bio); |
2468 | |
		bfqd->bio_bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf),
					     bfq_actuator_index(bfqd, bio));
2471 | } else { |
2472 | bfqd->bio_bfqq = NULL; |
2473 | } |
2474 | bfqd->bio_bic = bic; |
2475 | |
	ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
2477 | |
	spin_unlock_irq(&bfqd->lock);
2479 | if (free) |
		blk_mq_free_request(free);
2481 | |
2482 | return ret; |
2483 | } |
2484 | |
2485 | static int bfq_request_merge(struct request_queue *q, struct request **req, |
2486 | struct bio *bio) |
2487 | { |
2488 | struct bfq_data *bfqd = q->elevator->elevator_data; |
2489 | struct request *__rq; |
2490 | |
2491 | __rq = bfq_find_rq_fmerge(bfqd, bio, q); |
2492 | if (__rq && elv_bio_merge_ok(__rq, bio)) { |
2493 | *req = __rq; |
2494 | |
		if (blk_discard_mergable(__rq))
2496 | return ELEVATOR_DISCARD_MERGE; |
2497 | return ELEVATOR_FRONT_MERGE; |
2498 | } |
2499 | |
2500 | return ELEVATOR_NO_MERGE; |
2501 | } |
2502 | |
2503 | static void bfq_request_merged(struct request_queue *q, struct request *req, |
2504 | enum elv_merge type) |
2505 | { |
2506 | if (type == ELEVATOR_FRONT_MERGE && |
2507 | rb_prev(&req->rb_node) && |
	    blk_rq_pos(req) <
2509 | blk_rq_pos(container_of(rb_prev(&req->rb_node), |
2510 | struct request, rb_node))) { |
2511 | struct bfq_queue *bfqq = RQ_BFQQ(req); |
2512 | struct bfq_data *bfqd; |
2513 | struct request *prev, *next_rq; |
2514 | |
2515 | if (!bfqq) |
2516 | return; |
2517 | |
2518 | bfqd = bfqq->bfqd; |
2519 | |
2520 | /* Reposition request in its sort_list */ |
2521 | elv_rb_del(&bfqq->sort_list, req); |
2522 | elv_rb_add(&bfqq->sort_list, req); |
2523 | |
2524 | /* Choose next request to be served for bfqq */ |
2525 | prev = bfqq->next_rq; |
		next_rq = bfq_choose_req(bfqd, bfqq->next_rq, req,
					 bfqd->last_position);
2528 | bfqq->next_rq = next_rq; |
2529 | /* |
2530 | * If next_rq changes, update both the queue's budget to |
2531 | * fit the new request and the queue's position in its |
2532 | * rq_pos_tree. |
2533 | */ |
2534 | if (prev != bfqq->next_rq) { |
2535 | bfq_updated_next_req(bfqd, bfqq); |
2536 | /* |
2537 | * See comments on bfq_pos_tree_add_move() for |
2538 | * the unlikely(). |
2539 | */ |
2540 | if (unlikely(!bfqd->nonrot_with_queueing)) |
2541 | bfq_pos_tree_add_move(bfqd, bfqq); |
2542 | } |
2543 | } |
2544 | } |
2545 | |
2546 | /* |
2547 | * This function is called to notify the scheduler that the requests |
2548 | * rq and 'next' have been merged, with 'next' going away. BFQ |
2549 | * exploits this hook to address the following issue: if 'next' has a |
 * fifo_time lower than rq, then the fifo_time of rq must be set to
2551 | * the value of 'next', to not forget the greater age of 'next'. |
2552 | * |
 * NOTE: in this function we assume that rq is in a bfq_queue, based
 * on the fact that rq is picked from the hash table q->elevator->hash,
 * which, in its turn, is filled only with I/O requests present in
2556 | * bfq_queues, while BFQ is in use for the request queue q. In fact, |
2557 | * the function that fills this hash table (elv_rqhash_add) is called |
2558 | * only by bfq_insert_request. |
2559 | */ |
2560 | static void bfq_requests_merged(struct request_queue *q, struct request *rq, |
2561 | struct request *next) |
2562 | { |
2563 | struct bfq_queue *bfqq = RQ_BFQQ(rq), |
2564 | *next_bfqq = RQ_BFQQ(next); |
2565 | |
2566 | if (!bfqq) |
2567 | goto remove; |
2568 | |
2569 | /* |
2570 | * If next and rq belong to the same bfq_queue and next is older |
2571 | * than rq, then reposition rq in the fifo (by substituting next |
2572 | * with rq). Otherwise, if next and rq belong to different |
2573 | * bfq_queues, never reposition rq: in fact, we would have to |
2574 | * reposition it with respect to next's position in its own fifo, |
2575 | * which would most certainly be too expensive with respect to |
2576 | * the benefits. |
2577 | */ |
2578 | if (bfqq == next_bfqq && |
	    !list_empty(&rq->queuelist) && !list_empty(&next->queuelist) &&
2580 | next->fifo_time < rq->fifo_time) { |
		list_del_init(&rq->queuelist);
		list_replace_init(&next->queuelist, &rq->queuelist);
2583 | rq->fifo_time = next->fifo_time; |
2584 | } |
2585 | |
2586 | if (bfqq->next_rq == next) |
2587 | bfqq->next_rq = rq; |
2588 | |
	bfqg_stats_update_io_merged(bfqq_group(bfqq), next->cmd_flags);
2590 | remove: |
2591 | /* Merged request may be in the IO scheduler. Remove it. */ |
2592 | if (!RB_EMPTY_NODE(&next->rb_node)) { |
		bfq_remove_request(next->q, next);
		if (next_bfqq)
			bfqg_stats_update_io_remove(bfqq_group(next_bfqq),
						    next->cmd_flags);
2597 | } |
2598 | } |
2599 | |
2600 | /* Must be called with bfqq != NULL */ |
2601 | static void bfq_bfqq_end_wr(struct bfq_queue *bfqq) |
2602 | { |
2603 | /* |
2604 | * If bfqq has been enjoying interactive weight-raising, then |
2605 | * reset soft_rt_next_start. We do it for the following |
2606 | * reason. bfqq may have been conveying the I/O needed to load |
2607 | * a soft real-time application. Such an application actually |
2608 | * exhibits a soft real-time I/O pattern after it finishes |
2609 | * loading, and finally starts doing its job. But, if bfqq has |
2610 | * been receiving a lot of bandwidth so far (likely to happen |
2611 | * on a fast device), then soft_rt_next_start now contains a |
	 * high value. So, without this reset, bfqq would be
2613 | * prevented from being possibly considered as soft_rt for a |
2614 | * very long time. |
2615 | */ |
2616 | |
2617 | if (bfqq->wr_cur_max_time != |
2618 | bfqq->bfqd->bfq_wr_rt_max_time) |
2619 | bfqq->soft_rt_next_start = jiffies; |
2620 | |
2621 | if (bfq_bfqq_busy(bfqq)) |
2622 | bfqq->bfqd->wr_busy_queues--; |
2623 | bfqq->wr_coeff = 1; |
2624 | bfqq->wr_cur_max_time = 0; |
2625 | bfqq->last_wr_start_finish = jiffies; |
2626 | /* |
2627 | * Trigger a weight change on the next invocation of |
2628 | * __bfq_entity_update_weight_prio. |
2629 | */ |
2630 | bfqq->entity.prio_changed = 1; |
2631 | } |
2632 | |
2633 | void bfq_end_wr_async_queues(struct bfq_data *bfqd, |
2634 | struct bfq_group *bfqg) |
2635 | { |
2636 | int i, j, k; |
2637 | |
2638 | for (k = 0; k < bfqd->num_actuators; k++) { |
2639 | for (i = 0; i < 2; i++) |
2640 | for (j = 0; j < IOPRIO_NR_LEVELS; j++) |
2641 | if (bfqg->async_bfqq[i][j][k]) |
					bfq_bfqq_end_wr(bfqg->async_bfqq[i][j][k]);
		if (bfqg->async_idle_bfqq[k])
			bfq_bfqq_end_wr(bfqg->async_idle_bfqq[k]);
2645 | } |
2646 | } |
2647 | |
2648 | static void bfq_end_wr(struct bfq_data *bfqd) |
2649 | { |
2650 | struct bfq_queue *bfqq; |
2651 | int i; |
2652 | |
	spin_lock_irq(&bfqd->lock);
2654 | |
2655 | for (i = 0; i < bfqd->num_actuators; i++) { |
2656 | list_for_each_entry(bfqq, &bfqd->active_list[i], bfqq_list) |
2657 | bfq_bfqq_end_wr(bfqq); |
2658 | } |
2659 | list_for_each_entry(bfqq, &bfqd->idle_list, bfqq_list) |
2660 | bfq_bfqq_end_wr(bfqq); |
2661 | bfq_end_wr_async(bfqd); |
2662 | |
	spin_unlock_irq(&bfqd->lock);
2664 | } |
2665 | |
2666 | static sector_t bfq_io_struct_pos(void *io_struct, bool request) |
2667 | { |
2668 | if (request) |
		return blk_rq_pos(io_struct);
2670 | else |
2671 | return ((struct bio *)io_struct)->bi_iter.bi_sector; |
2672 | } |
2673 | |
2674 | static int bfq_rq_close_to_sector(void *io_struct, bool request, |
2675 | sector_t sector) |
2676 | { |
2677 | return abs(bfq_io_struct_pos(io_struct, request) - sector) <= |
2678 | BFQQ_CLOSE_THR; |
2679 | } |
2680 | |
2681 | static struct bfq_queue *bfqq_find_close(struct bfq_data *bfqd, |
2682 | struct bfq_queue *bfqq, |
2683 | sector_t sector) |
2684 | { |
2685 | struct rb_root *root = &bfqq_group(bfqq)->rq_pos_tree; |
2686 | struct rb_node *parent, *node; |
2687 | struct bfq_queue *__bfqq; |
2688 | |
2689 | if (RB_EMPTY_ROOT(root)) |
2690 | return NULL; |
2691 | |
2692 | /* |
2693 | * First, if we find a request starting at the end of the last |
2694 | * request, choose it. |
2695 | */ |
	__bfqq = bfq_rq_pos_tree_lookup(bfqd, root, sector, &parent, NULL);
2697 | if (__bfqq) |
2698 | return __bfqq; |
2699 | |
2700 | /* |
2701 | * If the exact sector wasn't found, the parent of the NULL leaf |
2702 | * will contain the closest sector (rq_pos_tree sorted by |
2703 | * next_request position). |
2704 | */ |
2705 | __bfqq = rb_entry(parent, struct bfq_queue, pos_node); |
	if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
2707 | return __bfqq; |
2708 | |
	if (blk_rq_pos(__bfqq->next_rq) < sector)
2710 | node = rb_next(&__bfqq->pos_node); |
2711 | else |
2712 | node = rb_prev(&__bfqq->pos_node); |
2713 | if (!node) |
2714 | return NULL; |
2715 | |
2716 | __bfqq = rb_entry(node, struct bfq_queue, pos_node); |
	if (bfq_rq_close_to_sector(__bfqq->next_rq, true, sector))
2718 | return __bfqq; |
2719 | |
2720 | return NULL; |
2721 | } |
2722 | |
2723 | static struct bfq_queue *bfq_find_close_cooperator(struct bfq_data *bfqd, |
2724 | struct bfq_queue *cur_bfqq, |
2725 | sector_t sector) |
2726 | { |
2727 | struct bfq_queue *bfqq; |
2728 | |
2729 | /* |
2730 | * We shall notice if some of the queues are cooperating, |
2731 | * e.g., working closely on the same area of the device. In |
2732 | * that case, we can group them together and: 1) don't waste |
2733 | * time idling, and 2) serve the union of their requests in |
2734 | * the best possible order for throughput. |
2735 | */ |
	bfqq = bfqq_find_close(bfqd, cur_bfqq, sector);
2737 | if (!bfqq || bfqq == cur_bfqq) |
2738 | return NULL; |
2739 | |
2740 | return bfqq; |
2741 | } |
2742 | |
2743 | static struct bfq_queue * |
2744 | bfq_setup_merge(struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) |
2745 | { |
2746 | int process_refs, new_process_refs; |
2747 | struct bfq_queue *__bfqq; |
2748 | |
2749 | /* |
2750 | * If there are no process references on the new_bfqq, then it is |
2751 | * unsafe to follow the ->new_bfqq chain as other bfqq's in the chain |
2752 | * may have dropped their last reference (not just their last process |
2753 | * reference). |
2754 | */ |
	if (!bfqq_process_refs(new_bfqq))
2756 | return NULL; |
2757 | |
2758 | /* Avoid a circular list and skip interim queue merges. */ |
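	/*
	 * Illustrative example (not part of the original code): if
	 * new_bfqq is A, A->new_bfqq is B and B->new_bfqq is C, the
	 * loop below walks the chain down to C, so bfqq ends up being
	 * scheduled for a merge directly with C rather than with an
	 * interim queue; if the chain ever points back to bfqq itself,
	 * the merge is aborted to avoid creating a cycle.
	 */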
2759 | while ((__bfqq = new_bfqq->new_bfqq)) { |
2760 | if (__bfqq == bfqq) |
2761 | return NULL; |
2762 | new_bfqq = __bfqq; |
2763 | } |
2764 | |
2765 | process_refs = bfqq_process_refs(bfqq); |
	new_process_refs = bfqq_process_refs(new_bfqq);
2767 | /* |
2768 | * If the process for the bfqq has gone away, there is no |
2769 | * sense in merging the queues. |
2770 | */ |
2771 | if (process_refs == 0 || new_process_refs == 0) |
2772 | return NULL; |
2773 | |
2774 | /* |
2775 | * Make sure merged queues belong to the same parent. Parents could |
2776 | * have changed since the time we decided the two queues are suitable |
2777 | * for merging. |
2778 | */ |
2779 | if (new_bfqq->entity.parent != bfqq->entity.parent) |
2780 | return NULL; |
2781 | |
2782 | bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d" , |
2783 | new_bfqq->pid); |
2784 | |
2785 | /* |
2786 | * Merging is just a redirection: the requests of the process |
2787 | * owning one of the two queues are redirected to the other queue. |
2788 | * The latter queue, in its turn, is set as shared if this is the |
2789 | * first time that the requests of some process are redirected to |
2790 | * it. |
2791 | * |
2792 | * We redirect bfqq to new_bfqq and not the opposite, because |
2793 | * we are in the context of the process owning bfqq, thus we |
2794 | * have the io_cq of this process. So we can immediately |
2795 | * configure this io_cq to redirect the requests of the |
2796 | * process to new_bfqq. In contrast, the io_cq of new_bfqq is |
2797 | * not available any more (new_bfqq->bic == NULL). |
2798 | * |
2799 | * Anyway, even in case new_bfqq coincides with the in-service |
	 * queue, redirecting requests to the in-service queue is the
2801 | * best option, as we feed the in-service queue with new |
2802 | * requests close to the last request served and, by doing so, |
2803 | * are likely to increase the throughput. |
2804 | */ |
2805 | bfqq->new_bfqq = new_bfqq; |
2806 | /* |
2807 | * The above assignment schedules the following redirections: |
2808 | * each time some I/O for bfqq arrives, the process that |
2809 | * generated that I/O is disassociated from bfqq and |
	 * associated with new_bfqq. Here we increase new_bfqq->ref
2811 | * in advance, adding the number of processes that are |
2812 | * expected to be associated with new_bfqq as they happen to |
2813 | * issue I/O. |
2814 | */ |
2815 | new_bfqq->ref += process_refs; |
2816 | return new_bfqq; |
2817 | } |
2818 | |
2819 | static bool bfq_may_be_close_cooperator(struct bfq_queue *bfqq, |
2820 | struct bfq_queue *new_bfqq) |
2821 | { |
	if (bfq_too_late_for_merging(new_bfqq))
2823 | return false; |
2824 | |
2825 | if (bfq_class_idle(bfqq) || bfq_class_idle(new_bfqq) || |
2826 | (bfqq->ioprio_class != new_bfqq->ioprio_class)) |
2827 | return false; |
2828 | |
2829 | /* |
2830 | * If either of the queues has already been detected as seeky, |
2831 | * then merging it with the other queue is unlikely to lead to |
2832 | * sequential I/O. |
2833 | */ |
2834 | if (BFQQ_SEEKY(bfqq) || BFQQ_SEEKY(new_bfqq)) |
2835 | return false; |
2836 | |
2837 | /* |
2838 | * Interleaved I/O is known to be done by (some) applications |
2839 | * only for reads, so it does not make sense to merge async |
2840 | * queues. |
2841 | */ |
	if (!bfq_bfqq_sync(bfqq) || !bfq_bfqq_sync(new_bfqq))
2843 | return false; |
2844 | |
2845 | return true; |
2846 | } |
2847 | |
2848 | static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd, |
2849 | struct bfq_queue *bfqq); |
2850 | |
2851 | static struct bfq_queue * |
2852 | bfq_setup_stable_merge(struct bfq_data *bfqd, struct bfq_queue *bfqq, |
2853 | struct bfq_queue *stable_merge_bfqq, |
2854 | struct bfq_iocq_bfqq_data *bfqq_data) |
2855 | { |
2856 | int proc_ref = min(bfqq_process_refs(bfqq), |
2857 | bfqq_process_refs(stable_merge_bfqq)); |
2858 | struct bfq_queue *new_bfqq = NULL; |
2859 | |
2860 | bfqq_data->stable_merge_bfqq = NULL; |
2861 | if (idling_boosts_thr_without_issues(bfqd, bfqq) || proc_ref == 0) |
2862 | goto out; |
2863 | |
2864 | /* next function will take at least one ref */ |
	new_bfqq = bfq_setup_merge(bfqq, stable_merge_bfqq);
2866 | |
2867 | if (new_bfqq) { |
2868 | bfqq_data->stably_merged = true; |
2869 | if (new_bfqq->bic) { |
2870 | unsigned int new_a_idx = new_bfqq->actuator_idx; |
2871 | struct bfq_iocq_bfqq_data *new_bfqq_data = |
2872 | &new_bfqq->bic->bfqq_data[new_a_idx]; |
2873 | |
2874 | new_bfqq_data->stably_merged = true; |
2875 | } |
2876 | } |
2877 | |
2878 | out: |
2879 | /* deschedule stable merge, because done or aborted here */ |
	bfq_put_stable_ref(stable_merge_bfqq);
2881 | |
2882 | return new_bfqq; |
2883 | } |
2884 | |
2885 | /* |
2886 | * Attempt to schedule a merge of bfqq with the currently in-service |
2887 | * queue or with a close queue among the scheduled queues. Return |
2888 | * NULL if no merge was scheduled, a pointer to the shared bfq_queue |
2889 | * structure otherwise. |
2890 | * |
 * The OOM queue is not allowed to participate in cooperation: in fact, since
2892 | * the requests temporarily redirected to the OOM queue could be redirected |
2893 | * again to dedicated queues at any time, the state needed to correctly |
2894 | * handle merging with the OOM queue would be quite complex and expensive |
2895 | * to maintain. Besides, in such a critical condition as an out of memory, |
 * the benefits of queue merging may be of little relevance, or even negligible.
2897 | * |
2898 | * WARNING: queue merging may impair fairness among non-weight raised |
2899 | * queues, for at least two reasons: 1) the original weight of a |
 * merged queue may change during the merged state, 2) even if the
 * weight stays the same, a merged queue may be bloated with many more
2902 | * requests than the ones produced by its originally-associated |
2903 | * process. |
2904 | */ |
2905 | static struct bfq_queue * |
2906 | bfq_setup_cooperator(struct bfq_data *bfqd, struct bfq_queue *bfqq, |
2907 | void *io_struct, bool request, struct bfq_io_cq *bic) |
2908 | { |
2909 | struct bfq_queue *in_service_bfqq, *new_bfqq; |
2910 | unsigned int a_idx = bfqq->actuator_idx; |
2911 | struct bfq_iocq_bfqq_data *bfqq_data = &bic->bfqq_data[a_idx]; |
2912 | |
2913 | /* if a merge has already been setup, then proceed with that first */ |
2914 | if (bfqq->new_bfqq) |
2915 | return bfqq->new_bfqq; |
2916 | |
2917 | /* |
2918 | * Check delayed stable merge for rotational or non-queueing |
2919 | * devs. For this branch to be executed, bfqq must not be |
2920 | * currently merged with some other queue (i.e., bfqq->bic |
2921 | * must be non null). If we considered also merged queues, |
2922 | * then we should also check whether bfqq has already been |
2923 | * merged with bic->stable_merge_bfqq. But this would be |
2924 | * costly and complicated. |
2925 | */ |
2926 | if (unlikely(!bfqd->nonrot_with_queueing)) { |
2927 | /* |
2928 | * Make sure also that bfqq is sync, because |
2929 | * bic->stable_merge_bfqq may point to some queue (for |
2930 | * stable merging) also if bic is associated with a |
2931 | * sync queue, but this bfqq is async |
2932 | */ |
2933 | if (bfq_bfqq_sync(bfqq) && bfqq_data->stable_merge_bfqq && |
2934 | !bfq_bfqq_just_created(bfqq) && |
2935 | time_is_before_jiffies(bfqq->split_time + |
2936 | msecs_to_jiffies(bfq_late_stable_merging)) && |
2937 | time_is_before_jiffies(bfqq->creation_time + |
2938 | msecs_to_jiffies(bfq_late_stable_merging))) { |
2939 | struct bfq_queue *stable_merge_bfqq = |
2940 | bfqq_data->stable_merge_bfqq; |
2941 | |
2942 | return bfq_setup_stable_merge(bfqd, bfqq, |
2943 | stable_merge_bfqq, |
2944 | bfqq_data); |
2945 | } |
2946 | } |
2947 | |
2948 | /* |
2949 | * Do not perform queue merging if the device is non |
2950 | * rotational and performs internal queueing. In fact, such a |
2951 | * device reaches a high speed through internal parallelism |
2952 | * and pipelining. This means that, to reach a high |
2953 | * throughput, it must have many requests enqueued at the same |
2954 | * time. But, in this configuration, the internal scheduling |
2955 | * algorithm of the device does exactly the job of queue |
2956 | * merging: it reorders requests so as to obtain as much as |
2957 | * possible a sequential I/O pattern. As a consequence, with |
2958 | * the workload generated by processes doing interleaved I/O, |
2959 | * the throughput reached by the device is likely to be the |
2960 | * same, with and without queue merging. |
2961 | * |
2962 | * Disabling merging also provides a remarkable benefit in |
2963 | * terms of throughput. Merging tends to make many workloads |
2964 | * artificially more uneven, because of shared queues |
2965 | * remaining non empty for incomparably more time than |
2966 | * non-merged queues. This may accentuate workload |
2967 | * asymmetries. For example, if one of the queues in a set of |
2968 | * merged queues has a higher weight than a normal queue, then |
2969 | * the shared queue may inherit such a high weight and, by |
2970 | * staying almost always active, may force BFQ to perform I/O |
2971 | * plugging most of the time. This evidently makes it harder |
2972 | * for BFQ to let the device reach a high throughput. |
2973 | * |
2974 | * Finally, the likely() macro below is not used because one |
2975 | * of the two branches is more likely than the other, but to |
2976 | * have the code path after the following if() executed as |
2977 | * fast as possible for the case of a non rotational device |
2978 | * with queueing. We want it because this is the fastest kind |
2979 | * of device. On the opposite end, the likely() may lengthen |
2980 | * the execution time of BFQ for the case of slower devices |
2981 | * (rotational or at least without queueing). But in this case |
2982 | * the execution time of BFQ matters very little, if not at |
2983 | * all. |
2984 | */ |
2985 | if (likely(bfqd->nonrot_with_queueing)) |
2986 | return NULL; |
2987 | |
2988 | /* |
2989 | * Prevent bfqq from being merged if it has been created too |
2990 | * long ago. The idea is that true cooperating processes, and |
2991 | * thus their associated bfq_queues, are supposed to be |
2992 | * created shortly after each other. This is the case, e.g., |
2993 | * for KVM/QEMU and dump I/O threads. Basing on this |
2994 | * assumption, the following filtering greatly reduces the |
2995 | * probability that two non-cooperating processes, which just |
2996 | * happen to do close I/O for some short time interval, have |
2997 | * their queues merged by mistake. |
2998 | */ |
2999 | if (bfq_too_late_for_merging(bfqq)) |
3000 | return NULL; |
3001 | |
3002 | if (!io_struct || unlikely(bfqq == &bfqd->oom_bfqq)) |
3003 | return NULL; |
3004 | |
3005 | /* If there is only one backlogged queue, don't search. */ |
3006 | if (bfq_tot_busy_queues(bfqd) == 1) |
3007 | return NULL; |
3008 | |
3009 | in_service_bfqq = bfqd->in_service_queue; |
3010 | |
3011 | if (in_service_bfqq && in_service_bfqq != bfqq && |
3012 | likely(in_service_bfqq != &bfqd->oom_bfqq) && |
3013 | bfq_rq_close_to_sector(io_struct, request, |
				   bfqd->in_serv_last_pos) &&
3015 | bfqq->entity.parent == in_service_bfqq->entity.parent && |
	    bfq_may_be_close_cooperator(bfqq, in_service_bfqq)) {
		new_bfqq = bfq_setup_merge(bfqq, in_service_bfqq);
3018 | if (new_bfqq) |
3019 | return new_bfqq; |
3020 | } |
3021 | /* |
3022 | * Check whether there is a cooperator among currently scheduled |
3023 | * queues. The only thing we need is that the bio/request is not |
3024 | * NULL, as we need it to establish whether a cooperator exists. |
3025 | */ |
	new_bfqq = bfq_find_close_cooperator(bfqd, bfqq,
			bfq_io_struct_pos(io_struct, request));
3028 | |
3029 | if (new_bfqq && likely(new_bfqq != &bfqd->oom_bfqq) && |
3030 | bfq_may_be_close_cooperator(bfqq, new_bfqq)) |
3031 | return bfq_setup_merge(bfqq, new_bfqq); |
3032 | |
3033 | return NULL; |
3034 | } |
3035 | |
3036 | static void bfq_bfqq_save_state(struct bfq_queue *bfqq) |
3037 | { |
3038 | struct bfq_io_cq *bic = bfqq->bic; |
3039 | unsigned int a_idx = bfqq->actuator_idx; |
3040 | struct bfq_iocq_bfqq_data *bfqq_data = &bic->bfqq_data[a_idx]; |
3041 | |
3042 | /* |
3043 | * If !bfqq->bic, the queue is already shared or its requests |
3044 | * have already been redirected to a shared queue; both idle window |
3045 | * and weight raising state have already been saved. Do nothing. |
3046 | */ |
3047 | if (!bic) |
3048 | return; |
3049 | |
3050 | bfqq_data->saved_last_serv_time_ns = bfqq->last_serv_time_ns; |
3051 | bfqq_data->saved_inject_limit = bfqq->inject_limit; |
3052 | bfqq_data->saved_decrease_time_jif = bfqq->decrease_time_jif; |
3053 | |
3054 | bfqq_data->saved_weight = bfqq->entity.orig_weight; |
3055 | bfqq_data->saved_ttime = bfqq->ttime; |
3056 | bfqq_data->saved_has_short_ttime = |
3057 | bfq_bfqq_has_short_ttime(bfqq); |
3058 | bfqq_data->saved_IO_bound = bfq_bfqq_IO_bound(bfqq); |
3059 | bfqq_data->saved_io_start_time = bfqq->io_start_time; |
3060 | bfqq_data->saved_tot_idle_time = bfqq->tot_idle_time; |
3061 | bfqq_data->saved_in_large_burst = bfq_bfqq_in_large_burst(bfqq); |
3062 | bfqq_data->was_in_burst_list = |
		!hlist_unhashed(&bfqq->burst_list_node);
3064 | |
3065 | if (unlikely(bfq_bfqq_just_created(bfqq) && |
3066 | !bfq_bfqq_in_large_burst(bfqq) && |
3067 | bfqq->bfqd->low_latency)) { |
3068 | /* |
3069 | * bfqq being merged right after being created: bfqq |
3070 | * would have deserved interactive weight raising, but |
3071 | * did not make it to be set in a weight-raised state, |
3072 | * because of this early merge. Store directly the |
3073 | * weight-raising state that would have been assigned |
		 * to bfqq, so as to prevent bfqq from unjustly failing
		 * to enjoy weight raising if it is split soon.
3076 | */ |
3077 | bfqq_data->saved_wr_coeff = bfqq->bfqd->bfq_wr_coeff; |
3078 | bfqq_data->saved_wr_start_at_switch_to_srt = |
3079 | bfq_smallest_from_now(); |
3080 | bfqq_data->saved_wr_cur_max_time = |
			bfq_wr_duration(bfqq->bfqd);
3082 | bfqq_data->saved_last_wr_start_finish = jiffies; |
3083 | } else { |
3084 | bfqq_data->saved_wr_coeff = bfqq->wr_coeff; |
3085 | bfqq_data->saved_wr_start_at_switch_to_srt = |
3086 | bfqq->wr_start_at_switch_to_srt; |
3087 | bfqq_data->saved_service_from_wr = |
3088 | bfqq->service_from_wr; |
3089 | bfqq_data->saved_last_wr_start_finish = |
3090 | bfqq->last_wr_start_finish; |
3091 | bfqq_data->saved_wr_cur_max_time = bfqq->wr_cur_max_time; |
3092 | } |
3093 | } |
3094 | |
3095 | |
3096 | static void |
3097 | bfq_reassign_last_bfqq(struct bfq_queue *cur_bfqq, struct bfq_queue *new_bfqq) |
3098 | { |
3099 | if (cur_bfqq->entity.parent && |
3100 | cur_bfqq->entity.parent->last_bfqq_created == cur_bfqq) |
3101 | cur_bfqq->entity.parent->last_bfqq_created = new_bfqq; |
3102 | else if (cur_bfqq->bfqd && cur_bfqq->bfqd->last_bfqq_created == cur_bfqq) |
3103 | cur_bfqq->bfqd->last_bfqq_created = new_bfqq; |
3104 | } |
3105 | |
3106 | void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq) |
3107 | { |
3108 | /* |
3109 | * To prevent bfqq's service guarantees from being violated, |
3110 | * bfqq may be left busy, i.e., queued for service, even if |
3111 | * empty (see comments in __bfq_bfqq_expire() for |
3112 | * details). But, if no process will send requests to bfqq any |
3113 | * longer, then there is no point in keeping bfqq queued for |
3114 | * service. In addition, keeping bfqq queued for service, but |
3115 | * with no process ref any longer, may have caused bfqq to be |
3116 | * freed when dequeued from service. But this is assumed to |
3117 | * never happen. |
3118 | */ |
3119 | if (bfq_bfqq_busy(bfqq) && RB_EMPTY_ROOT(&bfqq->sort_list) && |
3120 | bfqq != bfqd->in_service_queue) |
		bfq_del_bfqq_busy(bfqq, false);
3122 | |
	bfq_reassign_last_bfqq(bfqq, NULL);
3124 | |
3125 | bfq_put_queue(bfqq); |
3126 | } |
3127 | |
3128 | static void |
3129 | bfq_merge_bfqqs(struct bfq_data *bfqd, struct bfq_io_cq *bic, |
3130 | struct bfq_queue *bfqq, struct bfq_queue *new_bfqq) |
3131 | { |
3132 | bfq_log_bfqq(bfqd, bfqq, "merging with queue %lu" , |
3133 | (unsigned long)new_bfqq->pid); |
3134 | /* Save weight raising and idle window of the merged queues */ |
3135 | bfq_bfqq_save_state(bfqq); |
	bfq_bfqq_save_state(new_bfqq);
3137 | if (bfq_bfqq_IO_bound(bfqq)) |
		bfq_mark_bfqq_IO_bound(new_bfqq);
3139 | bfq_clear_bfqq_IO_bound(bfqq); |
3140 | |
3141 | /* |
3142 | * The processes associated with bfqq are cooperators of the |
3143 | * processes associated with new_bfqq. So, if bfqq has a |
3144 | * waker, then assume that all these processes will be happy |
3145 | * to let bfqq's waker freely inject I/O when they have no |
3146 | * I/O. |
3147 | */ |
3148 | if (bfqq->waker_bfqq && !new_bfqq->waker_bfqq && |
3149 | bfqq->waker_bfqq != new_bfqq) { |
3150 | new_bfqq->waker_bfqq = bfqq->waker_bfqq; |
3151 | new_bfqq->tentative_waker_bfqq = NULL; |
3152 | |
3153 | /* |
3154 | * If the waker queue disappears, then |
3155 | * new_bfqq->waker_bfqq must be reset. So insert |
3156 | * new_bfqq into the woken_list of the waker. See |
3157 | * bfq_check_waker for details. |
3158 | */ |
		hlist_add_head(&new_bfqq->woken_list_node,
			       &new_bfqq->waker_bfqq->woken_list);
3161 | |
3162 | } |
3163 | |
3164 | /* |
3165 | * If bfqq is weight-raised, then let new_bfqq inherit |
3166 | * weight-raising. To reduce false positives, neglect the case |
3167 | * where bfqq has just been created, but has not yet made it |
3168 | * to be weight-raised (which may happen because EQM may merge |
3169 | * bfqq even before bfq_add_request is executed for the first |
3170 | * time for bfqq). Handling this case would however be very |
3171 | * easy, thanks to the flag just_created. |
3172 | */ |
3173 | if (new_bfqq->wr_coeff == 1 && bfqq->wr_coeff > 1) { |
3174 | new_bfqq->wr_coeff = bfqq->wr_coeff; |
3175 | new_bfqq->wr_cur_max_time = bfqq->wr_cur_max_time; |
3176 | new_bfqq->last_wr_start_finish = bfqq->last_wr_start_finish; |
3177 | new_bfqq->wr_start_at_switch_to_srt = |
3178 | bfqq->wr_start_at_switch_to_srt; |
		if (bfq_bfqq_busy(new_bfqq))
3180 | bfqd->wr_busy_queues++; |
3181 | new_bfqq->entity.prio_changed = 1; |
3182 | } |
3183 | |
3184 | if (bfqq->wr_coeff > 1) { /* bfqq has given its wr to new_bfqq */ |
3185 | bfqq->wr_coeff = 1; |
3186 | bfqq->entity.prio_changed = 1; |
3187 | if (bfq_bfqq_busy(bfqq)) |
3188 | bfqd->wr_busy_queues--; |
3189 | } |
3190 | |
3191 | bfq_log_bfqq(bfqd, new_bfqq, "merge_bfqqs: wr_busy %d" , |
3192 | bfqd->wr_busy_queues); |
3193 | |
3194 | /* |
3195 | * Merge queues (that is, let bic redirect its requests to new_bfqq) |
3196 | */ |
	bic_set_bfqq(bic, new_bfqq, true, bfqq->actuator_idx);
	bfq_mark_bfqq_coop(new_bfqq);
3199 | /* |
3200 | * new_bfqq now belongs to at least two bics (it is a shared queue): |
3201 | * set new_bfqq->bic to NULL. bfqq either: |
3202 | * - does not belong to any bic any more, and hence bfqq->bic must |
3203 | * be set to NULL, or |
3204 | * - is a queue whose owning bics have already been redirected to a |
3205 | * different queue, hence the queue is destined to not belong to |
3206 | * any bic soon and bfqq->bic is already NULL (therefore the next |
3207 | * assignment causes no harm). |
3208 | */ |
3209 | new_bfqq->bic = NULL; |
3210 | /* |
3211 | * If the queue is shared, the pid is the pid of one of the associated |
3212 | * processes. Which pid depends on the exact sequence of merge events |
3213 | * the queue underwent. So printing such a pid is useless and confusing |
3214 | * because it reports a random pid between those of the associated |
3215 | * processes. |
3216 | * We mark such a queue with a pid -1, and then print SHARED instead of |
3217 | * a pid in logging messages. |
3218 | */ |
3219 | new_bfqq->pid = -1; |
3220 | bfqq->bic = NULL; |
3221 | |
	bfq_reassign_last_bfqq(bfqq, new_bfqq);
3223 | |
3224 | bfq_release_process_ref(bfqd, bfqq); |
3225 | } |
3226 | |
3227 | static bool bfq_allow_bio_merge(struct request_queue *q, struct request *rq, |
3228 | struct bio *bio) |
3229 | { |
3230 | struct bfq_data *bfqd = q->elevator->elevator_data; |
	bool is_sync = op_is_sync(bio->bi_opf);
3232 | struct bfq_queue *bfqq = bfqd->bio_bfqq, *new_bfqq; |
3233 | |
3234 | /* |
3235 | * Disallow merge of a sync bio into an async request. |
3236 | */ |
3237 | if (is_sync && !rq_is_sync(rq)) |
3238 | return false; |
3239 | |
3240 | /* |
3241 | * Lookup the bfqq that this bio will be queued with. Allow |
3242 | * merge only if rq is queued there. |
3243 | */ |
3244 | if (!bfqq) |
3245 | return false; |
3246 | |
3247 | /* |
3248 | * We take advantage of this function to perform an early merge |
3249 | * of the queues of possible cooperating processes. |
3250 | */ |
	new_bfqq = bfq_setup_cooperator(bfqd, bfqq, bio, false, bfqd->bio_bic);
3252 | if (new_bfqq) { |
3253 | /* |
		 * bic still points to bfqq, so it has not yet been
3255 | * redirected to some other bfq_queue, and a queue |
3256 | * merge between bfqq and new_bfqq can be safely |
3257 | * fulfilled, i.e., bic can be redirected to new_bfqq |
3258 | * and bfqq can be put. |
3259 | */ |
		bfq_merge_bfqqs(bfqd, bfqd->bio_bic, bfqq,
3261 | new_bfqq); |
3262 | /* |
		 * If we get here, bio will be queued into new_bfqq,
3264 | * so use new_bfqq to decide whether bio and rq can be |
3265 | * merged. |
3266 | */ |
3267 | bfqq = new_bfqq; |
3268 | |
3269 | /* |
		 * Change also bfqd->bio_bfqq, as
		 * bfqd->bio_bic now points to new_bfqq, and
		 * this function may be invoked again (and then may
		 * use again bfqd->bio_bfqq).
3274 | */ |
3275 | bfqd->bio_bfqq = bfqq; |
3276 | } |
3277 | |
3278 | return bfqq == RQ_BFQQ(rq); |
3279 | } |
3280 | |
3281 | /* |
3282 | * Set the maximum time for the in-service queue to consume its |
3283 | * budget. This prevents seeky processes from lowering the throughput. |
3284 | * In practice, a time-slice service scheme is used with seeky |
3285 | * processes. |
3286 | */ |
3287 | static void bfq_set_budget_timeout(struct bfq_data *bfqd, |
3288 | struct bfq_queue *bfqq) |
3289 | { |
3290 | unsigned int timeout_coeff; |
3291 | |
3292 | if (bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time) |
3293 | timeout_coeff = 1; |
3294 | else |
3295 | timeout_coeff = bfqq->entity.weight / bfqq->entity.orig_weight; |
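	/*
	 * Purely illustrative example (not part of the original code):
	 * for a queue whose weight has been raised to three times its
	 * original weight (and that is not in the soft real-time
	 * weight-raising period), timeout_coeff evaluates to 3, so the
	 * queue is granted three times the base budget timeout below.
	 */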
3296 | |
3297 | bfqd->last_budget_start = ktime_get(); |
3298 | |
3299 | bfqq->budget_timeout = jiffies + |
3300 | bfqd->bfq_timeout * timeout_coeff; |
3301 | } |
3302 | |
3303 | static void __bfq_set_in_service_queue(struct bfq_data *bfqd, |
3304 | struct bfq_queue *bfqq) |
3305 | { |
3306 | if (bfqq) { |
3307 | bfq_clear_bfqq_fifo_expire(bfqq); |
3308 | |
3309 | bfqd->budgets_assigned = (bfqd->budgets_assigned * 7 + 256) / 8; |
3310 | |
3311 | if (time_is_before_jiffies(bfqq->last_wr_start_finish) && |
3312 | bfqq->wr_coeff > 1 && |
3313 | bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time && |
3314 | time_is_before_jiffies(bfqq->budget_timeout)) { |
3315 | /* |
3316 | * For soft real-time queues, move the start |
3317 | * of the weight-raising period forward by the |
3318 | * time the queue has not received any |
3319 | * service. Otherwise, a relatively long |
3320 | * service delay is likely to cause the |
3321 | * weight-raising period of the queue to end, |
3322 | * because of the short duration of the |
3323 | * weight-raising period of a soft real-time |
3324 | * queue. It is worth noting that this move |
3325 | * is not so dangerous for the other queues, |
3326 | * because soft real-time queues are not |
3327 | * greedy. |
3328 | * |
3329 | * To not add a further variable, we use the |
3330 | * overloaded field budget_timeout to |
3331 | * determine for how long the queue has not |
3332 | * received service, i.e., how much time has |
3333 | * elapsed since the queue expired. However, |
3334 | * this is a little imprecise, because |
3335 | * budget_timeout is set to jiffies if bfqq |
3336 | * not only expires, but also remains with no |
3337 | * request. |
3338 | */ |
3339 | if (time_after(bfqq->budget_timeout, |
3340 | bfqq->last_wr_start_finish)) |
3341 | bfqq->last_wr_start_finish += |
3342 | jiffies - bfqq->budget_timeout; |
3343 | else |
3344 | bfqq->last_wr_start_finish = jiffies; |
3345 | } |
3346 | |
3347 | bfq_set_budget_timeout(bfqd, bfqq); |
3348 | bfq_log_bfqq(bfqd, bfqq, |
3349 | "set_in_service_queue, cur-budget = %d" , |
3350 | bfqq->entity.budget); |
3351 | } |
3352 | |
3353 | bfqd->in_service_queue = bfqq; |
3354 | bfqd->in_serv_last_pos = 0; |
3355 | } |
3356 | |
3357 | /* |
3358 | * Get and set a new queue for service. |
3359 | */ |
3360 | static struct bfq_queue *bfq_set_in_service_queue(struct bfq_data *bfqd) |
3361 | { |
3362 | struct bfq_queue *bfqq = bfq_get_next_queue(bfqd); |
3363 | |
3364 | __bfq_set_in_service_queue(bfqd, bfqq); |
3365 | return bfqq; |
3366 | } |
3367 | |
3368 | static void bfq_arm_slice_timer(struct bfq_data *bfqd) |
3369 | { |
3370 | struct bfq_queue *bfqq = bfqd->in_service_queue; |
3371 | u32 sl; |
3372 | |
3373 | bfq_mark_bfqq_wait_request(bfqq); |
3374 | |
3375 | /* |
3376 | * We don't want to idle for seeks, but we do want to allow |
3377 | * fair distribution of slice time for a process doing back-to-back |
	 * seeks. So allow a little bit of time for it to submit a new rq.
3379 | */ |
3380 | sl = bfqd->bfq_slice_idle; |
3381 | /* |
3382 | * Unless the queue is being weight-raised or the scenario is |
3383 | * asymmetric, grant only minimum idle time if the queue |
3384 | * is seeky. A long idling is preserved for a weight-raised |
3385 | * queue, or, more in general, in an asymmetric scenario, |
3386 | * because a long idling is needed for guaranteeing to a queue |
3387 | * its reserved share of the throughput (in particular, it is |
3388 | * needed if the queue has a higher weight than some other |
3389 | * queue). |
3390 | */ |
3391 | if (BFQQ_SEEKY(bfqq) && bfqq->wr_coeff == 1 && |
3392 | !bfq_asymmetric_scenario(bfqd, bfqq)) |
3393 | sl = min_t(u64, sl, BFQ_MIN_TT); |
3394 | else if (bfqq->wr_coeff > 1) |
3395 | sl = max_t(u32, sl, 20ULL * NSEC_PER_MSEC); |
3396 | |
3397 | bfqd->last_idling_start = ktime_get(); |
3398 | bfqd->last_idling_start_jiffies = jiffies; |
3399 | |
	hrtimer_start(&bfqd->idle_slice_timer, ns_to_ktime(sl),
		      HRTIMER_MODE_REL);
	bfqg_stats_set_start_idle_time(bfqq_group(bfqq));
3403 | } |
3404 | |
3405 | /* |
3406 | * In autotuning mode, max_budget is dynamically recomputed as the |
3407 | * amount of sectors transferred in timeout at the estimated peak |
3408 | * rate. This enables BFQ to utilize a full timeslice with a full |
3409 | * budget, even if the in-service queue is served at peak rate. And |
3410 | * this maximises throughput with sequential workloads. |
3411 | */ |
3412 | static unsigned long bfq_calc_max_budget(struct bfq_data *bfqd) |
3413 | { |
3414 | return (u64)bfqd->peak_rate * USEC_PER_MSEC * |
		jiffies_to_msecs(bfqd->bfq_timeout)>>BFQ_RATE_SHIFT;
3416 | } |
3417 | |
3418 | /* |
3419 | * Update parameters related to throughput and responsiveness, as a |
3420 | * function of the estimated peak rate. See comments on |
3421 | * bfq_calc_max_budget(), and on the ref_wr_duration array. |
3422 | */ |
3423 | static void update_thr_responsiveness_params(struct bfq_data *bfqd) |
3424 | { |
3425 | if (bfqd->bfq_user_max_budget == 0) { |
3426 | bfqd->bfq_max_budget = |
3427 | bfq_calc_max_budget(bfqd); |
3428 | bfq_log(bfqd, "new max_budget = %d" , bfqd->bfq_max_budget); |
3429 | } |
3430 | } |
3431 | |
3432 | static void bfq_reset_rate_computation(struct bfq_data *bfqd, |
3433 | struct request *rq) |
3434 | { |
3435 | if (rq != NULL) { /* new rq dispatch now, reset accordingly */ |
3436 | bfqd->last_dispatch = bfqd->first_dispatch = ktime_get_ns(); |
3437 | bfqd->peak_rate_samples = 1; |
3438 | bfqd->sequential_samples = 0; |
3439 | bfqd->tot_sectors_dispatched = bfqd->last_rq_max_size = |
3440 | blk_rq_sectors(rq); |
3441 | } else /* no new rq dispatched, just reset the number of samples */ |
3442 | bfqd->peak_rate_samples = 0; /* full re-init on next disp. */ |
3443 | |
3444 | bfq_log(bfqd, |
3445 | "reset_rate_computation at end, sample %u/%u tot_sects %llu" , |
3446 | bfqd->peak_rate_samples, bfqd->sequential_samples, |
3447 | bfqd->tot_sectors_dispatched); |
3448 | } |
3449 | |
3450 | static void bfq_update_rate_reset(struct bfq_data *bfqd, struct request *rq) |
3451 | { |
3452 | u32 rate, weight, divisor; |
3453 | |
3454 | /* |
3455 | * For the convergence property to hold (see comments on |
3456 | * bfq_update_peak_rate()) and for the assessment to be |
3457 | * reliable, a minimum number of samples must be present, and |
3458 | * a minimum amount of time must have elapsed. If not so, do |
3459 | * not compute new rate. Just reset parameters, to get ready |
3460 | * for a new evaluation attempt. |
3461 | */ |
3462 | if (bfqd->peak_rate_samples < BFQ_RATE_MIN_SAMPLES || |
3463 | bfqd->delta_from_first < BFQ_RATE_MIN_INTERVAL) |
3464 | goto reset_computation; |
3465 | |
3466 | /* |
3467 | * If a new request completion has occurred after last |
3468 | * dispatch, then, to approximate the rate at which requests |
3469 | * have been served by the device, it is more precise to |
3470 | * extend the observation interval to the last completion. |
3471 | */ |
3472 | bfqd->delta_from_first = |
3473 | max_t(u64, bfqd->delta_from_first, |
3474 | bfqd->last_completion - bfqd->first_dispatch); |
3475 | |
3476 | /* |
3477 | * Rate computed in sects/usec, and not sects/nsec, for |
3478 | * precision issues. |
3479 | */ |
3480 | rate = div64_ul(bfqd->tot_sectors_dispatched<<BFQ_RATE_SHIFT, |
3481 | div_u64(bfqd->delta_from_first, NSEC_PER_USEC)); |
3482 | |
3483 | /* |
3484 | * Peak rate not updated if: |
3485 | * - the percentage of sequential dispatches is below 3/4 of the |
3486 | * total, and rate is below the current estimated peak rate |
3487 | * - rate is unreasonably high (> 20M sectors/sec) |
3488 | */ |
3489 | if ((bfqd->sequential_samples < (3 * bfqd->peak_rate_samples)>>2 && |
3490 | rate <= bfqd->peak_rate) || |
3491 | rate > 20<<BFQ_RATE_SHIFT) |
3492 | goto reset_computation; |
3493 | |
3494 | /* |
3495 | * We have to update the peak rate, at last! To this purpose, |
3496 | * we use a low-pass filter. We compute the smoothing constant |
3497 | * of the filter as a function of the 'weight' of the new |
3498 | * measured rate. |
3499 | * |
3500 | * As can be seen in next formulas, we define this weight as a |
3501 | * quantity proportional to how sequential the workload is, |
3502 | * and to how long the observation time interval is. |
3503 | * |
3504 | * The weight runs from 0 to 8. The maximum value of the |
3505 | * weight, 8, yields the minimum value for the smoothing |
3506 | * constant. At this minimum value for the smoothing constant, |
3507 | * the measured rate contributes for half of the next value of |
3508 | * the estimated peak rate. |
3509 | * |
3510 | * So, the first step is to compute the weight as a function |
3511 | * of how sequential the workload is. Note that the weight |
3512 | * cannot reach 9, because bfqd->sequential_samples cannot |
3513 | * become equal to bfqd->peak_rate_samples, which, in its |
3514 | * turn, holds true because bfqd->sequential_samples is not |
3515 | * incremented for the first sample. |
3516 | */ |
3517 | weight = (9 * bfqd->sequential_samples) / bfqd->peak_rate_samples; |
3518 | |
3519 | /* |
3520 | * Second step: further refine the weight as a function of the |
3521 | * duration of the observation interval. |
3522 | */ |
3523 | weight = min_t(u32, 8, |
3524 | div_u64(weight * bfqd->delta_from_first, |
3525 | BFQ_RATE_REF_INTERVAL)); |
3526 | |
3527 | /* |
3528 | * Divisor ranging from 10, for minimum weight, to 2, for |
3529 | * maximum weight. |
3530 | */ |
3531 | divisor = 10 - weight; |
3532 | |
3533 | /* |
3534 | * Finally, update peak rate: |
3535 | * |
3536 | * peak_rate = peak_rate * (divisor-1) / divisor + rate / divisor |
3537 | */ |
3538 | bfqd->peak_rate *= divisor-1; |
3539 | bfqd->peak_rate /= divisor; |
3540 | rate /= divisor; /* smoothing constant alpha = 1/divisor */ |
3541 | |
3542 | bfqd->peak_rate += rate; |
3543 | |
3544 | /* |
3545 | * For a very slow device, bfqd->peak_rate can reach 0 (see |
3546 | * the minimum representable values reported in the comments |
3547 | * on BFQ_RATE_SHIFT). Push to 1 if this happens, to avoid |
3548 | * divisions by zero where bfqd->peak_rate is used as a |
3549 | * divisor. |
3550 | */ |
3551 | bfqd->peak_rate = max_t(u32, 1, bfqd->peak_rate); |
3552 | |
3553 | update_thr_responsiveness_params(bfqd); |
3554 | |
3555 | reset_computation: |
3556 | bfq_reset_rate_computation(bfqd, rq); |
3557 | } |
3558 | |
3559 | /* |
3560 | * Update the read/write peak rate (the main quantity used for |
3561 | * auto-tuning, see update_thr_responsiveness_params()). |
3562 | * |
3563 | * It is not trivial to estimate the peak rate (correctly): because of |
3564 | * the presence of sw and hw queues between the scheduler and the |
3565 | * device components that finally serve I/O requests, it is hard to |
3566 | * say exactly when a given dispatched request is served inside the |
3567 | * device, and for how long. As a consequence, it is hard to know |
3568 | * precisely at what rate a given set of requests is actually served |
3569 | * by the device. |
3570 | * |
3571 | * On the opposite end, the dispatch time of any request is trivially |
3572 | * available, and, from this piece of information, the "dispatch rate" |
3573 | * of requests can be immediately computed. So, the idea in the next |
3574 | * function is to use what is known, namely request dispatch times |
3575 | * (plus, when useful, request completion times), to estimate what is |
3576 | * unknown, namely in-device request service rate. |
3577 | * |
3578 | * The main issue is that, because of the above facts, the rate at |
3579 | * which a certain set of requests is dispatched over a certain time |
3580 | * interval can vary greatly with respect to the rate at which the |
3581 | * same requests are then served. But, since the size of any |
3582 | * intermediate queue is limited, and the service scheme is lossless |
3583 | * (no request is silently dropped), the following obvious convergence |
3584 | * property holds: the number of requests dispatched MUST become |
3585 | * closer and closer to the number of requests completed as the |
3586 | * observation interval grows. This is the key property used in |
3587 | * the next function to estimate the peak service rate as a function |
3588 | * of the observed dispatch rate. The function assumes to be invoked |
3589 | * on every request dispatch. |
3590 | */ |
3591 | static void bfq_update_peak_rate(struct bfq_data *bfqd, struct request *rq) |
3592 | { |
3593 | u64 now_ns = ktime_get_ns(); |
3594 | |
3595 | if (bfqd->peak_rate_samples == 0) { /* first dispatch */ |
3596 | bfq_log(bfqd, "update_peak_rate: goto reset, samples %d" , |
3597 | bfqd->peak_rate_samples); |
3598 | bfq_reset_rate_computation(bfqd, rq); |
3599 | goto update_last_values; /* will add one sample */ |
3600 | } |
3601 | |
3602 | /* |
3603 | * Device idle for very long: the observation interval lasting |
3604 | * up to this dispatch cannot be a valid observation interval |
3605 | * for computing a new peak rate (similarly to the late- |
3606 | * completion event in bfq_completed_request()). Go to |
3607 | * update_rate_and_reset to have the following three steps |
3608 | * taken: |
3609 | * - close the observation interval at the last (previous) |
3610 | * request dispatch or completion |
3611 | * - compute rate, if possible, for that observation interval |
3612 | * - start a new observation interval with this dispatch |
3613 | */ |
3614 | if (now_ns - bfqd->last_dispatch > 100*NSEC_PER_MSEC && |
3615 | bfqd->tot_rq_in_driver == 0) |
3616 | goto update_rate_and_reset; |
3617 | |
3618 | /* Update sampling information */ |
3619 | bfqd->peak_rate_samples++; |
3620 | |
3621 | if ((bfqd->tot_rq_in_driver > 0 || |
3622 | now_ns - bfqd->last_completion < BFQ_MIN_TT) |
3623 | && !BFQ_RQ_SEEKY(bfqd, bfqd->last_position, rq)) |
3624 | bfqd->sequential_samples++; |
3625 | |
3626 | bfqd->tot_sectors_dispatched += blk_rq_sectors(rq); |
3627 | |
3628 | /* Reset max observed rq size every 32 dispatches */ |
3629 | if (likely(bfqd->peak_rate_samples % 32)) |
3630 | bfqd->last_rq_max_size = |
3631 | max_t(u32, blk_rq_sectors(rq), bfqd->last_rq_max_size); |
3632 | else |
3633 | bfqd->last_rq_max_size = blk_rq_sectors(rq); |
3634 | |
3635 | bfqd->delta_from_first = now_ns - bfqd->first_dispatch; |
3636 | |
3637 | /* Target observation interval not yet reached, go on sampling */ |
3638 | if (bfqd->delta_from_first < BFQ_RATE_REF_INTERVAL) |
3639 | goto update_last_values; |
3640 | |
3641 | update_rate_and_reset: |
3642 | bfq_update_rate_reset(bfqd, rq); |
3643 | update_last_values: |
3644 | bfqd->last_position = blk_rq_pos(rq) + blk_rq_sectors(rq); |
3645 | if (RQ_BFQQ(rq) == bfqd->in_service_queue) |
3646 | bfqd->in_serv_last_pos = bfqd->last_position; |
3647 | bfqd->last_dispatch = now_ns; |
3648 | } |
3649 | |
3650 | /* |
3651 | * Remove request from internal lists. |
3652 | */ |
3653 | static void bfq_dispatch_remove(struct request_queue *q, struct request *rq) |
3654 | { |
3655 | struct bfq_queue *bfqq = RQ_BFQQ(rq); |
3656 | |
3657 | /* |
3658 | * For consistency, the next instruction should have been |
3659 | * executed after removing the request from the queue and |
3660 | * dispatching it. We execute instead this instruction before |
3661 | * bfq_remove_request() (and hence introduce a temporary |
3662 | * inconsistency), for efficiency. In fact, should this |
3663 | * dispatch occur for a non in-service bfqq, this anticipated |
3664 | * increment prevents two counters related to bfqq->dispatched |
3665 | * from risking to be, first, uselessly decremented, and then |
3666 | * incremented again when the (new) value of bfqq->dispatched |
3667 | * happens to be taken into account. |
3668 | */ |
3669 | bfqq->dispatched++; |
	bfq_update_peak_rate(q->elevator->elevator_data, rq);
3671 | |
3672 | bfq_remove_request(q, rq); |
3673 | } |
3674 | |
3675 | /* |
3676 | * There is a case where idling does not have to be performed for |
3677 | * throughput concerns, but to preserve the throughput share of |
3678 | * the process associated with bfqq. |
3679 | * |
3680 | * To introduce this case, we can note that allowing the drive |
3681 | * to enqueue more than one request at a time, and hence |
3682 | * delegating de facto final scheduling decisions to the |
3683 | * drive's internal scheduler, entails loss of control on the |
3684 | * actual request service order. In particular, the critical |
3685 | * situation is when requests from different processes happen |
3686 | * to be present, at the same time, in the internal queue(s) |
3687 | * of the drive. In such a situation, the drive, by deciding |
3688 | * the service order of the internally-queued requests, does |
3689 | * determine also the actual throughput distribution among |
3690 | * these processes. But the drive typically has no notion or |
3691 | * concern about per-process throughput distribution, and |
3692 | * makes its decisions only on a per-request basis. Therefore, |
3693 | * the service distribution enforced by the drive's internal |
3694 | * scheduler is likely to coincide with the desired throughput |
3695 | * distribution only in a completely symmetric, or favorably |
3696 | * skewed scenario where: |
3697 | * (i-a) each of these processes must get the same throughput as |
3698 | * the others, |
3699 | * (i-b) in case (i-a) does not hold, it holds that the process |
3700 | * associated with bfqq must receive a lower or equal |
3701 | * throughput than any of the other processes; |
3702 | * (ii) the I/O of each process has the same properties, in |
3703 | * terms of locality (sequential or random), direction |
3704 | * (reads or writes), request sizes, greediness |
 * (from I/O-bound to sporadic), and so on;
 *
 * In fact, in such a scenario, the drive tends to treat the requests
3708 | * of each process in about the same way as the requests of the |
3709 | * others, and thus to provide each of these processes with about the |
3710 | * same throughput. This is exactly the desired throughput |
3711 | * distribution if (i-a) holds, or, if (i-b) holds instead, this is an |
3712 | * even more convenient distribution for (the process associated with) |
3713 | * bfqq. |
3714 | * |
3715 | * In contrast, in any asymmetric or unfavorable scenario, device |
3716 | * idling (I/O-dispatch plugging) is certainly needed to guarantee |
3717 | * that bfqq receives its assigned fraction of the device throughput |
3718 | * (see [1] for details). |
3719 | * |
3720 | * The problem is that idling may significantly reduce throughput with |
3721 | * certain combinations of types of I/O and devices. An important |
3722 | * example is sync random I/O on flash storage with command |
3723 | * queueing. So, unless bfqq falls in cases where idling also boosts |
 * throughput, it is important to check conditions (i-a), (i-b) and
3725 | * (ii) accurately, so as to avoid idling when not strictly needed for |
3726 | * service guarantees. |
3727 | * |
3728 | * Unfortunately, it is extremely difficult to thoroughly check |
3729 | * condition (ii). And, in case there are active groups, it becomes |
3730 | * very difficult to check conditions (i-a) and (i-b) too. In fact, |
3731 | * if there are active groups, then, for conditions (i-a) or (i-b) to |
3732 | * become false 'indirectly', it is enough that an active group |
3733 | * contains more active processes or sub-groups than some other active |
3734 | * group. More precisely, for conditions (i-a) or (i-b) to become |
3735 | * false because of such a group, it is not even necessary that the |
3736 | * group is (still) active: it is sufficient that, even if the group |
3737 | * has become inactive, some of its descendant processes still have |
3738 | * some request already dispatched but still waiting for |
3739 | * completion. In fact, requests have still to be guaranteed their |
3740 | * share of the throughput even after being dispatched. In this |
3741 | * respect, it is easy to show that, if a group frequently becomes |
3742 | * inactive while still having in-flight requests, and if, when this |
3743 | * happens, the group is not considered in the calculation of whether |
3744 | * the scenario is asymmetric, then the group may fail to be |
3745 | * guaranteed its fair share of the throughput (basically because |
3746 | * idling may not be performed for the descendant processes of the |
3747 | * group, but it had to be). We address this issue with the following |
3748 | * bi-modal behavior, implemented in the function |
3749 | * bfq_asymmetric_scenario(). |
3750 | * |
3751 | * If there are groups with requests waiting for completion |
3752 | * (as commented above, some of these groups may even be |
3753 | * already inactive), then the scenario is tagged as |
3754 | * asymmetric, conservatively, without checking any of the |
3755 | * conditions (i-a), (i-b) or (ii). So the device is idled for bfqq. |
3756 | * This behavior matches also the fact that groups are created |
3757 | * exactly if controlling I/O is a primary concern (to |
3758 | * preserve bandwidth and latency guarantees). |
3759 | * |
3760 | * On the opposite end, if there are no groups with requests waiting |
3761 | * for completion, then only conditions (i-a) and (i-b) are actually |
3762 | * controlled, i.e., provided that conditions (i-a) or (i-b) holds, |
3763 | * idling is not performed, regardless of whether condition (ii) |
3764 | * holds. In other words, only if conditions (i-a) and (i-b) do not |
3765 | * hold, then idling is allowed, and the device tends to be prevented |
3766 | * from queueing many requests, possibly of several processes. Since |
3767 | * there are no groups with requests waiting for completion, then, to |
3768 | * control conditions (i-a) and (i-b) it is enough to check just |
3769 | * whether all the queues with requests waiting for completion also |
3770 | * have the same weight. |
3771 | * |
3772 | * Not checking condition (ii) evidently exposes bfqq to the |
3773 | * risk of getting less throughput than its fair share. |
3774 | * However, for queues with the same weight, a further |
3775 | * mechanism, preemption, mitigates or even eliminates this |
3776 | * problem. And it does so without consequences on overall |
3777 | * throughput. This mechanism and its benefits are explained |
3778 | * in the next three paragraphs. |
3779 | * |
3780 | * Even if a queue, say Q, is expired when it remains idle, Q |
3781 | * can still preempt the new in-service queue if the next |
3782 | * request of Q arrives soon (see the comments on |
3783 | * bfq_bfqq_update_budg_for_activation). If all queues and |
3784 | * groups have the same weight, this form of preemption, |
3785 | * combined with the hole-recovery heuristic described in the |
3786 | * comments on function bfq_bfqq_update_budg_for_activation, |
3787 | * are enough to preserve a correct bandwidth distribution in |
3788 | * the mid term, even without idling. In fact, even if not |
3789 | * idling allows the internal queues of the device to contain |
3790 | * many requests, and thus to reorder requests, we can rather |
3791 | * safely assume that the internal scheduler still preserves a |
3792 | * minimum of mid-term fairness. |
3793 | * |
3794 | * More precisely, this preemption-based, idleless approach |
3795 | * provides fairness in terms of IOPS, and not sectors per |
3796 | * second. This can be seen with a simple example. Suppose |
3797 | * that there are two queues with the same weight, but that |
3798 | * the first queue receives requests of 8 sectors, while the |
3799 | * second queue receives requests of 1024 sectors. In |
3800 | * addition, suppose that each of the two queues contains at |
3801 | * most one request at a time, which implies that each queue |
3802 | * always remains idle after it is served. Finally, after |
3803 | * remaining idle, each queue receives very quickly a new |
3804 | * request. It follows that the two queues are served |
3805 | * alternatively, preempting each other if needed. This |
3806 | * implies that, although both queues have the same weight, |
3807 | * the queue with large requests receives a service that is |
3808 | * 1024/8 times as high as the service received by the other |
3809 | * queue. |
3810 | * |
3811 | * The motivation for using preemption instead of idling (for |
3812 | * queues with the same weight) is that, by not idling, |
3813 | * service guarantees are preserved (completely or at least in |
3814 | * part) without minimally sacrificing throughput. And, if |
3815 | * there is no active group, then the primary expectation for |
3816 | * this device is probably a high throughput. |
3817 | * |
3818 | * We are now left only with explaining the two sub-conditions in the |
3819 | * additional compound condition that is checked below for deciding |
3820 | * whether the scenario is asymmetric. To explain the first |
3821 | * sub-condition, we need to add that the function |
3822 | * bfq_asymmetric_scenario checks the weights of only |
3823 | * non-weight-raised queues, for efficiency reasons (see comments on |
3824 | * bfq_weights_tree_add()). Then the fact that bfqq is weight-raised |
3825 | * is checked explicitly here. More precisely, the compound condition |
3826 | * below takes into account also the fact that, even if bfqq is being |
3827 | * weight-raised, the scenario is still symmetric if all queues with |
3828 | * requests waiting for completion happen to be |
3829 | * weight-raised. Actually, we should be even more precise here, and |
3830 | * differentiate between interactive weight raising and soft real-time |
3831 | * weight raising. |
3832 | * |
3833 | * The second sub-condition checked in the compound condition is |
3834 | * whether there is a fair amount of already in-flight I/O not |
3835 | * belonging to bfqq. If so, I/O dispatching is to be plugged, for the |
3836 | * following reason. The drive may decide to serve in-flight |
3837 | * non-bfqq's I/O requests before bfqq's ones, thereby delaying the |
3838 | * arrival of new I/O requests for bfqq (recall that bfqq is sync). If |
3839 | * I/O-dispatching is not plugged, then, while bfqq remains empty, a |
3840 | * basically uncontrolled amount of I/O from other queues may be |
3841 | * dispatched too, possibly causing the service of bfqq's I/O to be |
3842 | * delayed even longer in the drive. This problem gets more and more |
3843 | * serious as the speed and the queue depth of the drive grow, |
3844 | * because, as these two quantities grow, the probability to find no |
3845 | * queue busy but many requests in flight grows too. By contrast, |
3846 | * plugging I/O dispatching minimizes the delay induced by already |
3847 | * in-flight I/O, and enables bfqq to recover the bandwidth it may |
3848 | * lose because of this delay. |
3849 | * |
3850 | * As a side note, it is worth considering that the above |
3851 | * device-idling countermeasures may however fail in the following |
3852 | * unlucky scenario: if I/O-dispatch plugging is (correctly) disabled |
3853 | * in a time period during which all symmetry sub-conditions hold, and |
3854 | * therefore the device is allowed to enqueue many requests, but at |
3855 | * some later point in time some sub-condition stops to hold, then it |
3856 | * may become impossible to make requests be served in the desired |
3857 | * order until all the requests already queued in the device have been |
3858 | * served. The last sub-condition commented above somewhat mitigates |
3859 | * this problem for weight-raised queues. |
3860 | * |
3861 | * However, as an additional mitigation for this problem, we preserve |
3862 | * plugging for a special symmetric case that may suddenly turn into |
3863 | * asymmetric: the case where only bfqq is busy. In this case, not |
3864 | * expiring bfqq does not cause any harm to any other queues in terms |
3865 | * of service guarantees. In contrast, it avoids the following unlucky |
3866 | * sequence of events: (1) bfqq is expired, (2) a new queue with a |
3867 | * lower weight than bfqq becomes busy (or more queues), (3) the new |
3868 | * queue is served until a new request arrives for bfqq, (4) when bfqq |
3869 | * is finally served, there are so many requests of the new queue in |
3870 | * the drive that the pending requests for bfqq take a lot of time to |
 * be served. In particular, event (2) may cause even already
 * dispatched requests of bfqq to be delayed, inside the drive. So, to
 * avoid this series of events, the scenario is preventively declared
 * as asymmetric also if bfqq is the only busy queue.
3875 | */ |
3876 | static bool idling_needed_for_service_guarantees(struct bfq_data *bfqd, |
3877 | struct bfq_queue *bfqq) |
3878 | { |
3879 | int tot_busy_queues = bfq_tot_busy_queues(bfqd); |
3880 | |
3881 | /* No point in idling for bfqq if it won't get requests any longer */ |
3882 | if (unlikely(!bfqq_process_refs(bfqq))) |
3883 | return false; |
3884 | |
3885 | return (bfqq->wr_coeff > 1 && |
3886 | (bfqd->wr_busy_queues < tot_busy_queues || |
3887 | bfqd->tot_rq_in_driver >= bfqq->dispatched + 4)) || |
3888 | bfq_asymmetric_scenario(bfqd, bfqq) || |
3889 | tot_busy_queues == 1; |
3890 | } |
3891 | |
3892 | static bool __bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq, |
3893 | enum bfqq_expiration reason) |
3894 | { |
3895 | /* |
3896 | * If this bfqq is shared between multiple processes, check |
3897 | * to make sure that those processes are still issuing I/Os |
3898 | * within the mean seek distance. If not, it may be time to |
3899 | * break the queues apart again. |
3900 | */ |
3901 | if (bfq_bfqq_coop(bfqq) && BFQQ_SEEKY(bfqq)) |
3902 | bfq_mark_bfqq_split_coop(bfqq); |
3903 | |
3904 | /* |
3905 | * Consider queues with a higher finish virtual time than |
3906 | * bfqq. If idling_needed_for_service_guarantees(bfqq) returns |
3907 | * true, then bfqq's bandwidth would be violated if an |
3908 | * uncontrolled amount of I/O from these queues were |
3909 | * dispatched while bfqq is waiting for its new I/O to |
3910 | * arrive. This is exactly what may happen if this is a forced |
3911 | * expiration caused by a preemption attempt, and if bfqq is |
3912 | * not re-scheduled. To prevent this from happening, re-queue |
3913 | * bfqq if it needs I/O-dispatch plugging, even if it is |
3914 | * empty. By doing so, bfqq is granted to be served before the |
3915 | * above queues (provided that bfqq is of course eligible). |
3916 | */ |
3917 | if (RB_EMPTY_ROOT(&bfqq->sort_list) && |
3918 | !(reason == BFQQE_PREEMPTED && |
3919 | idling_needed_for_service_guarantees(bfqd, bfqq))) { |
3920 | if (bfqq->dispatched == 0) |
3921 | /* |
3922 | * Overloading budget_timeout field to store |
3923 | * the time at which the queue remains with no |
3924 | * backlog and no outstanding request; used by |
3925 | * the weight-raising mechanism. |
3926 | */ |
3927 | bfqq->budget_timeout = jiffies; |
3928 | |
3929 | 		bfq_del_bfqq_busy(bfqq, true); |
3930 | } else { |
3931 | 		bfq_requeue_bfqq(bfqd, bfqq, true); |
3932 | /* |
3933 | * Resort priority tree of potential close cooperators. |
3934 | * See comments on bfq_pos_tree_add_move() for the unlikely(). |
3935 | */ |
3936 | if (unlikely(!bfqd->nonrot_with_queueing && |
3937 | !RB_EMPTY_ROOT(&bfqq->sort_list))) |
3938 | bfq_pos_tree_add_move(bfqd, bfqq); |
3939 | } |
3940 | |
3941 | /* |
3942 | * All in-service entities must have been properly deactivated |
3943 | * or requeued before executing the next function, which |
3944 | * resets all in-service entities as no more in service. This |
3945 | * may cause bfqq to be freed. If this happens, the next |
3946 | * function returns true. |
3947 | */ |
3948 | return __bfq_bfqd_reset_in_service(bfqd); |
3949 | } |
3950 | |
3951 | /** |
3952 | * __bfq_bfqq_recalc_budget - try to adapt the budget to the @bfqq behavior. |
3953 | * @bfqd: device data. |
3954 | * @bfqq: queue to update. |
3955 | * @reason: reason for expiration. |
3956 | * |
3957 | * Handle the feedback on @bfqq budget at queue expiration. |
3958 | * See the body for detailed comments. |
3959 | */ |
3960 | static void __bfq_bfqq_recalc_budget(struct bfq_data *bfqd, |
3961 | struct bfq_queue *bfqq, |
3962 | enum bfqq_expiration reason) |
3963 | { |
3964 | struct request *next_rq; |
3965 | int budget, min_budget; |
3966 | |
3967 | min_budget = bfq_min_budget(bfqd); |
3968 | |
3969 | if (bfqq->wr_coeff == 1) |
3970 | budget = bfqq->max_budget; |
3971 | else /* |
3972 | * Use a constant, low budget for weight-raised queues, |
3973 | * to help achieve a low latency. Keep it slightly higher |
3974 | * than the minimum possible budget, to cause a little |
3975 | * bit fewer expirations. |
3976 | */ |
3977 | budget = 2 * min_budget; |
3978 | |
3979 | 	bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last budg %d, budg left %d", |
3980 | 		bfqq->entity.budget, bfq_bfqq_budget_left(bfqq)); |
3981 | 	bfq_log_bfqq(bfqd, bfqq, "recalc_budg: last max_budg %d, min budg %d", |
3982 | 		budget, bfq_min_budget(bfqd)); |
3983 | 	bfq_log_bfqq(bfqd, bfqq, "recalc_budg: sync %d, seeky %d", |
3984 | 		bfq_bfqq_sync(bfqq), BFQQ_SEEKY(bfqd->in_service_queue)); |
3985 | |
3986 | if (bfq_bfqq_sync(bfqq) && bfqq->wr_coeff == 1) { |
3987 | switch (reason) { |
3988 | /* |
3989 | * Caveat: in all the following cases we trade latency |
3990 | * for throughput. |
3991 | */ |
3992 | case BFQQE_TOO_IDLE: |
3993 | /* |
3994 | * This is the only case where we may reduce |
3995 | * the budget: if there is no request of the |
3996 | * process still waiting for completion, then |
3997 | * we assume (tentatively) that the timer has |
3998 | * expired because the batch of requests of |
3999 | * the process could have been served with a |
4000 | * smaller budget. Hence, betting that |
4001 | * the process will behave in the same way when it |
4002 | * becomes backlogged again, we reduce its |
4003 | * next budget. As long as we guess right, |
4004 | * this budget cut reduces the latency |
4005 | * experienced by the process. |
4006 | * |
4007 | * However, if there are still outstanding |
4008 | * requests, then the process may have not yet |
4009 | * issued its next request just because it is |
4010 | * still waiting for the completion of some of |
4011 | * the still outstanding ones. So in this |
4012 | * subcase we do not reduce its budget, on the |
4013 | * contrary we increase it to possibly boost |
4014 | * the throughput, as discussed in the |
4015 | * comments to the BUDGET_TIMEOUT case. |
4016 | */ |
4017 | if (bfqq->dispatched > 0) /* still outstanding reqs */ |
4018 | budget = min(budget * 2, bfqd->bfq_max_budget); |
4019 | else { |
4020 | if (budget > 5 * min_budget) |
4021 | budget -= 4 * min_budget; |
4022 | else |
4023 | budget = min_budget; |
4024 | } |
4025 | break; |
4026 | case BFQQE_BUDGET_TIMEOUT: |
4027 | /* |
4028 | * We double the budget here because it gives |
4029 | * the chance to boost the throughput if this |
4030 | * is not a seeky process (and has bumped into |
4031 | * this timeout because of, e.g., ZBR). |
4032 | */ |
4033 | budget = min(budget * 2, bfqd->bfq_max_budget); |
4034 | break; |
4035 | case BFQQE_BUDGET_EXHAUSTED: |
4036 | /* |
4037 | * The process still has backlog, and did not |
4038 | * let either the budget timeout or the disk |
4039 | * idling timeout expire. Hence it is not |
4040 | * seeky, has a short thinktime and may be |
4041 | * happy with a higher budget too. So |
4042 | * definitely increase the budget of this good |
4043 | * candidate to boost the disk throughput. |
4044 | */ |
4045 | budget = min(budget * 4, bfqd->bfq_max_budget); |
4046 | break; |
4047 | case BFQQE_NO_MORE_REQUESTS: |
4048 | /* |
4049 | * For queues that expire for this reason, it |
4050 | * is particularly important to keep the |
4051 | * budget close to the actual service they |
4052 | * need. Doing so reduces the timestamp |
4053 | * misalignment problem described in the |
4054 | * comments in the body of |
4055 | * __bfq_activate_entity. In fact, suppose |
4056 | * that a queue systematically expires for |
4057 | * BFQQE_NO_MORE_REQUESTS and presents a |
4058 | * new request in time to enjoy timestamp |
4059 | * back-shifting. The larger the budget of the |
4060 | * queue is with respect to the service the |
4061 | * queue actually requests in each service |
4062 | * slot, the more times the queue can be |
4063 | * reactivated with the same virtual finish |
4064 | * time. It follows that, even if this finish |
4065 | * time is pushed to the system virtual time |
4066 | * to reduce the consequent timestamp |
4067 | * misalignment, the queue unjustly enjoys for |
4068 | * many re-activations a lower finish time |
4069 | * than all newly activated queues. |
4070 | * |
4071 | * The service needed by bfqq is measured |
4072 | * quite precisely by bfqq->entity.service. |
4073 | * Since bfqq does not enjoy device idling, |
4074 | * bfqq->entity.service is equal to the number |
4075 | * of sectors that the process associated with |
4076 | * bfqq requested to read/write before waiting |
4077 | * for request completions, or blocking for |
4078 | * other reasons. |
4079 | */ |
4080 | budget = max_t(int, bfqq->entity.service, min_budget); |
4081 | break; |
4082 | default: |
4083 | return; |
4084 | } |
4085 | } else if (!bfq_bfqq_sync(bfqq)) { |
4086 | /* |
4087 | * Async queues get always the maximum possible |
4088 | * budget, as for them we do not care about latency |
4089 | * (in addition, their ability to dispatch is limited |
4090 | * by the charging factor). |
4091 | */ |
4092 | budget = bfqd->bfq_max_budget; |
4093 | } |
4094 | |
4095 | bfqq->max_budget = budget; |
4096 | |
4097 | if (bfqd->budgets_assigned >= bfq_stats_min_budgets && |
4098 | !bfqd->bfq_user_max_budget) |
4099 | bfqq->max_budget = min(bfqq->max_budget, bfqd->bfq_max_budget); |
4100 | |
4101 | /* |
4102 | * If there is still backlog, then assign a new budget, making |
4103 | * sure that it is large enough for the next request. Since |
4104 | * the finish time of bfqq must be kept in sync with the |
4105 | * budget, be sure to call __bfq_bfqq_expire() *after* this |
4106 | * update. |
4107 | * |
4108 | * If there is no backlog, then no need to update the budget; |
4109 | * it will be updated on the arrival of a new request. |
4110 | */ |
4111 | next_rq = bfqq->next_rq; |
4112 | if (next_rq) |
4113 | bfqq->entity.budget = max_t(unsigned long, bfqq->max_budget, |
4114 | bfq_serv_to_charge(next_rq, bfqq)); |
4115 | |
4116 | 	bfq_log_bfqq(bfqd, bfqq, "head sect: %u, new budget %d", |
4117 | next_rq ? blk_rq_sectors(next_rq) : 0, |
4118 | bfqq->entity.budget); |
4119 | } |
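
/*
 * A hypothetical numerical example of the feedback above (values are
 * illustrative only): for a sync, non-weight-raised queue with
 * min_budget = 32 sectors and a current max_budget of 512 sectors, an
 * expiration for BFQQE_TOO_IDLE with no request still in flight
 * lowers the next budget to 512 - 4 * 32 = 384 sectors; an expiration
 * for BFQQE_BUDGET_TIMEOUT doubles it (capped at
 * bfqd->bfq_max_budget), and one for BFQQE_BUDGET_EXHAUSTED
 * quadruples it, again within the same cap.
 */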
4120 | |
4121 | /* |
4122 | * Return true if the process associated with bfqq is "slow". The slow |
4123 | * flag is used, in addition to the budget timeout, to reduce the |
4124 | * amount of service provided to seeky processes, and thus reduce |
4125 | * their chances to lower the throughput. More details in the comments |
4126 | * on the function bfq_bfqq_expire(). |
4127 | * |
4128 | * An important observation is in order: as discussed in the comments |
4129 | * on the function bfq_update_peak_rate(), with devices with internal |
4130 | * queues, it is hard if ever possible to know when and for how long |
4131 | * an I/O request is processed by the device (apart from the trivial |
4132 | * I/O pattern where a new request is dispatched only after the |
4133 | * previous one has been completed). This makes it hard to evaluate |
4134 | * the real rate at which the I/O requests of each bfq_queue are |
4135 | * served. In fact, for an I/O scheduler like BFQ, serving a |
4136 | * bfq_queue means just dispatching its requests during its service |
4137 | * slot (i.e., until the budget of the queue is exhausted, or the |
4138 | * queue remains idle, or, finally, a timeout fires). But, during the |
4139 | * service slot of a bfq_queue, around 100 ms at most, the device may |
4140 | * be even still processing requests of bfq_queues served in previous |
4141 | * service slots. On the opposite end, the requests of the in-service |
4142 | * bfq_queue may be completed after the service slot of the queue |
4143 | * finishes. |
4144 | * |
4145 | * Anyway, unless more sophisticated solutions are used |
4146 | * (where possible), the sum of the sizes of the requests dispatched |
4147 | * during the service slot of a bfq_queue is probably the only |
4148 | * approximation available for the service received by the bfq_queue |
4149 | * during its service slot. And this sum is the quantity used in this |
4150 | * function to evaluate the I/O speed of a process. |
4151 | */ |
4152 | static bool bfq_bfqq_is_slow(struct bfq_data *bfqd, struct bfq_queue *bfqq, |
4153 | bool compensate, unsigned long *delta_ms) |
4154 | { |
4155 | ktime_t delta_ktime; |
4156 | u32 delta_usecs; |
4157 | bool slow = BFQQ_SEEKY(bfqq); /* if delta too short, use seekyness */ |
4158 | |
4159 | if (!bfq_bfqq_sync(bfqq)) |
4160 | return false; |
4161 | |
4162 | if (compensate) |
4163 | delta_ktime = bfqd->last_idling_start; |
4164 | else |
4165 | delta_ktime = ktime_get(); |
4166 | delta_ktime = ktime_sub(delta_ktime, bfqd->last_budget_start); |
4167 | 	delta_usecs = ktime_to_us(delta_ktime); |
4168 | |
4169 | /* don't use too short time intervals */ |
4170 | if (delta_usecs < 1000) { |
4171 | if (blk_queue_nonrot(bfqd->queue)) |
4172 | /* |
4173 | * give same worst-case guarantees as idling |
4174 | * for seeky |
4175 | */ |
4176 | *delta_ms = BFQ_MIN_TT / NSEC_PER_MSEC; |
4177 | else /* charge at least one seek */ |
4178 | *delta_ms = bfq_slice_idle / NSEC_PER_MSEC; |
4179 | |
4180 | return slow; |
4181 | } |
4182 | |
4183 | *delta_ms = delta_usecs / USEC_PER_MSEC; |
4184 | |
4185 | /* |
4186 | * Use only long (> 20ms) intervals to filter out excessive |
4187 | * spikes in service rate estimation. |
4188 | */ |
4189 | if (delta_usecs > 20000) { |
4190 | /* |
4191 | * Caveat for rotational devices: processes doing I/O |
4192 | * in the slower disk zones tend to be slow(er) even |
4193 | * if not seeky. In this respect, the estimated peak |
4194 | * rate is likely to be an average over the disk |
4195 | * surface. Accordingly, to not be too harsh with |
4196 | * unlucky processes, a process is deemed slow only if |
4197 | * its rate has been lower than half of the estimated |
4198 | * peak rate. |
4199 | */ |
4200 | slow = bfqq->entity.service < bfqd->bfq_max_budget / 2; |
4201 | } |
4202 | |
4203 | 	bfq_log_bfqq(bfqd, bfqq, "bfq_bfqq_is_slow: slow %d", slow); |
4204 | |
4205 | return slow; |
4206 | } |
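
/*
 * Illustrative example (hypothetical values): if 25000 us elapsed
 * since the budget was assigned, both the 1000 us and the 20000 us
 * thresholds above are passed; if in that interval bfqq received 100
 * sectors of service while bfqd->bfq_max_budget is 400 sectors, then
 * 100 < 400 / 2 and the queue is deemed slow. Had only 15000 us
 * elapsed, the rate-based check would be skipped and slowness would
 * simply fall back to the initial seekiness flag.
 */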
4207 | |
4208 | /* |
4209 | * To be deemed as soft real-time, an application must meet two |
4210 | * requirements. First, the application must not require an average |
4211 | * bandwidth higher than the approximate bandwidth required to playback or |
4212 | * record a compressed high-definition video. |
4213 | * The next function is invoked on the completion of the last request of a |
4214 | * batch, to compute the next-start time instant, soft_rt_next_start, such |
4215 | * that, if the next request of the application does not arrive before |
4216 | * soft_rt_next_start, then the above requirement on the bandwidth is met. |
4217 | * |
4218 | * The second requirement is that the request pattern of the application is |
4219 | * isochronous, i.e., that, after issuing a request or a batch of requests, |
4220 | * the application stops issuing new requests until all its pending requests |
4221 | * have been completed. After that, the application may issue a new batch, |
4222 | * and so on. |
4223 | * For this reason the next function is invoked to compute |
4224 | * soft_rt_next_start only for applications that meet this requirement, |
4225 | * whereas soft_rt_next_start is set to infinity for applications that do |
4226 | * not. |
4227 | * |
4228 | * Unfortunately, even a greedy (i.e., I/O-bound) application may |
4229 | * happen to meet, occasionally or systematically, both the above |
4230 | * bandwidth and isochrony requirements. This may happen at least in |
4231 | * the following circumstances. First, if the CPU load is high. The |
4232 | * application may stop issuing requests while the CPUs are busy |
4233 | * serving other processes, then restart, then stop again for a while, |
4234 | * and so on. The other circumstances are related to the storage |
4235 | * device: the storage device is highly loaded or reaches a low-enough |
4236 | * throughput with the I/O of the application (e.g., because the I/O |
4237 | * is random and/or the device is slow). In all these cases, the |
4238 | * I/O of the application may be simply slowed down enough to meet |
4239 | * the bandwidth and isochrony requirements. To reduce the probability |
4240 | * that greedy applications are deemed as soft real-time in these |
4241 | * corner cases, a further rule is used in the computation of |
4242 | * soft_rt_next_start: the return value of this function is forced to |
4243 | * be higher than the maximum between the following two quantities. |
4244 | * |
4245 | * (a) Current time plus: (1) the maximum time for which the arrival |
4246 | * of a request is waited for when a sync queue becomes idle, |
4247 | * namely bfqd->bfq_slice_idle, and (2) a few extra jiffies. We |
4248 | * postpone for a moment the reason for adding a few extra |
4249 | * jiffies; we get back to it after next item (b). Lower-bounding |
4250 | * the return value of this function with the current time plus |
4251 | * bfqd->bfq_slice_idle tends to filter out greedy applications, |
4252 | * because the latter issue their next request as soon as possible |
4253 | * after the last one has been completed. In contrast, a soft |
4254 | * real-time application spends some time processing data, after a |
4255 | * batch of its requests has been completed. |
4256 | * |
4257 | * (b) Current value of bfqq->soft_rt_next_start. As pointed out |
4258 | * above, greedy applications may happen to meet both the |
4259 | * bandwidth and isochrony requirements under heavy CPU or |
4260 | * storage-device load. In more detail, in these scenarios, these |
4261 | * applications happen, only for limited time periods, to do I/O |
4262 | * slowly enough to meet all the requirements described so far, |
4263 | * including the filtering in above item (a). These slow-speed |
4264 | * time intervals are usually interspersed between other time |
4265 | * intervals during which these applications do I/O at a very high |
4266 | * speed. Fortunately, exactly because of the high speed of the |
4267 | * I/O in the high-speed intervals, the values returned by this |
4268 | * function happen to be so high, near the end of any such |
4269 | * high-speed interval, to be likely to fall *after* the end of |
4270 | * the low-speed time interval that follows. These high values are |
4271 | * stored in bfqq->soft_rt_next_start after each invocation of |
4272 | * this function. As a consequence, if the last value of |
4273 | * bfqq->soft_rt_next_start is constantly used to lower-bound the |
4274 | * next value that this function may return, then, from the very |
4275 | * beginning of a low-speed interval, bfqq->soft_rt_next_start is |
4276 | * likely to be constantly kept so high that any I/O request |
4277 | * issued during the low-speed interval is considered as arriving |
4278 | * too soon for the application to be deemed as soft |
4279 | * real-time. Then, in the high-speed interval that follows, the |
4280 | * application will not be deemed as soft real-time, just because |
4281 | * it will do I/O at a high speed. And so on. |
4282 | * |
4283 | * Getting back to the filtering in item (a), in the following two |
4284 | * cases this filtering might be easily passed by a greedy |
4285 | * application, if the reference quantity was just |
4286 | * bfqd->bfq_slice_idle: |
4287 | * 1) HZ is so low that the duration of a jiffy is comparable to or |
4288 | * higher than bfqd->bfq_slice_idle. This happens, e.g., on slow |
4289 | * devices with HZ=100. The time granularity may be so coarse |
4290 | * that the approximation, in jiffies, of bfqd->bfq_slice_idle |
4291 | * is rather lower than the exact value. |
4292 | * 2) jiffies, instead of increasing at a constant rate, may stop increasing |
4293 | * for a while, then suddenly 'jump' by several units to recover the lost |
4294 | * increments. This seems to happen, e.g., inside virtual machines. |
4295 | * To address this issue, in the filtering in (a) we do not use as a |
4296 | * reference time interval just bfqd->bfq_slice_idle, but |
4297 | * bfqd->bfq_slice_idle plus a few jiffies. In particular, we add the |
4298 | * minimum number of jiffies for which the filter seems to be quite |
4299 | * precise also in embedded systems and KVM/QEMU virtual machines. |
4300 | */ |
4301 | static unsigned long bfq_bfqq_softrt_next_start(struct bfq_data *bfqd, |
4302 | struct bfq_queue *bfqq) |
4303 | { |
4304 | return max3(bfqq->soft_rt_next_start, |
4305 | bfqq->last_idle_bklogged + |
4306 | HZ * bfqq->service_from_backlogged / |
4307 | bfqd->bfq_wr_max_softrt_rate, |
4308 | jiffies + nsecs_to_jiffies(bfqq->bfqd->bfq_slice_idle) + 4); |
4309 | } |
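
/*
 * A hypothetical instance of the bandwidth bound above (the numbers
 * are illustrative, not the actual defaults): with HZ = 250,
 * bfq_wr_max_softrt_rate = 7000 sectors/sec and a batch that received
 * service_from_backlogged = 1400 sectors, the bandwidth-based term
 * evaluates to last_idle_bklogged + 250 * 1400 / 7000 =
 * last_idle_bklogged + 50 jiffies, i.e. 200 ms: a next request
 * arriving earlier than that would imply a higher bandwidth than the
 * soft real-time threshold.
 */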
4310 | |
4311 | /** |
4312 | * bfq_bfqq_expire - expire a queue. |
4313 | * @bfqd: device owning the queue. |
4314 | * @bfqq: the queue to expire. |
4315 | * @compensate: if true, compensate for the time spent idling. |
4316 | * @reason: the reason causing the expiration. |
4317 | * |
4318 | * If the process associated with bfqq does slow I/O (e.g., because it |
4319 | * issues random requests), we charge bfqq with the time it has been |
4320 | * in service instead of the service it has received (see |
4321 | * bfq_bfqq_charge_time for details on how this goal is achieved). As |
4322 | * a consequence, bfqq will typically get higher timestamps upon |
4323 | * reactivation, and hence it will be rescheduled as if it had |
4324 | * received more service than what it has actually received. In the |
4325 | * end, bfqq receives less service in proportion to how slowly its |
4326 | * associated process consumes its budgets (and hence how seriously it |
4327 | * tends to lower the throughput). In addition, this time-charging |
4328 | * strategy guarantees time fairness among slow processes. In |
4329 | * contrast, if the process associated with bfqq is not slow, we |
4330 | * charge bfqq exactly with the service it has received. |
4331 | * |
4332 | * Charging time to the first type of queues and the exact service to |
4333 | * the other has the effect of using the WF2Q+ policy to schedule the |
4334 | * former on a timeslice basis, without violating service domain |
4335 | * guarantees among the latter. |
4336 | */ |
4337 | void bfq_bfqq_expire(struct bfq_data *bfqd, |
4338 | struct bfq_queue *bfqq, |
4339 | bool compensate, |
4340 | enum bfqq_expiration reason) |
4341 | { |
4342 | bool slow; |
4343 | unsigned long delta = 0; |
4344 | struct bfq_entity *entity = &bfqq->entity; |
4345 | |
4346 | /* |
4347 | * Check whether the process is slow (see bfq_bfqq_is_slow). |
4348 | */ |
4349 | 	slow = bfq_bfqq_is_slow(bfqd, bfqq, compensate, &delta); |
4350 | |
4351 | /* |
4352 | * As above explained, charge slow (typically seeky) and |
4353 | * timed-out queues with the time and not the service |
4354 | * received, to favor sequential workloads. |
4355 | * |
4356 | * Processes doing I/O in the slower disk zones will tend to |
4357 | * be slow(er) even if not seeky. Therefore, since the |
4358 | * estimated peak rate is actually an average over the disk |
4359 | * surface, these processes may timeout just for bad luck. To |
4360 | * avoid punishing them, do not charge time to processes that |
4361 | * succeeded in consuming at least 2/3 of their budget. This |
4362 | * allows BFQ to preserve enough elasticity to still perform |
4363 | * bandwidth, and not time, distribution with little unlucky |
4364 | * or quasi-sequential processes. |
4365 | */ |
4366 | if (bfqq->wr_coeff == 1 && |
4367 | (slow || |
4368 | (reason == BFQQE_BUDGET_TIMEOUT && |
4369 | bfq_bfqq_budget_left(bfqq) >= entity->budget / 3))) |
4370 | 		bfq_bfqq_charge_time(bfqd, bfqq, delta); |
4371 | |
4372 | if (bfqd->low_latency && bfqq->wr_coeff == 1) |
4373 | bfqq->last_wr_start_finish = jiffies; |
4374 | |
4375 | if (bfqd->low_latency && bfqd->bfq_wr_max_softrt_rate > 0 && |
4376 | RB_EMPTY_ROOT(&bfqq->sort_list)) { |
4377 | /* |
4378 | * If we get here, and there are no outstanding |
4379 | * requests, then the request pattern is isochronous |
4380 | * (see the comments on the function |
4381 | * bfq_bfqq_softrt_next_start()). Therefore we can |
4382 | * compute soft_rt_next_start. |
4383 | * |
4384 | * If, instead, the queue still has outstanding |
4385 | * requests, then we have to wait for the completion |
4386 | * of all the outstanding requests to discover whether |
4387 | * the request pattern is actually isochronous. |
4388 | */ |
4389 | if (bfqq->dispatched == 0) |
4390 | bfqq->soft_rt_next_start = |
4391 | bfq_bfqq_softrt_next_start(bfqd, bfqq); |
4392 | else if (bfqq->dispatched > 0) { |
4393 | /* |
4394 | * Schedule an update of soft_rt_next_start to when |
4395 | * the task may be discovered to be isochronous. |
4396 | */ |
4397 | bfq_mark_bfqq_softrt_update(bfqq); |
4398 | } |
4399 | } |
4400 | |
4401 | bfq_log_bfqq(bfqd, bfqq, |
4402 | 		"expire (%d, slow %d, num_disp %d, short_ttime %d)", reason, |
4403 | slow, bfqq->dispatched, bfq_bfqq_has_short_ttime(bfqq)); |
4404 | |
4405 | /* |
4406 | * bfqq expired, so no total service time needs to be computed |
4407 | * any longer: reset state machine for measuring total service |
4408 | * times. |
4409 | */ |
4410 | bfqd->rqs_injected = bfqd->wait_dispatch = false; |
4411 | bfqd->waited_rq = NULL; |
4412 | |
4413 | /* |
4414 | * Increase, decrease or leave budget unchanged according to |
4415 | * reason. |
4416 | */ |
4417 | __bfq_bfqq_recalc_budget(bfqd, bfqq, reason); |
4418 | if (__bfq_bfqq_expire(bfqd, bfqq, reason)) |
4419 | /* bfqq is gone, no more actions on it */ |
4420 | return; |
4421 | |
4422 | /* mark bfqq as waiting a request only if a bic still points to it */ |
4423 | if (!bfq_bfqq_busy(bfqq) && |
4424 | reason != BFQQE_BUDGET_TIMEOUT && |
4425 | reason != BFQQE_BUDGET_EXHAUSTED) { |
4426 | bfq_mark_bfqq_non_blocking_wait_rq(bfqq); |
4427 | /* |
4428 | * Not setting service to 0, because, if the next rq |
4429 | * arrives in time, the queue will go on receiving |
4430 | * service with this same budget (as if it never expired) |
4431 | */ |
4432 | } else |
4433 | entity->service = 0; |
4434 | |
4435 | /* |
4436 | * Reset the received-service counter for every parent entity. |
4437 | * Differently from what happens with bfqq->entity.service, |
4438 | * the resetting of this counter never needs to be postponed |
4439 | * for parent entities. In fact, in case bfqq may have a |
4440 | * chance to go on being served using the last, partially |
4441 | * consumed budget, bfqq->entity.service needs to be kept, |
4442 | * because if bfqq then actually goes on being served using |
4443 | * the same budget, the last value of bfqq->entity.service is |
4444 | * needed to properly decrement bfqq->entity.budget by the |
4445 | * portion already consumed. In contrast, it is not necessary |
4446 | * to keep entity->service for parent entities too, because |
4447 | * the bubble up of the new value of bfqq->entity.budget will |
4448 | * make sure that the budgets of parent entities are correct, |
4449 | * even in case bfqq and thus parent entities go on receiving |
4450 | * service with the same budget. |
4451 | */ |
4452 | entity = entity->parent; |
4453 | for_each_entity(entity) |
4454 | entity->service = 0; |
4455 | } |
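
/*
 * Illustrative example of the time-charging rule above (hypothetical
 * values): a non-weight-raised queue expires for BFQQE_BUDGET_TIMEOUT
 * after consuming only 100 sectors of a 600-sector budget. Since the
 * 500 sectors left are more than one third of the budget, the queue
 * is charged for the time it occupied the device rather than for the
 * 100 sectors actually received, which pushes its next timestamps
 * further into the future.
 */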
4456 | |
4457 | /* |
4458 | * Budget timeout is not implemented through a dedicated timer, but |
4459 | * just checked on request arrivals and completions, as well as on |
4460 | * idle timer expirations. |
4461 | */ |
4462 | static bool bfq_bfqq_budget_timeout(struct bfq_queue *bfqq) |
4463 | { |
4464 | return time_is_before_eq_jiffies(bfqq->budget_timeout); |
4465 | } |
4466 | |
4467 | /* |
4468 | * If we expire a queue that is actively waiting (i.e., with the |
4469 | * device idled) for the arrival of a new request, then we may incur |
4470 | * the timestamp misalignment problem described in the body of the |
4471 | * function __bfq_activate_entity. Hence we return true only if this |
4472 | * condition does not hold, or if the queue is slow enough to deserve |
4473 | * only to be kicked off for preserving a high throughput. |
4474 | */ |
4475 | static bool bfq_may_expire_for_budg_timeout(struct bfq_queue *bfqq) |
4476 | { |
4477 | bfq_log_bfqq(bfqq->bfqd, bfqq, |
4478 | 		"may_budget_timeout: wait_request %d left %d timeout %d", |
4479 | bfq_bfqq_wait_request(bfqq), |
4480 | bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3, |
4481 | bfq_bfqq_budget_timeout(bfqq)); |
4482 | |
4483 | return (!bfq_bfqq_wait_request(bfqq) || |
4484 | bfq_bfqq_budget_left(bfqq) >= bfqq->entity.budget / 3) |
4485 | && |
4486 | bfq_bfqq_budget_timeout(bfqq); |
4487 | } |
4488 | |
4489 | static bool idling_boosts_thr_without_issues(struct bfq_data *bfqd, |
4490 | struct bfq_queue *bfqq) |
4491 | { |
4492 | bool rot_without_queueing = |
4493 | !blk_queue_nonrot(bfqd->queue) && !bfqd->hw_tag, |
4494 | bfqq_sequential_and_IO_bound, |
4495 | idling_boosts_thr; |
4496 | |
4497 | /* No point in idling for bfqq if it won't get requests any longer */ |
4498 | if (unlikely(!bfqq_process_refs(bfqq))) |
4499 | return false; |
4500 | |
4501 | bfqq_sequential_and_IO_bound = !BFQQ_SEEKY(bfqq) && |
4502 | bfq_bfqq_IO_bound(bfqq) && bfq_bfqq_has_short_ttime(bfqq); |
4503 | |
4504 | /* |
4505 | * The next variable takes into account the cases where idling |
4506 | * boosts the throughput. |
4507 | * |
4508 | * The value of the variable is computed considering, first, that |
4509 | * idling is virtually always beneficial for the throughput if: |
4510 | * (a) the device is not NCQ-capable and rotational, or |
4511 | * (b) regardless of the presence of NCQ, the device is rotational and |
4512 | * the request pattern for bfqq is I/O-bound and sequential, or |
4513 | * (c) regardless of whether it is rotational, the device is |
4514 | * not NCQ-capable and the request pattern for bfqq is |
4515 | * I/O-bound and sequential. |
4516 | * |
4517 | * Secondly, and in contrast to the above item (b), idling an |
4518 | * NCQ-capable flash-based device would not boost the |
4519 | * throughput even with sequential I/O; rather it would lower |
4520 | * the throughput in proportion to how fast the device |
4521 | * is. Accordingly, the next variable is true if any of the |
4522 | * above conditions (a), (b) or (c) is true, and, in |
4523 | * particular, happens to be false if bfqd is an NCQ-capable |
4524 | * flash-based device. |
4525 | */ |
4526 | idling_boosts_thr = rot_without_queueing || |
4527 | ((!blk_queue_nonrot(bfqd->queue) || !bfqd->hw_tag) && |
4528 | bfqq_sequential_and_IO_bound); |
4529 | |
4530 | /* |
4531 | * The return value of this function is equal to that of |
4532 | * idling_boosts_thr, unless a special case holds. In this |
4533 | * special case, described below, idling may cause problems to |
4534 | * weight-raised queues. |
4535 | * |
4536 | * When the request pool is saturated (e.g., in the presence |
4537 | * of write hogs), if the processes associated with |
4538 | * non-weight-raised queues ask for requests at a lower rate, |
4539 | * then processes associated with weight-raised queues have a |
4540 | * higher probability to get a request from the pool |
4541 | * immediately (or at least soon) when they need one. Thus |
4542 | * they have a higher probability to actually get a fraction |
4543 | * of the device throughput proportional to their high |
4544 | * weight. This is especially true with NCQ-capable drives, |
4545 | * which enqueue several requests in advance, and further |
4546 | * reorder internally-queued requests. |
4547 | * |
4548 | * For this reason, we force to false the return value if |
4549 | * there are weight-raised busy queues. In this case, and if |
4550 | * bfqq is not weight-raised, this guarantees that the device |
4551 | * is not idled for bfqq (if, instead, bfqq is weight-raised, |
4552 | * then idling will be guaranteed by another variable, see |
4553 | * below). Combined with the timestamping rules of BFQ (see |
4554 | * [1] for details), this behavior causes bfqq, and hence any |
4555 | * sync non-weight-raised queue, to get a lower number of |
4556 | * requests served, and thus to ask for a lower number of |
4557 | * requests from the request pool, before the busy |
4558 | * weight-raised queues get served again. This often mitigates |
4559 | * starvation problems in the presence of heavy write |
4560 | * workloads and NCQ, thereby guaranteeing a higher |
4561 | * application and system responsiveness in these hostile |
4562 | * scenarios. |
4563 | */ |
4564 | return idling_boosts_thr && |
4565 | bfqd->wr_busy_queues == 0; |
4566 | } |
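
/*
 * Illustrative scenario (hypothetical setup): on a rotational drive
 * with NCQ (hw_tag set), a sequential, I/O-bound, short-think-time
 * bfqq makes condition (b) above hold, so idling_boosts_thr is true;
 * yet, if some other busy queue is currently weight-raised
 * (wr_busy_queues > 0), the function still returns false, so that the
 * weight-raised queue can obtain requests from the pool more easily.
 */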
4567 | |
4568 | /* |
4569 | * For a queue that becomes empty, device idling is allowed only if |
4570 | * this function returns true for that queue. As a consequence, since |
4571 | * device idling plays a critical role for both throughput boosting |
4572 | * and service guarantees, the return value of this function plays a |
4573 | * critical role as well. |
4574 | * |
4575 | * In a nutshell, this function returns true only if idling is |
4576 | * beneficial for throughput or, even if detrimental for throughput, |
4577 | * idling is however necessary to preserve service guarantees (low |
4578 | * latency, desired throughput distribution, ...). In particular, on |
4579 | * NCQ-capable devices, this function tries to return false, so as to |
4580 | * help keep the drives' internal queues full, whenever this helps the |
4581 | * device boost the throughput without causing any service-guarantee |
4582 | * issue. |
4583 | * |
4584 | * Most of the issues taken into account to get the return value of |
4585 | * this function are not trivial. We discuss these issues in the two |
4586 | * functions providing the main pieces of information needed by this |
4587 | * function. |
4588 | */ |
4589 | static bool bfq_better_to_idle(struct bfq_queue *bfqq) |
4590 | { |
4591 | struct bfq_data *bfqd = bfqq->bfqd; |
4592 | bool idling_boosts_thr_with_no_issue, idling_needed_for_service_guar; |
4593 | |
4594 | /* No point in idling for bfqq if it won't get requests any longer */ |
4595 | if (unlikely(!bfqq_process_refs(bfqq))) |
4596 | return false; |
4597 | |
4598 | if (unlikely(bfqd->strict_guarantees)) |
4599 | return true; |
4600 | |
4601 | /* |
4602 | * Idling is performed only if slice_idle > 0. In addition, we |
4603 | * do not idle if |
4604 | * (a) bfqq is async |
4605 | * (b) bfqq is in the idle io prio class: in this case we do |
4606 | * not idle because we want to minimize the bandwidth that |
4607 | * queues in this class can steal to higher-priority queues |
4608 | */ |
4609 | if (bfqd->bfq_slice_idle == 0 || !bfq_bfqq_sync(bfqq) || |
4610 | bfq_class_idle(bfqq)) |
4611 | return false; |
4612 | |
4613 | idling_boosts_thr_with_no_issue = |
4614 | idling_boosts_thr_without_issues(bfqd, bfqq); |
4615 | |
4616 | idling_needed_for_service_guar = |
4617 | idling_needed_for_service_guarantees(bfqd, bfqq); |
4618 | |
4619 | /* |
4620 | * We have now the two components we need to compute the |
4621 | * return value of the function, which is true only if idling |
4622 | * either boosts the throughput (without issues), or is |
4623 | * necessary to preserve service guarantees. |
4624 | */ |
4625 | return idling_boosts_thr_with_no_issue || |
4626 | idling_needed_for_service_guar; |
4627 | } |
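
/*
 * Illustrative scenario (hypothetical setup): on a fast NCQ SSD with
 * no asymmetry among queues, both components above are typically
 * false, so the drive is not idled and its internal queue is kept
 * full; if instead the busy queues have different weights, the second
 * component becomes true and idling is performed even though it does
 * not boost throughput.
 */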
4628 | |
4629 | /* |
4630 | * If the in-service queue is empty but the function bfq_better_to_idle |
4631 | * returns true, then: |
4632 | * 1) the queue must remain in service and cannot be expired, and |
4633 | * 2) the device must be idled to wait for the possible arrival of a new |
4634 | * request for the queue. |
4635 | * See the comments on the function bfq_better_to_idle for the reasons |
4636 | * why performing device idling is the best choice to boost the throughput |
4637 | * and preserve service guarantees when bfq_better_to_idle itself |
4638 | * returns true. |
4639 | */ |
4640 | static bool bfq_bfqq_must_idle(struct bfq_queue *bfqq) |
4641 | { |
4642 | return RB_EMPTY_ROOT(&bfqq->sort_list) && bfq_better_to_idle(bfqq); |
4643 | } |
4644 | |
4645 | /* |
4646 | * This function chooses the queue from which to pick the next extra |
4647 | * I/O request to inject, if it finds a compatible queue. See the |
4648 | * comments on bfq_update_inject_limit() for details on the injection |
4649 | * mechanism, and for the definitions of the quantities mentioned |
4650 | * below. |
4651 | */ |
4652 | static struct bfq_queue * |
4653 | bfq_choose_bfqq_for_injection(struct bfq_data *bfqd) |
4654 | { |
4655 | struct bfq_queue *bfqq, *in_serv_bfqq = bfqd->in_service_queue; |
4656 | unsigned int limit = in_serv_bfqq->inject_limit; |
4657 | int i; |
4658 | |
4659 | /* |
4660 | * If |
4661 | * - bfqq is not weight-raised and therefore does not carry |
4662 | * time-critical I/O, |
4663 | * or |
4664 | * - regardless of whether bfqq is weight-raised, bfqq has |
4665 | * however a long think time, during which it can absorb the |
4666 | * effect of an appropriate number of extra I/O requests |
4667 | * from other queues (see bfq_update_inject_limit for |
4668 | * details on the computation of this number); |
4669 | * then injection can be performed without restrictions. |
4670 | */ |
4671 | bool in_serv_always_inject = in_serv_bfqq->wr_coeff == 1 || |
4672 | 		!bfq_bfqq_has_short_ttime(in_serv_bfqq); |
4673 | |
4674 | /* |
4675 | * If |
4676 | * - the baseline total service time could not be sampled yet, |
4677 | * so the inject limit happens to be still 0, and |
4678 | * - a lot of time has elapsed since the plugging of I/O |
4679 | * dispatching started, so drive speed is being wasted |
4680 | * significantly; |
4681 | * then temporarily raise inject limit to one request. |
4682 | */ |
4683 | if (limit == 0 && in_serv_bfqq->last_serv_time_ns == 0 && |
4684 | 	    bfq_bfqq_wait_request(in_serv_bfqq) && |
4685 | time_is_before_eq_jiffies(bfqd->last_idling_start_jiffies + |
4686 | bfqd->bfq_slice_idle) |
4687 | ) |
4688 | limit = 1; |
4689 | |
4690 | if (bfqd->tot_rq_in_driver >= limit) |
4691 | return NULL; |
4692 | |
4693 | /* |
4694 | * Linear search of the source queue for injection; but, with |
4695 | * a high probability, very few steps are needed to find a |
4696 | * candidate queue, i.e., a queue with enough budget left for |
4697 | * its next request. In fact: |
4698 | * - BFQ dynamically updates the budget of every queue so as |
4699 | * to accommodate the expected backlog of the queue; |
4700 | * - if a queue gets all its requests dispatched as injected |
4701 | * service, then the queue is removed from the active list |
4702 | * (and re-added only if it gets new requests, but then it |
4703 | * is assigned again enough budget for its new backlog). |
4704 | */ |
4705 | for (i = 0; i < bfqd->num_actuators; i++) { |
4706 | list_for_each_entry(bfqq, &bfqd->active_list[i], bfqq_list) |
4707 | if (!RB_EMPTY_ROOT(&bfqq->sort_list) && |
4708 | (in_serv_always_inject || bfqq->wr_coeff > 1) && |
4709 | 		    bfq_serv_to_charge(bfqq->next_rq, bfqq) <= |
4710 | bfq_bfqq_budget_left(bfqq)) { |
4711 | /* |
4712 | * Allow for only one large in-flight request |
4713 | * on non-rotational devices, for the |
4714 | * following reason. On non-rotational drives, |
4715 | * large requests take much longer than |
4716 | * smaller requests to be served. In addition, |
4717 | * the drive prefers to serve large requests |
4718 | * w.r.t. small ones, if it can choose. So, |
4719 | * having more than one large request queued |
4720 | * in the drive may easily make the next first |
4721 | * request of the in-service queue wait so |
4722 | * long as to break bfqq's service guarantees. On |
4723 | * the bright side, large requests let the |
4724 | * drive reach a very high throughput, even if |
4725 | * there is only one in-flight large request |
4726 | * at a time. |
4727 | */ |
4728 | if (blk_queue_nonrot(bfqd->queue) && |
4729 | 			    blk_rq_sectors(bfqq->next_rq) >= |
4730 | BFQQ_SECT_THR_NONROT && |
4731 | bfqd->tot_rq_in_driver >= 1) |
4732 | continue; |
4733 | else { |
4734 | bfqd->rqs_injected = true; |
4735 | return bfqq; |
4736 | } |
4737 | } |
4738 | } |
4739 | |
4740 | return NULL; |
4741 | } |
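
/*
 * Illustrative example (hypothetical values): the in-service queue is
 * not weight-raised, so in_serv_always_inject is true; its
 * inject_limit is 2 and only one request is in the drive, so the scan
 * above may run. The first active queue whose next request fits in
 * its remaining budget is returned, unless that request is large
 * (>= BFQQ_SECT_THR_NONROT sectors) on a non-rotational drive that
 * already has a request in flight, in which case the scan moves on.
 */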
4742 | |
4743 | static struct bfq_queue * |
4744 | bfq_find_active_bfqq_for_actuator(struct bfq_data *bfqd, int idx) |
4745 | { |
4746 | struct bfq_queue *bfqq; |
4747 | |
4748 | if (bfqd->in_service_queue && |
4749 | bfqd->in_service_queue->actuator_idx == idx) |
4750 | return bfqd->in_service_queue; |
4751 | |
4752 | list_for_each_entry(bfqq, &bfqd->active_list[idx], bfqq_list) { |
4753 | if (!RB_EMPTY_ROOT(&bfqq->sort_list) && |
4754 | 		    bfq_serv_to_charge(bfqq->next_rq, bfqq) <= |
4755 | bfq_bfqq_budget_left(bfqq)) { |
4756 | return bfqq; |
4757 | } |
4758 | } |
4759 | |
4760 | return NULL; |
4761 | } |
4762 | |
4763 | /* |
4764 | * Perform a linear scan of each actuator, until an actuator is found |
4765 | * for which the following three conditions hold: the load of the |
4766 | * actuator is below the threshold (see comments on |
4767 | * actuator_load_threshold for details) and lower than that of the |
4768 | * next actuator (comments on this extra condition below), and there |
4769 | * is a queue that contains I/O for that actuator. On success, return |
4770 | * that queue. |
4771 | * |
4772 | * Performing a plain linear scan entails a prioritization among |
4773 | * actuators. The extra condition above breaks this prioritization and |
4774 | * tends to distribute injection uniformly across actuators. |
4775 | */ |
4776 | static struct bfq_queue * |
4777 | bfq_find_bfqq_for_underused_actuator(struct bfq_data *bfqd) |
4778 | { |
4779 | int i; |
4780 | |
4781 | for (i = 0 ; i < bfqd->num_actuators; i++) { |
4782 | if (bfqd->rq_in_driver[i] < bfqd->actuator_load_threshold && |
4783 | (i == bfqd->num_actuators - 1 || |
4784 | bfqd->rq_in_driver[i] < bfqd->rq_in_driver[i+1])) { |
4785 | struct bfq_queue *bfqq = |
4786 | 				bfq_find_active_bfqq_for_actuator(bfqd, i); |
4787 | |
4788 | if (bfqq) |
4789 | return bfqq; |
4790 | } |
4791 | } |
4792 | |
4793 | return NULL; |
4794 | } |
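
/*
 * Illustrative example (hypothetical values): with two actuators, an
 * actuator_load_threshold of 4, rq_in_driver[0] == 3 and
 * rq_in_driver[1] == 1, actuator 0 is below the threshold but not
 * less loaded than actuator 1, so the scan skips it and injection is
 * attempted on actuator 1 first: this is how the extra condition
 * spreads injection across actuators instead of always favouring the
 * lowest index.
 */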
4795 | |
4796 | |
4797 | /* |
4798 | * Select a queue for service. If we have a current queue in service, |
4799 | * check whether to continue servicing it, or retrieve and set a new one. |
4800 | */ |
4801 | static struct bfq_queue *bfq_select_queue(struct bfq_data *bfqd) |
4802 | { |
4803 | struct bfq_queue *bfqq, *inject_bfqq; |
4804 | struct request *next_rq; |
4805 | enum bfqq_expiration reason = BFQQE_BUDGET_TIMEOUT; |
4806 | |
4807 | bfqq = bfqd->in_service_queue; |
4808 | if (!bfqq) |
4809 | goto new_queue; |
4810 | |
4811 | 	bfq_log_bfqq(bfqd, bfqq, "select_queue: already in-service queue"); |
4812 | |
4813 | /* |
4814 | * Do not expire bfqq for budget timeout if bfqq may be about |
4815 | * to enjoy device idling. The reason why, in this case, we |
4816 | * prevent bfqq from expiring is the same as in the comments |
4817 | * on the case where bfq_bfqq_must_idle() returns true, in |
4818 | * bfq_completed_request(). |
4819 | */ |
4820 | if (bfq_may_expire_for_budg_timeout(bfqq) && |
4821 | !bfq_bfqq_must_idle(bfqq)) |
4822 | goto expire; |
4823 | |
4824 | check_queue: |
4825 | /* |
4826 | * If some actuator is underutilized, but the in-service |
4827 | * queue does not contain I/O for that actuator, then try to |
4828 | * inject I/O for that actuator. |
4829 | */ |
4830 | inject_bfqq = bfq_find_bfqq_for_underused_actuator(bfqd); |
4831 | if (inject_bfqq && inject_bfqq != bfqq) |
4832 | return inject_bfqq; |
4833 | |
4834 | /* |
4835 | * This loop is rarely executed more than once. Even when it |
4836 | * happens, it is much more convenient to re-execute this loop |
4837 | * than to return NULL and trigger a new dispatch to get a |
4838 | * request served. |
4839 | */ |
4840 | next_rq = bfqq->next_rq; |
4841 | /* |
4842 | * If bfqq has requests queued and it has enough budget left to |
4843 | * serve them, keep the queue, otherwise expire it. |
4844 | */ |
4845 | if (next_rq) { |
4846 | 		if (bfq_serv_to_charge(next_rq, bfqq) > |
4847 | bfq_bfqq_budget_left(bfqq)) { |
4848 | /* |
4849 | * Expire the queue for budget exhaustion, |
4850 | * which makes sure that the next budget is |
4851 | * enough to serve the next request, even if |
4852 | * it comes from the fifo expired path. |
4853 | */ |
4854 | reason = BFQQE_BUDGET_EXHAUSTED; |
4855 | goto expire; |
4856 | } else { |
4857 | /* |
4858 | * The idle timer may be pending because we may |
4859 | * not disable disk idling even when a new request |
4860 | * arrives. |
4861 | */ |
4862 | if (bfq_bfqq_wait_request(bfqq)) { |
4863 | /* |
4864 | * If we get here: 1) at least a new request |
4865 | * has arrived but we have not disabled the |
4866 | * timer because the request was too small, |
4867 | * 2) then the block layer has unplugged |
4868 | * the device, causing the dispatch to be |
4869 | * invoked. |
4870 | * |
4871 | * Since the device is unplugged, now the |
4872 | * requests are probably large enough to |
4873 | * provide a reasonable throughput. |
4874 | * So we disable idling. |
4875 | */ |
4876 | bfq_clear_bfqq_wait_request(bfqq); |
4877 | 				hrtimer_try_to_cancel(&bfqd->idle_slice_timer); |
4878 | } |
4879 | goto keep_queue; |
4880 | } |
4881 | } |
4882 | |
4883 | /* |
4884 | * No requests pending. However, if the in-service queue is idling |
4885 | * for a new request, or has requests waiting for a completion and |
4886 | * may idle after their completion, then keep it anyway. |
4887 | * |
4888 | * Yet, inject service from other queues if it boosts |
4889 | * throughput and is possible. |
4890 | */ |
4891 | if (bfq_bfqq_wait_request(bfqq) || |
4892 | (bfqq->dispatched != 0 && bfq_better_to_idle(bfqq))) { |
4893 | unsigned int act_idx = bfqq->actuator_idx; |
4894 | struct bfq_queue *async_bfqq = NULL; |
4895 | struct bfq_queue *blocked_bfqq = |
4896 | 			!hlist_empty(&bfqq->woken_list) ? |
4897 | container_of(bfqq->woken_list.first, |
4898 | struct bfq_queue, |
4899 | woken_list_node) |
4900 | : NULL; |
4901 | |
4902 | if (bfqq->bic && bfqq->bic->bfqq[0][act_idx] && |
4903 | 		    bfq_bfqq_busy(bfqq->bic->bfqq[0][act_idx]) && |
4904 | bfqq->bic->bfqq[0][act_idx]->next_rq) |
4905 | async_bfqq = bfqq->bic->bfqq[0][act_idx]; |
4906 | /* |
4907 | * The next four mutually-exclusive ifs decide |
4908 | * whether to try injection, and choose the queue to |
4909 | * pick an I/O request from. |
4910 | * |
4911 | * The first if checks whether the process associated |
4912 | * with bfqq has also async I/O pending. If so, it |
4913 | * injects such I/O unconditionally. Injecting async |
4914 | * I/O from the same process can cause no harm to the |
4915 | * process. On the contrary, it can only increase |
4916 | * bandwidth and reduce latency for the process. |
4917 | * |
4918 | * The second if checks whether there happens to be a |
4919 | * non-empty waker queue for bfqq, i.e., a queue whose |
4920 | * I/O needs to be completed for bfqq to receive new |
4921 | * I/O. This happens, e.g., if bfqq is associated with |
4922 | * a process that does some sync. A sync generates |
4923 | * extra blocking I/O, which must be completed before |
4924 | * the process associated with bfqq can go on with its |
4925 | * I/O. If the I/O of the waker queue is not served, |
4926 | * then bfqq remains empty, and no I/O is dispatched, |
4927 | * until the idle timeout fires for bfqq. This is |
4928 | * likely to result in lower bandwidth and higher |
4929 | * latencies for bfqq, and in a severe loss of total |
4930 | * throughput. The best action to take is therefore to |
4931 | * serve the waker queue as soon as possible. So do it |
4932 | * (without relying on the third alternative below for |
4933 | * eventually serving waker_bfqq's I/O; see the last |
4934 | * paragraph for further details). This systematic |
4935 | * injection of I/O from the waker queue does not |
4936 | * cause any delay to bfqq's I/O. On the contrary, |
4937 | * next bfqq's I/O is brought forward dramatically, |
4938 | * for it is not blocked for milliseconds. |
4939 | * |
4940 | * The third if checks whether there is a queue woken |
4941 | * by bfqq, and currently with pending I/O. Such a |
4942 | * woken queue does not steal bandwidth from bfqq, |
4943 | * because it remains soon without I/O if bfqq is not |
4944 | * served. So there is virtually no risk of loss of |
4945 | * bandwidth for bfqq if this woken queue has I/O |
4946 | * dispatched while bfqq is waiting for new I/O. |
4947 | * |
4948 | * The fourth if checks whether bfqq is a queue for |
4949 | * which it is better to avoid injection. It is so if |
4950 | * bfqq delivers more throughput when served without |
4951 | * any further I/O from other queues in the middle, or |
4952 | * if the service times of bfqq's I/O requests both |
4953 | * count more than overall throughput, and may be |
4954 | * easily increased by injection (this happens if bfqq |
4955 | * has a short think time). If none of these |
4956 | * conditions holds, then a candidate queue for |
4957 | * injection is looked for through |
4958 | * bfq_choose_bfqq_for_injection(). Note that the |
4959 | * latter may return NULL (for example if the inject |
4960 | * limit for bfqq is currently 0). |
4961 | * |
4962 | * NOTE: motivation for the second alternative |
4963 | * |
4964 | * Thanks to the way the inject limit is updated in |
4965 | * bfq_update_has_short_ttime(), it is rather likely |
4966 | * that, if I/O is being plugged for bfqq and the |
4967 | * waker queue has pending I/O requests that are |
4968 | * blocking bfqq's I/O, then the fourth alternative |
4969 | * above lets the waker queue get served before the |
4970 | * I/O-plugging timeout fires. So one may deem the |
4971 | * second alternative superfluous. It is not, because |
4972 | * the fourth alternative may be way less effective in |
4973 | * case of a synchronization. For two main |
4974 | * reasons. First, throughput may be low because the |
4975 | * inject limit may be too low to guarantee the same |
4976 | * amount of injected I/O, from the waker queue or |
4977 | * other queues, that the second alternative |
4978 | * guarantees (the second alternative unconditionally |
4979 | * injects a pending I/O request of the waker queue |
4980 | * for each bfq_dispatch_request()). Second, with the |
4981 | * fourth alternative, the duration of the plugging, |
4982 | * i.e., the time before bfqq finally receives new I/O, |
4983 | * may not be minimized, because the waker queue may |
4984 | * happen to be served only after other queues. |
4985 | */ |
4986 | if (async_bfqq && |
4987 | 		    icq_to_bic(async_bfqq->next_rq->elv.icq) == bfqq->bic && |
4988 | 		    bfq_serv_to_charge(async_bfqq->next_rq, async_bfqq) <= |
4989 | 		    bfq_bfqq_budget_left(async_bfqq)) |
4990 | bfqq = async_bfqq; |
4991 | else if (bfqq->waker_bfqq && |
4992 | 			 bfq_bfqq_busy(bfqq->waker_bfqq) && |
4993 | 			 bfqq->waker_bfqq->next_rq && |
4994 | 			 bfq_serv_to_charge(bfqq->waker_bfqq->next_rq, |
4995 | 					    bfqq->waker_bfqq) <= |
4996 | 			 bfq_bfqq_budget_left(bfqq->waker_bfqq) |
4997 | ) |
4998 | bfqq = bfqq->waker_bfqq; |
4999 | else if (blocked_bfqq && |
5000 | 			 bfq_bfqq_busy(blocked_bfqq) && |
5001 | 			 blocked_bfqq->next_rq && |
5002 | 			 bfq_serv_to_charge(blocked_bfqq->next_rq, |
5003 | 					    blocked_bfqq) <= |
5004 | 			 bfq_bfqq_budget_left(blocked_bfqq) |
5005 | ) |
5006 | bfqq = blocked_bfqq; |
5007 | else if (!idling_boosts_thr_without_issues(bfqd, bfqq) && |
5008 | (bfqq->wr_coeff == 1 || bfqd->wr_busy_queues > 1 || |
5009 | !bfq_bfqq_has_short_ttime(bfqq))) |
5010 | bfqq = bfq_choose_bfqq_for_injection(bfqd); |
5011 | else |
5012 | bfqq = NULL; |
5013 | |
5014 | goto keep_queue; |
5015 | } |
5016 | |
5017 | reason = BFQQE_NO_MORE_REQUESTS; |
5018 | expire: |
5019 | 	bfq_bfqq_expire(bfqd, bfqq, false, reason); |
5020 | new_queue: |
5021 | 	bfqq = bfq_set_in_service_queue(bfqd); |
5022 | 	if (bfqq) { |
5023 | 		bfq_log_bfqq(bfqd, bfqq, "select_queue: checking new queue"); |
5024 | goto check_queue; |
5025 | } |
5026 | keep_queue: |
5027 | if (bfqq) |
5028 | 		bfq_log_bfqq(bfqd, bfqq, "select_queue: returned this queue"); |
5029 | 	else |
5030 | 		bfq_log(bfqd, "select_queue: no queue returned"); |
5031 | |
5032 | return bfqq; |
5033 | } |
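
/*
 * Illustrative scenario for the injection alternatives described
 * inside bfq_select_queue() (hypothetical setup): the in-service bfqq
 * has just become empty but is waiting for new I/O, it has no pending
 * async I/O, and its waker queue has a request that fits in the
 * waker's remaining budget. Then the second alternative applies: the
 * waker's request is dispatched while the device keeps idling on
 * behalf of bfqq, so the I/O blocking bfqq completes without waiting
 * for the idle timeout to fire.
 */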
5034 | |
5035 | static void bfq_update_wr_data(struct bfq_data *bfqd, struct bfq_queue *bfqq) |
5036 | { |
5037 | struct bfq_entity *entity = &bfqq->entity; |
5038 | |
5039 | if (bfqq->wr_coeff > 1) { /* queue is being weight-raised */ |
5040 | bfq_log_bfqq(bfqd, bfqq, |
5041 | 			"raising period dur %u/%u msec, old coeff %u, w %d(%d)", |
5042 | jiffies_to_msecs(jiffies - bfqq->last_wr_start_finish), |
5043 | jiffies_to_msecs(bfqq->wr_cur_max_time), |
5044 | bfqq->wr_coeff, |
5045 | bfqq->entity.weight, bfqq->entity.orig_weight); |
5046 | |
5047 | if (entity->prio_changed) |
5048 | 			bfq_log_bfqq(bfqd, bfqq, "WARN: pending prio change"); |
5049 | |
5050 | /* |
5051 | * If the queue was activated in a burst, or too much |
5052 | * time has elapsed from the beginning of this |
5053 | * weight-raising period, then end weight raising. |
5054 | */ |
5055 | if (bfq_bfqq_in_large_burst(bfqq)) |
5056 | bfq_bfqq_end_wr(bfqq); |
5057 | else if (time_is_before_jiffies(bfqq->last_wr_start_finish + |
5058 | bfqq->wr_cur_max_time)) { |
5059 | if (bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time || |
5060 | time_is_before_jiffies(bfqq->wr_start_at_switch_to_srt + |
5061 | bfq_wr_duration(bfqd))) { |
5062 | /* |
5063 | * Either in interactive weight |
5064 | * raising, or in soft_rt weight |
5065 | * raising with the |
5066 | * interactive-weight-raising period |
5067 | * elapsed (so no switch back to |
5068 | * interactive weight raising). |
5069 | */ |
5070 | bfq_bfqq_end_wr(bfqq); |
5071 | } else { /* |
5072 | * soft_rt finishing while still in |
5073 | * interactive period, switch back to |
5074 | * interactive weight raising |
5075 | */ |
5076 | switch_back_to_interactive_wr(bfqq, bfqd); |
5077 | bfqq->entity.prio_changed = 1; |
5078 | } |
5079 | } |
5080 | if (bfqq->wr_coeff > 1 && |
5081 | bfqq->wr_cur_max_time != bfqd->bfq_wr_rt_max_time && |
5082 | bfqq->service_from_wr > max_service_from_wr) { |
5083 | /* see comments on max_service_from_wr */ |
5084 | bfq_bfqq_end_wr(bfqq); |
5085 | } |
5086 | } |
5087 | /* |
5088 | * To improve latency (for this or other queues), immediately |
5089 | * update weight both if it must be raised and if it must be |
5090 | * lowered. Since, entity may be on some active tree here, and |
5091 | * might have a pending change of its ioprio class, invoke |
5092 | * next function with the last parameter unset (see the |
5093 | * comments on the function). |
5094 | */ |
5095 | if ((entity->weight > entity->orig_weight) != (bfqq->wr_coeff > 1)) |
5096 | 		__bfq_entity_update_weight_prio(bfq_entity_service_tree(entity), |
5097 | 						entity, false); |
5098 | } |
5099 | |
5100 | /* |
5101 | * Dispatch next request from bfqq. |
5102 | */ |
5103 | static struct request *bfq_dispatch_rq_from_bfqq(struct bfq_data *bfqd, |
5104 | struct bfq_queue *bfqq) |
5105 | { |
5106 | struct request *rq = bfqq->next_rq; |
5107 | unsigned long service_to_charge; |
5108 | |
5109 | service_to_charge = bfq_serv_to_charge(rq, bfqq); |
5110 | |
5111 | 	bfq_bfqq_served(bfqq, service_to_charge); |
5112 | |
5113 | if (bfqq == bfqd->in_service_queue && bfqd->wait_dispatch) { |
5114 | bfqd->wait_dispatch = false; |
5115 | bfqd->waited_rq = rq; |
5116 | } |
5117 | |
5118 | 	bfq_dispatch_remove(bfqd->queue, rq); |
5119 | |
5120 | if (bfqq != bfqd->in_service_queue) |
5121 | return rq; |
5122 | |
5123 | /* |
5124 | * If weight raising has to terminate for bfqq, then next |
5125 | * function causes an immediate update of bfqq's weight, |
5126 | * without waiting for next activation. As a consequence, on |
5127 | * expiration, bfqq will be timestamped as if it had never been |
5128 | * weight-raised during this service slot, even if it has |
5129 | * received part or even most of the service as a |
5130 | * weight-raised queue. This inflates bfqq's timestamps, which |
5131 | * is beneficial, as bfqq is then more willing to leave the |
5132 | * device immediately to possible other weight-raised queues. |
5133 | */ |
5134 | bfq_update_wr_data(bfqd, bfqq); |
5135 | |
5136 | /* |
5137 | * Expire bfqq, pretending that its budget expired, if bfqq |
5138 | * belongs to CLASS_IDLE and other queues are waiting for |
5139 | * service. |
5140 | */ |
5141 | if (bfq_tot_busy_queues(bfqd) > 1 && bfq_class_idle(bfqq)) |
5142 | 		bfq_bfqq_expire(bfqd, bfqq, false, BFQQE_BUDGET_EXHAUSTED); |
5143 | |
5144 | return rq; |
5145 | } |
5146 | |
5147 | static bool bfq_has_work(struct blk_mq_hw_ctx *hctx) |
5148 | { |
5149 | struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; |
5150 | |
5151 | /* |
5152 | * Avoiding lock: a race on bfqd->queued should cause at |
5153 | * most a call to dispatch for nothing |
5154 | */ |
5155 | 	return !list_empty_careful(&bfqd->dispatch) || |
5156 | READ_ONCE(bfqd->queued); |
5157 | } |
5158 | |
5159 | static struct request *__bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) |
5160 | { |
5161 | struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; |
5162 | struct request *rq = NULL; |
5163 | struct bfq_queue *bfqq = NULL; |
5164 | |
5165 | 	if (!list_empty(&bfqd->dispatch)) { |
5166 | rq = list_first_entry(&bfqd->dispatch, struct request, |
5167 | queuelist); |
5168 | 		list_del_init(&rq->queuelist); |
5169 | |
5170 | bfqq = RQ_BFQQ(rq); |
5171 | |
5172 | if (bfqq) { |
5173 | /* |
5174 | * Increment counters here, because this |
5175 | * dispatch does not follow the standard |
5176 | * dispatch flow (where counters are |
5177 | * incremented) |
5178 | */ |
5179 | bfqq->dispatched++; |
5180 | |
5181 | goto inc_in_driver_start_rq; |
5182 | } |
5183 | |
5184 | /* |
5185 | * We exploit the bfq_finish_requeue_request hook to |
5186 | * decrement tot_rq_in_driver, but |
5187 | * bfq_finish_requeue_request will not be invoked on |
5188 | * this request. So, to avoid unbalance, just start |
5189 | * this request, without incrementing tot_rq_in_driver. As |
5190 | * a negative consequence, tot_rq_in_driver is deceptively |
5191 | * lower than it should be while this request is in |
5192 | * service. This may cause bfq_schedule_dispatch to be |
5193 | * invoked uselessly. |
5194 | * |
5195 | * As for implementing an exact solution, the |
5196 | * bfq_finish_requeue_request hook, if defined, is |
5197 | * probably invoked also on this request. So, by |
5198 | * exploiting this hook, we could 1) increment |
5199 | * tot_rq_in_driver here, and 2) decrement it in |
5200 | * bfq_finish_requeue_request. Such a solution would |
5201 | * let the value of the counter be always accurate, |
5202 | * but it would entail using an extra interface |
* function. This cost seems higher than the benefit,
* since non-elevator-private requests are very rare.
5206 | */ |
5207 | goto start_rq; |
5208 | } |
5209 | |
bfq_log(bfqd, "dispatch requests: %d busy queues",
5211 | bfq_tot_busy_queues(bfqd)); |
5212 | |
5213 | if (bfq_tot_busy_queues(bfqd) == 0) |
5214 | goto exit; |
5215 | |
5216 | /* |
5217 | * Force device to serve one request at a time if |
5218 | * strict_guarantees is true. Forcing this service scheme is |
5219 | * currently the ONLY way to guarantee that the request |
5220 | * service order enforced by the scheduler is respected by a |
5221 | * queueing device. Otherwise the device is free even to make |
5222 | * some unlucky request wait for as long as the device |
5223 | * wishes. |
5224 | * |
5225 | * Of course, serving one request at a time may cause loss of |
5226 | * throughput. |
5227 | */ |
5228 | if (bfqd->strict_guarantees && bfqd->tot_rq_in_driver > 0) |
5229 | goto exit; |
5230 | |
5231 | bfqq = bfq_select_queue(bfqd); |
5232 | if (!bfqq) |
5233 | goto exit; |
5234 | |
5235 | rq = bfq_dispatch_rq_from_bfqq(bfqd, bfqq); |
5236 | |
5237 | if (rq) { |
5238 | inc_in_driver_start_rq: |
5239 | bfqd->rq_in_driver[bfqq->actuator_idx]++; |
5240 | bfqd->tot_rq_in_driver++; |
5241 | start_rq: |
5242 | rq->rq_flags |= RQF_STARTED; |
5243 | } |
5244 | exit: |
5245 | return rq; |
5246 | } |
5247 | |
5248 | #ifdef CONFIG_BFQ_CGROUP_DEBUG |
5249 | static void bfq_update_dispatch_stats(struct request_queue *q, |
5250 | struct request *rq, |
5251 | struct bfq_queue *in_serv_queue, |
5252 | bool idle_timer_disabled) |
5253 | { |
5254 | struct bfq_queue *bfqq = rq ? RQ_BFQQ(rq) : NULL; |
5255 | |
5256 | if (!idle_timer_disabled && !bfqq) |
5257 | return; |
5258 | |
5259 | /* |
5260 | * rq and bfqq are guaranteed to exist until this function |
5261 | * ends, for the following reasons. First, rq can be |
5262 | * dispatched to the device, and then can be completed and |
5263 | * freed, only after this function ends. Second, rq cannot be |
5264 | * merged (and thus freed because of a merge) any longer, |
5265 | * because it has already started. Thus rq cannot be freed |
5266 | * before this function ends, and, since rq has a reference to |
5267 | * bfqq, the same guarantee holds for bfqq too. |
5268 | * |
5269 | * In addition, the following queue lock guarantees that |
5270 | * bfqq_group(bfqq) exists as well. |
5271 | */ |
spin_lock_irq(&q->queue_lock);
5273 | if (idle_timer_disabled) |
5274 | /* |
5275 | * Since the idle timer has been disabled, |
5276 | * in_serv_queue contained some request when |
5277 | * __bfq_dispatch_request was invoked above, which |
5278 | * implies that rq was picked exactly from |
5279 | * in_serv_queue. Thus in_serv_queue == bfqq, and is |
5280 | * therefore guaranteed to exist because of the above |
5281 | * arguments. |
5282 | */ |
bfqg_stats_update_idle_time(bfqq_group(in_serv_queue));
5284 | if (bfqq) { |
5285 | struct bfq_group *bfqg = bfqq_group(bfqq); |
5286 | |
5287 | bfqg_stats_update_avg_queue_size(bfqg); |
5288 | bfqg_stats_set_start_empty_time(bfqg); |
bfqg_stats_update_io_remove(bfqg, rq->cmd_flags);
5290 | } |
spin_unlock_irq(&q->queue_lock);
5292 | } |
5293 | #else |
5294 | static inline void bfq_update_dispatch_stats(struct request_queue *q, |
5295 | struct request *rq, |
5296 | struct bfq_queue *in_serv_queue, |
5297 | bool idle_timer_disabled) {} |
5298 | #endif /* CONFIG_BFQ_CGROUP_DEBUG */ |
5299 | |
5300 | static struct request *bfq_dispatch_request(struct blk_mq_hw_ctx *hctx) |
5301 | { |
5302 | struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; |
5303 | struct request *rq; |
5304 | struct bfq_queue *in_serv_queue; |
5305 | bool waiting_rq, idle_timer_disabled = false; |
5306 | |
spin_lock_irq(&bfqd->lock);
5308 | |
5309 | in_serv_queue = bfqd->in_service_queue; |
waiting_rq = in_serv_queue && bfq_bfqq_wait_request(in_serv_queue);
5311 | |
5312 | rq = __bfq_dispatch_request(hctx); |
5313 | if (in_serv_queue == bfqd->in_service_queue) { |
5314 | idle_timer_disabled = |
waiting_rq && !bfq_bfqq_wait_request(in_serv_queue);
5316 | } |
5317 | |
spin_unlock_irq(&bfqd->lock);
bfq_update_dispatch_stats(hctx->queue, rq,
idle_timer_disabled ? in_serv_queue : NULL,
idle_timer_disabled);
5322 | |
5323 | return rq; |
5324 | } |
5325 | |
5326 | /* |
5327 | * Task holds one reference to the queue, dropped when task exits. Each rq |
5328 | * in-flight on this queue also holds a reference, dropped when rq is freed. |
5329 | * |
5330 | * Scheduler lock must be held here. Recall not to use bfqq after calling |
5331 | * this function on it. |
5332 | */ |
5333 | void bfq_put_queue(struct bfq_queue *bfqq) |
5334 | { |
5335 | struct bfq_queue *item; |
5336 | struct hlist_node *n; |
5337 | struct bfq_group *bfqg = bfqq_group(bfqq); |
5338 | |
bfq_log_bfqq(bfqq->bfqd, bfqq, "put_queue: %p %d", bfqq, bfqq->ref);
5340 | |
5341 | bfqq->ref--; |
5342 | if (bfqq->ref) |
5343 | return; |
5344 | |
if (!hlist_unhashed(&bfqq->burst_list_node)) {
hlist_del_init(&bfqq->burst_list_node);
5347 | /* |
5348 | * Decrement also burst size after the removal, if the |
5349 | * process associated with bfqq is exiting, and thus |
5350 | * does not contribute to the burst any longer. This |
5351 | * decrement helps filter out false positives of large |
5352 | * bursts, when some short-lived process (often due to |
5353 | * the execution of commands by some service) happens |
5354 | * to start and exit while a complex application is |
5355 | * starting, and thus spawning several processes that |
5356 | * do I/O (and that *must not* be treated as a large |
5357 | * burst, see comments on bfq_handle_burst). |
5358 | * |
5359 | * In particular, the decrement is performed only if: |
5360 | * 1) bfqq is not a merged queue, because, if it is, |
5361 | * then this free of bfqq is not triggered by the exit |
5362 | * of the process bfqq is associated with, but exactly |
5363 | * by the fact that bfqq has just been merged. |
5364 | * 2) burst_size is greater than 0, to handle |
5365 | * unbalanced decrements. Unbalanced decrements may |
* happen in the following case: bfqq is inserted into
* the current burst list--without incrementing
* burst_size--because of a split, but the current
5369 | * burst list is not the burst list bfqq belonged to |
5370 | * (see comments on the case of a split in |
5371 | * bfq_set_request). |
5372 | */ |
5373 | if (bfqq->bic && bfqq->bfqd->burst_size > 0) |
5374 | bfqq->bfqd->burst_size--; |
5375 | } |
5376 | |
5377 | /* |
5378 | * bfqq does not exist any longer, so it cannot be woken by |
5379 | * any other queue, and cannot wake any other queue. Then bfqq |
5380 | * must be removed from the woken list of its possible waker |
5381 | * queue, and all queues in the woken list of bfqq must stop |
5382 | * having a waker queue. Strictly speaking, these updates |
5383 | * should be performed when bfqq remains with no I/O source |
5384 | * attached to it, which happens before bfqq gets freed. In |
5385 | * particular, this happens when the last process associated |
5386 | * with bfqq exits or gets associated with a different |
5387 | * queue. However, both events lead to bfqq being freed soon, |
5388 | * and dangling references would come out only after bfqq gets |
5389 | * freed. So these updates are done here, as a simple and safe |
5390 | * way to handle all cases. |
5391 | */ |
5392 | /* remove bfqq from woken list */ |
if (!hlist_unhashed(&bfqq->woken_list_node))
hlist_del_init(&bfqq->woken_list_node);
5395 | |
5396 | /* reset waker for all queues in woken list */ |
5397 | hlist_for_each_entry_safe(item, n, &bfqq->woken_list, |
5398 | woken_list_node) { |
5399 | item->waker_bfqq = NULL; |
hlist_del_init(&item->woken_list_node);
5401 | } |
5402 | |
5403 | if (bfqq->bfqd->last_completed_rq_bfqq == bfqq) |
5404 | bfqq->bfqd->last_completed_rq_bfqq = NULL; |
5405 | |
5406 | WARN_ON_ONCE(!list_empty(&bfqq->fifo)); |
5407 | WARN_ON_ONCE(!RB_EMPTY_ROOT(&bfqq->sort_list)); |
5408 | WARN_ON_ONCE(bfqq->dispatched); |
5409 | |
kmem_cache_free(bfq_pool, bfqq);
5411 | bfqg_and_blkg_put(bfqg); |
5412 | } |
5413 | |
5414 | static void bfq_put_stable_ref(struct bfq_queue *bfqq) |
5415 | { |
5416 | bfqq->stable_ref--; |
5417 | bfq_put_queue(bfqq); |
5418 | } |
5419 | |
5420 | void bfq_put_cooperator(struct bfq_queue *bfqq) |
5421 | { |
5422 | struct bfq_queue *__bfqq, *next; |
5423 | |
5424 | /* |
5425 | * If this queue was scheduled to merge with another queue, be |
5426 | * sure to drop the reference taken on that queue (and others in |
5427 | * the merge chain). See bfq_setup_merge and bfq_merge_bfqqs. |
5428 | */ |
5429 | __bfqq = bfqq->new_bfqq; |
5430 | while (__bfqq) { |
5431 | next = __bfqq->new_bfqq; |
bfq_put_queue(__bfqq);
5433 | __bfqq = next; |
5434 | } |
5435 | } |
5436 | |
5437 | static void bfq_exit_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq) |
5438 | { |
5439 | if (bfqq == bfqd->in_service_queue) { |
__bfq_bfqq_expire(bfqd, bfqq, BFQQE_BUDGET_TIMEOUT);
5441 | bfq_schedule_dispatch(bfqd); |
5442 | } |
5443 | |
bfq_log_bfqq(bfqd, bfqq, "exit_bfqq: %p, %d", bfqq, bfqq->ref);
5445 | |
5446 | bfq_put_cooperator(bfqq); |
5447 | |
5448 | bfq_release_process_ref(bfqd, bfqq); |
5449 | } |
5450 | |
5451 | static void bfq_exit_icq_bfqq(struct bfq_io_cq *bic, bool is_sync, |
5452 | unsigned int actuator_idx) |
5453 | { |
5454 | struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync, actuator_idx); |
5455 | struct bfq_data *bfqd; |
5456 | |
5457 | if (bfqq) |
5458 | bfqd = bfqq->bfqd; /* NULL if scheduler already exited */ |
5459 | |
5460 | if (bfqq && bfqd) { |
5461 | bic_set_bfqq(bic, NULL, is_sync, actuator_idx); |
5462 | bfq_exit_bfqq(bfqd, bfqq); |
5463 | } |
5464 | } |
5465 | |
5466 | static void bfq_exit_icq(struct io_cq *icq) |
5467 | { |
5468 | struct bfq_io_cq *bic = icq_to_bic(icq); |
5469 | struct bfq_data *bfqd = bic_to_bfqd(bic); |
5470 | unsigned long flags; |
5471 | unsigned int act_idx; |
5472 | /* |
5473 | * If bfqd and thus bfqd->num_actuators is not available any |
5474 | * longer, then cycle over all possible per-actuator bfqqs in |
5475 | * next loop. We rely on bic being zeroed on creation, and |
5476 | * therefore on its unused per-actuator fields being NULL. |
5477 | */ |
5478 | unsigned int num_actuators = BFQ_MAX_ACTUATORS; |
5479 | struct bfq_iocq_bfqq_data *bfqq_data = bic->bfqq_data; |
5480 | |
5481 | /* |
5482 | * bfqd is NULL if scheduler already exited, and in that case |
5483 | * this is the last time these queues are accessed. |
5484 | */ |
5485 | if (bfqd) { |
5486 | spin_lock_irqsave(&bfqd->lock, flags); |
5487 | num_actuators = bfqd->num_actuators; |
5488 | } |
5489 | |
5490 | for (act_idx = 0; act_idx < num_actuators; act_idx++) { |
5491 | if (bfqq_data[act_idx].stable_merge_bfqq) |
bfq_put_stable_ref(bfqq_data[act_idx].stable_merge_bfqq);
5493 | |
bfq_exit_icq_bfqq(bic, true, act_idx);
bfq_exit_icq_bfqq(bic, false, act_idx);
5496 | } |
5497 | |
5498 | if (bfqd) |
spin_unlock_irqrestore(&bfqd->lock, flags);
5500 | } |
5501 | |
5502 | /* |
5503 | * Update the entity prio values; note that the new values will not |
5504 | * be used until the next (re)activation. |
5505 | */ |
5506 | static void |
5507 | bfq_set_next_ioprio_data(struct bfq_queue *bfqq, struct bfq_io_cq *bic) |
5508 | { |
5509 | struct task_struct *tsk = current; |
5510 | int ioprio_class; |
5511 | struct bfq_data *bfqd = bfqq->bfqd; |
5512 | |
5513 | if (!bfqd) |
5514 | return; |
5515 | |
5516 | ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio); |
5517 | switch (ioprio_class) { |
5518 | default: |
pr_err("bdi %s: bfq: bad prio class %d\n",
5520 | bdi_dev_name(bfqq->bfqd->queue->disk->bdi), |
5521 | ioprio_class); |
5522 | fallthrough; |
5523 | case IOPRIO_CLASS_NONE: |
5524 | /* |
5525 | * No prio set, inherit CPU scheduling settings. |
5526 | */ |
bfqq->new_ioprio = task_nice_ioprio(tsk);
bfqq->new_ioprio_class = task_nice_ioclass(tsk);
5529 | break; |
5530 | case IOPRIO_CLASS_RT: |
5531 | bfqq->new_ioprio = IOPRIO_PRIO_LEVEL(bic->ioprio); |
5532 | bfqq->new_ioprio_class = IOPRIO_CLASS_RT; |
5533 | break; |
5534 | case IOPRIO_CLASS_BE: |
5535 | bfqq->new_ioprio = IOPRIO_PRIO_LEVEL(bic->ioprio); |
5536 | bfqq->new_ioprio_class = IOPRIO_CLASS_BE; |
5537 | break; |
5538 | case IOPRIO_CLASS_IDLE: |
5539 | bfqq->new_ioprio_class = IOPRIO_CLASS_IDLE; |
5540 | bfqq->new_ioprio = IOPRIO_NR_LEVELS - 1; |
5541 | break; |
5542 | } |
5543 | |
5544 | if (bfqq->new_ioprio >= IOPRIO_NR_LEVELS) { |
pr_crit("bfq_set_next_ioprio_data: new_ioprio %d\n",
5546 | bfqq->new_ioprio); |
5547 | bfqq->new_ioprio = IOPRIO_NR_LEVELS - 1; |
5548 | } |
5549 | |
bfqq->entity.new_weight = bfq_ioprio_to_weight(bfqq->new_ioprio);
bfq_log_bfqq(bfqd, bfqq, "new_ioprio %d new_weight %d",
5552 | bfqq->new_ioprio, bfqq->entity.new_weight); |
5553 | bfqq->entity.prio_changed = 1; |
5554 | } |
5555 | |
5556 | static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, |
5557 | struct bio *bio, bool is_sync, |
5558 | struct bfq_io_cq *bic, |
5559 | bool respawn); |
5560 | |
5561 | static void bfq_check_ioprio_change(struct bfq_io_cq *bic, struct bio *bio) |
5562 | { |
5563 | struct bfq_data *bfqd = bic_to_bfqd(bic); |
5564 | struct bfq_queue *bfqq; |
5565 | int ioprio = bic->icq.ioc->ioprio; |
5566 | |
5567 | /* |
5568 | * This condition may trigger on a newly created bic, be sure to |
5569 | * drop the lock before returning. |
5570 | */ |
5571 | if (unlikely(!bfqd) || likely(bic->ioprio == ioprio)) |
5572 | return; |
5573 | |
5574 | bic->ioprio = ioprio; |
5575 | |
bfqq = bic_to_bfqq(bic, false, bfq_actuator_index(bfqd, bio));
5577 | if (bfqq) { |
5578 | struct bfq_queue *old_bfqq = bfqq; |
5579 | |
bfqq = bfq_get_queue(bfqd, bio, false, bic, true);
bic_set_bfqq(bic, bfqq, false, bfq_actuator_index(bfqd, bio));
bfq_release_process_ref(bfqd, old_bfqq);
5583 | } |
5584 | |
bfqq = bic_to_bfqq(bic, true, bfq_actuator_index(bfqd, bio));
5586 | if (bfqq) |
5587 | bfq_set_next_ioprio_data(bfqq, bic); |
5588 | } |
5589 | |
5590 | static void bfq_init_bfqq(struct bfq_data *bfqd, struct bfq_queue *bfqq, |
5591 | struct bfq_io_cq *bic, pid_t pid, int is_sync, |
5592 | unsigned int act_idx) |
5593 | { |
5594 | u64 now_ns = ktime_get_ns(); |
5595 | |
5596 | bfqq->actuator_idx = act_idx; |
5597 | RB_CLEAR_NODE(&bfqq->entity.rb_node); |
INIT_LIST_HEAD(&bfqq->fifo);
INIT_HLIST_NODE(&bfqq->burst_list_node);
INIT_HLIST_NODE(&bfqq->woken_list_node);
5601 | INIT_HLIST_HEAD(&bfqq->woken_list); |
5602 | |
5603 | bfqq->ref = 0; |
5604 | bfqq->bfqd = bfqd; |
5605 | |
5606 | if (bic) |
5607 | bfq_set_next_ioprio_data(bfqq, bic); |
5608 | |
5609 | if (is_sync) { |
5610 | /* |
5611 | * No need to mark as has_short_ttime if in |
5612 | * idle_class, because no device idling is performed |
5613 | * for queues in idle class |
5614 | */ |
5615 | if (!bfq_class_idle(bfqq)) |
5616 | /* tentatively mark as has_short_ttime */ |
5617 | bfq_mark_bfqq_has_short_ttime(bfqq); |
5618 | bfq_mark_bfqq_sync(bfqq); |
5619 | bfq_mark_bfqq_just_created(bfqq); |
5620 | } else |
5621 | bfq_clear_bfqq_sync(bfqq); |
5622 | |
5623 | /* set end request to minus infinity from now */ |
5624 | bfqq->ttime.last_end_request = now_ns + 1; |
5625 | |
5626 | bfqq->creation_time = jiffies; |
5627 | |
5628 | bfqq->io_start_time = now_ns; |
5629 | |
5630 | bfq_mark_bfqq_IO_bound(bfqq); |
5631 | |
5632 | bfqq->pid = pid; |
5633 | |
5634 | /* Tentative initial value to trade off between thr and lat */ |
5635 | bfqq->max_budget = (2 * bfq_max_budget(bfqd)) / 3; |
5636 | bfqq->budget_timeout = bfq_smallest_from_now(); |
5637 | |
5638 | bfqq->wr_coeff = 1; |
5639 | bfqq->last_wr_start_finish = jiffies; |
5640 | bfqq->wr_start_at_switch_to_srt = bfq_smallest_from_now(); |
5641 | bfqq->split_time = bfq_smallest_from_now(); |
5642 | |
5643 | /* |
5644 | * To not forget the possibly high bandwidth consumed by a |
5645 | * process/queue in the recent past, |
5646 | * bfq_bfqq_softrt_next_start() returns a value at least equal |
5647 | * to the current value of bfqq->soft_rt_next_start (see |
5648 | * comments on bfq_bfqq_softrt_next_start). Set |
5649 | * soft_rt_next_start to now, to mean that bfqq has consumed |
5650 | * no bandwidth so far. |
5651 | */ |
5652 | bfqq->soft_rt_next_start = jiffies; |
5653 | |
5654 | /* first request is almost certainly seeky */ |
5655 | bfqq->seek_history = 1; |
5656 | |
5657 | bfqq->decrease_time_jif = jiffies; |
5658 | } |
5659 | |
5660 | static struct bfq_queue **bfq_async_queue_prio(struct bfq_data *bfqd, |
5661 | struct bfq_group *bfqg, |
5662 | int ioprio_class, int ioprio, int act_idx) |
5663 | { |
5664 | switch (ioprio_class) { |
5665 | case IOPRIO_CLASS_RT: |
5666 | return &bfqg->async_bfqq[0][ioprio][act_idx]; |
5667 | case IOPRIO_CLASS_NONE: |
5668 | ioprio = IOPRIO_BE_NORM; |
5669 | fallthrough; |
5670 | case IOPRIO_CLASS_BE: |
5671 | return &bfqg->async_bfqq[1][ioprio][act_idx]; |
5672 | case IOPRIO_CLASS_IDLE: |
5673 | return &bfqg->async_idle_bfqq[act_idx]; |
5674 | default: |
5675 | return NULL; |
5676 | } |
5677 | } |
5678 | |
5679 | static struct bfq_queue * |
5680 | bfq_do_early_stable_merge(struct bfq_data *bfqd, struct bfq_queue *bfqq, |
5681 | struct bfq_io_cq *bic, |
5682 | struct bfq_queue *last_bfqq_created) |
5683 | { |
5684 | unsigned int a_idx = last_bfqq_created->actuator_idx; |
5685 | struct bfq_queue *new_bfqq = |
bfq_setup_merge(bfqq, last_bfqq_created);
5687 | |
5688 | if (!new_bfqq) |
5689 | return bfqq; |
5690 | |
5691 | if (new_bfqq->bic) |
5692 | new_bfqq->bic->bfqq_data[a_idx].stably_merged = true; |
5693 | bic->bfqq_data[a_idx].stably_merged = true; |
5694 | |
5695 | /* |
5696 | * Reusing merge functions. This implies that |
5697 | * bfqq->bic must be set too, for |
5698 | * bfq_merge_bfqqs to correctly save bfqq's |
5699 | * state before killing it. |
5700 | */ |
5701 | bfqq->bic = bic; |
5702 | bfq_merge_bfqqs(bfqd, bic, bfqq, new_bfqq); |
5703 | |
5704 | return new_bfqq; |
5705 | } |
5706 | |
5707 | /* |
5708 | * Many throughput-sensitive workloads are made of several parallel |
5709 | * I/O flows, with all flows generated by the same application, or |
5710 | * more generically by the same task (e.g., system boot). The most |
5711 | * counterproductive action with these workloads is plugging I/O |
5712 | * dispatch when one of the bfq_queues associated with these flows |
5713 | * remains temporarily empty. |
5714 | * |
5715 | * To avoid this plugging, BFQ has been using a burst-handling |
5716 | * mechanism for years now. This mechanism has proven effective for |
5717 | * throughput, and not detrimental for service guarantees. The |
5718 | * following function pushes this mechanism a little bit further, |
5719 | * basing on the following two facts. |
5720 | * |
* First, all the I/O flows of the same application or task
5722 | * contribute to the execution/completion of that common application |
5723 | * or task. So the performance figures that matter are total |
5724 | * throughput of the flows and task-wide I/O latency. In particular, |
5725 | * these flows do not need to be protected from each other, in terms |
5726 | * of individual bandwidth or latency. |
5727 | * |
5728 | * Second, the above fact holds regardless of the number of flows. |
5729 | * |
* Putting these two facts together, this function stably merges the
* bfq_queues associated with these I/O flows, i.e., with the
* processes that generate these I/O flows, regardless of how many
* such processes are involved.
5734 | * |
5735 | * To decide whether a set of bfq_queues is actually associated with |
5736 | * the I/O flows of a common application or task, and to merge these |
5737 | * queues stably, this function operates as follows: given a bfq_queue, |
5738 | * say Q2, currently being created, and the last bfq_queue, say Q1, |
5739 | * created before Q2, Q2 is merged stably with Q1 if |
5740 | * - very little time has elapsed since when Q1 was created |
5741 | * - Q2 has the same ioprio as Q1 |
5742 | * - Q2 belongs to the same group as Q1 |
5743 | * |
5744 | * Merging bfq_queues also reduces scheduling overhead. A fio test |
5745 | * with ten random readers on /dev/nullb shows a throughput boost of |
5746 | * 40%, with a quadcore. Since BFQ's execution time amounts to ~50% of |
5747 | * the total per-request processing time, the above throughput boost |
5748 | * implies that BFQ's overhead is reduced by more than 50%. |
5749 | * |
5750 | * This new mechanism most certainly obsoletes the current |
5751 | * burst-handling heuristics. We keep those heuristics for the moment. |
5752 | */ |
5753 | static struct bfq_queue *bfq_do_or_sched_stable_merge(struct bfq_data *bfqd, |
5754 | struct bfq_queue *bfqq, |
5755 | struct bfq_io_cq *bic) |
5756 | { |
5757 | struct bfq_queue **source_bfqq = bfqq->entity.parent ? |
5758 | &bfqq->entity.parent->last_bfqq_created : |
5759 | &bfqd->last_bfqq_created; |
5760 | |
5761 | struct bfq_queue *last_bfqq_created = *source_bfqq; |
5762 | |
5763 | /* |
5764 | * If last_bfqq_created has not been set yet, then init it. If |
5765 | * it has been set already, but too long ago, then move it |
5766 | * forward to bfqq. Finally, move also if bfqq belongs to a |
5767 | * different group than last_bfqq_created, or if bfqq has a |
5768 | * different ioprio, ioprio_class or actuator_idx. If none of |
5769 | * these conditions holds true, then try an early stable merge |
5770 | * or schedule a delayed stable merge. As for the condition on |
5771 | * actuator_idx, the reason is that, if queues associated with |
5772 | * different actuators are merged, then control is lost on |
5773 | * each actuator. Therefore some actuator may be |
5774 | * underutilized, and throughput may decrease. |
5775 | * |
5776 | * A delayed merge is scheduled (instead of performing an |
5777 | * early merge), in case bfqq might soon prove to be more |
5778 | * throughput-beneficial if not merged. Currently this is |
5779 | * possible only if bfqd is rotational with no queueing. For |
5780 | * such a drive, not merging bfqq is better for throughput if |
5781 | * bfqq happens to contain sequential I/O. So, we wait a |
5782 | * little bit for enough I/O to flow through bfqq. After that, |
5783 | * if such an I/O is sequential, then the merge is |
5784 | * canceled. Otherwise the merge is finally performed. |
5785 | */ |
5786 | if (!last_bfqq_created || |
5787 | time_before(last_bfqq_created->creation_time + |
5788 | msecs_to_jiffies(bfq_activation_stable_merging), |
5789 | bfqq->creation_time) || |
5790 | bfqq->entity.parent != last_bfqq_created->entity.parent || |
5791 | bfqq->ioprio != last_bfqq_created->ioprio || |
5792 | bfqq->ioprio_class != last_bfqq_created->ioprio_class || |
5793 | bfqq->actuator_idx != last_bfqq_created->actuator_idx) |
5794 | *source_bfqq = bfqq; |
5795 | else if (time_after_eq(last_bfqq_created->creation_time + |
5796 | bfqd->bfq_burst_interval, |
5797 | bfqq->creation_time)) { |
5798 | if (likely(bfqd->nonrot_with_queueing)) |
5799 | /* |
5800 | * With this type of drive, leaving |
5801 | * bfqq alone may provide no |
5802 | * throughput benefits compared with |
5803 | * merging bfqq. So merge bfqq now. |
5804 | */ |
5805 | bfqq = bfq_do_early_stable_merge(bfqd, bfqq, |
5806 | bic, |
5807 | last_bfqq_created); |
5808 | else { /* schedule tentative stable merge */ |
5809 | /* |
5810 | * get reference on last_bfqq_created, |
5811 | * to prevent it from being freed, |
5812 | * until we decide whether to merge |
5813 | */ |
5814 | last_bfqq_created->ref++; |
5815 | /* |
5816 | * need to keep track of stable refs, to |
5817 | * compute process refs correctly |
5818 | */ |
5819 | last_bfqq_created->stable_ref++; |
5820 | /* |
5821 | * Record the bfqq to merge to. |
5822 | */ |
5823 | bic->bfqq_data[last_bfqq_created->actuator_idx].stable_merge_bfqq = |
5824 | last_bfqq_created; |
5825 | } |
5826 | } |
5827 | |
5828 | return bfqq; |
5829 | } |
5830 | |
5831 | |
5832 | static struct bfq_queue *bfq_get_queue(struct bfq_data *bfqd, |
5833 | struct bio *bio, bool is_sync, |
5834 | struct bfq_io_cq *bic, |
5835 | bool respawn) |
5836 | { |
5837 | const int ioprio = IOPRIO_PRIO_LEVEL(bic->ioprio); |
5838 | const int ioprio_class = IOPRIO_PRIO_CLASS(bic->ioprio); |
5839 | struct bfq_queue **async_bfqq = NULL; |
5840 | struct bfq_queue *bfqq; |
5841 | struct bfq_group *bfqg; |
5842 | |
5843 | bfqg = bfq_bio_bfqg(bfqd, bio); |
5844 | if (!is_sync) { |
5845 | async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class, |
5846 | ioprio, |
bfq_actuator_index(bfqd, bio));
5848 | bfqq = *async_bfqq; |
5849 | if (bfqq) |
5850 | goto out; |
5851 | } |
5852 | |
bfqq = kmem_cache_alloc_node(bfq_pool,
GFP_NOWAIT | __GFP_ZERO | __GFP_NOWARN,
bfqd->queue->node);
5856 | |
5857 | if (bfqq) { |
5858 | bfq_init_bfqq(bfqd, bfqq, bic, current->pid, |
is_sync, bfq_actuator_index(bfqd, bio));
bfq_init_entity(&bfqq->entity, bfqg);
bfq_log_bfqq(bfqd, bfqq, "allocated");
5862 | } else { |
5863 | bfqq = &bfqd->oom_bfqq; |
bfq_log_bfqq(bfqd, bfqq, "using oom bfqq");
5865 | goto out; |
5866 | } |
5867 | |
5868 | /* |
5869 | * Pin the queue now that it's allocated, scheduler exit will |
5870 | * prune it. |
5871 | */ |
5872 | if (async_bfqq) { |
5873 | bfqq->ref++; /* |
5874 | * Extra group reference, w.r.t. sync |
5875 | * queue. This extra reference is removed |
5876 | * only if bfqq->bfqg disappears, to |
5877 | * guarantee that this queue is not freed |
5878 | * until its group goes away. |
5879 | */ |
bfq_log_bfqq(bfqd, bfqq, "get_queue, bfqq not in async: %p, %d",
5881 | bfqq, bfqq->ref); |
5882 | *async_bfqq = bfqq; |
5883 | } |
5884 | |
5885 | out: |
5886 | bfqq->ref++; /* get a process reference to this queue */ |
5887 | |
5888 | if (bfqq != &bfqd->oom_bfqq && is_sync && !respawn) |
5889 | bfqq = bfq_do_or_sched_stable_merge(bfqd, bfqq, bic); |
5890 | return bfqq; |
5891 | } |
5892 | |
5893 | static void bfq_update_io_thinktime(struct bfq_data *bfqd, |
5894 | struct bfq_queue *bfqq) |
5895 | { |
5896 | struct bfq_ttime *ttime = &bfqq->ttime; |
5897 | u64 elapsed; |
5898 | |
5899 | /* |
5900 | * We are really interested in how long it takes for the queue to |
5901 | * become busy when there is no outstanding IO for this queue. So |
5902 | * ignore cases when the bfq queue has already IO queued. |
5903 | */ |
5904 | if (bfqq->dispatched || bfq_bfqq_busy(bfqq)) |
5905 | return; |
5906 | elapsed = ktime_get_ns() - bfqq->ttime.last_end_request; |
5907 | elapsed = min_t(u64, elapsed, 2ULL * bfqd->bfq_slice_idle); |
5908 | |
5909 | ttime->ttime_samples = (7*ttime->ttime_samples + 256) / 8; |
ttime->ttime_total = div_u64(7*ttime->ttime_total + 256*elapsed, 8);
5911 | ttime->ttime_mean = div64_ul(ttime->ttime_total + 128, |
5912 | ttime->ttime_samples); |
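/*
* Worked example, for illustration (hypothetical values): the three
* updates above form an exponentially weighted moving average with
* decay factor 7/8, using 256 as the fixed-point weight of each new
* sample. ttime_samples converges to 256, so ttime_mean tracks a
* decayed mean of the elapsed times: starting from ttime_samples =
* 256 and ttime_total = 256 * 2 ms, a new elapsed value of 10 ms
* gives ttime_total = (7 * 256 * 2 ms + 256 * 10 ms) / 8 =
* 256 * 3 ms, hence ttime_mean = 3 ms, i.e., the mean moves by one
* eighth of the difference to the new sample.
*/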
5913 | } |
5914 | |
5915 | static void |
5916 | bfq_update_io_seektime(struct bfq_data *bfqd, struct bfq_queue *bfqq, |
5917 | struct request *rq) |
5918 | { |
5919 | bfqq->seek_history <<= 1; |
5920 | bfqq->seek_history |= BFQ_RQ_SEEKY(bfqd, bfqq->last_request_pos, rq); |
5921 | |
5922 | if (bfqq->wr_coeff > 1 && |
5923 | bfqq->wr_cur_max_time == bfqd->bfq_wr_rt_max_time && |
5924 | BFQQ_TOTALLY_SEEKY(bfqq)) { |
5925 | if (time_is_before_jiffies(bfqq->wr_start_at_switch_to_srt + |
5926 | bfq_wr_duration(bfqd))) { |
5927 | /* |
5928 | * In soft_rt weight raising with the |
5929 | * interactive-weight-raising period |
5930 | * elapsed (so no switch back to |
5931 | * interactive weight raising). |
5932 | */ |
5933 | bfq_bfqq_end_wr(bfqq); |
5934 | } else { /* |
5935 | * stopping soft_rt weight raising |
5936 | * while still in interactive period, |
5937 | * switch back to interactive weight |
5938 | * raising |
5939 | */ |
5940 | switch_back_to_interactive_wr(bfqq, bfqd); |
5941 | bfqq->entity.prio_changed = 1; |
5942 | } |
5943 | } |
5944 | } |
5945 | |
5946 | static void bfq_update_has_short_ttime(struct bfq_data *bfqd, |
5947 | struct bfq_queue *bfqq, |
5948 | struct bfq_io_cq *bic) |
5949 | { |
5950 | bool has_short_ttime = true, state_changed; |
5951 | |
5952 | /* |
5953 | * No need to update has_short_ttime if bfqq is async or in |
5954 | * idle io prio class, or if bfq_slice_idle is zero, because |
5955 | * no device idling is performed for bfqq in this case. |
5956 | */ |
5957 | if (!bfq_bfqq_sync(bfqq) || bfq_class_idle(bfqq) || |
5958 | bfqd->bfq_slice_idle == 0) |
5959 | return; |
5960 | |
5961 | /* Idle window just restored, statistics are meaningless. */ |
5962 | if (time_is_after_eq_jiffies(bfqq->split_time + |
5963 | bfqd->bfq_wr_min_idle_time)) |
5964 | return; |
5965 | |
5966 | /* Think time is infinite if no process is linked to |
5967 | * bfqq. Otherwise check average think time to decide whether |
5968 | * to mark as has_short_ttime. To this goal, compare average |
5969 | * think time with half the I/O-plugging timeout. |
5970 | */ |
if (atomic_read(&bic->icq.ioc->active_ref) == 0 ||
5972 | (bfq_sample_valid(bfqq->ttime.ttime_samples) && |
5973 | bfqq->ttime.ttime_mean > bfqd->bfq_slice_idle>>1)) |
5974 | has_short_ttime = false; |
5975 | |
5976 | state_changed = has_short_ttime != bfq_bfqq_has_short_ttime(bfqq); |
5977 | |
5978 | if (has_short_ttime) |
5979 | bfq_mark_bfqq_has_short_ttime(bfqq); |
5980 | else |
5981 | bfq_clear_bfqq_has_short_ttime(bfqq); |
5982 | |
5983 | /* |
5984 | * Until the base value for the total service time gets |
5985 | * finally computed for bfqq, the inject limit does depend on |
5986 | * the think-time state (short|long). In particular, the limit |
5987 | * is 0 or 1 if the think time is deemed, respectively, as |
5988 | * short or long (details in the comments in |
5989 | * bfq_update_inject_limit()). Accordingly, the next |
5990 | * instructions reset the inject limit if the think-time state |
5991 | * has changed and the above base value is still to be |
5992 | * computed. |
5993 | * |
5994 | * However, the reset is performed only if more than 100 ms |
5995 | * have elapsed since the last update of the inject limit, or |
5996 | * (inclusive) if the change is from short to long think |
5997 | * time. The reason for this waiting is as follows. |
5998 | * |
5999 | * bfqq may have a long think time because of a |
6000 | * synchronization with some other queue, i.e., because the |
6001 | * I/O of some other queue may need to be completed for bfqq |
6002 | * to receive new I/O. Details in the comments on the choice |
6003 | * of the queue for injection in bfq_select_queue(). |
6004 | * |
6005 | * As stressed in those comments, if such a synchronization is |
6006 | * actually in place, then, without injection on bfqq, the |
* blocking I/O cannot happen to be served while bfqq is in
6008 | * service. As a consequence, if bfqq is granted |
6009 | * I/O-dispatch-plugging, then bfqq remains empty, and no I/O |
6010 | * is dispatched, until the idle timeout fires. This is likely |
6011 | * to result in lower bandwidth and higher latencies for bfqq, |
6012 | * and in a severe loss of total throughput. |
6013 | * |
6014 | * On the opposite end, a non-zero inject limit may allow the |
6015 | * I/O that blocks bfqq to be executed soon, and therefore |
6016 | * bfqq to receive new I/O soon. |
6017 | * |
6018 | * But, if the blocking gets actually eliminated, then the |
6019 | * next think-time sample for bfqq may be very low. This in |
6020 | * turn may cause bfqq's think time to be deemed |
6021 | * short. Without the 100 ms barrier, this new state change |
6022 | * would cause the body of the next if to be executed |
6023 | * immediately. But this would set to 0 the inject |
6024 | * limit. Without injection, the blocking I/O would cause the |
6025 | * think time of bfqq to become long again, and therefore the |
6026 | * inject limit to be raised again, and so on. The only effect |
6027 | * of such a steady oscillation between the two think-time |
6028 | * states would be to prevent effective injection on bfqq. |
6029 | * |
6030 | * In contrast, if the inject limit is not reset during such a |
6031 | * long time interval as 100 ms, then the number of short |
6032 | * think time samples can grow significantly before the reset |
6033 | * is performed. As a consequence, the think time state can |
6034 | * become stable before the reset. Therefore there will be no |
6035 | * state change when the 100 ms elapse, and no reset of the |
6036 | * inject limit. The inject limit remains steadily equal to 1 |
6037 | * both during and after the 100 ms. So injection can be |
6038 | * performed at all times, and throughput gets boosted. |
6039 | * |
6040 | * An inject limit equal to 1 is however in conflict, in |
6041 | * general, with the fact that the think time of bfqq is |
6042 | * short, because injection may be likely to delay bfqq's I/O |
6043 | * (as explained in the comments in |
6044 | * bfq_update_inject_limit()). But this does not happen in |
6045 | * this special case, because bfqq's low think time is due to |
6046 | * an effective handling of a synchronization, through |
6047 | * injection. In this special case, bfqq's I/O does not get |
6048 | * delayed by injection; on the contrary, bfqq's I/O is |
6049 | * brought forward, because it is not blocked for |
6050 | * milliseconds. |
6051 | * |
6052 | * In addition, serving the blocking I/O much sooner, and much |
6053 | * more frequently than once per I/O-plugging timeout, makes |
6054 | * it much quicker to detect a waker queue (the concept of |
6055 | * waker queue is defined in the comments in |
6056 | * bfq_add_request()). This makes it possible to start sooner |
6057 | * to boost throughput more effectively, by injecting the I/O |
6058 | * of the waker queue unconditionally on every |
6059 | * bfq_dispatch_request(). |
6060 | * |
6061 | * One last, important benefit of not resetting the inject |
6062 | * limit before 100 ms is that, during this time interval, the |
6063 | * base value for the total service time is likely to get |
6064 | * finally computed for bfqq, freeing the inject limit from |
6065 | * its relation with the think time. |
6066 | */ |
6067 | if (state_changed && bfqq->last_serv_time_ns == 0 && |
6068 | (time_is_before_eq_jiffies(bfqq->decrease_time_jif + |
6069 | msecs_to_jiffies(100)) || |
6070 | !has_short_ttime)) |
6071 | bfq_reset_inject_limit(bfqd, bfqq); |
6072 | } |
6073 | |
6074 | /* |
6075 | * Called when a new fs request (rq) is added to bfqq. Check if there's |
6076 | * something we should do about it. |
6077 | */ |
6078 | static void bfq_rq_enqueued(struct bfq_data *bfqd, struct bfq_queue *bfqq, |
6079 | struct request *rq) |
6080 | { |
6081 | if (rq->cmd_flags & REQ_META) |
6082 | bfqq->meta_pending++; |
6083 | |
6084 | bfqq->last_request_pos = blk_rq_pos(rq) + blk_rq_sectors(rq); |
6085 | |
6086 | if (bfqq == bfqd->in_service_queue && bfq_bfqq_wait_request(bfqq)) { |
6087 | bool small_req = bfqq->queued[rq_is_sync(rq)] == 1 && |
6088 | blk_rq_sectors(rq) < 32; |
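/*
* Note: blk_rq_sectors() counts 512-byte sectors, so the "< 32"
* check above treats requests smaller than 16 KiB as small.
*/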
6089 | bool budget_timeout = bfq_bfqq_budget_timeout(bfqq); |
6090 | |
6091 | /* |
6092 | * There is just this request queued: if |
6093 | * - the request is small, and |
6094 | * - we are idling to boost throughput, and |
6095 | * - the queue is not to be expired, |
6096 | * then just exit. |
6097 | * |
6098 | * In this way, if the device is being idled to wait |
6099 | * for a new request from the in-service queue, we |
6100 | * avoid unplugging the device and committing the |
6101 | * device to serve just a small request. In contrast |
6102 | * we wait for the block layer to decide when to |
6103 | * unplug the device: hopefully, new requests will be |
6104 | * merged to this one quickly, then the device will be |
6105 | * unplugged and larger requests will be dispatched. |
6106 | */ |
6107 | if (small_req && idling_boosts_thr_without_issues(bfqd, bfqq) && |
6108 | !budget_timeout) |
6109 | return; |
6110 | |
6111 | /* |
6112 | * A large enough request arrived, or idling is being |
6113 | * performed to preserve service guarantees, or |
6114 | * finally the queue is to be expired: in all these |
6115 | * cases disk idling is to be stopped, so clear |
6116 | * wait_request flag and reset timer. |
6117 | */ |
6118 | bfq_clear_bfqq_wait_request(bfqq); |
hrtimer_try_to_cancel(&bfqd->idle_slice_timer);
6120 | |
6121 | /* |
6122 | * The queue is not empty, because a new request just |
6123 | * arrived. Hence we can safely expire the queue, in |
6124 | * case of budget timeout, without risking that the |
6125 | * timestamps of the queue are not updated correctly. |
6126 | * See [1] for more details. |
6127 | */ |
6128 | if (budget_timeout) |
bfq_bfqq_expire(bfqd, bfqq, false,
BFQQE_BUDGET_TIMEOUT);
6131 | } |
6132 | } |
6133 | |
6134 | static void bfqq_request_allocated(struct bfq_queue *bfqq) |
6135 | { |
6136 | struct bfq_entity *entity = &bfqq->entity; |
6137 | |
6138 | for_each_entity(entity) |
6139 | entity->allocated++; |
6140 | } |
6141 | |
6142 | static void bfqq_request_freed(struct bfq_queue *bfqq) |
6143 | { |
6144 | struct bfq_entity *entity = &bfqq->entity; |
6145 | |
6146 | for_each_entity(entity) |
6147 | entity->allocated--; |
6148 | } |
6149 | |
6150 | /* returns true if it causes the idle timer to be disabled */ |
6151 | static bool __bfq_insert_request(struct bfq_data *bfqd, struct request *rq) |
6152 | { |
6153 | struct bfq_queue *bfqq = RQ_BFQQ(rq), |
*new_bfqq = bfq_setup_cooperator(bfqd, bfqq, rq, true,
6155 | RQ_BIC(rq)); |
6156 | bool waiting, idle_timer_disabled = false; |
6157 | |
6158 | if (new_bfqq) { |
6159 | /* |
6160 | * Release the request's reference to the old bfqq |
6161 | * and make sure one is taken to the shared queue. |
6162 | */ |
bfqq_request_allocated(new_bfqq);
6164 | bfqq_request_freed(bfqq); |
6165 | new_bfqq->ref++; |
6166 | /* |
6167 | * If the bic associated with the process |
6168 | * issuing this request still points to bfqq |
6169 | * (and thus has not been already redirected |
6170 | * to new_bfqq or even some other bfq_queue), |
6171 | * then complete the merge and redirect it to |
6172 | * new_bfqq. |
6173 | */ |
if (bic_to_bfqq(RQ_BIC(rq), true,
bfq_actuator_index(bfqd, rq->bio)) == bfqq)
6176 | bfq_merge_bfqqs(bfqd, RQ_BIC(rq), |
6177 | bfqq, new_bfqq); |
6178 | |
6179 | bfq_clear_bfqq_just_created(bfqq); |
6180 | /* |
6181 | * rq is about to be enqueued into new_bfqq, |
6182 | * release rq reference on bfqq |
6183 | */ |
6184 | bfq_put_queue(bfqq); |
6185 | rq->elv.priv[1] = new_bfqq; |
6186 | bfqq = new_bfqq; |
6187 | } |
6188 | |
6189 | bfq_update_io_thinktime(bfqd, bfqq); |
6190 | bfq_update_has_short_ttime(bfqd, bfqq, RQ_BIC(rq)); |
6191 | bfq_update_io_seektime(bfqd, bfqq, rq); |
6192 | |
6193 | waiting = bfqq && bfq_bfqq_wait_request(bfqq); |
6194 | bfq_add_request(rq); |
6195 | idle_timer_disabled = waiting && !bfq_bfqq_wait_request(bfqq); |
6196 | |
6197 | rq->fifo_time = ktime_get_ns() + bfqd->bfq_fifo_expire[rq_is_sync(rq)]; |
list_add_tail(&rq->queuelist, &bfqq->fifo);
6199 | |
6200 | bfq_rq_enqueued(bfqd, bfqq, rq); |
6201 | |
6202 | return idle_timer_disabled; |
6203 | } |
6204 | |
6205 | #ifdef CONFIG_BFQ_CGROUP_DEBUG |
6206 | static void bfq_update_insert_stats(struct request_queue *q, |
6207 | struct bfq_queue *bfqq, |
6208 | bool idle_timer_disabled, |
6209 | blk_opf_t cmd_flags) |
6210 | { |
6211 | if (!bfqq) |
6212 | return; |
6213 | |
6214 | /* |
6215 | * bfqq still exists, because it can disappear only after |
6216 | * either it is merged with another queue, or the process it |
6217 | * is associated with exits. But both actions must be taken by |
6218 | * the same process currently executing this flow of |
6219 | * instructions. |
6220 | * |
6221 | * In addition, the following queue lock guarantees that |
6222 | * bfqq_group(bfqq) exists as well. |
6223 | */ |
spin_lock_irq(&q->queue_lock);
bfqg_stats_update_io_add(bfqq_group(bfqq), bfqq, cmd_flags);
if (idle_timer_disabled)
bfqg_stats_update_idle_time(bfqq_group(bfqq));
spin_unlock_irq(&q->queue_lock);
6229 | } |
6230 | #else |
6231 | static inline void bfq_update_insert_stats(struct request_queue *q, |
6232 | struct bfq_queue *bfqq, |
6233 | bool idle_timer_disabled, |
6234 | blk_opf_t cmd_flags) {} |
6235 | #endif /* CONFIG_BFQ_CGROUP_DEBUG */ |
6236 | |
6237 | static struct bfq_queue *bfq_init_rq(struct request *rq); |
6238 | |
6239 | static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq, |
6240 | blk_insert_t flags) |
6241 | { |
6242 | struct request_queue *q = hctx->queue; |
6243 | struct bfq_data *bfqd = q->elevator->elevator_data; |
6244 | struct bfq_queue *bfqq; |
6245 | bool idle_timer_disabled = false; |
6246 | blk_opf_t cmd_flags; |
6247 | LIST_HEAD(free); |
6248 | |
6249 | #ifdef CONFIG_BFQ_GROUP_IOSCHED |
6250 | if (!cgroup_subsys_on_dfl(io_cgrp_subsys) && rq->bio) |
6251 | bfqg_stats_update_legacy_io(q, rq); |
6252 | #endif |
spin_lock_irq(&bfqd->lock);
bfqq = bfq_init_rq(rq);
if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
spin_unlock_irq(&bfqd->lock);
blk_mq_free_requests(&free);
6258 | return; |
6259 | } |
6260 | |
6261 | trace_block_rq_insert(rq); |
6262 | |
6263 | if (flags & BLK_MQ_INSERT_AT_HEAD) { |
list_add(&rq->queuelist, &bfqd->dispatch);
6265 | } else if (!bfqq) { |
list_add_tail(&rq->queuelist, &bfqd->dispatch);
6267 | } else { |
6268 | idle_timer_disabled = __bfq_insert_request(bfqd, rq); |
6269 | /* |
6270 | * Update bfqq, because, if a queue merge has occurred |
6271 | * in __bfq_insert_request, then rq has been |
6272 | * redirected into a new queue. |
6273 | */ |
6274 | bfqq = RQ_BFQQ(rq); |
6275 | |
6276 | if (rq_mergeable(rq)) { |
6277 | elv_rqhash_add(q, rq); |
6278 | if (!q->last_merge) |
6279 | q->last_merge = rq; |
6280 | } |
6281 | } |
6282 | |
6283 | /* |
6284 | * Cache cmd_flags before releasing scheduler lock, because rq |
6285 | * may disappear afterwards (for example, because of a request |
6286 | * merge). |
6287 | */ |
6288 | cmd_flags = rq->cmd_flags; |
spin_unlock_irq(&bfqd->lock);
6290 | |
6291 | bfq_update_insert_stats(q, bfqq, idle_timer_disabled, |
6292 | cmd_flags); |
6293 | } |
6294 | |
6295 | static void bfq_insert_requests(struct blk_mq_hw_ctx *hctx, |
6296 | struct list_head *list, |
6297 | blk_insert_t flags) |
6298 | { |
while (!list_empty(list)) {
6300 | struct request *rq; |
6301 | |
6302 | rq = list_first_entry(list, struct request, queuelist); |
list_del_init(&rq->queuelist);
6304 | bfq_insert_request(hctx, rq, flags); |
6305 | } |
6306 | } |
6307 | |
6308 | static void bfq_update_hw_tag(struct bfq_data *bfqd) |
6309 | { |
6310 | struct bfq_queue *bfqq = bfqd->in_service_queue; |
6311 | |
6312 | bfqd->max_rq_in_driver = max_t(int, bfqd->max_rq_in_driver, |
6313 | bfqd->tot_rq_in_driver); |
6314 | |
6315 | if (bfqd->hw_tag == 1) |
6316 | return; |
6317 | |
6318 | /* |
6319 | * This sample is valid if the number of outstanding requests |
6320 | * is large enough to allow a queueing behavior. Note that the |
6321 | * sum is not exact, as it's not taking into account deactivated |
6322 | * requests. |
6323 | */ |
6324 | if (bfqd->tot_rq_in_driver + bfqd->queued <= BFQ_HW_QUEUE_THRESHOLD) |
6325 | return; |
6326 | |
6327 | /* |
* If the active queue does not have enough requests and can idle,
* BFQ might not dispatch sufficient requests to the hardware. Don't
* zero hw_tag in this case.
6331 | */ |
6332 | if (bfqq && bfq_bfqq_has_short_ttime(bfqq) && |
6333 | bfqq->dispatched + bfqq->queued[0] + bfqq->queued[1] < |
6334 | BFQ_HW_QUEUE_THRESHOLD && |
6335 | bfqd->tot_rq_in_driver < BFQ_HW_QUEUE_THRESHOLD) |
6336 | return; |
6337 | |
6338 | if (bfqd->hw_tag_samples++ < BFQ_HW_QUEUE_SAMPLES) |
6339 | return; |
6340 | |
6341 | bfqd->hw_tag = bfqd->max_rq_in_driver > BFQ_HW_QUEUE_THRESHOLD; |
6342 | bfqd->max_rq_in_driver = 0; |
6343 | bfqd->hw_tag_samples = 0; |
6344 | |
6345 | bfqd->nonrot_with_queueing = |
6346 | blk_queue_nonrot(bfqd->queue) && bfqd->hw_tag; |
6347 | } |
6348 | |
6349 | static void bfq_completed_request(struct bfq_queue *bfqq, struct bfq_data *bfqd) |
6350 | { |
6351 | u64 now_ns; |
6352 | u32 delta_us; |
6353 | |
6354 | bfq_update_hw_tag(bfqd); |
6355 | |
6356 | bfqd->rq_in_driver[bfqq->actuator_idx]--; |
6357 | bfqd->tot_rq_in_driver--; |
6358 | bfqq->dispatched--; |
6359 | |
6360 | if (!bfqq->dispatched && !bfq_bfqq_busy(bfqq)) { |
6361 | /* |
6362 | * Set budget_timeout (which we overload to store the |
6363 | * time at which the queue remains with no backlog and |
6364 | * no outstanding request; used by the weight-raising |
6365 | * mechanism). |
6366 | */ |
6367 | bfqq->budget_timeout = jiffies; |
6368 | |
6369 | bfq_del_bfqq_in_groups_with_pending_reqs(bfqq); |
6370 | bfq_weights_tree_remove(bfqq); |
6371 | } |
6372 | |
6373 | now_ns = ktime_get_ns(); |
6374 | |
6375 | bfqq->ttime.last_end_request = now_ns; |
6376 | |
6377 | /* |
6378 | * Using us instead of ns, to get a reasonable precision in |
6379 | * computing rate in next check. |
6380 | */ |
delta_us = div_u64(now_ns - bfqd->last_completion, NSEC_PER_USEC);
6382 | |
6383 | /* |
6384 | * If the request took rather long to complete, and, according |
6385 | * to the maximum request size recorded, this completion latency |
6386 | * implies that the request was certainly served at a very low |
6387 | * rate (less than 1M sectors/sec), then the whole observation |
6388 | * interval that lasts up to this time instant cannot be a |
6389 | * valid time interval for computing a new peak rate. Invoke |
6390 | * bfq_update_rate_reset to have the following three steps |
6391 | * taken: |
6392 | * - close the observation interval at the last (previous) |
6393 | * request dispatch or completion |
6394 | * - compute rate, if possible, for that observation interval |
6395 | * - reset to zero samples, which will trigger a proper |
6396 | * re-initialization of the observation interval on next |
6397 | * dispatch |
6398 | */ |
6399 | if (delta_us > BFQ_MIN_TT/NSEC_PER_USEC && |
6400 | (bfqd->last_rq_max_size<<BFQ_RATE_SHIFT)/delta_us < |
6401 | 1UL<<(BFQ_RATE_SHIFT - 10)) |
6402 | bfq_update_rate_reset(bfqd, NULL); |
6403 | bfqd->last_completion = now_ns; |
6404 | /* |
6405 | * Shared queues are likely to receive I/O at a high |
6406 | * rate. This may deceptively let them be considered as wakers |
6407 | * of other queues. But a false waker will unjustly steal |
* bandwidth from its supposedly woken queue. So considering
6409 | * also shared queues in the waking mechanism may cause more |
6410 | * control troubles than throughput benefits. Then reset |
6411 | * last_completed_rq_bfqq if bfqq is a shared queue. |
6412 | */ |
6413 | if (!bfq_bfqq_coop(bfqq)) |
6414 | bfqd->last_completed_rq_bfqq = bfqq; |
6415 | else |
6416 | bfqd->last_completed_rq_bfqq = NULL; |
6417 | |
6418 | /* |
6419 | * If we are waiting to discover whether the request pattern |
6420 | * of the task associated with the queue is actually |
6421 | * isochronous, and both requisites for this condition to hold |
6422 | * are now satisfied, then compute soft_rt_next_start (see the |
6423 | * comments on the function bfq_bfqq_softrt_next_start()). We |
6424 | * do not compute soft_rt_next_start if bfqq is in interactive |
6425 | * weight raising (see the comments in bfq_bfqq_expire() for |
6426 | * an explanation). We schedule this delayed update when bfqq |
6427 | * expires, if it still has in-flight requests. |
6428 | */ |
6429 | if (bfq_bfqq_softrt_update(bfqq) && bfqq->dispatched == 0 && |
6430 | RB_EMPTY_ROOT(&bfqq->sort_list) && |
6431 | bfqq->wr_coeff != bfqd->bfq_wr_coeff) |
6432 | bfqq->soft_rt_next_start = |
6433 | bfq_bfqq_softrt_next_start(bfqd, bfqq); |
6434 | |
6435 | /* |
6436 | * If this is the in-service queue, check if it needs to be expired, |
6437 | * or if we want to idle in case it has no pending requests. |
6438 | */ |
6439 | if (bfqd->in_service_queue == bfqq) { |
6440 | if (bfq_bfqq_must_idle(bfqq)) { |
6441 | if (bfqq->dispatched == 0) |
6442 | bfq_arm_slice_timer(bfqd); |
6443 | /* |
6444 | * If we get here, we do not expire bfqq, even |
6445 | * if bfqq was in budget timeout or had no |
6446 | * more requests (as controlled in the next |
6447 | * conditional instructions). The reason for |
6448 | * not expiring bfqq is as follows. |
6449 | * |
6450 | * Here bfqq->dispatched > 0 holds, but |
6451 | * bfq_bfqq_must_idle() returned true. This |
6452 | * implies that, even if no request arrives |
6453 | * for bfqq before bfqq->dispatched reaches 0, |
6454 | * bfqq will, however, not be expired on the |
* completion event that causes bfqq->dispatched
6456 | * to reach zero. In contrast, on this event, |
6457 | * bfqq will start enjoying device idling |
6458 | * (I/O-dispatch plugging). |
6459 | * |
6460 | * But, if we expired bfqq here, bfqq would |
6461 | * not have the chance to enjoy device idling |
6462 | * when bfqq->dispatched finally reaches |
6463 | * zero. This would expose bfqq to violation |
6464 | * of its reserved service guarantees. |
6465 | */ |
6466 | return; |
6467 | } else if (bfq_may_expire_for_budg_timeout(bfqq)) |
bfq_bfqq_expire(bfqd, bfqq, false,
BFQQE_BUDGET_TIMEOUT);
6470 | else if (RB_EMPTY_ROOT(&bfqq->sort_list) && |
6471 | (bfqq->dispatched == 0 || |
6472 | !bfq_better_to_idle(bfqq))) |
bfq_bfqq_expire(bfqd, bfqq, false,
BFQQE_NO_MORE_REQUESTS);
6475 | } |
6476 | |
6477 | if (!bfqd->tot_rq_in_driver) |
6478 | bfq_schedule_dispatch(bfqd); |
6479 | } |
6480 | |
6481 | /* |
6482 | * The processes associated with bfqq may happen to generate their |
6483 | * cumulative I/O at a lower rate than the rate at which the device |
6484 | * could serve the same I/O. This is rather probable, e.g., if only |
6485 | * one process is associated with bfqq and the device is an SSD. It |
6486 | * results in bfqq becoming often empty while in service. In this |
6487 | * respect, if BFQ is allowed to switch to another queue when bfqq |
6488 | * remains empty, then the device goes on being fed with I/O requests, |
6489 | * and the throughput is not affected. In contrast, if BFQ is not |
6490 | * allowed to switch to another queue---because bfqq is sync and |
6491 | * I/O-dispatch needs to be plugged while bfqq is temporarily |
6492 | * empty---then, during the service of bfqq, there will be frequent |
6493 | * "service holes", i.e., time intervals during which bfqq gets empty |
6494 | * and the device can only consume the I/O already queued in its |
* hardware queues. During service holes, the device may even
* remain idle. In the end, during the service of bfqq, the device
6497 | * is driven at a lower speed than the one it can reach with the kind |
6498 | * of I/O flowing through bfqq. |
6499 | * |
6500 | * To counter this loss of throughput, BFQ implements a "request |
6501 | * injection mechanism", which tries to fill the above service holes |
6502 | * with I/O requests taken from other queues. The hard part in this |
6503 | * mechanism is finding the right amount of I/O to inject, so as to |
6504 | * both boost throughput and not break bfqq's bandwidth and latency |
6505 | * guarantees. In this respect, the mechanism maintains a per-queue |
6506 | * inject limit, computed as below. While bfqq is empty, the injection |
6507 | * mechanism dispatches extra I/O requests only until the total number |
6508 | * of I/O requests in flight---i.e., already dispatched but not yet |
6509 | * completed---remains lower than this limit. |
6510 | * |
6511 | * A first definition comes in handy to introduce the algorithm by |
6512 | * which the inject limit is computed. We define as first request for |
6513 | * bfqq, an I/O request for bfqq that arrives while bfqq is in |
6514 | * service, and causes bfqq to switch from empty to non-empty. The |
6515 | * algorithm updates the limit as a function of the effect of |
6516 | * injection on the service times of only the first requests of |
6517 | * bfqq. The reason for this restriction is that these are the |
6518 | * requests whose service time is affected most, because they are the |
6519 | * first to arrive after injection possibly occurred. |
6520 | * |
6521 | * To evaluate the effect of injection, the algorithm measures the |
6522 | * "total service time" of first requests. We define as total service |
6523 | * time of an I/O request, the time that elapses since when the |
6524 | * request is enqueued into bfqq, to when it is completed. This |
6525 | * quantity allows the whole effect of injection to be measured. It is |
6526 | * easy to see why. Suppose that some requests of other queues are |
6527 | * actually injected while bfqq is empty, and that a new request R |
6528 | * then arrives for bfqq. If the device does start to serve all or |
6529 | * part of the injected requests during the service hole, then, |
6530 | * because of this extra service, it may delay the next invocation of |
6531 | * the dispatch hook of BFQ. Then, even after R gets eventually |
6532 | * dispatched, the device may delay the actual service of R if it is |
6533 | * still busy serving the extra requests, or if it decides to serve, |
6534 | * before R, some extra request still present in its queues. As a |
6535 | * conclusion, the cumulative extra delay caused by injection can be |
6536 | * easily evaluated by just comparing the total service time of first |
6537 | * requests with and without injection. |
6538 | * |
6539 | * The limit-update algorithm works as follows. On the arrival of a |
6540 | * first request of bfqq, the algorithm measures the total time of the |
6541 | * request only if one of the three cases below holds, and, for each |
6542 | * case, it updates the limit as described below: |
6543 | * |
6544 | * (1) If there is no in-flight request. This gives a baseline for the |
6545 | * total service time of the requests of bfqq. If the baseline has |
6546 | * not been computed yet, then, after computing it, the limit is |
6547 | * set to 1, to start boosting throughput, and to prepare the |
6548 | * ground for the next case. If the baseline has already been |
*     computed, then it is updated, if it turns out to be lower
*     than the previous value.
6551 | * |
6552 | * (2) If the limit is higher than 0 and there are in-flight |
6553 | * requests. By comparing the total service time in this case with |
6554 | * the above baseline, it is possible to know at which extent the |
6555 | * current value of the limit is inflating the total service |
6556 | * time. If the inflation is below a certain threshold, then bfqq |
6557 | * is assumed to be suffering from no perceivable loss of its |
6558 | * service guarantees, and the limit is even tentatively |
6559 | * increased. If the inflation is above the threshold, then the |
6560 | * limit is decreased. Due to the lack of any hysteresis, this |
6561 | * logic makes the limit oscillate even in steady workload |
6562 | * conditions. Yet we opted for it, because it is fast in reaching |
6563 | * the best value for the limit, as a function of the current I/O |
6564 | * workload. To reduce oscillations, this step is disabled for a |
6565 | * short time interval after the limit happens to be decreased. |
6566 | * |
6567 | * (3) Periodically, after resetting the limit, to make sure that the |
6568 | * limit eventually drops in case the workload changes. This is |
6569 | * needed because, after the limit has gone safely up for a |
6570 | * certain workload, it is impossible to guess whether the |
6571 | * baseline total service time may have changed, without measuring |
6572 | * it again without injection. A more effective version of this |
6573 | * step might be to just sample the baseline, by interrupting |
6574 | * injection only once, and then to reset/lower the limit only if |
6575 | * the total service time with the current limit does happen to be |
6576 | * too large. |
6577 | * |
6578 | * More details on each step are provided in the comments on the |
6579 | * pieces of code that implement these steps: the branch handling the |
 * transition from empty to non-empty in bfq_add_request(), the branch
6581 | * handling injection in bfq_select_queue(), and the function |
6582 | * bfq_choose_bfqq_for_injection(). These comments also explain some |
 * exceptions made by the injection mechanism in some special cases.
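 *
 * As a purely illustrative example (the numbers are not taken from
 * any measurement): if the baseline total service time of bfqq's
 * first requests is 1 ms, then, with the 3/2 threshold computed in
 * bfq_update_inject_limit(), the limit is deemed safe, and possibly
 * increased, as long as first requests keep completing within 1.5 ms
 * while injection is enabled; if they take longer, the limit is
 * decreased.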
6584 | */ |
6585 | static void bfq_update_inject_limit(struct bfq_data *bfqd, |
6586 | struct bfq_queue *bfqq) |
6587 | { |
6588 | u64 tot_time_ns = ktime_get_ns() - bfqd->last_empty_occupied_ns; |
6589 | unsigned int old_limit = bfqq->inject_limit; |
6590 | |
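	/*
	 * Injection took place and a baseline is available: this is,
	 * essentially, step (2) of the algorithm described above. The
	 * threshold below is 150% of the baseline total service time;
	 * above it the limit is decreased, below it the limit may be
	 * tentatively increased, as long as it does not exceed the
	 * maximum number of requests seen in the drive
	 * (max_rq_in_driver).
	 */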
6591 | if (bfqq->last_serv_time_ns > 0 && bfqd->rqs_injected) { |
6592 | u64 threshold = (bfqq->last_serv_time_ns * 3)>>1; |
6593 | |
6594 | if (tot_time_ns >= threshold && old_limit > 0) { |
6595 | bfqq->inject_limit--; |
6596 | bfqq->decrease_time_jif = jiffies; |
6597 | } else if (tot_time_ns < threshold && |
6598 | old_limit <= bfqd->max_rq_in_driver) |
6599 | bfqq->inject_limit++; |
6600 | } |
6601 | |
6602 | /* |
6603 | * Either we still have to compute the base value for the |
6604 | * total service time, and there seem to be the right |
6605 | * conditions to do it, or we can lower the last base value |
6606 | * computed. |
6607 | * |
6608 | * NOTE: (bfqd->tot_rq_in_driver == 1) means that there is no I/O |
6609 | * request in flight, because this function is in the code |
6610 | * path that handles the completion of a request of bfqq, and, |
6611 | * in particular, this function is executed before |
6612 | * bfqd->tot_rq_in_driver is decremented in such a code path. |
6613 | */ |
6614 | if ((bfqq->last_serv_time_ns == 0 && bfqd->tot_rq_in_driver == 1) || |
6615 | tot_time_ns < bfqq->last_serv_time_ns) { |
6616 | if (bfqq->last_serv_time_ns == 0) { |
6617 | /* |
6618 | * Now we certainly have a base value: make sure we |
6619 | * start trying injection. |
6620 | */ |
6621 | bfqq->inject_limit = max_t(unsigned int, 1, old_limit); |
6622 | } |
6623 | bfqq->last_serv_time_ns = tot_time_ns; |
6624 | } else if (!bfqd->rqs_injected && bfqd->tot_rq_in_driver == 1) |
6625 | /* |
6626 | * No I/O injected and no request still in service in |
6627 | * the drive: these are the exact conditions for |
6628 | * computing the base value of the total service time |
6629 | * for bfqq. So let's update this value, because it is |
6630 | * rather variable. For example, it varies if the size |
6631 | * or the spatial locality of the I/O requests in bfqq |
6632 | * change. |
6633 | */ |
6634 | bfqq->last_serv_time_ns = tot_time_ns; |
6635 | |
6637 | /* update complete, not waiting for any request completion any longer */ |
6638 | bfqd->waited_rq = NULL; |
6639 | bfqd->rqs_injected = false; |
6640 | } |
6641 | |
6642 | /* |
6643 | * Handle either a requeue or a finish for rq. The things to do are |
6644 | * the same in both cases: all references to rq are to be dropped. In |
6645 | * particular, rq is considered completed from the point of view of |
6646 | * the scheduler. |
6647 | */ |
6648 | static void bfq_finish_requeue_request(struct request *rq) |
6649 | { |
6650 | struct bfq_queue *bfqq = RQ_BFQQ(rq); |
6651 | struct bfq_data *bfqd; |
6652 | unsigned long flags; |
6653 | |
6654 | /* |
6655 | * rq either is not associated with any icq, or is an already |
6656 | * requeued request that has not (yet) been re-inserted into |
6657 | * a bfq_queue. |
6658 | */ |
6659 | if (!rq->elv.icq || !bfqq) |
6660 | return; |
6661 | |
6662 | bfqd = bfqq->bfqd; |
6663 | |
6664 | if (rq->rq_flags & RQF_STARTED) |
		bfqg_stats_update_completion(bfqq_group(bfqq),
					     rq->start_time_ns,
					     rq->io_start_time_ns,
					     rq->cmd_flags);
6669 | |
6670 | spin_lock_irqsave(&bfqd->lock, flags); |
6671 | if (likely(rq->rq_flags & RQF_STARTED)) { |
6672 | if (rq == bfqd->waited_rq) |
6673 | bfq_update_inject_limit(bfqd, bfqq); |
6674 | |
6675 | bfq_completed_request(bfqq, bfqd); |
6676 | } |
6677 | bfqq_request_freed(bfqq); |
6678 | bfq_put_queue(bfqq); |
6679 | RQ_BIC(rq)->requests--; |
	spin_unlock_irqrestore(&bfqd->lock, flags);
6681 | |
6682 | /* |
6683 | * Reset private fields. In case of a requeue, this allows |
6684 | * this function to correctly do nothing if it is spuriously |
6685 | * invoked again on this same request (see the check at the |
6686 | * beginning of the function). Probably, a better general |
6687 | * design would be to prevent blk-mq from invoking the requeue |
6688 | * or finish hooks of an elevator, for a request that is not |
6689 | * referred by that elevator. |
6690 | * |
6691 | * Resetting the following fields would break the |
6692 | * request-insertion logic if rq is re-inserted into a bfq |
6693 | * internal queue, without a re-preparation. Here we assume |
6694 | * that re-insertions of requeued requests, without |
6695 | * re-preparation, can happen only for pass_through or at_head |
6696 | * requests (which are not re-inserted into bfq internal |
6697 | * queues). |
6698 | */ |
6699 | rq->elv.priv[0] = NULL; |
6700 | rq->elv.priv[1] = NULL; |
6701 | } |
6702 | |
6703 | static void bfq_finish_request(struct request *rq) |
6704 | { |
6705 | bfq_finish_requeue_request(rq); |
6706 | |
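	/*
	 * Drop the io_context reference taken by ioc_find_get_icq() in
	 * bfq_prepare_request(), if an icq was actually attached.
	 */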
6707 | if (rq->elv.icq) { |
		put_io_context(rq->elv.icq->ioc);
6709 | rq->elv.icq = NULL; |
6710 | } |
6711 | } |
6712 | |
6713 | /* |
6714 | * Removes the association between the current task and bfqq, assuming |
6715 | * that bic points to the bfq iocontext of the task. |
6716 | * Returns NULL if a new bfqq should be allocated, or the old bfqq if this |
6717 | * was the last process referring to that bfqq. |
6718 | */ |
6719 | static struct bfq_queue * |
6720 | bfq_split_bfqq(struct bfq_io_cq *bic, struct bfq_queue *bfqq) |
6721 | { |
	bfq_log_bfqq(bfqq->bfqd, bfqq, "splitting queue");
6723 | |
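	/*
	 * If bfqq is referenced only by this process, keep it for the
	 * current task and simply turn it back into a non-cooperating
	 * queue.
	 */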
6724 | if (bfqq_process_refs(bfqq) == 1) { |
6725 | bfqq->pid = current->pid; |
6726 | bfq_clear_bfqq_coop(bfqq); |
6727 | bfq_clear_bfqq_split_coop(bfqq); |
6728 | return bfqq; |
6729 | } |
6730 | |
	bic_set_bfqq(bic, NULL, true, bfqq->actuator_idx);

	bfq_put_cooperator(bfqq);

	bfq_release_process_ref(bfqq->bfqd, bfqq);
6736 | return NULL; |
6737 | } |
6738 | |
6739 | static struct bfq_queue *bfq_get_bfqq_handle_split(struct bfq_data *bfqd, |
6740 | struct bfq_io_cq *bic, |
6741 | struct bio *bio, |
6742 | bool split, bool is_sync, |
6743 | bool *new_queue) |
6744 | { |
6745 | unsigned int act_idx = bfq_actuator_index(bfqd, bio); |
	struct bfq_queue *bfqq = bic_to_bfqq(bic, is_sync, act_idx);
6747 | struct bfq_iocq_bfqq_data *bfqq_data = &bic->bfqq_data[act_idx]; |
6748 | |
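	/*
	 * bic already points to a valid queue, other than the oom
	 * fallback queue: nothing to do, just return it.
	 */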
6749 | if (likely(bfqq && bfqq != &bfqd->oom_bfqq)) |
6750 | return bfqq; |
6751 | |
6752 | if (new_queue) |
6753 | *new_queue = true; |
6754 | |
6755 | if (bfqq) |
6756 | bfq_put_queue(bfqq); |
	bfqq = bfq_get_queue(bfqd, bio, is_sync, bic, split);

	bic_set_bfqq(bic, bfqq, is_sync, act_idx);
6760 | if (split && is_sync) { |
6761 | if ((bfqq_data->was_in_burst_list && bfqd->large_burst) || |
6762 | bfqq_data->saved_in_large_burst) |
6763 | bfq_mark_bfqq_in_large_burst(bfqq); |
6764 | else { |
6765 | bfq_clear_bfqq_in_large_burst(bfqq); |
6766 | if (bfqq_data->was_in_burst_list) |
6767 | /* |
6768 | * If bfqq was in the current |
6769 | * burst list before being |
6770 | * merged, then we have to add |
6771 | * it back. And we do not need |
6772 | * to increase burst_size, as |
6773 | * we did not decrement |
6774 | * burst_size when we removed |
6775 | * bfqq from the burst list as |
6776 | * a consequence of a merge |
6777 | * (see comments in |
6778 | * bfq_put_queue). In this |
6779 | * respect, it would be rather |
6780 | * costly to know whether the |
6781 | * current burst list is still |
6782 | * the same burst list from |
6783 | * which bfqq was removed on |
6784 | * the merge. To avoid this |
6785 | * cost, if bfqq was in a |
6786 | * burst list, then we add |
6787 | * bfqq to the current burst |
6788 | * list without any further |
6789 | * check. This can cause |
6790 | * inappropriate insertions, |
6791 | * but rarely enough to not |
6792 | * harm the detection of large |
6793 | * bursts significantly. |
6794 | */ |
				hlist_add_head(&bfqq->burst_list_node,
					       &bfqd->burst_list);
6797 | } |
6798 | bfqq->split_time = jiffies; |
6799 | } |
6800 | |
6801 | return bfqq; |
6802 | } |
6803 | |
6804 | /* |
6805 | * Only reset private fields. The actual request preparation will be |
6806 | * performed by bfq_init_rq, when rq is either inserted or merged. See |
6807 | * comments on bfq_init_rq for the reason behind this delayed |
6808 | * preparation. |
6809 | */ |
6810 | static void bfq_prepare_request(struct request *rq) |
6811 | { |
	rq->elv.icq = ioc_find_get_icq(rq->q);
6813 | |
6814 | /* |
6815 | * Regardless of whether we have an icq attached, we have to |
6816 | * clear the scheduler pointers, as they might point to |
6817 | * previously allocated bic/bfqq structs. |
6818 | */ |
6819 | rq->elv.priv[0] = rq->elv.priv[1] = NULL; |
6820 | } |
6821 | |
6822 | /* |
6823 | * If needed, init rq, allocate bfq data structures associated with |
6824 | * rq, and increment reference counters in the destination bfq_queue |
 * for rq. Return the destination bfq_queue for rq, or NULL if rq is
6826 | * not associated with any bfq_queue. |
6827 | * |
6828 | * This function is invoked by the functions that perform rq insertion |
6829 | * or merging. One may have expected the above preparation operations |
6830 | * to be performed in bfq_prepare_request, and not delayed to when rq |
6831 | * is inserted or merged. The rationale behind this delayed |
6832 | * preparation is that, after the prepare_request hook is invoked for |
6833 | * rq, rq may still be transformed into a request with no icq, i.e., a |
6834 | * request not associated with any queue. No bfq hook is invoked to |
6835 | * signal this transformation. As a consequence, should these |
6836 | * preparation operations be performed when the prepare_request hook |
6837 | * is invoked, and should rq be transformed one moment later, bfq |
6838 | * would end up in an inconsistent state, because it would have |
6839 | * incremented some queue counters for an rq destined to |
6840 | * transformation, without any chance to correctly lower these |
6841 | * counters back. In contrast, no transformation can still happen for |
6842 | * rq after rq has been inserted or merged. So, it is safe to execute |
6843 | * these preparation operations when rq is finally inserted or merged. |
6844 | */ |
6845 | static struct bfq_queue *bfq_init_rq(struct request *rq) |
6846 | { |
6847 | struct request_queue *q = rq->q; |
6848 | struct bio *bio = rq->bio; |
6849 | struct bfq_data *bfqd = q->elevator->elevator_data; |
6850 | struct bfq_io_cq *bic; |
6851 | const int is_sync = rq_is_sync(rq); |
6852 | struct bfq_queue *bfqq; |
6853 | bool new_queue = false; |
6854 | bool bfqq_already_existing = false, split = false; |
6855 | unsigned int a_idx = bfq_actuator_index(bfqd, bio); |
6856 | |
6857 | if (unlikely(!rq->elv.icq)) |
6858 | return NULL; |
6859 | |
6860 | /* |
6861 | * Assuming that RQ_BFQQ(rq) is set only if everything is set |
6862 | * for this rq. This holds true, because this function is |
6863 | * invoked only for insertion or merging, and, after such |
6864 | * events, a request cannot be manipulated any longer before |
6865 | * being removed from bfq. |
6866 | */ |
6867 | if (RQ_BFQQ(rq)) |
6868 | return RQ_BFQQ(rq); |
6869 | |
	bic = icq_to_bic(rq->elv.icq);

	bfq_check_ioprio_change(bic, bio);

	bfq_bic_update_cgroup(bic, bio);

	bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio, false, is_sync,
					 &new_queue);
6878 | |
6879 | if (likely(!new_queue)) { |
6880 | /* If the queue was seeky for too long, break it apart. */ |
6881 | if (bfq_bfqq_coop(bfqq) && bfq_bfqq_split_coop(bfqq) && |
6882 | !bic->bfqq_data[a_idx].stably_merged) { |
6883 | struct bfq_queue *old_bfqq = bfqq; |
6884 | |
6885 | /* Update bic before losing reference to bfqq */ |
6886 | if (bfq_bfqq_in_large_burst(bfqq)) |
6887 | bic->bfqq_data[a_idx].saved_in_large_burst = |
6888 | true; |
6889 | |
6890 | bfqq = bfq_split_bfqq(bic, bfqq); |
6891 | split = true; |
6892 | |
6893 | if (!bfqq) { |
				bfqq = bfq_get_bfqq_handle_split(bfqd, bic, bio,
								 true, is_sync,
								 NULL);
6897 | if (unlikely(bfqq == &bfqd->oom_bfqq)) |
6898 | bfqq_already_existing = true; |
6899 | } else |
6900 | bfqq_already_existing = true; |
6901 | |
6902 | if (!bfqq_already_existing) { |
6903 | bfqq->waker_bfqq = old_bfqq->waker_bfqq; |
6904 | bfqq->tentative_waker_bfqq = NULL; |
6905 | |
6906 | /* |
6907 | * If the waker queue disappears, then |
6908 | * new_bfqq->waker_bfqq must be |
6909 | * reset. So insert new_bfqq into the |
6910 | * woken_list of the waker. See |
6911 | * bfq_check_waker for details. |
6912 | */ |
6913 | if (bfqq->waker_bfqq) |
					hlist_add_head(&bfqq->woken_list_node,
						       &bfqq->waker_bfqq->woken_list);
6916 | } |
6917 | } |
6918 | } |
6919 | |
6920 | bfqq_request_allocated(bfqq); |
6921 | bfqq->ref++; |
6922 | bic->requests++; |
	bfq_log_bfqq(bfqd, bfqq, "get_request %p: bfqq %p, %d",
6924 | rq, bfqq, bfqq->ref); |
6925 | |
6926 | rq->elv.priv[0] = bic; |
6927 | rq->elv.priv[1] = bfqq; |
6928 | |
6929 | /* |
6930 | * If a bfq_queue has only one process reference, it is owned |
	 * by only this bic: we can then set bfqq->bic = bic. In
	 * addition, if the queue has also just been split, we have to
6933 | * resume its state. |
6934 | */ |
6935 | if (likely(bfqq != &bfqd->oom_bfqq) && bfqq_process_refs(bfqq) == 1) { |
6936 | bfqq->bic = bic; |
6937 | if (split) { |
6938 | /* |
6939 | * The queue has just been split from a shared |
6940 | * queue: restore the idle window and the |
6941 | * possible weight raising period. |
6942 | */ |
6943 | bfq_bfqq_resume_state(bfqq, bfqd, bic, |
					      bfqq_already_existing);
6945 | } |
6946 | } |
6947 | |
6948 | /* |
6949 | * Consider bfqq as possibly belonging to a burst of newly |
6950 | * created queues only if: |
6951 | * 1) A burst is actually happening (bfqd->burst_size > 0) |
6952 | * or |
6953 | * 2) There is no other active queue. In fact, if, in |
6954 | * contrast, there are active queues not belonging to the |
6955 | * possible burst bfqq may belong to, then there is no gain |
6956 | * in considering bfqq as belonging to a burst, and |
6957 | * therefore in not weight-raising bfqq. See comments on |
6958 | * bfq_handle_burst(). |
6959 | * |
6960 | * This filtering also helps eliminating false positives, |
6961 | * occurring when bfqq does not belong to an actual large |
6962 | * burst, but some background task (e.g., a service) happens |
6963 | * to trigger the creation of new queues very close to when |
6964 | * bfqq and its possible companion queues are created. See |
6965 | * comments on bfq_handle_burst() for further details also on |
6966 | * this issue. |
6967 | */ |
6968 | if (unlikely(bfq_bfqq_just_created(bfqq) && |
6969 | (bfqd->burst_size > 0 || |
6970 | bfq_tot_busy_queues(bfqd) == 0))) |
6971 | bfq_handle_burst(bfqd, bfqq); |
6972 | |
6973 | return bfqq; |
6974 | } |
6975 | |
6976 | static void |
6977 | bfq_idle_slice_timer_body(struct bfq_data *bfqd, struct bfq_queue *bfqq) |
6978 | { |
6979 | enum bfqq_expiration reason; |
6980 | unsigned long flags; |
6981 | |
6982 | spin_lock_irqsave(&bfqd->lock, flags); |
6983 | |
6984 | /* |
	 * Considering that bfqq may be in a race, we should first check
	 * whether bfqq is still the in-service queue before doing
	 * anything on it. If it is not, it has already been expired
	 * through __bfq_bfqq_expire(), and its wait_request flag has
	 * been cleared in __bfq_bfqd_reset_in_service().
6990 | */ |
6991 | if (bfqq != bfqd->in_service_queue) { |
		spin_unlock_irqrestore(&bfqd->lock, flags);
6993 | return; |
6994 | } |
6995 | |
6996 | bfq_clear_bfqq_wait_request(bfqq); |
6997 | |
6998 | if (bfq_bfqq_budget_timeout(bfqq)) |
6999 | /* |
7000 | * Also here the queue can be safely expired |
7001 | * for budget timeout without wasting |
7002 | * guarantees |
7003 | */ |
7004 | reason = BFQQE_BUDGET_TIMEOUT; |
7005 | else if (bfqq->queued[0] == 0 && bfqq->queued[1] == 0) |
7006 | /* |
7007 | * The queue may not be empty upon timer expiration, |
7008 | * because we may not disable the timer when the |
7009 | * first request of the in-service queue arrives |
7010 | * during disk idling. |
7011 | */ |
7012 | reason = BFQQE_TOO_IDLE; |
7013 | else |
7014 | goto schedule_dispatch; |
7015 | |
	bfq_bfqq_expire(bfqd, bfqq, true, reason);

schedule_dispatch:
	bfq_schedule_dispatch(bfqd);
	spin_unlock_irqrestore(&bfqd->lock, flags);
7021 | } |
7022 | |
7023 | /* |
7024 | * Handler of the expiration of the timer running if the in-service queue |
7025 | * is idling inside its time slice. |
7026 | */ |
7027 | static enum hrtimer_restart bfq_idle_slice_timer(struct hrtimer *timer) |
7028 | { |
7029 | struct bfq_data *bfqd = container_of(timer, struct bfq_data, |
7030 | idle_slice_timer); |
7031 | struct bfq_queue *bfqq = bfqd->in_service_queue; |
7032 | |
7033 | /* |
7034 | * Theoretical race here: the in-service queue can be NULL or |
7035 | * different from the queue that was idling if a new request |
7036 | * arrives for the current queue and there is a full dispatch |
7037 | * cycle that changes the in-service queue. This can hardly |
7038 | * happen, but in the worst case we just expire a queue too |
7039 | * early. |
7040 | */ |
7041 | if (bfqq) |
7042 | bfq_idle_slice_timer_body(bfqd, bfqq); |
7043 | |
7044 | return HRTIMER_NORESTART; |
7045 | } |
7046 | |
7047 | static void __bfq_put_async_bfqq(struct bfq_data *bfqd, |
7048 | struct bfq_queue **bfqq_ptr) |
7049 | { |
7050 | struct bfq_queue *bfqq = *bfqq_ptr; |
7051 | |
	bfq_log(bfqd, "put_async_bfqq: %p", bfqq);
	if (bfqq) {
		bfq_bfqq_move(bfqd, bfqq, bfqd->root_group);

		bfq_log_bfqq(bfqd, bfqq, "put_async_bfqq: putting %p, %d",
7057 | bfqq, bfqq->ref); |
7058 | bfq_put_queue(bfqq); |
7059 | *bfqq_ptr = NULL; |
7060 | } |
7061 | } |
7062 | |
7063 | /* |
7064 | * Release all the bfqg references to its async queues. If we are |
7065 | * deallocating the group these queues may still contain requests, so |
7066 | * we reparent them to the root cgroup (i.e., the only one that will |
7067 | * exist for sure until all the requests on a device are gone). |
7068 | */ |
7069 | void bfq_put_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg) |
7070 | { |
7071 | int i, j, k; |
7072 | |
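	/*
	 * The first two dimensions of async_bfqq cover, respectively,
	 * the RT and BE ioprio classes and the per-class ioprio levels;
	 * queues of the IDLE class are kept apart in async_idle_bfqq,
	 * one per actuator.
	 */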
7073 | for (k = 0; k < bfqd->num_actuators; k++) { |
7074 | for (i = 0; i < 2; i++) |
7075 | for (j = 0; j < IOPRIO_NR_LEVELS; j++) |
				__bfq_put_async_bfqq(bfqd, &bfqg->async_bfqq[i][j][k]);

		__bfq_put_async_bfqq(bfqd, &bfqg->async_idle_bfqq[k]);
7079 | } |
7080 | } |
7081 | |
7082 | /* |
7083 | * See the comments on bfq_limit_depth for the purpose of |
 * the depths set in the function.
7085 | */ |
7086 | static void bfq_update_depths(struct bfq_data *bfqd, struct sbitmap_queue *bt) |
7087 | { |
7088 | unsigned int depth = 1U << bt->sb.shift; |
7089 | |
7090 | bfqd->full_depth_shift = bt->sb.shift; |
7091 | /* |
7092 | * In-word depths if no bfq_queue is being weight-raised: |
7093 | * leaving 25% of tags only for sync reads. |
7094 | * |
7095 | * In next formulas, right-shift the value |
7096 | * (1U<<bt->sb.shift), instead of computing directly |
7097 | * (1U<<(bt->sb.shift - something)), to be robust against |
7098 | * any possible value of bt->sb.shift, without having to |
7099 | * limit 'something'. |
7100 | */ |
7101 | /* no more than 50% of tags for async I/O */ |
7102 | bfqd->word_depths[0][0] = max(depth >> 1, 1U); |
7103 | /* |
7104 | * no more than 75% of tags for sync writes (25% extra tags |
7105 | * w.r.t. async I/O, to prevent async I/O from starving sync |
7106 | * writes) |
7107 | */ |
7108 | bfqd->word_depths[0][1] = max((depth * 3) >> 2, 1U); |
7109 | |
7110 | /* |
7111 | * In-word depths in case some bfq_queue is being weight- |
7112 | * raised: leaving ~63% of tags for sync reads. This is the |
7113 | * highest percentage for which, in our tests, application |
7114 | * start-up times didn't suffer from any regression due to tag |
7115 | * shortage. |
7116 | */ |
7117 | /* no more than ~18% of tags for async I/O */ |
7118 | bfqd->word_depths[1][0] = max((depth * 3) >> 4, 1U); |
7119 | /* no more than ~37% of tags for sync writes (~20% extra tags) */ |
7120 | bfqd->word_depths[1][1] = max((depth * 6) >> 4, 1U); |
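
	/*
	 * Purely illustrative example of the formulas above: with
	 * bt->sb.shift == 6, i.e., depth == 64, the four values become
	 * 32, 48, 12 and 24, i.e., the 50%, 75%, ~18% and ~37% shares
	 * mentioned in the comments.
	 */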
7121 | } |
7122 | |
7123 | static void bfq_depth_updated(struct blk_mq_hw_ctx *hctx) |
7124 | { |
7125 | struct bfq_data *bfqd = hctx->queue->elevator->elevator_data; |
7126 | struct blk_mq_tags *tags = hctx->sched_tags; |
7127 | |
	bfq_update_depths(bfqd, &tags->bitmap_tags);
	sbitmap_queue_min_shallow_depth(&tags->bitmap_tags, 1);
7130 | } |
7131 | |
7132 | static int bfq_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int index) |
7133 | { |
7134 | bfq_depth_updated(hctx); |
7135 | return 0; |
7136 | } |
7137 | |
7138 | static void bfq_exit_queue(struct elevator_queue *e) |
7139 | { |
7140 | struct bfq_data *bfqd = e->elevator_data; |
7141 | struct bfq_queue *bfqq, *n; |
7142 | unsigned int actuator; |
7143 | |
	hrtimer_cancel(&bfqd->idle_slice_timer);

	spin_lock_irq(&bfqd->lock);
	list_for_each_entry_safe(bfqq, n, &bfqd->idle_list, bfqq_list)
		bfq_deactivate_bfqq(bfqd, bfqq, false, false);
	spin_unlock_irq(&bfqd->lock);

	for (actuator = 0; actuator < bfqd->num_actuators; actuator++)
		WARN_ON_ONCE(bfqd->rq_in_driver[actuator]);
	WARN_ON_ONCE(bfqd->tot_rq_in_driver);

	hrtimer_cancel(&bfqd->idle_slice_timer);

	/* release oom-queue reference to root group */
	bfqg_and_blkg_put(bfqd->root_group);

#ifdef CONFIG_BFQ_GROUP_IOSCHED
	blkcg_deactivate_policy(bfqd->queue->disk, &blkcg_policy_bfq);
7162 | #else |
7163 | spin_lock_irq(&bfqd->lock); |
7164 | bfq_put_async_queues(bfqd, bfqd->root_group); |
7165 | kfree(bfqd->root_group); |
7166 | spin_unlock_irq(&bfqd->lock); |
7167 | #endif |
7168 | |
	blk_stat_disable_accounting(bfqd->queue);
	clear_bit(ELEVATOR_FLAG_DISABLE_WBT, &e->flags);
	wbt_enable_default(bfqd->queue->disk);

	kfree(bfqd);
7174 | } |
7175 | |
7176 | static void bfq_init_root_group(struct bfq_group *root_group, |
7177 | struct bfq_data *bfqd) |
7178 | { |
7179 | int i; |
7180 | |
7181 | #ifdef CONFIG_BFQ_GROUP_IOSCHED |
7182 | root_group->entity.parent = NULL; |
7183 | root_group->my_entity = NULL; |
7184 | root_group->bfqd = bfqd; |
7185 | #endif |
7186 | root_group->rq_pos_tree = RB_ROOT; |
7187 | for (i = 0; i < BFQ_IOPRIO_CLASSES; i++) |
7188 | root_group->sched_data.service_tree[i] = BFQ_SERVICE_TREE_INIT; |
7189 | root_group->sched_data.bfq_class_idle_last_service = jiffies; |
7190 | } |
7191 | |
7192 | static int bfq_init_queue(struct request_queue *q, struct elevator_type *e) |
7193 | { |
7194 | struct bfq_data *bfqd; |
7195 | struct elevator_queue *eq; |
7196 | unsigned int i; |
7197 | struct blk_independent_access_ranges *ia_ranges = q->disk->ia_ranges; |
7198 | |
7199 | eq = elevator_alloc(q, e); |
7200 | if (!eq) |
7201 | return -ENOMEM; |
7202 | |
	bfqd = kzalloc_node(sizeof(*bfqd), GFP_KERNEL, q->node);
	if (!bfqd) {
		kobject_put(&eq->kobj);
		return -ENOMEM;
	}
	eq->elevator_data = bfqd;

	spin_lock_irq(&q->queue_lock);
	q->elevator = eq;
	spin_unlock_irq(&q->queue_lock);
7213 | |
7214 | /* |
7215 | * Our fallback bfqq if bfq_find_alloc_queue() runs into OOM issues. |
7216 | * Grab a permanent reference to it, so that the normal code flow |
7217 | * will not attempt to free it. |
7218 | * Set zero as actuator index: we will pretend that |
7219 | * all I/O requests are for the same actuator. |
7220 | */ |
	bfq_init_bfqq(bfqd, &bfqd->oom_bfqq, NULL, 1, 0, 0);
	bfqd->oom_bfqq.ref++;
	bfqd->oom_bfqq.new_ioprio = BFQ_DEFAULT_QUEUE_IOPRIO;
	bfqd->oom_bfqq.new_ioprio_class = IOPRIO_CLASS_BE;
	bfqd->oom_bfqq.entity.new_weight =
		bfq_ioprio_to_weight(bfqd->oom_bfqq.new_ioprio);

	/* oom_bfqq does not participate in bursts */
	bfq_clear_bfqq_just_created(&bfqd->oom_bfqq);
7230 | |
7231 | /* |
7232 | * Trigger weight initialization, according to ioprio, at the |
7233 | * oom_bfqq's first activation. The oom_bfqq's ioprio and ioprio |
7234 | * class won't be changed any more. |
7235 | */ |
7236 | bfqd->oom_bfqq.entity.prio_changed = 1; |
7237 | |
7238 | bfqd->queue = q; |
7239 | |
7240 | bfqd->num_actuators = 1; |
7241 | /* |
7242 | * If the disk supports multiple actuators, copy independent |
7243 | * access ranges from the request queue structure. |
7244 | */ |
	spin_lock_irq(&q->queue_lock);
7246 | if (ia_ranges) { |
7247 | /* |
7248 | * Check if the disk ia_ranges size exceeds the current bfq |
7249 | * actuator limit. |
7250 | */ |
7251 | if (ia_ranges->nr_ia_ranges > BFQ_MAX_ACTUATORS) { |
			pr_crit("nr_ia_ranges higher than act limit: iars=%d, max=%d.\n",
				ia_ranges->nr_ia_ranges, BFQ_MAX_ACTUATORS);
			pr_crit("Falling back to single actuator mode.\n");
7255 | } else { |
7256 | bfqd->num_actuators = ia_ranges->nr_ia_ranges; |
7257 | |
7258 | for (i = 0; i < bfqd->num_actuators; i++) { |
7259 | bfqd->sector[i] = ia_ranges->ia_range[i].sector; |
7260 | bfqd->nr_sectors[i] = |
7261 | ia_ranges->ia_range[i].nr_sectors; |
7262 | } |
7263 | } |
7264 | } |
7265 | |
7266 | /* Otherwise use single-actuator dev info */ |
7267 | if (bfqd->num_actuators == 1) { |
7268 | bfqd->sector[0] = 0; |
		bfqd->nr_sectors[0] = get_capacity(q->disk);
	}
	spin_unlock_irq(&q->queue_lock);

	INIT_LIST_HEAD(&bfqd->dispatch);

	hrtimer_init(&bfqd->idle_slice_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
7277 | bfqd->idle_slice_timer.function = bfq_idle_slice_timer; |
7278 | |
7279 | bfqd->queue_weights_tree = RB_ROOT_CACHED; |
7280 | #ifdef CONFIG_BFQ_GROUP_IOSCHED |
7281 | bfqd->num_groups_with_pending_reqs = 0; |
7282 | #endif |
7283 | |
	INIT_LIST_HEAD(&bfqd->active_list[0]);
	INIT_LIST_HEAD(&bfqd->active_list[1]);
	INIT_LIST_HEAD(&bfqd->idle_list);
7287 | INIT_HLIST_HEAD(&bfqd->burst_list); |
7288 | |
7289 | bfqd->hw_tag = -1; |
7290 | bfqd->nonrot_with_queueing = blk_queue_nonrot(bfqd->queue); |
7291 | |
7292 | bfqd->bfq_max_budget = bfq_default_max_budget; |
7293 | |
7294 | bfqd->bfq_fifo_expire[0] = bfq_fifo_expire[0]; |
7295 | bfqd->bfq_fifo_expire[1] = bfq_fifo_expire[1]; |
7296 | bfqd->bfq_back_max = bfq_back_max; |
7297 | bfqd->bfq_back_penalty = bfq_back_penalty; |
7298 | bfqd->bfq_slice_idle = bfq_slice_idle; |
7299 | bfqd->bfq_timeout = bfq_timeout; |
7300 | |
7301 | bfqd->bfq_large_burst_thresh = 8; |
	bfqd->bfq_burst_interval = msecs_to_jiffies(180);
7303 | |
7304 | bfqd->low_latency = true; |
7305 | |
7306 | /* |
7307 | * Trade-off between responsiveness and fairness. |
7308 | */ |
7309 | bfqd->bfq_wr_coeff = 30; |
	bfqd->bfq_wr_rt_max_time = msecs_to_jiffies(300);
	bfqd->bfq_wr_min_idle_time = msecs_to_jiffies(2000);
	bfqd->bfq_wr_min_inter_arr_async = msecs_to_jiffies(500);
7313 | bfqd->bfq_wr_max_softrt_rate = 7000; /* |
7314 | * Approximate rate required |
7315 | * to playback or record a |
7316 | * high-definition compressed |
7317 | * video. |
7318 | */ |
7319 | bfqd->wr_busy_queues = 0; |
7320 | |
7321 | /* |
7322 | * Begin by assuming, optimistically, that the device peak |
7323 | * rate is equal to 2/3 of the highest reference rate. |
7324 | */ |
7325 | bfqd->rate_dur_prod = ref_rate[blk_queue_nonrot(bfqd->queue)] * |
7326 | ref_wr_duration[blk_queue_nonrot(bfqd->queue)]; |
7327 | bfqd->peak_rate = ref_rate[blk_queue_nonrot(bfqd->queue)] * 2 / 3; |
7328 | |
7329 | /* see comments on the definition of next field inside bfq_data */ |
7330 | bfqd->actuator_load_threshold = 4; |
7331 | |
7332 | spin_lock_init(&bfqd->lock); |
7333 | |
7334 | /* |
7335 | * The invocation of the next bfq_create_group_hierarchy |
7336 | * function is the head of a chain of function calls |
7337 | * (bfq_create_group_hierarchy->blkcg_activate_policy-> |
7338 | * blk_mq_freeze_queue) that may lead to the invocation of the |
7339 | * has_work hook function. For this reason, |
7340 | * bfq_create_group_hierarchy is invoked only after all |
7341 | * scheduler data has been initialized, apart from the fields |
7342 | * that can be initialized only after invoking |
7343 | * bfq_create_group_hierarchy. This, in particular, enables |
7344 | * has_work to correctly return false. Of course, to avoid |
7345 | * other inconsistencies, the blk-mq stack must then refrain |
7346 | * from invoking further scheduler hooks before this init |
7347 | * function is finished. |
7348 | */ |
	bfqd->root_group = bfq_create_group_hierarchy(bfqd, q->node);
	if (!bfqd->root_group)
		goto out_free;
	bfq_init_root_group(bfqd->root_group, bfqd);
	bfq_init_entity(&bfqd->oom_bfqq.entity, bfqd->root_group);

	/* We dispatch from request queue wide instead of hw queue */
	blk_queue_flag_set(QUEUE_FLAG_SQ_SCHED, q);

	set_bit(ELEVATOR_FLAG_DISABLE_WBT, &eq->flags);
	wbt_disable_default(q->disk);
7360 | blk_stat_enable_accounting(q); |
7361 | |
7362 | return 0; |
7363 | |
7364 | out_free: |
	kfree(bfqd);
	kobject_put(&eq->kobj);
7367 | return -ENOMEM; |
7368 | } |
7369 | |
7370 | static void bfq_slab_kill(void) |
7371 | { |
	kmem_cache_destroy(bfq_pool);
7373 | } |
7374 | |
7375 | static int __init bfq_slab_setup(void) |
7376 | { |
7377 | bfq_pool = KMEM_CACHE(bfq_queue, 0); |
7378 | if (!bfq_pool) |
7379 | return -ENOMEM; |
7380 | return 0; |
7381 | } |
7382 | |
7383 | static ssize_t bfq_var_show(unsigned int var, char *page) |
7384 | { |
	return sprintf(page, "%u\n", var);
7386 | } |
7387 | |
7388 | static int bfq_var_store(unsigned long *var, const char *page) |
7389 | { |
7390 | unsigned long new_val; |
	int ret = kstrtoul(page, 10, &new_val);
7392 | |
7393 | if (ret) |
7394 | return ret; |
7395 | *var = new_val; |
7396 | return 0; |
7397 | } |
7398 | |
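/*
 * In the show/store helper macros below, __CONV selects the unit
 * conversion: 0 means the value is stored and exposed as is, 1 means
 * the field is stored in jiffies, 2 means it is stored in
 * nanoseconds; in the last two cases the value is exposed to user
 * space in milliseconds.
 */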
7399 | #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ |
7400 | static ssize_t __FUNC(struct elevator_queue *e, char *page) \ |
7401 | { \ |
7402 | struct bfq_data *bfqd = e->elevator_data; \ |
7403 | u64 __data = __VAR; \ |
7404 | if (__CONV == 1) \ |
7405 | __data = jiffies_to_msecs(__data); \ |
7406 | else if (__CONV == 2) \ |
7407 | __data = div_u64(__data, NSEC_PER_MSEC); \ |
7408 | return bfq_var_show(__data, (page)); \ |
7409 | } |
7410 | SHOW_FUNCTION(bfq_fifo_expire_sync_show, bfqd->bfq_fifo_expire[1], 2); |
7411 | SHOW_FUNCTION(bfq_fifo_expire_async_show, bfqd->bfq_fifo_expire[0], 2); |
7412 | SHOW_FUNCTION(bfq_back_seek_max_show, bfqd->bfq_back_max, 0); |
7413 | SHOW_FUNCTION(bfq_back_seek_penalty_show, bfqd->bfq_back_penalty, 0); |
7414 | SHOW_FUNCTION(bfq_slice_idle_show, bfqd->bfq_slice_idle, 2); |
7415 | SHOW_FUNCTION(bfq_max_budget_show, bfqd->bfq_user_max_budget, 0); |
7416 | SHOW_FUNCTION(bfq_timeout_sync_show, bfqd->bfq_timeout, 1); |
7417 | SHOW_FUNCTION(bfq_strict_guarantees_show, bfqd->strict_guarantees, 0); |
7418 | SHOW_FUNCTION(bfq_low_latency_show, bfqd->low_latency, 0); |
7419 | #undef SHOW_FUNCTION |
7420 | |
7421 | #define USEC_SHOW_FUNCTION(__FUNC, __VAR) \ |
7422 | static ssize_t __FUNC(struct elevator_queue *e, char *page) \ |
7423 | { \ |
7424 | struct bfq_data *bfqd = e->elevator_data; \ |
7425 | u64 __data = __VAR; \ |
7426 | __data = div_u64(__data, NSEC_PER_USEC); \ |
7427 | return bfq_var_show(__data, (page)); \ |
7428 | } |
7429 | USEC_SHOW_FUNCTION(bfq_slice_idle_us_show, bfqd->bfq_slice_idle); |
7430 | #undef USEC_SHOW_FUNCTION |
7431 | |
7432 | #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ |
7433 | static ssize_t \ |
7434 | __FUNC(struct elevator_queue *e, const char *page, size_t count) \ |
7435 | { \ |
7436 | struct bfq_data *bfqd = e->elevator_data; \ |
7437 | unsigned long __data, __min = (MIN), __max = (MAX); \ |
7438 | int ret; \ |
7439 | \ |
7440 | ret = bfq_var_store(&__data, (page)); \ |
7441 | if (ret) \ |
7442 | return ret; \ |
7443 | if (__data < __min) \ |
7444 | __data = __min; \ |
7445 | else if (__data > __max) \ |
7446 | __data = __max; \ |
7447 | if (__CONV == 1) \ |
7448 | *(__PTR) = msecs_to_jiffies(__data); \ |
7449 | else if (__CONV == 2) \ |
7450 | *(__PTR) = (u64)__data * NSEC_PER_MSEC; \ |
7451 | else \ |
7452 | *(__PTR) = __data; \ |
7453 | return count; \ |
7454 | } |
7455 | STORE_FUNCTION(bfq_fifo_expire_sync_store, &bfqd->bfq_fifo_expire[1], 1, |
7456 | INT_MAX, 2); |
7457 | STORE_FUNCTION(bfq_fifo_expire_async_store, &bfqd->bfq_fifo_expire[0], 1, |
7458 | INT_MAX, 2); |
7459 | STORE_FUNCTION(bfq_back_seek_max_store, &bfqd->bfq_back_max, 0, INT_MAX, 0); |
7460 | STORE_FUNCTION(bfq_back_seek_penalty_store, &bfqd->bfq_back_penalty, 1, |
7461 | INT_MAX, 0); |
7462 | STORE_FUNCTION(bfq_slice_idle_store, &bfqd->bfq_slice_idle, 0, INT_MAX, 2); |
7463 | #undef STORE_FUNCTION |
7464 | |
7465 | #define USEC_STORE_FUNCTION(__FUNC, __PTR, MIN, MAX) \ |
7466 | static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count)\ |
7467 | { \ |
7468 | struct bfq_data *bfqd = e->elevator_data; \ |
7469 | unsigned long __data, __min = (MIN), __max = (MAX); \ |
7470 | int ret; \ |
7471 | \ |
7472 | ret = bfq_var_store(&__data, (page)); \ |
7473 | if (ret) \ |
7474 | return ret; \ |
7475 | if (__data < __min) \ |
7476 | __data = __min; \ |
7477 | else if (__data > __max) \ |
7478 | __data = __max; \ |
7479 | *(__PTR) = (u64)__data * NSEC_PER_USEC; \ |
7480 | return count; \ |
7481 | } |
7482 | USEC_STORE_FUNCTION(bfq_slice_idle_us_store, &bfqd->bfq_slice_idle, 0, |
7483 | UINT_MAX); |
7484 | #undef USEC_STORE_FUNCTION |
7485 | |
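/*
 * Writing 0 to max_budget re-enables auto-tuning: the maximum budget
 * is then recomputed from the estimated peak rate and the timeout
 * (see bfq_calc_max_budget()). Any other value sets a fixed,
 * user-defined maximum budget.
 */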
7486 | static ssize_t bfq_max_budget_store(struct elevator_queue *e, |
7487 | const char *page, size_t count) |
7488 | { |
7489 | struct bfq_data *bfqd = e->elevator_data; |
7490 | unsigned long __data; |
7491 | int ret; |
7492 | |
	ret = bfq_var_store(&__data, (page));
7494 | if (ret) |
7495 | return ret; |
7496 | |
7497 | if (__data == 0) |
7498 | bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd); |
7499 | else { |
7500 | if (__data > INT_MAX) |
7501 | __data = INT_MAX; |
7502 | bfqd->bfq_max_budget = __data; |
7503 | } |
7504 | |
7505 | bfqd->bfq_user_max_budget = __data; |
7506 | |
7507 | return count; |
7508 | } |
7509 | |
7510 | /* |
7511 | * Leaving this name to preserve name compatibility with cfq |
7512 | * parameters, but this timeout is used for both sync and async. |
7513 | */ |
7514 | static ssize_t bfq_timeout_sync_store(struct elevator_queue *e, |
7515 | const char *page, size_t count) |
7516 | { |
7517 | struct bfq_data *bfqd = e->elevator_data; |
7518 | unsigned long __data; |
7519 | int ret; |
7520 | |
	ret = bfq_var_store(&__data, (page));
7522 | if (ret) |
7523 | return ret; |
7524 | |
7525 | if (__data < 1) |
7526 | __data = 1; |
7527 | else if (__data > INT_MAX) |
7528 | __data = INT_MAX; |
7529 | |
	bfqd->bfq_timeout = msecs_to_jiffies(__data);
7531 | if (bfqd->bfq_user_max_budget == 0) |
7532 | bfqd->bfq_max_budget = bfq_calc_max_budget(bfqd); |
7533 | |
7534 | return count; |
7535 | } |
7536 | |
7537 | static ssize_t bfq_strict_guarantees_store(struct elevator_queue *e, |
7538 | const char *page, size_t count) |
7539 | { |
7540 | struct bfq_data *bfqd = e->elevator_data; |
7541 | unsigned long __data; |
7542 | int ret; |
7543 | |
	ret = bfq_var_store(&__data, (page));
7545 | if (ret) |
7546 | return ret; |
7547 | |
7548 | if (__data > 1) |
7549 | __data = 1; |
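	/*
	 * Strict guarantees rely on device idling: when enabling them,
	 * make sure that slice_idle is not below 8 ms.
	 */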
7550 | if (!bfqd->strict_guarantees && __data == 1 |
7551 | && bfqd->bfq_slice_idle < 8 * NSEC_PER_MSEC) |
7552 | bfqd->bfq_slice_idle = 8 * NSEC_PER_MSEC; |
7553 | |
7554 | bfqd->strict_guarantees = __data; |
7555 | |
7556 | return count; |
7557 | } |
7558 | |
7559 | static ssize_t bfq_low_latency_store(struct elevator_queue *e, |
7560 | const char *page, size_t count) |
7561 | { |
7562 | struct bfq_data *bfqd = e->elevator_data; |
7563 | unsigned long __data; |
7564 | int ret; |
7565 | |
	ret = bfq_var_store(&__data, (page));
7567 | if (ret) |
7568 | return ret; |
7569 | |
7570 | if (__data > 1) |
7571 | __data = 1; |
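	/*
	 * Switching low_latency off also ends any weight-raising period
	 * currently in progress.
	 */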
7572 | if (__data == 0 && bfqd->low_latency != 0) |
7573 | bfq_end_wr(bfqd); |
7574 | bfqd->low_latency = __data; |
7575 | |
7576 | return count; |
7577 | } |
7578 | |
7579 | #define BFQ_ATTR(name) \ |
7580 | __ATTR(name, 0644, bfq_##name##_show, bfq_##name##_store) |
7581 | |
7582 | static struct elv_fs_entry bfq_attrs[] = { |
7583 | BFQ_ATTR(fifo_expire_sync), |
7584 | BFQ_ATTR(fifo_expire_async), |
7585 | BFQ_ATTR(back_seek_max), |
7586 | BFQ_ATTR(back_seek_penalty), |
7587 | BFQ_ATTR(slice_idle), |
7588 | BFQ_ATTR(slice_idle_us), |
7589 | BFQ_ATTR(max_budget), |
7590 | BFQ_ATTR(timeout_sync), |
7591 | BFQ_ATTR(strict_guarantees), |
7592 | BFQ_ATTR(low_latency), |
7593 | __ATTR_NULL |
7594 | }; |
7595 | |
7596 | static struct elevator_type iosched_bfq_mq = { |
7597 | .ops = { |
7598 | .limit_depth = bfq_limit_depth, |
7599 | .prepare_request = bfq_prepare_request, |
7600 | .requeue_request = bfq_finish_requeue_request, |
7601 | .finish_request = bfq_finish_request, |
7602 | .exit_icq = bfq_exit_icq, |
7603 | .insert_requests = bfq_insert_requests, |
7604 | .dispatch_request = bfq_dispatch_request, |
7605 | .next_request = elv_rb_latter_request, |
7606 | .former_request = elv_rb_former_request, |
7607 | .allow_merge = bfq_allow_bio_merge, |
7608 | .bio_merge = bfq_bio_merge, |
7609 | .request_merge = bfq_request_merge, |
7610 | .requests_merged = bfq_requests_merged, |
7611 | .request_merged = bfq_request_merged, |
7612 | .has_work = bfq_has_work, |
7613 | .depth_updated = bfq_depth_updated, |
7614 | .init_hctx = bfq_init_hctx, |
7615 | .init_sched = bfq_init_queue, |
7616 | .exit_sched = bfq_exit_queue, |
7617 | }, |
7618 | |
7619 | .icq_size = sizeof(struct bfq_io_cq), |
7620 | .icq_align = __alignof__(struct bfq_io_cq), |
7621 | .elevator_attrs = bfq_attrs, |
	.elevator_name = "bfq",
	.elevator_owner = THIS_MODULE,
};
MODULE_ALIAS("bfq-iosched");
7626 | |
7627 | static int __init bfq_init(void) |
7628 | { |
7629 | int ret; |
7630 | |
7631 | #ifdef CONFIG_BFQ_GROUP_IOSCHED |
	ret = blkcg_policy_register(&blkcg_policy_bfq);
7633 | if (ret) |
7634 | return ret; |
7635 | #endif |
7636 | |
7637 | ret = -ENOMEM; |
7638 | if (bfq_slab_setup()) |
7639 | goto err_pol_unreg; |
7640 | |
7641 | /* |
7642 | * Times to load large popular applications for the typical |
7643 | * systems installed on the reference devices (see the |
7644 | * comments before the definition of the next |
7645 | * array). Actually, we use slightly lower values, as the |
7646 | * estimated peak rate tends to be smaller than the actual |
7647 | * peak rate. The reason for this last fact is that estimates |
7648 | * are computed over much shorter time intervals than the long |
7649 | * intervals typically used for benchmarking. Why? First, to |
7650 | * adapt more quickly to variations. Second, because an I/O |
7651 | * scheduler cannot rely on a peak-rate-evaluation workload to |
7652 | * be run for a long time. |
7653 | */ |
	ref_wr_duration[0] = msecs_to_jiffies(7000); /* actually 8 sec */
	ref_wr_duration[1] = msecs_to_jiffies(2500); /* actually 3 sec */
7656 | |
7657 | ret = elv_register(&iosched_bfq_mq); |
7658 | if (ret) |
7659 | goto slab_kill; |
7660 | |
7661 | return 0; |
7662 | |
7663 | slab_kill: |
7664 | bfq_slab_kill(); |
7665 | err_pol_unreg: |
7666 | #ifdef CONFIG_BFQ_GROUP_IOSCHED |
	blkcg_policy_unregister(&blkcg_policy_bfq);
7668 | #endif |
7669 | return ret; |
7670 | } |
7671 | |
7672 | static void __exit bfq_exit(void) |
7673 | { |
7674 | elv_unregister(&iosched_bfq_mq); |
7675 | #ifdef CONFIG_BFQ_GROUP_IOSCHED |
	blkcg_policy_unregister(&blkcg_policy_bfq);
7677 | #endif |
7678 | bfq_slab_kill(); |
7679 | } |
7680 | |
7681 | module_init(bfq_init); |
7682 | module_exit(bfq_exit); |
7683 | |
MODULE_AUTHOR("Paolo Valente");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MQ Budget Fair Queueing I/O Scheduler");
7687 | |