1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Interface for controlling IO bandwidth on a request queue |
4 | * |
5 | * Copyright (C) 2010 Vivek Goyal <vgoyal@redhat.com> |
6 | */ |
7 | |
8 | #include <linux/module.h> |
9 | #include <linux/slab.h> |
10 | #include <linux/blkdev.h> |
11 | #include <linux/bio.h> |
12 | #include <linux/blktrace_api.h> |
13 | #include "blk.h" |
14 | #include "blk-cgroup-rwstat.h" |
15 | #include "blk-stat.h" |
16 | #include "blk-throttle.h" |
17 | |
18 | /* Max dispatch from a group in 1 round */ |
19 | #define THROTL_GRP_QUANTUM 8 |
20 | |
21 | /* Total max dispatch from all groups in one round */ |
22 | #define THROTL_QUANTUM 32 |
23 | |
24 | /* Throttling is performed over a slice and after that slice is renewed */ |
25 | #define DFL_THROTL_SLICE_HD (HZ / 10) |
26 | #define DFL_THROTL_SLICE_SSD (HZ / 50) |
27 | #define MAX_THROTL_SLICE (HZ) |
28 | #define MAX_IDLE_TIME (5L * 1000 * 1000) /* 5 s */ |
29 | #define MIN_THROTL_BPS (320 * 1024) |
30 | #define MIN_THROTL_IOPS (10) |
31 | #define DFL_LATENCY_TARGET (-1L) |
32 | #define DFL_IDLE_THRESHOLD (0) |
33 | #define DFL_HD_BASELINE_LATENCY (4000L) /* 4ms */ |
34 | #define LATENCY_FILTERED_SSD (0) |
35 | /* |
36 | * For HD, very small latency comes from sequential IO. Such IO doesn't help |
37 | * determine whether the group's IO is impacted by others, hence we ignore it |
38 | */ |
39 | #define LATENCY_FILTERED_HD (1000L) /* 1ms */ |
40 | |
41 | /* A workqueue to queue throttle related work */ |
42 | static struct workqueue_struct *kthrotld_workqueue; |
43 | |
44 | #define rb_entry_tg(node) rb_entry((node), struct throtl_grp, rb_node) |
45 | |
46 | /* We measure latency for request size from <= 4k to >= 1M */ |
47 | #define LATENCY_BUCKET_SIZE 9 |
48 | |
49 | struct latency_bucket { |
50 | unsigned long total_latency; /* ns / 1024 */ |
51 | int samples; |
52 | }; |
53 | |
54 | struct avg_latency_bucket { |
55 | unsigned long latency; /* ns / 1024 */ |
56 | bool valid; |
57 | }; |
58 | |
59 | struct throtl_data |
60 | { |
61 | /* service tree for active throtl groups */ |
62 | struct throtl_service_queue service_queue; |
63 | |
64 | struct request_queue *queue; |
65 | |
66 | /* Total Number of queued bios on READ and WRITE lists */ |
67 | unsigned int nr_queued[2]; |
68 | |
69 | unsigned int throtl_slice; |
70 | |
71 | /* Work for dispatching throttled bios */ |
72 | struct work_struct dispatch_work; |
73 | unsigned int limit_index; |
74 | bool limit_valid[LIMIT_CNT]; |
75 | |
76 | unsigned long low_upgrade_time; |
77 | unsigned long low_downgrade_time; |
78 | |
79 | unsigned int scale; |
80 | |
81 | struct latency_bucket tmp_buckets[2][LATENCY_BUCKET_SIZE]; |
82 | struct avg_latency_bucket avg_buckets[2][LATENCY_BUCKET_SIZE]; |
83 | struct latency_bucket __percpu *latency_buckets[2]; |
84 | unsigned long last_calculate_time; |
85 | unsigned long filtered_latency; |
86 | |
87 | bool track_bio_latency; |
88 | }; |
89 | |
90 | static void throtl_pending_timer_fn(struct timer_list *t); |
91 | |
92 | static inline struct blkcg_gq *tg_to_blkg(struct throtl_grp *tg) |
93 | { |
94 | return pd_to_blkg(pd: &tg->pd); |
95 | } |
96 | |
97 | /** |
98 | * sq_to_tg - return the throtl_grp the specified service queue belongs to |
99 | * @sq: the throtl_service_queue of interest |
100 | * |
101 | * Return the throtl_grp @sq belongs to. If @sq is the top-level one |
102 | * embedded in throtl_data, %NULL is returned. |
103 | */ |
104 | static struct throtl_grp *sq_to_tg(struct throtl_service_queue *sq) |
105 | { |
106 | if (sq && sq->parent_sq) |
107 | return container_of(sq, struct throtl_grp, service_queue); |
108 | else |
109 | return NULL; |
110 | } |
111 | |
112 | /** |
113 | * sq_to_td - return throtl_data the specified service queue belongs to |
114 | * @sq: the throtl_service_queue of interest |
115 | * |
116 | * A service_queue can be embedded in either a throtl_grp or throtl_data. |
117 | * Determine the associated throtl_data accordingly and return it. |
118 | */ |
119 | static struct throtl_data *sq_to_td(struct throtl_service_queue *sq) |
120 | { |
121 | struct throtl_grp *tg = sq_to_tg(sq); |
122 | |
123 | if (tg) |
124 | return tg->td; |
125 | else |
126 | return container_of(sq, struct throtl_data, service_queue); |
127 | } |
128 | |
129 | /* |
130 | * A cgroup's LIMIT_MAX limit is scaled if a low limit is set. The scaling is |
131 | * meant to make IO dispatch smoother. |
132 | * Scale up: scale up linearly with the time elapsed since the upgrade. For |
133 | * every throtl_slice, the limit scales up by 1/2 of the .low limit until |
134 | * it hits the .max limit. |
135 | * Scale down: scale down exponentially if a cgroup doesn't hit its .low limit |
136 | */ |
137 | static uint64_t throtl_adjusted_limit(uint64_t low, struct throtl_data *td) |
138 | { |
139 | /* arbitrary value to avoid too big scale */ |
140 | if (td->scale < 4096 && time_after_eq(jiffies, |
141 | td->low_upgrade_time + td->scale * td->throtl_slice)) |
142 | td->scale = (jiffies - td->low_upgrade_time) / td->throtl_slice; |
143 | |
144 | return low + (low >> 1) * td->scale; |
145 | } |
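/*
 * Illustrative arithmetic: with a .low limit of 1 MB/s and four
 * throtl_slice periods elapsed since the upgrade (td->scale == 4),
 * throtl_adjusted_limit() returns 1 MB/s + 0.5 MB/s * 4 = 3 MB/s.
 * Callers such as tg_bps_limit() then clamp the result to the
 * configured .max limit.
 */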
146 | |
147 | static uint64_t tg_bps_limit(struct throtl_grp *tg, int rw) |
148 | { |
149 | struct blkcg_gq *blkg = tg_to_blkg(tg); |
150 | struct throtl_data *td; |
151 | uint64_t ret; |
152 | |
153 | if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent) |
154 | return U64_MAX; |
155 | |
156 | td = tg->td; |
157 | ret = tg->bps[rw][td->limit_index]; |
158 | if (ret == 0 && td->limit_index == LIMIT_LOW) { |
159 | /* intermediate node or iops isn't 0 */ |
160 | if (!list_empty(head: &blkg->blkcg->css.children) || |
161 | tg->iops[rw][td->limit_index]) |
162 | return U64_MAX; |
163 | else |
164 | return MIN_THROTL_BPS; |
165 | } |
166 | |
167 | if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] && |
168 | tg->bps[rw][LIMIT_LOW] != tg->bps[rw][LIMIT_MAX]) { |
169 | uint64_t adjusted; |
170 | |
171 | adjusted = throtl_adjusted_limit(low: tg->bps[rw][LIMIT_LOW], td); |
172 | ret = min(tg->bps[rw][LIMIT_MAX], adjusted); |
173 | } |
174 | return ret; |
175 | } |
176 | |
177 | static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw) |
178 | { |
179 | struct blkcg_gq *blkg = tg_to_blkg(tg); |
180 | struct throtl_data *td; |
181 | unsigned int ret; |
182 | |
183 | if (cgroup_subsys_on_dfl(io_cgrp_subsys) && !blkg->parent) |
184 | return UINT_MAX; |
185 | |
186 | td = tg->td; |
187 | ret = tg->iops[rw][td->limit_index]; |
188 | if (ret == 0 && tg->td->limit_index == LIMIT_LOW) { |
189 | /* intermediate node or bps isn't 0 */ |
190 | if (!list_empty(head: &blkg->blkcg->css.children) || |
191 | tg->bps[rw][td->limit_index]) |
192 | return UINT_MAX; |
193 | else |
194 | return MIN_THROTL_IOPS; |
195 | } |
196 | |
197 | if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] && |
198 | tg->iops[rw][LIMIT_LOW] != tg->iops[rw][LIMIT_MAX]) { |
199 | uint64_t adjusted; |
200 | |
201 | adjusted = throtl_adjusted_limit(low: tg->iops[rw][LIMIT_LOW], td); |
202 | if (adjusted > UINT_MAX) |
203 | adjusted = UINT_MAX; |
204 | ret = min_t(unsigned int, tg->iops[rw][LIMIT_MAX], adjusted); |
205 | } |
206 | return ret; |
207 | } |
208 | |
209 | #define request_bucket_index(sectors) \ |
210 | clamp_t(int, order_base_2(sectors) - 3, 0, LATENCY_BUCKET_SIZE - 1) |
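/*
 * For example, a 4 KiB request is 8 sectors and order_base_2(8) == 3,
 * so it maps to bucket 0; a 1 MiB request is 2048 sectors and
 * order_base_2(2048) == 11, so it maps to bucket 8, the last one.
 */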
211 | |
212 | /** |
213 | * throtl_log - log debug message via blktrace |
214 | * @sq: the service_queue being reported |
215 | * @fmt: printf format string |
216 | * @args: printf args |
217 | * |
218 | * The messages are prefixed with "throtl BLKG_NAME" if @sq belongs to a |
219 | * throtl_grp; otherwise, just "throtl". |
220 | */ |
221 | #define throtl_log(sq, fmt, args...) do { \ |
222 | struct throtl_grp *__tg = sq_to_tg((sq)); \ |
223 | struct throtl_data *__td = sq_to_td((sq)); \ |
224 | \ |
225 | (void)__td; \ |
226 | if (likely(!blk_trace_note_message_enabled(__td->queue))) \ |
227 | break; \ |
228 | if ((__tg)) { \ |
229 | blk_add_cgroup_trace_msg(__td->queue, \ |
230 | &tg_to_blkg(__tg)->blkcg->css, "throtl " fmt, ##args);\ |
231 | } else { \ |
232 | blk_add_trace_msg(__td->queue, "throtl " fmt, ##args); \ |
233 | } \ |
234 | } while (0) |
235 | |
236 | static inline unsigned int throtl_bio_data_size(struct bio *bio) |
237 | { |
238 | /* assume it's one sector */ |
239 | if (unlikely(bio_op(bio) == REQ_OP_DISCARD)) |
240 | return 512; |
241 | return bio->bi_iter.bi_size; |
242 | } |
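/*
 * Discards are charged a single nominal sector above, presumably because
 * their byte size can be huge while the actual cost to the device is
 * small, so counting the full size against the bps budget would be
 * misleading.
 */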
243 | |
244 | static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg) |
245 | { |
246 | INIT_LIST_HEAD(list: &qn->node); |
247 | bio_list_init(bl: &qn->bios); |
248 | qn->tg = tg; |
249 | } |
250 | |
251 | /** |
252 | * throtl_qnode_add_bio - add a bio to a throtl_qnode and activate it |
253 | * @bio: bio being added |
254 | * @qn: qnode to add bio to |
255 | * @queued: the service_queue->queued[] list @qn belongs to |
256 | * |
257 | * Add @bio to @qn and put @qn on @queued if it's not already on. |
258 | * @qn->tg's reference count is bumped when @qn is activated. See the |
259 | * comment on top of throtl_qnode definition for details. |
260 | */ |
261 | static void throtl_qnode_add_bio(struct bio *bio, struct throtl_qnode *qn, |
262 | struct list_head *queued) |
263 | { |
264 | bio_list_add(bl: &qn->bios, bio); |
265 | if (list_empty(head: &qn->node)) { |
266 | list_add_tail(new: &qn->node, head: queued); |
267 | blkg_get(blkg: tg_to_blkg(tg: qn->tg)); |
268 | } |
269 | } |
270 | |
271 | /** |
272 | * throtl_peek_queued - peek the first bio on a qnode list |
273 | * @queued: the qnode list to peek |
274 | */ |
275 | static struct bio *throtl_peek_queued(struct list_head *queued) |
276 | { |
277 | struct throtl_qnode *qn; |
278 | struct bio *bio; |
279 | |
280 | if (list_empty(head: queued)) |
281 | return NULL; |
282 | |
283 | qn = list_first_entry(queued, struct throtl_qnode, node); |
284 | bio = bio_list_peek(bl: &qn->bios); |
285 | WARN_ON_ONCE(!bio); |
286 | return bio; |
287 | } |
288 | |
289 | /** |
290 | * throtl_pop_queued - pop the first bio from a qnode list |
291 | * @queued: the qnode list to pop a bio from |
292 | * @tg_to_put: optional out argument for throtl_grp to put |
293 | * |
294 | * Pop the first bio from the qnode list @queued. After popping, the first |
295 | * qnode is removed from @queued if empty or moved to the end of @queued so |
296 | * that the popping order is round-robin. |
297 | * |
298 | * When the first qnode is removed, its associated throtl_grp should be put |
299 | * too. If @tg_to_put is NULL, this function automatically puts it; |
300 | * otherwise, *@tg_to_put is set to the throtl_grp to put and the caller is |
301 | * responsible for putting it. |
302 | */ |
303 | static struct bio *throtl_pop_queued(struct list_head *queued, |
304 | struct throtl_grp **tg_to_put) |
305 | { |
306 | struct throtl_qnode *qn; |
307 | struct bio *bio; |
308 | |
309 | if (list_empty(head: queued)) |
310 | return NULL; |
311 | |
312 | qn = list_first_entry(queued, struct throtl_qnode, node); |
313 | bio = bio_list_pop(bl: &qn->bios); |
314 | WARN_ON_ONCE(!bio); |
315 | |
316 | if (bio_list_empty(bl: &qn->bios)) { |
317 | list_del_init(entry: &qn->node); |
318 | if (tg_to_put) |
319 | *tg_to_put = qn->tg; |
320 | else |
321 | blkg_put(blkg: tg_to_blkg(tg: qn->tg)); |
322 | } else { |
323 | list_move_tail(list: &qn->node, head: queued); |
324 | } |
325 | |
326 | return bio; |
327 | } |
328 | |
329 | /* init a service_queue, assumes the caller zeroed it */ |
330 | static void throtl_service_queue_init(struct throtl_service_queue *sq) |
331 | { |
332 | INIT_LIST_HEAD(list: &sq->queued[READ]); |
333 | INIT_LIST_HEAD(list: &sq->queued[WRITE]); |
334 | sq->pending_tree = RB_ROOT_CACHED; |
335 | timer_setup(&sq->pending_timer, throtl_pending_timer_fn, 0); |
336 | } |
337 | |
338 | static struct blkg_policy_data *throtl_pd_alloc(struct gendisk *disk, |
339 | struct blkcg *blkcg, gfp_t gfp) |
340 | { |
341 | struct throtl_grp *tg; |
342 | int rw; |
343 | |
344 | tg = kzalloc_node(size: sizeof(*tg), flags: gfp, node: disk->node_id); |
345 | if (!tg) |
346 | return NULL; |
347 | |
348 | if (blkg_rwstat_init(rwstat: &tg->stat_bytes, gfp)) |
349 | goto err_free_tg; |
350 | |
351 | if (blkg_rwstat_init(rwstat: &tg->stat_ios, gfp)) |
352 | goto err_exit_stat_bytes; |
353 | |
354 | throtl_service_queue_init(sq: &tg->service_queue); |
355 | |
356 | for (rw = READ; rw <= WRITE; rw++) { |
357 | throtl_qnode_init(qn: &tg->qnode_on_self[rw], tg); |
358 | throtl_qnode_init(qn: &tg->qnode_on_parent[rw], tg); |
359 | } |
360 | |
361 | RB_CLEAR_NODE(&tg->rb_node); |
362 | tg->bps[READ][LIMIT_MAX] = U64_MAX; |
363 | tg->bps[WRITE][LIMIT_MAX] = U64_MAX; |
364 | tg->iops[READ][LIMIT_MAX] = UINT_MAX; |
365 | tg->iops[WRITE][LIMIT_MAX] = UINT_MAX; |
366 | tg->bps_conf[READ][LIMIT_MAX] = U64_MAX; |
367 | tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX; |
368 | tg->iops_conf[READ][LIMIT_MAX] = UINT_MAX; |
369 | tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX; |
370 | /* LIMIT_LOW will have default value 0 */ |
371 | |
372 | tg->latency_target = DFL_LATENCY_TARGET; |
373 | tg->latency_target_conf = DFL_LATENCY_TARGET; |
374 | tg->idletime_threshold = DFL_IDLE_THRESHOLD; |
375 | tg->idletime_threshold_conf = DFL_IDLE_THRESHOLD; |
376 | |
377 | return &tg->pd; |
378 | |
379 | err_exit_stat_bytes: |
380 | blkg_rwstat_exit(rwstat: &tg->stat_bytes); |
381 | err_free_tg: |
382 | kfree(objp: tg); |
383 | return NULL; |
384 | } |
385 | |
386 | static void throtl_pd_init(struct blkg_policy_data *pd) |
387 | { |
388 | struct throtl_grp *tg = pd_to_tg(pd); |
389 | struct blkcg_gq *blkg = tg_to_blkg(tg); |
390 | struct throtl_data *td = blkg->q->td; |
391 | struct throtl_service_queue *sq = &tg->service_queue; |
392 | |
393 | /* |
394 | * If on the default hierarchy, we switch to properly hierarchical |
395 | * behavior where limits on a given throtl_grp are applied to the |
396 | * whole subtree rather than just the group itself. e.g. if a 16M |
397 | * read_bps limit is set on a parent group, the summed bps of the |
398 | * parent group and its subtree groups can't exceed 16M for the |
399 | * device. |
400 | * |
401 | * If not on the default hierarchy, the broken flat hierarchy |
402 | * behavior is retained where all throtl_grps are treated as if |
403 | * they're all separate root groups right below throtl_data. |
404 | * Limits of a group don't interact with limits of other groups |
405 | * regardless of the position of the group in the hierarchy. |
406 | */ |
407 | sq->parent_sq = &td->service_queue; |
408 | if (cgroup_subsys_on_dfl(io_cgrp_subsys) && blkg->parent) |
409 | sq->parent_sq = &blkg_to_tg(blkg: blkg->parent)->service_queue; |
410 | tg->td = td; |
411 | } |
412 | |
413 | /* |
414 | * Set has_rules[] if @tg or any of its parents have limits configured. |
415 | * This doesn't require walking up to the top of the hierarchy as the |
416 | * parent's has_rules[] is guaranteed to be correct. |
417 | */ |
418 | static void tg_update_has_rules(struct throtl_grp *tg) |
419 | { |
420 | struct throtl_grp *parent_tg = sq_to_tg(sq: tg->service_queue.parent_sq); |
421 | struct throtl_data *td = tg->td; |
422 | int rw; |
423 | |
424 | for (rw = READ; rw <= WRITE; rw++) { |
425 | tg->has_rules_iops[rw] = |
426 | (parent_tg && parent_tg->has_rules_iops[rw]) || |
427 | (td->limit_valid[td->limit_index] && |
428 | tg_iops_limit(tg, rw) != UINT_MAX); |
429 | tg->has_rules_bps[rw] = |
430 | (parent_tg && parent_tg->has_rules_bps[rw]) || |
431 | (td->limit_valid[td->limit_index] && |
432 | (tg_bps_limit(tg, rw) != U64_MAX)); |
433 | } |
434 | } |
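/*
 * For example, on the default hierarchy, if a parent group has a read
 * bps rule, every descendant ends up with has_rules_bps[READ] set even
 * when it has no limit of its own, so its bios still pass through the
 * throttling machinery and are accounted against the parent's limit.
 */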
435 | |
436 | static void throtl_pd_online(struct blkg_policy_data *pd) |
437 | { |
438 | struct throtl_grp *tg = pd_to_tg(pd); |
439 | /* |
440 | * We don't want new groups to escape the limits of their ancestors. |
441 | * Update has_rules[] after a new group is brought online. |
442 | */ |
443 | tg_update_has_rules(tg); |
444 | } |
445 | |
446 | #ifdef CONFIG_BLK_DEV_THROTTLING_LOW |
447 | static void blk_throtl_update_limit_valid(struct throtl_data *td) |
448 | { |
449 | struct cgroup_subsys_state *pos_css; |
450 | struct blkcg_gq *blkg; |
451 | bool low_valid = false; |
452 | |
453 | rcu_read_lock(); |
454 | blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) { |
455 | struct throtl_grp *tg = blkg_to_tg(blkg); |
456 | |
457 | if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] || |
458 | tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) { |
459 | low_valid = true; |
460 | break; |
461 | } |
462 | } |
463 | rcu_read_unlock(); |
464 | |
465 | td->limit_valid[LIMIT_LOW] = low_valid; |
466 | } |
467 | #else |
468 | static inline void blk_throtl_update_limit_valid(struct throtl_data *td) |
469 | { |
470 | } |
471 | #endif |
472 | |
473 | static void throtl_upgrade_state(struct throtl_data *td); |
474 | static void throtl_pd_offline(struct blkg_policy_data *pd) |
475 | { |
476 | struct throtl_grp *tg = pd_to_tg(pd); |
477 | |
478 | tg->bps[READ][LIMIT_LOW] = 0; |
479 | tg->bps[WRITE][LIMIT_LOW] = 0; |
480 | tg->iops[READ][LIMIT_LOW] = 0; |
481 | tg->iops[WRITE][LIMIT_LOW] = 0; |
482 | |
483 | blk_throtl_update_limit_valid(td: tg->td); |
484 | |
485 | if (!tg->td->limit_valid[tg->td->limit_index]) |
486 | throtl_upgrade_state(td: tg->td); |
487 | } |
488 | |
489 | static void throtl_pd_free(struct blkg_policy_data *pd) |
490 | { |
491 | struct throtl_grp *tg = pd_to_tg(pd); |
492 | |
493 | del_timer_sync(timer: &tg->service_queue.pending_timer); |
494 | blkg_rwstat_exit(rwstat: &tg->stat_bytes); |
495 | blkg_rwstat_exit(rwstat: &tg->stat_ios); |
496 | kfree(objp: tg); |
497 | } |
498 | |
499 | static struct throtl_grp * |
500 | throtl_rb_first(struct throtl_service_queue *parent_sq) |
501 | { |
502 | struct rb_node *n; |
503 | |
504 | n = rb_first_cached(&parent_sq->pending_tree); |
505 | WARN_ON_ONCE(!n); |
506 | if (!n) |
507 | return NULL; |
508 | return rb_entry_tg(n); |
509 | } |
510 | |
511 | static void throtl_rb_erase(struct rb_node *n, |
512 | struct throtl_service_queue *parent_sq) |
513 | { |
514 | rb_erase_cached(node: n, root: &parent_sq->pending_tree); |
515 | RB_CLEAR_NODE(n); |
516 | } |
517 | |
518 | static void update_min_dispatch_time(struct throtl_service_queue *parent_sq) |
519 | { |
520 | struct throtl_grp *tg; |
521 | |
522 | tg = throtl_rb_first(parent_sq); |
523 | if (!tg) |
524 | return; |
525 | |
526 | parent_sq->first_pending_disptime = tg->disptime; |
527 | } |
528 | |
529 | static void tg_service_queue_add(struct throtl_grp *tg) |
530 | { |
531 | struct throtl_service_queue *parent_sq = tg->service_queue.parent_sq; |
532 | struct rb_node **node = &parent_sq->pending_tree.rb_root.rb_node; |
533 | struct rb_node *parent = NULL; |
534 | struct throtl_grp *__tg; |
535 | unsigned long key = tg->disptime; |
536 | bool leftmost = true; |
537 | |
538 | while (*node != NULL) { |
539 | parent = *node; |
540 | __tg = rb_entry_tg(parent); |
541 | |
542 | if (time_before(key, __tg->disptime)) |
543 | node = &parent->rb_left; |
544 | else { |
545 | node = &parent->rb_right; |
546 | leftmost = false; |
547 | } |
548 | } |
549 | |
550 | rb_link_node(node: &tg->rb_node, parent, rb_link: node); |
551 | rb_insert_color_cached(node: &tg->rb_node, root: &parent_sq->pending_tree, |
552 | leftmost); |
553 | } |
554 | |
555 | static void throtl_enqueue_tg(struct throtl_grp *tg) |
556 | { |
557 | if (!(tg->flags & THROTL_TG_PENDING)) { |
558 | tg_service_queue_add(tg); |
559 | tg->flags |= THROTL_TG_PENDING; |
560 | tg->service_queue.parent_sq->nr_pending++; |
561 | } |
562 | } |
563 | |
564 | static void throtl_dequeue_tg(struct throtl_grp *tg) |
565 | { |
566 | if (tg->flags & THROTL_TG_PENDING) { |
567 | struct throtl_service_queue *parent_sq = |
568 | tg->service_queue.parent_sq; |
569 | |
570 | throtl_rb_erase(n: &tg->rb_node, parent_sq); |
571 | --parent_sq->nr_pending; |
572 | tg->flags &= ~THROTL_TG_PENDING; |
573 | } |
574 | } |
575 | |
576 | /* Call with queue lock held */ |
577 | static void throtl_schedule_pending_timer(struct throtl_service_queue *sq, |
578 | unsigned long expires) |
579 | { |
580 | unsigned long max_expire = jiffies + 8 * sq_to_td(sq)->throtl_slice; |
581 | |
582 | /* |
583 | * Since we are adjusting the throttle limit dynamically, the sleep |
584 | * time calculated according to the previous limit might be invalid. It's |
585 | * possible the cgroup's sleep time is very long while no other cgroup has |
586 | * IO running to pick up the limit change, so make sure the cgroup |
587 | * doesn't sleep too long and miss the notification. |
588 | */ |
589 | if (time_after(expires, max_expire)) |
590 | expires = max_expire; |
591 | mod_timer(timer: &sq->pending_timer, expires); |
592 | throtl_log(sq, "schedule timer. delay=%lu jiffies=%lu" , |
593 | expires - jiffies, jiffies); |
594 | } |
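/*
 * With the default HD slice of HZ / 10, the max_expire cap above means
 * the timer is never pushed out more than roughly 800 ms beyond "now",
 * so a pending limit change is picked up reasonably quickly.
 */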
595 | |
596 | /** |
597 | * throtl_schedule_next_dispatch - schedule the next dispatch cycle |
598 | * @sq: the service_queue to schedule dispatch for |
599 | * @force: force scheduling |
600 | * |
601 | * Arm @sq->pending_timer so that the next dispatch cycle starts on the |
602 | * dispatch time of the first pending child. Returns %true if either timer |
603 | * is armed or there's no pending child left. %false if the current |
604 | * dispatch window is still open and the caller should continue |
605 | * dispatching. |
606 | * |
607 | * If @force is %true, the dispatch timer is always scheduled and this |
608 | * function is guaranteed to return %true. This is to be used when the |
609 | * caller can't dispatch itself and needs to invoke pending_timer |
610 | * unconditionally. Note that forced scheduling is likely to induce short |
611 | * delay before dispatch starts even if @sq->first_pending_disptime is not |
612 | * in the future and thus shouldn't be used in hot paths. |
613 | */ |
614 | static bool throtl_schedule_next_dispatch(struct throtl_service_queue *sq, |
615 | bool force) |
616 | { |
617 | /* any pending children left? */ |
618 | if (!sq->nr_pending) |
619 | return true; |
620 | |
621 | update_min_dispatch_time(parent_sq: sq); |
622 | |
623 | /* is the next dispatch time in the future? */ |
624 | if (force || time_after(sq->first_pending_disptime, jiffies)) { |
625 | throtl_schedule_pending_timer(sq, expires: sq->first_pending_disptime); |
626 | return true; |
627 | } |
628 | |
629 | /* tell the caller to continue dispatching */ |
630 | return false; |
631 | } |
632 | |
633 | static inline void throtl_start_new_slice_with_credit(struct throtl_grp *tg, |
634 | bool rw, unsigned long start) |
635 | { |
636 | tg->bytes_disp[rw] = 0; |
637 | tg->io_disp[rw] = 0; |
638 | tg->carryover_bytes[rw] = 0; |
639 | tg->carryover_ios[rw] = 0; |
640 | |
641 | /* |
642 | * Previous slice has expired. We must have trimmed it after last |
643 | * bio dispatch. That means since start of last slice, we never used |
644 | * that bandwidth. Do try to make use of that bandwidth while giving |
645 | * credit. |
646 | */ |
647 | if (time_after(start, tg->slice_start[rw])) |
648 | tg->slice_start[rw] = start; |
649 | |
650 | tg->slice_end[rw] = jiffies + tg->td->throtl_slice; |
651 | throtl_log(&tg->service_queue, |
652 | "[%c] new slice with credit start=%lu end=%lu jiffies=%lu" , |
653 | rw == READ ? 'R' : 'W', tg->slice_start[rw], |
654 | tg->slice_end[rw], jiffies); |
655 | } |
656 | |
657 | static inline void throtl_start_new_slice(struct throtl_grp *tg, bool rw, |
658 | bool clear_carryover) |
659 | { |
660 | tg->bytes_disp[rw] = 0; |
661 | tg->io_disp[rw] = 0; |
662 | tg->slice_start[rw] = jiffies; |
663 | tg->slice_end[rw] = jiffies + tg->td->throtl_slice; |
664 | if (clear_carryover) { |
665 | tg->carryover_bytes[rw] = 0; |
666 | tg->carryover_ios[rw] = 0; |
667 | } |
668 | |
669 | throtl_log(&tg->service_queue, |
670 | "[%c] new slice start=%lu end=%lu jiffies=%lu" , |
671 | rw == READ ? 'R' : 'W', tg->slice_start[rw], |
672 | tg->slice_end[rw], jiffies); |
673 | } |
674 | |
675 | static inline void throtl_set_slice_end(struct throtl_grp *tg, bool rw, |
676 | unsigned long jiffy_end) |
677 | { |
678 | tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice); |
679 | } |
680 | |
681 | static inline void throtl_extend_slice(struct throtl_grp *tg, bool rw, |
682 | unsigned long jiffy_end) |
683 | { |
684 | throtl_set_slice_end(tg, rw, jiffy_end); |
685 | throtl_log(&tg->service_queue, |
686 | "[%c] extend slice start=%lu end=%lu jiffies=%lu" , |
687 | rw == READ ? 'R' : 'W', tg->slice_start[rw], |
688 | tg->slice_end[rw], jiffies); |
689 | } |
690 | |
691 | /* Determine if previously allocated or extended slice is complete or not */ |
692 | static bool throtl_slice_used(struct throtl_grp *tg, bool rw) |
693 | { |
694 | if (time_in_range(jiffies, tg->slice_start[rw], tg->slice_end[rw])) |
695 | return false; |
696 | |
697 | return true; |
698 | } |
699 | |
700 | static unsigned int calculate_io_allowed(u32 iops_limit, |
701 | unsigned long jiffy_elapsed) |
702 | { |
703 | unsigned int io_allowed; |
704 | u64 tmp; |
705 | |
706 | /* |
707 | * jiffy_elapsed should not be a big value: since the minimum iops is 1, |
708 | * jiffy_elapsed should at most be the equivalent of 1 second, as we |
709 | * will allow a dispatch after 1 second and by then the slice should |
710 | * have been trimmed. |
711 | */ |
712 | |
713 | tmp = (u64)iops_limit * jiffy_elapsed; |
714 | do_div(tmp, HZ); |
715 | |
716 | if (tmp > UINT_MAX) |
717 | io_allowed = UINT_MAX; |
718 | else |
719 | io_allowed = tmp; |
720 | |
721 | return io_allowed; |
722 | } |
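/*
 * Worked example: with iops_limit == 1000 and jiffy_elapsed == HZ / 10
 * (i.e. 100 ms), io_allowed comes out to 1000 * (HZ / 10) / HZ == 100.
 */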
723 | |
724 | static u64 calculate_bytes_allowed(u64 bps_limit, unsigned long jiffy_elapsed) |
725 | { |
726 | /* |
727 | * Can result be wider than 64 bits? |
728 | * We check against 62, not 64, due to ilog2 truncation. |
729 | */ |
730 | if (ilog2(bps_limit) + ilog2(jiffy_elapsed) - ilog2(HZ) > 62) |
731 | return U64_MAX; |
732 | return mul_u64_u64_div_u64(a: bps_limit, mul: (u64)jiffy_elapsed, div: (u64)HZ); |
733 | } |
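/*
 * Worked example: bps_limit == 100 MiB/s with jiffy_elapsed == HZ / 10
 * allows roughly 10 MiB.  The overflow test uses 62 rather than 64
 * because ilog2() truncates, so the true width of the result can be up
 * to two bits larger than the estimate; anything that might not fit in
 * 64 bits is saturated to U64_MAX.
 */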
734 | |
735 | /* Trim the used slices and adjust slice start accordingly */ |
736 | static inline void throtl_trim_slice(struct throtl_grp *tg, bool rw) |
737 | { |
738 | unsigned long time_elapsed; |
739 | long long bytes_trim; |
740 | int io_trim; |
741 | |
742 | BUG_ON(time_before(tg->slice_end[rw], tg->slice_start[rw])); |
743 | |
744 | /* |
745 | * If bps is unlimited (-1), then the time slice doesn't get |
746 | * renewed. Don't try to trim the slice if it has already expired. A new |
747 | * slice will start when appropriate. |
748 | */ |
749 | if (throtl_slice_used(tg, rw)) |
750 | return; |
751 | |
752 | /* |
753 | * A bio has been dispatched. Also adjust slice_end. It might happen |
754 | * that initially the cgroup limit was very low, resulting in a high |
755 | * slice_end, but the limit was later bumped up and the bio was dispatched |
756 | * sooner; then we need to reduce slice_end. A bogus, high slice_end |
757 | * is bad because it does not allow a new slice to start. |
758 | */ |
759 | |
760 | throtl_set_slice_end(tg, rw, jiffy_end: jiffies + tg->td->throtl_slice); |
761 | |
762 | time_elapsed = rounddown(jiffies - tg->slice_start[rw], |
763 | tg->td->throtl_slice); |
764 | if (!time_elapsed) |
765 | return; |
766 | |
767 | bytes_trim = calculate_bytes_allowed(bps_limit: tg_bps_limit(tg, rw), |
768 | jiffy_elapsed: time_elapsed) + |
769 | tg->carryover_bytes[rw]; |
770 | io_trim = calculate_io_allowed(iops_limit: tg_iops_limit(tg, rw), jiffy_elapsed: time_elapsed) + |
771 | tg->carryover_ios[rw]; |
772 | if (bytes_trim <= 0 && io_trim <= 0) |
773 | return; |
774 | |
775 | tg->carryover_bytes[rw] = 0; |
776 | if ((long long)tg->bytes_disp[rw] >= bytes_trim) |
777 | tg->bytes_disp[rw] -= bytes_trim; |
778 | else |
779 | tg->bytes_disp[rw] = 0; |
780 | |
781 | tg->carryover_ios[rw] = 0; |
782 | if ((int)tg->io_disp[rw] >= io_trim) |
783 | tg->io_disp[rw] -= io_trim; |
784 | else |
785 | tg->io_disp[rw] = 0; |
786 | |
787 | tg->slice_start[rw] += time_elapsed; |
788 | |
789 | throtl_log(&tg->service_queue, |
790 | "[%c] trim slice nr=%lu bytes=%lld io=%d start=%lu end=%lu jiffies=%lu" , |
791 | rw == READ ? 'R' : 'W', time_elapsed / tg->td->throtl_slice, |
792 | bytes_trim, io_trim, tg->slice_start[rw], tg->slice_end[rw], |
793 | jiffies); |
794 | } |
795 | |
796 | static void __tg_update_carryover(struct throtl_grp *tg, bool rw) |
797 | { |
798 | unsigned long jiffy_elapsed = jiffies - tg->slice_start[rw]; |
799 | u64 bps_limit = tg_bps_limit(tg, rw); |
800 | u32 iops_limit = tg_iops_limit(tg, rw); |
801 | |
802 | /* |
803 | * If the config is updated while bios are still throttled, calculate and |
804 | * accumulate how many bytes/ios have already been waited for across the |
805 | * change. carryover_bytes/ios will then be used to calculate the new wait |
806 | * time under the new configuration. |
807 | */ |
808 | if (bps_limit != U64_MAX) |
809 | tg->carryover_bytes[rw] += |
810 | calculate_bytes_allowed(bps_limit, jiffy_elapsed) - |
811 | tg->bytes_disp[rw]; |
812 | if (iops_limit != UINT_MAX) |
813 | tg->carryover_ios[rw] += |
814 | calculate_io_allowed(iops_limit, jiffy_elapsed) - |
815 | tg->io_disp[rw]; |
816 | } |
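/*
 * For example, if the old limit would have allowed 10 MB over the
 * elapsed part of the slice but only 4 MB were actually dispatched,
 * 6 MB of credit is added to carryover_bytes and the still-throttled
 * bios are released correspondingly sooner under the new configuration.
 */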
817 | |
818 | static void tg_update_carryover(struct throtl_grp *tg) |
819 | { |
820 | if (tg->service_queue.nr_queued[READ]) |
821 | __tg_update_carryover(tg, READ); |
822 | if (tg->service_queue.nr_queued[WRITE]) |
823 | __tg_update_carryover(tg, WRITE); |
824 | |
825 | /* see comments in struct throtl_grp for meaning of these fields. */ |
826 | throtl_log(&tg->service_queue, "%s: %lld %lld %d %d\n" , __func__, |
827 | tg->carryover_bytes[READ], tg->carryover_bytes[WRITE], |
828 | tg->carryover_ios[READ], tg->carryover_ios[WRITE]); |
829 | } |
830 | |
831 | static unsigned long tg_within_iops_limit(struct throtl_grp *tg, struct bio *bio, |
832 | u32 iops_limit) |
833 | { |
834 | bool rw = bio_data_dir(bio); |
835 | int io_allowed; |
836 | unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd; |
837 | |
838 | if (iops_limit == UINT_MAX) { |
839 | return 0; |
840 | } |
841 | |
842 | jiffy_elapsed = jiffies - tg->slice_start[rw]; |
843 | |
844 | /* Round up to the next throttle slice, wait time must be nonzero */ |
845 | jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice); |
846 | io_allowed = calculate_io_allowed(iops_limit, jiffy_elapsed: jiffy_elapsed_rnd) + |
847 | tg->carryover_ios[rw]; |
848 | if (io_allowed > 0 && tg->io_disp[rw] + 1 <= io_allowed) |
849 | return 0; |
850 | |
851 | /* Calc approx time to dispatch */ |
852 | jiffy_wait = jiffy_elapsed_rnd - jiffy_elapsed; |
853 | return jiffy_wait; |
854 | } |
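/*
 * For example, with throtl_slice == HZ / 10 and the group 3 jiffies into
 * its slice, a bio that exceeds the iops budget is asked to wait
 * roundup(3 + 1, HZ / 10) - 3 == HZ / 10 - 3 jiffies, i.e. roughly until
 * the next throtl_slice boundary.
 */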
855 | |
856 | static unsigned long tg_within_bps_limit(struct throtl_grp *tg, struct bio *bio, |
857 | u64 bps_limit) |
858 | { |
859 | bool rw = bio_data_dir(bio); |
860 | long long bytes_allowed; |
861 | u64 extra_bytes; |
862 | unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd; |
863 | unsigned int bio_size = throtl_bio_data_size(bio); |
864 | |
865 | /* no need to throttle if this bio's bytes have been accounted */ |
866 | if (bps_limit == U64_MAX || bio_flagged(bio, bit: BIO_BPS_THROTTLED)) { |
867 | return 0; |
868 | } |
869 | |
870 | jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw]; |
871 | |
872 | /* Slice has just started. Consider one slice interval */ |
873 | if (!jiffy_elapsed) |
874 | jiffy_elapsed_rnd = tg->td->throtl_slice; |
875 | |
876 | jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice); |
877 | bytes_allowed = calculate_bytes_allowed(bps_limit, jiffy_elapsed: jiffy_elapsed_rnd) + |
878 | tg->carryover_bytes[rw]; |
879 | if (bytes_allowed > 0 && tg->bytes_disp[rw] + bio_size <= bytes_allowed) |
880 | return 0; |
881 | |
882 | /* Calc approx time to dispatch */ |
883 | extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed; |
884 | jiffy_wait = div64_u64(dividend: extra_bytes * HZ, divisor: bps_limit); |
885 | |
886 | if (!jiffy_wait) |
887 | jiffy_wait = 1; |
888 | |
889 | /* |
890 | * This wait time doesn't take into consideration the rounding |
891 | * up we did. Add that time as well. |
892 | */ |
893 | jiffy_wait = jiffy_wait + (jiffy_elapsed_rnd - jiffy_elapsed); |
894 | return jiffy_wait; |
895 | } |
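/*
 * For example, with bps_limit == 1 MiB/s and 512 KiB of excess bytes,
 * the base wait is 512 KiB * HZ / 1 MiB == HZ / 2 jiffies (~500 ms),
 * plus whatever the round-up of jiffy_elapsed_rnd added on top.
 */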
896 | |
897 | /* |
898 | * Returns whether one can dispatch a bio or not. Also returns the approximate |
899 | * number of jiffies to wait before this bio is within the IO rate and can be dispatched |
900 | */ |
901 | static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio, |
902 | unsigned long *wait) |
903 | { |
904 | bool rw = bio_data_dir(bio); |
905 | unsigned long bps_wait = 0, iops_wait = 0, max_wait = 0; |
906 | u64 bps_limit = tg_bps_limit(tg, rw); |
907 | u32 iops_limit = tg_iops_limit(tg, rw); |
908 | |
909 | /* |
910 | * Currently the whole state machine of the group depends on the first bio |
911 | * queued in the group's bio list. So one should not be calling |
912 | * this function with a different bio if there are other bios |
913 | * queued. |
914 | */ |
915 | BUG_ON(tg->service_queue.nr_queued[rw] && |
916 | bio != throtl_peek_queued(&tg->service_queue.queued[rw])); |
917 | |
918 | /* If tg->bps = -1, then BW is unlimited */ |
919 | if ((bps_limit == U64_MAX && iops_limit == UINT_MAX) || |
920 | tg->flags & THROTL_TG_CANCELING) { |
921 | if (wait) |
922 | *wait = 0; |
923 | return true; |
924 | } |
925 | |
926 | /* |
927 | * If the previous slice expired, start a new one; otherwise renew/extend |
928 | * the existing slice to make sure it is at least throtl_slice interval |
929 | * long from now. A new slice is started only for an empty throttle group. |
930 | * If there are queued bios, that means there should be an active |
931 | * slice and it should be extended instead. |
932 | */ |
933 | if (throtl_slice_used(tg, rw) && !(tg->service_queue.nr_queued[rw])) |
934 | throtl_start_new_slice(tg, rw, clear_carryover: true); |
935 | else { |
936 | if (time_before(tg->slice_end[rw], |
937 | jiffies + tg->td->throtl_slice)) |
938 | throtl_extend_slice(tg, rw, |
939 | jiffy_end: jiffies + tg->td->throtl_slice); |
940 | } |
941 | |
942 | bps_wait = tg_within_bps_limit(tg, bio, bps_limit); |
943 | iops_wait = tg_within_iops_limit(tg, bio, iops_limit); |
944 | if (bps_wait + iops_wait == 0) { |
945 | if (wait) |
946 | *wait = 0; |
947 | return true; |
948 | } |
949 | |
950 | max_wait = max(bps_wait, iops_wait); |
951 | |
952 | if (wait) |
953 | *wait = max_wait; |
954 | |
955 | if (time_before(tg->slice_end[rw], jiffies + max_wait)) |
956 | throtl_extend_slice(tg, rw, jiffy_end: jiffies + max_wait); |
957 | |
958 | return false; |
959 | } |
960 | |
961 | static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio) |
962 | { |
963 | bool rw = bio_data_dir(bio); |
964 | unsigned int bio_size = throtl_bio_data_size(bio); |
965 | |
966 | /* Charge the bio to the group */ |
967 | if (!bio_flagged(bio, bit: BIO_BPS_THROTTLED)) { |
968 | tg->bytes_disp[rw] += bio_size; |
969 | tg->last_bytes_disp[rw] += bio_size; |
970 | } |
971 | |
972 | tg->io_disp[rw]++; |
973 | tg->last_io_disp[rw]++; |
974 | } |
975 | |
976 | /** |
977 | * throtl_add_bio_tg - add a bio to the specified throtl_grp |
978 | * @bio: bio to add |
979 | * @qn: qnode to use |
980 | * @tg: the target throtl_grp |
981 | * |
982 | * Add @bio to @tg's service_queue using @qn. If @qn is not specified, |
983 | * tg->qnode_on_self[] is used. |
984 | */ |
985 | static void throtl_add_bio_tg(struct bio *bio, struct throtl_qnode *qn, |
986 | struct throtl_grp *tg) |
987 | { |
988 | struct throtl_service_queue *sq = &tg->service_queue; |
989 | bool rw = bio_data_dir(bio); |
990 | |
991 | if (!qn) |
992 | qn = &tg->qnode_on_self[rw]; |
993 | |
994 | /* |
995 | * If @tg doesn't currently have any bios queued in the same |
996 | * direction, queueing @bio can change when @tg should be |
997 | * dispatched. Mark that @tg was empty. This is automatically |
998 | * cleared on the next tg_update_disptime(). |
999 | */ |
1000 | if (!sq->nr_queued[rw]) |
1001 | tg->flags |= THROTL_TG_WAS_EMPTY; |
1002 | |
1003 | throtl_qnode_add_bio(bio, qn, queued: &sq->queued[rw]); |
1004 | |
1005 | sq->nr_queued[rw]++; |
1006 | throtl_enqueue_tg(tg); |
1007 | } |
1008 | |
1009 | static void tg_update_disptime(struct throtl_grp *tg) |
1010 | { |
1011 | struct throtl_service_queue *sq = &tg->service_queue; |
1012 | unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime; |
1013 | struct bio *bio; |
1014 | |
1015 | bio = throtl_peek_queued(queued: &sq->queued[READ]); |
1016 | if (bio) |
1017 | tg_may_dispatch(tg, bio, wait: &read_wait); |
1018 | |
1019 | bio = throtl_peek_queued(queued: &sq->queued[WRITE]); |
1020 | if (bio) |
1021 | tg_may_dispatch(tg, bio, wait: &write_wait); |
1022 | |
1023 | min_wait = min(read_wait, write_wait); |
1024 | disptime = jiffies + min_wait; |
1025 | |
1026 | /* Update dispatch time */ |
1027 | throtl_rb_erase(n: &tg->rb_node, parent_sq: tg->service_queue.parent_sq); |
1028 | tg->disptime = disptime; |
1029 | tg_service_queue_add(tg); |
1030 | |
1031 | /* see throtl_add_bio_tg() */ |
1032 | tg->flags &= ~THROTL_TG_WAS_EMPTY; |
1033 | } |
1034 | |
1035 | static void start_parent_slice_with_credit(struct throtl_grp *child_tg, |
1036 | struct throtl_grp *parent_tg, bool rw) |
1037 | { |
1038 | if (throtl_slice_used(tg: parent_tg, rw)) { |
1039 | throtl_start_new_slice_with_credit(tg: parent_tg, rw, |
1040 | start: child_tg->slice_start[rw]); |
1041 | } |
1042 | |
1043 | } |
1044 | |
1045 | static void tg_dispatch_one_bio(struct throtl_grp *tg, bool rw) |
1046 | { |
1047 | struct throtl_service_queue *sq = &tg->service_queue; |
1048 | struct throtl_service_queue *parent_sq = sq->parent_sq; |
1049 | struct throtl_grp *parent_tg = sq_to_tg(sq: parent_sq); |
1050 | struct throtl_grp *tg_to_put = NULL; |
1051 | struct bio *bio; |
1052 | |
1053 | /* |
1054 | * @bio is being transferred from @tg to @parent_sq. Popping a bio |
1055 | * from @tg may put its reference and @parent_sq might end up |
1056 | * getting released prematurely. Remember the tg to put and put it |
1057 | * after @bio is transferred to @parent_sq. |
1058 | */ |
1059 | bio = throtl_pop_queued(queued: &sq->queued[rw], tg_to_put: &tg_to_put); |
1060 | sq->nr_queued[rw]--; |
1061 | |
1062 | throtl_charge_bio(tg, bio); |
1063 | |
1064 | /* |
1065 | * If our parent is another tg, we just need to transfer @bio to |
1066 | * the parent using throtl_add_bio_tg(). If our parent is |
1067 | * @td->service_queue, @bio is ready to be issued. Put it on its |
1068 | * bio_lists[] and decrease total number queued. The caller is |
1069 | * responsible for issuing these bios. |
1070 | */ |
1071 | if (parent_tg) { |
1072 | throtl_add_bio_tg(bio, qn: &tg->qnode_on_parent[rw], tg: parent_tg); |
1073 | start_parent_slice_with_credit(child_tg: tg, parent_tg, rw); |
1074 | } else { |
1075 | bio_set_flag(bio, bit: BIO_BPS_THROTTLED); |
1076 | throtl_qnode_add_bio(bio, qn: &tg->qnode_on_parent[rw], |
1077 | queued: &parent_sq->queued[rw]); |
1078 | BUG_ON(tg->td->nr_queued[rw] <= 0); |
1079 | tg->td->nr_queued[rw]--; |
1080 | } |
1081 | |
1082 | throtl_trim_slice(tg, rw); |
1083 | |
1084 | if (tg_to_put) |
1085 | blkg_put(blkg: tg_to_blkg(tg: tg_to_put)); |
1086 | } |
1087 | |
1088 | static int throtl_dispatch_tg(struct throtl_grp *tg) |
1089 | { |
1090 | struct throtl_service_queue *sq = &tg->service_queue; |
1091 | unsigned int nr_reads = 0, nr_writes = 0; |
1092 | unsigned int max_nr_reads = THROTL_GRP_QUANTUM * 3 / 4; |
1093 | unsigned int max_nr_writes = THROTL_GRP_QUANTUM - max_nr_reads; |
1094 | struct bio *bio; |
1095 | |
1096 | /* Try to dispatch 75% READS and 25% WRITES */ |
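	/*
	 * With THROTL_GRP_QUANTUM == 8 this works out to at most 6 reads
	 * and 2 writes per group per dispatch round.
	 */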
1097 | |
1098 | while ((bio = throtl_peek_queued(queued: &sq->queued[READ])) && |
1099 | tg_may_dispatch(tg, bio, NULL)) { |
1100 | |
1101 | tg_dispatch_one_bio(tg, bio_data_dir(bio)); |
1102 | nr_reads++; |
1103 | |
1104 | if (nr_reads >= max_nr_reads) |
1105 | break; |
1106 | } |
1107 | |
1108 | while ((bio = throtl_peek_queued(queued: &sq->queued[WRITE])) && |
1109 | tg_may_dispatch(tg, bio, NULL)) { |
1110 | |
1111 | tg_dispatch_one_bio(tg, bio_data_dir(bio)); |
1112 | nr_writes++; |
1113 | |
1114 | if (nr_writes >= max_nr_writes) |
1115 | break; |
1116 | } |
1117 | |
1118 | return nr_reads + nr_writes; |
1119 | } |
1120 | |
1121 | static int throtl_select_dispatch(struct throtl_service_queue *parent_sq) |
1122 | { |
1123 | unsigned int nr_disp = 0; |
1124 | |
1125 | while (1) { |
1126 | struct throtl_grp *tg; |
1127 | struct throtl_service_queue *sq; |
1128 | |
1129 | if (!parent_sq->nr_pending) |
1130 | break; |
1131 | |
1132 | tg = throtl_rb_first(parent_sq); |
1133 | if (!tg) |
1134 | break; |
1135 | |
1136 | if (time_before(jiffies, tg->disptime)) |
1137 | break; |
1138 | |
1139 | nr_disp += throtl_dispatch_tg(tg); |
1140 | |
1141 | sq = &tg->service_queue; |
1142 | if (sq->nr_queued[READ] || sq->nr_queued[WRITE]) |
1143 | tg_update_disptime(tg); |
1144 | else |
1145 | throtl_dequeue_tg(tg); |
1146 | |
1147 | if (nr_disp >= THROTL_QUANTUM) |
1148 | break; |
1149 | } |
1150 | |
1151 | return nr_disp; |
1152 | } |
1153 | |
1154 | static bool throtl_can_upgrade(struct throtl_data *td, |
1155 | struct throtl_grp *this_tg); |
1156 | /** |
1157 | * throtl_pending_timer_fn - timer function for service_queue->pending_timer |
1158 | * @t: the pending_timer member of the throtl_service_queue being serviced |
1159 | * |
1160 | * This timer is armed when a child throtl_grp with active bios becomes |
1161 | * pending and queued on the service_queue's pending_tree and expires when |
1162 | * the first child throtl_grp should be dispatched. This function |
1163 | * dispatches bios from the children throtl_grps to the parent |
1164 | * service_queue. |
1165 | * |
1166 | * If the parent's parent is another throtl_grp, dispatching is propagated |
1167 | * by either arming its pending_timer or repeating dispatch directly. If |
1168 | * the top-level service_tree is reached, throtl_data->dispatch_work is |
1169 | * kicked so that the ready bio's are issued. |
1170 | */ |
1171 | static void throtl_pending_timer_fn(struct timer_list *t) |
1172 | { |
1173 | struct throtl_service_queue *sq = from_timer(sq, t, pending_timer); |
1174 | struct throtl_grp *tg = sq_to_tg(sq); |
1175 | struct throtl_data *td = sq_to_td(sq); |
1176 | struct throtl_service_queue *parent_sq; |
1177 | struct request_queue *q; |
1178 | bool dispatched; |
1179 | int ret; |
1180 | |
1181 | /* throtl_data may be gone, so figure out request queue by blkg */ |
1182 | if (tg) |
1183 | q = tg->pd.blkg->q; |
1184 | else |
1185 | q = td->queue; |
1186 | |
1187 | spin_lock_irq(lock: &q->queue_lock); |
1188 | |
1189 | if (!q->root_blkg) |
1190 | goto out_unlock; |
1191 | |
1192 | if (throtl_can_upgrade(td, NULL)) |
1193 | throtl_upgrade_state(td); |
1194 | |
1195 | again: |
1196 | parent_sq = sq->parent_sq; |
1197 | dispatched = false; |
1198 | |
1199 | while (true) { |
1200 | throtl_log(sq, "dispatch nr_queued=%u read=%u write=%u" , |
1201 | sq->nr_queued[READ] + sq->nr_queued[WRITE], |
1202 | sq->nr_queued[READ], sq->nr_queued[WRITE]); |
1203 | |
1204 | ret = throtl_select_dispatch(parent_sq: sq); |
1205 | if (ret) { |
1206 | throtl_log(sq, "bios disp=%u" , ret); |
1207 | dispatched = true; |
1208 | } |
1209 | |
1210 | if (throtl_schedule_next_dispatch(sq, force: false)) |
1211 | break; |
1212 | |
1213 | /* this dispatch window is still open, relax and repeat */ |
1214 | spin_unlock_irq(lock: &q->queue_lock); |
1215 | cpu_relax(); |
1216 | spin_lock_irq(lock: &q->queue_lock); |
1217 | } |
1218 | |
1219 | if (!dispatched) |
1220 | goto out_unlock; |
1221 | |
1222 | if (parent_sq) { |
1223 | /* @parent_sq is another throtl_grp, propagate dispatch */ |
1224 | if (tg->flags & THROTL_TG_WAS_EMPTY) { |
1225 | tg_update_disptime(tg); |
1226 | if (!throtl_schedule_next_dispatch(sq: parent_sq, force: false)) { |
1227 | /* window is already open, repeat dispatching */ |
1228 | sq = parent_sq; |
1229 | tg = sq_to_tg(sq); |
1230 | goto again; |
1231 | } |
1232 | } |
1233 | } else { |
1234 | /* reached the top-level, queue issuing */ |
1235 | queue_work(wq: kthrotld_workqueue, work: &td->dispatch_work); |
1236 | } |
1237 | out_unlock: |
1238 | spin_unlock_irq(lock: &q->queue_lock); |
1239 | } |
1240 | |
1241 | /** |
1242 | * blk_throtl_dispatch_work_fn - work function for throtl_data->dispatch_work |
1243 | * @work: work item being executed |
1244 | * |
1245 | * This function is queued for execution when bios reach the bio_lists[] |
1246 | * of throtl_data->service_queue. Those bios are ready and issued by this |
1247 | * function. |
1248 | */ |
1249 | static void blk_throtl_dispatch_work_fn(struct work_struct *work) |
1250 | { |
1251 | struct throtl_data *td = container_of(work, struct throtl_data, |
1252 | dispatch_work); |
1253 | struct throtl_service_queue *td_sq = &td->service_queue; |
1254 | struct request_queue *q = td->queue; |
1255 | struct bio_list bio_list_on_stack; |
1256 | struct bio *bio; |
1257 | struct blk_plug plug; |
1258 | int rw; |
1259 | |
1260 | bio_list_init(bl: &bio_list_on_stack); |
1261 | |
1262 | spin_lock_irq(lock: &q->queue_lock); |
1263 | for (rw = READ; rw <= WRITE; rw++) |
1264 | while ((bio = throtl_pop_queued(queued: &td_sq->queued[rw], NULL))) |
1265 | bio_list_add(bl: &bio_list_on_stack, bio); |
1266 | spin_unlock_irq(lock: &q->queue_lock); |
1267 | |
1268 | if (!bio_list_empty(bl: &bio_list_on_stack)) { |
1269 | blk_start_plug(&plug); |
1270 | while ((bio = bio_list_pop(bl: &bio_list_on_stack))) |
1271 | submit_bio_noacct_nocheck(bio); |
1272 | blk_finish_plug(&plug); |
1273 | } |
1274 | } |
1275 | |
1276 | static u64 tg_prfill_conf_u64(struct seq_file *sf, struct blkg_policy_data *pd, |
1277 | int off) |
1278 | { |
1279 | struct throtl_grp *tg = pd_to_tg(pd); |
1280 | u64 v = *(u64 *)((void *)tg + off); |
1281 | |
1282 | if (v == U64_MAX) |
1283 | return 0; |
1284 | return __blkg_prfill_u64(sf, pd, v); |
1285 | } |
1286 | |
1287 | static u64 tg_prfill_conf_uint(struct seq_file *sf, struct blkg_policy_data *pd, |
1288 | int off) |
1289 | { |
1290 | struct throtl_grp *tg = pd_to_tg(pd); |
1291 | unsigned int v = *(unsigned int *)((void *)tg + off); |
1292 | |
1293 | if (v == UINT_MAX) |
1294 | return 0; |
1295 | return __blkg_prfill_u64(sf, pd, v); |
1296 | } |
1297 | |
1298 | static int tg_print_conf_u64(struct seq_file *sf, void *v) |
1299 | { |
1300 | blkcg_print_blkgs(sf, blkcg: css_to_blkcg(css: seq_css(seq: sf)), prfill: tg_prfill_conf_u64, |
1301 | pol: &blkcg_policy_throtl, data: seq_cft(seq: sf)->private, show_total: false); |
1302 | return 0; |
1303 | } |
1304 | |
1305 | static int tg_print_conf_uint(struct seq_file *sf, void *v) |
1306 | { |
1307 | blkcg_print_blkgs(sf, blkcg: css_to_blkcg(css: seq_css(seq: sf)), prfill: tg_prfill_conf_uint, |
1308 | pol: &blkcg_policy_throtl, data: seq_cft(seq: sf)->private, show_total: false); |
1309 | return 0; |
1310 | } |
1311 | |
1312 | static void tg_conf_updated(struct throtl_grp *tg, bool global) |
1313 | { |
1314 | struct throtl_service_queue *sq = &tg->service_queue; |
1315 | struct cgroup_subsys_state *pos_css; |
1316 | struct blkcg_gq *blkg; |
1317 | |
1318 | throtl_log(&tg->service_queue, |
1319 | "limit change rbps=%llu wbps=%llu riops=%u wiops=%u" , |
1320 | tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE), |
1321 | tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE)); |
1322 | |
1323 | /* |
1324 | * Update has_rules[] flags for the updated tg's subtree. A tg is |
1325 | * considered to have rules if either the tg itself or any of its |
1326 | * ancestors has rules. This identifies groups without any |
1327 | * restrictions in the whole hierarchy and allows them to bypass |
1328 | * blk-throttle. |
1329 | */ |
1330 | blkg_for_each_descendant_pre(blkg, pos_css, |
1331 | global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) { |
1332 | struct throtl_grp *this_tg = blkg_to_tg(blkg); |
1333 | struct throtl_grp *parent_tg; |
1334 | |
1335 | tg_update_has_rules(tg: this_tg); |
1336 | /* ignore root/second level */ |
1337 | if (!cgroup_subsys_on_dfl(io_cgrp_subsys) || !blkg->parent || |
1338 | !blkg->parent->parent) |
1339 | continue; |
1340 | parent_tg = blkg_to_tg(blkg: blkg->parent); |
1341 | /* |
1342 | * make sure all children have a lower idle time threshold and a |
1343 | * higher latency target |
1344 | */ |
1345 | this_tg->idletime_threshold = min(this_tg->idletime_threshold, |
1346 | parent_tg->idletime_threshold); |
1347 | this_tg->latency_target = max(this_tg->latency_target, |
1348 | parent_tg->latency_target); |
1349 | } |
1350 | |
1351 | /* |
1352 | * We're already holding queue_lock and know @tg is valid. Let's |
1353 | * apply the new config directly. |
1354 | * |
1355 | * Restart the slices for both READ and WRITE. It might happen |
1356 | * that a group's limits are dropped suddenly and we don't want to |
1357 | * account recently dispatched IO against the new low rate. |
1358 | */ |
1359 | throtl_start_new_slice(tg, READ, clear_carryover: false); |
1360 | throtl_start_new_slice(tg, WRITE, clear_carryover: false); |
1361 | |
1362 | if (tg->flags & THROTL_TG_PENDING) { |
1363 | tg_update_disptime(tg); |
1364 | throtl_schedule_next_dispatch(sq: sq->parent_sq, force: true); |
1365 | } |
1366 | } |
1367 | |
1368 | static ssize_t tg_set_conf(struct kernfs_open_file *of, |
1369 | char *buf, size_t nbytes, loff_t off, bool is_u64) |
1370 | { |
1371 | struct blkcg *blkcg = css_to_blkcg(css: of_css(of)); |
1372 | struct blkg_conf_ctx ctx; |
1373 | struct throtl_grp *tg; |
1374 | int ret; |
1375 | u64 v; |
1376 | |
1377 | blkg_conf_init(ctx: &ctx, input: buf); |
1378 | |
1379 | ret = blkg_conf_prep(blkcg, pol: &blkcg_policy_throtl, ctx: &ctx); |
1380 | if (ret) |
1381 | goto out_finish; |
1382 | |
1383 | ret = -EINVAL; |
1384 | if (sscanf(ctx.body, "%llu" , &v) != 1) |
1385 | goto out_finish; |
1386 | if (!v) |
1387 | v = U64_MAX; |
1388 | |
1389 | tg = blkg_to_tg(blkg: ctx.blkg); |
1390 | tg_update_carryover(tg); |
1391 | |
1392 | if (is_u64) |
1393 | *(u64 *)((void *)tg + of_cft(of)->private) = v; |
1394 | else |
1395 | *(unsigned int *)((void *)tg + of_cft(of)->private) = v; |
1396 | |
1397 | tg_conf_updated(tg, global: false); |
1398 | ret = 0; |
1399 | out_finish: |
1400 | blkg_conf_exit(ctx: &ctx); |
1401 | return ret ?: nbytes; |
1402 | } |
1403 | |
1404 | static ssize_t tg_set_conf_u64(struct kernfs_open_file *of, |
1405 | char *buf, size_t nbytes, loff_t off) |
1406 | { |
1407 | return tg_set_conf(of, buf, nbytes, off, is_u64: true); |
1408 | } |
1409 | |
1410 | static ssize_t tg_set_conf_uint(struct kernfs_open_file *of, |
1411 | char *buf, size_t nbytes, loff_t off) |
1412 | { |
1413 | return tg_set_conf(of, buf, nbytes, off, is_u64: false); |
1414 | } |
1415 | |
1416 | static int tg_print_rwstat(struct seq_file *sf, void *v) |
1417 | { |
1418 | blkcg_print_blkgs(sf, blkcg: css_to_blkcg(css: seq_css(seq: sf)), |
1419 | prfill: blkg_prfill_rwstat, pol: &blkcg_policy_throtl, |
1420 | data: seq_cft(seq: sf)->private, show_total: true); |
1421 | return 0; |
1422 | } |
1423 | |
1424 | static u64 tg_prfill_rwstat_recursive(struct seq_file *sf, |
1425 | struct blkg_policy_data *pd, int off) |
1426 | { |
1427 | struct blkg_rwstat_sample sum; |
1428 | |
1429 | blkg_rwstat_recursive_sum(blkg: pd_to_blkg(pd), pol: &blkcg_policy_throtl, off, |
1430 | sum: &sum); |
1431 | return __blkg_prfill_rwstat(sf, pd, rwstat: &sum); |
1432 | } |
1433 | |
1434 | static int tg_print_rwstat_recursive(struct seq_file *sf, void *v) |
1435 | { |
1436 | blkcg_print_blkgs(sf, blkcg: css_to_blkcg(css: seq_css(seq: sf)), |
1437 | prfill: tg_prfill_rwstat_recursive, pol: &blkcg_policy_throtl, |
1438 | data: seq_cft(seq: sf)->private, show_total: true); |
1439 | return 0; |
1440 | } |
1441 | |
1442 | static struct cftype throtl_legacy_files[] = { |
1443 | { |
1444 | .name = "throttle.read_bps_device" , |
1445 | .private = offsetof(struct throtl_grp, bps[READ][LIMIT_MAX]), |
1446 | .seq_show = tg_print_conf_u64, |
1447 | .write = tg_set_conf_u64, |
1448 | }, |
1449 | { |
1450 | .name = "throttle.write_bps_device" , |
1451 | .private = offsetof(struct throtl_grp, bps[WRITE][LIMIT_MAX]), |
1452 | .seq_show = tg_print_conf_u64, |
1453 | .write = tg_set_conf_u64, |
1454 | }, |
1455 | { |
1456 | .name = "throttle.read_iops_device" , |
1457 | .private = offsetof(struct throtl_grp, iops[READ][LIMIT_MAX]), |
1458 | .seq_show = tg_print_conf_uint, |
1459 | .write = tg_set_conf_uint, |
1460 | }, |
1461 | { |
1462 | .name = "throttle.write_iops_device" , |
1463 | .private = offsetof(struct throtl_grp, iops[WRITE][LIMIT_MAX]), |
1464 | .seq_show = tg_print_conf_uint, |
1465 | .write = tg_set_conf_uint, |
1466 | }, |
1467 | { |
1468 | .name = "throttle.io_service_bytes" , |
1469 | .private = offsetof(struct throtl_grp, stat_bytes), |
1470 | .seq_show = tg_print_rwstat, |
1471 | }, |
1472 | { |
1473 | .name = "throttle.io_service_bytes_recursive" , |
1474 | .private = offsetof(struct throtl_grp, stat_bytes), |
1475 | .seq_show = tg_print_rwstat_recursive, |
1476 | }, |
1477 | { |
1478 | .name = "throttle.io_serviced" , |
1479 | .private = offsetof(struct throtl_grp, stat_ios), |
1480 | .seq_show = tg_print_rwstat, |
1481 | }, |
1482 | { |
1483 | .name = "throttle.io_serviced_recursive" , |
1484 | .private = offsetof(struct throtl_grp, stat_ios), |
1485 | .seq_show = tg_print_rwstat_recursive, |
1486 | }, |
1487 | { } /* terminate */ |
1488 | }; |
1489 | |
1490 | static u64 tg_prfill_limit(struct seq_file *sf, struct blkg_policy_data *pd, |
1491 | int off) |
1492 | { |
1493 | struct throtl_grp *tg = pd_to_tg(pd); |
1494 | const char *dname = blkg_dev_name(blkg: pd->blkg); |
1495 | char bufs[4][21] = { "max" , "max" , "max" , "max" }; |
1496 | u64 bps_dft; |
1497 | unsigned int iops_dft; |
1498 | char idle_time[26] = "" ; |
1499 | char latency_time[26] = "" ; |
1500 | |
1501 | if (!dname) |
1502 | return 0; |
1503 | |
1504 | if (off == LIMIT_LOW) { |
1505 | bps_dft = 0; |
1506 | iops_dft = 0; |
1507 | } else { |
1508 | bps_dft = U64_MAX; |
1509 | iops_dft = UINT_MAX; |
1510 | } |
1511 | |
1512 | if (tg->bps_conf[READ][off] == bps_dft && |
1513 | tg->bps_conf[WRITE][off] == bps_dft && |
1514 | tg->iops_conf[READ][off] == iops_dft && |
1515 | tg->iops_conf[WRITE][off] == iops_dft && |
1516 | (off != LIMIT_LOW || |
1517 | (tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD && |
1518 | tg->latency_target_conf == DFL_LATENCY_TARGET))) |
1519 | return 0; |
1520 | |
1521 | if (tg->bps_conf[READ][off] != U64_MAX) |
1522 | snprintf(buf: bufs[0], size: sizeof(bufs[0]), fmt: "%llu" , |
1523 | tg->bps_conf[READ][off]); |
1524 | if (tg->bps_conf[WRITE][off] != U64_MAX) |
1525 | snprintf(buf: bufs[1], size: sizeof(bufs[1]), fmt: "%llu" , |
1526 | tg->bps_conf[WRITE][off]); |
1527 | if (tg->iops_conf[READ][off] != UINT_MAX) |
1528 | snprintf(buf: bufs[2], size: sizeof(bufs[2]), fmt: "%u" , |
1529 | tg->iops_conf[READ][off]); |
1530 | if (tg->iops_conf[WRITE][off] != UINT_MAX) |
1531 | snprintf(buf: bufs[3], size: sizeof(bufs[3]), fmt: "%u" , |
1532 | tg->iops_conf[WRITE][off]); |
1533 | if (off == LIMIT_LOW) { |
1534 | if (tg->idletime_threshold_conf == ULONG_MAX) |
1535 | strcpy(p: idle_time, q: " idle=max" ); |
1536 | else |
1537 | snprintf(buf: idle_time, size: sizeof(idle_time), fmt: " idle=%lu" , |
1538 | tg->idletime_threshold_conf); |
1539 | |
1540 | if (tg->latency_target_conf == ULONG_MAX) |
1541 | strcpy(p: latency_time, q: " latency=max" ); |
1542 | else |
1543 | snprintf(buf: latency_time, size: sizeof(latency_time), |
1544 | fmt: " latency=%lu" , tg->latency_target_conf); |
1545 | } |
1546 | |
1547 | seq_printf(m: sf, fmt: "%s rbps=%s wbps=%s riops=%s wiops=%s%s%s\n" , |
1548 | dname, bufs[0], bufs[1], bufs[2], bufs[3], idle_time, |
1549 | latency_time); |
1550 | return 0; |
1551 | } |
1552 | |
1553 | static int tg_print_limit(struct seq_file *sf, void *v) |
1554 | { |
1555 | blkcg_print_blkgs(sf, blkcg: css_to_blkcg(css: seq_css(seq: sf)), prfill: tg_prfill_limit, |
1556 | pol: &blkcg_policy_throtl, data: seq_cft(seq: sf)->private, show_total: false); |
1557 | return 0; |
1558 | } |
1559 | |
1560 | static ssize_t tg_set_limit(struct kernfs_open_file *of, |
1561 | char *buf, size_t nbytes, loff_t off) |
1562 | { |
1563 | struct blkcg *blkcg = css_to_blkcg(css: of_css(of)); |
1564 | struct blkg_conf_ctx ctx; |
1565 | struct throtl_grp *tg; |
1566 | u64 v[4]; |
1567 | unsigned long idle_time; |
1568 | unsigned long latency_time; |
1569 | int ret; |
1570 | int index = of_cft(of)->private; |
1571 | |
1572 | blkg_conf_init(ctx: &ctx, input: buf); |
1573 | |
1574 | ret = blkg_conf_prep(blkcg, pol: &blkcg_policy_throtl, ctx: &ctx); |
1575 | if (ret) |
1576 | goto out_finish; |
1577 | |
1578 | tg = blkg_to_tg(blkg: ctx.blkg); |
1579 | tg_update_carryover(tg); |
1580 | |
1581 | v[0] = tg->bps_conf[READ][index]; |
1582 | v[1] = tg->bps_conf[WRITE][index]; |
1583 | v[2] = tg->iops_conf[READ][index]; |
1584 | v[3] = tg->iops_conf[WRITE][index]; |
1585 | |
1586 | idle_time = tg->idletime_threshold_conf; |
1587 | latency_time = tg->latency_target_conf; |
1588 | while (true) { |
1589 | char tok[27]; /* wiops=18446744073709551616 */ |
1590 | char *p; |
1591 | u64 val = U64_MAX; |
1592 | int len; |
1593 | |
		if (sscanf(ctx.body, "%26s%n", tok, &len) != 1)
1595 | break; |
1596 | if (tok[0] == '\0') |
1597 | break; |
1598 | ctx.body += len; |
1599 | |
1600 | ret = -EINVAL; |
1601 | p = tok; |
		strsep(&p, "=");
		if (!p || (sscanf(p, "%llu", &val) != 1 && strcmp(p, "max")))
1604 | goto out_finish; |
1605 | |
1606 | ret = -ERANGE; |
1607 | if (!val) |
1608 | goto out_finish; |
1609 | |
1610 | ret = -EINVAL; |
		if (!strcmp(tok, "rbps") && val > 1)
			v[0] = val;
		else if (!strcmp(tok, "wbps") && val > 1)
			v[1] = val;
		else if (!strcmp(tok, "riops") && val > 1)
			v[2] = min_t(u64, val, UINT_MAX);
		else if (!strcmp(tok, "wiops") && val > 1)
			v[3] = min_t(u64, val, UINT_MAX);
		else if (off == LIMIT_LOW && !strcmp(tok, "idle"))
			idle_time = val;
		else if (off == LIMIT_LOW && !strcmp(tok, "latency"))
1622 | latency_time = val; |
1623 | else |
1624 | goto out_finish; |
1625 | } |
1626 | |
1627 | tg->bps_conf[READ][index] = v[0]; |
1628 | tg->bps_conf[WRITE][index] = v[1]; |
1629 | tg->iops_conf[READ][index] = v[2]; |
1630 | tg->iops_conf[WRITE][index] = v[3]; |
1631 | |
1632 | if (index == LIMIT_MAX) { |
1633 | tg->bps[READ][index] = v[0]; |
1634 | tg->bps[WRITE][index] = v[1]; |
1635 | tg->iops[READ][index] = v[2]; |
1636 | tg->iops[WRITE][index] = v[3]; |
1637 | } |
1638 | tg->bps[READ][LIMIT_LOW] = min(tg->bps_conf[READ][LIMIT_LOW], |
1639 | tg->bps_conf[READ][LIMIT_MAX]); |
1640 | tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW], |
1641 | tg->bps_conf[WRITE][LIMIT_MAX]); |
1642 | tg->iops[READ][LIMIT_LOW] = min(tg->iops_conf[READ][LIMIT_LOW], |
1643 | tg->iops_conf[READ][LIMIT_MAX]); |
1644 | tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW], |
1645 | tg->iops_conf[WRITE][LIMIT_MAX]); |
1646 | tg->idletime_threshold_conf = idle_time; |
1647 | tg->latency_target_conf = latency_time; |
1648 | |
1649 | /* force user to configure all settings for low limit */ |
1650 | if (!(tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW] || |
1651 | tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) || |
1652 | tg->idletime_threshold_conf == DFL_IDLE_THRESHOLD || |
1653 | tg->latency_target_conf == DFL_LATENCY_TARGET) { |
1654 | tg->bps[READ][LIMIT_LOW] = 0; |
1655 | tg->bps[WRITE][LIMIT_LOW] = 0; |
1656 | tg->iops[READ][LIMIT_LOW] = 0; |
1657 | tg->iops[WRITE][LIMIT_LOW] = 0; |
1658 | tg->idletime_threshold = DFL_IDLE_THRESHOLD; |
1659 | tg->latency_target = DFL_LATENCY_TARGET; |
1660 | } else if (index == LIMIT_LOW) { |
1661 | tg->idletime_threshold = tg->idletime_threshold_conf; |
1662 | tg->latency_target = tg->latency_target_conf; |
1663 | } |
1664 | |
	blk_throtl_update_limit_valid(tg->td);
1666 | if (tg->td->limit_valid[LIMIT_LOW]) { |
1667 | if (index == LIMIT_LOW) |
1668 | tg->td->limit_index = LIMIT_LOW; |
1669 | } else |
1670 | tg->td->limit_index = LIMIT_MAX; |
	tg_conf_updated(tg, index == LIMIT_LOW &&
1672 | tg->td->limit_valid[LIMIT_LOW]); |
1673 | ret = 0; |
1674 | out_finish: |
	blkg_conf_exit(&ctx);
1676 | return ret ?: nbytes; |
1677 | } |
1678 | |
1679 | static struct cftype throtl_files[] = { |
1680 | #ifdef CONFIG_BLK_DEV_THROTTLING_LOW |
1681 | { |
		.name = "low",
1683 | .flags = CFTYPE_NOT_ON_ROOT, |
1684 | .seq_show = tg_print_limit, |
1685 | .write = tg_set_limit, |
1686 | .private = LIMIT_LOW, |
1687 | }, |
1688 | #endif |
1689 | { |
		.name = "max",
1691 | .flags = CFTYPE_NOT_ON_ROOT, |
1692 | .seq_show = tg_print_limit, |
1693 | .write = tg_set_limit, |
1694 | .private = LIMIT_MAX, |
1695 | }, |
1696 | { } /* terminate */ |
1697 | }; |
1698 | |
1699 | static void throtl_shutdown_wq(struct request_queue *q) |
1700 | { |
1701 | struct throtl_data *td = q->td; |
1702 | |
	cancel_work_sync(&td->dispatch_work);
1704 | } |
1705 | |
1706 | struct blkcg_policy blkcg_policy_throtl = { |
1707 | .dfl_cftypes = throtl_files, |
1708 | .legacy_cftypes = throtl_legacy_files, |
1709 | |
1710 | .pd_alloc_fn = throtl_pd_alloc, |
1711 | .pd_init_fn = throtl_pd_init, |
1712 | .pd_online_fn = throtl_pd_online, |
1713 | .pd_offline_fn = throtl_pd_offline, |
1714 | .pd_free_fn = throtl_pd_free, |
1715 | }; |
1716 | |
1717 | void blk_throtl_cancel_bios(struct gendisk *disk) |
1718 | { |
1719 | struct request_queue *q = disk->queue; |
1720 | struct cgroup_subsys_state *pos_css; |
1721 | struct blkcg_gq *blkg; |
1722 | |
	spin_lock_irq(&q->queue_lock);
1724 | /* |
	 * queue_lock is held, so the RCU lock is not technically needed
	 * here.  However, it is still taken to emphasize that the following
	 * path needs RCU protection and to silence a lockdep warning.
1728 | */ |
1729 | rcu_read_lock(); |
1730 | blkg_for_each_descendant_post(blkg, pos_css, q->root_blkg) { |
1731 | struct throtl_grp *tg = blkg_to_tg(blkg); |
1732 | struct throtl_service_queue *sq = &tg->service_queue; |
1733 | |
1734 | /* |
1735 | * Set the flag to make sure throtl_pending_timer_fn() won't |
1736 | * stop until all throttled bios are dispatched. |
1737 | */ |
1738 | tg->flags |= THROTL_TG_CANCELING; |
1739 | |
1740 | /* |
		 * Do not touch a cgroup without THROTL_TG_PENDING, or it will
		 * be inserted into the service queue without the flag set by
		 * tg_update_disptime() below.  IO dispatched from a child in
		 * tg_dispatch_one_bio() would then trigger a double insertion
		 * and corrupt the tree.
1746 | */ |
1747 | if (!(tg->flags & THROTL_TG_PENDING)) |
1748 | continue; |
1749 | |
1750 | /* |
1751 | * Update disptime after setting the above flag to make sure |
1752 | * throtl_select_dispatch() won't exit without dispatching. |
1753 | */ |
1754 | tg_update_disptime(tg); |
1755 | |
		throtl_schedule_pending_timer(sq, jiffies + 1);
1757 | } |
1758 | rcu_read_unlock(); |
	spin_unlock_irq(&q->queue_lock);
1760 | } |
1761 | |
1762 | #ifdef CONFIG_BLK_DEV_THROTTLING_LOW |
1763 | static unsigned long __tg_last_low_overflow_time(struct throtl_grp *tg) |
1764 | { |
1765 | unsigned long rtime = jiffies, wtime = jiffies; |
1766 | |
1767 | if (tg->bps[READ][LIMIT_LOW] || tg->iops[READ][LIMIT_LOW]) |
1768 | rtime = tg->last_low_overflow_time[READ]; |
1769 | if (tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) |
1770 | wtime = tg->last_low_overflow_time[WRITE]; |
1771 | return min(rtime, wtime); |
1772 | } |
1773 | |
1774 | static unsigned long tg_last_low_overflow_time(struct throtl_grp *tg) |
1775 | { |
1776 | struct throtl_service_queue *parent_sq; |
1777 | struct throtl_grp *parent = tg; |
1778 | unsigned long ret = __tg_last_low_overflow_time(tg); |
1779 | |
1780 | while (true) { |
1781 | parent_sq = parent->service_queue.parent_sq; |
		parent = sq_to_tg(parent_sq);
1783 | if (!parent) |
1784 | break; |
1785 | |
1786 | /* |
		 * A parent without a low limit always reaches its low limit,
		 * so its overflow time is meaningless for its children.
1789 | */ |
1790 | if (!parent->bps[READ][LIMIT_LOW] && |
1791 | !parent->iops[READ][LIMIT_LOW] && |
1792 | !parent->bps[WRITE][LIMIT_LOW] && |
1793 | !parent->iops[WRITE][LIMIT_LOW]) |
1794 | continue; |
1795 | if (time_after(__tg_last_low_overflow_time(parent), ret)) |
			ret = __tg_last_low_overflow_time(parent);
1797 | } |
1798 | return ret; |
1799 | } |
1800 | |
1801 | static bool throtl_tg_is_idle(struct throtl_grp *tg) |
1802 | { |
1803 | /* |
	 * A cgroup is considered idle if any of the following holds:
	 * - the current idle period is too long: longer than a fixed cap (in
	 *   case the user configured too big a threshold) or 4 times the
	 *   idletime threshold
	 * - the average think time exceeds the threshold
	 * - IO latency is mostly below the latency target
1809 | */ |
1810 | unsigned long time; |
1811 | bool ret; |
1812 | |
1813 | time = min_t(unsigned long, MAX_IDLE_TIME, 4 * tg->idletime_threshold); |
1814 | ret = tg->latency_target == DFL_LATENCY_TARGET || |
1815 | tg->idletime_threshold == DFL_IDLE_THRESHOLD || |
1816 | (ktime_get_ns() >> 10) - tg->last_finish_time > time || |
1817 | tg->avg_idletime > tg->idletime_threshold || |
1818 | (tg->latency_target && tg->bio_cnt && |
1819 | tg->bad_bio_cnt * 5 < tg->bio_cnt); |
	throtl_log(&tg->service_queue,
		"avg_idle=%ld, idle_threshold=%ld, bad_bio=%d, total_bio=%d, is_idle=%d, scale=%d",
		tg->avg_idletime, tg->idletime_threshold, tg->bad_bio_cnt,
		tg->bio_cnt, ret, tg->td->scale);
1824 | return ret; |
1825 | } |
1826 | |
1827 | static bool throtl_low_limit_reached(struct throtl_grp *tg, int rw) |
1828 | { |
1829 | struct throtl_service_queue *sq = &tg->service_queue; |
1830 | bool limit = tg->bps[rw][LIMIT_LOW] || tg->iops[rw][LIMIT_LOW]; |
1831 | |
1832 | /* |
	 * If the low limit is zero, it is always considered reached.
	 * If it is non-zero, checking whether any request is queued tells us
	 * whether the low limit has been reached, since requests are
	 * throttled according to that limit.
1837 | */ |
1838 | return !limit || sq->nr_queued[rw]; |
1839 | } |
1840 | |
1841 | static bool throtl_tg_can_upgrade(struct throtl_grp *tg) |
1842 | { |
1843 | /* |
	 * A cgroup reaches its low limit only when both the READ and WRITE
	 * low limits are reached; once that happens it is OK to upgrade to
	 * the next limit.
1847 | */ |
1848 | if (throtl_low_limit_reached(tg, READ) && |
1849 | throtl_low_limit_reached(tg, WRITE)) |
1850 | return true; |
1851 | |
1852 | if (time_after_eq(jiffies, |
1853 | tg_last_low_overflow_time(tg) + tg->td->throtl_slice) && |
1854 | throtl_tg_is_idle(tg)) |
1855 | return true; |
1856 | return false; |
1857 | } |
1858 | |
1859 | static bool throtl_hierarchy_can_upgrade(struct throtl_grp *tg) |
1860 | { |
1861 | while (true) { |
1862 | if (throtl_tg_can_upgrade(tg)) |
1863 | return true; |
		tg = sq_to_tg(tg->service_queue.parent_sq);
1865 | if (!tg || !tg_to_blkg(tg)->parent) |
1866 | return false; |
1867 | } |
1868 | return false; |
1869 | } |
1870 | |
1871 | static bool throtl_can_upgrade(struct throtl_data *td, |
1872 | struct throtl_grp *this_tg) |
1873 | { |
1874 | struct cgroup_subsys_state *pos_css; |
1875 | struct blkcg_gq *blkg; |
1876 | |
1877 | if (td->limit_index != LIMIT_LOW) |
1878 | return false; |
1879 | |
1880 | if (time_before(jiffies, td->low_downgrade_time + td->throtl_slice)) |
1881 | return false; |
1882 | |
1883 | rcu_read_lock(); |
1884 | blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) { |
1885 | struct throtl_grp *tg = blkg_to_tg(blkg); |
1886 | |
1887 | if (tg == this_tg) |
1888 | continue; |
		if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
1890 | continue; |
1891 | if (!throtl_hierarchy_can_upgrade(tg)) { |
1892 | rcu_read_unlock(); |
1893 | return false; |
1894 | } |
1895 | } |
1896 | rcu_read_unlock(); |
1897 | return true; |
1898 | } |
1899 | |
1900 | static void throtl_upgrade_check(struct throtl_grp *tg) |
1901 | { |
1902 | unsigned long now = jiffies; |
1903 | |
1904 | if (tg->td->limit_index != LIMIT_LOW) |
1905 | return; |
1906 | |
1907 | if (time_after(tg->last_check_time + tg->td->throtl_slice, now)) |
1908 | return; |
1909 | |
1910 | tg->last_check_time = now; |
1911 | |
1912 | if (!time_after_eq(now, |
1913 | __tg_last_low_overflow_time(tg) + tg->td->throtl_slice)) |
1914 | return; |
1915 | |
	if (throtl_can_upgrade(tg->td, NULL))
		throtl_upgrade_state(tg->td);
1918 | } |
1919 | |
1920 | static void throtl_upgrade_state(struct throtl_data *td) |
1921 | { |
1922 | struct cgroup_subsys_state *pos_css; |
1923 | struct blkcg_gq *blkg; |
1924 | |
	throtl_log(&td->service_queue, "upgrade to max");
1926 | td->limit_index = LIMIT_MAX; |
1927 | td->low_upgrade_time = jiffies; |
1928 | td->scale = 0; |
1929 | rcu_read_lock(); |
1930 | blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) { |
1931 | struct throtl_grp *tg = blkg_to_tg(blkg); |
1932 | struct throtl_service_queue *sq = &tg->service_queue; |
1933 | |
1934 | tg->disptime = jiffies - 1; |
		throtl_select_dispatch(sq);
		throtl_schedule_next_dispatch(sq, true);
1937 | } |
1938 | rcu_read_unlock(); |
	throtl_select_dispatch(&td->service_queue);
	throtl_schedule_next_dispatch(&td->service_queue, true);
	queue_work(kthrotld_workqueue, &td->dispatch_work);
1942 | } |
1943 | |
1944 | static void throtl_downgrade_state(struct throtl_data *td) |
1945 | { |
1946 | td->scale /= 2; |
1947 | |
	throtl_log(&td->service_queue, "downgrade, scale %d", td->scale);
1949 | if (td->scale) { |
1950 | td->low_upgrade_time = jiffies - td->scale * td->throtl_slice; |
1951 | return; |
1952 | } |
1953 | |
1954 | td->limit_index = LIMIT_LOW; |
1955 | td->low_downgrade_time = jiffies; |
1956 | } |
1957 | |
1958 | static bool throtl_tg_can_downgrade(struct throtl_grp *tg) |
1959 | { |
1960 | struct throtl_data *td = tg->td; |
1961 | unsigned long now = jiffies; |
1962 | |
1963 | /* |
1964 | * If cgroup is below low limit, consider downgrade and throttle other |
1965 | * cgroups |
1966 | */ |
1967 | if (time_after_eq(now, tg_last_low_overflow_time(tg) + |
1968 | td->throtl_slice) && |
1969 | (!throtl_tg_is_idle(tg) || |
	     !list_empty(&tg_to_blkg(tg)->blkcg->css.children)))
1971 | return true; |
1972 | return false; |
1973 | } |
1974 | |
1975 | static bool throtl_hierarchy_can_downgrade(struct throtl_grp *tg) |
1976 | { |
1977 | struct throtl_data *td = tg->td; |
1978 | |
1979 | if (time_before(jiffies, td->low_upgrade_time + td->throtl_slice)) |
1980 | return false; |
1981 | |
1982 | while (true) { |
1983 | if (!throtl_tg_can_downgrade(tg)) |
1984 | return false; |
		tg = sq_to_tg(tg->service_queue.parent_sq);
1986 | if (!tg || !tg_to_blkg(tg)->parent) |
1987 | break; |
1988 | } |
1989 | return true; |
1990 | } |
1991 | |
1992 | static void throtl_downgrade_check(struct throtl_grp *tg) |
1993 | { |
1994 | uint64_t bps; |
1995 | unsigned int iops; |
1996 | unsigned long elapsed_time; |
1997 | unsigned long now = jiffies; |
1998 | |
1999 | if (tg->td->limit_index != LIMIT_MAX || |
2000 | !tg->td->limit_valid[LIMIT_LOW]) |
2001 | return; |
	if (!list_empty(&tg_to_blkg(tg)->blkcg->css.children))
2003 | return; |
2004 | if (time_after(tg->last_check_time + tg->td->throtl_slice, now)) |
2005 | return; |
2006 | |
2007 | elapsed_time = now - tg->last_check_time; |
2008 | tg->last_check_time = now; |
2009 | |
2010 | if (time_before(now, tg_last_low_overflow_time(tg) + |
2011 | tg->td->throtl_slice)) |
2012 | return; |
2013 | |
2014 | if (tg->bps[READ][LIMIT_LOW]) { |
2015 | bps = tg->last_bytes_disp[READ] * HZ; |
2016 | do_div(bps, elapsed_time); |
2017 | if (bps >= tg->bps[READ][LIMIT_LOW]) |
2018 | tg->last_low_overflow_time[READ] = now; |
2019 | } |
2020 | |
2021 | if (tg->bps[WRITE][LIMIT_LOW]) { |
2022 | bps = tg->last_bytes_disp[WRITE] * HZ; |
2023 | do_div(bps, elapsed_time); |
2024 | if (bps >= tg->bps[WRITE][LIMIT_LOW]) |
2025 | tg->last_low_overflow_time[WRITE] = now; |
2026 | } |
2027 | |
2028 | if (tg->iops[READ][LIMIT_LOW]) { |
2029 | iops = tg->last_io_disp[READ] * HZ / elapsed_time; |
2030 | if (iops >= tg->iops[READ][LIMIT_LOW]) |
2031 | tg->last_low_overflow_time[READ] = now; |
2032 | } |
2033 | |
2034 | if (tg->iops[WRITE][LIMIT_LOW]) { |
2035 | iops = tg->last_io_disp[WRITE] * HZ / elapsed_time; |
2036 | if (iops >= tg->iops[WRITE][LIMIT_LOW]) |
2037 | tg->last_low_overflow_time[WRITE] = now; |
2038 | } |
2039 | |
2040 | /* |
2041 | * If cgroup is below low limit, consider downgrade and throttle other |
2042 | * cgroups |
2043 | */ |
2044 | if (throtl_hierarchy_can_downgrade(tg)) |
		throtl_downgrade_state(tg->td);
2046 | |
2047 | tg->last_bytes_disp[READ] = 0; |
2048 | tg->last_bytes_disp[WRITE] = 0; |
2049 | tg->last_io_disp[READ] = 0; |
2050 | tg->last_io_disp[WRITE] = 0; |
2051 | } |
2052 | |
2053 | static void blk_throtl_update_idletime(struct throtl_grp *tg) |
2054 | { |
2055 | unsigned long now; |
2056 | unsigned long last_finish_time = tg->last_finish_time; |
2057 | |
2058 | if (last_finish_time == 0) |
2059 | return; |
2060 | |
2061 | now = ktime_get_ns() >> 10; |
2062 | if (now <= last_finish_time || |
2063 | last_finish_time == tg->checked_last_finish_time) |
2064 | return; |
2065 | |
2066 | tg->avg_idletime = (tg->avg_idletime * 7 + now - last_finish_time) >> 3; |
2067 | tg->checked_last_finish_time = last_finish_time; |
2068 | } |
2069 | |
2070 | static void throtl_update_latency_buckets(struct throtl_data *td) |
2071 | { |
2072 | struct avg_latency_bucket avg_latency[2][LATENCY_BUCKET_SIZE]; |
2073 | int i, cpu, rw; |
2074 | unsigned long last_latency[2] = { 0 }; |
2075 | unsigned long latency[2]; |
2076 | |
2077 | if (!blk_queue_nonrot(td->queue) || !td->limit_valid[LIMIT_LOW]) |
2078 | return; |
2079 | if (time_before(jiffies, td->last_calculate_time + HZ)) |
2080 | return; |
2081 | td->last_calculate_time = jiffies; |
2082 | |
2083 | memset(avg_latency, 0, sizeof(avg_latency)); |
2084 | for (rw = READ; rw <= WRITE; rw++) { |
2085 | for (i = 0; i < LATENCY_BUCKET_SIZE; i++) { |
2086 | struct latency_bucket *tmp = &td->tmp_buckets[rw][i]; |
2087 | |
2088 | for_each_possible_cpu(cpu) { |
2089 | struct latency_bucket *bucket; |
2090 | |
2091 | /* this isn't race free, but ok in practice */ |
2092 | bucket = per_cpu_ptr(td->latency_buckets[rw], |
2093 | cpu); |
2094 | tmp->total_latency += bucket[i].total_latency; |
2095 | tmp->samples += bucket[i].samples; |
2096 | bucket[i].total_latency = 0; |
2097 | bucket[i].samples = 0; |
2098 | } |
2099 | |
2100 | if (tmp->samples >= 32) { |
2101 | int samples = tmp->samples; |
2102 | |
2103 | latency[rw] = tmp->total_latency; |
2104 | |
2105 | tmp->total_latency = 0; |
2106 | tmp->samples = 0; |
2107 | latency[rw] /= samples; |
2108 | if (latency[rw] == 0) |
2109 | continue; |
2110 | avg_latency[rw][i].latency = latency[rw]; |
2111 | } |
2112 | } |
2113 | } |
2114 | |
2115 | for (rw = READ; rw <= WRITE; rw++) { |
2116 | for (i = 0; i < LATENCY_BUCKET_SIZE; i++) { |
2117 | if (!avg_latency[rw][i].latency) { |
2118 | if (td->avg_buckets[rw][i].latency < last_latency[rw]) |
2119 | td->avg_buckets[rw][i].latency = |
2120 | last_latency[rw]; |
2121 | continue; |
2122 | } |
2123 | |
2124 | if (!td->avg_buckets[rw][i].valid) |
2125 | latency[rw] = avg_latency[rw][i].latency; |
2126 | else |
2127 | latency[rw] = (td->avg_buckets[rw][i].latency * 7 + |
2128 | avg_latency[rw][i].latency) >> 3; |
2129 | |
2130 | td->avg_buckets[rw][i].latency = max(latency[rw], |
2131 | last_latency[rw]); |
2132 | td->avg_buckets[rw][i].valid = true; |
2133 | last_latency[rw] = td->avg_buckets[rw][i].latency; |
2134 | } |
2135 | } |
2136 | |
2137 | for (i = 0; i < LATENCY_BUCKET_SIZE; i++) |
		throtl_log(&td->service_queue,
			"Latency bucket %d: read latency=%ld, read valid=%d, "
			"write latency=%ld, write valid=%d", i,
			td->avg_buckets[READ][i].latency,
			td->avg_buckets[READ][i].valid,
			td->avg_buckets[WRITE][i].latency,
			td->avg_buckets[WRITE][i].valid);
2145 | } |
2146 | #else |
2147 | static inline void throtl_update_latency_buckets(struct throtl_data *td) |
2148 | { |
2149 | } |
2150 | |
2151 | static void blk_throtl_update_idletime(struct throtl_grp *tg) |
2152 | { |
2153 | } |
2154 | |
2155 | static void throtl_downgrade_check(struct throtl_grp *tg) |
2156 | { |
2157 | } |
2158 | |
2159 | static void throtl_upgrade_check(struct throtl_grp *tg) |
2160 | { |
2161 | } |
2162 | |
2163 | static bool throtl_can_upgrade(struct throtl_data *td, |
2164 | struct throtl_grp *this_tg) |
2165 | { |
2166 | return false; |
2167 | } |
2168 | |
2169 | static void throtl_upgrade_state(struct throtl_data *td) |
2170 | { |
2171 | } |
2172 | #endif |
2173 | |
2174 | bool __blk_throtl_bio(struct bio *bio) |
2175 | { |
	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
2177 | struct blkcg_gq *blkg = bio->bi_blkg; |
2178 | struct throtl_qnode *qn = NULL; |
2179 | struct throtl_grp *tg = blkg_to_tg(blkg); |
2180 | struct throtl_service_queue *sq; |
2181 | bool rw = bio_data_dir(bio); |
2182 | bool throttled = false; |
2183 | struct throtl_data *td = tg->td; |
2184 | |
2185 | rcu_read_lock(); |
2186 | |
	spin_lock_irq(&q->queue_lock);
2188 | |
2189 | throtl_update_latency_buckets(td); |
2190 | |
2191 | blk_throtl_update_idletime(tg); |
2192 | |
2193 | sq = &tg->service_queue; |
2194 | |
2195 | again: |
2196 | while (true) { |
2197 | if (tg->last_low_overflow_time[rw] == 0) |
2198 | tg->last_low_overflow_time[rw] = jiffies; |
2199 | throtl_downgrade_check(tg); |
2200 | throtl_upgrade_check(tg); |
		/* throtl is FIFO - if bios are already queued, queue this one too */
2202 | if (sq->nr_queued[rw]) |
2203 | break; |
2204 | |
2205 | /* if above limits, break to queue */ |
2206 | if (!tg_may_dispatch(tg, bio, NULL)) { |
2207 | tg->last_low_overflow_time[rw] = jiffies; |
			if (throtl_can_upgrade(td, tg)) {
2209 | throtl_upgrade_state(td); |
2210 | goto again; |
2211 | } |
2212 | break; |
2213 | } |
2214 | |
2215 | /* within limits, let's charge and dispatch directly */ |
2216 | throtl_charge_bio(tg, bio); |
2217 | |
2218 | /* |
2219 | * We need to trim slice even when bios are not being queued |
2220 | * otherwise it might happen that a bio is not queued for |
2221 | * a long time and slice keeps on extending and trim is not |
2222 | * called for a long time. Now if limits are reduced suddenly |
2223 | * we take into account all the IO dispatched so far at new |
		 * low rate and newly queued IO gets a really long dispatch
2225 | * time. |
2226 | * |
2227 | * So keep on trimming slice even if bio is not queued. |
2228 | */ |
2229 | throtl_trim_slice(tg, rw); |
2230 | |
2231 | /* |
2232 | * @bio passed through this layer without being throttled. |
2233 | * Climb up the ladder. If we're already at the top, it |
2234 | * can be executed directly. |
2235 | */ |
2236 | qn = &tg->qnode_on_parent[rw]; |
2237 | sq = sq->parent_sq; |
2238 | tg = sq_to_tg(sq); |
2239 | if (!tg) { |
			bio_set_flag(bio, BIO_BPS_THROTTLED);
2241 | goto out_unlock; |
2242 | } |
2243 | } |
2244 | |
2245 | /* out-of-limit, queue to @tg */ |
	throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
2247 | rw == READ ? 'R' : 'W', |
2248 | tg->bytes_disp[rw], bio->bi_iter.bi_size, |
2249 | tg_bps_limit(tg, rw), |
2250 | tg->io_disp[rw], tg_iops_limit(tg, rw), |
2251 | sq->nr_queued[READ], sq->nr_queued[WRITE]); |
2252 | |
2253 | tg->last_low_overflow_time[rw] = jiffies; |
2254 | |
2255 | td->nr_queued[rw]++; |
2256 | throtl_add_bio_tg(bio, qn, tg); |
2257 | throttled = true; |
2258 | |
2259 | /* |
2260 | * Update @tg's dispatch time and force schedule dispatch if @tg |
2261 | * was empty before @bio. The forced scheduling isn't likely to |
2262 | * cause undue delay as @bio is likely to be dispatched directly if |
2263 | * its @tg's disptime is not in the future. |
2264 | */ |
2265 | if (tg->flags & THROTL_TG_WAS_EMPTY) { |
2266 | tg_update_disptime(tg); |
		throtl_schedule_next_dispatch(tg->service_queue.parent_sq, true);
2268 | } |
2269 | |
2270 | out_unlock: |
2271 | #ifdef CONFIG_BLK_DEV_THROTTLING_LOW |
2272 | if (throttled || !td->track_bio_latency) |
2273 | bio->bi_issue.value |= BIO_ISSUE_THROTL_SKIP_LATENCY; |
2274 | #endif |
	spin_unlock_irq(&q->queue_lock);
2276 | |
2277 | rcu_read_unlock(); |
2278 | return throttled; |
2279 | } |
2280 | |
2281 | #ifdef CONFIG_BLK_DEV_THROTTLING_LOW |
2282 | static void throtl_track_latency(struct throtl_data *td, sector_t size, |
2283 | enum req_op op, unsigned long time) |
2284 | { |
2285 | const bool rw = op_is_write(op); |
2286 | struct latency_bucket *latency; |
2287 | int index; |
2288 | |
2289 | if (!td || td->limit_index != LIMIT_LOW || |
2290 | !(op == REQ_OP_READ || op == REQ_OP_WRITE) || |
2291 | !blk_queue_nonrot(td->queue)) |
2292 | return; |
2293 | |
2294 | index = request_bucket_index(size); |
2295 | |
2296 | latency = get_cpu_ptr(td->latency_buckets[rw]); |
2297 | latency[index].total_latency += time; |
2298 | latency[index].samples++; |
2299 | put_cpu_ptr(td->latency_buckets[rw]); |
2300 | } |
2301 | |
2302 | void blk_throtl_stat_add(struct request *rq, u64 time_ns) |
2303 | { |
2304 | struct request_queue *q = rq->q; |
2305 | struct throtl_data *td = q->td; |
2306 | |
	throtl_track_latency(td, blk_rq_stats_sectors(rq), req_op(rq),
			     time_ns >> 10);
2309 | } |
2310 | |
2311 | void blk_throtl_bio_endio(struct bio *bio) |
2312 | { |
2313 | struct blkcg_gq *blkg; |
2314 | struct throtl_grp *tg; |
2315 | u64 finish_time_ns; |
2316 | unsigned long finish_time; |
2317 | unsigned long start_time; |
2318 | unsigned long lat; |
2319 | int rw = bio_data_dir(bio); |
2320 | |
2321 | blkg = bio->bi_blkg; |
2322 | if (!blkg) |
2323 | return; |
2324 | tg = blkg_to_tg(blkg); |
2325 | if (!tg->td->limit_valid[LIMIT_LOW]) |
2326 | return; |
2327 | |
2328 | finish_time_ns = ktime_get_ns(); |
2329 | tg->last_finish_time = finish_time_ns >> 10; |
2330 | |
	start_time = bio_issue_time(&bio->bi_issue) >> 10;
	finish_time = __bio_issue_time(finish_time_ns) >> 10;
2333 | if (!start_time || finish_time <= start_time) |
2334 | return; |
2335 | |
2336 | lat = finish_time - start_time; |
	/* this is only for bio-based drivers */
	if (!(bio->bi_issue.value & BIO_ISSUE_THROTL_SKIP_LATENCY))
		throtl_track_latency(tg->td, bio_issue_size(&bio->bi_issue),
				     bio_op(bio), lat);
2341 | |
2342 | if (tg->latency_target && lat >= tg->td->filtered_latency) { |
2343 | int bucket; |
2344 | unsigned int threshold; |
2345 | |
2346 | bucket = request_bucket_index(bio_issue_size(&bio->bi_issue)); |
2347 | threshold = tg->td->avg_buckets[rw][bucket].latency + |
2348 | tg->latency_target; |
2349 | if (lat > threshold) |
2350 | tg->bad_bio_cnt++; |
2351 | /* |
		 * Not race free: the counts may be slightly off, which only
		 * means the cgroup may be throttled a bit inaccurately.
2354 | */ |
2355 | tg->bio_cnt++; |
2356 | } |
2357 | |
2358 | if (time_after(jiffies, tg->bio_cnt_reset_time) || tg->bio_cnt > 1024) { |
2359 | tg->bio_cnt_reset_time = tg->td->throtl_slice + jiffies; |
2360 | tg->bio_cnt /= 2; |
2361 | tg->bad_bio_cnt /= 2; |
2362 | } |
2363 | } |
2364 | #endif |
2365 | |
2366 | int blk_throtl_init(struct gendisk *disk) |
2367 | { |
2368 | struct request_queue *q = disk->queue; |
2369 | struct throtl_data *td; |
2370 | int ret; |
2371 | |
	td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node);
2373 | if (!td) |
2374 | return -ENOMEM; |
	td->latency_buckets[READ] = __alloc_percpu(sizeof(struct latency_bucket) *
		LATENCY_BUCKET_SIZE, __alignof__(u64));
	if (!td->latency_buckets[READ]) {
		kfree(td);
2379 | return -ENOMEM; |
2380 | } |
	td->latency_buckets[WRITE] = __alloc_percpu(sizeof(struct latency_bucket) *
		LATENCY_BUCKET_SIZE, __alignof__(u64));
	if (!td->latency_buckets[WRITE]) {
		free_percpu(td->latency_buckets[READ]);
		kfree(td);
2386 | return -ENOMEM; |
2387 | } |
2388 | |
2389 | INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn); |
	throtl_service_queue_init(&td->service_queue);
2391 | |
2392 | q->td = td; |
2393 | td->queue = q; |
2394 | |
2395 | td->limit_valid[LIMIT_MAX] = true; |
2396 | td->limit_index = LIMIT_MAX; |
2397 | td->low_upgrade_time = jiffies; |
2398 | td->low_downgrade_time = jiffies; |
2399 | |
2400 | /* activate policy */ |
	ret = blkcg_activate_policy(disk, &blkcg_policy_throtl);
	if (ret) {
		free_percpu(td->latency_buckets[READ]);
		free_percpu(td->latency_buckets[WRITE]);
		kfree(td);
2406 | } |
2407 | return ret; |
2408 | } |
2409 | |
2410 | void blk_throtl_exit(struct gendisk *disk) |
2411 | { |
2412 | struct request_queue *q = disk->queue; |
2413 | |
2414 | BUG_ON(!q->td); |
	del_timer_sync(&q->td->service_queue.pending_timer);
	throtl_shutdown_wq(q);
	blkcg_deactivate_policy(disk, &blkcg_policy_throtl);
	free_percpu(q->td->latency_buckets[READ]);
	free_percpu(q->td->latency_buckets[WRITE]);
	kfree(q->td);
2421 | } |
2422 | |
2423 | void blk_throtl_register(struct gendisk *disk) |
2424 | { |
2425 | struct request_queue *q = disk->queue; |
2426 | struct throtl_data *td; |
2427 | int i; |
2428 | |
2429 | td = q->td; |
2430 | BUG_ON(!td); |
2431 | |
2432 | if (blk_queue_nonrot(q)) { |
2433 | td->throtl_slice = DFL_THROTL_SLICE_SSD; |
2434 | td->filtered_latency = LATENCY_FILTERED_SSD; |
2435 | } else { |
2436 | td->throtl_slice = DFL_THROTL_SLICE_HD; |
2437 | td->filtered_latency = LATENCY_FILTERED_HD; |
2438 | for (i = 0; i < LATENCY_BUCKET_SIZE; i++) { |
2439 | td->avg_buckets[READ][i].latency = DFL_HD_BASELINE_LATENCY; |
2440 | td->avg_buckets[WRITE][i].latency = DFL_HD_BASELINE_LATENCY; |
2441 | } |
2442 | } |
2443 | #ifndef CONFIG_BLK_DEV_THROTTLING_LOW |
2444 | /* if no low limit, use previous default */ |
2445 | td->throtl_slice = DFL_THROTL_SLICE_HD; |
2446 | |
2447 | #else |
2448 | td->track_bio_latency = !queue_is_mq(q); |
2449 | if (!td->track_bio_latency) |
2450 | blk_stat_enable_accounting(q); |
2451 | #endif |
2452 | } |
2453 | |
2454 | #ifdef CONFIG_BLK_DEV_THROTTLING_LOW |
2455 | ssize_t blk_throtl_sample_time_show(struct request_queue *q, char *page) |
2456 | { |
2457 | if (!q->td) |
2458 | return -EINVAL; |
	return sprintf(page, "%u\n", jiffies_to_msecs(q->td->throtl_slice));
2460 | } |
2461 | |
2462 | ssize_t blk_throtl_sample_time_store(struct request_queue *q, |
2463 | const char *page, size_t count) |
2464 | { |
2465 | unsigned long v; |
2466 | unsigned long t; |
2467 | |
2468 | if (!q->td) |
2469 | return -EINVAL; |
	if (kstrtoul(page, 10, &v))
		return -EINVAL;
	t = msecs_to_jiffies(v);
2473 | if (t == 0 || t > MAX_THROTL_SLICE) |
2474 | return -EINVAL; |
2475 | q->td->throtl_slice = t; |
2476 | return count; |
2477 | } |
2478 | #endif |
2479 | |
2480 | static int __init throtl_init(void) |
2481 | { |
	kthrotld_workqueue = alloc_workqueue("kthrotld", WQ_MEM_RECLAIM, 0);
	if (!kthrotld_workqueue)
		panic("Failed to create kthrotld\n");

	return blkcg_policy_register(&blkcg_policy_throtl);
2487 | } |
2488 | |
2489 | module_init(throtl_init); |
2490 | |