// SPDX-License-Identifier: GPL-2.0
/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/delay.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-sched.h"

/*
 * Recalculate the wakeup batch when tags are shared between multiple hctxs.
 */
static void blk_mq_update_wake_batch(struct blk_mq_tags *tags,
		unsigned int users)
{
	if (!users)
		return;

	sbitmap_queue_recalculate_wake_batch(&tags->bitmap_tags,
			users);
	sbitmap_queue_recalculate_wake_batch(&tags->breserved_tags,
			users);
}

/*
 * If a previously inactive queue goes active, bump the active user count.
 * We need to do this before trying to allocate a driver tag, so that even
 * if the first allocation attempt fails, the other shared-tag users can
 * reserve budget for it.
 */
void __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	unsigned int users;
	struct blk_mq_tags *tags = hctx->tags;

	/*
	 * Calling test_bit() prior to test_and_set_bit() is intentional;
	 * it avoids dirtying the cacheline if the queue is already active.
	 */
	if (blk_mq_is_shared_tags(hctx->flags)) {
		struct request_queue *q = hctx->queue;

		if (test_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags) ||
		    test_and_set_bit(QUEUE_FLAG_HCTX_ACTIVE, &q->queue_flags))
			return;
	} else {
		if (test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) ||
		    test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return;
	}

	spin_lock_irq(&tags->lock);
	users = tags->active_queues + 1;
	WRITE_ONCE(tags->active_queues, users);
	blk_mq_update_wake_batch(tags, users);
	spin_unlock_irq(&tags->lock);
}

/*
 * Wake up all tasks potentially sleeping on tags.
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
	sbitmap_queue_wake_all(&tags->bitmap_tags);
	if (include_reserve)
		sbitmap_queue_wake_all(&tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;
	unsigned int users;

	if (blk_mq_is_shared_tags(hctx->flags)) {
		struct request_queue *q = hctx->queue;

		if (!test_and_clear_bit(QUEUE_FLAG_HCTX_ACTIVE,
					&q->queue_flags))
			return;
	} else {
		if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
			return;
	}

	spin_lock_irq(&tags->lock);
	users = tags->active_queues - 1;
	WRITE_ONCE(tags->active_queues, users);
	blk_mq_update_wake_batch(tags, users);
	spin_unlock_irq(&tags->lock);

	blk_mq_tag_wakeup_all(tags, false);
}

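/*
 * Grab a free tag from @bt. Unless an I/O scheduler is attached or the
 * allocation is for a reserved tag, honour the fair-sharing limit enforced
 * by hctx_may_queue() so a single shared-tag user cannot monopolise the
 * map. A non-zero shallow depth further caps the allocation depth.
 */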
static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
			    struct sbitmap_queue *bt)
{
	if (!data->q->elevator && !(data->flags & BLK_MQ_REQ_RESERVED) &&
	    !hctx_may_queue(data->hctx, bt))
		return BLK_MQ_NO_TAG;

	if (data->shallow_depth)
		return sbitmap_queue_get_shallow(bt, data->shallow_depth);
	else
		return __sbitmap_queue_get(bt);
}

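/*
 * Batched tag allocation. Only the simple case is handled here: no shallow
 * depth limit, no reserved tags and no shared tag map; in those cases 0 is
 * returned and callers fall back to single-tag allocation. Otherwise return
 * a bitmask of the allocated tags, with *offset set to the first tag number
 * (adjusted for any reserved tags).
 */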
unsigned long blk_mq_get_tags(struct blk_mq_alloc_data *data, int nr_tags,
			      unsigned int *offset)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct sbitmap_queue *bt = &tags->bitmap_tags;
	unsigned long ret;

	if (data->shallow_depth || data->flags & BLK_MQ_REQ_RESERVED ||
	    data->hctx->flags & BLK_MQ_F_TAG_QUEUE_SHARED)
		return 0;
	ret = __sbitmap_queue_get_batch(bt, nr_tags, offset);
	*offset += tags->nr_reserved_tags;
	return ret;
}

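/*
 * Allocate a single tag for @data->hctx. If no tag is available and the
 * caller did not set BLK_MQ_REQ_NOWAIT, kick the hardware queue and sleep
 * on the sbitmap waitqueue until a tag is freed. After each wakeup the
 * software and hardware queue mappings are re-evaluated, since the
 * submitting task may have migrated to a different CPU while sleeping.
 */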
unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct sbitmap_queue *bt;
	struct sbq_wait_state *ws;
	DEFINE_SBQ_WAIT(wait);
	unsigned int tag_offset;
	int tag;

	if (data->flags & BLK_MQ_REQ_RESERVED) {
		if (unlikely(!tags->nr_reserved_tags)) {
			WARN_ON_ONCE(1);
			return BLK_MQ_NO_TAG;
		}
		bt = &tags->breserved_tags;
		tag_offset = 0;
	} else {
		bt = &tags->bitmap_tags;
		tag_offset = tags->nr_reserved_tags;
	}

	tag = __blk_mq_get_tag(data, bt);
	if (tag != BLK_MQ_NO_TAG)
		goto found_tag;

	if (data->flags & BLK_MQ_REQ_NOWAIT)
		return BLK_MQ_NO_TAG;

	ws = bt_wait_ptr(bt, data->hctx);
	do {
		struct sbitmap_queue *bt_prev;

		/*
		 * We're out of tags on this hardware queue, kick any
		 * pending IO submits before going to sleep waiting for
		 * some to complete.
		 */
		blk_mq_run_hw_queue(data->hctx, false);

		/*
		 * Retry tag allocation after running the hardware queue,
		 * as running the queue may also have found completions.
		 */
		tag = __blk_mq_get_tag(data, bt);
		if (tag != BLK_MQ_NO_TAG)
			break;

		sbitmap_prepare_to_wait(bt, ws, &wait, TASK_UNINTERRUPTIBLE);

		tag = __blk_mq_get_tag(data, bt);
		if (tag != BLK_MQ_NO_TAG)
			break;

		bt_prev = bt;
		io_schedule();

		sbitmap_finish_wait(bt, ws, &wait);

		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = blk_mq_map_queue(data->q, data->cmd_flags,
					      data->ctx);
		tags = blk_mq_tags_from_data(data);
		if (data->flags & BLK_MQ_REQ_RESERVED)
			bt = &tags->breserved_tags;
		else
			bt = &tags->bitmap_tags;

		/*
		 * If the destination hw queue changed, issue a fake wakeup
		 * on the previous queue to compensate for the missed wakeup,
		 * so other allocations on the previous queue won't be starved.
		 */
		if (bt != bt_prev)
			sbitmap_queue_wake_up(bt_prev, 1);

		ws = bt_wait_ptr(bt, data->hctx);
	} while (1);

	sbitmap_finish_wait(bt, ws, &wait);

found_tag:
	/*
	 * Give up this allocation if the hctx is inactive. The caller will
	 * retry on an active hctx.
	 */
	if (unlikely(test_bit(BLK_MQ_S_INACTIVE, &data->hctx->state))) {
		blk_mq_put_tag(tags, data->ctx, tag + tag_offset);
		return BLK_MQ_NO_TAG;
	}
	return tag + tag_offset;
}

void blk_mq_put_tag(struct blk_mq_tags *tags, struct blk_mq_ctx *ctx,
		    unsigned int tag)
{
	if (!blk_mq_tag_is_reserved(tags, tag)) {
		const int real_tag = tag - tags->nr_reserved_tags;

		BUG_ON(real_tag >= tags->nr_tags);
		sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
	} else {
		sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
	}
}

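/*
 * Free a batch of regular (non-reserved) tags. The reserved-tag offset is
 * passed to sbitmap_queue_clear_batch() so it can translate the driver tag
 * values in @tag_array back into bitmap indices.
 */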
void blk_mq_put_tags(struct blk_mq_tags *tags, int *tag_array, int nr_tags)
{
	sbitmap_queue_clear_batch(&tags->bitmap_tags, tags->nr_reserved_tags,
				  tag_array, nr_tags);
}

struct bt_iter_data {
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	busy_tag_iter_fn *fn;
	void *data;
	bool reserved;
};

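/*
 * Look up the request currently occupying tag @bitnr and take a reference
 * on it. tags->lock is held so the ->rqs[] entry cannot be freed while we
 * look at it, and requests whose reference count has already dropped to
 * zero are skipped. Returns NULL if the slot is empty or stale.
 */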
static struct request *blk_mq_find_and_get_req(struct blk_mq_tags *tags,
		unsigned int bitnr)
{
	struct request *rq;
	unsigned long flags;

	spin_lock_irqsave(&tags->lock, flags);
	rq = tags->rqs[bitnr];
	if (!rq || rq->tag != bitnr || !req_ref_inc_not_zero(rq))
		rq = NULL;
	spin_unlock_irqrestore(&tags->lock, flags);
	return rq;
}

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_iter_data *iter_data = data;
	struct blk_mq_hw_ctx *hctx = iter_data->hctx;
	struct request_queue *q = iter_data->q;
	struct blk_mq_tag_set *set = q->tag_set;
	struct blk_mq_tags *tags;
	struct request *rq;
	bool ret = true;

	if (blk_mq_is_shared_tags(set->flags))
		tags = set->shared_tags;
	else
		tags = hctx->tags;

	if (!iter_data->reserved)
		bitnr += tags->nr_reserved_tags;
	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	rq = blk_mq_find_and_get_req(tags, bitnr);
	if (!rq)
		return true;

	if (rq->q == q && (!hctx || rq->mq_hctx == hctx))
		ret = iter_data->fn(rq, iter_data->data);
	blk_mq_put_rq_ref(rq);
	return ret;
}

/**
 * bt_for_each - iterate over the requests associated with a hardware queue
 * @hctx:	Hardware queue to examine.
 * @q:		Request queue to examine.
 * @bt:		sbitmap to examine. This is either the breserved_tags member
 *		or the bitmap_tags member of struct blk_mq_tags.
 * @fn:		Pointer to the function that will be called for each request
 *		associated with @hctx that has been assigned a driver tag.
 *		@fn will be called as follows: @fn(rq, @data) where rq is a
 *		pointer to a request. Return true to continue iterating
 *		tags, false to stop.
 * @data:	Will be passed as second argument to @fn.
 * @reserved:	Indicates whether @bt is the breserved_tags member or the
 *		bitmap_tags member of struct blk_mq_tags.
 */
static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct request_queue *q,
			struct sbitmap_queue *bt, busy_tag_iter_fn *fn,
			void *data, bool reserved)
{
	struct bt_iter_data iter_data = {
		.hctx = hctx,
		.fn = fn,
		.data = data,
		.reserved = reserved,
		.q = q,
	};

	sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}

struct bt_tags_iter_data {
	struct blk_mq_tags *tags;
	busy_tag_iter_fn *fn;
	void *data;
	unsigned int flags;
};

#define BT_TAG_ITER_RESERVED	(1 << 0)
#define BT_TAG_ITER_STARTED	(1 << 1)
#define BT_TAG_ITER_STATIC_RQS	(1 << 2)

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_tags_iter_data *iter_data = data;
	struct blk_mq_tags *tags = iter_data->tags;
	struct request *rq;
	bool ret = true;
	bool iter_static_rqs = !!(iter_data->flags & BT_TAG_ITER_STATIC_RQS);

	if (!(iter_data->flags & BT_TAG_ITER_RESERVED))
		bitnr += tags->nr_reserved_tags;

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	if (iter_static_rqs)
		rq = tags->static_rqs[bitnr];
	else
		rq = blk_mq_find_and_get_req(tags, bitnr);
	if (!rq)
		return true;

	if (!(iter_data->flags & BT_TAG_ITER_STARTED) ||
	    blk_mq_request_started(rq))
		ret = iter_data->fn(rq, iter_data->data);
	if (!iter_static_rqs)
		blk_mq_put_rq_ref(rq);
	return ret;
}

/**
 * bt_tags_for_each - iterate over the requests in a tag map
 * @tags:	Tag map to iterate over.
 * @bt:		sbitmap to examine. This is either the breserved_tags member
 *		or the bitmap_tags member of struct blk_mq_tags.
 * @fn:		Pointer to the function that will be called for each started
 *		request. @fn will be called as follows: @fn(rq, @data) where
 *		rq is a pointer to a request. Return true to continue
 *		iterating tags, false to stop.
 * @data:	Will be passed as second argument to @fn.
 * @flags:	BT_TAG_ITER_*
 */
static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
			     busy_tag_iter_fn *fn, void *data, unsigned int flags)
{
	struct bt_tags_iter_data iter_data = {
		.tags = tags,
		.fn = fn,
		.data = data,
		.flags = flags,
	};

	if (tags->rqs)
		sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}

static void __blk_mq_all_tag_iter(struct blk_mq_tags *tags,
		busy_tag_iter_fn *fn, void *priv, unsigned int flags)
{
	WARN_ON_ONCE(flags & BT_TAG_ITER_RESERVED);

	if (tags->nr_reserved_tags)
		bt_tags_for_each(tags, &tags->breserved_tags, fn, priv,
				 flags | BT_TAG_ITER_RESERVED);
	bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, flags);
}

/**
 * blk_mq_all_tag_iter - iterate over all requests in a tag map
 * @tags:	Tag map to iterate over.
 * @fn:		Pointer to the function that will be called for each
 *		request. @fn will be called as follows: @fn(rq, @priv)
 *		where rq is a pointer to a request. Return true to
 *		continue iterating tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 *
 * Caller has to pass the tag map from which requests are allocated.
 */
void blk_mq_all_tag_iter(struct blk_mq_tags *tags, busy_tag_iter_fn *fn,
		void *priv)
{
	__blk_mq_all_tag_iter(tags, fn, priv, BT_TAG_ITER_STATIC_RQS);
}

/**
 * blk_mq_tagset_busy_iter - iterate over all started requests in a tag set
 * @tagset:	Tag set to iterate over.
 * @fn:		Pointer to the function that will be called for each started
 *		request. @fn will be called as follows: @fn(rq, @priv) where
 *		rq is a pointer to a request. Return true to continue
 *		iterating tags, false to stop.
 * @priv:	Will be passed as second argument to @fn.
 *
 * We grab one request reference before calling @fn and release it after
 * @fn returns.
 */
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv)
{
	unsigned int flags = tagset->flags;
	int i, nr_tags;

	nr_tags = blk_mq_is_shared_tags(flags) ? 1 : tagset->nr_hw_queues;

	for (i = 0; i < nr_tags; i++) {
		if (tagset->tags && tagset->tags[i])
			__blk_mq_all_tag_iter(tagset->tags[i], fn, priv,
					      BT_TAG_ITER_STARTED);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);

static bool blk_mq_tagset_count_completed_rqs(struct request *rq, void *data)
{
	unsigned *count = data;

	if (blk_mq_request_completed(rq))
		(*count)++;
	return true;
}

/**
 * blk_mq_tagset_wait_completed_request - Wait until all scheduled request
 * completions have finished.
 * @tagset:	Tag set to drain completed requests from.
 *
 * Note: This function has to be run after all IO queues have been shut down.
 */
void blk_mq_tagset_wait_completed_request(struct blk_mq_tag_set *tagset)
{
	while (true) {
		unsigned count = 0;

		blk_mq_tagset_busy_iter(tagset,
				blk_mq_tagset_count_completed_rqs, &count);
		if (!count)
			break;
		msleep(5);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_wait_completed_request);

/**
 * blk_mq_queue_tag_busy_iter - iterate over all requests with a driver tag
 * @q:		Request queue to examine.
 * @fn:		Pointer to the function that will be called for each request
 *		on @q. @fn will be called as follows: @fn(rq, @priv) where rq
 *		is a pointer to a request.
 * @priv:	Will be passed as second argument to @fn.
 *
 * Note: if @q->tag_set is shared with other request queues then @fn will be
 * called for all requests on all queues that share that tag set and not only
 * for requests associated with @q.
 */
void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_tag_iter_fn *fn,
		void *priv)
{
	/*
	 * __blk_mq_update_nr_hw_queues() updates nr_hw_queues and hctx_table
	 * while the queue is frozen. So we can use q_usage_counter to avoid
	 * racing with it.
	 */
	if (!percpu_ref_tryget(&q->q_usage_counter))
		return;

	if (blk_mq_is_shared_tags(q->tag_set->flags)) {
		struct blk_mq_tags *tags = q->tag_set->shared_tags;
		struct sbitmap_queue *bresv = &tags->breserved_tags;
		struct sbitmap_queue *btags = &tags->bitmap_tags;

		if (tags->nr_reserved_tags)
			bt_for_each(NULL, q, bresv, fn, priv, true);
		bt_for_each(NULL, q, btags, fn, priv, false);
	} else {
		struct blk_mq_hw_ctx *hctx;
		unsigned long i;

		queue_for_each_hw_ctx(q, hctx, i) {
			struct blk_mq_tags *tags = hctx->tags;
			struct sbitmap_queue *bresv = &tags->breserved_tags;
			struct sbitmap_queue *btags = &tags->bitmap_tags;

			/*
			 * If no software queues are currently mapped to this
			 * hardware queue, there's nothing to check.
			 */
			if (!blk_mq_hw_queue_mapped(hctx))
				continue;

			if (tags->nr_reserved_tags)
				bt_for_each(hctx, q, bresv, fn, priv, true);
			bt_for_each(hctx, q, btags, fn, priv, false);
		}
	}
	blk_queue_exit(q);
}

static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
		    bool round_robin, int node)
{
	return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
				       node);
}

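/*
 * Initialise the normal and reserved tag bitmaps. @queue_depth includes the
 * @reserved tags, so the normal map is sized to queue_depth - reserved.
 * Round-robin tag allocation is used when @alloc_policy is BLK_TAG_ALLOC_RR.
 */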
int blk_mq_init_bitmaps(struct sbitmap_queue *bitmap_tags,
			struct sbitmap_queue *breserved_tags,
			unsigned int queue_depth, unsigned int reserved,
			int node, int alloc_policy)
{
	unsigned int depth = queue_depth - reserved;
	bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

	if (bt_alloc(bitmap_tags, depth, round_robin, node))
		return -ENOMEM;
	if (bt_alloc(breserved_tags, reserved, round_robin, node))
		goto free_bitmap_tags;

	return 0;

free_bitmap_tags:
	sbitmap_queue_free(bitmap_tags);
	return -ENOMEM;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags,
				     int node, int alloc_policy)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;
	spin_lock_init(&tags->lock);

	if (blk_mq_init_bitmaps(&tags->bitmap_tags, &tags->breserved_tags,
				total_tags, reserved_tags, node,
				alloc_policy) < 0) {
		kfree(tags);
		return NULL;
	}
	return tags;
}

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	sbitmap_queue_free(&tags->bitmap_tags);
	sbitmap_queue_free(&tags->breserved_tags);
	kfree(tags);
}

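/*
 * Update the depth of a hardware queue's tag map to @tdepth. Shrinking only
 * resizes the existing sbitmap. Growing beyond the originally allocated size
 * requires @can_grow and replaces the tag map (and its statically allocated
 * requests) with a freshly allocated one. Shared tag maps are resized through
 * blk_mq_tag_resize_shared_tags() instead, so growing them here is a no-op.
 */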
int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
			    struct blk_mq_tags **tagsptr, unsigned int tdepth,
			    bool can_grow)
{
	struct blk_mq_tags *tags = *tagsptr;

	if (tdepth <= tags->nr_reserved_tags)
		return -EINVAL;

	/*
	 * If we are allowed to grow beyond the original size, allocate
	 * a new set of tags before freeing the old one.
	 */
	if (tdepth > tags->nr_tags) {
		struct blk_mq_tag_set *set = hctx->queue->tag_set;
		struct blk_mq_tags *new;

		if (!can_grow)
			return -EINVAL;

		/*
		 * We need some sort of upper limit, set it high enough that
		 * no valid use cases should require more.
		 */
		if (tdepth > MAX_SCHED_RQ)
			return -EINVAL;

		/*
		 * Only the sbitmap needs resizing since we allocated the max
		 * initially.
		 */
		if (blk_mq_is_shared_tags(set->flags))
			return 0;

		new = blk_mq_alloc_map_and_rqs(set, hctx->queue_num, tdepth);
		if (!new)
			return -ENOMEM;

		blk_mq_free_map_and_rqs(set, *tagsptr, hctx->queue_num);
		*tagsptr = new;
	} else {
		/*
		 * Don't need (or can't) update reserved tags here, they
		 * remain static and should never need resizing.
		 */
		sbitmap_queue_resize(&tags->bitmap_tags,
				     tdepth - tags->nr_reserved_tags);
	}

	return 0;
}

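/*
 * Resize the tag map that is shared by all hardware queues of a tag set.
 * @size is the new total depth, including the reserved tags.
 */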
void blk_mq_tag_resize_shared_tags(struct blk_mq_tag_set *set, unsigned int size)
{
	struct blk_mq_tags *tags = set->shared_tags;

	sbitmap_queue_resize(&tags->bitmap_tags, size - set->reserved_tags);
}

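/*
 * Resize the shared scheduler tag map of @q to match q->nr_requests.
 */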
void blk_mq_tag_update_sched_shared_tags(struct request_queue *q)
{
	sbitmap_queue_resize(&q->sched_shared_tags->bitmap_tags,
			     q->nr_requests - q->tag_set->reserved_tags);
}

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq:		request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function that returns a tag with the
 * hardware context index in the upper bits and the per hardware queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
	return (rq->mq_hctx->queue_num << BLK_MQ_UNIQUE_TAG_BITS) |
		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);