1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Copyright (C) 1999 Eric Youngdale |
4 | * Copyright (C) 2014 Christoph Hellwig |
5 | * |
6 | * SCSI queueing library. |
7 | * Initial versions: Eric Youngdale (eric@andante.org). |
8 | * Based upon conversations with large numbers |
9 | * of people at Linux Expo. |
10 | */ |
11 | |
12 | #include <linux/bio.h> |
13 | #include <linux/bitops.h> |
14 | #include <linux/blkdev.h> |
15 | #include <linux/completion.h> |
16 | #include <linux/kernel.h> |
17 | #include <linux/export.h> |
18 | #include <linux/init.h> |
19 | #include <linux/pci.h> |
20 | #include <linux/delay.h> |
21 | #include <linux/hardirq.h> |
22 | #include <linux/scatterlist.h> |
23 | #include <linux/blk-mq.h> |
24 | #include <linux/blk-integrity.h> |
25 | #include <linux/ratelimit.h> |
26 | #include <asm/unaligned.h> |
27 | |
28 | #include <scsi/scsi.h> |
29 | #include <scsi/scsi_cmnd.h> |
30 | #include <scsi/scsi_dbg.h> |
31 | #include <scsi/scsi_device.h> |
32 | #include <scsi/scsi_driver.h> |
33 | #include <scsi/scsi_eh.h> |
34 | #include <scsi/scsi_host.h> |
35 | #include <scsi/scsi_transport.h> /* __scsi_init_queue() */ |
36 | #include <scsi/scsi_dh.h> |
37 | |
38 | #include <trace/events/scsi.h> |
39 | |
40 | #include "scsi_debugfs.h" |
41 | #include "scsi_priv.h" |
42 | #include "scsi_logging.h" |
43 | |
44 | /* |
45 | * Size of integrity metadata is usually small, 1 inline sg should |
46 | * cover normal cases. |
47 | */ |
48 | #ifdef CONFIG_ARCH_NO_SG_CHAIN |
49 | #define SCSI_INLINE_PROT_SG_CNT 0 |
50 | #define SCSI_INLINE_SG_CNT 0 |
51 | #else |
52 | #define SCSI_INLINE_PROT_SG_CNT 1 |
53 | #define SCSI_INLINE_SG_CNT 2 |
54 | #endif |
55 | |
56 | static struct kmem_cache *scsi_sense_cache; |
57 | static DEFINE_MUTEX(scsi_sense_cache_mutex); |
58 | |
59 | static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd); |
60 | |
61 | int scsi_init_sense_cache(struct Scsi_Host *shost) |
62 | { |
63 | int ret = 0; |
64 | |
65 | mutex_lock(&scsi_sense_cache_mutex); |
66 | if (!scsi_sense_cache) { |
67 | scsi_sense_cache = |
68 | kmem_cache_create_usercopy("scsi_sense_cache",
69 | SCSI_SENSE_BUFFERSIZE, 0, SLAB_HWCACHE_ALIGN,
70 | 0, SCSI_SENSE_BUFFERSIZE, NULL);
71 | if (!scsi_sense_cache) |
72 | ret = -ENOMEM; |
73 | } |
74 | mutex_unlock(&scsi_sense_cache_mutex);
75 | return ret; |
76 | } |
77 | |
78 | static void |
79 | scsi_set_blocked(struct scsi_cmnd *cmd, int reason) |
80 | { |
81 | struct Scsi_Host *host = cmd->device->host; |
82 | struct scsi_device *device = cmd->device; |
83 | struct scsi_target *starget = scsi_target(device);
84 | |
85 | /* |
86 | * Set the appropriate busy bit for the device/host. |
87 | * |
88 | * If the host/device isn't busy, assume that something actually |
89 | * completed, and that we should be able to queue a command now. |
90 | * |
91 | * Note that the prior mid-layer assumption that any host could |
92 | * always queue at least one command is now broken. The mid-layer |
93 | * will implement a user specifiable stall (see |
94 | * scsi_host.max_host_blocked and scsi_device.max_device_blocked) |
95 | * if a command is requeued with no other commands outstanding |
96 | * either for the device or for the host. |
97 | */ |
98 | switch (reason) { |
99 | case SCSI_MLQUEUE_HOST_BUSY: |
100 | atomic_set(&host->host_blocked, host->max_host_blocked);
101 | break; |
102 | case SCSI_MLQUEUE_DEVICE_BUSY: |
103 | case SCSI_MLQUEUE_EH_RETRY: |
104 | atomic_set(&device->device_blocked,
105 | device->max_device_blocked);
106 | break; |
107 | case SCSI_MLQUEUE_TARGET_BUSY: |
108 | atomic_set(&starget->target_blocked,
109 | starget->max_target_blocked);
110 | break; |
111 | } |
112 | } |
113 | |
114 | static void scsi_mq_requeue_cmd(struct scsi_cmnd *cmd, unsigned long msecs) |
115 | { |
116 | struct request *rq = scsi_cmd_to_rq(cmd);
117 | |
118 | if (rq->rq_flags & RQF_DONTPREP) { |
119 | rq->rq_flags &= ~RQF_DONTPREP; |
120 | scsi_mq_uninit_cmd(cmd); |
121 | } else { |
122 | WARN_ON_ONCE(true); |
123 | } |
124 | |
125 | blk_mq_requeue_request(rq, false);
126 | if (!scsi_host_in_recovery(cmd->device->host))
127 | blk_mq_delay_kick_requeue_list(rq->q, msecs);
128 | } |
129 | |
130 | /** |
131 | * __scsi_queue_insert - private queue insertion |
132 | * @cmd: The SCSI command being requeued |
133 | * @reason: The reason for the requeue |
134 | * @unbusy: Whether the queue should be unbusied |
135 | * |
136 | * This is a private queue insertion. The public interface |
137 | * scsi_queue_insert() always assumes the queue should be unbusied |
138 | * because it's always called before the completion. This function is |
139 | * for a requeue after completion, which should only occur in this |
140 | * file. |
141 | */ |
142 | static void __scsi_queue_insert(struct scsi_cmnd *cmd, int reason, bool unbusy) |
143 | { |
144 | struct scsi_device *device = cmd->device; |
145 | |
146 | SCSI_LOG_MLQUEUE(1, scmd_printk(KERN_INFO, cmd, |
147 | "Inserting command %p into mlqueue\n" , cmd)); |
148 | |
149 | scsi_set_blocked(cmd, reason); |
150 | |
151 | /* |
152 | * Decrement the counters, since these commands are no longer |
153 | * active on the host/device. |
154 | */ |
155 | if (unbusy) |
156 | scsi_device_unbusy(device, cmd);
157 | |
158 | /* |
159 | * Requeue this command. It will go before all other commands |
160 | * that are already in the queue. Schedule requeue work under |
161 | * lock such that the kblockd_schedule_work() call happens |
162 | * before blk_mq_destroy_queue() finishes. |
163 | */ |
164 | cmd->result = 0; |
165 | |
166 | blk_mq_requeue_request(scsi_cmd_to_rq(cmd),
167 | !scsi_host_in_recovery(cmd->device->host));
168 | } |
169 | |
170 | /** |
171 | * scsi_queue_insert - Reinsert a command in the queue. |
172 | * @cmd: command that we are adding to queue. |
173 | * @reason: why we are inserting command to queue. |
174 | * |
175 | * We do this for one of two cases. Either the host is busy and it cannot accept |
176 | * any more commands for the time being, or the device returned QUEUE_FULL and |
177 | * can accept no more commands. |
178 | * |
179 | * Context: This could be called either from an interrupt context or a normal |
180 | * process context. |
181 | */ |
182 | void scsi_queue_insert(struct scsi_cmnd *cmd, int reason) |
183 | { |
184 | __scsi_queue_insert(cmd, reason, true);
185 | } |
186 | |
187 | void scsi_failures_reset_retries(struct scsi_failures *failures) |
188 | { |
189 | struct scsi_failure *failure; |
190 | |
191 | failures->total_retries = 0; |
192 | |
193 | for (failure = failures->failure_definitions; failure->result; |
194 | failure++) |
195 | failure->retries = 0; |
196 | } |
197 | EXPORT_SYMBOL_GPL(scsi_failures_reset_retries); |
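/*
 * Illustrative sketch (not lifted from any in-tree caller): a caller
 * builds a zero-terminated scsi_failure table, wraps it in a
 * scsi_failures struct and resets the per-entry retry counts before
 * (re)submitting passthrough commands against it:
 *
 *	struct scsi_failure failure_defs[] = {
 *		{
 *			.sense = UNIT_ATTENTION,
 *			.asc = SCMD_FAILURE_ASC_ANY,
 *			.ascq = SCMD_FAILURE_ASCQ_ANY,
 *			.result = SAM_STAT_CHECK_CONDITION,
 *			.allowed = 3,
 *		},
 *		{}
 *	};
 *	struct scsi_failures failures = {
 *		.failure_definitions = failure_defs,
 *	};
 *
 *	scsi_failures_reset_retries(&failures);
 */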
198 | |
199 | /** |
200 | * scsi_check_passthrough - Determine if passthrough scsi_cmnd needs a retry. |
201 | * @scmd: scsi_cmnd to check. |
202 | * @failures: scsi_failures struct that lists failures to check for. |
203 | * |
204 | * Returns -EAGAIN if the caller should retry else 0. |
205 | */ |
206 | static int scsi_check_passthrough(struct scsi_cmnd *scmd, |
207 | struct scsi_failures *failures) |
208 | { |
209 | struct scsi_failure *failure; |
210 | struct scsi_sense_hdr sshdr; |
211 | enum sam_status status; |
212 | |
213 | if (!failures) |
214 | return 0; |
215 | |
216 | for (failure = failures->failure_definitions; failure->result; |
217 | failure++) { |
218 | if (failure->result == SCMD_FAILURE_RESULT_ANY) |
219 | goto maybe_retry; |
220 | |
221 | if (host_byte(scmd->result) && |
222 | host_byte(scmd->result) == host_byte(failure->result)) |
223 | goto maybe_retry; |
224 | |
225 | status = status_byte(scmd->result); |
226 | if (!status) |
227 | continue; |
228 | |
229 | if (failure->result == SCMD_FAILURE_STAT_ANY && |
230 | !scsi_status_is_good(scmd->result))
231 | goto maybe_retry; |
232 | |
233 | if (status != status_byte(failure->result)) |
234 | continue; |
235 | |
236 | if (status_byte(failure->result) != SAM_STAT_CHECK_CONDITION || |
237 | failure->sense == SCMD_FAILURE_SENSE_ANY) |
238 | goto maybe_retry; |
239 | |
240 | if (!scsi_command_normalize_sense(scmd, &sshdr))
241 | return 0; |
242 | |
243 | if (failure->sense != sshdr.sense_key) |
244 | continue; |
245 | |
246 | if (failure->asc == SCMD_FAILURE_ASC_ANY) |
247 | goto maybe_retry; |
248 | |
249 | if (failure->asc != sshdr.asc) |
250 | continue; |
251 | |
252 | if (failure->ascq == SCMD_FAILURE_ASCQ_ANY || |
253 | failure->ascq == sshdr.ascq) |
254 | goto maybe_retry; |
255 | } |
256 | |
257 | return 0; |
258 | |
259 | maybe_retry: |
260 | if (failure->allowed) { |
261 | if (failure->allowed == SCMD_FAILURE_NO_LIMIT || |
262 | ++failure->retries <= failure->allowed) |
263 | return -EAGAIN; |
264 | } else { |
265 | if (failures->total_allowed == SCMD_FAILURE_NO_LIMIT || |
266 | ++failures->total_retries <= failures->total_allowed) |
267 | return -EAGAIN; |
268 | } |
269 | |
270 | return 0; |
271 | } |
272 | |
273 | /** |
274 | * scsi_execute_cmd - insert request and wait for the result |
275 | * @sdev: scsi_device |
276 | * @cmd: scsi command |
277 | * @opf: block layer request cmd_flags |
278 | * @buffer: data buffer |
279 | * @bufflen: len of buffer |
280 | * @timeout: request timeout in HZ |
281 | * @ml_retries: number of times SCSI midlayer will retry request |
282 | * @args: Optional args. See struct definition for field descriptions |
283 | * |
284 | * Returns the scsi_cmnd result field if a command was executed, or a negative |
285 | * Linux error code if we didn't get that far. |
286 | */ |
287 | int scsi_execute_cmd(struct scsi_device *sdev, const unsigned char *cmd, |
288 | blk_opf_t opf, void *buffer, unsigned int bufflen, |
289 | int timeout, int ml_retries, |
290 | const struct scsi_exec_args *args) |
291 | { |
292 | static const struct scsi_exec_args default_args; |
293 | struct request *req; |
294 | struct scsi_cmnd *scmd; |
295 | int ret; |
296 | |
297 | if (!args) |
298 | args = &default_args; |
299 | else if (WARN_ON_ONCE(args->sense && |
300 | args->sense_len != SCSI_SENSE_BUFFERSIZE)) |
301 | return -EINVAL; |
302 | |
303 | retry: |
304 | req = scsi_alloc_request(sdev->request_queue, opf, args->req_flags);
305 | if (IS_ERR(req))
306 | return PTR_ERR(req);
307 | |
308 | if (bufflen) { |
309 | ret = blk_rq_map_kern(sdev->request_queue, req, |
310 | buffer, bufflen, GFP_NOIO); |
311 | if (ret) |
312 | goto out; |
313 | } |
314 | scmd = blk_mq_rq_to_pdu(req);
315 | scmd->cmd_len = COMMAND_SIZE(cmd[0]); |
316 | memcpy(scmd->cmnd, cmd, scmd->cmd_len); |
317 | scmd->allowed = ml_retries; |
318 | scmd->flags |= args->scmd_flags; |
319 | req->timeout = timeout; |
320 | req->rq_flags |= RQF_QUIET; |
321 | |
322 | /* |
323 | * head injection *required* here otherwise quiesce won't work |
324 | */ |
325 | blk_execute_rq(req, true);
326 | |
327 | if (scsi_check_passthrough(scmd, args->failures) == -EAGAIN) {
328 | blk_mq_free_request(req);
329 | goto retry; |
330 | } |
331 | |
332 | /* |
333 | * Some devices (USB mass-storage in particular) may transfer |
334 | * garbage data together with a residue indicating that the data |
335 | * is invalid. Prevent the garbage from being misinterpreted |
336 | * and prevent security leaks by zeroing out the excess data. |
337 | */ |
338 | if (unlikely(scmd->resid_len > 0 && scmd->resid_len <= bufflen)) |
339 | memset(buffer + bufflen - scmd->resid_len, 0, scmd->resid_len); |
340 | |
341 | if (args->resid) |
342 | *args->resid = scmd->resid_len; |
343 | if (args->sense) |
344 | memcpy(args->sense, scmd->sense_buffer, SCSI_SENSE_BUFFERSIZE); |
345 | if (args->sshdr) |
346 | scsi_normalize_sense(scmd->sense_buffer, scmd->sense_len,
347 | args->sshdr);
348 | |
349 | ret = scmd->result; |
350 | out: |
351 | blk_mq_free_request(req);
352 | |
353 | return ret; |
354 | } |
355 | EXPORT_SYMBOL(scsi_execute_cmd); |
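/*
 * Illustrative sketch of a typical caller (timeout and retry counts are
 * arbitrary here): issue TEST UNIT READY with no data transfer, three
 * midlayer retries and a decoded sense header. A negative return value
 * means the request could not even be issued.
 *
 *	struct scsi_sense_hdr sshdr;
 *	const struct scsi_exec_args exec_args = {
 *		.sshdr = &sshdr,
 *	};
 *	unsigned char cdb[6] = { TEST_UNIT_READY };
 *	int result;
 *
 *	result = scsi_execute_cmd(sdev, cdb, REQ_OP_DRV_IN, NULL, 0,
 *				  30 * HZ, 3, &exec_args);
 *	if (result < 0)
 *		return result;
 */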
356 | |
357 | /* |
358 | * Wake up the error handler if necessary. Avoid as follows that the error |
359 | * handler is not woken up if host in-flight requests number == |
360 | * shost->host_failed: use call_rcu() in scsi_eh_scmd_add() in combination |
361 | * with an RCU read lock in this function to ensure that this function in |
362 | * its entirety either finishes before scsi_eh_scmd_add() increases the |
363 | * host_failed counter or that it notices the shost state change made by |
364 | * scsi_eh_scmd_add(). |
365 | */ |
366 | static void scsi_dec_host_busy(struct Scsi_Host *shost, struct scsi_cmnd *cmd) |
367 | { |
368 | unsigned long flags; |
369 | |
370 | rcu_read_lock(); |
371 | __clear_bit(SCMD_STATE_INFLIGHT, &cmd->state); |
372 | if (unlikely(scsi_host_in_recovery(shost))) { |
373 | unsigned int busy = scsi_host_busy(shost); |
374 | |
375 | spin_lock_irqsave(shost->host_lock, flags); |
376 | if (shost->host_failed || shost->host_eh_scheduled) |
377 | scsi_eh_wakeup(shost, busy); |
378 | spin_unlock_irqrestore(shost->host_lock, flags);
379 | } |
380 | rcu_read_unlock(); |
381 | } |
382 | |
383 | void scsi_device_unbusy(struct scsi_device *sdev, struct scsi_cmnd *cmd) |
384 | { |
385 | struct Scsi_Host *shost = sdev->host; |
386 | struct scsi_target *starget = scsi_target(sdev); |
387 | |
388 | scsi_dec_host_busy(shost, cmd); |
389 | |
390 | if (starget->can_queue > 0) |
391 | atomic_dec(&starget->target_busy);
392 | |
393 | sbitmap_put(&sdev->budget_map, cmd->budget_token);
394 | cmd->budget_token = -1; |
395 | } |
396 | |
397 | /* |
398 | * Kick the queue of SCSI device @sdev if @sdev != current_sdev. Called with |
399 | * interrupts disabled. |
400 | */ |
401 | static void scsi_kick_sdev_queue(struct scsi_device *sdev, void *data) |
402 | { |
403 | struct scsi_device *current_sdev = data; |
404 | |
405 | if (sdev != current_sdev) |
406 | blk_mq_run_hw_queues(sdev->request_queue, true);
407 | } |
408 | |
409 | /* |
410 | * Called for single_lun devices on IO completion. Clear starget_sdev_user, |
411 | * and call blk_run_queue for all the scsi_devices on the target - |
412 | * including current_sdev first. |
413 | * |
414 | * Called with *no* scsi locks held. |
415 | */ |
416 | static void scsi_single_lun_run(struct scsi_device *current_sdev) |
417 | { |
418 | struct Scsi_Host *shost = current_sdev->host; |
419 | struct scsi_target *starget = scsi_target(current_sdev);
420 | unsigned long flags; |
421 | |
422 | spin_lock_irqsave(shost->host_lock, flags); |
423 | starget->starget_sdev_user = NULL; |
424 | spin_unlock_irqrestore(shost->host_lock, flags);
425 | |
426 | /* |
427 | * Call blk_run_queue for all LUNs on the target, starting with |
428 | * current_sdev. We race with others (to set starget_sdev_user), |
429 | * but in most cases, we will be first. Ideally, each LU on the |
430 | * target would get some limited time or requests on the target. |
431 | */ |
432 | blk_mq_run_hw_queues(current_sdev->request_queue,
433 | shost->queuecommand_may_block);
434 | |
435 | spin_lock_irqsave(shost->host_lock, flags); |
436 | if (!starget->starget_sdev_user) |
437 | __starget_for_each_device(starget, current_sdev,
438 | scsi_kick_sdev_queue);
439 | spin_unlock_irqrestore(shost->host_lock, flags);
440 | } |
441 | |
442 | static inline bool scsi_device_is_busy(struct scsi_device *sdev) |
443 | { |
444 | if (scsi_device_busy(sdev) >= sdev->queue_depth) |
445 | return true; |
446 | if (atomic_read(&sdev->device_blocked) > 0)
447 | return true; |
448 | return false; |
449 | } |
450 | |
451 | static inline bool scsi_target_is_busy(struct scsi_target *starget) |
452 | { |
453 | if (starget->can_queue > 0) { |
454 | if (atomic_read(&starget->target_busy) >= starget->can_queue)
455 | return true;
456 | if (atomic_read(&starget->target_blocked) > 0)
457 | return true; |
458 | } |
459 | return false; |
460 | } |
461 | |
462 | static inline bool scsi_host_is_busy(struct Scsi_Host *shost) |
463 | { |
464 | if (atomic_read(&shost->host_blocked) > 0)
465 | return true; |
466 | if (shost->host_self_blocked) |
467 | return true; |
468 | return false; |
469 | } |
470 | |
471 | static void scsi_starved_list_run(struct Scsi_Host *shost) |
472 | { |
473 | LIST_HEAD(starved_list); |
474 | struct scsi_device *sdev; |
475 | unsigned long flags; |
476 | |
477 | spin_lock_irqsave(shost->host_lock, flags); |
478 | list_splice_init(&shost->starved_list, &starved_list);
479 | |
480 | while (!list_empty(&starved_list)) {
481 | struct request_queue *slq; |
482 | |
483 | /* |
484 | * As long as shost is accepting commands and we have |
485 | * starved queues, call blk_run_queue. scsi_request_fn |
486 | * drops the queue_lock and can add us back to the |
487 | * starved_list. |
488 | * |
489 | * host_lock protects the starved_list and starved_entry. |
490 | * scsi_request_fn must get the host_lock before checking |
491 | * or modifying starved_list or starved_entry. |
492 | */ |
493 | if (scsi_host_is_busy(shost)) |
494 | break; |
495 | |
496 | sdev = list_entry(starved_list.next, |
497 | struct scsi_device, starved_entry); |
498 | list_del_init(&sdev->starved_entry);
499 | if (scsi_target_is_busy(scsi_target(sdev))) {
500 | list_move_tail(&sdev->starved_entry,
501 | &shost->starved_list);
502 | continue; |
503 | } |
504 | |
505 | /* |
506 | * Once we drop the host lock, a racing scsi_remove_device() |
507 | * call may remove the sdev from the starved list and destroy |
508 | * it and the queue. Mitigate by taking a reference to the |
509 | * queue and never touching the sdev again after we drop the |
510 | * host lock. Note: if __scsi_remove_device() invokes |
511 | * blk_mq_destroy_queue() before the queue is run from this |
512 | * function then blk_run_queue() will return immediately since |
513 | * blk_mq_destroy_queue() marks the queue with QUEUE_FLAG_DYING. |
514 | */ |
515 | slq = sdev->request_queue; |
516 | if (!blk_get_queue(slq)) |
517 | continue; |
518 | spin_unlock_irqrestore(shost->host_lock, flags);
519 | |
520 | blk_mq_run_hw_queues(slq, false);
521 | blk_put_queue(slq); |
522 | |
523 | spin_lock_irqsave(shost->host_lock, flags); |
524 | } |
525 | /* put any unprocessed entries back */ |
526 | list_splice(&starved_list, &shost->starved_list);
527 | spin_unlock_irqrestore(shost->host_lock, flags);
528 | } |
529 | |
530 | /** |
531 | * scsi_run_queue - Select a proper request queue to serve next. |
532 | * @q: last request's queue |
533 | * |
534 | * The previous command was completely finished, start a new one if possible. |
535 | */ |
536 | static void scsi_run_queue(struct request_queue *q) |
537 | { |
538 | struct scsi_device *sdev = q->queuedata; |
539 | |
540 | if (scsi_target(sdev)->single_lun) |
541 | scsi_single_lun_run(sdev);
542 | if (!list_empty(&sdev->host->starved_list))
543 | scsi_starved_list_run(sdev->host);
544 | |
545 | /* Note: blk_mq_kick_requeue_list() runs the queue asynchronously. */ |
546 | blk_mq_kick_requeue_list(q); |
547 | } |
548 | |
549 | void scsi_requeue_run_queue(struct work_struct *work) |
550 | { |
551 | struct scsi_device *sdev; |
552 | struct request_queue *q; |
553 | |
554 | sdev = container_of(work, struct scsi_device, requeue_work); |
555 | q = sdev->request_queue; |
556 | scsi_run_queue(q); |
557 | } |
558 | |
559 | void scsi_run_host_queues(struct Scsi_Host *shost) |
560 | { |
561 | struct scsi_device *sdev; |
562 | |
563 | shost_for_each_device(sdev, shost) |
564 | scsi_run_queue(sdev->request_queue);
565 | } |
566 | |
567 | static void scsi_uninit_cmd(struct scsi_cmnd *cmd) |
568 | { |
569 | if (!blk_rq_is_passthrough(scsi_cmd_to_rq(cmd))) {
570 | struct scsi_driver *drv = scsi_cmd_to_driver(cmd); |
571 | |
572 | if (drv->uninit_command) |
573 | drv->uninit_command(cmd); |
574 | } |
575 | } |
576 | |
577 | void scsi_free_sgtables(struct scsi_cmnd *cmd) |
578 | { |
579 | if (cmd->sdb.table.nents) |
580 | sg_free_table_chained(&cmd->sdb.table,
581 | SCSI_INLINE_SG_CNT); |
582 | if (scsi_prot_sg_count(cmd)) |
583 | sg_free_table_chained(&cmd->prot_sdb->table,
584 | SCSI_INLINE_PROT_SG_CNT); |
585 | } |
586 | EXPORT_SYMBOL_GPL(scsi_free_sgtables); |
587 | |
588 | static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd) |
589 | { |
590 | scsi_free_sgtables(cmd); |
591 | scsi_uninit_cmd(cmd); |
592 | } |
593 | |
594 | static void scsi_run_queue_async(struct scsi_device *sdev) |
595 | { |
596 | if (scsi_host_in_recovery(sdev->host))
597 | return; |
598 | |
599 | if (scsi_target(sdev)->single_lun || |
600 | !list_empty(&sdev->host->starved_list)) {
601 | kblockd_schedule_work(&sdev->requeue_work);
602 | } else { |
603 | /* |
604 | * smp_mb() present in sbitmap_queue_clear() or implied in |
605 | * .end_io is for ordering writing .device_busy in |
606 | * scsi_device_unbusy() and reading sdev->restarts. |
607 | */ |
608 | int old = atomic_read(&sdev->restarts);
609 | |
610 | /* |
611 | * ->restarts has to be kept as non-zero if new budget |
612 | * contention occurs. |
613 | * |
614 | * No need to run queue when either another re-run |
615 | * queue wins in updating ->restarts or a new budget |
616 | * contention occurs. |
617 | */ |
618 | if (old && atomic_cmpxchg(&sdev->restarts, old, 0) == old)
619 | blk_mq_run_hw_queues(sdev->request_queue, true);
620 | } |
621 | } |
622 | |
623 | /* Returns false when no more bytes to process, true if there are more */ |
624 | static bool scsi_end_request(struct request *req, blk_status_t error, |
625 | unsigned int bytes) |
626 | { |
627 | struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
628 | struct scsi_device *sdev = cmd->device; |
629 | struct request_queue *q = sdev->request_queue; |
630 | |
631 | if (blk_update_request(req, error, bytes))
632 | return true; |
633 | |
634 | // XXX: |
635 | if (blk_queue_add_random(q)) |
636 | add_disk_randomness(req->q->disk);
637 | |
638 | WARN_ON_ONCE(!blk_rq_is_passthrough(req) && |
639 | !(cmd->flags & SCMD_INITIALIZED)); |
640 | cmd->flags = 0; |
641 | |
642 | /* |
643 | * Calling rcu_barrier() is not necessary here because the |
644 | * SCSI error handler guarantees that the function called by |
645 | * call_rcu() has been called before scsi_end_request() is |
646 | * called. |
647 | */ |
648 | destroy_rcu_head(&cmd->rcu);
649 | |
650 | /* |
651 | * In the MQ case the command gets freed by __blk_mq_end_request, |
652 | * so we have to do all cleanup that depends on it earlier. |
653 | * |
654 | * We also can't kick the queues from irq context, so we |
655 | * will have to defer it to a workqueue. |
656 | */ |
657 | scsi_mq_uninit_cmd(cmd); |
658 | |
659 | /* |
660 | * queue is still alive, so grab the ref for preventing it |
661 | * from being cleaned up during running queue. |
662 | */ |
663 | percpu_ref_get(&q->q_usage_counter);
664 | |
665 | __blk_mq_end_request(req, error);
666 | |
667 | scsi_run_queue_async(sdev); |
668 | |
669 | percpu_ref_put(&q->q_usage_counter);
670 | return false; |
671 | } |
672 | |
673 | /** |
674 | * scsi_result_to_blk_status - translate a SCSI result code into blk_status_t |
675 | * @result: scsi error code |
676 | * |
677 | * Translate a SCSI result code into a blk_status_t value. |
678 | */ |
679 | static blk_status_t scsi_result_to_blk_status(int result) |
680 | { |
681 | /* |
682 | * Check the scsi-ml byte first in case we converted a host or status |
683 | * byte. |
684 | */ |
685 | switch (scsi_ml_byte(result)) { |
686 | case SCSIML_STAT_OK: |
687 | break; |
688 | case SCSIML_STAT_RESV_CONFLICT: |
689 | return BLK_STS_RESV_CONFLICT; |
690 | case SCSIML_STAT_NOSPC: |
691 | return BLK_STS_NOSPC; |
692 | case SCSIML_STAT_MED_ERROR: |
693 | return BLK_STS_MEDIUM; |
694 | case SCSIML_STAT_TGT_FAILURE: |
695 | return BLK_STS_TARGET; |
696 | case SCSIML_STAT_DL_TIMEOUT: |
697 | return BLK_STS_DURATION_LIMIT; |
698 | } |
699 | |
700 | switch (host_byte(result)) { |
701 | case DID_OK: |
702 | if (scsi_status_is_good(result))
703 | return BLK_STS_OK; |
704 | return BLK_STS_IOERR; |
705 | case DID_TRANSPORT_FAILFAST: |
706 | case DID_TRANSPORT_MARGINAL: |
707 | return BLK_STS_TRANSPORT; |
708 | default: |
709 | return BLK_STS_IOERR; |
710 | } |
711 | } |
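/*
 * For example (illustrative): a result of DID_TRANSPORT_FAILFAST << 16
 * maps to BLK_STS_TRANSPORT, while SAM_STAT_CHECK_CONDITION with a host
 * byte of DID_OK and no scsi-ml byte maps to BLK_STS_IOERR.
 */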
712 | |
713 | /** |
714 | * scsi_rq_err_bytes - determine number of bytes till the next failure boundary |
715 | * @rq: request to examine |
716 | * |
717 | * Description: |
718 | * A request could be a merge of IOs which require different failure
719 | * handling. This function determines the number of bytes which |
720 | * can be failed from the beginning of the request without |
721 | * crossing into an area which needs to be retried further.
722 | * |
723 | * Return: |
724 | * The number of bytes to fail. |
725 | */ |
726 | static unsigned int scsi_rq_err_bytes(const struct request *rq) |
727 | { |
728 | blk_opf_t ff = rq->cmd_flags & REQ_FAILFAST_MASK; |
729 | unsigned int bytes = 0; |
730 | struct bio *bio; |
731 | |
732 | if (!(rq->rq_flags & RQF_MIXED_MERGE)) |
733 | return blk_rq_bytes(rq); |
734 | |
735 | /* |
736 | * Currently the only 'mixing' which can happen is between |
737 | * different fastfail types. We can safely fail portions |
738 | * which have all the failfast bits that the first one has - |
739 | * the ones which are at least as eager to fail as the first |
740 | * one. |
741 | */ |
742 | for (bio = rq->bio; bio; bio = bio->bi_next) { |
743 | if ((bio->bi_opf & ff) != ff) |
744 | break; |
745 | bytes += bio->bi_iter.bi_size; |
746 | } |
747 | |
748 | /* this could lead to infinite loop */ |
749 | BUG_ON(blk_rq_bytes(rq) && !bytes); |
750 | return bytes; |
751 | } |
752 | |
753 | static bool scsi_cmd_runtime_exceeced(struct scsi_cmnd *cmd) |
754 | { |
755 | struct request *req = scsi_cmd_to_rq(cmd);
756 | unsigned long wait_for; |
757 | |
758 | if (cmd->allowed == SCSI_CMD_RETRIES_NO_LIMIT) |
759 | return false; |
760 | |
761 | wait_for = (cmd->allowed + 1) * req->timeout; |
762 | if (time_before(cmd->jiffies_at_alloc + wait_for, jiffies)) { |
763 | scmd_printk(KERN_ERR, cmd, "timing out command, waited %lus\n",
764 | wait_for/HZ); |
765 | return true; |
766 | } |
767 | return false; |
768 | } |
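/*
 * Worked example (illustrative): with cmd->allowed == 3 and a request
 * timeout of 30 * HZ, the command is given up on once more than
 * (3 + 1) * 30 seconds have elapsed since cmd->jiffies_at_alloc.
 */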
769 | |
770 | /* |
771 | * When ALUA transition state is returned, reprep the cmd to |
772 | * use the ALUA handler's transition timeout. Delay the reprep |
773 | * 1 sec to avoid aggressive retries of the target in that |
774 | * state. |
775 | */ |
776 | #define ALUA_TRANSITION_REPREP_DELAY 1000 |
777 | |
778 | /* Helper for scsi_io_completion() when special action required. */ |
779 | static void scsi_io_completion_action(struct scsi_cmnd *cmd, int result) |
780 | { |
781 | struct request *req = scsi_cmd_to_rq(cmd);
782 | int level = 0; |
783 | enum {ACTION_FAIL, ACTION_REPREP, ACTION_DELAYED_REPREP, |
784 | ACTION_RETRY, ACTION_DELAYED_RETRY} action; |
785 | struct scsi_sense_hdr sshdr; |
786 | bool sense_valid; |
787 | bool sense_current = true; /* false implies "deferred sense" */ |
788 | blk_status_t blk_stat; |
789 | |
790 | sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
791 | if (sense_valid)
792 | sense_current = !scsi_sense_is_deferred(&sshdr);
793 | |
794 | blk_stat = scsi_result_to_blk_status(result); |
795 | |
796 | if (host_byte(result) == DID_RESET) { |
797 | /* Third party bus reset or reset for error recovery |
798 | * reasons. Just retry the command and see what |
799 | * happens. |
800 | */ |
801 | action = ACTION_RETRY; |
802 | } else if (sense_valid && sense_current) { |
803 | switch (sshdr.sense_key) { |
804 | case UNIT_ATTENTION: |
805 | if (cmd->device->removable) { |
806 | /* Detected disc change. Set a bit |
807 | * and quietly refuse further access. |
808 | */ |
809 | cmd->device->changed = 1; |
810 | action = ACTION_FAIL; |
811 | } else { |
812 | /* Must have been a power glitch, or a |
813 | * bus reset. Could not have been a |
814 | * media change, so we just retry the |
815 | * command and see what happens. |
816 | */ |
817 | action = ACTION_RETRY; |
818 | } |
819 | break; |
820 | case ILLEGAL_REQUEST: |
821 | /* If we had an ILLEGAL REQUEST returned, then |
822 | * we may have performed an unsupported |
823 | * command. The only thing this should be |
824 | * would be a ten byte read where only a six |
825 | * byte read was supported. Also, on a system |
826 | * where READ CAPACITY failed, we may have |
827 | * read past the end of the disk. |
828 | */ |
829 | if ((cmd->device->use_10_for_rw && |
830 | sshdr.asc == 0x20 && sshdr.ascq == 0x00) && |
831 | (cmd->cmnd[0] == READ_10 || |
832 | cmd->cmnd[0] == WRITE_10)) { |
833 | /* This will issue a new 6-byte command. */ |
834 | cmd->device->use_10_for_rw = 0; |
835 | action = ACTION_REPREP; |
836 | } else if (sshdr.asc == 0x10) /* DIX */ { |
837 | action = ACTION_FAIL; |
838 | blk_stat = BLK_STS_PROTECTION; |
839 | /* INVALID COMMAND OPCODE or INVALID FIELD IN CDB */ |
840 | } else if (sshdr.asc == 0x20 || sshdr.asc == 0x24) { |
841 | action = ACTION_FAIL; |
842 | blk_stat = BLK_STS_TARGET; |
843 | } else |
844 | action = ACTION_FAIL; |
845 | break; |
846 | case ABORTED_COMMAND: |
847 | action = ACTION_FAIL; |
848 | if (sshdr.asc == 0x10) /* DIF */ |
849 | blk_stat = BLK_STS_PROTECTION; |
850 | break; |
851 | case NOT_READY: |
852 | /* If the device is in the process of becoming |
853 | * ready, or has a temporary blockage, retry. |
854 | */ |
855 | if (sshdr.asc == 0x04) { |
856 | switch (sshdr.ascq) { |
857 | case 0x01: /* becoming ready */ |
858 | case 0x04: /* format in progress */ |
859 | case 0x05: /* rebuild in progress */ |
860 | case 0x06: /* recalculation in progress */ |
861 | case 0x07: /* operation in progress */ |
862 | case 0x08: /* Long write in progress */ |
863 | case 0x09: /* self test in progress */ |
864 | case 0x11: /* notify (enable spinup) required */ |
865 | case 0x14: /* space allocation in progress */ |
866 | case 0x1a: /* start stop unit in progress */ |
867 | case 0x1b: /* sanitize in progress */ |
868 | case 0x1d: /* configuration in progress */ |
869 | case 0x24: /* depopulation in progress */ |
870 | case 0x25: /* depopulation restore in progress */ |
871 | action = ACTION_DELAYED_RETRY; |
872 | break; |
873 | case 0x0a: /* ALUA state transition */ |
874 | action = ACTION_DELAYED_REPREP; |
875 | break; |
876 | default: |
877 | action = ACTION_FAIL; |
878 | break; |
879 | } |
880 | } else |
881 | action = ACTION_FAIL; |
882 | break; |
883 | case VOLUME_OVERFLOW: |
884 | /* See SSC3rXX or current. */ |
885 | action = ACTION_FAIL; |
886 | break; |
887 | case DATA_PROTECT: |
888 | action = ACTION_FAIL; |
889 | if ((sshdr.asc == 0x0C && sshdr.ascq == 0x12) || |
890 | (sshdr.asc == 0x55 && |
891 | (sshdr.ascq == 0x0E || sshdr.ascq == 0x0F))) { |
892 | /* Insufficient zone resources */ |
893 | blk_stat = BLK_STS_ZONE_OPEN_RESOURCE; |
894 | } |
895 | break; |
896 | case COMPLETED: |
897 | fallthrough; |
898 | default: |
899 | action = ACTION_FAIL; |
900 | break; |
901 | } |
902 | } else |
903 | action = ACTION_FAIL; |
904 | |
905 | if (action != ACTION_FAIL && scsi_cmd_runtime_exceeced(cmd)) |
906 | action = ACTION_FAIL; |
907 | |
908 | switch (action) { |
909 | case ACTION_FAIL: |
910 | /* Give up and fail the remainder of the request */ |
911 | if (!(req->rq_flags & RQF_QUIET)) { |
912 | static DEFINE_RATELIMIT_STATE(_rs, |
913 | DEFAULT_RATELIMIT_INTERVAL, |
914 | DEFAULT_RATELIMIT_BURST); |
915 | |
916 | if (unlikely(scsi_logging_level)) |
917 | level = |
918 | SCSI_LOG_LEVEL(SCSI_LOG_MLCOMPLETE_SHIFT, |
919 | SCSI_LOG_MLCOMPLETE_BITS); |
920 | |
921 | /* |
922 | * if logging is enabled the failure will be printed |
923 | * in scsi_log_completion(), so avoid duplicate messages |
924 | */ |
925 | if (!level && __ratelimit(&_rs)) { |
926 | scsi_print_result(cmd, NULL, FAILED); |
927 | if (sense_valid) |
928 | scsi_print_sense(cmd); |
929 | scsi_print_command(cmd); |
930 | } |
931 | } |
932 | if (!scsi_end_request(req, blk_stat, scsi_rq_err_bytes(req)))
933 | return; |
934 | fallthrough; |
935 | case ACTION_REPREP: |
936 | scsi_mq_requeue_cmd(cmd, 0);
937 | break; |
938 | case ACTION_DELAYED_REPREP: |
939 | scsi_mq_requeue_cmd(cmd, ALUA_TRANSITION_REPREP_DELAY); |
940 | break; |
941 | case ACTION_RETRY: |
942 | /* Retry the same command immediately */ |
943 | __scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY, false);
944 | break; |
945 | case ACTION_DELAYED_RETRY: |
946 | /* Retry the same command after a delay */ |
947 | __scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY, false);
948 | break; |
949 | } |
950 | } |
951 | |
952 | /* |
953 | * Helper for scsi_io_completion() when cmd->result is non-zero. Returns a |
954 | * new result that may suppress further error checking. Also modifies |
955 | * *blk_statp in some cases. |
956 | */ |
957 | static int scsi_io_completion_nz_result(struct scsi_cmnd *cmd, int result, |
958 | blk_status_t *blk_statp) |
959 | { |
960 | bool sense_valid; |
961 | bool sense_current = true; /* false implies "deferred sense" */ |
962 | struct request *req = scsi_cmd_to_rq(cmd);
963 | struct scsi_sense_hdr sshdr; |
964 | |
965 | sense_valid = scsi_command_normalize_sense(cmd, &sshdr);
966 | if (sense_valid)
967 | sense_current = !scsi_sense_is_deferred(&sshdr);
968 | |
969 | if (blk_rq_is_passthrough(req)) {
970 | if (sense_valid) { |
971 | /* |
972 | * SG_IO wants current and deferred errors |
973 | */ |
974 | cmd->sense_len = min(8 + cmd->sense_buffer[7], |
975 | SCSI_SENSE_BUFFERSIZE); |
976 | } |
977 | if (sense_current) |
978 | *blk_statp = scsi_result_to_blk_status(result); |
979 | } else if (blk_rq_bytes(req) == 0 && sense_current) {
980 | /*
981 | * Flush commands do not transfer any data, and thus cannot use
982 | * good_bytes != blk_rq_bytes(req) as the signal for an error. |
983 | * This sets *blk_statp explicitly for the problem case. |
984 | */ |
985 | *blk_statp = scsi_result_to_blk_status(result); |
986 | } |
987 | /* |
988 | * Recovered errors need reporting, but they're always treated as |
989 | * success, so fiddle the result code here. For passthrough requests |
990 | * we already took a copy of the original into sreq->result which |
991 | * is what gets returned to the user |
992 | */ |
993 | if (sense_valid && (sshdr.sense_key == RECOVERED_ERROR)) { |
994 | bool do_print = true; |
995 | /* |
996 | * if ATA PASS-THROUGH INFORMATION AVAILABLE [0x0, 0x1d] |
997 | * skip print since caller wants ATA registers. Only occurs |
998 | * on SCSI ATA PASS_THROUGH commands when CK_COND=1 |
999 | */ |
1000 | if ((sshdr.asc == 0x0) && (sshdr.ascq == 0x1d)) |
1001 | do_print = false; |
1002 | else if (req->rq_flags & RQF_QUIET) |
1003 | do_print = false; |
1004 | if (do_print) |
1005 | scsi_print_sense(cmd); |
1006 | result = 0; |
1007 | /* for passthrough, *blk_statp may be set */ |
1008 | *blk_statp = BLK_STS_OK; |
1009 | } |
1010 | /* |
1011 | * Another corner case: the SCSI status byte is non-zero but 'good'. |
1012 | * Example: PRE-FETCH command returns SAM_STAT_CONDITION_MET when |
1013 | * it is able to fit nominated LBs in its cache (and SAM_STAT_GOOD |
1014 | * if it can't fit). Treat SAM_STAT_CONDITION_MET and the related |
1015 | * intermediate statuses (both obsolete in SAM-4) as good. |
1016 | */ |
1017 | if ((result & 0xff) && scsi_status_is_good(result)) {
1018 | result = 0; |
1019 | *blk_statp = BLK_STS_OK; |
1020 | } |
1021 | return result; |
1022 | } |
1023 | |
1024 | /** |
1025 | * scsi_io_completion - Completion processing for SCSI commands. |
1026 | * @cmd: command that is finished. |
1027 | * @good_bytes: number of processed bytes. |
1028 | * |
1029 | * We will finish off the specified number of sectors. If we are done, the |
1030 | * command block will be released and the queue function will be goosed. If we |
1031 | * are not done then we have to figure out what to do next: |
1032 | * |
1033 | * a) We can call scsi_mq_requeue_cmd(). The request will be |
1034 | * unprepared and put back on the queue. Then a new command will |
1035 | * be created for it. This should be used if we made forward |
1036 | * progress, or if we want to switch from READ(10) to READ(6) for |
1037 | * example. |
1038 | * |
1039 | * b) We can call scsi_io_completion_action(). The request will be |
1040 | * put back on the queue and retried using the same command as |
1041 | * before, possibly after a delay. |
1042 | * |
1043 | * c) We can call scsi_end_request() with blk_stat other than |
1044 | * BLK_STS_OK, to fail the remainder of the request. |
1045 | */ |
1046 | void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes) |
1047 | { |
1048 | int result = cmd->result; |
1049 | struct request *req = scsi_cmd_to_rq(cmd);
1050 | blk_status_t blk_stat = BLK_STS_OK; |
1051 | |
1052 | if (unlikely(result)) /* a nz result may or may not be an error */ |
1053 | result = scsi_io_completion_nz_result(cmd, result, &blk_stat);
1054 | |
1055 | /* |
1056 | * Next deal with any sectors which we were able to correctly |
1057 | * handle. |
1058 | */ |
1059 | SCSI_LOG_HLCOMPLETE(1, scmd_printk(KERN_INFO, cmd, |
1060 | "%u sectors total, %d bytes done.\n" , |
1061 | blk_rq_sectors(req), good_bytes)); |
1062 | |
1063 | /* |
1064 | * Failed, zero length commands always need to drop down |
1065 | * to retry code. Fast path should return in this block. |
1066 | */ |
1067 | if (likely(blk_rq_bytes(req) > 0 || blk_stat == BLK_STS_OK)) { |
1068 | if (likely(!scsi_end_request(req, blk_stat, good_bytes))) |
1069 | return; /* no bytes remaining */ |
1070 | } |
1071 | |
1072 | /* Kill remainder if no retries. */ |
1073 | if (unlikely(blk_stat && scsi_noretry_cmd(cmd))) { |
1074 | if (scsi_end_request(req, blk_stat, blk_rq_bytes(req)))
1075 | WARN_ONCE(true,
1076 | "Bytes remaining after failed, no-retry command");
1077 | return; |
1078 | } |
1079 | |
1080 | /* |
1081 | * If there had been no error, but we have leftover bytes in the |
1082 | * request just queue the command up again. |
1083 | */ |
1084 | if (likely(result == 0)) |
1085 | scsi_mq_requeue_cmd(cmd, 0);
1086 | else |
1087 | scsi_io_completion_action(cmd, result); |
1088 | } |
1089 | |
1090 | static inline bool scsi_cmd_needs_dma_drain(struct scsi_device *sdev, |
1091 | struct request *rq) |
1092 | { |
1093 | return sdev->dma_drain_len && blk_rq_is_passthrough(rq) && |
1094 | !op_is_write(req_op(rq)) &&
1095 | sdev->host->hostt->dma_need_drain(rq); |
1096 | } |
1097 | |
1098 | /** |
1099 | * scsi_alloc_sgtables - Allocate and initialize data and integrity scatterlists |
1100 | * @cmd: SCSI command data structure to initialize. |
1101 | * |
1102 | * Initializes @cmd->sdb and also @cmd->prot_sdb if data integrity is enabled |
1103 | * for @cmd. |
1104 | * |
1105 | * Returns: |
1106 | * * BLK_STS_OK - on success |
1107 | * * BLK_STS_RESOURCE - if the failure is retryable |
1108 | * * BLK_STS_IOERR - if the failure is fatal |
1109 | */ |
1110 | blk_status_t scsi_alloc_sgtables(struct scsi_cmnd *cmd) |
1111 | { |
1112 | struct scsi_device *sdev = cmd->device; |
1113 | struct request *rq = scsi_cmd_to_rq(cmd);
1114 | unsigned short nr_segs = blk_rq_nr_phys_segments(rq); |
1115 | struct scatterlist *last_sg = NULL; |
1116 | blk_status_t ret; |
1117 | bool need_drain = scsi_cmd_needs_dma_drain(sdev, rq); |
1118 | int count; |
1119 | |
1120 | if (WARN_ON_ONCE(!nr_segs)) |
1121 | return BLK_STS_IOERR; |
1122 | |
1123 | /* |
1124 | * Make sure there is space for the drain. The driver must adjust |
1125 | * max_hw_segments to be prepared for this. |
1126 | */ |
1127 | if (need_drain) |
1128 | nr_segs++; |
1129 | |
1130 | /* |
1131 | * If sg table allocation fails, requeue request later. |
1132 | */ |
1133 | if (unlikely(sg_alloc_table_chained(&cmd->sdb.table, nr_segs, |
1134 | cmd->sdb.table.sgl, SCSI_INLINE_SG_CNT))) |
1135 | return BLK_STS_RESOURCE; |
1136 | |
1137 | /* |
1138 | * Next, walk the list, and fill in the addresses and sizes of |
1139 | * each segment. |
1140 | */ |
1141 | count = __blk_rq_map_sg(rq->q, rq, cmd->sdb.table.sgl, &last_sg);
1142 | |
1143 | if (blk_rq_bytes(rq) & rq->q->dma_pad_mask) { |
1144 | unsigned int pad_len = |
1145 | (rq->q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1; |
1146 | |
1147 | last_sg->length += pad_len; |
1148 | cmd->extra_len += pad_len; |
1149 | } |
1150 | |
1151 | if (need_drain) { |
1152 | sg_unmark_end(last_sg);
1153 | last_sg = sg_next(last_sg);
1154 | sg_set_buf(last_sg, sdev->dma_drain_buf, sdev->dma_drain_len);
1155 | sg_mark_end(last_sg);
1156 | |
1157 | cmd->extra_len += sdev->dma_drain_len; |
1158 | count++; |
1159 | } |
1160 | |
1161 | BUG_ON(count > cmd->sdb.table.nents); |
1162 | cmd->sdb.table.nents = count; |
1163 | cmd->sdb.length = blk_rq_payload_bytes(rq); |
1164 | |
1165 | if (blk_integrity_rq(rq)) { |
1166 | struct scsi_data_buffer *prot_sdb = cmd->prot_sdb; |
1167 | int ivecs; |
1168 | |
1169 | if (WARN_ON_ONCE(!prot_sdb)) { |
1170 | /* |
1171 | * This can happen if someone (e.g. multipath) |
1172 | * queues a command to a device on an adapter |
1173 | * that does not support DIX. |
1174 | */ |
1175 | ret = BLK_STS_IOERR; |
1176 | goto out_free_sgtables; |
1177 | } |
1178 | |
1179 | ivecs = blk_rq_count_integrity_sg(rq->q, rq->bio); |
1180 | |
1181 | if (sg_alloc_table_chained(&prot_sdb->table, ivecs,
1182 | prot_sdb->table.sgl,
1183 | SCSI_INLINE_PROT_SG_CNT)) { |
1184 | ret = BLK_STS_RESOURCE; |
1185 | goto out_free_sgtables; |
1186 | } |
1187 | |
1188 | count = blk_rq_map_integrity_sg(rq->q, rq->bio, |
1189 | prot_sdb->table.sgl); |
1190 | BUG_ON(count > ivecs); |
1191 | BUG_ON(count > queue_max_integrity_segments(rq->q)); |
1192 | |
1193 | cmd->prot_sdb = prot_sdb; |
1194 | cmd->prot_sdb->table.nents = count; |
1195 | } |
1196 | |
1197 | return BLK_STS_OK; |
1198 | out_free_sgtables: |
1199 | scsi_free_sgtables(cmd); |
1200 | return ret; |
1201 | } |
1202 | EXPORT_SYMBOL(scsi_alloc_sgtables); |
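/*
 * Illustrative sketch of how an upper-level driver's ->init_command()
 * typically consumes this (simplified; real callers also build the CDB
 * and any protection information). On BLK_STS_OK, cmd->sdb.table.sgl
 * maps the request's data buffer:
 *
 *	blk_status_t ret = scsi_alloc_sgtables(cmd);
 *
 *	if (ret != BLK_STS_OK)
 *		return ret;
 */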
1203 | |
1204 | /** |
1205 | * scsi_initialize_rq - initialize struct scsi_cmnd partially |
1206 | * @rq: Request associated with the SCSI command to be initialized. |
1207 | * |
1208 | * This function initializes the members of struct scsi_cmnd that must be |
1209 | * initialized before request processing starts and that won't be |
1210 | * reinitialized if a SCSI command is requeued. |
1211 | */ |
1212 | static void scsi_initialize_rq(struct request *rq) |
1213 | { |
1214 | struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); |
1215 | |
1216 | memset(cmd->cmnd, 0, sizeof(cmd->cmnd)); |
1217 | cmd->cmd_len = MAX_COMMAND_SIZE; |
1218 | cmd->sense_len = 0; |
1219 | init_rcu_head(&cmd->rcu);
1220 | cmd->jiffies_at_alloc = jiffies; |
1221 | cmd->retries = 0; |
1222 | } |
1223 | |
1224 | struct request *scsi_alloc_request(struct request_queue *q, blk_opf_t opf, |
1225 | blk_mq_req_flags_t flags) |
1226 | { |
1227 | struct request *rq; |
1228 | |
1229 | rq = blk_mq_alloc_request(q, opf, flags); |
1230 | if (!IS_ERR(rq))
1231 | scsi_initialize_rq(rq); |
1232 | return rq; |
1233 | } |
1234 | EXPORT_SYMBOL_GPL(scsi_alloc_request); |
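/*
 * Illustrative sketch (see scsi_execute_cmd() above for a complete
 * user): allocate a passthrough request, issue it, then free it.
 *
 *	struct request *rq;
 *
 *	rq = scsi_alloc_request(sdev->request_queue, REQ_OP_DRV_IN, 0);
 *	if (IS_ERR(rq))
 *		return PTR_ERR(rq);
 *	...
 *	blk_mq_free_request(rq);
 */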
1235 | |
1236 | /* |
1237 | * Only called when the request isn't completed by SCSI, and not freed by |
1238 | * SCSI |
1239 | */ |
1240 | static void scsi_cleanup_rq(struct request *rq) |
1241 | { |
1242 | if (rq->rq_flags & RQF_DONTPREP) { |
1243 | scsi_mq_uninit_cmd(blk_mq_rq_to_pdu(rq));
1244 | rq->rq_flags &= ~RQF_DONTPREP; |
1245 | } |
1246 | } |
1247 | |
1248 | /* Called before a request is prepared. See also scsi_mq_prep_fn(). */ |
1249 | void scsi_init_command(struct scsi_device *dev, struct scsi_cmnd *cmd) |
1250 | { |
1251 | struct request *rq = scsi_cmd_to_rq(cmd);
1252 | |
1253 | if (!blk_rq_is_passthrough(rq) && !(cmd->flags & SCMD_INITIALIZED)) { |
1254 | cmd->flags |= SCMD_INITIALIZED; |
1255 | scsi_initialize_rq(rq); |
1256 | } |
1257 | |
1258 | cmd->device = dev; |
1259 | INIT_LIST_HEAD(&cmd->eh_entry);
1260 | INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler); |
1261 | } |
1262 | |
1263 | static blk_status_t scsi_setup_scsi_cmnd(struct scsi_device *sdev, |
1264 | struct request *req) |
1265 | { |
1266 | struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
1267 | |
1268 | /* |
1269 | * Passthrough requests may transfer data, in which case they must |
1270 | * have a bio attached to them. Or they might contain a SCSI command
1271 | * that does not transfer data, in which case they may optionally |
1272 | * submit a request without an attached bio. |
1273 | */ |
1274 | if (req->bio) { |
1275 | blk_status_t ret = scsi_alloc_sgtables(cmd); |
1276 | if (unlikely(ret != BLK_STS_OK)) |
1277 | return ret; |
1278 | } else { |
1279 | BUG_ON(blk_rq_bytes(req)); |
1280 | |
1281 | memset(&cmd->sdb, 0, sizeof(cmd->sdb)); |
1282 | } |
1283 | |
1284 | cmd->transfersize = blk_rq_bytes(req);
1285 | return BLK_STS_OK; |
1286 | } |
1287 | |
1288 | static blk_status_t |
1289 | scsi_device_state_check(struct scsi_device *sdev, struct request *req) |
1290 | { |
1291 | switch (sdev->sdev_state) { |
1292 | case SDEV_CREATED: |
1293 | return BLK_STS_OK; |
1294 | case SDEV_OFFLINE: |
1295 | case SDEV_TRANSPORT_OFFLINE: |
1296 | /* |
1297 | * If the device is offline we refuse to process any |
1298 | * commands. The device must be brought online |
1299 | * before trying any recovery commands. |
1300 | */ |
1301 | if (!sdev->offline_already) { |
1302 | sdev->offline_already = true; |
1303 | sdev_printk(KERN_ERR, sdev, |
1304 | "rejecting I/O to offline device\n" ); |
1305 | } |
1306 | return BLK_STS_IOERR; |
1307 | case SDEV_DEL: |
1308 | /* |
1309 | * If the device is fully deleted, we refuse to |
1310 | * process any commands as well. |
1311 | */ |
1312 | sdev_printk(KERN_ERR, sdev, |
1313 | "rejecting I/O to dead device\n" ); |
1314 | return BLK_STS_IOERR; |
1315 | case SDEV_BLOCK: |
1316 | case SDEV_CREATED_BLOCK: |
1317 | return BLK_STS_RESOURCE; |
1318 | case SDEV_QUIESCE: |
1319 | /* |
1320 | * If the device is blocked we only accept power management |
1321 | * commands. |
1322 | */ |
1323 | if (req && WARN_ON_ONCE(!(req->rq_flags & RQF_PM))) |
1324 | return BLK_STS_RESOURCE; |
1325 | return BLK_STS_OK; |
1326 | default: |
1327 | /* |
1328 | * For any other not fully online state we only allow |
1329 | * power management commands. |
1330 | */ |
1331 | if (req && !(req->rq_flags & RQF_PM)) |
1332 | return BLK_STS_OFFLINE; |
1333 | return BLK_STS_OK; |
1334 | } |
1335 | } |
1336 | |
1337 | /* |
1338 | * scsi_dev_queue_ready: if we can send requests to sdev, assign one token |
1339 | * and return the token else return -1. |
1340 | */ |
1341 | static inline int scsi_dev_queue_ready(struct request_queue *q, |
1342 | struct scsi_device *sdev) |
1343 | { |
1344 | int token; |
1345 | |
1346 | token = sbitmap_get(&sdev->budget_map);
1347 | if (token < 0) |
1348 | return -1; |
1349 | |
1350 | if (!atomic_read(&sdev->device_blocked))
1351 | return token; |
1352 | |
1353 | /* |
1354 | * Only unblock if no other commands are pending and |
1355 | * if device_blocked has decreased to zero |
1356 | */ |
1357 | if (scsi_device_busy(sdev) > 1 || |
1358 | atomic_dec_return(&sdev->device_blocked) > 0) {
1359 | sbitmap_put(&sdev->budget_map, token);
1360 | return -1; |
1361 | } |
1362 | |
1363 | SCSI_LOG_MLQUEUE(3, sdev_printk(KERN_INFO, sdev, |
1364 | "unblocking device at zero depth\n" )); |
1365 | |
1366 | return token; |
1367 | } |
1368 | |
1369 | /* |
1370 | * scsi_target_queue_ready: checks if we can send commands to the target
1371 | * @sdev: scsi device on starget to check. |
1372 | */ |
1373 | static inline int scsi_target_queue_ready(struct Scsi_Host *shost, |
1374 | struct scsi_device *sdev) |
1375 | { |
1376 | struct scsi_target *starget = scsi_target(sdev); |
1377 | unsigned int busy; |
1378 | |
1379 | if (starget->single_lun) { |
1380 | spin_lock_irq(shost->host_lock);
1381 | if (starget->starget_sdev_user && |
1382 | starget->starget_sdev_user != sdev) { |
1383 | spin_unlock_irq(shost->host_lock);
1384 | return 0; |
1385 | } |
1386 | starget->starget_sdev_user = sdev; |
1387 | spin_unlock_irq(shost->host_lock);
1388 | } |
1389 | |
1390 | if (starget->can_queue <= 0) |
1391 | return 1; |
1392 | |
1393 | busy = atomic_inc_return(&starget->target_busy) - 1;
1394 | if (atomic_read(&starget->target_blocked) > 0) {
1395 | if (busy) |
1396 | goto starved; |
1397 | |
1398 | /* |
1399 | * unblock after target_blocked iterates to zero |
1400 | */ |
1401 | if (atomic_dec_return(&starget->target_blocked) > 0)
1402 | goto out_dec; |
1403 | |
1404 | SCSI_LOG_MLQUEUE(3, starget_printk(KERN_INFO, starget, |
1405 | "unblocking target at zero depth\n" )); |
1406 | } |
1407 | |
1408 | if (busy >= starget->can_queue) |
1409 | goto starved; |
1410 | |
1411 | return 1; |
1412 | |
1413 | starved: |
1414 | spin_lock_irq(shost->host_lock);
1415 | list_move_tail(&sdev->starved_entry, &shost->starved_list);
1416 | spin_unlock_irq(shost->host_lock);
1417 | out_dec: |
1418 | if (starget->can_queue > 0) |
1419 | atomic_dec(&starget->target_busy);
1420 | return 0; |
1421 | } |
1422 | |
1423 | /* |
1424 | * scsi_host_queue_ready: if we can send requests to shost, return 1 else |
1425 | * return 0. We must end up running the queue again whenever 0 is |
1426 | * returned, else IO can hang. |
1427 | */ |
1428 | static inline int scsi_host_queue_ready(struct request_queue *q, |
1429 | struct Scsi_Host *shost, |
1430 | struct scsi_device *sdev, |
1431 | struct scsi_cmnd *cmd) |
1432 | { |
1433 | if (atomic_read(&shost->host_blocked) > 0) {
1434 | if (scsi_host_busy(shost) > 0) |
1435 | goto starved; |
1436 | |
1437 | /* |
1438 | * unblock after host_blocked iterates to zero |
1439 | */ |
1440 | if (atomic_dec_return(&shost->host_blocked) > 0)
1441 | goto out_dec; |
1442 | |
1443 | SCSI_LOG_MLQUEUE(3, |
1444 | shost_printk(KERN_INFO, shost, |
1445 | "unblocking host at zero depth\n" )); |
1446 | } |
1447 | |
1448 | if (shost->host_self_blocked) |
1449 | goto starved; |
1450 | |
1451 | /* We're OK to process the command, so we can't be starved */ |
1452 | if (!list_empty(&sdev->starved_entry)) {
1453 | spin_lock_irq(shost->host_lock);
1454 | if (!list_empty(&sdev->starved_entry))
1455 | list_del_init(&sdev->starved_entry);
1456 | spin_unlock_irq(shost->host_lock);
1457 | } |
1458 | |
1459 | __set_bit(SCMD_STATE_INFLIGHT, &cmd->state); |
1460 | |
1461 | return 1; |
1462 | |
1463 | starved: |
1464 | spin_lock_irq(shost->host_lock);
1465 | if (list_empty(&sdev->starved_entry))
1466 | list_add_tail(&sdev->starved_entry, &shost->starved_list);
1467 | spin_unlock_irq(shost->host_lock);
1468 | out_dec: |
1469 | scsi_dec_host_busy(shost, cmd); |
1470 | return 0; |
1471 | } |
1472 | |
1473 | /* |
1474 | * Busy state exporting function for request stacking drivers. |
1475 | * |
1476 | * For efficiency, no lock is taken to check the busy state of |
1477 | * shost/starget/sdev, since the returned value is not guaranteed and |
1478 | * may be changed after request stacking drivers call the function, |
1479 | * regardless of taking lock or not. |
1480 | * |
1481 | * When scsi can't dispatch I/Os anymore and needs to kill I/Os scsi |
1482 | * needs to return 'not busy'. Otherwise, request stacking drivers |
1483 | * may hold requests forever. |
1484 | */ |
1485 | static bool scsi_mq_lld_busy(struct request_queue *q) |
1486 | { |
1487 | struct scsi_device *sdev = q->queuedata; |
1488 | struct Scsi_Host *shost; |
1489 | |
1490 | if (blk_queue_dying(q)) |
1491 | return false; |
1492 | |
1493 | shost = sdev->host; |
1494 | |
1495 | /* |
1496 | * Ignore host/starget busy state. |
1497 | * Since block layer does not have a concept of fairness across |
1498 | * multiple queues, congestion of host/starget needs to be handled |
1499 | * in SCSI layer. |
1500 | */ |
1501 | if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev)) |
1502 | return true; |
1503 | |
1504 | return false; |
1505 | } |
1506 | |
1507 | /* |
1508 | * Block layer request completion callback. May be called from interrupt |
1509 | * context. |
1510 | */ |
1511 | static void scsi_complete(struct request *rq) |
1512 | { |
1513 | struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); |
1514 | enum scsi_disposition disposition; |
1515 | |
1516 | INIT_LIST_HEAD(&cmd->eh_entry);
1517 | |
1518 | atomic_inc(&cmd->device->iodone_cnt);
1519 | if (cmd->result)
1520 | atomic_inc(&cmd->device->ioerr_cnt);
1521 | |
1522 | disposition = scsi_decide_disposition(cmd); |
1523 | if (disposition != SUCCESS && scsi_cmd_runtime_exceeced(cmd)) |
1524 | disposition = SUCCESS; |
1525 | |
1526 | scsi_log_completion(cmd, disposition); |
1527 | |
1528 | switch (disposition) { |
1529 | case SUCCESS: |
1530 | scsi_finish_command(cmd); |
1531 | break; |
1532 | case NEEDS_RETRY: |
1533 | scsi_queue_insert(cmd, SCSI_MLQUEUE_EH_RETRY); |
1534 | break; |
1535 | case ADD_TO_MLQUEUE: |
1536 | scsi_queue_insert(cmd, SCSI_MLQUEUE_DEVICE_BUSY); |
1537 | break; |
1538 | default: |
1539 | scsi_eh_scmd_add(cmd); |
1540 | break; |
1541 | } |
1542 | } |
1543 | |
1544 | /** |
1545 | * scsi_dispatch_cmd - Dispatch a command to the low-level driver. |
1546 | * @cmd: command block we are dispatching. |
1547 | * |
1548 | * Return: nonzero if the request was rejected and the device's queue needs to be
1549 | * plugged. |
1550 | */ |
1551 | static int scsi_dispatch_cmd(struct scsi_cmnd *cmd) |
1552 | { |
1553 | struct Scsi_Host *host = cmd->device->host; |
1554 | int rtn = 0; |
1555 | |
1556 | atomic_inc(&cmd->device->iorequest_cnt);
1557 | |
1558 | /* check if the device is still usable */ |
1559 | if (unlikely(cmd->device->sdev_state == SDEV_DEL)) { |
1560 | /* in SDEV_DEL we error all commands. DID_NO_CONNECT |
1561 | * returns an immediate error upwards, and signals |
1562 | * that the device is no longer present */ |
1563 | cmd->result = DID_NO_CONNECT << 16; |
1564 | goto done; |
1565 | } |
1566 | |
1567 | /* Check to see if the scsi lld made this device blocked. */ |
1568 | if (unlikely(scsi_device_blocked(cmd->device))) { |
1569 | /* |
1570 | * in blocked state, the command is just put back on |
1571 | * the device queue. The suspend state has already |
1572 | * blocked the queue so future requests should not |
1573 | * occur until the device transitions out of the |
1574 | * suspend state. |
1575 | */ |
1576 | SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd, |
1577 | "queuecommand : device blocked\n" )); |
1578 | atomic_dec(v: &cmd->device->iorequest_cnt); |
1579 | return SCSI_MLQUEUE_DEVICE_BUSY; |
1580 | } |
1581 | |
1582 | /* Store the LUN value in cmnd, if needed. */ |
1583 | if (cmd->device->lun_in_cdb) |
1584 | cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) | |
1585 | (cmd->device->lun << 5 & 0xe0); |
1586 | |
1587 | scsi_log_send(cmd); |
1588 | |
1589 | /* |
1590 | * Before we queue this command, check if the command |
1591 | * length exceeds what the host adapter can handle. |
1592 | */ |
1593 | if (cmd->cmd_len > cmd->device->host->max_cmd_len) { |
1594 | SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd, |
1595 | "queuecommand : command too long. " |
1596 | "cdb_size=%d host->max_cmd_len=%d\n" , |
1597 | cmd->cmd_len, cmd->device->host->max_cmd_len)); |
1598 | cmd->result = (DID_ABORT << 16); |
1599 | goto done; |
1600 | } |
1601 | |
1602 | if (unlikely(host->shost_state == SHOST_DEL)) { |
1603 | cmd->result = (DID_NO_CONNECT << 16); |
1604 | goto done; |
1605 | |
1606 | } |
1607 | |
1608 | trace_scsi_dispatch_cmd_start(cmd); |
1609 | rtn = host->hostt->queuecommand(host, cmd); |
1610 | if (rtn) { |
1611 | atomic_dec(&cmd->device->iorequest_cnt);
1612 | trace_scsi_dispatch_cmd_error(cmd, rtn); |
1613 | if (rtn != SCSI_MLQUEUE_DEVICE_BUSY && |
1614 | rtn != SCSI_MLQUEUE_TARGET_BUSY) |
1615 | rtn = SCSI_MLQUEUE_HOST_BUSY; |
1616 | |
1617 | SCSI_LOG_MLQUEUE(3, scmd_printk(KERN_INFO, cmd, |
1618 | "queuecommand : request rejected\n" )); |
1619 | } |
1620 | |
1621 | return rtn; |
1622 | done: |
1623 | scsi_done(cmd); |
1624 | return 0; |
1625 | } |
1626 | |
1627 | /* Size in bytes of the sg-list stored in the scsi-mq command-private data. */ |
1628 | static unsigned int scsi_mq_inline_sgl_size(struct Scsi_Host *shost) |
1629 | { |
1630 | return min_t(unsigned int, shost->sg_tablesize, SCSI_INLINE_SG_CNT) * |
1631 | sizeof(struct scatterlist); |
1632 | } |
1633 | |
1634 | static blk_status_t scsi_prepare_cmd(struct request *req) |
1635 | { |
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
1637 | struct scsi_device *sdev = req->q->queuedata; |
1638 | struct Scsi_Host *shost = sdev->host; |
1639 | bool in_flight = test_bit(SCMD_STATE_INFLIGHT, &cmd->state); |
1640 | struct scatterlist *sg; |
1641 | |
	scsi_init_command(sdev, cmd);
1643 | |
1644 | cmd->eh_eflags = 0; |
1645 | cmd->prot_type = 0; |
1646 | cmd->prot_flags = 0; |
1647 | cmd->submitter = 0; |
1648 | memset(&cmd->sdb, 0, sizeof(cmd->sdb)); |
1649 | cmd->underflow = 0; |
1650 | cmd->transfersize = 0; |
1651 | cmd->host_scribble = NULL; |
1652 | cmd->result = 0; |
1653 | cmd->extra_len = 0; |
1654 | cmd->state = 0; |
1655 | if (in_flight) |
1656 | __set_bit(SCMD_STATE_INFLIGHT, &cmd->state); |
1657 | |
1658 | /* |
1659 | * Only clear the driver-private command data if the LLD does not supply |
1660 | * a function to initialize that data. |
1661 | */ |
1662 | if (!shost->hostt->init_cmd_priv) |
1663 | memset(cmd + 1, 0, shost->hostt->cmd_size); |
1664 | |
1665 | cmd->prot_op = SCSI_PROT_NORMAL; |
	if (blk_rq_bytes(req))
1667 | cmd->sc_data_direction = rq_dma_dir(req); |
1668 | else |
1669 | cmd->sc_data_direction = DMA_NONE; |
1670 | |
1671 | sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size; |
1672 | cmd->sdb.table.sgl = sg; |
1673 | |
1674 | if (scsi_host_get_prot(shost)) { |
1675 | memset(cmd->prot_sdb, 0, sizeof(struct scsi_data_buffer)); |
1676 | |
1677 | cmd->prot_sdb->table.sgl = |
1678 | (struct scatterlist *)(cmd->prot_sdb + 1); |
1679 | } |
1680 | |
1681 | /* |
1682 | * Special handling for passthrough commands, which don't go to the ULP |
1683 | * at all: |
1684 | */ |
	if (blk_rq_is_passthrough(req))
1686 | return scsi_setup_scsi_cmnd(sdev, req); |
1687 | |
1688 | if (sdev->handler && sdev->handler->prep_fn) { |
1689 | blk_status_t ret = sdev->handler->prep_fn(sdev, req); |
1690 | |
1691 | if (ret != BLK_STS_OK) |
1692 | return ret; |
1693 | } |
1694 | |
1695 | /* Usually overridden by the ULP */ |
1696 | cmd->allowed = 0; |
1697 | memset(cmd->cmnd, 0, sizeof(cmd->cmnd)); |
1698 | return scsi_cmd_to_driver(cmd)->init_command(cmd); |
1699 | } |
1700 | |
1701 | static void scsi_done_internal(struct scsi_cmnd *cmd, bool complete_directly) |
1702 | { |
	struct request *req = scsi_cmd_to_rq(cmd);
1704 | |
1705 | switch (cmd->submitter) { |
1706 | case SUBMITTED_BY_BLOCK_LAYER: |
1707 | break; |
1708 | case SUBMITTED_BY_SCSI_ERROR_HANDLER: |
		return scsi_eh_done(cmd);
1710 | case SUBMITTED_BY_SCSI_RESET_IOCTL: |
1711 | return; |
1712 | } |
1713 | |
1714 | if (unlikely(blk_should_fake_timeout(scsi_cmd_to_rq(cmd)->q))) |
1715 | return; |
1716 | if (unlikely(test_and_set_bit(SCMD_STATE_COMPLETE, &cmd->state))) |
1717 | return; |
1718 | trace_scsi_dispatch_cmd_done(cmd); |
1719 | |
1720 | if (complete_directly) |
		blk_mq_complete_request_direct(req, scsi_complete);
1722 | else |
		blk_mq_complete_request(req);
1724 | } |
1725 | |
1726 | void scsi_done(struct scsi_cmnd *cmd) |
1727 | { |
	scsi_done_internal(cmd, false);
1729 | } |
1730 | EXPORT_SYMBOL(scsi_done); |
1731 | |
1732 | void scsi_done_direct(struct scsi_cmnd *cmd) |
1733 | { |
	scsi_done_internal(cmd, true);
1735 | } |
1736 | EXPORT_SYMBOL(scsi_done_direct); |
1737 | |
1738 | static void scsi_mq_put_budget(struct request_queue *q, int budget_token) |
1739 | { |
1740 | struct scsi_device *sdev = q->queuedata; |
1741 | |
	sbitmap_put(&sdev->budget_map, budget_token);
1743 | } |
1744 | |
1745 | /* |
1746 | * When to reinvoke queueing after a resource shortage. It's 3 msecs to |
1747 | * not change behaviour from the previous unplug mechanism, experimentation |
1748 | * may prove this needs changing. |
1749 | */ |
1750 | #define SCSI_QUEUE_DELAY 3 |
1751 | |
1752 | static int scsi_mq_get_budget(struct request_queue *q) |
1753 | { |
1754 | struct scsi_device *sdev = q->queuedata; |
1755 | int token = scsi_dev_queue_ready(q, sdev); |
1756 | |
1757 | if (token >= 0) |
1758 | return token; |
1759 | |
	atomic_inc(&sdev->restarts);
1761 | |
1762 | /* |
1763 | * Orders atomic_inc(&sdev->restarts) and atomic_read(&sdev->device_busy). |
1764 | * .restarts must be incremented before .device_busy is read because the |
1765 | * code in scsi_run_queue_async() depends on the order of these operations. |
1766 | */ |
1767 | smp_mb__after_atomic(); |
1768 | |
1769 | /* |
1770 | * If all in-flight requests originated from this LUN are completed |
1771 | * before reading .device_busy, sdev->device_busy will be observed as |
1772 | * zero, then blk_mq_delay_run_hw_queues() will dispatch this request |
1773 | * soon. Otherwise, completion of one of these requests will observe |
1774 | * the .restarts flag, and the request queue will be run for handling |
1775 | * this request, see scsi_end_request(). |
1776 | */ |
1777 | if (unlikely(scsi_device_busy(sdev) == 0 && |
1778 | !scsi_device_blocked(sdev))) |
		blk_mq_delay_run_hw_queues(sdev->request_queue, SCSI_QUEUE_DELAY);
1780 | return -1; |
1781 | } |
1782 | |
1783 | static void scsi_mq_set_rq_budget_token(struct request *req, int token) |
1784 | { |
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
1786 | |
1787 | cmd->budget_token = token; |
1788 | } |
1789 | |
1790 | static int scsi_mq_get_rq_budget_token(struct request *req) |
1791 | { |
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
1793 | |
1794 | return cmd->budget_token; |
1795 | } |
1796 | |
1797 | static blk_status_t scsi_queue_rq(struct blk_mq_hw_ctx *hctx, |
1798 | const struct blk_mq_queue_data *bd) |
1799 | { |
1800 | struct request *req = bd->rq; |
1801 | struct request_queue *q = req->q; |
1802 | struct scsi_device *sdev = q->queuedata; |
1803 | struct Scsi_Host *shost = sdev->host; |
	struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(req);
1805 | blk_status_t ret; |
1806 | int reason; |
1807 | |
1808 | WARN_ON_ONCE(cmd->budget_token < 0); |
1809 | |
1810 | /* |
1811 | * If the device is not in running state we will reject some or all |
1812 | * commands. |
1813 | */ |
1814 | if (unlikely(sdev->sdev_state != SDEV_RUNNING)) { |
1815 | ret = scsi_device_state_check(sdev, req); |
1816 | if (ret != BLK_STS_OK) |
1817 | goto out_put_budget; |
1818 | } |
1819 | |
1820 | ret = BLK_STS_RESOURCE; |
1821 | if (!scsi_target_queue_ready(shost, sdev)) |
1822 | goto out_put_budget; |
1823 | if (unlikely(scsi_host_in_recovery(shost))) { |
1824 | if (cmd->flags & SCMD_FAIL_IF_RECOVERING) |
1825 | ret = BLK_STS_OFFLINE; |
1826 | goto out_dec_target_busy; |
1827 | } |
1828 | if (!scsi_host_queue_ready(q, shost, sdev, cmd)) |
1829 | goto out_dec_target_busy; |
1830 | |
1831 | if (!(req->rq_flags & RQF_DONTPREP)) { |
1832 | ret = scsi_prepare_cmd(req); |
1833 | if (ret != BLK_STS_OK) |
1834 | goto out_dec_host_busy; |
1835 | req->rq_flags |= RQF_DONTPREP; |
1836 | } else { |
		clear_bit(SCMD_STATE_COMPLETE, &cmd->state);
1838 | } |
1839 | |
1840 | cmd->flags &= SCMD_PRESERVED_FLAGS; |
1841 | if (sdev->simple_tags) |
1842 | cmd->flags |= SCMD_TAGGED; |
1843 | if (bd->last) |
1844 | cmd->flags |= SCMD_LAST; |
1845 | |
	scsi_set_resid(cmd, 0);
1847 | memset(cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); |
1848 | cmd->submitter = SUBMITTED_BY_BLOCK_LAYER; |
1849 | |
	blk_mq_start_request(req);
1851 | reason = scsi_dispatch_cmd(cmd); |
1852 | if (reason) { |
1853 | scsi_set_blocked(cmd, reason); |
1854 | ret = BLK_STS_RESOURCE; |
1855 | goto out_dec_host_busy; |
1856 | } |
1857 | |
1858 | return BLK_STS_OK; |
1859 | |
1860 | out_dec_host_busy: |
1861 | scsi_dec_host_busy(shost, cmd); |
1862 | out_dec_target_busy: |
1863 | if (scsi_target(sdev)->can_queue > 0) |
		atomic_dec(&scsi_target(sdev)->target_busy);
1865 | out_put_budget: |
	scsi_mq_put_budget(q, cmd->budget_token);
1867 | cmd->budget_token = -1; |
1868 | switch (ret) { |
1869 | case BLK_STS_OK: |
1870 | break; |
1871 | case BLK_STS_RESOURCE: |
1872 | case BLK_STS_ZONE_RESOURCE: |
1873 | if (scsi_device_blocked(sdev)) |
1874 | ret = BLK_STS_DEV_RESOURCE; |
1875 | break; |
1876 | case BLK_STS_AGAIN: |
1877 | cmd->result = DID_BUS_BUSY << 16; |
1878 | if (req->rq_flags & RQF_DONTPREP) |
1879 | scsi_mq_uninit_cmd(cmd); |
1880 | break; |
1881 | default: |
1882 | if (unlikely(!scsi_device_online(sdev))) |
1883 | cmd->result = DID_NO_CONNECT << 16; |
1884 | else |
1885 | cmd->result = DID_ERROR << 16; |
1886 | /* |
1887 | * Make sure to release all allocated resources when |
1888 | * we hit an error, as we will never see this command |
1889 | * again. |
1890 | */ |
1891 | if (req->rq_flags & RQF_DONTPREP) |
1892 | scsi_mq_uninit_cmd(cmd); |
1893 | scsi_run_queue_async(sdev); |
1894 | break; |
1895 | } |
1896 | return ret; |
1897 | } |
1898 | |
1899 | static int scsi_mq_init_request(struct blk_mq_tag_set *set, struct request *rq, |
1900 | unsigned int hctx_idx, unsigned int numa_node) |
1901 | { |
1902 | struct Scsi_Host *shost = set->driver_data; |
1903 | struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); |
1904 | struct scatterlist *sg; |
1905 | int ret = 0; |
1906 | |
1907 | cmd->sense_buffer = |
		kmem_cache_alloc_node(scsi_sense_cache, GFP_KERNEL, numa_node);
1909 | if (!cmd->sense_buffer) |
1910 | return -ENOMEM; |
1911 | |
1912 | if (scsi_host_get_prot(shost)) { |
1913 | sg = (void *)cmd + sizeof(struct scsi_cmnd) + |
1914 | shost->hostt->cmd_size; |
1915 | cmd->prot_sdb = (void *)sg + scsi_mq_inline_sgl_size(shost); |
1916 | } |
1917 | |
1918 | if (shost->hostt->init_cmd_priv) { |
1919 | ret = shost->hostt->init_cmd_priv(shost, cmd); |
1920 | if (ret < 0) |
			kmem_cache_free(scsi_sense_cache, cmd->sense_buffer);
1922 | } |
1923 | |
1924 | return ret; |
1925 | } |
1926 | |
1927 | static void scsi_mq_exit_request(struct blk_mq_tag_set *set, struct request *rq, |
1928 | unsigned int hctx_idx) |
1929 | { |
1930 | struct Scsi_Host *shost = set->driver_data; |
1931 | struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq); |
1932 | |
1933 | if (shost->hostt->exit_cmd_priv) |
1934 | shost->hostt->exit_cmd_priv(shost, cmd); |
	kmem_cache_free(scsi_sense_cache, cmd->sense_buffer);
1936 | } |
1937 | |
1938 | |
1939 | static int scsi_mq_poll(struct blk_mq_hw_ctx *hctx, struct io_comp_batch *iob) |
1940 | { |
1941 | struct Scsi_Host *shost = hctx->driver_data; |
1942 | |
1943 | if (shost->hostt->mq_poll) |
1944 | return shost->hostt->mq_poll(shost, hctx->queue_num); |
1945 | |
1946 | return 0; |
1947 | } |
1948 | |
1949 | static int scsi_init_hctx(struct blk_mq_hw_ctx *hctx, void *data, |
1950 | unsigned int hctx_idx) |
1951 | { |
1952 | struct Scsi_Host *shost = data; |
1953 | |
1954 | hctx->driver_data = shost; |
1955 | return 0; |
1956 | } |
1957 | |
1958 | static void scsi_map_queues(struct blk_mq_tag_set *set) |
1959 | { |
1960 | struct Scsi_Host *shost = container_of(set, struct Scsi_Host, tag_set); |
1961 | |
1962 | if (shost->hostt->map_queues) |
1963 | return shost->hostt->map_queues(shost); |
	blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
1965 | } |
1966 | |
1967 | void __scsi_init_queue(struct Scsi_Host *shost, struct request_queue *q) |
1968 | { |
1969 | struct device *dev = shost->dma_dev; |
1970 | |
1971 | /* |
1972 | * this limit is imposed by hardware restrictions |
1973 | */ |
1974 | blk_queue_max_segments(q, min_t(unsigned short, shost->sg_tablesize, |
1975 | SG_MAX_SEGMENTS)); |
1976 | |
1977 | if (scsi_host_prot_dma(shost)) { |
1978 | shost->sg_prot_tablesize = |
1979 | min_not_zero(shost->sg_prot_tablesize, |
1980 | (unsigned short)SCSI_MAX_PROT_SG_SEGMENTS); |
1981 | BUG_ON(shost->sg_prot_tablesize < shost->sg_tablesize); |
		blk_queue_max_integrity_segments(q, shost->sg_prot_tablesize);
1983 | } |
1984 | |
1985 | blk_queue_max_hw_sectors(q, shost->max_sectors); |
1986 | blk_queue_segment_boundary(q, shost->dma_boundary); |
	dma_set_seg_boundary(dev, shost->dma_boundary);
1988 | |
1989 | blk_queue_max_segment_size(q, shost->max_segment_size); |
1990 | blk_queue_virt_boundary(q, shost->virt_boundary_mask); |
	dma_set_max_seg_size(dev, queue_max_segment_size(q));
1992 | |
1993 | /* |
1994 | * Set a reasonable default alignment: The larger of 32-byte (dword), |
1995 | * which is a common minimum for HBAs, and the minimum DMA alignment, |
1996 | * which is set by the platform. |
1997 | * |
1998 | * Devices that require a bigger alignment can increase it later. |
1999 | */ |
2000 | blk_queue_dma_alignment(q, max(4, dma_get_cache_alignment()) - 1); |
2001 | } |
2002 | EXPORT_SYMBOL_GPL(__scsi_init_queue); |
2003 | |
2004 | static const struct blk_mq_ops scsi_mq_ops_no_commit = { |
2005 | .get_budget = scsi_mq_get_budget, |
2006 | .put_budget = scsi_mq_put_budget, |
2007 | .queue_rq = scsi_queue_rq, |
2008 | .complete = scsi_complete, |
2009 | .timeout = scsi_timeout, |
2010 | #ifdef CONFIG_BLK_DEBUG_FS |
2011 | .show_rq = scsi_show_rq, |
2012 | #endif |
2013 | .init_request = scsi_mq_init_request, |
2014 | .exit_request = scsi_mq_exit_request, |
2015 | .cleanup_rq = scsi_cleanup_rq, |
2016 | .busy = scsi_mq_lld_busy, |
2017 | .map_queues = scsi_map_queues, |
2018 | .init_hctx = scsi_init_hctx, |
2019 | .poll = scsi_mq_poll, |
2020 | .set_rq_budget_token = scsi_mq_set_rq_budget_token, |
2021 | .get_rq_budget_token = scsi_mq_get_rq_budget_token, |
2022 | }; |
2023 | |
2024 | |
2025 | static void scsi_commit_rqs(struct blk_mq_hw_ctx *hctx) |
2026 | { |
2027 | struct Scsi_Host *shost = hctx->driver_data; |
2028 | |
2029 | shost->hostt->commit_rqs(shost, hctx->queue_num); |
2030 | } |
2031 | |
2032 | static const struct blk_mq_ops scsi_mq_ops = { |
2033 | .get_budget = scsi_mq_get_budget, |
2034 | .put_budget = scsi_mq_put_budget, |
2035 | .queue_rq = scsi_queue_rq, |
2036 | .commit_rqs = scsi_commit_rqs, |
2037 | .complete = scsi_complete, |
2038 | .timeout = scsi_timeout, |
2039 | #ifdef CONFIG_BLK_DEBUG_FS |
2040 | .show_rq = scsi_show_rq, |
2041 | #endif |
2042 | .init_request = scsi_mq_init_request, |
2043 | .exit_request = scsi_mq_exit_request, |
2044 | .cleanup_rq = scsi_cleanup_rq, |
2045 | .busy = scsi_mq_lld_busy, |
2046 | .map_queues = scsi_map_queues, |
2047 | .init_hctx = scsi_init_hctx, |
2048 | .poll = scsi_mq_poll, |
2049 | .set_rq_budget_token = scsi_mq_set_rq_budget_token, |
2050 | .get_rq_budget_token = scsi_mq_get_rq_budget_token, |
2051 | }; |
2052 | |
2053 | int scsi_mq_setup_tags(struct Scsi_Host *shost) |
2054 | { |
2055 | unsigned int cmd_size, sgl_size; |
2056 | struct blk_mq_tag_set *tag_set = &shost->tag_set; |
2057 | |
2058 | sgl_size = max_t(unsigned int, sizeof(struct scatterlist), |
2059 | scsi_mq_inline_sgl_size(shost)); |
2060 | cmd_size = sizeof(struct scsi_cmnd) + shost->hostt->cmd_size + sgl_size; |
2061 | if (scsi_host_get_prot(shost)) |
2062 | cmd_size += sizeof(struct scsi_data_buffer) + |
2063 | sizeof(struct scatterlist) * SCSI_INLINE_PROT_SG_CNT; |
2064 | |
2065 | memset(tag_set, 0, sizeof(*tag_set)); |
2066 | if (shost->hostt->commit_rqs) |
2067 | tag_set->ops = &scsi_mq_ops; |
2068 | else |
2069 | tag_set->ops = &scsi_mq_ops_no_commit; |
2070 | tag_set->nr_hw_queues = shost->nr_hw_queues ? : 1; |
2071 | tag_set->nr_maps = shost->nr_maps ? : 1; |
2072 | tag_set->queue_depth = shost->can_queue; |
2073 | tag_set->cmd_size = cmd_size; |
	tag_set->numa_node = dev_to_node(shost->dma_dev);
2075 | tag_set->flags = BLK_MQ_F_SHOULD_MERGE; |
2076 | tag_set->flags |= |
2077 | BLK_ALLOC_POLICY_TO_MQ_FLAG(shost->hostt->tag_alloc_policy); |
2078 | if (shost->queuecommand_may_block) |
2079 | tag_set->flags |= BLK_MQ_F_BLOCKING; |
2080 | tag_set->driver_data = shost; |
2081 | if (shost->host_tagset) |
2082 | tag_set->flags |= BLK_MQ_F_TAG_HCTX_SHARED; |
2083 | |
	return blk_mq_alloc_tag_set(tag_set);
2085 | } |
2086 | |
2087 | void scsi_mq_free_tags(struct kref *kref) |
2088 | { |
2089 | struct Scsi_Host *shost = container_of(kref, typeof(*shost), |
2090 | tagset_refcnt); |
2091 | |
	blk_mq_free_tag_set(&shost->tag_set);
2093 | complete(&shost->tagset_freed); |
2094 | } |
2095 | |
2096 | /** |
2097 | * scsi_device_from_queue - return sdev associated with a request_queue |
2098 | * @q: The request queue to return the sdev from |
2099 | * |
2100 | * Return the sdev associated with a request queue or NULL if the |
2101 | * request_queue does not reference a SCSI device. |
2102 | */ |
2103 | struct scsi_device *scsi_device_from_queue(struct request_queue *q) |
2104 | { |
2105 | struct scsi_device *sdev = NULL; |
2106 | |
2107 | if (q->mq_ops == &scsi_mq_ops_no_commit || |
2108 | q->mq_ops == &scsi_mq_ops) |
2109 | sdev = q->queuedata; |
	if (!sdev || !get_device(&sdev->sdev_gendev))
2111 | sdev = NULL; |
2112 | |
2113 | return sdev; |
2114 | } |
2115 | /* |
2116 | * pktcdvd should have been integrated into the SCSI layers, but for historical |
2117 | * reasons like the old IDE driver it isn't. This export allows it to safely |
2118 | * probe if a given device is a SCSI one and only attach to that. |
2119 | */ |
2120 | #ifdef CONFIG_CDROM_PKTCDVD_MODULE |
2121 | EXPORT_SYMBOL_GPL(scsi_device_from_queue); |
2122 | #endif |
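
/*
 * Illustrative sketch (assumption, not code used by this file): a caller such
 * as pktcdvd can use scsi_device_from_queue() to probe whether a request
 * queue belongs to a SCSI device. The helper name below is made up for the
 * example; the reference taken via get_device() on sdev_gendev is dropped
 * with put_device() once the caller is done with the device.
 *
 *	static bool example_queue_is_scsi(struct request_queue *q)
 *	{
 *		struct scsi_device *sdev = scsi_device_from_queue(q);
 *
 *		if (!sdev)
 *			return false;
 *		put_device(&sdev->sdev_gendev);
 *		return true;
 *	}
 */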
2123 | |
2124 | /** |
2125 | * scsi_block_requests - Utility function used by low-level drivers to prevent |
2126 | * further commands from being queued to the device. |
2127 | * @shost: host in question |
2128 | * |
2129 | * There is no timer nor any other means by which the requests get unblocked |
2130 | * other than the low-level driver calling scsi_unblock_requests(). |
2131 | */ |
2132 | void scsi_block_requests(struct Scsi_Host *shost) |
2133 | { |
2134 | shost->host_self_blocked = 1; |
2135 | } |
2136 | EXPORT_SYMBOL(scsi_block_requests); |
2137 | |
2138 | /** |
2139 | * scsi_unblock_requests - Utility function used by low-level drivers to allow |
2140 | * further commands to be queued to the device. |
2141 | * @shost: host in question |
2142 | * |
2143 | * There is no timer nor any other means by which the requests get unblocked |
2144 | * other than the low-level driver calling scsi_unblock_requests(). This is done |
2145 | * as an API function so that changes to the internals of the scsi mid-layer |
2146 | * won't require wholesale changes to drivers that use this feature. |
2147 | */ |
2148 | void scsi_unblock_requests(struct Scsi_Host *shost) |
2149 | { |
2150 | shost->host_self_blocked = 0; |
2151 | scsi_run_host_queues(shost); |
2152 | } |
2153 | EXPORT_SYMBOL(scsi_unblock_requests); |
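
/*
 * Illustrative sketch (hypothetical LLD code, not part of this file): the two
 * helpers above are intended to be used as a pair around a window in which a
 * low-level driver cannot accept new commands, for example while it resets or
 * reprograms its adapter. The function name is made up for the example.
 *
 *	static void example_lld_reload_firmware(struct Scsi_Host *shost)
 *	{
 *		scsi_block_requests(shost);
 *		... quiesce the hardware, download new firmware ...
 *		scsi_unblock_requests(shost);
 *	}
 */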
2154 | |
2155 | void scsi_exit_queue(void) |
2156 | { |
	kmem_cache_destroy(scsi_sense_cache);
2158 | } |
2159 | |
2160 | /** |
2161 | * scsi_mode_select - issue a mode select |
2162 | * @sdev: SCSI device to be queried |
2163 | * @pf: Page format bit (1 == standard, 0 == vendor specific) |
2164 | * @sp: Save page bit (0 == don't save, 1 == save) |
2165 | * @buffer: request buffer (may not be smaller than eight bytes) |
2166 | * @len: length of request buffer. |
2167 | * @timeout: command timeout |
2168 | * @retries: number of retries before failing |
2169 | * @data: returns a structure abstracting the mode header data |
2170 | * @sshdr: place to put sense data (or NULL if no sense to be collected). |
2171 | * must be SCSI_SENSE_BUFFERSIZE big. |
2172 | * |
2173 | * Returns zero if successful; negative error number or scsi |
2174 | * status on error |
2175 | * |
2176 | */ |
2177 | int scsi_mode_select(struct scsi_device *sdev, int pf, int sp, |
2178 | unsigned char *buffer, int len, int timeout, int retries, |
2179 | struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) |
2180 | { |
2181 | unsigned char cmd[10]; |
2182 | unsigned char *real_buffer; |
2183 | const struct scsi_exec_args exec_args = { |
2184 | .sshdr = sshdr, |
2185 | }; |
2186 | int ret; |
2187 | |
2188 | memset(cmd, 0, sizeof(cmd)); |
2189 | cmd[1] = (pf ? 0x10 : 0) | (sp ? 0x01 : 0); |
2190 | |
2191 | /* |
2192 | * Use MODE SELECT(10) if the device asked for it or if the mode page |
	 * and the mode select header cannot fit within the maximum 255 bytes
2194 | * of the MODE SELECT(6) command. |
2195 | */ |
2196 | if (sdev->use_10_for_ms || |
2197 | len + 4 > 255 || |
2198 | data->block_descriptor_length > 255) { |
2199 | if (len > 65535 - 8) |
2200 | return -EINVAL; |
		real_buffer = kmalloc(8 + len, GFP_KERNEL);
2202 | if (!real_buffer) |
2203 | return -ENOMEM; |
2204 | memcpy(real_buffer + 8, buffer, len); |
2205 | len += 8; |
2206 | real_buffer[0] = 0; |
2207 | real_buffer[1] = 0; |
2208 | real_buffer[2] = data->medium_type; |
2209 | real_buffer[3] = data->device_specific; |
2210 | real_buffer[4] = data->longlba ? 0x01 : 0; |
2211 | real_buffer[5] = 0; |
		put_unaligned_be16(data->block_descriptor_length,
				   &real_buffer[6]);
2214 | |
2215 | cmd[0] = MODE_SELECT_10; |
		put_unaligned_be16(len, &cmd[7]);
2217 | } else { |
2218 | if (data->longlba) |
2219 | return -EINVAL; |
2220 | |
		real_buffer = kmalloc(4 + len, GFP_KERNEL);
2222 | if (!real_buffer) |
2223 | return -ENOMEM; |
2224 | memcpy(real_buffer + 4, buffer, len); |
2225 | len += 4; |
2226 | real_buffer[0] = 0; |
2227 | real_buffer[1] = data->medium_type; |
2228 | real_buffer[2] = data->device_specific; |
2229 | real_buffer[3] = data->block_descriptor_length; |
2230 | |
2231 | cmd[0] = MODE_SELECT; |
2232 | cmd[4] = len; |
2233 | } |
2234 | |
2235 | ret = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_OUT, real_buffer, len, |
2236 | timeout, retries, &exec_args); |
	kfree(real_buffer);
2238 | return ret; |
2239 | } |
2240 | EXPORT_SYMBOL_GPL(scsi_mode_select); |
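
/*
 * Illustrative sketch (hypothetical caller, not part of this file): writing
 * back a mode page that was previously fetched with scsi_mode_sense(). The
 * page contents, timeout and retry count are assumptions of the example; only
 * the scsi_mode_select() calling convention comes from the function above.
 *
 *	struct scsi_mode_data data = { };
 *	struct scsi_sense_hdr sshdr;
 *	unsigned char page[32];		(filled in by the caller)
 *	int ret;
 *
 *	ret = scsi_mode_select(sdev, 1, 0, page, sizeof(page),
 *			       30 * HZ, 3, &data, &sshdr);
 *	if (ret)
 *		... negative errno or SCSI status; &sshdr may hold sense ...
 */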
2241 | |
2242 | /** |
2243 | * scsi_mode_sense - issue a mode sense, falling back from 10 to six bytes if necessary. |
2244 | * @sdev: SCSI device to be queried |
2245 | * @dbd: set to prevent mode sense from returning block descriptors |
2246 | * @modepage: mode page being requested |
2247 | * @subpage: sub-page of the mode page being requested |
2248 | * @buffer: request buffer (may not be smaller than eight bytes) |
2249 | * @len: length of request buffer. |
2250 | * @timeout: command timeout |
2251 | * @retries: number of retries before failing |
2252 | * @data: returns a structure abstracting the mode header data |
2253 | * @sshdr: place to put sense data (or NULL if no sense to be collected). |
2254 | * must be SCSI_SENSE_BUFFERSIZE big. |
2255 | * |
2256 | * Returns zero if successful, or a negative error number on failure |
2257 | */ |
2258 | int |
2259 | scsi_mode_sense(struct scsi_device *sdev, int dbd, int modepage, int subpage, |
2260 | unsigned char *buffer, int len, int timeout, int retries, |
2261 | struct scsi_mode_data *data, struct scsi_sense_hdr *sshdr) |
2262 | { |
2263 | unsigned char cmd[12]; |
2264 | int use_10_for_ms; |
	int header_length;
2266 | int result; |
2267 | struct scsi_sense_hdr my_sshdr; |
2268 | struct scsi_failure failure_defs[] = { |
2269 | { |
2270 | .sense = UNIT_ATTENTION, |
2271 | .asc = SCMD_FAILURE_ASC_ANY, |
2272 | .ascq = SCMD_FAILURE_ASCQ_ANY, |
2273 | .allowed = retries, |
2274 | .result = SAM_STAT_CHECK_CONDITION, |
2275 | }, |
2276 | {} |
2277 | }; |
2278 | struct scsi_failures failures = { |
2279 | .failure_definitions = failure_defs, |
2280 | }; |
2281 | const struct scsi_exec_args exec_args = { |
2282 | /* caller might not be interested in sense, but we need it */ |
2283 | .sshdr = sshdr ? : &my_sshdr, |
2284 | .failures = &failures, |
2285 | }; |
2286 | |
2287 | memset(data, 0, sizeof(*data)); |
2288 | memset(&cmd[0], 0, 12); |
2289 | |
2290 | dbd = sdev->set_dbd_for_ms ? 8 : dbd; |
2291 | cmd[1] = dbd & 0x18; /* allows DBD and LLBA bits */ |
2292 | cmd[2] = modepage; |
2293 | cmd[3] = subpage; |
2294 | |
2295 | sshdr = exec_args.sshdr; |
2296 | |
2297 | retry: |
2298 | use_10_for_ms = sdev->use_10_for_ms || len > 255; |
2299 | |
2300 | if (use_10_for_ms) { |
2301 | if (len < 8 || len > 65535) |
2302 | return -EINVAL; |
2303 | |
2304 | cmd[0] = MODE_SENSE_10; |
		put_unaligned_be16(len, &cmd[7]);
2306 | header_length = 8; |
2307 | } else { |
2308 | if (len < 4) |
2309 | return -EINVAL; |
2310 | |
2311 | cmd[0] = MODE_SENSE; |
2312 | cmd[4] = len; |
2313 | header_length = 4; |
2314 | } |
2315 | |
2316 | memset(buffer, 0, len); |
2317 | |
2318 | result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, buffer, len, |
2319 | timeout, retries, &exec_args); |
2320 | if (result < 0) |
2321 | return result; |
2322 | |
2323 | /* This code looks awful: what it's doing is making sure an |
2324 | * ILLEGAL REQUEST sense return identifies the actual command |
2325 | * byte as the problem. MODE_SENSE commands can return |
2326 | * ILLEGAL REQUEST if the code page isn't supported */ |
2327 | |
	if (!scsi_status_is_good(result)) {
2329 | if (scsi_sense_valid(sshdr)) { |
2330 | if ((sshdr->sense_key == ILLEGAL_REQUEST) && |
2331 | (sshdr->asc == 0x20) && (sshdr->ascq == 0)) { |
2332 | /* |
2333 | * Invalid command operation code: retry using |
2334 | * MODE SENSE(6) if this was a MODE SENSE(10) |
2335 | * request, except if the request mode page is |
2336 | * too large for MODE SENSE single byte |
2337 | * allocation length field. |
2338 | */ |
2339 | if (use_10_for_ms) { |
2340 | if (len > 255) |
2341 | return -EIO; |
2342 | sdev->use_10_for_ms = 0; |
2343 | goto retry; |
2344 | } |
2345 | } |
2346 | } |
2347 | return -EIO; |
2348 | } |
2349 | if (unlikely(buffer[0] == 0x86 && buffer[1] == 0x0b && |
2350 | (modepage == 6 || modepage == 8))) { |
2351 | /* Initio breakage? */ |
2352 | header_length = 0; |
2353 | data->length = 13; |
2354 | data->medium_type = 0; |
2355 | data->device_specific = 0; |
2356 | data->longlba = 0; |
2357 | data->block_descriptor_length = 0; |
2358 | } else if (use_10_for_ms) { |
		data->length = get_unaligned_be16(&buffer[0]) + 2;
2360 | data->medium_type = buffer[2]; |
2361 | data->device_specific = buffer[3]; |
2362 | data->longlba = buffer[4] & 0x01; |
		data->block_descriptor_length = get_unaligned_be16(&buffer[6]);
2364 | } else { |
2365 | data->length = buffer[0] + 1; |
2366 | data->medium_type = buffer[1]; |
2367 | data->device_specific = buffer[2]; |
2368 | data->block_descriptor_length = buffer[3]; |
2369 | } |
2370 | data->header_length = header_length; |
2371 | |
2372 | return 0; |
2373 | } |
2374 | EXPORT_SYMBOL(scsi_mode_sense); |
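
/*
 * Illustrative sketch (hypothetical caller, not part of this file): reading
 * the Caching mode page (0x08) with dbd == 0, i.e. block descriptors allowed.
 * The buffer size, timeout and retry count are example values only.
 *
 *	struct scsi_mode_data data;
 *	struct scsi_sense_hdr sshdr;
 *	unsigned char buffer[128];
 *
 *	if (!scsi_mode_sense(sdev, 0, 0x08, 0, buffer, sizeof(buffer),
 *			     30 * HZ, 3, &data, &sshdr))
 *		... the page starts at buffer[data.header_length +
 *		    data.block_descriptor_length] ...
 */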
2375 | |
2376 | /** |
2377 | * scsi_test_unit_ready - test if unit is ready |
2378 | * @sdev: scsi device to change the state of. |
2379 | * @timeout: command timeout |
2380 | * @retries: number of retries before failing |
 * @sshdr: output pointer for decoded sense information.
2382 | * |
 * Returns zero if successful or an error if the TUR failed. For
2384 | * removable media, UNIT_ATTENTION sets ->changed flag. |
2385 | **/ |
2386 | int |
2387 | scsi_test_unit_ready(struct scsi_device *sdev, int timeout, int retries, |
2388 | struct scsi_sense_hdr *sshdr) |
2389 | { |
2390 | char cmd[] = { |
2391 | TEST_UNIT_READY, 0, 0, 0, 0, 0, |
2392 | }; |
2393 | const struct scsi_exec_args exec_args = { |
2394 | .sshdr = sshdr, |
2395 | }; |
2396 | int result; |
2397 | |
2398 | /* try to eat the UNIT_ATTENTION if there are enough retries */ |
2399 | do { |
2400 | result = scsi_execute_cmd(sdev, cmd, REQ_OP_DRV_IN, NULL, 0, |
2401 | timeout, 1, &exec_args); |
2402 | if (sdev->removable && result > 0 && scsi_sense_valid(sshdr) && |
2403 | sshdr->sense_key == UNIT_ATTENTION) |
2404 | sdev->changed = 1; |
2405 | } while (result > 0 && scsi_sense_valid(sshdr) && |
2406 | sshdr->sense_key == UNIT_ATTENTION && --retries); |
2407 | |
2408 | return result; |
2409 | } |
2410 | EXPORT_SYMBOL(scsi_test_unit_ready); |
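
/*
 * Illustrative sketch (hypothetical caller, not part of this file): polling
 * whether removable media is present, roughly as the disc and disk drivers
 * do. The timeout and retry count are example values.
 *
 *	struct scsi_sense_hdr sshdr;
 *
 *	if (scsi_test_unit_ready(sdev, 30 * HZ, 3, &sshdr) == 0)
 *		... the unit reported ready ...
 *	else if (scsi_sense_valid(&sshdr) && sshdr.sense_key == NOT_READY)
 *		... medium absent or the device is still becoming ready ...
 */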
2411 | |
2412 | /** |
2413 | * scsi_device_set_state - Take the given device through the device state model. |
2414 | * @sdev: scsi device to change the state of. |
2415 | * @state: state to change to. |
2416 | * |
2417 | * Returns zero if successful or an error if the requested |
2418 | * transition is illegal. |
2419 | */ |
2420 | int |
2421 | scsi_device_set_state(struct scsi_device *sdev, enum scsi_device_state state) |
2422 | { |
2423 | enum scsi_device_state oldstate = sdev->sdev_state; |
2424 | |
2425 | if (state == oldstate) |
2426 | return 0; |
2427 | |
2428 | switch (state) { |
2429 | case SDEV_CREATED: |
2430 | switch (oldstate) { |
2431 | case SDEV_CREATED_BLOCK: |
2432 | break; |
2433 | default: |
2434 | goto illegal; |
2435 | } |
2436 | break; |
2437 | |
2438 | case SDEV_RUNNING: |
2439 | switch (oldstate) { |
2440 | case SDEV_CREATED: |
2441 | case SDEV_OFFLINE: |
2442 | case SDEV_TRANSPORT_OFFLINE: |
2443 | case SDEV_QUIESCE: |
2444 | case SDEV_BLOCK: |
2445 | break; |
2446 | default: |
2447 | goto illegal; |
2448 | } |
2449 | break; |
2450 | |
2451 | case SDEV_QUIESCE: |
2452 | switch (oldstate) { |
2453 | case SDEV_RUNNING: |
2454 | case SDEV_OFFLINE: |
2455 | case SDEV_TRANSPORT_OFFLINE: |
2456 | break; |
2457 | default: |
2458 | goto illegal; |
2459 | } |
2460 | break; |
2461 | |
2462 | case SDEV_OFFLINE: |
2463 | case SDEV_TRANSPORT_OFFLINE: |
2464 | switch (oldstate) { |
2465 | case SDEV_CREATED: |
2466 | case SDEV_RUNNING: |
2467 | case SDEV_QUIESCE: |
2468 | case SDEV_BLOCK: |
2469 | break; |
2470 | default: |
2471 | goto illegal; |
2472 | } |
2473 | break; |
2474 | |
2475 | case SDEV_BLOCK: |
2476 | switch (oldstate) { |
2477 | case SDEV_RUNNING: |
2478 | case SDEV_CREATED_BLOCK: |
2479 | case SDEV_QUIESCE: |
2480 | case SDEV_OFFLINE: |
2481 | break; |
2482 | default: |
2483 | goto illegal; |
2484 | } |
2485 | break; |
2486 | |
2487 | case SDEV_CREATED_BLOCK: |
2488 | switch (oldstate) { |
2489 | case SDEV_CREATED: |
2490 | break; |
2491 | default: |
2492 | goto illegal; |
2493 | } |
2494 | break; |
2495 | |
2496 | case SDEV_CANCEL: |
2497 | switch (oldstate) { |
2498 | case SDEV_CREATED: |
2499 | case SDEV_RUNNING: |
2500 | case SDEV_QUIESCE: |
2501 | case SDEV_OFFLINE: |
2502 | case SDEV_TRANSPORT_OFFLINE: |
2503 | break; |
2504 | default: |
2505 | goto illegal; |
2506 | } |
2507 | break; |
2508 | |
2509 | case SDEV_DEL: |
2510 | switch (oldstate) { |
2511 | case SDEV_CREATED: |
2512 | case SDEV_RUNNING: |
2513 | case SDEV_OFFLINE: |
2514 | case SDEV_TRANSPORT_OFFLINE: |
2515 | case SDEV_CANCEL: |
2516 | case SDEV_BLOCK: |
2517 | case SDEV_CREATED_BLOCK: |
2518 | break; |
2519 | default: |
2520 | goto illegal; |
2521 | } |
2522 | break; |
2523 | |
2524 | } |
2525 | sdev->offline_already = false; |
2526 | sdev->sdev_state = state; |
2527 | return 0; |
2528 | |
2529 | illegal: |
2530 | SCSI_LOG_ERROR_RECOVERY(1, |
2531 | sdev_printk(KERN_ERR, sdev, |
2532 | "Illegal state transition %s->%s" , |
2533 | scsi_device_state_name(oldstate), |
2534 | scsi_device_state_name(state)) |
2535 | ); |
2536 | return -EINVAL; |
2537 | } |
2538 | EXPORT_SYMBOL(scsi_device_set_state); |
2539 | |
2540 | /** |
2541 | * scsi_evt_emit - emit a single SCSI device uevent |
2542 | * @sdev: associated SCSI device |
2543 | * @evt: event to emit |
2544 | * |
2545 | * Send a single uevent (scsi_event) to the associated scsi_device. |
2546 | */ |
2547 | static void scsi_evt_emit(struct scsi_device *sdev, struct scsi_event *evt) |
2548 | { |
2549 | int idx = 0; |
2550 | char *envp[3]; |
2551 | |
2552 | switch (evt->evt_type) { |
2553 | case SDEV_EVT_MEDIA_CHANGE: |
2554 | envp[idx++] = "SDEV_MEDIA_CHANGE=1" ; |
2555 | break; |
2556 | case SDEV_EVT_INQUIRY_CHANGE_REPORTED: |
2557 | scsi_rescan_device(sdev); |
2558 | envp[idx++] = "SDEV_UA=INQUIRY_DATA_HAS_CHANGED" ; |
2559 | break; |
2560 | case SDEV_EVT_CAPACITY_CHANGE_REPORTED: |
2561 | envp[idx++] = "SDEV_UA=CAPACITY_DATA_HAS_CHANGED" ; |
2562 | break; |
2563 | case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED: |
2564 | envp[idx++] = "SDEV_UA=THIN_PROVISIONING_SOFT_THRESHOLD_REACHED" ; |
2565 | break; |
2566 | case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED: |
2567 | envp[idx++] = "SDEV_UA=MODE_PARAMETERS_CHANGED" ; |
2568 | break; |
2569 | case SDEV_EVT_LUN_CHANGE_REPORTED: |
2570 | envp[idx++] = "SDEV_UA=REPORTED_LUNS_DATA_HAS_CHANGED" ; |
2571 | break; |
2572 | case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED: |
2573 | envp[idx++] = "SDEV_UA=ASYMMETRIC_ACCESS_STATE_CHANGED" ; |
2574 | break; |
2575 | case SDEV_EVT_POWER_ON_RESET_OCCURRED: |
2576 | envp[idx++] = "SDEV_UA=POWER_ON_RESET_OCCURRED" ; |
2577 | break; |
2578 | default: |
2579 | /* do nothing */ |
2580 | break; |
2581 | } |
2582 | |
2583 | envp[idx++] = NULL; |
2584 | |
	kobject_uevent_env(&sdev->sdev_gendev.kobj, KOBJ_CHANGE, envp);
2586 | } |
2587 | |
2588 | /** |
2589 | * scsi_evt_thread - send a uevent for each scsi event |
2590 | * @work: work struct for scsi_device |
2591 | * |
2592 | * Dispatch queued events to their associated scsi_device kobjects |
2593 | * as uevents. |
2594 | */ |
2595 | void scsi_evt_thread(struct work_struct *work) |
2596 | { |
2597 | struct scsi_device *sdev; |
2598 | enum scsi_device_event evt_type; |
2599 | LIST_HEAD(event_list); |
2600 | |
2601 | sdev = container_of(work, struct scsi_device, event_work); |
2602 | |
2603 | for (evt_type = SDEV_EVT_FIRST; evt_type <= SDEV_EVT_LAST; evt_type++) |
		if (test_and_clear_bit(evt_type, sdev->pending_events))
2605 | sdev_evt_send_simple(sdev, evt_type, GFP_KERNEL); |
2606 | |
2607 | while (1) { |
2608 | struct scsi_event *evt; |
2609 | struct list_head *this, *tmp; |
2610 | unsigned long flags; |
2611 | |
2612 | spin_lock_irqsave(&sdev->list_lock, flags); |
		list_splice_init(&sdev->event_list, &event_list);
		spin_unlock_irqrestore(&sdev->list_lock, flags);
2615 | |
		if (list_empty(&event_list))
2617 | break; |
2618 | |
2619 | list_for_each_safe(this, tmp, &event_list) { |
2620 | evt = list_entry(this, struct scsi_event, node); |
			list_del(&evt->node);
2622 | scsi_evt_emit(sdev, evt); |
			kfree(evt);
2624 | } |
2625 | } |
2626 | } |
2627 | |
2628 | /** |
2629 | * sdev_evt_send - send asserted event to uevent thread |
2630 | * @sdev: scsi_device event occurred on |
2631 | * @evt: event to send |
2632 | * |
2633 | * Assert scsi device event asynchronously. |
2634 | */ |
2635 | void sdev_evt_send(struct scsi_device *sdev, struct scsi_event *evt) |
2636 | { |
2637 | unsigned long flags; |
2638 | |
2639 | #if 0 |
2640 | /* FIXME: currently this check eliminates all media change events |
2641 | * for polled devices. Need to update to discriminate between AN |
2642 | * and polled events */ |
2643 | if (!test_bit(evt->evt_type, sdev->supported_events)) { |
2644 | kfree(evt); |
2645 | return; |
2646 | } |
2647 | #endif |
2648 | |
2649 | spin_lock_irqsave(&sdev->list_lock, flags); |
	list_add_tail(&evt->node, &sdev->event_list);
	schedule_work(&sdev->event_work);
	spin_unlock_irqrestore(&sdev->list_lock, flags);
2653 | } |
2654 | EXPORT_SYMBOL_GPL(sdev_evt_send); |
2655 | |
2656 | /** |
2657 | * sdev_evt_alloc - allocate a new scsi event |
2658 | * @evt_type: type of event to allocate |
2659 | * @gfpflags: GFP flags for allocation |
2660 | * |
2661 | * Allocates and returns a new scsi_event. |
2662 | */ |
2663 | struct scsi_event *sdev_evt_alloc(enum scsi_device_event evt_type, |
2664 | gfp_t gfpflags) |
2665 | { |
	struct scsi_event *evt = kzalloc(sizeof(struct scsi_event), gfpflags);
2667 | if (!evt) |
2668 | return NULL; |
2669 | |
2670 | evt->evt_type = evt_type; |
	INIT_LIST_HEAD(&evt->node);
2672 | |
2673 | /* evt_type-specific initialization, if any */ |
2674 | switch (evt_type) { |
2675 | case SDEV_EVT_MEDIA_CHANGE: |
2676 | case SDEV_EVT_INQUIRY_CHANGE_REPORTED: |
2677 | case SDEV_EVT_CAPACITY_CHANGE_REPORTED: |
2678 | case SDEV_EVT_SOFT_THRESHOLD_REACHED_REPORTED: |
2679 | case SDEV_EVT_MODE_PARAMETER_CHANGE_REPORTED: |
2680 | case SDEV_EVT_LUN_CHANGE_REPORTED: |
2681 | case SDEV_EVT_ALUA_STATE_CHANGE_REPORTED: |
2682 | case SDEV_EVT_POWER_ON_RESET_OCCURRED: |
2683 | default: |
2684 | /* do nothing */ |
2685 | break; |
2686 | } |
2687 | |
2688 | return evt; |
2689 | } |
2690 | EXPORT_SYMBOL_GPL(sdev_evt_alloc); |
2691 | |
2692 | /** |
2693 | * sdev_evt_send_simple - send asserted event to uevent thread |
2694 | * @sdev: scsi_device event occurred on |
2695 | * @evt_type: type of event to send |
2696 | * @gfpflags: GFP flags for allocation |
2697 | * |
2698 | * Assert scsi device event asynchronously, given an event type. |
2699 | */ |
2700 | void sdev_evt_send_simple(struct scsi_device *sdev, |
2701 | enum scsi_device_event evt_type, gfp_t gfpflags) |
2702 | { |
2703 | struct scsi_event *evt = sdev_evt_alloc(evt_type, gfpflags); |
2704 | if (!evt) { |
2705 | sdev_printk(KERN_ERR, sdev, "event %d eaten due to OOM\n" , |
2706 | evt_type); |
2707 | return; |
2708 | } |
2709 | |
2710 | sdev_evt_send(sdev, evt); |
2711 | } |
2712 | EXPORT_SYMBOL_GPL(sdev_evt_send_simple); |
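
/*
 * Illustrative sketch (hypothetical caller, not part of this file): a driver
 * that detects a media change can notify user space with a single call; the
 * event is queued on the device and emitted as a uevent from event_work.
 *
 *	sdev_evt_send_simple(sdev, SDEV_EVT_MEDIA_CHANGE, GFP_KERNEL);
 */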
2713 | |
2714 | /** |
2715 | * scsi_device_quiesce - Block all commands except power management. |
2716 | * @sdev: scsi device to quiesce. |
2717 | * |
2718 | * This works by trying to transition to the SDEV_QUIESCE state |
2719 | * (which must be a legal transition). When the device is in this |
2720 | * state, only power management requests will be accepted, all others will |
2721 | * be deferred. |
2722 | * |
2723 | * Must be called with user context, may sleep. |
2724 | * |
 * Returns zero if successful or an error if not.
2726 | */ |
2727 | int |
2728 | scsi_device_quiesce(struct scsi_device *sdev) |
2729 | { |
2730 | struct request_queue *q = sdev->request_queue; |
2731 | int err; |
2732 | |
2733 | /* |
2734 | * It is allowed to call scsi_device_quiesce() multiple times from |
2735 | * the same context but concurrent scsi_device_quiesce() calls are |
2736 | * not allowed. |
2737 | */ |
2738 | WARN_ON_ONCE(sdev->quiesced_by && sdev->quiesced_by != current); |
2739 | |
2740 | if (sdev->quiesced_by == current) |
2741 | return 0; |
2742 | |
2743 | blk_set_pm_only(q); |
2744 | |
2745 | blk_mq_freeze_queue(q); |
2746 | /* |
2747 | * Ensure that the effect of blk_set_pm_only() will be visible |
2748 | * for percpu_ref_tryget() callers that occur after the queue |
2749 | * unfreeze even if the queue was already frozen before this function |
2750 | * was called. See also https://lwn.net/Articles/573497/. |
2751 | */ |
2752 | synchronize_rcu(); |
2753 | blk_mq_unfreeze_queue(q); |
2754 | |
2755 | mutex_lock(&sdev->state_mutex); |
2756 | err = scsi_device_set_state(sdev, SDEV_QUIESCE); |
2757 | if (err == 0) |
2758 | sdev->quiesced_by = current; |
2759 | else |
2760 | blk_clear_pm_only(q); |
	mutex_unlock(&sdev->state_mutex);
2762 | |
2763 | return err; |
2764 | } |
2765 | EXPORT_SYMBOL(scsi_device_quiesce); |
2766 | |
2767 | /** |
2768 | * scsi_device_resume - Restart user issued commands to a quiesced device. |
2769 | * @sdev: scsi device to resume. |
2770 | * |
2771 | * Moves the device from quiesced back to running and restarts the |
2772 | * queues. |
2773 | * |
2774 | * Must be called with user context, may sleep. |
2775 | */ |
2776 | void scsi_device_resume(struct scsi_device *sdev) |
2777 | { |
2778 | /* check if the device state was mutated prior to resume, and if |
2779 | * so assume the state is being managed elsewhere (for example |
2780 | * device deleted during suspend) |
2781 | */ |
2782 | mutex_lock(&sdev->state_mutex); |
2783 | if (sdev->sdev_state == SDEV_QUIESCE) |
2784 | scsi_device_set_state(sdev, SDEV_RUNNING); |
2785 | if (sdev->quiesced_by) { |
2786 | sdev->quiesced_by = NULL; |
		blk_clear_pm_only(sdev->request_queue);
2788 | } |
	mutex_unlock(&sdev->state_mutex);
2790 | } |
2791 | EXPORT_SYMBOL(scsi_device_resume); |
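
/*
 * Illustrative sketch (hypothetical power-management caller, not part of this
 * file): scsi_device_quiesce() and scsi_device_resume() are meant to bracket
 * a section during which only power management requests may reach the device.
 *
 *	if (scsi_device_quiesce(sdev) == 0) {
 *		... issue power-management commands to sdev ...
 *		scsi_device_resume(sdev);
 *	}
 */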
2792 | |
2793 | static void |
2794 | device_quiesce_fn(struct scsi_device *sdev, void *data) |
2795 | { |
2796 | scsi_device_quiesce(sdev); |
2797 | } |
2798 | |
2799 | void |
2800 | scsi_target_quiesce(struct scsi_target *starget) |
2801 | { |
	starget_for_each_device(starget, NULL, device_quiesce_fn);
2803 | } |
2804 | EXPORT_SYMBOL(scsi_target_quiesce); |
2805 | |
2806 | static void |
2807 | device_resume_fn(struct scsi_device *sdev, void *data) |
2808 | { |
2809 | scsi_device_resume(sdev); |
2810 | } |
2811 | |
2812 | void |
2813 | scsi_target_resume(struct scsi_target *starget) |
2814 | { |
	starget_for_each_device(starget, NULL, device_resume_fn);
2816 | } |
2817 | EXPORT_SYMBOL(scsi_target_resume); |
2818 | |
2819 | static int __scsi_internal_device_block_nowait(struct scsi_device *sdev) |
2820 | { |
2821 | if (scsi_device_set_state(sdev, SDEV_BLOCK)) |
2822 | return scsi_device_set_state(sdev, SDEV_CREATED_BLOCK); |
2823 | |
2824 | return 0; |
2825 | } |
2826 | |
2827 | void scsi_start_queue(struct scsi_device *sdev) |
2828 | { |
2829 | if (cmpxchg(&sdev->queue_stopped, 1, 0)) |
		blk_mq_unquiesce_queue(sdev->request_queue);
2831 | } |
2832 | |
2833 | static void scsi_stop_queue(struct scsi_device *sdev) |
2834 | { |
2835 | /* |
2836 | * The atomic variable of ->queue_stopped covers that |
2837 | * blk_mq_quiesce_queue* is balanced with blk_mq_unquiesce_queue. |
2838 | * |
2839 | * The caller needs to wait until quiesce is done. |
2840 | */ |
2841 | if (!cmpxchg(&sdev->queue_stopped, 0, 1)) |
		blk_mq_quiesce_queue_nowait(sdev->request_queue);
2843 | } |
2844 | |
2845 | /** |
2846 | * scsi_internal_device_block_nowait - try to transition to the SDEV_BLOCK state |
2847 | * @sdev: device to block |
2848 | * |
2849 | * Pause SCSI command processing on the specified device. Does not sleep. |
2850 | * |
2851 | * Returns zero if successful or a negative error code upon failure. |
2852 | * |
2853 | * Notes: |
2854 | * This routine transitions the device to the SDEV_BLOCK state (which must be |
2855 | * a legal transition). When the device is in this state, command processing |
2856 | * is paused until the device leaves the SDEV_BLOCK state. See also |
2857 | * scsi_internal_device_unblock_nowait(). |
2858 | */ |
2859 | int scsi_internal_device_block_nowait(struct scsi_device *sdev) |
2860 | { |
2861 | int ret = __scsi_internal_device_block_nowait(sdev); |
2862 | |
2863 | /* |
2864 | * The device has transitioned to SDEV_BLOCK. Stop the |
2865 | * block layer from calling the midlayer with this device's |
2866 | * request queue. |
2867 | */ |
2868 | if (!ret) |
2869 | scsi_stop_queue(sdev); |
2870 | return ret; |
2871 | } |
2872 | EXPORT_SYMBOL_GPL(scsi_internal_device_block_nowait); |
2873 | |
2874 | /** |
2875 | * scsi_device_block - try to transition to the SDEV_BLOCK state |
2876 | * @sdev: device to block |
2877 | * @data: dummy argument, ignored |
2878 | * |
2879 | * Pause SCSI command processing on the specified device. Callers must wait |
2880 | * until all ongoing scsi_queue_rq() calls have finished after this function |
2881 | * returns. |
2882 | * |
2883 | * Note: |
2884 | * This routine transitions the device to the SDEV_BLOCK state (which must be |
2885 | * a legal transition). When the device is in this state, command processing |
2886 | * is paused until the device leaves the SDEV_BLOCK state. See also |
2887 | * scsi_internal_device_unblock(). |
2888 | */ |
2889 | static void scsi_device_block(struct scsi_device *sdev, void *data) |
2890 | { |
2891 | int err; |
2892 | enum scsi_device_state state; |
2893 | |
2894 | mutex_lock(&sdev->state_mutex); |
2895 | err = __scsi_internal_device_block_nowait(sdev); |
2896 | state = sdev->sdev_state; |
2897 | if (err == 0) |
2898 | /* |
2899 | * scsi_stop_queue() must be called with the state_mutex |
2900 | * held. Otherwise a simultaneous scsi_start_queue() call |
2901 | * might unquiesce the queue before we quiesce it. |
2902 | */ |
2903 | scsi_stop_queue(sdev); |
2904 | |
	mutex_unlock(&sdev->state_mutex);
2906 | |
2907 | WARN_ONCE(err, "%s: failed to block %s in state %d\n" , |
2908 | __func__, dev_name(&sdev->sdev_gendev), state); |
2909 | } |
2910 | |
2911 | /** |
2912 | * scsi_internal_device_unblock_nowait - resume a device after a block request |
2913 | * @sdev: device to resume |
2914 | * @new_state: state to set the device to after unblocking |
2915 | * |
2916 | * Restart the device queue for a previously suspended SCSI device. Does not |
2917 | * sleep. |
2918 | * |
2919 | * Returns zero if successful or a negative error code upon failure. |
2920 | * |
2921 | * Notes: |
2922 | * This routine transitions the device to the SDEV_RUNNING state or to one of |
2923 | * the offline states (which must be a legal transition) allowing the midlayer |
2924 | * to goose the queue for this device. |
2925 | */ |
2926 | int scsi_internal_device_unblock_nowait(struct scsi_device *sdev, |
2927 | enum scsi_device_state new_state) |
2928 | { |
2929 | switch (new_state) { |
2930 | case SDEV_RUNNING: |
2931 | case SDEV_TRANSPORT_OFFLINE: |
2932 | break; |
2933 | default: |
2934 | return -EINVAL; |
2935 | } |
2936 | |
2937 | /* |
2938 | * Try to transition the scsi device to SDEV_RUNNING or one of the |
2939 | * offlined states and goose the device queue if successful. |
2940 | */ |
2941 | switch (sdev->sdev_state) { |
2942 | case SDEV_BLOCK: |
2943 | case SDEV_TRANSPORT_OFFLINE: |
2944 | sdev->sdev_state = new_state; |
2945 | break; |
2946 | case SDEV_CREATED_BLOCK: |
2947 | if (new_state == SDEV_TRANSPORT_OFFLINE || |
2948 | new_state == SDEV_OFFLINE) |
2949 | sdev->sdev_state = new_state; |
2950 | else |
2951 | sdev->sdev_state = SDEV_CREATED; |
2952 | break; |
2953 | case SDEV_CANCEL: |
2954 | case SDEV_OFFLINE: |
2955 | break; |
2956 | default: |
2957 | return -EINVAL; |
2958 | } |
2959 | scsi_start_queue(sdev); |
2960 | |
2961 | return 0; |
2962 | } |
2963 | EXPORT_SYMBOL_GPL(scsi_internal_device_unblock_nowait); |
2964 | |
2965 | /** |
2966 | * scsi_internal_device_unblock - resume a device after a block request |
2967 | * @sdev: device to resume |
2968 | * @new_state: state to set the device to after unblocking |
2969 | * |
2970 | * Restart the device queue for a previously suspended SCSI device. May sleep. |
2971 | * |
2972 | * Returns zero if successful or a negative error code upon failure. |
2973 | * |
2974 | * Notes: |
2975 | * This routine transitions the device to the SDEV_RUNNING state or to one of |
2976 | * the offline states (which must be a legal transition) allowing the midlayer |
2977 | * to goose the queue for this device. |
2978 | */ |
2979 | static int scsi_internal_device_unblock(struct scsi_device *sdev, |
2980 | enum scsi_device_state new_state) |
2981 | { |
2982 | int ret; |
2983 | |
2984 | mutex_lock(&sdev->state_mutex); |
2985 | ret = scsi_internal_device_unblock_nowait(sdev, new_state); |
	mutex_unlock(&sdev->state_mutex);
2987 | |
2988 | return ret; |
2989 | } |
2990 | |
2991 | static int |
2992 | target_block(struct device *dev, void *data) |
2993 | { |
2994 | if (scsi_is_target_device(dev)) |
2995 | starget_for_each_device(to_scsi_target(dev), NULL, |
					scsi_device_block);
2997 | return 0; |
2998 | } |
2999 | |
3000 | /** |
3001 | * scsi_block_targets - transition all SCSI child devices to SDEV_BLOCK state |
3002 | * @dev: a parent device of one or more scsi_target devices |
3003 | * @shost: the Scsi_Host to which this device belongs |
3004 | * |
3005 | * Iterate over all children of @dev, which should be scsi_target devices, |
3006 | * and switch all subordinate scsi devices to SDEV_BLOCK state. Wait for |
3007 | * ongoing scsi_queue_rq() calls to finish. May sleep. |
3008 | * |
3009 | * Note: |
3010 | * @dev must not itself be a scsi_target device. |
3011 | */ |
3012 | void |
3013 | scsi_block_targets(struct Scsi_Host *shost, struct device *dev) |
3014 | { |
3015 | WARN_ON_ONCE(scsi_is_target_device(dev)); |
	device_for_each_child(dev, NULL, target_block);
	blk_mq_wait_quiesce_done(&shost->tag_set);
3018 | } |
3019 | EXPORT_SYMBOL_GPL(scsi_block_targets); |
3020 | |
3021 | static void |
3022 | device_unblock(struct scsi_device *sdev, void *data) |
3023 | { |
	scsi_internal_device_unblock(sdev, *(enum scsi_device_state *)data);
3025 | } |
3026 | |
3027 | static int |
3028 | target_unblock(struct device *dev, void *data) |
3029 | { |
3030 | if (scsi_is_target_device(dev)) |
3031 | starget_for_each_device(to_scsi_target(dev), data, |
					device_unblock);
3033 | return 0; |
3034 | } |
3035 | |
3036 | void |
3037 | scsi_target_unblock(struct device *dev, enum scsi_device_state new_state) |
3038 | { |
3039 | if (scsi_is_target_device(dev)) |
3040 | starget_for_each_device(to_scsi_target(dev), &new_state, |
					device_unblock);
3042 | else |
		device_for_each_child(dev, &new_state, target_unblock);
3044 | } |
3045 | EXPORT_SYMBOL_GPL(scsi_target_unblock); |
3046 | |
3047 | /** |
3048 | * scsi_host_block - Try to transition all logical units to the SDEV_BLOCK state |
3049 | * @shost: device to block |
3050 | * |
3051 | * Pause SCSI command processing for all logical units associated with the SCSI |
3052 | * host and wait until pending scsi_queue_rq() calls have finished. |
3053 | * |
3054 | * Returns zero if successful or a negative error code upon failure. |
3055 | */ |
3056 | int |
3057 | scsi_host_block(struct Scsi_Host *shost) |
3058 | { |
3059 | struct scsi_device *sdev; |
3060 | int ret; |
3061 | |
3062 | /* |
3063 | * Call scsi_internal_device_block_nowait so we can avoid |
3064 | * calling synchronize_rcu() for each LUN. |
3065 | */ |
3066 | shost_for_each_device(sdev, shost) { |
3067 | mutex_lock(&sdev->state_mutex); |
3068 | ret = scsi_internal_device_block_nowait(sdev); |
		mutex_unlock(&sdev->state_mutex);
3070 | if (ret) { |
3071 | scsi_device_put(sdev); |
3072 | return ret; |
3073 | } |
3074 | } |
3075 | |
3076 | /* Wait for ongoing scsi_queue_rq() calls to finish. */ |
	blk_mq_wait_quiesce_done(&shost->tag_set);
3078 | |
3079 | return 0; |
3080 | } |
3081 | EXPORT_SYMBOL_GPL(scsi_host_block); |
3082 | |
3083 | int |
3084 | scsi_host_unblock(struct Scsi_Host *shost, int new_state) |
3085 | { |
3086 | struct scsi_device *sdev; |
3087 | int ret = 0; |
3088 | |
3089 | shost_for_each_device(sdev, shost) { |
3090 | ret = scsi_internal_device_unblock(sdev, new_state); |
3091 | if (ret) { |
3092 | scsi_device_put(sdev); |
3093 | break; |
3094 | } |
3095 | } |
3096 | return ret; |
3097 | } |
3098 | EXPORT_SYMBOL_GPL(scsi_host_unblock); |
3099 | |
3100 | /** |
 * scsi_kmap_atomic_sg - find and atomically map an sg-element
3102 | * @sgl: scatter-gather list |
3103 | * @sg_count: number of segments in sg |
3104 | * @offset: offset in bytes into sg, on return offset into the mapped area |
3105 | * @len: bytes to map, on return number of bytes mapped |
3106 | * |
3107 | * Returns virtual address of the start of the mapped page |
3108 | */ |
3109 | void *scsi_kmap_atomic_sg(struct scatterlist *sgl, int sg_count, |
3110 | size_t *offset, size_t *len) |
3111 | { |
3112 | int i; |
3113 | size_t sg_len = 0, len_complete = 0; |
3114 | struct scatterlist *sg; |
3115 | struct page *page; |
3116 | |
3117 | WARN_ON(!irqs_disabled()); |
3118 | |
3119 | for_each_sg(sgl, sg, sg_count, i) { |
3120 | len_complete = sg_len; /* Complete sg-entries */ |
3121 | sg_len += sg->length; |
3122 | if (sg_len > *offset) |
3123 | break; |
3124 | } |
3125 | |
3126 | if (unlikely(i == sg_count)) { |
3127 | printk(KERN_ERR "%s: Bytes in sg: %zu, requested offset %zu, " |
3128 | "elements %d\n" , |
3129 | __func__, sg_len, *offset, sg_count); |
3130 | WARN_ON(1); |
3131 | return NULL; |
3132 | } |
3133 | |
3134 | /* Offset starting from the beginning of first page in this sg-entry */ |
3135 | *offset = *offset - len_complete + sg->offset; |
3136 | |
3137 | /* Assumption: contiguous pages can be accessed as "page + i" */ |
3138 | page = nth_page(sg_page(sg), (*offset >> PAGE_SHIFT)); |
3139 | *offset &= ~PAGE_MASK; |
3140 | |
3141 | /* Bytes in this sg-entry from *offset to the end of the page */ |
3142 | sg_len = PAGE_SIZE - *offset; |
3143 | if (*len > sg_len) |
3144 | *len = sg_len; |
3145 | |
3146 | return kmap_atomic(page); |
3147 | } |
3148 | EXPORT_SYMBOL(scsi_kmap_atomic_sg); |
3149 | |
3150 | /** |
3151 | * scsi_kunmap_atomic_sg - atomically unmap a virtual address, previously mapped with scsi_kmap_atomic_sg |
3152 | * @virt: virtual address to be unmapped |
3153 | */ |
3154 | void scsi_kunmap_atomic_sg(void *virt) |
3155 | { |
3156 | kunmap_atomic(virt); |
3157 | } |
3158 | EXPORT_SYMBOL(scsi_kunmap_atomic_sg); |
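
/*
 * Illustrative sketch (hypothetical caller, not part of this file): copying a
 * few bytes out of a command's scatterlist at a given byte offset. Interrupts
 * must be disabled, as checked by the WARN_ON() above. On return *offset is
 * the offset into the mapped page and *len the number of bytes available
 * there, so a real caller loops until the requested length is consumed. The
 * "dest" buffer is an assumption of the example.
 *
 *	size_t offset = 512, len = 8;
 *	void *vaddr;
 *
 *	vaddr = scsi_kmap_atomic_sg(scsi_sglist(cmd), scsi_sg_count(cmd),
 *				    &offset, &len);
 *	if (vaddr) {
 *		memcpy(dest, vaddr + offset, len);
 *		scsi_kunmap_atomic_sg(vaddr);
 *	}
 */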
3159 | |
3160 | void sdev_disable_disk_events(struct scsi_device *sdev) |
3161 | { |
	atomic_inc(&sdev->disk_events_disable_depth);
3163 | } |
3164 | EXPORT_SYMBOL(sdev_disable_disk_events); |
3165 | |
3166 | void sdev_enable_disk_events(struct scsi_device *sdev) |
3167 | { |
3168 | if (WARN_ON_ONCE(atomic_read(&sdev->disk_events_disable_depth) <= 0)) |
3169 | return; |
	atomic_dec(&sdev->disk_events_disable_depth);
3171 | } |
3172 | EXPORT_SYMBOL(sdev_enable_disk_events); |
3173 | |
3174 | static unsigned char designator_prio(const unsigned char *d) |
3175 | { |
3176 | if (d[1] & 0x30) |
3177 | /* not associated with LUN */ |
3178 | return 0; |
3179 | |
3180 | if (d[3] == 0) |
3181 | /* invalid length */ |
3182 | return 0; |
3183 | |
3184 | /* |
3185 | * Order of preference for lun descriptor: |
3186 | * - SCSI name string |
3187 | * - NAA IEEE Registered Extended |
3188 | * - EUI-64 based 16-byte |
3189 | * - EUI-64 based 12-byte |
3190 | * - NAA IEEE Registered |
3191 | * - NAA IEEE Extended |
3192 | * - EUI-64 based 8-byte |
3193 | * - SCSI name string (truncated) |
3194 | * - T10 Vendor ID |
	 * as longer descriptors reduce the likelihood
3196 | * of identification clashes. |
3197 | */ |
3198 | |
3199 | switch (d[1] & 0xf) { |
3200 | case 8: |
3201 | /* SCSI name string, variable-length UTF-8 */ |
3202 | return 9; |
3203 | case 3: |
3204 | switch (d[4] >> 4) { |
3205 | case 6: |
3206 | /* NAA registered extended */ |
3207 | return 8; |
3208 | case 5: |
3209 | /* NAA registered */ |
3210 | return 5; |
3211 | case 4: |
3212 | /* NAA extended */ |
3213 | return 4; |
3214 | case 3: |
3215 | /* NAA locally assigned */ |
3216 | return 1; |
3217 | default: |
3218 | break; |
3219 | } |
3220 | break; |
3221 | case 2: |
3222 | switch (d[3]) { |
3223 | case 16: |
3224 | /* EUI64-based, 16 byte */ |
3225 | return 7; |
3226 | case 12: |
3227 | /* EUI64-based, 12 byte */ |
3228 | return 6; |
3229 | case 8: |
3230 | /* EUI64-based, 8 byte */ |
3231 | return 3; |
3232 | default: |
3233 | break; |
3234 | } |
3235 | break; |
3236 | case 1: |
3237 | /* T10 vendor ID */ |
3238 | return 1; |
3239 | default: |
3240 | break; |
3241 | } |
3242 | |
3243 | return 0; |
3244 | } |
3245 | |
3246 | /** |
3247 | * scsi_vpd_lun_id - return a unique device identification |
3248 | * @sdev: SCSI device |
3249 | * @id: buffer for the identification |
3250 | * @id_len: length of the buffer |
3251 | * |
3252 | * Copies a unique device identification into @id based |
3253 | * on the information in the VPD page 0x83 of the device. |
3254 | * The string will be formatted as a SCSI name string. |
3255 | * |
3256 | * Returns the length of the identification or error on failure. |
3257 | * If the identifier is longer than the supplied buffer the actual |
3258 | * identifier length is returned and the buffer is not zero-padded. |
3259 | */ |
3260 | int scsi_vpd_lun_id(struct scsi_device *sdev, char *id, size_t id_len) |
3261 | { |
3262 | u8 cur_id_prio = 0; |
3263 | u8 cur_id_size = 0; |
3264 | const unsigned char *d, *cur_id_str; |
3265 | const struct scsi_vpd *vpd_pg83; |
3266 | int id_size = -EINVAL; |
3267 | |
3268 | rcu_read_lock(); |
3269 | vpd_pg83 = rcu_dereference(sdev->vpd_pg83); |
3270 | if (!vpd_pg83) { |
3271 | rcu_read_unlock(); |
3272 | return -ENXIO; |
3273 | } |
3274 | |
3275 | /* The id string must be at least 20 bytes + terminating NULL byte */ |
3276 | if (id_len < 21) { |
3277 | rcu_read_unlock(); |
3278 | return -EINVAL; |
3279 | } |
3280 | |
3281 | memset(id, 0, id_len); |
3282 | for (d = vpd_pg83->data + 4; |
3283 | d < vpd_pg83->data + vpd_pg83->len; |
3284 | d += d[3] + 4) { |
3285 | u8 prio = designator_prio(d); |
3286 | |
3287 | if (prio == 0 || cur_id_prio > prio) |
3288 | continue; |
3289 | |
3290 | switch (d[1] & 0xf) { |
3291 | case 0x1: |
3292 | /* T10 Vendor ID */ |
3293 | if (cur_id_size > d[3]) |
3294 | break; |
3295 | cur_id_prio = prio; |
3296 | cur_id_size = d[3]; |
3297 | if (cur_id_size + 4 > id_len) |
3298 | cur_id_size = id_len - 4; |
3299 | cur_id_str = d + 4; |
3300 | id_size = snprintf(buf: id, size: id_len, fmt: "t10.%*pE" , |
3301 | cur_id_size, cur_id_str); |
3302 | break; |
3303 | case 0x2: |
3304 | /* EUI-64 */ |
3305 | cur_id_prio = prio; |
3306 | cur_id_size = d[3]; |
3307 | cur_id_str = d + 4; |
3308 | switch (cur_id_size) { |
3309 | case 8: |
				id_size = snprintf(id, id_len,
						   "eui.%8phN",
						   cur_id_str);
3313 | break; |
3314 | case 12: |
				id_size = snprintf(id, id_len,
						   "eui.%12phN",
						   cur_id_str);
3318 | break; |
3319 | case 16: |
				id_size = snprintf(id, id_len,
						   "eui.%16phN",
						   cur_id_str);
3323 | break; |
3324 | default: |
3325 | break; |
3326 | } |
3327 | break; |
3328 | case 0x3: |
3329 | /* NAA */ |
3330 | cur_id_prio = prio; |
3331 | cur_id_size = d[3]; |
3332 | cur_id_str = d + 4; |
3333 | switch (cur_id_size) { |
3334 | case 8: |
				id_size = snprintf(id, id_len,
						   "naa.%8phN",
						   cur_id_str);
3338 | break; |
3339 | case 16: |
				id_size = snprintf(id, id_len,
						   "naa.%16phN",
						   cur_id_str);
3343 | break; |
3344 | default: |
3345 | break; |
3346 | } |
3347 | break; |
3348 | case 0x8: |
3349 | /* SCSI name string */ |
3350 | if (cur_id_size > d[3]) |
3351 | break; |
3352 | /* Prefer others for truncated descriptor */ |
3353 | if (d[3] > id_len) { |
3354 | prio = 2; |
3355 | if (cur_id_prio > prio) |
3356 | break; |
3357 | } |
3358 | cur_id_prio = prio; |
3359 | cur_id_size = id_size = d[3]; |
3360 | cur_id_str = d + 4; |
3361 | if (cur_id_size >= id_len) |
3362 | cur_id_size = id_len - 1; |
3363 | memcpy(id, cur_id_str, cur_id_size); |
3364 | break; |
3365 | default: |
3366 | break; |
3367 | } |
3368 | } |
3369 | rcu_read_unlock(); |
3370 | |
3371 | return id_size; |
3372 | } |
3373 | EXPORT_SYMBOL(scsi_vpd_lun_id); |
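
/*
 * Usage sketch (hypothetical caller, not part of this file): a driver
 * would typically call scsi_vpd_lun_id() with a buffer of at least
 * 21 bytes and treat a negative return as "no usable designator".
 */
#if 0
static void example_report_lun_id(struct scsi_device *sdev)
{
	char id[64];
	int len;

	len = scsi_vpd_lun_id(sdev, id, sizeof(id));
	if (len < 0) {
		sdev_printk(KERN_INFO, sdev, "no VPD 0x83 designator\n");
		return;
	}
	/* If len >= sizeof(id) the designator was truncated */
	sdev_printk(KERN_INFO, sdev, "lun id: %s (len %d)\n", id, len);
}
#endif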
3374 | |
3375 | /* |
3376 | * scsi_vpd_tpg_id - return a target port group identifier |
3377 | * @sdev: SCSI device |
3378 | * |
3379 | * Returns the Target Port Group identifier from the information |
3380 | * froom VPD page 0x83 of the device. |
3381 | * |
3382 | * Returns the identifier or error on failure. |
3383 | */ |
3384 | int scsi_vpd_tpg_id(struct scsi_device *sdev, int *rel_id) |
3385 | { |
3386 | const unsigned char *d; |
3387 | const struct scsi_vpd *vpd_pg83; |
3388 | int group_id = -EAGAIN, rel_port = -1; |
3389 | |
3390 | rcu_read_lock(); |
3391 | vpd_pg83 = rcu_dereference(sdev->vpd_pg83); |
3392 | if (!vpd_pg83) { |
3393 | rcu_read_unlock(); |
3394 | return -ENXIO; |
3395 | } |
3396 | |
3397 | d = vpd_pg83->data + 4; |
3398 | while (d < vpd_pg83->data + vpd_pg83->len) { |
3399 | switch (d[1] & 0xf) { |
3400 | case 0x4: |
3401 | /* Relative target port */ |
			rel_port = get_unaligned_be16(&d[6]);
3403 | break; |
3404 | case 0x5: |
3405 | /* Target port group */ |
			group_id = get_unaligned_be16(&d[6]);
3407 | break; |
3408 | default: |
3409 | break; |
3410 | } |
3411 | d += d[3] + 4; |
3412 | } |
3413 | rcu_read_unlock(); |
3414 | |
3415 | if (group_id >= 0 && rel_id && rel_port != -1) |
3416 | *rel_id = rel_port; |
3417 | |
3418 | return group_id; |
3419 | } |
3420 | EXPORT_SYMBOL(scsi_vpd_tpg_id); |
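
/*
 * Usage sketch (hypothetical caller): ALUA-style code would query the
 * target port group and, optionally, the relative target port.
 */
#if 0
static void example_report_tpg(struct scsi_device *sdev)
{
	int rel_port = -1;
	int group_id;

	group_id = scsi_vpd_tpg_id(sdev, &rel_port);
	if (group_id < 0)
		return;		/* no VPD data or no TPG descriptor */
	sdev_printk(KERN_INFO, sdev, "tpg %d relative port %d\n",
		    group_id, rel_port);
}
#endif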
3421 | |
3422 | /** |
3423 | * scsi_build_sense - build sense data for a command |
3424 | * @scmd: scsi command for which the sense should be formatted |
3425 | * @desc: Sense format (non-zero == descriptor format, |
3426 | * 0 == fixed format) |
3427 | * @key: Sense key |
3428 | * @asc: Additional sense code |
3429 | * @ascq: Additional sense code qualifier |
3430 | * |
3431 | **/ |
3432 | void scsi_build_sense(struct scsi_cmnd *scmd, int desc, u8 key, u8 asc, u8 ascq) |
3433 | { |
	scsi_build_sense_buffer(desc, scmd->sense_buffer, key, asc, ascq);
3435 | scmd->result = SAM_STAT_CHECK_CONDITION; |
3436 | } |
3437 | EXPORT_SYMBOL_GPL(scsi_build_sense); |
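
/*
 * Usage sketch (hypothetical LLD code): rejecting an unsupported opcode
 * with fixed-format sense data, ILLEGAL REQUEST / INVALID COMMAND
 * OPERATION CODE (0x20/0x00).
 */
#if 0
static void example_reject_opcode(struct scsi_cmnd *scmd)
{
	scsi_build_sense(scmd, 0, ILLEGAL_REQUEST, 0x20, 0x00);
	scsi_done(scmd);
}
#endif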
3438 | |
3439 | #ifdef CONFIG_SCSI_LIB_KUNIT_TEST |
3440 | #include "scsi_lib_test.c" |
3441 | #endif |
3442 | |