// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Support for SATA devices on Serial Attached SCSI (SAS) controllers
 *
 * Copyright (C) 2006 IBM Corporation
 *
 * Written by: Darrick J. Wong <djwong@us.ibm.com>, IBM Corporation
 */

#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/async.h>
#include <linux/export.h>

#include <scsi/sas_ata.h>
#include "sas_internal.h"
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_sas.h>
#include "scsi_sas_internal.h"
#include "scsi_transport_api.h"
#include <scsi/scsi_eh.h>

static enum ata_completion_errors sas_to_ata_err(struct task_status_struct *ts)
{
	/* Cheesy attempt to translate SAS errors into ATA. Hah! */

	/* transport error */
	if (ts->resp == SAS_TASK_UNDELIVERED)
		return AC_ERR_ATA_BUS;

	/* ts->resp == SAS_TASK_COMPLETE */
	/* task delivered, what happened afterwards? */
	switch (ts->stat) {
	case SAS_DEV_NO_RESPONSE:
		return AC_ERR_TIMEOUT;
	case SAS_INTERRUPTED:
	case SAS_PHY_DOWN:
	case SAS_NAK_R_ERR:
		return AC_ERR_ATA_BUS;
	case SAS_DATA_UNDERRUN:
		/*
		 * Some programs that use the taskfile interface
		 * (smartctl in particular) can cause underrun
		 * problems. Ignore these errors, perhaps at our
		 * peril.
		 */
		return 0;
	case SAS_DATA_OVERRUN:
	case SAS_QUEUE_FULL:
	case SAS_DEVICE_UNKNOWN:
	case SAS_OPEN_TO:
	case SAS_OPEN_REJECT:
		pr_warn("%s: Saw error %d. What to do?\n",
			__func__, ts->stat);
		return AC_ERR_OTHER;
	case SAM_STAT_CHECK_CONDITION:
	case SAS_ABORTED_TASK:
		return AC_ERR_DEV;
	case SAS_PROTO_RESPONSE:
		/* This means the ending_fis has the error
		 * value; return 0 here to collect it
		 */
		return 0;
	default:
		return 0;
	}
}

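/*
 * sas_ata_task_done - libsas ->task_done callback for STP/SATA commands.
 *
 * Translates the sas_task completion status into a libata qc completion:
 * on a good or protocol response the ending FIS is copied into
 * dev->sata_dev.fis for later retrieval by ->qc_fill_rtf(), otherwise a
 * vague device error is synthesized.  Races with libsas and libata error
 * handling are resolved under dev->done_lock and ap->lock.
 */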
static void sas_ata_task_done(struct sas_task *task)
{
	struct ata_queued_cmd *qc = task->uldd_task;
	struct domain_device *dev = task->dev;
	struct task_status_struct *stat = &task->task_status;
	struct ata_task_resp *resp = (struct ata_task_resp *)stat->buf;
	struct sas_ha_struct *sas_ha = dev->port->ha;
	enum ata_completion_errors ac;
	unsigned long flags;
	struct ata_link *link;
	struct ata_port *ap;

	spin_lock_irqsave(&dev->done_lock, flags);
	if (test_bit(SAS_HA_FROZEN, &sas_ha->state))
		task = NULL;
	else if (qc && qc->scsicmd)
		ASSIGN_SAS_TASK(qc->scsicmd, NULL);
	spin_unlock_irqrestore(&dev->done_lock, flags);

	/* check if libsas-eh got to the task before us */
	if (unlikely(!task))
		return;

	if (!qc)
		goto qc_already_gone;

	ap = qc->ap;
	link = &ap->link;

	spin_lock_irqsave(ap->lock, flags);
	/* check if we lost the race with libata/sas_ata_post_internal() */
	if (unlikely(ata_port_is_frozen(ap))) {
		spin_unlock_irqrestore(ap->lock, flags);
		if (qc->scsicmd)
			goto qc_already_gone;
		else {
			/* if eh is not involved and the port is frozen then the
			 * ata internal abort process has taken responsibility
			 * for this sas_task
			 */
			return;
		}
	}

	if (stat->stat == SAS_PROTO_RESPONSE ||
	    stat->stat == SAS_SAM_STAT_GOOD ||
	    (stat->stat == SAS_SAM_STAT_CHECK_CONDITION &&
	     dev->sata_dev.class == ATA_DEV_ATAPI)) {
		memcpy(dev->sata_dev.fis, resp->ending_fis, ATA_RESP_FIS_SIZE);

		if (!link->sactive) {
			qc->err_mask |= ac_err_mask(dev->sata_dev.fis[2]);
		} else {
			link->eh_info.err_mask |= ac_err_mask(dev->sata_dev.fis[2]);
			if (unlikely(link->eh_info.err_mask))
				qc->flags |= ATA_QCFLAG_EH;
		}
	} else {
		ac = sas_to_ata_err(stat);
		if (ac) {
			pr_warn("%s: SAS error 0x%x\n", __func__, stat->stat);
			/* We saw a SAS error. Send a vague error. */
			if (!link->sactive) {
				qc->err_mask = ac;
			} else {
				link->eh_info.err_mask |= AC_ERR_DEV;
				qc->flags |= ATA_QCFLAG_EH;
			}

			dev->sata_dev.fis[2] = ATA_ERR | ATA_DRDY; /* tf status */
			dev->sata_dev.fis[3] = ATA_ABORTED; /* tf error */
		}
	}

	qc->lldd_task = NULL;
	ata_qc_complete(qc);
	spin_unlock_irqrestore(ap->lock, flags);

qc_already_gone:
	sas_free_task(task);
}

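/*
 * sas_ata_qc_issue - ->qc_issue handler for SATA devices behind libsas.
 *
 * Wraps the libata queued command in a sas_task (SAS_PROTOCOL_STP), builds
 * the host-to-device FIS from the taskfile, and hands the task to the LLDD
 * via ->lldd_execute_task().  Returns AC_ERR_SYSTEM if the task cannot be
 * allocated or issued.  Called with ap->lock held; the lock is temporarily
 * dropped around the LLDD call.
 */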
static unsigned int sas_ata_qc_issue(struct ata_queued_cmd *qc)
	__must_hold(ap->lock)
{
	struct sas_task *task;
	struct scatterlist *sg;
	int ret = AC_ERR_SYSTEM;
	unsigned int si, xfer = 0;
	struct ata_port *ap = qc->ap;
	struct domain_device *dev = ap->private_data;
	struct sas_ha_struct *sas_ha = dev->port->ha;
	struct Scsi_Host *host = sas_ha->shost;
	struct sas_internal *i = to_sas_internal(host->transportt);

	/* TODO: we should try to remove that unlock */
	spin_unlock(ap->lock);

	/* If the device fell off, no sense in issuing commands */
	if (test_bit(SAS_DEV_GONE, &dev->state))
		goto out;

	task = sas_alloc_task(GFP_ATOMIC);
	if (!task)
		goto out;
	task->dev = dev;
	task->task_proto = SAS_PROTOCOL_STP;
	task->task_done = sas_ata_task_done;

	/* For NCQ commands, zero out the tag libata assigned us */
	if (ata_is_ncq(qc->tf.protocol))
		qc->tf.nsect = 0;

	ata_tf_to_fis(&qc->tf, qc->dev->link->pmp, 1, (u8 *)&task->ata_task.fis);
	task->uldd_task = qc;
	if (ata_is_atapi(qc->tf.protocol)) {
		memcpy(task->ata_task.atapi_packet, qc->cdb, qc->dev->cdb_len);
		task->total_xfer_len = qc->nbytes;
		task->num_scatter = qc->n_elem;
		task->data_dir = qc->dma_dir;
	} else if (!ata_is_data(qc->tf.protocol)) {
		task->data_dir = DMA_NONE;
	} else {
		for_each_sg(qc->sg, sg, qc->n_elem, si)
			xfer += sg_dma_len(sg);

		task->total_xfer_len = xfer;
		task->num_scatter = si;
		task->data_dir = qc->dma_dir;
	}
	task->scatter = qc->sg;
	qc->lldd_task = task;

	task->ata_task.use_ncq = ata_is_ncq(qc->tf.protocol);
	task->ata_task.dma_xfer = ata_is_dma(qc->tf.protocol);

	if (qc->flags & ATA_QCFLAG_RESULT_TF)
		task->ata_task.return_fis_on_success = 1;

	if (qc->scsicmd)
		ASSIGN_SAS_TASK(qc->scsicmd, task);

	ret = i->dft->lldd_execute_task(task, GFP_ATOMIC);
	if (ret) {
		pr_debug("lldd_execute_task returned: %d\n", ret);

		if (qc->scsicmd)
			ASSIGN_SAS_TASK(qc->scsicmd, NULL);
		sas_free_task(task);
		qc->lldd_task = NULL;
		ret = AC_ERR_SYSTEM;
	}

out:
	spin_lock(ap->lock);
	return ret;
}

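/*
 * sas_ata_qc_fill_rtf - ->qc_fill_rtf handler.
 *
 * Rebuilds the result taskfile from the ending FIS that
 * sas_ata_task_done() saved in dev->sata_dev.fis.
 */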
static void sas_ata_qc_fill_rtf(struct ata_queued_cmd *qc)
{
	struct domain_device *dev = qc->ap->private_data;

	ata_tf_from_fis(dev->sata_dev.fis, &qc->result_tf);
}

static struct sas_internal *dev_to_sas_internal(struct domain_device *dev)
{
	return to_sas_internal(dev->port->ha->shost->transportt);
}

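/*
 * sas_get_ata_command_set - classify an attached SATA device.
 *
 * Decodes the initial D2H register FIS saved in dev->frame_rcvd and lets
 * libata classify the device (ATA, ATAPI, ...).  Devices still in the
 * SAS_SATA_PENDING state have no FIS yet and are reported as unknown.
 */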
static int sas_get_ata_command_set(struct domain_device *dev)
{
	struct ata_taskfile tf;

	if (dev->dev_type == SAS_SATA_PENDING)
		return ATA_DEV_UNKNOWN;

	ata_tf_from_fis(dev->frame_rcvd, &tf);

	return ata_dev_classify(&tf);
}

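/*
 * sas_get_ata_info - gather SATA-specific info for an expander-attached phy.
 *
 * Records the target protocol flags and, unless the device is still
 * SAS_SATA_PENDING, issues REPORT PHY SATA to the parent expander so the
 * device's initial register FIS and ATA class can be recorded.
 */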
int sas_get_ata_info(struct domain_device *dev, struct ex_phy *phy)
{
	if (phy->attached_tproto & SAS_PROTOCOL_STP)
		dev->tproto = phy->attached_tproto;
	if (phy->attached_sata_dev)
		dev->tproto |= SAS_SATA_DEV;

	if (phy->attached_dev_type == SAS_SATA_PENDING)
		dev->dev_type = SAS_SATA_PENDING;
	else {
		int res;

		dev->dev_type = SAS_SATA_DEV;
		res = sas_get_report_phy_sata(dev->parent, phy->phy_id,
					      &dev->sata_dev.rps_resp);
		if (res) {
			pr_debug("report phy sata to %016llx:%02d returned 0x%x\n",
				 SAS_ADDR(dev->parent->sas_addr),
				 phy->phy_id, res);
			return res;
		}
		memcpy(dev->frame_rcvd, &dev->sata_dev.rps_resp.rps.fis,
		       sizeof(struct dev_to_host_fis));
		dev->sata_dev.class = sas_get_ata_command_set(dev);
	}
	return 0;
}

static int sas_ata_clear_pending(struct domain_device *dev, struct ex_phy *phy)
{
	int res;

	/* we weren't pending, so successfully end the reset sequence now */
	if (dev->dev_type != SAS_SATA_PENDING)
		return 1;

	/* hmmm, if this succeeds do we need to repost the domain_device to the
	 * lldd so it can pick up new parameters?
	 */
	res = sas_get_ata_info(dev, phy);
	if (res)
		return 0; /* retry */
	else
		return 1;
}

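/*
 * smp_ata_check_ready_type - SMP-based link readiness check by device type.
 *
 * Asks the parent expander what is attached to the local phy: returns 1 when
 * a SATA end device has fully appeared, 0 while the device is still
 * SAS_SATA_PENDING, and -ENODEV for anything else.
 */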
int smp_ata_check_ready_type(struct ata_link *link)
{
	struct domain_device *dev = link->ap->private_data;
	struct sas_phy *phy = sas_get_local_phy(dev);
	struct domain_device *ex_dev = dev->parent;
	enum sas_device_type type = SAS_PHY_UNUSED;
	u8 sas_addr[SAS_ADDR_SIZE];
	int res;

	res = sas_get_phy_attached_dev(ex_dev, phy->number, sas_addr, &type);
	sas_put_local_phy(phy);
	if (res)
		return res;

	switch (type) {
	case SAS_SATA_PENDING:
		return 0;
	case SAS_END_DEVICE:
		return 1;
	default:
		return -ENODEV;
	}
}
EXPORT_SYMBOL_GPL(smp_ata_check_ready_type);

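/*
 * smp_ata_check_ready - ata_wait_after_reset() callback for expander-attached
 * devices.  Re-runs SMP DISCOVER on the attached phy: a SATA end device means
 * ready (via sas_ata_clear_pending()), SAS_SATA_PENDING means keep polling,
 * and an unreachable expander (-ECOMM) ends the wait early.
 */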
static int smp_ata_check_ready(struct ata_link *link)
{
	int res;
	struct ata_port *ap = link->ap;
	struct domain_device *dev = ap->private_data;
	struct domain_device *ex_dev = dev->parent;
	struct sas_phy *phy = sas_get_local_phy(dev);
	struct ex_phy *ex_phy = &ex_dev->ex_dev.ex_phy[phy->number];

	res = sas_ex_phy_discover(ex_dev, phy->number);
	sas_put_local_phy(phy);

	/* break the wait early if the expander is unreachable,
	 * otherwise keep polling
	 */
	if (res == -ECOMM)
		return res;
	if (res != SMP_RESP_FUNC_ACC)
		return 0;

	switch (ex_phy->attached_dev_type) {
	case SAS_SATA_PENDING:
		return 0;
	case SAS_END_DEVICE:
		if (ex_phy->attached_sata_dev)
			return sas_ata_clear_pending(dev, ex_phy);
		fallthrough;
	default:
		return -ENODEV;
	}
}

static int local_ata_check_ready(struct ata_link *link)
{
	struct ata_port *ap = link->ap;
	struct domain_device *dev = ap->private_data;
	struct sas_internal *i = dev_to_sas_internal(dev);

	if (i->dft->lldd_ata_check_ready)
		return i->dft->lldd_ata_check_ready(dev);
	else {
		/* lldd's that don't implement 'ready' checking get the
		 * old default behavior of not coordinating reset
		 * recovery with libata
		 */
		return 1;
	}
}

static int sas_ata_printk(const char *level, const struct domain_device *ddev,
			  const char *fmt, ...)
{
	struct ata_port *ap = ddev->sata_dev.ap;
	struct device *dev = &ddev->rphy->dev;
	struct va_format vaf;
	va_list args;
	int r;

	va_start(args, fmt);

	vaf.fmt = fmt;
	vaf.va = &args;

	r = printk("%s" SAS_FMT "ata%u: %s: %pV",
		   level, ap->print_id, dev_name(dev), &vaf);

	va_end(args);

	return r;
}

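/*
 * sas_ata_wait_after_reset - wait for the link to become ready after a reset,
 * using the LLDD's readiness hook for directly attached devices and SMP
 * DISCOVER polling for expander-attached ones.
 */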
static int sas_ata_wait_after_reset(struct domain_device *dev, unsigned long deadline)
{
	struct sata_device *sata_dev = &dev->sata_dev;
	int (*check_ready)(struct ata_link *link);
	struct ata_port *ap = sata_dev->ap;
	struct ata_link *link = &ap->link;
	struct sas_phy *phy;
	int ret;

	phy = sas_get_local_phy(dev);
	if (scsi_is_sas_phy_local(phy))
		check_ready = local_ata_check_ready;
	else
		check_ready = smp_ata_check_ready;
	sas_put_local_phy(phy);

	ret = ata_wait_after_reset(link, deadline, check_ready);
	if (ret && ret != -EAGAIN)
		sas_ata_printk(KERN_ERR, dev, "reset failed (errno=%d)\n", ret);

	return ret;
}

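/*
 * sas_ata_hard_reset - ->hardreset handler: ask the LLDD for an I_T nexus
 * reset, wait for the link to come back, and report the device class that
 * discovery recorded for this device.
 */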
static int sas_ata_hard_reset(struct ata_link *link, unsigned int *class,
			      unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct domain_device *dev = ap->private_data;
	struct sas_internal *i = dev_to_sas_internal(dev);
	int ret;

	ret = i->dft->lldd_I_T_nexus_reset(dev);
	if (ret == -ENODEV)
		return ret;

	if (ret != TMF_RESP_FUNC_COMPLETE)
		sas_ata_printk(KERN_DEBUG, dev, "Unable to reset ata device?\n");

	ret = sas_ata_wait_after_reset(dev, deadline);

	*class = dev->sata_dev.class;

	ap->cbl = ATA_CBL_SATA;
	return ret;
}

/*
 * notify the lldd to forget the sas_task for this internal ata command
 * that bypasses scsi-eh
 */
static void sas_ata_internal_abort(struct sas_task *task)
{
	struct sas_internal *si = dev_to_sas_internal(task->dev);
	unsigned long flags;
	int res;

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_ABORTED ||
	    task->task_state_flags & SAS_TASK_STATE_DONE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		pr_debug("%s: Task %p already finished.\n", __func__, task);
		goto out;
	}
	task->task_state_flags |= SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	res = si->dft->lldd_abort_task(task);

	spin_lock_irqsave(&task->task_state_lock, flags);
	if (task->task_state_flags & SAS_TASK_STATE_DONE ||
	    res == TMF_RESP_FUNC_COMPLETE) {
		spin_unlock_irqrestore(&task->task_state_lock, flags);
		goto out;
	}

	/* XXX we are not prepared to deal with ->lldd_abort_task()
	 * failures.  TODO: lldds need to unconditionally forget about
	 * aborted ata tasks, otherwise we (likely) leak the sas task
	 * here
	 */
	pr_warn("%s: Task %p leaked.\n", __func__, task);

	if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
		task->task_state_flags &= ~SAS_TASK_STATE_ABORTED;
	spin_unlock_irqrestore(&task->task_state_lock, flags);

	return;
out:
	sas_free_task(task);
}

static void sas_ata_post_internal(struct ata_queued_cmd *qc)
{
	if (qc->flags & ATA_QCFLAG_EH)
		qc->err_mask |= AC_ERR_OTHER;

	if (qc->err_mask) {
		/*
		 * Find the sas_task and kill it.  By this point, libata
		 * has decided to kill the qc and has frozen the port.
		 * In this state sas_ata_task_done() will no longer free
		 * the sas_task, so we need to notify the lldd (via
		 * ->lldd_abort_task) that the task is dead and free it
		 * ourselves.
		 */
		struct sas_task *task = qc->lldd_task;

		qc->lldd_task = NULL;
		if (!task)
			return;
		task->uldd_task = NULL;
		sas_ata_internal_abort(task);
	}
}

static void sas_ata_set_dmamode(struct ata_port *ap, struct ata_device *ata_dev)
{
	struct domain_device *dev = ap->private_data;
	struct sas_internal *i = dev_to_sas_internal(dev);

	if (i->dft->lldd_ata_set_dmamode)
		i->dft->lldd_ata_set_dmamode(dev);
}

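/*
 * sas_ata_sched_eh/sas_ata_end_eh - bracket an ata error-handling pass.
 * While EH is pending for this device, SAS_DEV_EH_PENDING is set and the
 * ha->eh_active count is raised so the rest of libsas can tell that ata
 * EH is still in flight.
 */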
static void sas_ata_sched_eh(struct ata_port *ap)
{
	struct domain_device *dev = ap->private_data;
	struct sas_ha_struct *ha = dev->port->ha;
	unsigned long flags;

	spin_lock_irqsave(&ha->lock, flags);
	if (!test_and_set_bit(SAS_DEV_EH_PENDING, &dev->state))
		ha->eh_active++;
	ata_std_sched_eh(ap);
	spin_unlock_irqrestore(&ha->lock, flags);
}

void sas_ata_end_eh(struct ata_port *ap)
{
	struct domain_device *dev = ap->private_data;
	struct sas_ha_struct *ha = dev->port->ha;
	unsigned long flags;

	spin_lock_irqsave(&ha->lock, flags);
	if (test_and_clear_bit(SAS_DEV_EH_PENDING, &dev->state))
		ha->eh_active--;
	spin_unlock_irqrestore(&ha->lock, flags);
}

static int sas_ata_prereset(struct ata_link *link, unsigned long deadline)
{
	struct ata_port *ap = link->ap;
	struct domain_device *dev = ap->private_data;
	struct sas_phy *local_phy = sas_get_local_phy(dev);
	int res = 0;

	if (!local_phy->enabled || test_bit(SAS_DEV_GONE, &dev->state))
		res = -ENOENT;
	sas_put_local_phy(local_phy);

	return res;
}

static struct ata_port_operations sas_sata_ops = {
	.prereset = sas_ata_prereset,
	.hardreset = sas_ata_hard_reset,
	.error_handler = ata_std_error_handler,
	.post_internal_cmd = sas_ata_post_internal,
	.qc_defer = ata_std_qc_defer,
	.qc_issue = sas_ata_qc_issue,
	.qc_fill_rtf = sas_ata_qc_fill_rtf,
	.set_dmamode = sas_ata_set_dmamode,
	.sched_eh = sas_ata_sched_eh,
	.end_eh = sas_ata_end_eh,
};

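/*
 * sas_ata_init - allocate and register the libata host/port that backs a
 * discovered SATA domain device.  On success the ata_host and ata_port are
 * stashed in found_dev->sata_dev for later use by discovery and EH.
 */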
int sas_ata_init(struct domain_device *found_dev)
{
	struct sas_ha_struct *ha = found_dev->port->ha;
	struct Scsi_Host *shost = ha->shost;
	struct ata_host *ata_host;
	struct ata_port *ap;
	int rc;

	ata_host = kzalloc(sizeof(*ata_host), GFP_KERNEL);
	if (!ata_host) {
		pr_err("ata host alloc failed.\n");
		return -ENOMEM;
	}

	ata_host_init(ata_host, ha->dev, &sas_sata_ops);

	ap = ata_port_alloc(ata_host);
	if (!ap) {
		pr_err("ata_port_alloc failed.\n");
		rc = -ENODEV;
		goto free_host;
	}

	ap->port_no = 0;
	ap->pio_mask = ATA_PIO4;
	ap->mwdma_mask = ATA_MWDMA2;
	ap->udma_mask = ATA_UDMA6;
	ap->flags |= ATA_FLAG_SATA | ATA_FLAG_PIO_DMA | ATA_FLAG_NCQ |
		     ATA_FLAG_SAS_HOST | ATA_FLAG_FPDMA_AUX;
	ap->ops = &sas_sata_ops;
	ap->private_data = found_dev;
	ap->cbl = ATA_CBL_SATA;
	ap->scsi_host = shost;

	rc = ata_tport_add(ata_host->dev, ap);
	if (rc)
		goto free_port;

	found_dev->sata_dev.ata_host = ata_host;
	found_dev->sata_dev.ap = ap;

	return 0;

free_port:
	ata_port_free(ap);
free_host:
	ata_host_put(ata_host);
	return rc;
}

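/*
 * sas_ata_task_abort - abort the ata command behind a timed-out sas_task.
 * SCSI-initiated commands are bounced to the SCSI error handler via
 * blk_abort_request(); libata-internal commands are failed with a fake
 * timeout so the waiter in libata completes.
 */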
void sas_ata_task_abort(struct sas_task *task)
{
	struct ata_queued_cmd *qc = task->uldd_task;
	struct completion *waiting;

	/* Bounce SCSI-initiated commands to the SCSI EH */
	if (qc->scsicmd) {
		blk_abort_request(scsi_cmd_to_rq(qc->scsicmd));
		return;
	}

	/* Internal command, fake a timeout and complete. */
	qc->flags &= ~ATA_QCFLAG_ACTIVE;
	qc->flags |= ATA_QCFLAG_EH;
	qc->err_mask |= AC_ERR_TIMEOUT;
	waiting = qc->private_data;
	complete(waiting);
}

void sas_probe_sata(struct asd_sas_port *port)
{
	struct domain_device *dev, *n;

	mutex_lock(&port->ha->disco_mutex);
	list_for_each_entry(dev, &port->disco_list, disco_list_node) {
		if (!dev_is_sata(dev))
			continue;

		ata_port_probe(dev->sata_dev.ap);
	}
	mutex_unlock(&port->ha->disco_mutex);

	list_for_each_entry_safe(dev, n, &port->disco_list, disco_list_node) {
		if (!dev_is_sata(dev))
			continue;

		sas_ata_wait_eh(dev);

		/* if libata could not bring the link up, don't surface
		 * the device
		 */
		if (!ata_dev_enabled(sas_to_ata_dev(dev)))
			sas_fail_probe(dev, __func__, -ENODEV);
	}
}

int sas_ata_add_dev(struct domain_device *parent, struct ex_phy *phy,
		    struct domain_device *child, int phy_id)
{
	struct sas_rphy *rphy;
	int ret;

	if (child->linkrate > parent->min_linkrate) {
		struct sas_phy *cphy = child->phy;
		enum sas_linkrate min_prate = cphy->minimum_linkrate,
			parent_min_lrate = parent->min_linkrate,
			min_linkrate = (min_prate > parent_min_lrate) ?
					parent_min_lrate : 0;
		struct sas_phy_linkrates rates = {
			.maximum_linkrate = parent->min_linkrate,
			.minimum_linkrate = min_linkrate,
		};

		pr_notice("ex %016llx phy%02d SATA device linkrate > min pathway connection rate, attempting to lower device linkrate\n",
			  SAS_ADDR(child->sas_addr), phy_id);
		ret = sas_smp_phy_control(parent, phy_id,
					  PHY_FUNC_LINK_RESET, &rates);
		if (ret) {
			pr_err("ex %016llx phy%02d SATA device could not set linkrate (%d)\n",
			       SAS_ADDR(child->sas_addr), phy_id, ret);
			return ret;
		}
		pr_notice("ex %016llx phy%02d SATA device set linkrate successfully\n",
			  SAS_ADDR(child->sas_addr), phy_id);
		child->linkrate = child->min_linkrate;
	}
	ret = sas_get_ata_info(child, phy);
	if (ret)
		return ret;

	sas_init_dev(child);
	ret = sas_ata_init(child);
	if (ret)
		return ret;

	rphy = sas_end_device_alloc(phy->port);
	if (!rphy)
		return -ENOMEM;

	rphy->identify.phy_identifier = phy_id;
	child->rphy = rphy;
	get_device(&rphy->dev);

	list_add_tail(&child->disco_list_node, &parent->port->disco_list);

	ret = sas_discover_sata(child);
	if (ret) {
		pr_notice("sas_discover_sata() for device %16llx at %016llx:%02d returned 0x%x\n",
			  SAS_ADDR(child->sas_addr),
			  SAS_ADDR(parent->sas_addr), phy_id, ret);
		sas_rphy_free(child->rphy);
		list_del(&child->disco_list_node);
		return ret;
	}

	return 0;
}

static void sas_ata_flush_pm_eh(struct asd_sas_port *port, const char *func)
{
	struct domain_device *dev, *n;

	list_for_each_entry_safe(dev, n, &port->dev_list, dev_list_node) {
		if (!dev_is_sata(dev))
			continue;

		sas_ata_wait_eh(dev);

		/* if libata failed to power manage the device, tear it down */
		if (ata_dev_disabled(sas_to_ata_dev(dev)))
			sas_fail_probe(dev, func, -ENODEV);
	}
}

void sas_suspend_sata(struct asd_sas_port *port)
{
	struct domain_device *dev;

	mutex_lock(&port->ha->disco_mutex);
	list_for_each_entry(dev, &port->dev_list, dev_list_node) {
		struct sata_device *sata;

		if (!dev_is_sata(dev))
			continue;

		sata = &dev->sata_dev;
		if (sata->ap->pm_mesg.event == PM_EVENT_SUSPEND)
			continue;

		ata_sas_port_suspend(sata->ap);
	}
	mutex_unlock(&port->ha->disco_mutex);

	sas_ata_flush_pm_eh(port, __func__);
}

void sas_resume_sata(struct asd_sas_port *port)
{
	struct domain_device *dev;

	mutex_lock(&port->ha->disco_mutex);
	list_for_each_entry(dev, &port->dev_list, dev_list_node) {
		struct sata_device *sata;

		if (!dev_is_sata(dev))
			continue;

		sata = &dev->sata_dev;
		if (sata->ap->pm_mesg.event == PM_EVENT_ON)
			continue;

		ata_sas_port_resume(sata->ap);
	}
	mutex_unlock(&port->ha->disco_mutex);

	sas_ata_flush_pm_eh(port, __func__);
}

/**
 * sas_discover_sata - discover an STP/SATA domain device
 * @dev: pointer to struct domain_device of interest
 *
 * Devices directly attached to a HA port have no parents.  All other
 * devices do, and should have their "parent" pointer set appropriately
 * before calling this function.
 */
int sas_discover_sata(struct domain_device *dev)
{
	if (dev->dev_type == SAS_SATA_PM)
		return -ENODEV;

	dev->sata_dev.class = sas_get_ata_command_set(dev);
	sas_fill_in_rphy(dev, dev->rphy);

	return sas_notify_lldd_dev_found(dev);
}

static void async_sas_ata_eh(void *data, async_cookie_t cookie)
{
	struct domain_device *dev = data;
	struct ata_port *ap = dev->sata_dev.ap;
	struct sas_ha_struct *ha = dev->port->ha;

	sas_ata_printk(KERN_DEBUG, dev, "dev error handler\n");
	ata_scsi_port_error_handler(ha->shost, ap);
	sas_put_device(dev);
}

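/*
 * sas_ata_strategy_handler - run the libata port error handler for every
 * SATA device on the host.  Each port's EH runs from an async context so
 * ports recover in parallel; revalidation events are deferred for the
 * duration (see the comment below) and each device holds a reference while
 * its EH is in flight.
 */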
void sas_ata_strategy_handler(struct Scsi_Host *shost)
{
	struct sas_ha_struct *sas_ha = SHOST_TO_SAS_HA(shost);
	ASYNC_DOMAIN_EXCLUSIVE(async);
	int i;

	/* it's ok to defer revalidation events during ata eh; these
	 * disks are in one of three states:
	 * 1/ present for initial domain discovery, and these
	 *    resets will cause bcn flutters
	 * 2/ hot removed, we'll discover that after eh fails
	 * 3/ hot added after initial discovery, lost the race, and need
	 *    to catch the next train.
	 */
	sas_disable_revalidation(sas_ha);

	spin_lock_irq(&sas_ha->phy_port_lock);
	for (i = 0; i < sas_ha->num_phys; i++) {
		struct asd_sas_port *port = sas_ha->sas_port[i];
		struct domain_device *dev;

		spin_lock(&port->dev_list_lock);
		list_for_each_entry(dev, &port->dev_list, dev_list_node) {
			if (!dev_is_sata(dev))
				continue;

			/* hold a reference over eh since we may be
			 * racing with final remove once all commands
			 * are completed
			 */
			kref_get(&dev->kref);

			async_schedule_domain(async_sas_ata_eh, dev, &async);
		}
		spin_unlock(&port->dev_list_lock);
	}
	spin_unlock_irq(&sas_ha->phy_port_lock);

	async_synchronize_full_domain(&async);

	sas_enable_revalidation(sas_ha);
}

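/*
 * sas_ata_eh - route failed SCSI commands for SATA devices to libata.
 * Commands on the work queue are grouped per domain device and handed to
 * ata_scsi_cmd_error_handler() one device at a time; the remaining (SAS)
 * commands stay on work_q for the normal libsas error handling.
 */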
void sas_ata_eh(struct Scsi_Host *shost, struct list_head *work_q)
{
	struct scsi_cmnd *cmd, *n;
	struct domain_device *eh_dev;

	do {
		LIST_HEAD(sata_q);
		eh_dev = NULL;

		list_for_each_entry_safe(cmd, n, work_q, eh_entry) {
			struct domain_device *ddev = cmd_to_domain_dev(cmd);

			if (!dev_is_sata(ddev) || TO_SAS_TASK(cmd))
				continue;
			if (eh_dev && eh_dev != ddev)
				continue;
			eh_dev = ddev;
			list_move(&cmd->eh_entry, &sata_q);
		}

		if (!list_empty(&sata_q)) {
			struct ata_port *ap = eh_dev->sata_dev.ap;

			sas_ata_printk(KERN_DEBUG, eh_dev, "cmd error handler\n");
			ata_scsi_cmd_error_handler(shost, ap, &sata_q);
			/*
			 * ata's error handler may leave the cmd on the list
			 * so make sure they don't remain on a stack list
			 * about to go out of scope.
			 *
			 * This looks strange, since the commands are
			 * now part of no list, but the next error
			 * action will be ata_port_error_handler()
			 * which takes no list and sweeps them up
			 * anyway from the ata tag array.
			 */
			while (!list_empty(&sata_q))
				list_del_init(sata_q.next);
		}
	} while (eh_dev);
}

void sas_ata_schedule_reset(struct domain_device *dev)
{
	struct ata_eh_info *ehi;
	struct ata_port *ap;
	unsigned long flags;

	if (!dev_is_sata(dev))
		return;

	ap = dev->sata_dev.ap;
	ehi = &ap->link.eh_info;

	spin_lock_irqsave(ap->lock, flags);
	ehi->err_mask |= AC_ERR_TIMEOUT;
	ehi->action |= ATA_EH_RESET;
	ata_port_schedule_eh(ap);
	spin_unlock_irqrestore(ap->lock, flags);
}
EXPORT_SYMBOL_GPL(sas_ata_schedule_reset);

void sas_ata_wait_eh(struct domain_device *dev)
{
	struct ata_port *ap;

	if (!dev_is_sata(dev))
		return;

	ap = dev->sata_dev.ap;
	ata_port_wait_eh(ap);
}

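/*
 * sas_ata_device_link_abort - exported helper for LLDDs to abort all
 * outstanding commands on a link after a device error.  A fake "device
 * error" taskfile is recorded and the link is aborted, optionally forcing
 * a reset during the subsequent EH pass.
 */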
void sas_ata_device_link_abort(struct domain_device *device, bool force_reset)
{
	struct ata_port *ap = device->sata_dev.ap;
	struct ata_link *link = &ap->link;
	unsigned long flags;

	spin_lock_irqsave(ap->lock, flags);
	device->sata_dev.fis[2] = ATA_ERR | ATA_DRDY; /* tf status */
	device->sata_dev.fis[3] = ATA_ABORTED; /* tf error */

	link->eh_info.err_mask |= AC_ERR_DEV;
	if (force_reset)
		link->eh_info.action |= ATA_EH_RESET;
	ata_link_abort(link);
	spin_unlock_irqrestore(ap->lock, flags);
}
EXPORT_SYMBOL_GPL(sas_ata_device_link_abort);

int sas_execute_ata_cmd(struct domain_device *device, u8 *fis, int force_phy_id)
{
	struct sas_tmf_task tmf_task = {};

	return sas_execute_tmf(device, fis, sizeof(struct host_to_dev_fis),
			       force_phy_id, &tmf_task);
}
EXPORT_SYMBOL_GPL(sas_execute_ata_cmd);

static ssize_t sas_ncq_prio_supported_show(struct device *device,
					   struct device_attribute *attr,
					   char *buf)
{
	struct scsi_device *sdev = to_scsi_device(device);
	struct domain_device *ddev = sdev_to_domain_dev(sdev);
	bool supported;
	int rc;

	rc = ata_ncq_prio_supported(ddev->sata_dev.ap, sdev, &supported);
	if (rc)
		return rc;

	return sysfs_emit(buf, "%d\n", supported);
}

static struct device_attribute dev_attr_sas_ncq_prio_supported =
	__ATTR(ncq_prio_supported, S_IRUGO, sas_ncq_prio_supported_show, NULL);

static ssize_t sas_ncq_prio_enable_show(struct device *device,
					struct device_attribute *attr,
					char *buf)
{
	struct scsi_device *sdev = to_scsi_device(device);
	struct domain_device *ddev = sdev_to_domain_dev(sdev);
	bool enabled;
	int rc;

	rc = ata_ncq_prio_enabled(ddev->sata_dev.ap, sdev, &enabled);
	if (rc)
		return rc;

	return sysfs_emit(buf, "%d\n", enabled);
}

static ssize_t sas_ncq_prio_enable_store(struct device *device,
					 struct device_attribute *attr,
					 const char *buf, size_t len)
{
	struct scsi_device *sdev = to_scsi_device(device);
	struct domain_device *ddev = sdev_to_domain_dev(sdev);
	bool enable;
	int rc;

	rc = kstrtobool(buf, &enable);
	if (rc)
		return rc;

	rc = ata_ncq_prio_enable(ddev->sata_dev.ap, sdev, enable);
	if (rc)
		return rc;

	return len;
}

static struct device_attribute dev_attr_sas_ncq_prio_enable =
	__ATTR(ncq_prio_enable, S_IRUGO | S_IWUSR,
	       sas_ncq_prio_enable_show, sas_ncq_prio_enable_store);

static struct attribute *sas_ata_sdev_attrs[] = {
	&dev_attr_sas_ncq_prio_supported.attr,
	&dev_attr_sas_ncq_prio_enable.attr,
	NULL
};

static umode_t sas_ata_attr_is_visible(struct kobject *kobj,
				       struct attribute *attr, int i)
{
	struct device *dev = kobj_to_dev(kobj);
	struct scsi_device *sdev = to_scsi_device(dev);
	struct domain_device *ddev = sdev_to_domain_dev(sdev);

	if (!dev_is_sata(ddev))
		return 0;

	return attr->mode;
}

const struct attribute_group sas_ata_sdev_attr_group = {
	.attrs = sas_ata_sdev_attrs,
	.is_visible = sas_ata_attr_is_visible,
};
EXPORT_SYMBOL_GPL(sas_ata_sdev_attr_group);