/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <scsi/scsi_cmnd.h>
#include "isci.h"
#include "task.h"
#include "request.h"
#include "scu_completion_codes.h"
#include "scu_event_codes.h"
#include "sas.h"

#undef C
#define C(a) (#a)
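/*
 * REQUEST_STATES (see request.h) is an x-macro list whose entries are
 * wrapped in C(...).  With C(a) temporarily defined as the stringified
 * name (#a), the list expands into an array of state-name strings that
 * parallels enum sci_base_request_states; an illustrative sketch, not
 * the actual list:
 *
 *	#define REQUEST_STATES C(SCI_REQ_INIT), C(SCI_REQ_CONSTRUCTED), ...
 *	static const char * const strings[] = { "SCI_REQ_INIT", ... };
 */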
const char *req_state_name(enum sci_base_request_states state)
{
	static const char * const strings[] = REQUEST_STATES;

	return strings[state];
}
#undef C

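/*
 * The first two SGL element pairs of a request are embedded directly in
 * its task context (sgl_pair_ab and sgl_pair_cd); any additional pairs
 * live in the request's externally allocated sg_table, hence the idx - 2
 * translation below.
 */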
static struct scu_sgl_element_pair *to_sgl_element_pair(struct isci_request *ireq,
							int idx)
{
	if (idx == 0)
		return &ireq->tc->sgl_pair_ab;
	else if (idx == 1)
		return &ireq->tc->sgl_pair_cd;
	else if (idx < 0)
		return NULL;
	else
		return &ireq->sg_table[idx - 2];
}

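/*
 * Return the bus address of the SGL element pair at @idx, i.e. what the
 * hardware will dereference: for the two pairs embedded in the task
 * context this is an offset into the task context DMA region, otherwise
 * it is resolved within the request's own DMA footprint.
 */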
static dma_addr_t to_sgl_element_pair_dma(struct isci_host *ihost,
					  struct isci_request *ireq, u32 idx)
{
	u32 offset;

	if (idx == 0) {
		offset = (void *) &ireq->tc->sgl_pair_ab -
			 (void *) &ihost->task_context_table[0];
		return ihost->tc_dma + offset;
	} else if (idx == 1) {
		offset = (void *) &ireq->tc->sgl_pair_cd -
			 (void *) &ihost->task_context_table[0];
		return ihost->tc_dma + offset;
	}

	return sci_io_request_get_dma_addr(ireq, &ireq->sg_table[idx - 2]);
}

static void init_sgl_element(struct scu_sgl_element *e, struct scatterlist *sg)
{
	e->length = sg_dma_len(sg);
	e->address_upper = upper_32_bits(sg_dma_address(sg));
	e->address_lower = lower_32_bits(sg_dma_address(sg));
	e->address_modifier = 0;
}

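/*
 * Build the hardware scatter-gather list: walk the sas_task scatterlist
 * two elements at a time, filling one SCU SGL element pair per iteration
 * and chaining the pairs through their next_pair_{upper,lower} bus
 * addresses.  When the task carries a single flat buffer instead of a
 * scatterlist (num_scatter == 0), that buffer is DMA-mapped directly
 * into element A of the first pair.
 */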
static void sci_request_build_sgl(struct isci_request *ireq)
{
	struct isci_host *ihost = ireq->isci_host;
	struct sas_task *task = isci_request_access_task(ireq);
	struct scatterlist *sg = NULL;
	dma_addr_t dma_addr;
	u32 sg_idx = 0;
	struct scu_sgl_element_pair *scu_sg = NULL;
	struct scu_sgl_element_pair *prev_sg = NULL;

	if (task->num_scatter > 0) {
		sg = task->scatter;

		while (sg) {
			scu_sg = to_sgl_element_pair(ireq, sg_idx);
			init_sgl_element(&scu_sg->A, sg);
			sg = sg_next(sg);
			if (sg) {
				init_sgl_element(&scu_sg->B, sg);
				sg = sg_next(sg);
			} else
				memset(&scu_sg->B, 0, sizeof(scu_sg->B));

			if (prev_sg) {
				dma_addr = to_sgl_element_pair_dma(ihost,
								   ireq,
								   sg_idx);

				prev_sg->next_pair_upper =
					upper_32_bits(dma_addr);
				prev_sg->next_pair_lower =
					lower_32_bits(dma_addr);
			}

			prev_sg = scu_sg;
			sg_idx++;
		}
	} else {	/* handle when no sg */
		scu_sg = to_sgl_element_pair(ireq, sg_idx);

		dma_addr = dma_map_single(&ihost->pdev->dev,
					  task->scatter,
					  task->total_xfer_len,
					  task->data_dir);

		ireq->zero_scatter_daddr = dma_addr;

		scu_sg->A.length = task->total_xfer_len;
		scu_sg->A.address_upper = upper_32_bits(dma_addr);
		scu_sg->A.address_lower = lower_32_bits(dma_addr);
	}

	if (scu_sg) {
		scu_sg->next_pair_upper = 0;
		scu_sg->next_pair_lower = 0;
	}
}

static void sci_io_request_build_ssp_command_iu(struct isci_request *ireq)
{
	struct ssp_cmd_iu *cmd_iu;
	struct sas_task *task = isci_request_access_task(ireq);

	cmd_iu = &ireq->ssp.cmd;

	memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
	cmd_iu->add_cdb_len = 0;
	cmd_iu->_r_a = 0;
	cmd_iu->_r_b = 0;
	cmd_iu->en_fburst = 0; /* unsupported */
	cmd_iu->task_prio = 0;
	cmd_iu->task_attr = task->ssp_task.task_attr;
	cmd_iu->_r_c = 0;

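	/*
	 * The SCU consumes the CDB as big-endian 32-bit words, so byte-swap
	 * each dword while copying; the word count rounds the CDB length up
	 * to a whole number of dwords.
	 */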
	sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cmd->cmnd,
		       (task->ssp_task.cmd->cmd_len + 3) / sizeof(u32));
}

static void sci_task_request_build_ssp_task_iu(struct isci_request *ireq)
{
	struct ssp_task_iu *task_iu;
	struct sas_task *task = isci_request_access_task(ireq);
	struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);

	task_iu = &ireq->ssp.tmf;

	memset(task_iu, 0, sizeof(struct ssp_task_iu));

	memcpy(task_iu->LUN, task->ssp_task.LUN, 8);

	task_iu->task_func = isci_tmf->tmf_code;
	task_iu->task_tag =
		(test_bit(IREQ_TMF, &ireq->flags)) ?
		isci_tmf->io_tag :
		SCI_CONTROLLER_INVALID_IO_TAG;
}

/*
 * This method will fill in the SCU Task Context for any type of SSP request.
 */
static void scu_ssp_request_construct_task_context(
	struct isci_request *ireq,
	struct scu_task_context *task_context)
{
	dma_addr_t dma_addr;
	struct isci_remote_device *idev;
	struct isci_port *iport;

	idev = ireq->target_device;
	iport = idev->owning_port;

	/* Fill in the TC with its required data */
	task_context->abort = 0;
	task_context->priority = 0;
	task_context->initiator_request = 1;
	task_context->connection_rate = idev->connection_rate;
	task_context->protocol_engine_index = ISCI_PEG;
	task_context->logical_port_index = iport->physical_port_index;
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;

	task_context->remote_node_index = idev->rnc.remote_node_index;
	task_context->command_code = 0;

	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 0;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	task_context->address_modifier = 0;

	/* task_context->type.ssp.tag = ireq->io_tag; */
	task_context->task_phase = 0x01;

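	/*
	 * Pack the protocol engine group, the logical port and this
	 * request's task context index into the value that is later
	 * posted to the controller to start the task (see
	 * sci_request_start(), which ORs the TCI in again).
	 */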
	ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			      (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			      (iport->physical_port_index <<
			       SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
			      ISCI_TAG_TCI(ireq->io_tag));

	/*
	 * Copy the physical address for the command buffer to the
	 * SCU Task Context
	 */
	dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.cmd);

	task_context->command_iu_upper = upper_32_bits(dma_addr);
	task_context->command_iu_lower = lower_32_bits(dma_addr);

	/*
	 * Copy the physical address for the response buffer to the
	 * SCU Task Context
	 */
	dma_addr = sci_io_request_get_dma_addr(ireq, &ireq->ssp.rsp);

	task_context->response_iu_upper = upper_32_bits(dma_addr);
	task_context->response_iu_lower = lower_32_bits(dma_addr);
}

static u8 scu_bg_blk_size(struct scsi_device *sdp)
{
	switch (sdp->sector_size) {
	case 512:
		return 0;
	case 1024:
		return 1;
	case 4096:
		return 3;
	default:
		return 0xff;
	}
}

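/*
 * Each protected logical block carries an 8-byte DIF tuple (2-byte guard
 * CRC, 2-byte application tag, 4-byte reference tag), so a transfer pays
 * one tuple per sector: e.g. a 4096-byte transfer on a 512-byte-sector
 * device adds (4096 >> 9) * 8 = 64 bytes of protection data.
 */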
static u32 scu_dif_bytes(u32 len, u32 sector_size)
{
	return (len >> ilog2(sector_size)) * 8;
}

static void scu_ssp_ireq_dif_insert(struct isci_request *ireq, u8 type, u8 op)
{
	struct scu_task_context *tc = ireq->tc;
	struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task;
	u8 blk_sz = scu_bg_blk_size(scmd->device);

	tc->block_guard_enable = 1;
	tc->blk_prot_en = 1;
	tc->blk_sz = blk_sz;
	/* DIF write insert */
	tc->blk_prot_func = 0x2;

	tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes,
						   scmd->device->sector_size);

	/* always init to 0, used by hw */
	tc->interm_crc_val = 0;

	tc->init_crc_seed = 0;
	tc->app_tag_verify = 0;
	tc->app_tag_gen = 0;
	tc->ref_tag_seed_verify = 0;

	/* always init to same as bg_blk_sz */
	tc->UD_bytes_immed_val = scmd->device->sector_size;

	tc->reserved_DC_0 = 0;

	/* always init to 8 */
	tc->DIF_bytes_immed_val = 8;

	tc->reserved_DC_1 = 0;
	tc->bgc_blk_sz = scmd->device->sector_size;
	tc->reserved_E0_0 = 0;
	tc->app_tag_gen_mask = 0;

	/* setup block guard control */
	tc->bgctl = 0;

	/* DIF write insert */
	tc->bgctl_f.op = 0x2;

	tc->app_tag_verify_mask = 0;

	/* must init to 0 for hw */
	tc->blk_guard_err = 0;

	tc->reserved_E8_0 = 0;

	if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2))
		tc->ref_tag_seed_gen = scsi_prot_ref_tag(scmd);
	else if (type & SCSI_PROT_DIF_TYPE3)
		tc->ref_tag_seed_gen = 0;
}

static void scu_ssp_ireq_dif_strip(struct isci_request *ireq, u8 type, u8 op)
{
	struct scu_task_context *tc = ireq->tc;
	struct scsi_cmnd *scmd = ireq->ttype_ptr.io_task_ptr->uldd_task;
	u8 blk_sz = scu_bg_blk_size(scmd->device);

	tc->block_guard_enable = 1;
	tc->blk_prot_en = 1;
	tc->blk_sz = blk_sz;
	/* DIF read strip */
	tc->blk_prot_func = 0x1;

	tc->transfer_length_bytes += scu_dif_bytes(tc->transfer_length_bytes,
						   scmd->device->sector_size);

	/* always init to 0, used by hw */
	tc->interm_crc_val = 0;

	tc->init_crc_seed = 0;
	tc->app_tag_verify = 0;
	tc->app_tag_gen = 0;

	if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2))
		tc->ref_tag_seed_verify = scsi_prot_ref_tag(scmd);
	else if (type & SCSI_PROT_DIF_TYPE3)
		tc->ref_tag_seed_verify = 0;

	/* always init to same as bg_blk_sz */
	tc->UD_bytes_immed_val = scmd->device->sector_size;

	tc->reserved_DC_0 = 0;

	/* always init to 8 */
	tc->DIF_bytes_immed_val = 8;

	tc->reserved_DC_1 = 0;
	tc->bgc_blk_sz = scmd->device->sector_size;
	tc->reserved_E0_0 = 0;
	tc->app_tag_gen_mask = 0;

	/* setup block guard control */
	tc->bgctl = 0;

	/* DIF read strip */
	tc->bgctl_f.crc_verify = 1;
	tc->bgctl_f.op = 0x1;
	if ((type & SCSI_PROT_DIF_TYPE1) || (type & SCSI_PROT_DIF_TYPE2)) {
		tc->bgctl_f.ref_tag_chk = 1;
		tc->bgctl_f.app_f_detect = 1;
	} else if (type & SCSI_PROT_DIF_TYPE3)
		tc->bgctl_f.app_ref_f_detect = 1;

	tc->app_tag_verify_mask = 0;

	/* must init to 0 for hw */
	tc->blk_guard_err = 0;

	tc->reserved_E8_0 = 0;
	tc->ref_tag_seed_gen = 0;
}

/*
 * This method will fill in the SCU Task Context for an SSP IO request.
 */
static void scu_ssp_io_request_construct_task_context(struct isci_request *ireq,
						      enum dma_data_direction dir,
						      u32 len)
{
	struct scu_task_context *task_context = ireq->tc;
	struct sas_task *sas_task = ireq->ttype_ptr.io_task_ptr;
	struct scsi_cmnd *scmd = sas_task->uldd_task;
	u8 prot_type = scsi_get_prot_type(scmd);
	u8 prot_op = scsi_get_prot_op(scmd);

	scu_ssp_request_construct_task_context(ireq, task_context);

	task_context->ssp_command_iu_length =
		sizeof(struct ssp_cmd_iu) / sizeof(u32);
	task_context->type.ssp.frame_type = SSP_COMMAND;

	switch (dir) {
	case DMA_FROM_DEVICE:
	case DMA_NONE:
	default:
		task_context->task_type = SCU_TASK_TYPE_IOREAD;
		break;
	case DMA_TO_DEVICE:
		task_context->task_type = SCU_TASK_TYPE_IOWRITE;
		break;
	}

	task_context->transfer_length_bytes = len;

	if (task_context->transfer_length_bytes > 0)
		sci_request_build_sgl(ireq);

	if (prot_type != SCSI_PROT_DIF_TYPE0) {
		if (prot_op == SCSI_PROT_READ_STRIP)
			scu_ssp_ireq_dif_strip(ireq, prot_type, prot_op);
		else if (prot_op == SCSI_PROT_WRITE_INSERT)
			scu_ssp_ireq_dif_insert(ireq, prot_type, prot_op);
	}
}

/**
 * scu_ssp_task_request_construct_task_context() - This method will fill in
 *    the SCU Task Context for an SSP Task request.  The following important
 *    settings are utilized:
 *      -# priority == SCU_TASK_PRIORITY_HIGH.  This ensures that the task
 *         request is issued ahead of other tasks destined for the same
 *         remote node.
 *      -# task_type == SCU_TASK_TYPE_RAW_FRAME.  The task management IU is
 *         sent as a raw task frame rather than a normal IO request.
 *      -# control_frame == 1.  This ensures that the proper endianness is
 *         set so that the bytes are transmitted in the right order for a
 *         task frame.
 * @ireq: This parameter specifies the task request object being constructed.
 */
static void scu_ssp_task_request_construct_task_context(struct isci_request *ireq)
{
	struct scu_task_context *task_context = ireq->tc;

	scu_ssp_request_construct_task_context(ireq, task_context);

	task_context->control_frame = 1;
	task_context->priority = SCU_TASK_PRIORITY_HIGH;
	task_context->task_type = SCU_TASK_TYPE_RAW_FRAME;
	task_context->transfer_length_bytes = 0;
	task_context->type.ssp.frame_type = SSP_TASK;
	task_context->ssp_command_iu_length =
		sizeof(struct ssp_task_iu) / sizeof(u32);
}

/**
 * scu_sata_request_construct_task_context()
 * This method will fill in the SCU Task Context for any type of SATA
 * request.  This is called from the various SATA constructors.
 * @ireq: The general IO request object which is to be used in
 *    constructing the SCU task context.
 * @task_context: The buffer pointer for the SCU task context which is being
 *    constructed.
 *
 * The general io request construction is complete.  The buffer assignment
 * for the command buffer is complete.  Revisit task context construction
 * to determine what is common for SSP/SMP/STP task context structures.
 */
static void scu_sata_request_construct_task_context(
	struct isci_request *ireq,
	struct scu_task_context *task_context)
{
	dma_addr_t dma_addr;
	struct isci_remote_device *idev;
	struct isci_port *iport;

	idev = ireq->target_device;
	iport = idev->owning_port;

	/* Fill in the TC with its required data */
	task_context->abort = 0;
	task_context->priority = SCU_TASK_PRIORITY_NORMAL;
	task_context->initiator_request = 1;
	task_context->connection_rate = idev->connection_rate;
	task_context->protocol_engine_index = ISCI_PEG;
	task_context->logical_port_index = iport->physical_port_index;
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;

	task_context->remote_node_index = idev->rnc.remote_node_index;
	task_context->command_code = 0;

	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 0;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	task_context->address_modifier = 0;
	task_context->task_phase = 0x01;

	task_context->ssp_command_iu_length =
		(sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);

	/* Set the first word of the H2D REG FIS */
	task_context->type.words[0] = *(u32 *)&ireq->stp.cmd;

	ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			      (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			      (iport->physical_port_index <<
			       SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
			      ISCI_TAG_TCI(ireq->io_tag));
	/*
	 * Copy the physical address for the command buffer to the SCU Task
	 * Context.  We must offset the command buffer by 4 bytes because the
	 * first 4 bytes are transferred in the body of the TC.
	 */
	dma_addr = sci_io_request_get_dma_addr(ireq,
					       ((char *) &ireq->stp.cmd) +
					       sizeof(u32));

	task_context->command_iu_upper = upper_32_bits(dma_addr);
	task_context->command_iu_lower = lower_32_bits(dma_addr);

	/* SATA Requests do not have a response buffer */
	task_context->response_iu_upper = 0;
	task_context->response_iu_lower = 0;
}

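/*
 * A raw H2D register FIS travels with its first dword carried in the task
 * context itself (type.words[0], set by the constructor above), so only
 * the remaining sizeof(struct host_to_dev_fis) - sizeof(u32) bytes are
 * transferred from the command buffer.
 */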
static void scu_stp_raw_request_construct_task_context(struct isci_request *ireq)
{
	struct scu_task_context *task_context = ireq->tc;

	scu_sata_request_construct_task_context(ireq, task_context);

	task_context->control_frame = 0;
	task_context->priority = SCU_TASK_PRIORITY_NORMAL;
	task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME;
	task_context->type.stp.fis_type = FIS_REGH2D;
	task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
}

static enum sci_status sci_stp_pio_request_construct(struct isci_request *ireq,
						     bool copy_rx_frame)
{
	struct isci_stp_request *stp_req = &ireq->stp.req;

	scu_stp_raw_request_construct_task_context(ireq);

	stp_req->status = 0;
	stp_req->sgl.offset = 0;
	stp_req->sgl.set = SCU_SGL_ELEMENT_PAIR_A;

	if (copy_rx_frame) {
		sci_request_build_sgl(ireq);
		stp_req->sgl.index = 0;
	} else {
		/* The user does not want the data copied to the SGL buffer location */
		stp_req->sgl.index = -1;
	}

	return SCI_SUCCESS;
}

/*
 * sci_stp_optimized_request_construct()
 * @ireq: This parameter specifies the request to be constructed as an
 *    optimized request.
 * @optimized_task_type: This parameter specifies the optimized task type
 *    for the request, e.g. SCU_TASK_TYPE_DMA_IN for UDMA or
 *    SCU_TASK_TYPE_FPDMAQ_READ for NCQ (see the callers below).
 *
 * This method will perform request construction common to all types of STP
 * requests that are optimized by the silicon (i.e. UDMA, NCQ).  This method
 * returns an indication as to whether the construction was successful.
 */
static void sci_stp_optimized_request_construct(struct isci_request *ireq,
						u8 optimized_task_type,
						u32 len,
						enum dma_data_direction dir)
{
	struct scu_task_context *task_context = ireq->tc;

	/* Build the STP task context structure */
	scu_sata_request_construct_task_context(ireq, task_context);

	/* Copy over the SGL elements */
	sci_request_build_sgl(ireq);

	/* Copy over the number of bytes to be transferred */
	task_context->transfer_length_bytes = len;

	if (dir == DMA_TO_DEVICE) {
		/*
		 * The difference between the DMA IN and DMA OUT request task
		 * type values is consistent with the difference between FPDMA
		 * READ and FPDMA WRITE values.  Add the supplied task type
		 * parameter to this difference to set the task type properly
		 * for this DATA OUT (WRITE) case.
		 */
		task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
								 - SCU_TASK_TYPE_DMA_IN);
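		/*
		 * For example SCU_TASK_TYPE_FPDMAQ_READ would become the
		 * corresponding FPDMA write task type; this sketch of the
		 * intent assumes the task-type enum spaces each read/write
		 * pair by the same DMA_OUT - DMA_IN delta.
		 */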
	} else {
		/*
		 * For the DATA IN (READ) case, simply save the supplied
		 * optimized task type.
		 */
		task_context->task_type = optimized_task_type;
	}
}

static void sci_atapi_construct(struct isci_request *ireq)
{
	struct host_to_dev_fis *h2d_fis = &ireq->stp.cmd;
	struct sas_task *task;

	/* To simplify the implementation we take advantage of the
	 * silicon's partial acceleration of atapi protocol (dma data
	 * transfers), so we promote all commands to dma protocol.  This
	 * breaks compatibility with ATA_HORKAGE_ATAPI_MOD16_DMA drives.
	 */
	h2d_fis->features |= ATAPI_PKT_DMA;

	scu_stp_raw_request_construct_task_context(ireq);

	task = isci_request_access_task(ireq);
	if (task->data_dir == DMA_NONE)
		task->total_xfer_len = 0;

	/* clear the response so we can detect arrival of an
	 * unsolicited h2d fis
	 */
	ireq->stp.rsp.fis_type = 0;
}

static enum sci_status
sci_io_request_construct_sata(struct isci_request *ireq,
			      u32 len,
			      enum dma_data_direction dir,
			      bool copy)
{
	struct sas_task *task = isci_request_access_task(ireq);
	struct domain_device *dev = ireq->target_device->domain_dev;

	/* check for management protocols */
	if (test_bit(IREQ_TMF, &ireq->flags)) {
		struct isci_tmf *tmf = isci_request_access_tmf(ireq);

		dev_err(&ireq->owning_controller->pdev->dev,
			"%s: Request 0x%p received un-handled SAT "
			"management protocol 0x%x.\n",
			__func__, ireq, tmf->tmf_code);

		return SCI_FAILURE;
	}

	if (!sas_protocol_ata(task->task_proto)) {
		dev_err(&ireq->owning_controller->pdev->dev,
			"%s: Non-ATA protocol in SATA path: 0x%x\n",
			__func__,
			task->task_proto);
		return SCI_FAILURE;
	}

	/* ATAPI */
	if (dev->sata_dev.class == ATA_DEV_ATAPI &&
	    task->ata_task.fis.command == ATA_CMD_PACKET) {
		sci_atapi_construct(ireq);
		return SCI_SUCCESS;
	}

	/* non data */
	if (task->data_dir == DMA_NONE) {
		scu_stp_raw_request_construct_task_context(ireq);
		return SCI_SUCCESS;
	}

	/* NCQ */
	if (task->ata_task.use_ncq) {
		sci_stp_optimized_request_construct(ireq,
						    SCU_TASK_TYPE_FPDMAQ_READ,
						    len, dir);
		return SCI_SUCCESS;
	}

	/* DMA */
	if (task->ata_task.dma_xfer) {
		sci_stp_optimized_request_construct(ireq,
						    SCU_TASK_TYPE_DMA_IN,
						    len, dir);
		return SCI_SUCCESS;
	} else /* PIO */
		return sci_stp_pio_request_construct(ireq, copy);
}

static enum sci_status sci_io_request_construct_basic_ssp(struct isci_request *ireq)
{
	struct sas_task *task = isci_request_access_task(ireq);

	ireq->protocol = SAS_PROTOCOL_SSP;

	scu_ssp_io_request_construct_task_context(ireq,
						  task->data_dir,
						  task->total_xfer_len);

	sci_io_request_build_ssp_command_iu(ireq);

	sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);

	return SCI_SUCCESS;
}

void sci_task_request_construct_ssp(struct isci_request *ireq)
{
	/* Construct the SSP Task SCU Task Context */
	scu_ssp_task_request_construct_task_context(ireq);

	/* Fill in the SSP Task IU */
	sci_task_request_build_ssp_task_iu(ireq);

	sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);
}

static enum sci_status sci_io_request_construct_basic_sata(struct isci_request *ireq)
{
	enum sci_status status;
	bool copy = false;
	struct sas_task *task = isci_request_access_task(ireq);

	ireq->protocol = SAS_PROTOCOL_STP;

	copy = (task->data_dir == DMA_NONE) ? false : true;

	status = sci_io_request_construct_sata(ireq,
					       task->total_xfer_len,
					       task->data_dir,
					       copy);

	if (status == SCI_SUCCESS)
		sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);

	return status;
}

#define SCU_TASK_CONTEXT_SRAM 0x200000
/**
 * sci_req_tx_bytes - bytes transferred when reply underruns request
 * @ireq: request that was terminated early
 */
static u32 sci_req_tx_bytes(struct isci_request *ireq)
{
	struct isci_host *ihost = ireq->owning_controller;
	u32 ret_val = 0;

	if (readl(&ihost->smu_registers->address_modifier) == 0) {
		void __iomem *scu_reg_base = ihost->scu_registers;

		/* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where
		 *   BAR1 is the scu_registers
		 *   0x20002C = 0x200000 + 0x2c
		 *            = start of task context SRAM + offset of (type.ssp.data_offset)
		 *   TCi is the io_tag of struct sci_request
		 */
		ret_val = readl(scu_reg_base +
				(SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
				((sizeof(struct scu_task_context)) * ISCI_TAG_TCI(ireq->io_tag)));
	}

	return ret_val;
}

enum sci_status sci_request_start(struct isci_request *ireq)
{
	enum sci_base_request_states state;
	struct scu_task_context *tc = ireq->tc;
	struct isci_host *ihost = ireq->owning_controller;

	state = ireq->sm.current_state_id;
	if (state != SCI_REQ_CONSTRUCTED) {
		dev_warn(&ihost->pdev->dev,
			 "%s: SCIC IO Request requested to start while in wrong "
			 "state %d\n", __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	}

	tc->task_index = ISCI_TAG_TCI(ireq->io_tag);

	switch (tc->protocol_type) {
	case SCU_TASK_CONTEXT_PROTOCOL_SMP:
	case SCU_TASK_CONTEXT_PROTOCOL_SSP:
		/* SSP/SMP Frame */
		tc->type.ssp.tag = ireq->io_tag;
		tc->type.ssp.target_port_transfer_tag = 0xFFFF;
		break;

	case SCU_TASK_CONTEXT_PROTOCOL_STP:
		/* STP/SATA Frame
		 * tc->type.stp.ncq_tag = ireq->ncq_tag;
		 */
		break;

	case SCU_TASK_CONTEXT_PROTOCOL_NONE:
		/* TODO: When do we set no protocol type? */
		break;

	default:
		/* This should never happen since we build the IO
		 * requests */
		break;
	}

	/* Add to the post_context the io tag value */
	ireq->post_context |= ISCI_TAG_TCI(ireq->io_tag);

	/* Everything is good go ahead and change state */
	sci_change_state(&ireq->sm, SCI_REQ_STARTED);

	return SCI_SUCCESS;
}

enum sci_status
sci_io_request_terminate(struct isci_request *ireq)
{
	enum sci_base_request_states state;

	state = ireq->sm.current_state_id;

	switch (state) {
	case SCI_REQ_CONSTRUCTED:
		/* Set to make sure no HW terminate posting is done: */
		set_bit(IREQ_TC_ABORT_POSTED, &ireq->flags);
		ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
		ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		return SCI_SUCCESS;
	case SCI_REQ_STARTED:
	case SCI_REQ_TASK_WAIT_TC_COMP:
	case SCI_REQ_SMP_WAIT_RESP:
	case SCI_REQ_SMP_WAIT_TC_COMP:
	case SCI_REQ_STP_UDMA_WAIT_TC_COMP:
	case SCI_REQ_STP_UDMA_WAIT_D2H:
	case SCI_REQ_STP_NON_DATA_WAIT_H2D:
	case SCI_REQ_STP_NON_DATA_WAIT_D2H:
	case SCI_REQ_STP_PIO_WAIT_H2D:
	case SCI_REQ_STP_PIO_WAIT_FRAME:
	case SCI_REQ_STP_PIO_DATA_IN:
	case SCI_REQ_STP_PIO_DATA_OUT:
	case SCI_REQ_ATAPI_WAIT_H2D:
	case SCI_REQ_ATAPI_WAIT_PIO_SETUP:
	case SCI_REQ_ATAPI_WAIT_D2H:
	case SCI_REQ_ATAPI_WAIT_TC_COMP:
		/* Fall through and change state to ABORTING... */
	case SCI_REQ_TASK_WAIT_TC_RESP:
		/* The task frame was already confirmed to have been
		 * sent by the SCU HW.  Since the state machine is
		 * now only waiting for the task response itself,
		 * abort the request and complete it immediately
		 * and don't wait for the task response.
		 */
		sci_change_state(&ireq->sm, SCI_REQ_ABORTING);
		fallthrough;	/* and handle like ABORTING */
	case SCI_REQ_ABORTING:
		if (!isci_remote_device_is_safe_to_abort(ireq->target_device))
			set_bit(IREQ_PENDING_ABORT, &ireq->flags);
		else
			clear_bit(IREQ_PENDING_ABORT, &ireq->flags);
		/* If the request is only waiting on the remote device
		 * suspension, return SUCCESS so the caller will wait too.
		 */
		return SCI_SUCCESS;
	case SCI_REQ_COMPLETED:
	default:
		dev_warn(&ireq->owning_controller->pdev->dev,
			 "%s: SCIC IO Request requested to abort while in wrong "
			 "state %d\n", __func__, ireq->sm.current_state_id);
		break;
	}

	return SCI_FAILURE_INVALID_STATE;
}

enum sci_status sci_request_complete(struct isci_request *ireq)
{
	enum sci_base_request_states state;
	struct isci_host *ihost = ireq->owning_controller;

	state = ireq->sm.current_state_id;
	if (WARN_ONCE(state != SCI_REQ_COMPLETED,
		      "isci: request completion from wrong state (%s)\n",
		      req_state_name(state)))
		return SCI_FAILURE_INVALID_STATE;

	if (ireq->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX)
		sci_controller_release_frame(ihost,
					     ireq->saved_rx_frame_index);

	/* XXX can we just stop the machine and remove the 'final' state? */
	sci_change_state(&ireq->sm, SCI_REQ_FINAL);
	return SCI_SUCCESS;
}

enum sci_status sci_io_request_event_handler(struct isci_request *ireq,
					     u32 event_code)
{
	enum sci_base_request_states state;
	struct isci_host *ihost = ireq->owning_controller;

	state = ireq->sm.current_state_id;

	if (state != SCI_REQ_STP_PIO_DATA_IN) {
		dev_warn(&ihost->pdev->dev, "%s: (%x) in wrong state %s\n",
			 __func__, event_code, req_state_name(state));

		return SCI_FAILURE_INVALID_STATE;
	}

	switch (scu_get_event_specifier(event_code)) {
	case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
		/* We are waiting for data and the SCU has R_ERR'd the data
		 * frame.  Go back to waiting for the D2H Register FIS
		 */
		sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
		return SCI_SUCCESS;
	default:
		dev_err(&ihost->pdev->dev,
			"%s: pio request unexpected event %#x\n",
			__func__, event_code);

		/* TODO Should we fail the PIO request when we get an
		 * unexpected event?
		 */
		return SCI_FAILURE;
	}
}

/*
 * This function copies response data for requests returning response data
 * instead of sense data.
 * @sci_req: This parameter specifies the request object for which to copy
 *    the response data.
 */
static void sci_io_request_copy_response(struct isci_request *ireq)
{
	void *resp_buf;
	u32 len;
	struct ssp_response_iu *ssp_response;
	struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);

	ssp_response = &ireq->ssp.rsp;

	resp_buf = &isci_tmf->resp.resp_iu;

	len = min_t(u32,
		    SSP_RESP_IU_MAX_SIZE,
		    be32_to_cpu(ssp_response->response_data_len));

	memcpy(resp_buf, ssp_response->resp_data, len);
}

static enum sci_status
request_started_state_tc_event(struct isci_request *ireq,
			       u32 completion_code)
{
	struct ssp_response_iu *resp_iu;
	u8 datapres;

	/* TODO: Any SDMA return code other than 0 is bad; decode bits
	 * 0x003C0000 to determine the SDMA status
	 */
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): {
		/* There are times when the SCU hardware will return an early
		 * response because the io request specified more data than is
		 * returned by the target device (mode pages, inquiry data,
		 * etc.).  We must check the response status to see if this is
		 * truly a failed request or a good request that just got
		 * completed early.
		 */
		struct ssp_response_iu *resp = &ireq->ssp.rsp;
		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

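		/*
		 * SSP frames are big-endian on the wire; swab the response
		 * IU in place (note dest == src below) before inspecting
		 * its status byte.
		 */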
		sci_swab32_cpy(&ireq->ssp.rsp,
			       &ireq->ssp.rsp,
			       word_cnt);

		if (resp->status == 0) {
			ireq->scu_status = SCU_TASK_DONE_GOOD;
			ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY;
		} else {
			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
		}
		break;
	}
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): {
		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

		sci_swab32_cpy(&ireq->ssp.rsp,
			       &ireq->ssp.rsp,
			       word_cnt);

		ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
		ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
		break;
	}

	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR):
		/* TODO: With TASK_DONE_RESP_LEN_ERR is the response frame
		 * guaranteed to be received before this completion status is
		 * posted?
		 */
		resp_iu = &ireq->ssp.rsp;
		datapres = resp_iu->datapres;

		if (datapres == SAS_DATAPRES_RESPONSE_DATA ||
		    datapres == SAS_DATAPRES_SENSE_DATA) {
			ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE;
			ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID;
		} else {
			ireq->scu_status = SCU_TASK_DONE_GOOD;
			ireq->sci_status = SCI_SUCCESS;
		}
		break;
	/* only stp device gets suspended. */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
		if (ireq->protocol == SAS_PROTOCOL_STP) {
			ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
					   SCU_COMPLETION_TL_STATUS_SHIFT;
			ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
		} else {
			ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
					   SCU_COMPLETION_TL_STATUS_SHIFT;
			ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		}
		break;

	/* both stp/ssp device gets suspended */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
		ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
				   SCU_COMPLETION_TL_STATUS_SHIFT;
		ireq->sci_status = SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED;
		break;

	/* neither ssp nor stp gets suspended. */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
	default:
		ireq->scu_status = SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
				   SCU_COMPLETION_TL_STATUS_SHIFT;
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		break;
	}

	/*
	 * TODO: This is probably wrong for ACK/NAK timeout conditions
	 */

	/* In all cases we will treat this as the completion of the IO req. */
	sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
	return SCI_SUCCESS;
}

static enum sci_status
request_aborting_state_tc_event(struct isci_request *ireq,
				u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
	case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
		ireq->scu_status = SCU_TASK_DONE_TASK_ABORT;
		ireq->sci_status = SCI_FAILURE_IO_TERMINATED;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;

	default:
		/* Unless we get some strange error, wait for the task abort
		 * to complete.
		 * TODO: Should there be a state change for this completion?
		 */
		break;
	}

	return SCI_SUCCESS;
}

static enum sci_status ssp_task_request_await_tc_event(struct isci_request *ireq,
						       u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
		/* Currently, the decision is to simply allow the task request
		 * to timeout if the task IU wasn't received successfully.
		 * There is a potential for receiving multiple task responses
		 * if we decide to send the task IU again.
		 */
		dev_warn(&ireq->owning_controller->pdev->dev,
			 "%s: TaskRequest:0x%p CompletionCode:%x - "
			 "ACK/NAK timeout\n", __func__, ireq,
			 completion_code);

		sci_change_state(&ireq->sm, SCI_REQ_TASK_WAIT_TC_RESP);
		break;
	default:
		/*
		 * All other completion status cause the IO to be complete.
		 * If a NAK was received, then it is up to the user to retry
		 * the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

static enum sci_status
smp_request_await_response_tc_event(struct isci_request *ireq,
				    u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		/* In the AWAIT RESPONSE state, any TC completion is
		 * unexpected, but if the TC has success status, we
		 * complete the IO anyway.
		 */
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
		/* These statuses have been seen in a specific LSI
		 * expander, which sometimes is not able to send an smp
		 * response within 2 ms.  This causes our hardware to
		 * break the connection and set the TC completion with
		 * one of these SMP_XXX_XX_ERR statuses.  For this type
		 * of error, we ask the user to retry the request.
		 */
		ireq->scu_status = SCU_TASK_DONE_SMP_RESP_TO_ERR;
		ireq->sci_status = SCI_FAILURE_RETRY_REQUIRED;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	default:
		/* All other completion status cause the IO to be complete.
		 * If a NAK was received, then it is up to the user to retry
		 * the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

static enum sci_status
smp_request_await_tc_event(struct isci_request *ireq,
			   u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		ireq->scu_status = SCU_TASK_DONE_GOOD;
		ireq->sci_status = SCI_SUCCESS;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	default:
		/* All other completion status cause the IO to be
		 * complete.  If a NAK was received, then it is up to
		 * the user to retry the request.
		 */
		ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code);
		ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR;
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

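/*
 * Advance the PIO SGL cursor: within a pair, move from element A to B
 * (unless B is empty), then follow the pair's next_pair link back to
 * element A of the next pair.  A NULL return means the list is exhausted.
 */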
1252 | static struct scu_sgl_element *pio_sgl_next(struct isci_stp_request *stp_req) |
1253 | { |
1254 | struct scu_sgl_element *sgl; |
1255 | struct scu_sgl_element_pair *sgl_pair; |
1256 | struct isci_request *ireq = to_ireq(stp_req); |
1257 | struct isci_stp_pio_sgl *pio_sgl = &stp_req->sgl; |
1258 | |
1259 | sgl_pair = to_sgl_element_pair(ireq, idx: pio_sgl->index); |
1260 | if (!sgl_pair) |
1261 | sgl = NULL; |
1262 | else if (pio_sgl->set == SCU_SGL_ELEMENT_PAIR_A) { |
1263 | if (sgl_pair->B.address_lower == 0 && |
1264 | sgl_pair->B.address_upper == 0) { |
1265 | sgl = NULL; |
1266 | } else { |
1267 | pio_sgl->set = SCU_SGL_ELEMENT_PAIR_B; |
1268 | sgl = &sgl_pair->B; |
1269 | } |
1270 | } else { |
1271 | if (sgl_pair->next_pair_lower == 0 && |
1272 | sgl_pair->next_pair_upper == 0) { |
1273 | sgl = NULL; |
1274 | } else { |
1275 | pio_sgl->index++; |
1276 | pio_sgl->set = SCU_SGL_ELEMENT_PAIR_A; |
1277 | sgl_pair = to_sgl_element_pair(ireq, idx: pio_sgl->index); |
1278 | sgl = &sgl_pair->A; |
1279 | } |
1280 | } |
1281 | |
1282 | return sgl; |
1283 | } |
1284 | |
1285 | static enum sci_status |
1286 | stp_request_non_data_await_h2d_tc_event(struct isci_request *ireq, |
1287 | u32 completion_code) |
1288 | { |
1289 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { |
1290 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): |
1291 | ireq->scu_status = SCU_TASK_DONE_GOOD; |
1292 | ireq->sci_status = SCI_SUCCESS; |
1293 | sci_change_state(sm: &ireq->sm, next_state: SCI_REQ_STP_NON_DATA_WAIT_D2H); |
1294 | break; |
1295 | |
1296 | default: |
1297 | /* All other completion status cause the IO to be |
1298 | * complete. If a NAK was received, then it is up to |
1299 | * the user to retry the request. |
1300 | */ |
1301 | ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); |
1302 | ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; |
1303 | sci_change_state(sm: &ireq->sm, next_state: SCI_REQ_COMPLETED); |
1304 | break; |
1305 | } |
1306 | |
1307 | return SCI_SUCCESS; |
1308 | } |
1309 | |
1310 | #define SCU_MAX_FRAME_BUFFER_SIZE 0x400 /* 1K is the maximum SCU frame data payload */ |
1311 | |
1312 | /* transmit DATA_FIS from (current sgl + offset) for input |
1313 | * parameter length. current sgl and offset is alreay stored in the IO request |
1314 | */ |
1315 | static enum sci_status sci_stp_request_pio_data_out_trasmit_data_frame( |
1316 | struct isci_request *ireq, |
1317 | u32 length) |
1318 | { |
1319 | struct isci_stp_request *stp_req = &ireq->stp.req; |
1320 | struct scu_task_context *task_context = ireq->tc; |
1321 | struct scu_sgl_element_pair *sgl_pair; |
1322 | struct scu_sgl_element *current_sgl; |
1323 | |
1324 | /* Recycle the TC and reconstruct it for sending out DATA FIS containing |
1325 | * for the data from current_sgl+offset for the input length |
1326 | */ |
1327 | sgl_pair = to_sgl_element_pair(ireq, idx: stp_req->sgl.index); |
1328 | if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) |
1329 | current_sgl = &sgl_pair->A; |
1330 | else |
1331 | current_sgl = &sgl_pair->B; |
1332 | |
1333 | /* update the TC */ |
1334 | task_context->command_iu_upper = current_sgl->address_upper; |
1335 | task_context->command_iu_lower = current_sgl->address_lower; |
1336 | task_context->transfer_length_bytes = length; |
1337 | task_context->type.stp.fis_type = FIS_DATA; |
1338 | |
1339 | /* send the new TC out. */ |
1340 | return sci_controller_continue_io(ireq); |
1341 | } |
1342 | |
1343 | static enum sci_status sci_stp_request_pio_data_out_transmit_data(struct isci_request *ireq) |
1344 | { |
1345 | struct isci_stp_request *stp_req = &ireq->stp.req; |
1346 | struct scu_sgl_element_pair *sgl_pair; |
1347 | enum sci_status status = SCI_SUCCESS; |
1348 | struct scu_sgl_element *sgl; |
1349 | u32 offset; |
1350 | u32 len = 0; |
1351 | |
1352 | offset = stp_req->sgl.offset; |
1353 | sgl_pair = to_sgl_element_pair(ireq, idx: stp_req->sgl.index); |
1354 | if (WARN_ONCE(!sgl_pair, "%s: null sgl element" , __func__)) |
1355 | return SCI_FAILURE; |
1356 | |
1357 | if (stp_req->sgl.set == SCU_SGL_ELEMENT_PAIR_A) { |
1358 | sgl = &sgl_pair->A; |
1359 | len = sgl_pair->A.length - offset; |
1360 | } else { |
1361 | sgl = &sgl_pair->B; |
1362 | len = sgl_pair->B.length - offset; |
1363 | } |
1364 | |
1365 | if (stp_req->pio_len == 0) |
1366 | return SCI_SUCCESS; |
1367 | |
1368 | if (stp_req->pio_len >= len) { |
1369 | status = sci_stp_request_pio_data_out_trasmit_data_frame(ireq, length: len); |
1370 | if (status != SCI_SUCCESS) |
1371 | return status; |
1372 | stp_req->pio_len -= len; |
1373 | |
1374 | /* update the current sgl, offset and save for future */ |
1375 | sgl = pio_sgl_next(stp_req); |
1376 | offset = 0; |
1377 | } else if (stp_req->pio_len < len) { |
1378 | sci_stp_request_pio_data_out_trasmit_data_frame(ireq, length: stp_req->pio_len); |
1379 | |
1380 | /* Sgl offset will be adjusted and saved for future */ |
1381 | offset += stp_req->pio_len; |
1382 | sgl->address_lower += stp_req->pio_len; |
1383 | stp_req->pio_len = 0; |
1384 | } |
1385 | |
1386 | stp_req->sgl.offset = offset; |
1387 | |
1388 | return status; |
1389 | } |
1390 | |
1391 | /** |
1392 | * sci_stp_request_pio_data_in_copy_data_buffer() |
1393 | * @stp_req: The request that is used for the SGL processing. |
1394 | * @data_buf: The buffer of data to be copied. |
1395 | * @len: The length of the data transfer. |
1396 | * |
1397 | * Copy the data from the buffer for the length specified to the IO request SGL |
1398 | * specified data region. enum sci_status |
1399 | */ |
1400 | static enum sci_status |
1401 | sci_stp_request_pio_data_in_copy_data_buffer(struct isci_stp_request *stp_req, |
1402 | u8 *data_buf, u32 len) |
1403 | { |
1404 | struct isci_request *ireq; |
1405 | u8 *src_addr; |
1406 | int copy_len; |
1407 | struct sas_task *task; |
1408 | struct scatterlist *sg; |
1409 | void *kaddr; |
1410 | int total_len = len; |
1411 | |
1412 | ireq = to_ireq(stp_req); |
1413 | task = isci_request_access_task(ireq); |
1414 | src_addr = data_buf; |
1415 | |
1416 | if (task->num_scatter > 0) { |
1417 | sg = task->scatter; |
1418 | |
1419 | while (total_len > 0) { |
1420 | struct page *page = sg_page(sg); |
1421 | |
1422 | copy_len = min_t(int, total_len, sg_dma_len(sg)); |
1423 | kaddr = kmap_atomic(page); |
1424 | memcpy(kaddr + sg->offset, src_addr, copy_len); |
1425 | kunmap_atomic(kaddr); |
1426 | total_len -= copy_len; |
1427 | src_addr += copy_len; |
1428 | sg = sg_next(sg); |
1429 | } |
1430 | } else { |
1431 | BUG_ON(task->total_xfer_len < total_len); |
1432 | memcpy(task->scatter, src_addr, total_len); |
1433 | } |
1434 | |
1435 | return SCI_SUCCESS; |
1436 | } |
1437 | |
1438 | /** |
1439 | * sci_stp_request_pio_data_in_copy_data() |
1440 | * @stp_req: The PIO DATA IN request that is to receive the data. |
1441 | * @data_buffer: The buffer to copy from. |
1442 | * |
1443 | * Copy the data buffer to the io request data region. enum sci_status |
1444 | */ |
1445 | static enum sci_status sci_stp_request_pio_data_in_copy_data( |
1446 | struct isci_stp_request *stp_req, |
1447 | u8 *data_buffer) |
1448 | { |
1449 | enum sci_status status; |
1450 | |
1451 | /* |
1452 | * If there is less than 1K remaining in the transfer request |
1453 | * copy just the data for the transfer */ |
1454 | if (stp_req->pio_len < SCU_MAX_FRAME_BUFFER_SIZE) { |
1455 | status = sci_stp_request_pio_data_in_copy_data_buffer( |
1456 | stp_req, data_buf: data_buffer, len: stp_req->pio_len); |
1457 | |
1458 | if (status == SCI_SUCCESS) |
1459 | stp_req->pio_len = 0; |
1460 | } else { |
1461 | /* We are transfering the whole frame so copy */ |
1462 | status = sci_stp_request_pio_data_in_copy_data_buffer( |
1463 | stp_req, data_buf: data_buffer, SCU_MAX_FRAME_BUFFER_SIZE); |
1464 | |
1465 | if (status == SCI_SUCCESS) |
1466 | stp_req->pio_len -= SCU_MAX_FRAME_BUFFER_SIZE; |
1467 | } |
1468 | |
1469 | return status; |
1470 | } |
1471 | |
1472 | static enum sci_status |
1473 | stp_request_pio_await_h2d_completion_tc_event(struct isci_request *ireq, |
1474 | u32 completion_code) |
1475 | { |
1476 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { |
1477 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): |
1478 | ireq->scu_status = SCU_TASK_DONE_GOOD; |
1479 | ireq->sci_status = SCI_SUCCESS; |
1480 | sci_change_state(sm: &ireq->sm, next_state: SCI_REQ_STP_PIO_WAIT_FRAME); |
1481 | break; |
1482 | |
1483 | default: |
1484 | /* All other completion status cause the IO to be |
1485 | * complete. If a NAK was received, then it is up to |
1486 | * the user to retry the request. |
1487 | */ |
1488 | ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); |
1489 | ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; |
1490 | sci_change_state(sm: &ireq->sm, next_state: SCI_REQ_COMPLETED); |
1491 | break; |
1492 | } |
1493 | |
1494 | return SCI_SUCCESS; |
1495 | } |
1496 | |
1497 | static enum sci_status |
1498 | pio_data_out_tx_done_tc_event(struct isci_request *ireq, |
1499 | u32 completion_code) |
1500 | { |
1501 | enum sci_status status = SCI_SUCCESS; |
1502 | bool all_frames_transferred = false; |
1503 | struct isci_stp_request *stp_req = &ireq->stp.req; |
1504 | |
1505 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { |
1506 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): |
1507 | /* Transmit data */ |
1508 | if (stp_req->pio_len != 0) { |
1509 | status = sci_stp_request_pio_data_out_transmit_data(ireq); |
1510 | if (status == SCI_SUCCESS) { |
1511 | if (stp_req->pio_len == 0) |
1512 | all_frames_transferred = true; |
1513 | } |
1514 | } else if (stp_req->pio_len == 0) { |
1515 | /* |
1516 | * this will happen if the all data is written at the |
1517 | * first time after the pio setup fis is received |
1518 | */ |
1519 | all_frames_transferred = true; |
1520 | } |
1521 | |
1522 | /* all data transferred. */ |
1523 | if (all_frames_transferred) { |
1524 | /* |
1525 | * Change the state to SCI_REQ_STP_PIO_DATA_IN |
1526 | * and wait for PIO_SETUP fis / or D2H REg fis. */ |
1527 | sci_change_state(sm: &ireq->sm, next_state: SCI_REQ_STP_PIO_WAIT_FRAME); |
1528 | } |
1529 | break; |
1530 | |
1531 | default: |
1532 | /* |
1533 | * All other completion status cause the IO to be complete. |
1534 | * If a NAK was received, then it is up to the user to retry |
1535 | * the request. |
1536 | */ |
1537 | ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); |
1538 | ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; |
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1540 | break; |
1541 | } |
1542 | |
1543 | return status; |
1544 | } |
1545 | |
1546 | static enum sci_status sci_stp_request_udma_general_frame_handler(struct isci_request *ireq, |
1547 | u32 frame_index) |
1548 | { |
1549 | struct isci_host *ihost = ireq->owning_controller; |
	struct dev_to_host_fis *frame_header;
1551 | enum sci_status status; |
1552 | u32 *frame_buffer; |
1553 | |
	status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
							  frame_index,
							  (void **)&frame_header);
1557 | |
1558 | if ((status == SCI_SUCCESS) && |
1559 | (frame_header->fis_type == FIS_REGD2H)) { |
		sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
							 frame_index,
							 (void **)&frame_buffer);
1563 | |
		sci_controller_copy_sata_response(&ireq->stp.rsp,
1565 | frame_header, |
1566 | frame_buffer); |
1567 | } |
1568 | |
1569 | sci_controller_release_frame(ihost, frame_index); |
1570 | |
1571 | return status; |
1572 | } |
1573 | |
1574 | static enum sci_status process_unsolicited_fis(struct isci_request *ireq, |
1575 | u32 frame_index) |
1576 | { |
1577 | struct isci_host *ihost = ireq->owning_controller; |
1578 | enum sci_status status; |
	struct dev_to_host_fis *frame_header;
1580 | u32 *frame_buffer; |
1581 | |
	status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
							  frame_index,
							  (void **)&frame_header);
1585 | |
1586 | if (status != SCI_SUCCESS) |
1587 | return status; |
1588 | |
1589 | if (frame_header->fis_type != FIS_REGD2H) { |
1590 | dev_err(&ireq->isci_host->pdev->dev, |
1591 | "%s ERROR: invalid fis type 0x%X\n" , |
1592 | __func__, frame_header->fis_type); |
1593 | return SCI_FAILURE; |
1594 | } |
1595 | |
	sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
						 frame_index,
						 (void **)&frame_buffer);
1599 | |
	sci_controller_copy_sata_response(&ireq->stp.rsp,
					  (u32 *)frame_header,
1602 | frame_buffer); |
1603 | |
	/* Frame has been decoded; return it to the controller */
1605 | sci_controller_release_frame(ihost, frame_index); |
1606 | |
1607 | return status; |
1608 | } |
1609 | |
1610 | static enum sci_status atapi_d2h_reg_frame_handler(struct isci_request *ireq, |
1611 | u32 frame_index) |
1612 | { |
1613 | struct sas_task *task = isci_request_access_task(ireq); |
1614 | enum sci_status status; |
1615 | |
1616 | status = process_unsolicited_fis(ireq, frame_index); |
1617 | |
1618 | if (status == SCI_SUCCESS) { |
1619 | if (ireq->stp.rsp.status & ATA_ERR) |
1620 | status = SCI_FAILURE_IO_RESPONSE_VALID; |
1621 | } else { |
1622 | status = SCI_FAILURE_IO_RESPONSE_VALID; |
1623 | } |
1624 | |
1625 | if (status != SCI_SUCCESS) { |
1626 | ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; |
1627 | ireq->sci_status = status; |
1628 | } else { |
1629 | ireq->scu_status = SCU_TASK_DONE_GOOD; |
1630 | ireq->sci_status = SCI_SUCCESS; |
1631 | } |
1632 | |
	/* the d2h unsolicited frame marks the end of non-data commands */
	if (task->data_dir == DMA_NONE)
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1636 | |
1637 | return status; |
1638 | } |
1639 | |
1640 | static void scu_atapi_reconstruct_raw_frame_task_context(struct isci_request *ireq) |
1641 | { |
	struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev);
1643 | void *atapi_cdb = ireq->ttype_ptr.io_task_ptr->ata_task.atapi_packet; |
1644 | struct scu_task_context *task_context = ireq->tc; |
1645 | |
	/* fill in the SCU Task Context for a DATA fis containing the CDB in
	 * Raw Frame type. The TC for the previous Packet fis is already in
	 * place; we only need to change the H2D fis content.
	 */
1650 | memset(&ireq->stp.cmd, 0, sizeof(struct host_to_dev_fis)); |
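	/* Drop the CDB in just past the first dword of the fis area so the
	 * raw DATA frame carries the packet command after the fis header.
	 */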
1651 | memcpy(((u8 *)&ireq->stp.cmd + sizeof(u32)), atapi_cdb, ATAPI_CDB_LEN); |
1652 | memset(&(task_context->type.stp), 0, sizeof(struct stp_task_context)); |
1653 | task_context->type.stp.fis_type = FIS_DATA; |
1654 | task_context->transfer_length_bytes = dev->cdb_len; |
1655 | } |
1656 | |
1657 | static void scu_atapi_construct_task_context(struct isci_request *ireq) |
1658 | { |
	struct ata_device *dev = sas_to_ata_dev(ireq->target_device->domain_dev);
1660 | struct sas_task *task = isci_request_access_task(ireq); |
1661 | struct scu_task_context *task_context = ireq->tc; |
1662 | int cdb_len = dev->cdb_len; |
1663 | |
1664 | /* reference: SSTL 1.13.4.2 |
1665 | * task_type, sata_direction |
1666 | */ |
1667 | if (task->data_dir == DMA_TO_DEVICE) { |
1668 | task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_OUT; |
1669 | task_context->sata_direction = 0; |
1670 | } else { |
1671 | /* todo: for NO_DATA command, we need to send out raw frame. */ |
1672 | task_context->task_type = SCU_TASK_TYPE_PACKET_DMA_IN; |
1673 | task_context->sata_direction = 1; |
1674 | } |
1675 | |
1676 | memset(&task_context->type.stp, 0, sizeof(task_context->type.stp)); |
1677 | task_context->type.stp.fis_type = FIS_DATA; |
1678 | |
1679 | memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd)); |
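	/* The ATAPI CDB starts at the lbal field, i.e. byte offset 4 of the
	 * H2D fis, matching the raw-frame layout used above.
	 */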
1680 | memcpy(&ireq->stp.cmd.lbal, task->ata_task.atapi_packet, cdb_len); |
1681 | task_context->ssp_command_iu_length = cdb_len / sizeof(u32); |
1682 | |
1683 | /* task phase is set to TX_CMD */ |
1684 | task_context->task_phase = 0x1; |
1685 | |
1686 | /* retry counter */ |
1687 | task_context->stp_retry_count = 0; |
1688 | |
1689 | /* data transfer size. */ |
1690 | task_context->transfer_length_bytes = task->total_xfer_len; |
1691 | |
1692 | /* setup sgl */ |
1693 | sci_request_build_sgl(ireq); |
1694 | } |
1695 | |
1696 | enum sci_status |
1697 | sci_io_request_frame_handler(struct isci_request *ireq, |
1698 | u32 frame_index) |
1699 | { |
1700 | struct isci_host *ihost = ireq->owning_controller; |
1701 | struct isci_stp_request *stp_req = &ireq->stp.req; |
1702 | enum sci_base_request_states state; |
1703 | enum sci_status status; |
1704 | ssize_t word_cnt; |
1705 | |
1706 | state = ireq->sm.current_state_id; |
1707 | switch (state) { |
1708 | case SCI_REQ_STARTED: { |
1709 | struct ssp_frame_hdr ssp_hdr; |
		void *frame_header;
1711 | |
		sci_unsolicited_frame_control_get_header(&ihost->uf_control,
							 frame_index,
							 &frame_header);
1715 | |
1716 | word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32); |
		sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt);
1718 | |
1719 | if (ssp_hdr.frame_type == SSP_RESPONSE) { |
1720 | struct ssp_response_iu *resp_iu; |
1721 | ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32); |
1722 | |
			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
								 frame_index,
								 (void **)&resp_iu);
1726 | |
			sci_swab32_cpy(&ireq->ssp.rsp, resp_iu, word_cnt);
1728 | |
1729 | resp_iu = &ireq->ssp.rsp; |
1730 | |
1731 | if (resp_iu->datapres == SAS_DATAPRES_RESPONSE_DATA || |
1732 | resp_iu->datapres == SAS_DATAPRES_SENSE_DATA) { |
1733 | ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; |
1734 | ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; |
1735 | } else { |
1736 | ireq->scu_status = SCU_TASK_DONE_GOOD; |
1737 | ireq->sci_status = SCI_SUCCESS; |
1738 | } |
1739 | } else { |
1740 | /* not a response frame, why did it get forwarded? */ |
1741 | dev_err(&ihost->pdev->dev, |
1742 | "%s: SCIC IO Request 0x%p received unexpected " |
1743 | "frame %d type 0x%02x\n" , __func__, ireq, |
1744 | frame_index, ssp_hdr.frame_type); |
1745 | } |
1746 | |
1747 | /* |
1748 | * In any case we are done with this frame buffer return it to |
1749 | * the controller |
1750 | */ |
1751 | sci_controller_release_frame(ihost, frame_index); |
1752 | |
1753 | return SCI_SUCCESS; |
1754 | } |
1755 | |
1756 | case SCI_REQ_TASK_WAIT_TC_RESP: |
1757 | sci_io_request_copy_response(ireq); |
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1759 | sci_controller_release_frame(ihost, frame_index); |
1760 | return SCI_SUCCESS; |
1761 | |
1762 | case SCI_REQ_SMP_WAIT_RESP: { |
1763 | struct sas_task *task = isci_request_access_task(ireq); |
1764 | struct scatterlist *sg = &task->smp_task.smp_resp; |
		void *frame_header, *kaddr;
1766 | u8 *rsp; |
1767 | |
		sci_unsolicited_frame_control_get_header(&ihost->uf_control,
							 frame_index,
							 &frame_header);
		kaddr = kmap_atomic(sg_page(sg));
1772 | rsp = kaddr + sg->offset; |
		sci_swab32_cpy(rsp, frame_header, 1);
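		/* The first dword of the SMP response arrives in the frame
		 * header; the rest sits in the frame buffer, hence the
		 * rsp + 4 copy below, capped to the unsolicited frame size.
		 */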
1774 | |
1775 | if (rsp[0] == SMP_RESPONSE) { |
1776 | void *smp_resp; |
1777 | |
			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
								 frame_index,
								 &smp_resp);
1781 | |
1782 | word_cnt = (sg->length/4)-1; |
1783 | if (word_cnt > 0) |
1784 | word_cnt = min_t(unsigned int, word_cnt, |
1785 | SCU_UNSOLICITED_FRAME_BUFFER_SIZE/4); |
			sci_swab32_cpy(rsp + 4, smp_resp, word_cnt);
1787 | |
1788 | ireq->scu_status = SCU_TASK_DONE_GOOD; |
1789 | ireq->sci_status = SCI_SUCCESS; |
			sci_change_state(&ireq->sm, SCI_REQ_SMP_WAIT_TC_COMP);
1791 | } else { |
1792 | /* |
1793 | * This was not a response frame why did it get |
1794 | * forwarded? |
1795 | */ |
1796 | dev_err(&ihost->pdev->dev, |
1797 | "%s: SCIC SMP Request 0x%p received unexpected " |
1798 | "frame %d type 0x%02x\n" , |
1799 | __func__, |
1800 | ireq, |
1801 | frame_index, |
1802 | rsp[0]); |
1803 | |
1804 | ireq->scu_status = SCU_TASK_DONE_SMP_FRM_TYPE_ERR; |
1805 | ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; |
			sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1807 | } |
1808 | kunmap_atomic(kaddr); |
1809 | |
1810 | sci_controller_release_frame(ihost, frame_index); |
1811 | |
1812 | return SCI_SUCCESS; |
1813 | } |
1814 | |
1815 | case SCI_REQ_STP_UDMA_WAIT_TC_COMP: |
1816 | return sci_stp_request_udma_general_frame_handler(ireq, |
1817 | frame_index); |
1818 | |
1819 | case SCI_REQ_STP_UDMA_WAIT_D2H: |
		/* Use the general frame handler to copy the response data */
1821 | status = sci_stp_request_udma_general_frame_handler(ireq, frame_index); |
1822 | |
1823 | if (status != SCI_SUCCESS) |
1824 | return status; |
1825 | |
1826 | ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; |
1827 | ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; |
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1829 | return SCI_SUCCESS; |
1830 | |
1831 | case SCI_REQ_STP_NON_DATA_WAIT_D2H: { |
		struct dev_to_host_fis *frame_header;
1833 | u32 *frame_buffer; |
1834 | |
		status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
								  frame_index,
								  (void **)&frame_header);
1838 | |
1839 | if (status != SCI_SUCCESS) { |
1840 | dev_err(&ihost->pdev->dev, |
1841 | "%s: SCIC IO Request 0x%p could not get frame " |
1842 | "header for frame index %d, status %x\n" , |
1843 | __func__, |
1844 | stp_req, |
1845 | frame_index, |
1846 | status); |
1847 | |
1848 | return status; |
1849 | } |
1850 | |
1851 | switch (frame_header->fis_type) { |
1852 | case FIS_REGD2H: |
			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
								 frame_index,
								 (void **)&frame_buffer);
1856 | |
			sci_controller_copy_sata_response(&ireq->stp.rsp,
1858 | frame_header, |
1859 | frame_buffer); |
1860 | |
1861 | /* The command has completed with error */ |
1862 | ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; |
1863 | ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; |
1864 | break; |
1865 | |
1866 | default: |
1867 | dev_warn(&ihost->pdev->dev, |
1868 | "%s: IO Request:0x%p Frame Id:%d protocol " |
1869 | "violation occurred\n" , __func__, stp_req, |
1870 | frame_index); |
1871 | |
1872 | ireq->scu_status = SCU_TASK_DONE_UNEXP_FIS; |
1873 | ireq->sci_status = SCI_FAILURE_PROTOCOL_VIOLATION; |
1874 | break; |
1875 | } |
1876 | |
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1878 | |
		/* Frame has been decoded; return it to the controller */
1880 | sci_controller_release_frame(ihost, frame_index); |
1881 | |
1882 | return status; |
1883 | } |
1884 | |
1885 | case SCI_REQ_STP_PIO_WAIT_FRAME: { |
1886 | struct sas_task *task = isci_request_access_task(ireq); |
		struct dev_to_host_fis *frame_header;
1888 | u32 *frame_buffer; |
1889 | |
		status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
								  frame_index,
								  (void **)&frame_header);
1893 | |
1894 | if (status != SCI_SUCCESS) { |
1895 | dev_err(&ihost->pdev->dev, |
1896 | "%s: SCIC IO Request 0x%p could not get frame " |
1897 | "header for frame index %d, status %x\n" , |
1898 | __func__, stp_req, frame_index, status); |
1899 | return status; |
1900 | } |
1901 | |
1902 | switch (frame_header->fis_type) { |
1903 | case FIS_PIO_SETUP: |
			/* Get the PIO Setup Data from the frame buffer */
			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
								 frame_index,
								 (void **)&frame_buffer);
1908 | |
			/* Get the data from the PIO Setup. The SCU hardware
			 * returns the first dword in the frame_header and the
			 * rest of the data in the frame buffer, so the buffer
			 * indices are backed up by one dword.
			 */
1914 | |
			/* transfer_count: first 16 bits in the 4th dword */
1916 | stp_req->pio_len = frame_buffer[3] & 0xffff; |
1917 | |
1918 | /* status: 4th byte in the 3rd dword */ |
1919 | stp_req->status = (frame_buffer[2] >> 24) & 0xff; |
1920 | |
			sci_controller_copy_sata_response(&ireq->stp.rsp,
1922 | frame_header, |
1923 | frame_buffer); |
1924 | |
1925 | ireq->stp.rsp.status = stp_req->status; |
1926 | |
1927 | /* The next state is dependent on whether the |
1928 | * request was PIO Data-in or Data out |
1929 | */ |
1930 | if (task->data_dir == DMA_FROM_DEVICE) { |
				sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_IN);
1932 | } else if (task->data_dir == DMA_TO_DEVICE) { |
1933 | /* Transmit data */ |
1934 | status = sci_stp_request_pio_data_out_transmit_data(ireq); |
1935 | if (status != SCI_SUCCESS) |
1936 | break; |
				sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_DATA_OUT);
1938 | } |
1939 | break; |
1940 | |
1941 | case FIS_SETDEVBITS: |
			sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
1943 | break; |
1944 | |
1945 | case FIS_REGD2H: |
1946 | if (frame_header->status & ATA_BUSY) { |
1947 | /* |
1948 | * Now why is the drive sending a D2H Register |
1949 | * FIS when it is still busy? Do nothing since |
1950 | * we are still in the right state. |
1951 | */ |
1952 | dev_dbg(&ihost->pdev->dev, |
1953 | "%s: SCIC PIO Request 0x%p received " |
1954 | "D2H Register FIS with BSY status " |
1955 | "0x%x\n" , |
1956 | __func__, |
1957 | stp_req, |
1958 | frame_header->status); |
1959 | break; |
1960 | } |
1961 | |
			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
								 frame_index,
								 (void **)&frame_buffer);
1965 | |
			sci_controller_copy_sata_response(&ireq->stp.rsp,
1967 | frame_header, |
1968 | frame_buffer); |
1969 | |
1970 | ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; |
1971 | ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; |
			sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
1973 | break; |
1974 | |
1975 | default: |
1976 | /* FIXME: what do we do here? */ |
1977 | break; |
1978 | } |
1979 | |
		/* Frame is decoded; return it to the controller */
1981 | sci_controller_release_frame(ihost, frame_index); |
1982 | |
1983 | return status; |
1984 | } |
1985 | |
1986 | case SCI_REQ_STP_PIO_DATA_IN: { |
		struct dev_to_host_fis *frame_header;
1988 | struct sata_fis_data *frame_buffer; |
1989 | |
		status = sci_unsolicited_frame_control_get_header(&ihost->uf_control,
								  frame_index,
								  (void **)&frame_header);
1993 | |
1994 | if (status != SCI_SUCCESS) { |
1995 | dev_err(&ihost->pdev->dev, |
1996 | "%s: SCIC IO Request 0x%p could not get frame " |
1997 | "header for frame index %d, status %x\n" , |
1998 | __func__, |
1999 | stp_req, |
2000 | frame_index, |
2001 | status); |
2002 | return status; |
2003 | } |
2004 | |
2005 | if (frame_header->fis_type != FIS_DATA) { |
2006 | dev_err(&ihost->pdev->dev, |
2007 | "%s: SCIC PIO Request 0x%p received frame %d " |
2008 | "with fis type 0x%02x when expecting a data " |
2009 | "fis.\n" , |
2010 | __func__, |
2011 | stp_req, |
2012 | frame_index, |
2013 | frame_header->fis_type); |
2014 | |
2015 | ireq->scu_status = SCU_TASK_DONE_GOOD; |
2016 | ireq->sci_status = SCI_FAILURE_IO_REQUIRES_SCSI_ABORT; |
			sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2018 | |
			/* Frame is decoded; return it to the controller */
2020 | sci_controller_release_frame(ihost, frame_index); |
2021 | return status; |
2022 | } |
2023 | |
2024 | if (stp_req->sgl.index < 0) { |
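			/* No SGL element is available for this data: save the
			 * frame index and deliberately skip releasing the
			 * frame so the payload can be copied out later.
			 */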
2025 | ireq->saved_rx_frame_index = frame_index; |
2026 | stp_req->pio_len = 0; |
2027 | } else { |
			sci_unsolicited_frame_control_get_buffer(&ihost->uf_control,
								 frame_index,
								 (void **)&frame_buffer);
2031 | |
2032 | status = sci_stp_request_pio_data_in_copy_data(stp_req, |
								       (u8 *)frame_buffer);
2034 | |
			/* Frame is decoded; return it to the controller */
2036 | sci_controller_release_frame(ihost, frame_index); |
2037 | } |
2038 | |
		/* Check for the end of the transfer: are there more
		 * bytes remaining for this data transfer?
		 */
2042 | if (status != SCI_SUCCESS || stp_req->pio_len != 0) |
2043 | return status; |
2044 | |
2045 | if ((stp_req->status & ATA_BUSY) == 0) { |
2046 | ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; |
2047 | ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; |
			sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2049 | } else { |
			sci_change_state(&ireq->sm, SCI_REQ_STP_PIO_WAIT_FRAME);
2051 | } |
2052 | return status; |
2053 | } |
2054 | |
2055 | case SCI_REQ_ATAPI_WAIT_PIO_SETUP: { |
2056 | struct sas_task *task = isci_request_access_task(ireq); |
2057 | |
2058 | sci_controller_release_frame(ihost, frame_index); |
2059 | ireq->target_device->working_request = ireq; |
2060 | if (task->data_dir == DMA_NONE) { |
			sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_TC_COMP);
2062 | scu_atapi_reconstruct_raw_frame_task_context(ireq); |
2063 | } else { |
			sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H);
2065 | scu_atapi_construct_task_context(ireq); |
2066 | } |
2067 | |
2068 | sci_controller_continue_io(ireq); |
2069 | return SCI_SUCCESS; |
2070 | } |
2071 | case SCI_REQ_ATAPI_WAIT_D2H: |
2072 | return atapi_d2h_reg_frame_handler(ireq, frame_index); |
2073 | case SCI_REQ_ABORTING: |
2074 | /* |
2075 | * TODO: Is it even possible to get an unsolicited frame in the |
2076 | * aborting state? |
2077 | */ |
2078 | sci_controller_release_frame(ihost, frame_index); |
2079 | return SCI_SUCCESS; |
2080 | |
2081 | default: |
2082 | dev_warn(&ihost->pdev->dev, |
2083 | "%s: SCIC IO Request given unexpected frame %x while " |
2084 | "in state %d\n" , |
2085 | __func__, |
2086 | frame_index, |
2087 | state); |
2088 | |
2089 | sci_controller_release_frame(ihost, frame_index); |
2090 | return SCI_FAILURE_INVALID_STATE; |
2091 | } |
2092 | } |
2093 | |
2094 | static enum sci_status stp_request_udma_await_tc_event(struct isci_request *ireq, |
2095 | u32 completion_code) |
2096 | { |
2097 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { |
2098 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): |
2099 | ireq->scu_status = SCU_TASK_DONE_GOOD; |
2100 | ireq->sci_status = SCI_SUCCESS; |
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2102 | break; |
2103 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS): |
2104 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR): |
		/* We must check the response buffer to see if the D2H
2106 | * Register FIS was received before we got the TC |
2107 | * completion. |
2108 | */ |
2109 | if (ireq->stp.rsp.fis_type == FIS_REGD2H) { |
			sci_remote_device_suspend(ireq->target_device,
						  SCI_SW_SUSPEND_NORMAL);
2112 | |
2113 | ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; |
2114 | ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; |
			sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2116 | } else { |
2117 | /* If we have an error completion status for the |
2118 | * TC then we can expect a D2H register FIS from |
2119 | * the device so we must change state to wait |
2120 | * for it |
2121 | */ |
			sci_change_state(&ireq->sm, SCI_REQ_STP_UDMA_WAIT_D2H);
2123 | } |
2124 | break; |
2125 | |
2126 | /* TODO Check to see if any of these completion status need to |
2127 | * wait for the device to host register fis. |
2128 | */ |
2129 | /* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR |
2130 | * - this comes only for B0 |
2131 | */ |
2132 | default: |
2133 | /* All other completion status cause the IO to be complete. */ |
2134 | ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); |
2135 | ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; |
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2137 | break; |
2138 | } |
2139 | |
2140 | return SCI_SUCCESS; |
2141 | } |
2142 | |
2143 | static enum sci_status atapi_raw_completion(struct isci_request *ireq, u32 completion_code, |
2144 | enum sci_base_request_states next) |
2145 | { |
2146 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { |
2147 | case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD): |
2148 | ireq->scu_status = SCU_TASK_DONE_GOOD; |
2149 | ireq->sci_status = SCI_SUCCESS; |
		sci_change_state(&ireq->sm, next);
2151 | break; |
2152 | default: |
2153 | /* All other completion status cause the IO to be complete. |
2154 | * If a NAK was received, then it is up to the user to retry |
2155 | * the request. |
2156 | */ |
2157 | ireq->scu_status = SCU_NORMALIZE_COMPLETION_STATUS(completion_code); |
2158 | ireq->sci_status = SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR; |
2159 | |
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2161 | break; |
2162 | } |
2163 | |
2164 | return SCI_SUCCESS; |
2165 | } |
2166 | |
2167 | static enum sci_status atapi_data_tc_completion_handler(struct isci_request *ireq, |
2168 | u32 completion_code) |
2169 | { |
2170 | struct isci_remote_device *idev = ireq->target_device; |
2171 | struct dev_to_host_fis *d2h = &ireq->stp.rsp; |
2172 | enum sci_status status = SCI_SUCCESS; |
2173 | |
2174 | switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) { |
2175 | case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT): |
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2177 | break; |
2178 | |
2179 | case (SCU_TASK_DONE_UNEXP_FIS << SCU_COMPLETION_TL_STATUS_SHIFT): { |
2180 | u16 len = sci_req_tx_bytes(ireq); |
2181 | |
		/* likely non-error data underrun; work around the missing
		 * d2h frame from the controller
2184 | */ |
2185 | if (d2h->fis_type != FIS_REGD2H) { |
2186 | d2h->fis_type = FIS_REGD2H; |
2187 | d2h->flags = (1 << 6); |
2188 | d2h->status = 0x50; |
2189 | d2h->error = 0; |
2190 | d2h->lbal = 0; |
2191 | d2h->byte_count_low = len & 0xff; |
2192 | d2h->byte_count_high = len >> 8; |
2193 | d2h->device = 0xa0; |
2194 | d2h->lbal_exp = 0; |
2195 | d2h->lbam_exp = 0; |
2196 | d2h->lbah_exp = 0; |
2197 | d2h->_r_a = 0; |
2198 | d2h->sector_count = 0x3; |
2199 | d2h->sector_count_exp = 0; |
2200 | d2h->_r_b = 0; |
2201 | d2h->_r_c = 0; |
2202 | d2h->_r_d = 0; |
2203 | } |
2204 | |
2205 | ireq->scu_status = SCU_TASK_DONE_GOOD; |
2206 | ireq->sci_status = SCI_SUCCESS_IO_DONE_EARLY; |
2207 | status = ireq->sci_status; |
2208 | |
2209 | /* the hw will have suspended the rnc, so complete the |
2210 | * request upon pending resume |
2211 | */ |
		sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR);
2213 | break; |
2214 | } |
2215 | case (SCU_TASK_DONE_EXCESS_DATA << SCU_COMPLETION_TL_STATUS_SHIFT): |
		/* In this case, there is no UF coming after;
		 * complete the IO now.
		 */
2219 | ireq->scu_status = SCU_TASK_DONE_GOOD; |
2220 | ireq->sci_status = SCI_SUCCESS; |
		sci_change_state(&ireq->sm, SCI_REQ_COMPLETED);
2222 | break; |
2223 | |
2224 | default: |
2225 | if (d2h->fis_type == FIS_REGD2H) { |
			/* UF received; change the device state to ATAPI_ERROR */
2227 | status = ireq->sci_status; |
			sci_change_state(&idev->sm, SCI_STP_DEV_ATAPI_ERROR);
2229 | } else { |
2230 | /* If receiving any non-success TC status, no UF |
2231 | * received yet, then an UF for the status fis |
2232 | * is coming after (XXX: suspect this is |
2233 | * actually a protocol error or a bug like the |
2234 | * DONE_UNEXP_FIS case) |
2235 | */ |
2236 | ireq->scu_status = SCU_TASK_DONE_CHECK_RESPONSE; |
2237 | ireq->sci_status = SCI_FAILURE_IO_RESPONSE_VALID; |
2238 | |
			sci_change_state(&ireq->sm, SCI_REQ_ATAPI_WAIT_D2H);
2240 | } |
2241 | break; |
2242 | } |
2243 | |
2244 | return status; |
2245 | } |
2246 | |
2247 | static int sci_request_smp_completion_status_is_tx_suspend( |
2248 | unsigned int completion_status) |
2249 | { |
2250 | switch (completion_status) { |
2251 | case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION: |
2252 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: |
2253 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: |
2254 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: |
2255 | case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: |
2256 | case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: |
2257 | return 1; |
2258 | } |
2259 | return 0; |
2260 | } |
2261 | |
2262 | static int sci_request_smp_completion_status_is_tx_rx_suspend( |
2263 | unsigned int completion_status) |
2264 | { |
2265 | return 0; /* There are no Tx/Rx SMP suspend conditions. */ |
2266 | } |
2267 | |
2268 | static int sci_request_ssp_completion_status_is_tx_suspend( |
2269 | unsigned int completion_status) |
2270 | { |
2271 | switch (completion_status) { |
2272 | case SCU_TASK_DONE_TX_RAW_CMD_ERR: |
2273 | case SCU_TASK_DONE_LF_ERR: |
2274 | case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION: |
2275 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: |
2276 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: |
2277 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: |
2278 | case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: |
2279 | case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: |
2280 | case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY: |
2281 | case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED: |
2282 | case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED: |
2283 | return 1; |
2284 | } |
2285 | return 0; |
2286 | } |
2287 | |
2288 | static int sci_request_ssp_completion_status_is_tx_rx_suspend( |
2289 | unsigned int completion_status) |
2290 | { |
2291 | return 0; /* There are no Tx/Rx SSP suspend conditions. */ |
2292 | } |
2293 | |
2294 | static int sci_request_stpsata_completion_status_is_tx_suspend( |
2295 | unsigned int completion_status) |
2296 | { |
2297 | switch (completion_status) { |
2298 | case SCU_TASK_DONE_TX_RAW_CMD_ERR: |
2299 | case SCU_TASK_DONE_LL_R_ERR: |
2300 | case SCU_TASK_DONE_LL_PERR: |
2301 | case SCU_TASK_DONE_REG_ERR: |
2302 | case SCU_TASK_DONE_SDB_ERR: |
2303 | case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION: |
2304 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: |
2305 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: |
2306 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: |
2307 | case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: |
2308 | case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: |
2309 | case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY: |
2310 | case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED: |
2311 | case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED: |
2312 | return 1; |
2313 | } |
2314 | return 0; |
2315 | } |
2316 | |
2317 | |
2318 | static int sci_request_stpsata_completion_status_is_tx_rx_suspend( |
2319 | unsigned int completion_status) |
2320 | { |
2321 | switch (completion_status) { |
2322 | case SCU_TASK_DONE_LF_ERR: |
2323 | case SCU_TASK_DONE_LL_SY_TERM: |
2324 | case SCU_TASK_DONE_LL_LF_TERM: |
2325 | case SCU_TASK_DONE_BREAK_RCVD: |
2326 | case SCU_TASK_DONE_INV_FIS_LEN: |
2327 | case SCU_TASK_DONE_UNEXP_FIS: |
2328 | case SCU_TASK_DONE_UNEXP_SDBFIS: |
2329 | case SCU_TASK_DONE_MAX_PLD_ERR: |
2330 | return 1; |
2331 | } |
2332 | return 0; |
2333 | } |
2334 | |
2335 | static void sci_request_handle_suspending_completions( |
2336 | struct isci_request *ireq, |
2337 | u32 completion_code) |
2338 | { |
2339 | int is_tx = 0; |
2340 | int is_tx_rx = 0; |
2341 | |
2342 | switch (ireq->protocol) { |
2343 | case SAS_PROTOCOL_SMP: |
		is_tx = sci_request_smp_completion_status_is_tx_suspend(
			completion_code);
		is_tx_rx = sci_request_smp_completion_status_is_tx_rx_suspend(
			completion_code);
2348 | break; |
2349 | case SAS_PROTOCOL_SSP: |
		is_tx = sci_request_ssp_completion_status_is_tx_suspend(
			completion_code);
		is_tx_rx = sci_request_ssp_completion_status_is_tx_rx_suspend(
			completion_code);
2354 | break; |
2355 | case SAS_PROTOCOL_STP: |
		is_tx = sci_request_stpsata_completion_status_is_tx_suspend(
			completion_code);
		is_tx_rx =
			sci_request_stpsata_completion_status_is_tx_rx_suspend(
				completion_code);
2361 | break; |
2362 | default: |
2363 | dev_warn(&ireq->isci_host->pdev->dev, |
2364 | "%s: request %p has no valid protocol\n" , |
2365 | __func__, ireq); |
2366 | break; |
2367 | } |
2368 | if (is_tx || is_tx_rx) { |
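		/* The per-protocol decode tables above never flag both a
		 * Tx-only and a Tx/Rx suspension for the same completion.
		 */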
2369 | BUG_ON(is_tx && is_tx_rx); |
2370 | |
		sci_remote_node_context_suspend(
			&ireq->target_device->rnc,
			SCI_HW_SUSPEND,
			(is_tx_rx) ? SCU_EVENT_TL_RNC_SUSPEND_TX_RX
				   : SCU_EVENT_TL_RNC_SUSPEND_TX);
2376 | } |
2377 | } |
2378 | |
2379 | enum sci_status |
2380 | sci_io_request_tc_completion(struct isci_request *ireq, |
2381 | u32 completion_code) |
2382 | { |
2383 | enum sci_base_request_states state; |
2384 | struct isci_host *ihost = ireq->owning_controller; |
2385 | |
2386 | state = ireq->sm.current_state_id; |
2387 | |
2388 | /* Decode those completions that signal upcoming suspension events. */ |
2389 | sci_request_handle_suspending_completions( |
2390 | ireq, SCU_GET_COMPLETION_TL_STATUS(completion_code)); |
2391 | |
2392 | switch (state) { |
2393 | case SCI_REQ_STARTED: |
2394 | return request_started_state_tc_event(ireq, completion_code); |
2395 | |
2396 | case SCI_REQ_TASK_WAIT_TC_COMP: |
2397 | return ssp_task_request_await_tc_event(ireq, |
2398 | completion_code); |
2399 | |
2400 | case SCI_REQ_SMP_WAIT_RESP: |
2401 | return smp_request_await_response_tc_event(ireq, |
2402 | completion_code); |
2403 | |
2404 | case SCI_REQ_SMP_WAIT_TC_COMP: |
2405 | return smp_request_await_tc_event(ireq, completion_code); |
2406 | |
2407 | case SCI_REQ_STP_UDMA_WAIT_TC_COMP: |
2408 | return stp_request_udma_await_tc_event(ireq, |
2409 | completion_code); |
2410 | |
2411 | case SCI_REQ_STP_NON_DATA_WAIT_H2D: |
2412 | return stp_request_non_data_await_h2d_tc_event(ireq, |
2413 | completion_code); |
2414 | |
2415 | case SCI_REQ_STP_PIO_WAIT_H2D: |
2416 | return stp_request_pio_await_h2d_completion_tc_event(ireq, |
2417 | completion_code); |
2418 | |
2419 | case SCI_REQ_STP_PIO_DATA_OUT: |
2420 | return pio_data_out_tx_done_tc_event(ireq, completion_code); |
2421 | |
2422 | case SCI_REQ_ABORTING: |
2423 | return request_aborting_state_tc_event(ireq, |
2424 | completion_code); |
2425 | |
2426 | case SCI_REQ_ATAPI_WAIT_H2D: |
2427 | return atapi_raw_completion(ireq, completion_code, |
					    SCI_REQ_ATAPI_WAIT_PIO_SETUP);
2429 | |
2430 | case SCI_REQ_ATAPI_WAIT_TC_COMP: |
2431 | return atapi_raw_completion(ireq, completion_code, |
					    SCI_REQ_ATAPI_WAIT_D2H);
2433 | |
2434 | case SCI_REQ_ATAPI_WAIT_D2H: |
2435 | return atapi_data_tc_completion_handler(ireq, completion_code); |
2436 | |
2437 | default: |
		dev_warn(&ihost->pdev->dev, "%s: %x in wrong state %s\n",
2439 | __func__, completion_code, req_state_name(state)); |
2440 | return SCI_FAILURE_INVALID_STATE; |
2441 | } |
2442 | } |
2443 | |
2444 | /** |
2445 | * isci_request_process_response_iu() - This function sets the status and |
2446 | * response iu, in the task struct, from the request object for the upper |
2447 | * layer driver. |
2448 | * @task: This parameter is the task struct from the upper layer driver. |
2449 | * @resp_iu: This parameter points to the response iu of the completed request. |
2450 | * @dev: This parameter specifies the linux device struct. |
2451 | * |
 * Return: none.
2453 | */ |
2454 | static void isci_request_process_response_iu( |
2455 | struct sas_task *task, |
2456 | struct ssp_response_iu *resp_iu, |
2457 | struct device *dev) |
2458 | { |
2459 | dev_dbg(dev, |
2460 | "%s: resp_iu = %p " |
2461 | "resp_iu->status = 0x%x,\nresp_iu->datapres = %d " |
2462 | "resp_iu->response_data_len = %x, " |
2463 | "resp_iu->sense_data_len = %x\nresponse data: " , |
2464 | __func__, |
2465 | resp_iu, |
2466 | resp_iu->status, |
2467 | resp_iu->datapres, |
2468 | resp_iu->response_data_len, |
2469 | resp_iu->sense_data_len); |
2470 | |
2471 | task->task_status.stat = resp_iu->status; |
2472 | |
2473 | /* libsas updates the task status fields based on the response iu. */ |
	sas_ssp_task_response(dev, task, resp_iu);
2475 | } |
2476 | |
2477 | /** |
2478 | * isci_request_set_open_reject_status() - This function prepares the I/O |
2479 | * completion for OPEN_REJECT conditions. |
2480 | * @request: This parameter is the completed isci_request object. |
2481 | * @task: This parameter is the task struct from the upper layer driver. |
2482 | * @response_ptr: This parameter specifies the service response for the I/O. |
2483 | * @status_ptr: This parameter specifies the exec status for the I/O. |
2484 | * @open_rej_reason: This parameter specifies the encoded reason for the |
2485 | * abandon-class reject. |
2486 | * |
 * Return: none.
2488 | */ |
2489 | static void isci_request_set_open_reject_status( |
2490 | struct isci_request *request, |
2491 | struct sas_task *task, |
2492 | enum service_response *response_ptr, |
2493 | enum exec_status *status_ptr, |
2494 | enum sas_open_rej_reason open_rej_reason) |
2495 | { |
2496 | /* Task in the target is done. */ |
	set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2498 | *response_ptr = SAS_TASK_UNDELIVERED; |
2499 | *status_ptr = SAS_OPEN_REJECT; |
2500 | task->task_status.open_rej_reason = open_rej_reason; |
2501 | } |
2502 | |
2503 | /** |
2504 | * isci_request_handle_controller_specific_errors() - This function decodes |
2505 | * controller-specific I/O completion error conditions. |
2506 | * @idev: Remote device |
2507 | * @request: This parameter is the completed isci_request object. |
2508 | * @task: This parameter is the task struct from the upper layer driver. |
2509 | * @response_ptr: This parameter specifies the service response for the I/O. |
2510 | * @status_ptr: This parameter specifies the exec status for the I/O. |
2511 | * |
 * Return: none.
2513 | */ |
2514 | static void isci_request_handle_controller_specific_errors( |
2515 | struct isci_remote_device *idev, |
2516 | struct isci_request *request, |
2517 | struct sas_task *task, |
2518 | enum service_response *response_ptr, |
2519 | enum exec_status *status_ptr) |
2520 | { |
2521 | unsigned int cstatus; |
2522 | |
2523 | cstatus = request->scu_status; |
2524 | |
2525 | dev_dbg(&request->isci_host->pdev->dev, |
2526 | "%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR " |
2527 | "- controller status = 0x%x\n" , |
2528 | __func__, request, cstatus); |
2529 | |
2530 | /* Decode the controller-specific errors; most |
2531 | * important is to recognize those conditions in which |
2532 | * the target may still have a task outstanding that |
2533 | * must be aborted. |
2534 | * |
2535 | * Note that there are SCU completion codes being |
2536 | * named in the decode below for which SCIC has already |
2537 | * done work to handle them in a way other than as |
2538 | * a controller-specific completion code; these are left |
	 * in the decode below for completeness' sake.
2540 | */ |
2541 | switch (cstatus) { |
2542 | case SCU_TASK_DONE_DMASETUP_DIRERR: |
2543 | /* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */ |
2544 | case SCU_TASK_DONE_XFERCNT_ERR: |
2545 | /* Also SCU_TASK_DONE_SMP_UFI_ERR: */ |
2546 | if (task->task_proto == SAS_PROTOCOL_SMP) { |
2547 | /* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */ |
2548 | *response_ptr = SAS_TASK_COMPLETE; |
2549 | |
2550 | /* See if the device has been/is being stopped. Note |
2551 | * that we ignore the quiesce state, since we are |
2552 | * concerned about the actual device state. |
2553 | */ |
2554 | if (!idev) |
2555 | *status_ptr = SAS_DEVICE_UNKNOWN; |
2556 | else |
2557 | *status_ptr = SAS_ABORTED_TASK; |
2558 | |
			set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2560 | } else { |
2561 | /* Task in the target is not done. */ |
2562 | *response_ptr = SAS_TASK_UNDELIVERED; |
2563 | |
2564 | if (!idev) |
2565 | *status_ptr = SAS_DEVICE_UNKNOWN; |
2566 | else |
2567 | *status_ptr = SAS_SAM_STAT_TASK_ABORTED; |
2568 | |
			clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2570 | } |
2571 | |
2572 | break; |
2573 | |
2574 | case SCU_TASK_DONE_CRC_ERR: |
2575 | case SCU_TASK_DONE_NAK_CMD_ERR: |
2576 | case SCU_TASK_DONE_EXCESS_DATA: |
2577 | case SCU_TASK_DONE_UNEXP_FIS: |
2578 | /* Also SCU_TASK_DONE_UNEXP_RESP: */ |
2579 | case SCU_TASK_DONE_VIIT_ENTRY_NV: /* TODO - conditions? */ |
2580 | case SCU_TASK_DONE_IIT_ENTRY_NV: /* TODO - conditions? */ |
2581 | case SCU_TASK_DONE_RNCNV_OUTBOUND: /* TODO - conditions? */ |
2582 | /* These are conditions in which the target |
2583 | * has completed the task, so that no cleanup |
2584 | * is necessary. |
2585 | */ |
2586 | *response_ptr = SAS_TASK_COMPLETE; |
2587 | |
2588 | /* See if the device has been/is being stopped. Note |
2589 | * that we ignore the quiesce state, since we are |
2590 | * concerned about the actual device state. |
2591 | */ |
2592 | if (!idev) |
2593 | *status_ptr = SAS_DEVICE_UNKNOWN; |
2594 | else |
2595 | *status_ptr = SAS_ABORTED_TASK; |
2596 | |
		set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2598 | break; |
2599 | |
2600 | |
2601 | /* Note that the only open reject completion codes seen here will be |
2602 | * abandon-class codes; all others are automatically retried in the SCU. |
2603 | */ |
2604 | case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION: |
2605 | |
2606 | isci_request_set_open_reject_status( |
2607 | request, task, response_ptr, status_ptr, |
			SAS_OREJ_WRONG_DEST);
2609 | break; |
2610 | |
2611 | case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION: |
2612 | |
2613 | /* Note - the return of AB0 will change when |
2614 | * libsas implements detection of zone violations. |
2615 | */ |
2616 | isci_request_set_open_reject_status( |
2617 | request, task, response_ptr, status_ptr, |
			SAS_OREJ_RESV_AB0);
2619 | break; |
2620 | |
2621 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1: |
2622 | |
2623 | isci_request_set_open_reject_status( |
2624 | request, task, response_ptr, status_ptr, |
			SAS_OREJ_RESV_AB1);
2626 | break; |
2627 | |
2628 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2: |
2629 | |
2630 | isci_request_set_open_reject_status( |
2631 | request, task, response_ptr, status_ptr, |
			SAS_OREJ_RESV_AB2);
2633 | break; |
2634 | |
2635 | case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3: |
2636 | |
2637 | isci_request_set_open_reject_status( |
2638 | request, task, response_ptr, status_ptr, |
			SAS_OREJ_RESV_AB3);
2640 | break; |
2641 | |
2642 | case SCU_TASK_OPEN_REJECT_BAD_DESTINATION: |
2643 | |
2644 | isci_request_set_open_reject_status( |
2645 | request, task, response_ptr, status_ptr, |
			SAS_OREJ_BAD_DEST);
2647 | break; |
2648 | |
2649 | case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY: |
2650 | |
2651 | isci_request_set_open_reject_status( |
2652 | request, task, response_ptr, status_ptr, |
			SAS_OREJ_STP_NORES);
2654 | break; |
2655 | |
2656 | case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED: |
2657 | |
2658 | isci_request_set_open_reject_status( |
2659 | request, task, response_ptr, status_ptr, |
			SAS_OREJ_EPROTO);
2661 | break; |
2662 | |
2663 | case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED: |
2664 | |
2665 | isci_request_set_open_reject_status( |
2666 | request, task, response_ptr, status_ptr, |
			SAS_OREJ_CONN_RATE);
2668 | break; |
2669 | |
2670 | case SCU_TASK_DONE_LL_R_ERR: |
2671 | /* Also SCU_TASK_DONE_ACK_NAK_TO: */ |
2672 | case SCU_TASK_DONE_LL_PERR: |
2673 | case SCU_TASK_DONE_LL_SY_TERM: |
2674 | /* Also SCU_TASK_DONE_NAK_ERR:*/ |
2675 | case SCU_TASK_DONE_LL_LF_TERM: |
2676 | /* Also SCU_TASK_DONE_DATA_LEN_ERR: */ |
2677 | case SCU_TASK_DONE_LL_ABORT_ERR: |
2678 | case SCU_TASK_DONE_SEQ_INV_TYPE: |
2679 | /* Also SCU_TASK_DONE_UNEXP_XR: */ |
2680 | case SCU_TASK_DONE_XR_IU_LEN_ERR: |
2681 | case SCU_TASK_DONE_INV_FIS_LEN: |
2682 | /* Also SCU_TASK_DONE_XR_WD_LEN: */ |
2683 | case SCU_TASK_DONE_SDMA_ERR: |
2684 | case SCU_TASK_DONE_OFFSET_ERR: |
2685 | case SCU_TASK_DONE_MAX_PLD_ERR: |
2686 | case SCU_TASK_DONE_LF_ERR: |
2687 | case SCU_TASK_DONE_SMP_RESP_TO_ERR: /* Escalate to dev reset? */ |
2688 | case SCU_TASK_DONE_SMP_LL_RX_ERR: |
2689 | case SCU_TASK_DONE_UNEXP_DATA: |
2690 | case SCU_TASK_DONE_UNEXP_SDBFIS: |
2691 | case SCU_TASK_DONE_REG_ERR: |
2692 | case SCU_TASK_DONE_SDB_ERR: |
2693 | case SCU_TASK_DONE_TASK_ABORT: |
2694 | default: |
2695 | /* Task in the target is not done. */ |
2696 | *response_ptr = SAS_TASK_UNDELIVERED; |
2697 | *status_ptr = SAS_SAM_STAT_TASK_ABORTED; |
2698 | |
2699 | if (task->task_proto == SAS_PROTOCOL_SMP) |
			set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2701 | else |
			clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2703 | break; |
2704 | } |
2705 | } |
2706 | |
2707 | static void isci_process_stp_response(struct sas_task *task, struct dev_to_host_fis *fis) |
2708 | { |
2709 | struct task_status_struct *ts = &task->task_status; |
2710 | struct ata_task_resp *resp = (void *)&ts->buf[0]; |
2711 | |
2712 | resp->frame_len = sizeof(*fis); |
2713 | memcpy(resp->ending_fis, fis, sizeof(*fis)); |
2714 | ts->buf_valid_size = sizeof(*resp); |
2715 | |
2716 | /* If an error is flagged let libata decode the fis */ |
	if (ac_err_mask(fis->status))
2718 | ts->stat = SAS_PROTO_RESPONSE; |
2719 | else |
2720 | ts->stat = SAS_SAM_STAT_GOOD; |
2721 | |
2722 | ts->resp = SAS_TASK_COMPLETE; |
2723 | } |
2724 | |
2725 | static void isci_request_io_request_complete(struct isci_host *ihost, |
2726 | struct isci_request *request, |
2727 | enum sci_io_status completion_status) |
2728 | { |
2729 | struct sas_task *task = isci_request_access_task(request); |
2730 | struct ssp_response_iu *resp_iu; |
2731 | unsigned long task_flags; |
2732 | struct isci_remote_device *idev = request->target_device; |
2733 | enum service_response response = SAS_TASK_UNDELIVERED; |
2734 | enum exec_status status = SAS_ABORTED_TASK; |
2735 | |
2736 | dev_dbg(&ihost->pdev->dev, |
2737 | "%s: request = %p, task = %p, " |
2738 | "task->data_dir = %d completion_status = 0x%x\n" , |
2739 | __func__, request, task, task->data_dir, completion_status); |
2740 | |
2741 | /* The request is done from an SCU HW perspective. */ |
2742 | |
2743 | /* This is an active request being completed from the core. */ |
2744 | switch (completion_status) { |
2745 | |
2746 | case SCI_IO_FAILURE_RESPONSE_VALID: |
2747 | dev_dbg(&ihost->pdev->dev, |
2748 | "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n" , |
2749 | __func__, request, task); |
2750 | |
		if (sas_protocol_ata(task->task_proto)) {
			isci_process_stp_response(task, &request->stp.rsp);
2753 | } else if (SAS_PROTOCOL_SSP == task->task_proto) { |
2754 | |
2755 | /* crack the iu response buffer. */ |
2756 | resp_iu = &request->ssp.rsp; |
			isci_request_process_response_iu(task, resp_iu,
							 &ihost->pdev->dev);
2759 | |
2760 | } else if (SAS_PROTOCOL_SMP == task->task_proto) { |
2761 | |
2762 | dev_err(&ihost->pdev->dev, |
2763 | "%s: SCI_IO_FAILURE_RESPONSE_VALID: " |
2764 | "SAS_PROTOCOL_SMP protocol\n" , |
2765 | __func__); |
2766 | |
2767 | } else |
2768 | dev_err(&ihost->pdev->dev, |
2769 | "%s: unknown protocol\n" , __func__); |
2770 | |
2771 | /* use the task status set in the task struct by the |
2772 | * isci_request_process_response_iu call. |
2773 | */ |
		set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2775 | response = task->task_status.resp; |
2776 | status = task->task_status.stat; |
2777 | break; |
2778 | |
2779 | case SCI_IO_SUCCESS: |
2780 | case SCI_IO_SUCCESS_IO_DONE_EARLY: |
2781 | |
2782 | response = SAS_TASK_COMPLETE; |
2783 | status = SAS_SAM_STAT_GOOD; |
		set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2785 | |
2786 | if (completion_status == SCI_IO_SUCCESS_IO_DONE_EARLY) { |
2787 | |
2788 | /* This was an SSP / STP / SATA transfer. |
2789 | * There is a possibility that less data than |
2790 | * the maximum was transferred. |
2791 | */ |
			u32 transferred_length = sci_req_tx_bytes(request);
2793 | |
2794 | task->task_status.residual |
2795 | = task->total_xfer_len - transferred_length; |
2796 | |
2797 | /* If there were residual bytes, call this an |
2798 | * underrun. |
2799 | */ |
2800 | if (task->task_status.residual != 0) |
2801 | status = SAS_DATA_UNDERRUN; |
2802 | |
2803 | dev_dbg(&ihost->pdev->dev, |
2804 | "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n" , |
2805 | __func__, status); |
2806 | |
2807 | } else |
2808 | dev_dbg(&ihost->pdev->dev, "%s: SCI_IO_SUCCESS\n" , |
2809 | __func__); |
2810 | break; |
2811 | |
2812 | case SCI_IO_FAILURE_TERMINATED: |
2813 | |
2814 | dev_dbg(&ihost->pdev->dev, |
2815 | "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n" , |
2816 | __func__, request, task); |
2817 | |
2818 | /* The request was terminated explicitly. */ |
		set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2820 | response = SAS_TASK_UNDELIVERED; |
2821 | |
2822 | /* See if the device has been/is being stopped. Note |
2823 | * that we ignore the quiesce state, since we are |
2824 | * concerned about the actual device state. |
2825 | */ |
2826 | if (!idev) |
2827 | status = SAS_DEVICE_UNKNOWN; |
2828 | else |
2829 | status = SAS_ABORTED_TASK; |
2830 | break; |
2831 | |
2832 | case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR: |
2833 | |
2834 | isci_request_handle_controller_specific_errors(idev, request, |
							       task, &response,
							       &status);
2837 | break; |
2838 | |
2839 | case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED: |
2840 | /* This is a special case, in that the I/O completion |
2841 | * is telling us that the device needs a reset. |
2842 | * In order for the device reset condition to be |
2843 | * noticed, the I/O has to be handled in the error |
2844 | * handler. Set the reset flag and cause the |
2845 | * SCSI error thread to be scheduled. |
2846 | */ |
2847 | spin_lock_irqsave(&task->task_state_lock, task_flags); |
2848 | task->task_state_flags |= SAS_TASK_NEED_DEV_RESET; |
		spin_unlock_irqrestore(&task->task_state_lock, task_flags);
2850 | |
2851 | /* Fail the I/O. */ |
2852 | response = SAS_TASK_UNDELIVERED; |
2853 | status = SAS_SAM_STAT_TASK_ABORTED; |
2854 | |
		clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2856 | break; |
2857 | |
2858 | case SCI_FAILURE_RETRY_REQUIRED: |
2859 | |
2860 | /* Fail the I/O so it can be retried. */ |
2861 | response = SAS_TASK_UNDELIVERED; |
2862 | if (!idev) |
2863 | status = SAS_DEVICE_UNKNOWN; |
2864 | else |
2865 | status = SAS_ABORTED_TASK; |
2866 | |
		set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2868 | break; |
2869 | |
2870 | |
2871 | default: |
2872 | /* Catch any otherwise unhandled error codes here. */ |
2873 | dev_dbg(&ihost->pdev->dev, |
2874 | "%s: invalid completion code: 0x%x - " |
2875 | "isci_request = %p\n" , |
2876 | __func__, completion_status, request); |
2877 | |
2878 | response = SAS_TASK_UNDELIVERED; |
2879 | |
2880 | /* See if the device has been/is being stopped. Note |
2881 | * that we ignore the quiesce state, since we are |
2882 | * concerned about the actual device state. |
2883 | */ |
2884 | if (!idev) |
2885 | status = SAS_DEVICE_UNKNOWN; |
2886 | else |
2887 | status = SAS_ABORTED_TASK; |
2888 | |
2889 | if (SAS_PROTOCOL_SMP == task->task_proto) |
			set_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2891 | else |
			clear_bit(IREQ_COMPLETE_IN_TARGET, &request->flags);
2893 | break; |
2894 | } |
2895 | |
2896 | switch (task->task_proto) { |
2897 | case SAS_PROTOCOL_SSP: |
2898 | if (task->data_dir == DMA_NONE) |
2899 | break; |
2900 | if (task->num_scatter == 0) |
2901 | /* 0 indicates a single dma address */ |
2902 | dma_unmap_single(&ihost->pdev->dev, |
2903 | request->zero_scatter_daddr, |
2904 | task->total_xfer_len, task->data_dir); |
2905 | else /* unmap the sgl dma addresses */ |
2906 | dma_unmap_sg(&ihost->pdev->dev, task->scatter, |
2907 | request->num_sg_entries, task->data_dir); |
2908 | break; |
2909 | case SAS_PROTOCOL_SMP: { |
2910 | struct scatterlist *sg = &task->smp_task.smp_req; |
2911 | struct smp_req *smp_req; |
2912 | void *kaddr; |
2913 | |
2914 | dma_unmap_sg(&ihost->pdev->dev, sg, 1, DMA_TO_DEVICE); |
2915 | |
2916 | /* need to swab it back in case the command buffer is re-used */ |
		kaddr = kmap_atomic(sg_page(sg));
2918 | smp_req = kaddr + sg->offset; |
		sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
2920 | kunmap_atomic(kaddr); |
2921 | break; |
2922 | } |
2923 | default: |
2924 | break; |
2925 | } |
2926 | |
2927 | spin_lock_irqsave(&task->task_state_lock, task_flags); |
2928 | |
2929 | task->task_status.resp = response; |
2930 | task->task_status.stat = status; |
2931 | |
2932 | if (test_bit(IREQ_COMPLETE_IN_TARGET, &request->flags)) { |
2933 | /* Normal notification (task_done) */ |
2934 | task->task_state_flags |= SAS_TASK_STATE_DONE; |
2935 | task->task_state_flags &= ~SAS_TASK_STATE_PENDING; |
2936 | } |
	spin_unlock_irqrestore(&task->task_state_lock, task_flags);
2938 | |
2939 | /* complete the io request to the core. */ |
	sci_controller_complete_io(ihost, request->target_device, request);
2941 | |
2942 | /* set terminated handle so it cannot be completed or |
2943 | * terminated again, and to cause any calls into abort |
2944 | * task to recognize the already completed case. |
2945 | */ |
	set_bit(IREQ_TERMINATED, &request->flags);
2947 | |
	ireq_done(ihost, request, task);
2949 | } |
2950 | |
2951 | static void sci_request_started_state_enter(struct sci_base_state_machine *sm) |
2952 | { |
2953 | struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); |
2954 | struct domain_device *dev = ireq->target_device->domain_dev; |
2955 | enum sci_base_request_states state; |
2956 | struct sas_task *task; |
2957 | |
2958 | /* XXX as hch said always creating an internal sas_task for tmf |
2959 | * requests would simplify the driver |
2960 | */ |
2961 | task = (test_bit(IREQ_TMF, &ireq->flags)) ? NULL : isci_request_access_task(ireq); |
2962 | |
2963 | /* all unaccelerated request types (non ssp or ncq) handled with |
2964 | * substates |
2965 | */ |
2966 | if (!task && dev->dev_type == SAS_END_DEVICE) { |
2967 | state = SCI_REQ_TASK_WAIT_TC_COMP; |
2968 | } else if (task && task->task_proto == SAS_PROTOCOL_SMP) { |
2969 | state = SCI_REQ_SMP_WAIT_RESP; |
	} else if (task && sas_protocol_ata(task->task_proto) &&
2971 | !task->ata_task.use_ncq) { |
2972 | if (dev->sata_dev.class == ATA_DEV_ATAPI && |
2973 | task->ata_task.fis.command == ATA_CMD_PACKET) { |
2974 | state = SCI_REQ_ATAPI_WAIT_H2D; |
2975 | } else if (task->data_dir == DMA_NONE) { |
2976 | state = SCI_REQ_STP_NON_DATA_WAIT_H2D; |
2977 | } else if (task->ata_task.dma_xfer) { |
2978 | state = SCI_REQ_STP_UDMA_WAIT_TC_COMP; |
2979 | } else /* PIO */ { |
2980 | state = SCI_REQ_STP_PIO_WAIT_H2D; |
2981 | } |
2982 | } else { |
2983 | /* SSP or NCQ are fully accelerated, no substates */ |
2984 | return; |
2985 | } |
	sci_change_state(sm, state);
2987 | } |
2988 | |
2989 | static void sci_request_completed_state_enter(struct sci_base_state_machine *sm) |
2990 | { |
2991 | struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); |
2992 | struct isci_host *ihost = ireq->owning_controller; |
2993 | |
2994 | /* Tell the SCI_USER that the IO request is complete */ |
2995 | if (!test_bit(IREQ_TMF, &ireq->flags)) |
		isci_request_io_request_complete(ihost, ireq,
						 ireq->sci_status);
	else
		isci_task_request_complete(ihost, ireq, ireq->sci_status);
3000 | } |
3001 | |
3002 | static void sci_request_aborting_state_enter(struct sci_base_state_machine *sm) |
3003 | { |
3004 | struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); |
3005 | |
3006 | /* Setting the abort bit in the Task Context is required by the silicon. */ |
3007 | ireq->tc->abort = 1; |
3008 | } |
3009 | |
3010 | static void sci_stp_request_started_non_data_await_h2d_completion_enter(struct sci_base_state_machine *sm) |
3011 | { |
3012 | struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); |
3013 | |
3014 | ireq->target_device->working_request = ireq; |
3015 | } |
3016 | |
3017 | static void sci_stp_request_started_pio_await_h2d_completion_enter(struct sci_base_state_machine *sm) |
3018 | { |
3019 | struct isci_request *ireq = container_of(sm, typeof(*ireq), sm); |
3020 | |
3021 | ireq->target_device->working_request = ireq; |
3022 | } |
3023 | |
static const struct sci_base_state sci_request_state_table[] = {
	[SCI_REQ_INIT] = { },
	[SCI_REQ_CONSTRUCTED] = { },
	[SCI_REQ_STARTED] = {
		.enter_state = sci_request_started_state_enter,
	},
	[SCI_REQ_STP_NON_DATA_WAIT_H2D] = {
		.enter_state = sci_stp_request_started_non_data_await_h2d_completion_enter,
	},
	[SCI_REQ_STP_NON_DATA_WAIT_D2H] = { },
	[SCI_REQ_STP_PIO_WAIT_H2D] = {
		.enter_state = sci_stp_request_started_pio_await_h2d_completion_enter,
	},
	[SCI_REQ_STP_PIO_WAIT_FRAME] = { },
	[SCI_REQ_STP_PIO_DATA_IN] = { },
	[SCI_REQ_STP_PIO_DATA_OUT] = { },
	[SCI_REQ_STP_UDMA_WAIT_TC_COMP] = { },
	[SCI_REQ_STP_UDMA_WAIT_D2H] = { },
	[SCI_REQ_TASK_WAIT_TC_COMP] = { },
	[SCI_REQ_TASK_WAIT_TC_RESP] = { },
	[SCI_REQ_SMP_WAIT_RESP] = { },
	[SCI_REQ_SMP_WAIT_TC_COMP] = { },
	[SCI_REQ_ATAPI_WAIT_H2D] = { },
	[SCI_REQ_ATAPI_WAIT_PIO_SETUP] = { },
	[SCI_REQ_ATAPI_WAIT_D2H] = { },
	[SCI_REQ_ATAPI_WAIT_TC_COMP] = { },
	[SCI_REQ_COMPLETED] = {
		.enter_state = sci_request_completed_state_enter,
	},
	[SCI_REQ_ABORTING] = {
		.enter_state = sci_request_aborting_state_enter,
	},
	[SCI_REQ_FINAL] = { },
};

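/*
 * Initialize the fields common to all request types: the request state
 * machine, the target device binding, and the default status and post
 * context values.
 */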
static void
sci_general_request_construct(struct isci_host *ihost,
			      struct isci_remote_device *idev,
			      struct isci_request *ireq)
{
	sci_init_sm(&ireq->sm, sci_request_state_table, SCI_REQ_INIT);

	ireq->target_device = idev;
	ireq->protocol = SAS_PROTOCOL_NONE;
	ireq->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;

	ireq->sci_status = SCI_SUCCESS;
	ireq->scu_status = 0;
	ireq->post_context = 0xFFFFFFFF;
}

static enum sci_status
sci_io_request_construct(struct isci_host *ihost,
			 struct isci_remote_device *idev,
			 struct isci_request *ireq)
{
	struct domain_device *dev = idev->domain_dev;
	enum sci_status status = SCI_SUCCESS;

	/* Build the common part of the request */
	sci_general_request_construct(ihost, idev, ireq);

	if (idev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
		return SCI_FAILURE_INVALID_REMOTE_DEVICE;

	if (dev->dev_type == SAS_END_DEVICE)
		/* pass */;
	else if (dev_is_sata(dev))
		memset(&ireq->stp.cmd, 0, sizeof(ireq->stp.cmd));
	else if (dev_is_expander(dev->dev_type))
		/* pass */;
	else
		return SCI_FAILURE_UNSUPPORTED_PROTOCOL;

	memset(ireq->tc, 0, offsetof(struct scu_task_context, sgl_pair_ab));

	return status;
}

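/*
 * Task-management requests are only constructed for SSP end devices and
 * SATA/STP devices; any other target type fails with
 * SCI_FAILURE_UNSUPPORTED_PROTOCOL.
 */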
enum sci_status sci_task_request_construct(struct isci_host *ihost,
					   struct isci_remote_device *idev,
					   u16 io_tag, struct isci_request *ireq)
{
	struct domain_device *dev = idev->domain_dev;
	enum sci_status status = SCI_SUCCESS;

	/* Build the common part of the request */
	sci_general_request_construct(ihost, idev, ireq);

	if (dev->dev_type == SAS_END_DEVICE || dev_is_sata(dev)) {
		set_bit(IREQ_TMF, &ireq->flags);
		memset(ireq->tc, 0, sizeof(struct scu_task_context));

		/* Set the protocol indicator. */
		if (dev_is_sata(dev))
			ireq->protocol = SAS_PROTOCOL_STP;
		else
			ireq->protocol = SAS_PROTOCOL_SSP;
	} else
		status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;

	return status;
}

static enum sci_status isci_request_ssp_request_construct(
	struct isci_request *request)
{
	enum sci_status status;

	dev_dbg(&request->isci_host->pdev->dev,
		"%s: request = %p\n",
		__func__,
		request);
	status = sci_io_request_construct_basic_ssp(request);
	return status;
}

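/*
 * Build a SATA/STP request: copy the host-to-device FIS prepared by
 * libsas, normalize the FIS flags byte, and, for NCQ commands, encode
 * the queue tag in both the FIS and the task context.
 */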
static enum sci_status isci_request_stp_request_construct(struct isci_request *ireq)
{
	struct sas_task *task = isci_request_access_task(ireq);
	struct host_to_dev_fis *fis = &ireq->stp.cmd;
	struct ata_queued_cmd *qc = task->uldd_task;
	enum sci_status status;

	dev_dbg(&ireq->isci_host->pdev->dev,
		"%s: ireq = %p\n",
		__func__,
		ireq);

	memcpy(fis, &task->ata_task.fis, sizeof(struct host_to_dev_fis));
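	/* Bit 7 of the FIS flags byte is the C bit: set it when this FIS
	 * carries a command register update (i.e. not a device control
	 * register update). The low nibble (the PM port field) is then
	 * cleared.
	 */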
	if (!task->ata_task.device_control_reg_update)
		fis->flags |= 0x80;
	fis->flags &= 0xF0;

	status = sci_io_request_construct_basic_sata(ireq);

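	/* For first-party DMA (NCQ) commands the queue tag is carried in
	 * bits 7:3 of the FIS sector count field (hence the shift by 3),
	 * and the hardware also needs the tag in the task context.
	 */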
	if (qc && (qc->tf.command == ATA_CMD_FPDMA_WRITE ||
		   qc->tf.command == ATA_CMD_FPDMA_READ ||
		   qc->tf.command == ATA_CMD_FPDMA_RECV ||
		   qc->tf.command == ATA_CMD_FPDMA_SEND ||
		   qc->tf.command == ATA_CMD_NCQ_NON_DATA)) {
		fis->sector_count = qc->tag << 3;
		ireq->tc->type.stp.ncq_tag = qc->tag;
	}

	return status;
}

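/*
 * Build the task context for an SMP request: the payload is byte
 * swapped in place and DMA mapped, and the first DWord of the request
 * (the SMP header) is copied directly into the task context.
 */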
static enum sci_status
sci_io_request_construct_smp(struct device *dev,
			     struct isci_request *ireq,
			     struct sas_task *task)
{
	struct scatterlist *sg = &task->smp_task.smp_req;
	struct isci_remote_device *idev;
	struct scu_task_context *task_context;
	struct isci_port *iport;
	struct smp_req *smp_req;
	void *kaddr;
	u8 req_len;
	u32 cmd;

	kaddr = kmap_atomic(sg_page(sg));
	smp_req = kaddr + sg->offset;
	/*
	 * Look at the SMP request's header fields; for certain SAS 1.x SMP
	 * functions under SAS 2.0, a zero request length really indicates
	 * a non-zero default length.
	 */
	if (smp_req->req_len == 0) {
		switch (smp_req->func) {
		case SMP_DISCOVER:
		case SMP_REPORT_PHY_ERR_LOG:
		case SMP_REPORT_PHY_SATA:
		case SMP_REPORT_ROUTE_INFO:
			smp_req->req_len = 2;
			break;
		case SMP_CONF_ROUTE_INFO:
		case SMP_PHY_CONTROL:
		case SMP_PHY_TEST_FUNCTION:
			smp_req->req_len = 9;
			break;
		/* Default - zero is a valid default for 2.0. */
		}
	}
	req_len = smp_req->req_len;
	sci_swab32_cpy(smp_req, smp_req, sg->length / sizeof(u32));
	cmd = *(u32 *) smp_req;
	kunmap_atomic(kaddr);

	if (!dma_map_sg(dev, sg, 1, DMA_TO_DEVICE))
		return SCI_FAILURE;

	ireq->protocol = SAS_PROTOCOL_SMP;

	/* The SMP request was already byte swapped above, prior to the
	 * DMA mapping.
	 */

	task_context = ireq->tc;

	idev = ireq->target_device;
	iport = idev->owning_port;

	/*
	 * Fill in the TC with its required data
	 * 00h
	 */
	task_context->priority = 0;
	task_context->initiator_request = 1;
	task_context->connection_rate = idev->connection_rate;
	task_context->protocol_engine_index = ISCI_PEG;
	task_context->logical_port_index = iport->physical_port_index;
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP;
	task_context->abort = 0;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;

	/* 04h */
	task_context->remote_node_index = idev->rnc.remote_node_index;
	task_context->command_code = 0;
	task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST;

	/* 08h */
	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 1;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	/* 0ch */
	task_context->address_modifier = 0;

	/* 10h */
	task_context->ssp_command_iu_length = req_len;

	/* 14h */
	task_context->transfer_length_bytes = 0;

	/*
	 * 18h ~ 30h, protocol specific
	 * since the command IU has already been built by the framework at
	 * this point, we just copy the first DWord from the command IU to
	 * this location.
	 */
	memcpy(&task_context->type.smp, &cmd, sizeof(u32));

	/*
	 * 40h
	 * "For SMP you could program it to zero. We would prefer that way
	 * so that done code will be consistent." - Venki
	 */
	task_context->task_phase = 0;

	ireq->post_context = (SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			      (ISCI_PEG << SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			      (iport->physical_port_index <<
			       SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
			      ISCI_TAG_TCI(ireq->io_tag));
	/*
	 * Copy the physical address of the command buffer into the SCU Task
	 * Context; the buffer handed to the hardware skips the first DWord,
	 * since the command header was copied into the TC above.
	 */
	task_context->command_iu_upper = upper_32_bits(sg_dma_address(sg));
	task_context->command_iu_lower = lower_32_bits(sg_dma_address(sg) + sizeof(u32));

	/* SMP response comes as UF, so no need to set response IU address. */
	task_context->response_iu_upper = 0;
	task_context->response_iu_lower = 0;

	sci_change_state(&ireq->sm, SCI_REQ_CONSTRUCTED);

	return SCI_SUCCESS;
}

/*
 * isci_smp_request_build() - This function builds the smp request.
 * @ireq: This parameter points to the isci_request allocated in the
 *    request construct function.
 *
 * SCI_SUCCESS on successful completion, or specific failure code.
 */
static enum sci_status isci_smp_request_build(struct isci_request *ireq)
{
	struct sas_task *task = isci_request_access_task(ireq);
	struct device *dev = &ireq->isci_host->pdev->dev;
	enum sci_status status = SCI_FAILURE;

	status = sci_io_request_construct_smp(dev, ireq, task);
	if (status != SCI_SUCCESS)
		dev_dbg(&ireq->isci_host->pdev->dev,
			"%s: failed with status = %d\n",
			__func__,
			status);

	return status;
}

/**
 * isci_io_request_build() - This function builds the io request object.
 * @ihost: This parameter specifies the ISCI host object
 * @request: This parameter points to the isci_request object allocated in the
 *    request construct function.
 * @idev: This parameter is the handle for the sci core's remote device
 *    object that is the destination for this request.
 *
 * SCI_SUCCESS on successful completion, or specific failure code.
 */
static enum sci_status isci_io_request_build(struct isci_host *ihost,
					     struct isci_request *request,
					     struct isci_remote_device *idev)
{
	enum sci_status status = SCI_SUCCESS;
	struct sas_task *task = isci_request_access_task(request);

	dev_dbg(&ihost->pdev->dev,
		"%s: idev = 0x%p; request = %p, "
		"num_scatter = %d\n",
		__func__,
		idev,
		request,
		task->num_scatter);

	/* map the sgl addresses, if present.
	 * libata does the mapping for sata devices
	 * before we get the request.
	 */
	if (task->num_scatter &&
	    !sas_protocol_ata(task->task_proto) &&
	    !(SAS_PROTOCOL_SMP & task->task_proto)) {

		request->num_sg_entries = dma_map_sg(
			&ihost->pdev->dev,
			task->scatter,
			task->num_scatter,
			task->data_dir
			);

		if (request->num_sg_entries == 0)
			return SCI_FAILURE_INSUFFICIENT_RESOURCES;
	}

	status = sci_io_request_construct(ihost, idev, request);

	if (status != SCI_SUCCESS) {
		dev_dbg(&ihost->pdev->dev,
			"%s: failed request construct\n",
			__func__);
		return SCI_FAILURE;
	}

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		status = isci_smp_request_build(request);
		break;
	case SAS_PROTOCOL_SSP:
		status = isci_request_ssp_request_construct(request);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		status = isci_request_stp_request_construct(request);
		break;
	default:
		dev_dbg(&ihost->pdev->dev,
			"%s: unknown protocol\n", __func__);
		return SCI_FAILURE;
	}

	return status;
}

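/*
 * The request pool is preallocated and indexed by tag; these helpers
 * look up the request for a tag and reset the per-request bookkeeping
 * before reuse.
 */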
static struct isci_request *isci_request_from_tag(struct isci_host *ihost, u16 tag)
{
	struct isci_request *ireq;

	ireq = ihost->reqs[ISCI_TAG_TCI(tag)];
	ireq->io_tag = tag;
	ireq->io_request_completion = NULL;
	ireq->flags = 0;
	ireq->num_sg_entries = 0;

	return ireq;
}

struct isci_request *isci_io_request_from_tag(struct isci_host *ihost,
					      struct sas_task *task,
					      u16 tag)
{
	struct isci_request *ireq;

	ireq = isci_request_from_tag(ihost, tag);
	ireq->ttype_ptr.io_task_ptr = task;
	clear_bit(IREQ_TMF, &ireq->flags);
	task->lldd_task = ireq;

	return ireq;
}

struct isci_request *isci_tmf_request_from_tag(struct isci_host *ihost,
					       struct isci_tmf *isci_tmf,
					       u16 tag)
{
	struct isci_request *ireq;

	ireq = isci_request_from_tag(ihost, tag);
	ireq->ttype_ptr.tmf_task_ptr = isci_tmf;
	set_bit(IREQ_TMF, &ireq->flags);

	return ireq;
}

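/**
 * isci_request_execute() - Build and start an I/O request.
 * @ihost: host on which the request executes.
 * @idev: remote device that is the destination of the request.
 * @task: the libsas task backing this request.
 * @ireq: the request to build and start.
 *
 * Returns 0 once the request has been started (or handed off to the
 * SCSI error handler for a device reset); otherwise returns the core
 * failure status from the build or start step.
 */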
int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
			 struct sas_task *task, struct isci_request *ireq)
{
	enum sci_status status;
	unsigned long flags;
	int ret = 0;

	status = isci_io_request_build(ihost, ireq, idev);
	if (status != SCI_SUCCESS) {
		dev_dbg(&ihost->pdev->dev,
			"%s: request_construct failed - status = 0x%x\n",
			__func__,
			status);
		return status;
	}

	spin_lock_irqsave(&ihost->scic_lock, flags);

	if (test_bit(IDEV_IO_NCQERROR, &idev->flags)) {

		if (isci_task_is_ncq_recovery(task)) {

			/* The device is in an NCQ recovery state. Issue the
			 * request on the task side. Note that it will
			 * complete on the I/O request side because the
			 * request was built that way (i.e.
			 * ireq->is_task_management_request is false).
			 */
			status = sci_controller_start_task(ihost,
							   idev,
							   ireq);
		} else {
			status = SCI_FAILURE;
		}
	} else {
		/* send the request; the I/O tag was already assigned when
		 * the request was constructed from its tag.
		 */
		status = sci_controller_start_io(ihost, idev,
						 ireq);
	}

	if (status != SCI_SUCCESS &&
	    status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
		dev_dbg(&ihost->pdev->dev,
			"%s: failed request start (0x%x)\n",
			__func__, status);
		spin_unlock_irqrestore(&ihost->scic_lock, flags);
		return status;
	}
	/* Either I/O started OK, or the core has signaled that
	 * the device needs a target reset.
	 */
	if (status != SCI_SUCCESS) {
		/* The request did not really start in the
		 * hardware, so clear the request handle
		 * here so no terminations will be done.
		 */
		set_bit(IREQ_TERMINATED, &ireq->flags);
	}
	spin_unlock_irqrestore(&ihost->scic_lock, flags);

	if (status ==
	    SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
		/* Signal libsas that we need the SCSI error
		 * handler thread to work on this I/O and that
		 * we want a device reset.
		 */
		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
		spin_unlock_irqrestore(&task->task_state_lock, flags);

		/* Cause this task to be scheduled in the SCSI error
		 * handler thread.
		 */
		sas_task_abort(task);

		/* Change the status, since we are holding
		 * the I/O until it is managed by the SCSI
		 * error handler.
		 */
		status = SCI_SUCCESS;
	}

	return ret;
}
