// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * CXL Flash Device Driver
 *
 * Written by: Manoj N. Kumar <manoj@linux.vnet.ibm.com>, IBM Corporation
 *             Matthew R. Ochs <mrochs@linux.vnet.ibm.com>, IBM Corporation
 *
 * Copyright (C) 2015 IBM Corporation
 */

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <asm/unaligned.h>

#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <uapi/scsi/cxlflash_ioctl.h>

#include "main.h"
#include "sislite.h"
#include "common.h"

MODULE_DESCRIPTION(CXLFLASH_ADAPTER_NAME);
MODULE_AUTHOR("Manoj N. Kumar <manoj@linux.vnet.ibm.com>");
MODULE_AUTHOR("Matthew R. Ochs <mrochs@linux.vnet.ibm.com>");
MODULE_LICENSE("GPL");

static char *cxlflash_devnode(const struct device *dev, umode_t *mode);
static const struct class cxlflash_class = {
	.name = "cxlflash",
	.devnode = cxlflash_devnode,
};

static u32 cxlflash_major;
static DECLARE_BITMAP(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
/**
 * process_cmd_err() - command error handler
 * @cmd: AFU command that experienced the error.
 * @scp: SCSI command associated with the AFU command in error.
 *
 * Translates error bits from AFU command to SCSI command results.
 */
static void process_cmd_err(struct afu_cmd *cmd, struct scsi_cmnd *scp)
{
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct sisl_ioasa *ioasa;
	u32 resid;

	ioasa = &(cmd->sa);

	if (ioasa->rc.flags & SISL_RC_FLAGS_UNDERRUN) {
		resid = ioasa->resid;
		scsi_set_resid(scp, resid);
		dev_dbg(dev, "%s: cmd underrun cmd = %p scp = %p, resid = %d\n",
			__func__, cmd, scp, resid);
	}

	if (ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN) {
		dev_dbg(dev, "%s: cmd overrun cmd = %p scp = %p\n",
			__func__, cmd, scp);
		scp->result = (DID_ERROR << 16);
	}

	dev_dbg(dev, "%s: cmd failed afu_rc=%02x scsi_rc=%02x fc_rc=%02x "
		"afu_extra=%02x scsi_extra=%02x fc_extra=%02x\n", __func__,
		ioasa->rc.afu_rc, ioasa->rc.scsi_rc, ioasa->rc.fc_rc,
		ioasa->afu_extra, ioasa->scsi_extra, ioasa->fc_extra);

	if (ioasa->rc.scsi_rc) {
		/* We have a SCSI status */
		if (ioasa->rc.flags & SISL_RC_FLAGS_SENSE_VALID) {
			memcpy(scp->sense_buffer, ioasa->sense_data,
			       SISL_SENSE_DATA_LEN);
			scp->result = ioasa->rc.scsi_rc;
		} else
			scp->result = ioasa->rc.scsi_rc | (DID_ERROR << 16);
	}

	/*
	 * We encountered an error. Set scp->result based on nature
	 * of error.
	 */
	if (ioasa->rc.fc_rc) {
		/* We have an FC status */
		switch (ioasa->rc.fc_rc) {
		case SISL_FC_RC_LINKDOWN:
			scp->result = (DID_REQUEUE << 16);
			break;
		case SISL_FC_RC_RESID:
			/* This indicates an FCP resid underrun */
			if (!(ioasa->rc.flags & SISL_RC_FLAGS_OVERRUN)) {
				/*
				 * If the SISL_RC_FLAGS_OVERRUN flag was set,
				 * then we will handle this error elsewhere.
				 * If not, then we must handle it here.
				 * This is probably an AFU bug.
				 */
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_FC_RC_RESIDERR:
			/* Resid mismatch between adapter and device */
		case SISL_FC_RC_TGTABORT:
		case SISL_FC_RC_ABORTOK:
		case SISL_FC_RC_ABORTFAIL:
		case SISL_FC_RC_NOLOGI:
		case SISL_FC_RC_ABORTPEND:
		case SISL_FC_RC_WRABORTPEND:
		case SISL_FC_RC_NOEXP:
		case SISL_FC_RC_INUSE:
			scp->result = (DID_ERROR << 16);
			break;
		}
	}

	if (ioasa->rc.afu_rc) {
		/* We have an AFU error */
		switch (ioasa->rc.afu_rc) {
		case SISL_AFU_RC_NO_CHANNELS:
			scp->result = (DID_NO_CONNECT << 16);
			break;
		case SISL_AFU_RC_DATA_DMA_ERR:
			switch (ioasa->afu_extra) {
			case SISL_AFU_DMA_ERR_PAGE_IN:
				/* Retry */
				scp->result = (DID_IMM_RETRY << 16);
				break;
			case SISL_AFU_DMA_ERR_INVALID_EA:
			default:
				scp->result = (DID_ERROR << 16);
			}
			break;
		case SISL_AFU_RC_OUT_OF_DATA_BUFS:
			/* Retry */
			scp->result = (DID_ERROR << 16);
			break;
		default:
			scp->result = (DID_ERROR << 16);
		}
	}
}

/**
 * cmd_complete() - command completion handler
 * @cmd: AFU command that has completed.
 *
 * For SCSI commands this routine prepares and submits commands that have
 * either completed or timed out to the SCSI stack. For internal commands
 * (TMF or AFU), this routine simply notifies the originator that the
 * command has completed.
 */
static void cmd_complete(struct afu_cmd *cmd)
{
	struct scsi_cmnd *scp;
	ulong lock_flags;
	struct afu *afu = cmd->parent;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
	list_del(&cmd->list);
	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);

	if (cmd->scp) {
		scp = cmd->scp;
		if (unlikely(cmd->sa.ioasc))
			process_cmd_err(cmd, scp);
		else
			scp->result = (DID_OK << 16);

		dev_dbg_ratelimited(dev, "%s:scp=%p result=%08x ioasc=%08x\n",
				    __func__, scp, scp->result, cmd->sa.ioasc);
		scsi_done(scp);
	} else if (cmd->cmd_tmf) {
		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
		cfg->tmf_active = false;
		wake_up_all_locked(&cfg->tmf_waitq);
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
	} else
		complete(&cmd->cevent);
}

/**
 * flush_pending_cmds() - flush all pending commands on this hardware queue
 * @hwq: Hardware queue to flush.
 *
 * The hardware send queue lock associated with this hardware queue must be
 * held when calling this routine.
 */
static void flush_pending_cmds(struct hwq *hwq)
{
	struct cxlflash_cfg *cfg = hwq->afu->parent;
	struct afu_cmd *cmd, *tmp;
	struct scsi_cmnd *scp;
	ulong lock_flags;

	list_for_each_entry_safe(cmd, tmp, &hwq->pending_cmds, list) {
		/* Bypass command when on a doneq, cmd_complete() will handle */
		if (!list_empty(&cmd->queue))
			continue;

		list_del(&cmd->list);

		if (cmd->scp) {
			scp = cmd->scp;
			scp->result = (DID_IMM_RETRY << 16);
			scsi_done(scp);
		} else {
			cmd->cmd_aborted = true;

			if (cmd->cmd_tmf) {
				spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
				cfg->tmf_active = false;
				wake_up_all_locked(&cfg->tmf_waitq);
				spin_unlock_irqrestore(&cfg->tmf_slock,
						       lock_flags);
			} else
				complete(&cmd->cevent);
		}
	}
}

/**
 * context_reset() - reset context via specified register
 * @hwq:       Hardware queue owning the context to be reset.
 * @reset_reg: MMIO register to perform reset.
 *
 * When the reset is successful, the SISLite specification guarantees that
 * the AFU has aborted all currently pending I/O. Accordingly, these commands
 * must be flushed.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset(struct hwq *hwq, __be64 __iomem *reset_reg)
{
	struct cxlflash_cfg *cfg = hwq->afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = -ETIMEDOUT;
	int nretry = 0;
	u64 val = 0x1;
	ulong lock_flags;

	dev_dbg(dev, "%s: hwq=%p\n", __func__, hwq);

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);

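	/*
	 * Writing a 1 to the reset register requests the context reset; the
	 * AFU clears the bit once the reset completes, so poll for that
	 * with an exponentially increasing delay.
	 */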
	writeq_be(val, reset_reg);
	do {
		val = readq_be(reset_reg);
		if ((val & 0x1) == 0x0) {
			rc = 0;
			break;
		}

		/* Double delay each time */
		udelay(1 << nretry);
	} while (nretry++ < MC_ROOM_RETRY_CNT);

	if (!rc)
		flush_pending_cmds(hwq);

	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);

	dev_dbg(dev, "%s: returning rc=%d, val=%016llx nretry=%d\n",
		__func__, rc, val, nretry);
	return rc;
}

/**
 * context_reset_ioarrin() - reset context via IOARRIN register
 * @hwq: Hardware queue owning the context to be reset.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset_ioarrin(struct hwq *hwq)
{
	return context_reset(hwq, &hwq->host_map->ioarrin);
}

/**
 * context_reset_sq() - reset context via SQ_CONTEXT_RESET register
 * @hwq: Hardware queue owning the context to be reset.
 *
 * Return: 0 on success, -errno on failure
 */
static int context_reset_sq(struct hwq *hwq)
{
	return context_reset(hwq, &hwq->host_map->sq_ctx_reset);
}

/**
 * send_cmd_ioarrin() - sends an AFU command via IOARRIN register
 * @afu: AFU associated with the host.
 * @cmd: AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_ioarrin(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
	int rc = 0;
	s64 room;
	ulong lock_flags;

	/*
	 * To avoid the performance penalty of MMIO, spread the update of
	 * 'room' over multiple commands.
	 */
	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
	if (--hwq->room < 0) {
		room = readq_be(&hwq->host_map->cmd_room);
		if (room <= 0) {
			dev_dbg_ratelimited(dev, "%s: no cmd_room to send "
					    "0x%02X, room=0x%016llX\n",
					    __func__, cmd->rcb.cdb[0], room);
			hwq->room = 0;
			rc = SCSI_MLQUEUE_HOST_BUSY;
			goto out;
		}
		hwq->room = room - 1;
	}

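	/*
	 * Queue the command on the pending list before ringing IOARRIN so
	 * the completion and flush paths can always find it there.
	 */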
	list_add(&cmd->list, &hwq->pending_cmds);
	writeq_be((u64)&cmd->rcb, &hwq->host_map->ioarrin);
out:
	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
	dev_dbg_ratelimited(dev, "%s: cmd=%p len=%u ea=%016llx rc=%d\n",
			    __func__, cmd, cmd->rcb.data_len,
			    cmd->rcb.data_ea, rc);
	return rc;
}

/**
 * send_cmd_sq() - sends an AFU command via SQ ring
 * @afu: AFU associated with the host.
 * @cmd: AFU command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int send_cmd_sq(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);
	int rc = 0;
	int newval;
	ulong lock_flags;

	newval = atomic_dec_if_positive(&hwq->hsq_credits);
	if (newval <= 0) {
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}

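	/* In SQ mode the AFU writes completion status directly to the IOASA */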
	cmd->rcb.ioasa = &cmd->sa;

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);

	*hwq->hsq_curr = cmd->rcb;
	if (hwq->hsq_curr < hwq->hsq_end)
		hwq->hsq_curr++;
	else
		hwq->hsq_curr = hwq->hsq_start;

	list_add(&cmd->list, &hwq->pending_cmds);
	writeq_be((u64)hwq->hsq_curr, &hwq->host_map->sq_tail);

	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
out:
	dev_dbg(dev, "%s: cmd=%p len=%u ea=%016llx ioasa=%p rc=%d curr=%p "
		"head=%016llx tail=%016llx\n", __func__, cmd, cmd->rcb.data_len,
		cmd->rcb.data_ea, cmd->rcb.ioasa, rc, hwq->hsq_curr,
		readq_be(&hwq->host_map->sq_head),
		readq_be(&hwq->host_map->sq_tail));
	return rc;
}

/**
 * wait_resp() - polls for a response or timeout to a sent AFU command
 * @afu: AFU associated with the host.
 * @cmd: AFU command that was sent.
 *
 * Return: 0 on success, -errno on failure
 */
static int wait_resp(struct afu *afu, struct afu_cmd *cmd)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;
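	/* Allow twice the command's timeout (given in seconds) to elapse */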
	ulong timeout = msecs_to_jiffies(cmd->rcb.timeout * 2 * 1000);

	timeout = wait_for_completion_timeout(&cmd->cevent, timeout);
	if (!timeout)
		rc = -ETIMEDOUT;

	if (cmd->cmd_aborted)
		rc = -EAGAIN;

	if (unlikely(cmd->sa.ioasc != 0)) {
		dev_err(dev, "%s: cmd %02x failed, ioasc=%08x\n",
			__func__, cmd->rcb.cdb[0], cmd->sa.ioasc);
		rc = -EIO;
	}

	return rc;
}

/**
 * cmd_to_target_hwq() - selects a target hardware queue for a SCSI command
 * @host: SCSI host associated with device.
 * @scp:  SCSI command to send.
 * @afu:  AFU associated with the host.
 *
 * Hashes a command based upon the hardware queue mode.
 *
 * Return: Trusted index of target hardware queue
 */
static u32 cmd_to_target_hwq(struct Scsi_Host *host, struct scsi_cmnd *scp,
			     struct afu *afu)
{
	u32 tag;
	u32 hwq = 0;

	if (afu->num_hwqs == 1)
		return 0;

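	/*
	 * RR spreads commands round-robin, TAG derives the queue from the
	 * block-MQ unique tag, and CPU maps the submitting processor to a
	 * queue.
	 */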
	switch (afu->hwq_mode) {
	case HWQ_MODE_RR:
		hwq = afu->hwq_rr_count++ % afu->num_hwqs;
		break;
	case HWQ_MODE_TAG:
		tag = blk_mq_unique_tag(scsi_cmd_to_rq(scp));
		hwq = blk_mq_unique_tag_to_hwq(tag);
		break;
	case HWQ_MODE_CPU:
		hwq = smp_processor_id() % afu->num_hwqs;
		break;
	default:
		WARN_ON_ONCE(1);
	}

	return hwq;
}

/**
 * send_tmf() - sends a Task Management Function (TMF)
 * @cfg:    Internal structure associated with the host.
 * @sdev:   SCSI device destined for TMF.
 * @tmfcmd: TMF command to send.
 *
 * Return:
 *	0 on success, SCSI_MLQUEUE_HOST_BUSY or -errno on failure
 */
static int send_tmf(struct cxlflash_cfg *cfg, struct scsi_device *sdev,
		    u64 tmfcmd)
{
	struct afu *afu = cfg->afu;
	struct afu_cmd *cmd = NULL;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
	bool needs_deletion = false;
	char *buf = NULL;
	ulong lock_flags;
	int rc = 0;
	ulong to;

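	/*
	 * Over-allocate so the command can be aligned manually via
	 * PTR_ALIGN below; kzalloc() does not guarantee the alignment
	 * that struct afu_cmd requires.
	 */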
	buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
	if (unlikely(!buf)) {
		dev_err(dev, "%s: no memory for command\n", __func__);
		rc = -ENOMEM;
		goto out;
	}

	cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd));
	INIT_LIST_HEAD(&cmd->queue);

	/* When Task Management Function is active do not send another */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	cfg->tmf_active = true;
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	cmd->parent = afu;
	cmd->cmd_tmf = true;
	cmd->hwq_index = hwq->index;

	cmd->rcb.ctx_id = hwq->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = CHAN2PORTMASK(sdev->channel);
	cmd->rcb.lun_id = lun_to_lunid(sdev->lun);
	cmd->rcb.req_flags = (SISL_REQ_FLAGS_PORT_LUN_ID |
			      SISL_REQ_FLAGS_SUP_UNDERRUN |
			      SISL_REQ_FLAGS_TMF_CMD);
	memcpy(cmd->rcb.cdb, &tmfcmd, sizeof(tmfcmd));

	rc = afu->send_cmd(afu, cmd);
	if (unlikely(rc)) {
		spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
		cfg->tmf_active = false;
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		goto out;
	}

	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	to = msecs_to_jiffies(5000);
	to = wait_event_interruptible_lock_irq_timeout(cfg->tmf_waitq,
						       !cfg->tmf_active,
						       cfg->tmf_slock,
						       to);
	if (!to) {
		dev_err(dev, "%s: TMF timed out\n", __func__);
		rc = -ETIMEDOUT;
		needs_deletion = true;
	} else if (cmd->cmd_aborted) {
		dev_err(dev, "%s: TMF aborted\n", __func__);
		rc = -EAGAIN;
	} else if (cmd->sa.ioasc) {
		dev_err(dev, "%s: TMF failed ioasc=%08x\n",
			__func__, cmd->sa.ioasc);
		rc = -EIO;
	}
	cfg->tmf_active = false;
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	if (needs_deletion) {
		spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
		list_del(&cmd->list);
		spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
	}
out:
	kfree(buf);
	return rc;
}

/**
 * cxlflash_driver_info() - information handler for this host driver
 * @host: SCSI host associated with device.
 *
 * Return: A string describing the device.
 */
static const char *cxlflash_driver_info(struct Scsi_Host *host)
{
	return CXLFLASH_ADAPTER_NAME;
}

/**
 * cxlflash_queuecommand() - sends a mid-layer request
 * @host: SCSI host associated with device.
 * @scp:  SCSI command to send.
 *
 * Return: 0 on success, SCSI_MLQUEUE_HOST_BUSY on failure
 */
static int cxlflash_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scp)
{
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct afu_cmd *cmd = sc_to_afuci(scp);
	struct scatterlist *sg = scsi_sglist(scp);
	int hwq_index = cmd_to_target_hwq(host, scp, afu);
	struct hwq *hwq = get_hwq(afu, hwq_index);
	u16 req_flags = SISL_REQ_FLAGS_SUP_UNDERRUN;
	ulong lock_flags;
	int rc = 0;

	dev_dbg_ratelimited(dev, "%s: (scp=%p) %d/%d/%d/%llu "
			    "cdb=(%08x-%08x-%08x-%08x)\n",
			    __func__, scp, host->host_no, scp->device->channel,
			    scp->device->id, scp->device->lun,
			    get_unaligned_be32(&((u32 *)scp->cmnd)[0]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[1]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[2]),
			    get_unaligned_be32(&((u32 *)scp->cmnd)[3]));

	/*
	 * If a Task Management Function is active, wait for it to complete
	 * before continuing with regular commands.
	 */
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active) {
		spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	}
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	switch (cfg->state) {
	case STATE_PROBING:
	case STATE_PROBED:
	case STATE_RESET:
		dev_dbg_ratelimited(dev, "%s: device is in reset\n", __func__);
		rc = SCSI_MLQUEUE_HOST_BUSY;
		goto out;
	case STATE_FAILTERM:
		dev_dbg_ratelimited(dev, "%s: device has failed\n", __func__);
		scp->result = (DID_NO_CONNECT << 16);
		scsi_done(scp);
		rc = 0;
		goto out;
	default:
		atomic_inc(&afu->cmds_active);
		break;
	}

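	/*
	 * Only the first scatter-gather element is used here; the host
	 * template is presumably configured for a single SG entry.
	 */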
	if (likely(sg)) {
		cmd->rcb.data_len = sg->length;
		cmd->rcb.data_ea = (uintptr_t)sg_virt(sg);
	}

	cmd->scp = scp;
	cmd->parent = afu;
	cmd->hwq_index = hwq_index;

	cmd->sa.ioasc = 0;
	cmd->rcb.ctx_id = hwq->ctx_hndl;
	cmd->rcb.msi = SISL_MSI_RRQ_UPDATED;
	cmd->rcb.port_sel = CHAN2PORTMASK(scp->device->channel);
	cmd->rcb.lun_id = lun_to_lunid(scp->device->lun);

	if (scp->sc_data_direction == DMA_TO_DEVICE)
		req_flags |= SISL_REQ_FLAGS_HOST_WRITE;

	cmd->rcb.req_flags = req_flags;
	memcpy(cmd->rcb.cdb, scp->cmnd, sizeof(cmd->rcb.cdb));

	rc = afu->send_cmd(afu, cmd);
	atomic_dec(&afu->cmds_active);
out:
	return rc;
}

/**
 * cxlflash_wait_for_pci_err_recovery() - wait for error recovery during probe
 * @cfg: Internal structure associated with the host.
 */
static void cxlflash_wait_for_pci_err_recovery(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;

	if (pci_channel_offline(pdev))
		wait_event_timeout(cfg->reset_waitq,
				   !pci_channel_offline(pdev),
				   CXLFLASH_PCI_ERROR_RECOVERY_TIMEOUT);
}

/**
 * free_mem() - free memory associated with the AFU
 * @cfg: Internal structure associated with the host.
 */
static void free_mem(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;

	if (cfg->afu) {
		free_pages((ulong)afu, get_order(sizeof(struct afu)));
		cfg->afu = NULL;
	}
}

/**
 * cxlflash_reset_sync() - synchronizing point for asynchronous resets
 * @cfg: Internal structure associated with the host.
 */
static void cxlflash_reset_sync(struct cxlflash_cfg *cfg)
{
	if (cfg->async_reset_cookie == 0)
		return;

	/* Wait until all async calls prior to this cookie have completed */
	async_synchronize_cookie(cfg->async_reset_cookie + 1);
	cfg->async_reset_cookie = 0;
}

/**
 * stop_afu() - stops the AFU command timers and unmaps the MMIO space
 * @cfg: Internal structure associated with the host.
 *
 * Safe to call with AFU in a partially allocated/initialized state.
 *
 * Cancels scheduled worker threads, waits for any active internal AFU
 * commands to timeout, disables IRQ polling and then unmaps the MMIO space.
 */
static void stop_afu(struct cxlflash_cfg *cfg)
{
	struct afu *afu = cfg->afu;
	struct hwq *hwq;
	int i;

	cancel_work_sync(&cfg->work_q);
	if (!current_is_async())
		cxlflash_reset_sync(cfg);

	if (likely(afu)) {
		while (atomic_read(&afu->cmds_active))
			ssleep(1);

		if (afu_is_irqpoll_enabled(afu)) {
			for (i = 0; i < afu->num_hwqs; i++) {
				hwq = get_hwq(afu, i);

				irq_poll_disable(&hwq->irqpoll);
			}
		}

		if (likely(afu->afu_map)) {
			cfg->ops->psa_unmap(afu->afu_map);
			afu->afu_map = NULL;
		}
	}
}

/**
 * term_intr() - disables all AFU interrupts
 * @cfg:   Internal structure associated with the host.
 * @level: Depth of allocation, where to begin waterfall tear down.
 * @index: Index of the hardware queue.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_intr(struct cxlflash_cfg *cfg, enum undo_level level,
		      u32 index)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;

	if (!afu) {
		dev_err(dev, "%s: returning with NULL afu\n", __func__);
		return;
	}

	hwq = get_hwq(afu, index);

	if (!hwq->ctx_cookie) {
		dev_err(dev, "%s: returning with NULL MC\n", __func__);
		return;
	}

	switch (level) {
	case UNMAP_THREE:
		/* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
		if (index == PRIMARY_HWQ)
			cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 3, hwq);
		fallthrough;
	case UNMAP_TWO:
		cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 2, hwq);
		fallthrough;
	case UNMAP_ONE:
		cfg->ops->unmap_afu_irq(hwq->ctx_cookie, 1, hwq);
		fallthrough;
	case FREE_IRQ:
		cfg->ops->free_afu_irqs(hwq->ctx_cookie);
		fallthrough;
	case UNDO_NOOP:
		/* No action required */
		break;
	}
}

/**
 * term_mc() - terminates the master context
 * @cfg:   Internal structure associated with the host.
 * @index: Index of the hardware queue.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_mc(struct cxlflash_cfg *cfg, u32 index)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct hwq *hwq;
	ulong lock_flags;

	if (!afu) {
		dev_err(dev, "%s: returning with NULL afu\n", __func__);
		return;
	}

	hwq = get_hwq(afu, index);

	if (!hwq->ctx_cookie) {
		dev_err(dev, "%s: returning with NULL MC\n", __func__);
		return;
	}

	WARN_ON(cfg->ops->stop_context(hwq->ctx_cookie));
	if (index != PRIMARY_HWQ)
		WARN_ON(cfg->ops->release_context(hwq->ctx_cookie));
	hwq->ctx_cookie = NULL;

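	/* Mark the queue offline so the RRQ handler drops late interrupts */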
	spin_lock_irqsave(&hwq->hrrq_slock, lock_flags);
	hwq->hrrq_online = false;
	spin_unlock_irqrestore(&hwq->hrrq_slock, lock_flags);

	spin_lock_irqsave(&hwq->hsq_slock, lock_flags);
	flush_pending_cmds(hwq);
	spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
}

/**
 * term_afu() - terminates the AFU
 * @cfg: Internal structure associated with the host.
 *
 * Safe to call with AFU/MC in partially allocated/initialized state.
 */
static void term_afu(struct cxlflash_cfg *cfg)
{
	struct device *dev = &cfg->dev->dev;
	int k;

	/*
	 * Tear down is carefully orchestrated to ensure
	 * no interrupts can come in when the problem state
	 * area is unmapped.
	 *
	 * 1) Disable all AFU interrupts for each master
	 * 2) Unmap the problem state area
	 * 3) Stop each master context
	 */
	for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
		term_intr(cfg, UNMAP_THREE, k);

	stop_afu(cfg);

	for (k = cfg->afu->num_hwqs - 1; k >= 0; k--)
		term_mc(cfg, k);

	dev_dbg(dev, "%s: returning\n", __func__);
}

/**
 * notify_shutdown() - notifies device of pending shutdown
 * @cfg:  Internal structure associated with the host.
 * @wait: Whether to wait for shutdown processing to complete.
 *
 * This function will notify the AFU that the adapter is being shutdown
 * and will wait for shutdown processing to complete if wait is true.
 * This notification should flush pending I/Os to the device and halt
 * further I/Os until the next AFU reset is issued and device restarted.
 */
static void notify_shutdown(struct cxlflash_cfg *cfg, bool wait)
{
	struct afu *afu = cfg->afu;
	struct device *dev = &cfg->dev->dev;
	struct dev_dependent_vals *ddv;
	__be64 __iomem *fc_port_regs;
	u64 reg, status;
	int i, retry_cnt = 0;

	ddv = (struct dev_dependent_vals *)cfg->dev_id->driver_data;
	if (!(ddv->flags & CXLFLASH_NOTIFY_SHUTDOWN))
		return;

	if (!afu || !afu->afu_map) {
		dev_dbg(dev, "%s: Problem state area not mapped\n", __func__);
		return;
	}

	/* Notify AFU */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);

		reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
		reg |= SISL_FC_SHUTDOWN_NORMAL;
		writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);
	}

	if (!wait)
		return;

	/* Wait up to 1.5 seconds for shutdown processing to complete */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);
		retry_cnt = 0;

		while (true) {
			status = readq_be(&fc_port_regs[FC_STATUS / 8]);
			if (status & SISL_STATUS_SHUTDOWN_COMPLETE)
				break;
			if (++retry_cnt >= MC_RETRY_CNT) {
				dev_dbg(dev, "%s: port %d shutdown processing "
					"not yet completed\n", __func__, i);
				break;
			}
			msleep(100 * retry_cnt);
		}
	}
}

/**
 * cxlflash_get_minor() - gets the first available minor number
 *
 * Return: Unique minor number that can be used to create the character device.
 */
static int cxlflash_get_minor(void)
{
	int minor;
	long bit;

	bit = find_first_zero_bit(cxlflash_minor, CXLFLASH_MAX_ADAPTERS);
	if (bit >= CXLFLASH_MAX_ADAPTERS)
		return -1;

	minor = bit & MINORMASK;
	set_bit(minor, cxlflash_minor);
	return minor;
}

/**
 * cxlflash_put_minor() - releases the minor number
 * @minor: Minor number that is no longer needed.
 */
static void cxlflash_put_minor(int minor)
{
	clear_bit(minor, cxlflash_minor);
}

/**
 * cxlflash_release_chrdev() - release the character device for the host
 * @cfg: Internal structure associated with the host.
 */
static void cxlflash_release_chrdev(struct cxlflash_cfg *cfg)
{
	device_unregister(cfg->chardev);
	cfg->chardev = NULL;
	cdev_del(&cfg->cdev);
	cxlflash_put_minor(MINOR(cfg->cdev.dev));
}

/**
 * cxlflash_remove() - PCI entry point to tear down host
 * @pdev: PCI device associated with the host.
 *
 * Safe to use as a cleanup in partially allocated/initialized state. Note that
 * the reset_waitq is flushed as part of the stop/termination of user contexts.
 */
static void cxlflash_remove(struct pci_dev *pdev)
{
	struct cxlflash_cfg *cfg = pci_get_drvdata(pdev);
	struct device *dev = &pdev->dev;
	ulong lock_flags;

	if (!pci_is_enabled(pdev)) {
		dev_dbg(dev, "%s: Device is disabled\n", __func__);
		return;
	}

	/* Yield to running recovery threads before continuing with remove */
	wait_event(cfg->reset_waitq, cfg->state != STATE_RESET &&
				     cfg->state != STATE_PROBING);
	spin_lock_irqsave(&cfg->tmf_slock, lock_flags);
	if (cfg->tmf_active)
		wait_event_interruptible_lock_irq(cfg->tmf_waitq,
						  !cfg->tmf_active,
						  cfg->tmf_slock);
	spin_unlock_irqrestore(&cfg->tmf_slock, lock_flags);

	/* Notify AFU and wait for shutdown processing to complete */
	notify_shutdown(cfg, true);

	cfg->state = STATE_FAILTERM;
	cxlflash_stop_term_user_contexts(cfg);

	switch (cfg->init_state) {
	case INIT_STATE_CDEV:
		cxlflash_release_chrdev(cfg);
		fallthrough;
	case INIT_STATE_SCSI:
		cxlflash_term_local_luns(cfg);
		scsi_remove_host(cfg->host);
		fallthrough;
	case INIT_STATE_AFU:
		term_afu(cfg);
		fallthrough;
	case INIT_STATE_PCI:
		cfg->ops->destroy_afu(cfg->afu_cookie);
		pci_disable_device(pdev);
		fallthrough;
	case INIT_STATE_NONE:
		free_mem(cfg);
		scsi_host_put(cfg->host);
		break;
	}

	dev_dbg(dev, "%s: returning\n", __func__);
}

/**
 * alloc_mem() - allocates the AFU and its command pool
 * @cfg: Internal structure associated with the host.
 *
 * A partially allocated state remains on failure.
 *
 * Return:
 *	0 on success
 *	-ENOMEM on failure to allocate memory
 */
static int alloc_mem(struct cxlflash_cfg *cfg)
{
	int rc = 0;
	struct device *dev = &cfg->dev->dev;

	/* AFU is ~28k, i.e. only one 64k page or up to seven 4k pages */
	cfg->afu = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					    get_order(sizeof(struct afu)));
	if (unlikely(!cfg->afu)) {
		dev_err(dev, "%s: cannot get %d free pages\n",
			__func__, get_order(sizeof(struct afu)));
		rc = -ENOMEM;
		goto out;
	}
	cfg->afu->parent = cfg;
	cfg->afu->desired_hwqs = CXLFLASH_DEF_HWQS;
	cfg->afu->afu_map = NULL;
out:
	return rc;
}

/**
 * init_pci() - initializes the host as a PCI device
 * @cfg: Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_pci(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	rc = pci_enable_device(pdev);
	if (rc || pci_channel_offline(pdev)) {
		if (pci_channel_offline(pdev)) {
			cxlflash_wait_for_pci_err_recovery(cfg);
			rc = pci_enable_device(pdev);
		}

		if (rc) {
			dev_err(dev, "%s: Cannot enable adapter\n", __func__);
			cxlflash_wait_for_pci_err_recovery(cfg);
			goto out;
		}
	}

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * init_scsi() - adds the host to the SCSI stack and kicks off host scan
 * @cfg: Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
static int init_scsi(struct cxlflash_cfg *cfg)
{
	struct pci_dev *pdev = cfg->dev;
	struct device *dev = &cfg->dev->dev;
	int rc = 0;

	rc = scsi_add_host(cfg->host, &pdev->dev);
	if (rc) {
		dev_err(dev, "%s: scsi_add_host failed rc=%d\n", __func__, rc);
		goto out;
	}

	scsi_scan_host(cfg->host);

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
	return rc;
}

/**
 * set_port_online() - transitions the specified host FC port to online state
 * @fc_regs: Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. Online state means
 * that the FC link layer has synced, completed the handshaking process, and
 * is ready for login to start.
 */
static void set_port_online(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_OFFLINE);	/* clear OFF_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_ONLINE);	/* set ON_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * set_port_offline() - transitions the specified host FC port to offline state
 * @fc_regs: Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call.
 */
static void set_port_offline(__be64 __iomem *fc_regs)
{
	u64 cmdcfg;

	cmdcfg = readq_be(&fc_regs[FC_MTIP_CMDCONFIG / 8]);
	cmdcfg &= (~FC_MTIP_CMDCONFIG_ONLINE);	/* clear ON_LINE */
	cmdcfg |= (FC_MTIP_CMDCONFIG_OFFLINE);	/* set OFF_LINE */
	writeq_be(cmdcfg, &fc_regs[FC_MTIP_CMDCONFIG / 8]);
}

/**
 * wait_port_online() - waits for the specified host FC port come online
 * @fc_regs:  Top of MMIO region defined for specified port.
 * @delay_us: Number of microseconds to delay between reading port status.
 * @nretry:   Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call. This will timeout
 * when the cable is not plugged in.
 *
 * Return:
 *	TRUE (1) when the specified port is online
 *	FALSE (0) when the specified port fails to come online after timeout
 */
static bool wait_port_online(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	WARN_ON(delay_us < 1000);

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
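		/*
		 * An all-ones read likely means the MMIO space is no longer
		 * accessible, so cut the remaining retries short.
		 */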
		if (status == U64_MAX)
			nretry /= 2;
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_ONLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_ONLINE);
}

/**
 * wait_port_offline() - waits for the specified host FC port go offline
 * @fc_regs:  Top of MMIO region defined for specified port.
 * @delay_us: Number of microseconds to delay between reading port status.
 * @nretry:   Number of cycles to retry reading port status.
 *
 * The provided MMIO region must be mapped prior to call.
 *
 * Return:
 *	TRUE (1) when the specified port is offline
 *	FALSE (0) when the specified port fails to go offline after timeout
 */
static bool wait_port_offline(__be64 __iomem *fc_regs, u32 delay_us, u32 nretry)
{
	u64 status;

	WARN_ON(delay_us < 1000);

	do {
		msleep(delay_us / 1000);
		status = readq_be(&fc_regs[FC_MTIP_STATUS / 8]);
		if (status == U64_MAX)
			nretry /= 2;
	} while ((status & FC_MTIP_STATUS_MASK) != FC_MTIP_STATUS_OFFLINE &&
		 nretry--);

	return ((status & FC_MTIP_STATUS_MASK) == FC_MTIP_STATUS_OFFLINE);
}

/**
 * afu_set_wwpn() - configures the WWPN for the specified host FC port
 * @afu:     AFU associated with the host that owns the specified FC port.
 * @port:    Port number being configured.
 * @fc_regs: Top of MMIO region defined for specified port.
 * @wwpn:    The world-wide-port-number previously discovered for port.
 *
 * The provided MMIO region must be mapped prior to call. As part of the
 * sequence to configure the WWPN, the port is toggled offline and then back
 * online. This toggling action can cause this routine to delay up to a few
 * seconds. When configured to use the internal LUN feature of the AFU, a
 * failure to come online is overridden.
 */
static void afu_set_wwpn(struct afu *afu, int port, __be64 __iomem *fc_regs,
			 u64 wwpn)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;

	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT)) {
		dev_dbg(dev, "%s: wait on port %d to go offline timed out\n",
			__func__, port);
	}

	writeq_be(wwpn, &fc_regs[FC_PNAME / 8]);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT)) {
		dev_dbg(dev, "%s: wait on port %d to go online timed out\n",
			__func__, port);
	}
}

/**
 * afu_link_reset() - resets the specified host FC port
 * @afu:     AFU associated with the host that owns the specified FC port.
 * @port:    Port number being configured.
 * @fc_regs: Top of MMIO region defined for specified port.
 *
 * The provided MMIO region must be mapped prior to call. The sequence to
 * reset the port involves toggling it offline and then back online. This
 * action can cause this routine to delay up to a few seconds. An effort
 * is made to maintain link with the device by switching the host to use
 * the alternate port exclusively while the reset takes place.
 */
static void afu_link_reset(struct afu *afu, int port, __be64 __iomem *fc_regs)
{
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	u64 port_sel;

	/* first switch the AFU to the other links, if any */
	port_sel = readq_be(&afu->afu_map->global.regs.afu_port_sel);
	port_sel &= ~(1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	set_port_offline(fc_regs);
	if (!wait_port_offline(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			       FC_PORT_STATUS_RETRY_CNT))
		dev_err(dev, "%s: wait on port %d to go offline timed out\n",
			__func__, port);

	set_port_online(fc_regs);
	if (!wait_port_online(fc_regs, FC_PORT_STATUS_RETRY_INTERVAL_US,
			      FC_PORT_STATUS_RETRY_CNT))
		dev_err(dev, "%s: wait on port %d to go online timed out\n",
			__func__, port);

	/* switch back to include this port */
	port_sel |= (1ULL << port);
	writeq_be(port_sel, &afu->afu_map->global.regs.afu_port_sel);
	cxlflash_afu_sync(afu, 0, 0, AFU_GSYNC);

	dev_dbg(dev, "%s: returning port_sel=%016llx\n", __func__, port_sel);
}

/**
 * afu_err_intr_init() - clears and initializes the AFU for error interrupts
 * @afu: AFU associated with the host.
 */
static void afu_err_intr_init(struct afu *afu)
{
	struct cxlflash_cfg *cfg = afu->parent;
	__be64 __iomem *fc_port_regs;
	int i;
	struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ);
	u64 reg;

	/*
	 * Global async interrupts: the AFU clears afu_ctrl on context exit
	 * if async interrupts were sent to that context. This prevents the
	 * AFU from sending further async interrupts when there is nobody
	 * to receive them.
	 */

	/* mask all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_mask);
	/* set LISN# to send and point to primary master context */
	reg = ((u64) (((hwq->ctx_hndl << 8) | SISL_MSI_ASYNC_ERROR)) << 40);

	if (afu->internal_lun)
		reg |= 1;	/* Bit 63 indicates local lun */
	writeq_be(reg, &afu->afu_map->global.regs.afu_ctrl);
	/* clear all */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);
	/* unmask bits that are of interest */
	/* note: afu can send an interrupt after this step */
	writeq_be(SISL_ASTATUS_MASK, &afu->afu_map->global.regs.aintr_mask);
	/* clear again in case a bit came on after previous clear but before */
	/* unmask */
	writeq_be(-1ULL, &afu->afu_map->global.regs.aintr_clear);

	/* Clear/Set internal lun bits */
	fc_port_regs = get_fc_port_regs(cfg, 0);
	reg = readq_be(&fc_port_regs[FC_CONFIG2 / 8]);
	reg &= SISL_FC_INTERNAL_MASK;
	if (afu->internal_lun)
		reg |= ((u64)(afu->internal_lun - 1) << SISL_FC_INTERNAL_SHIFT);
	writeq_be(reg, &fc_port_regs[FC_CONFIG2 / 8]);

	/* now clear FC errors */
	for (i = 0; i < cfg->num_fc_ports; i++) {
		fc_port_regs = get_fc_port_regs(cfg, i);

		writeq_be(0xFFFFFFFFU, &fc_port_regs[FC_ERROR / 8]);
		writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
	}

	/* sync interrupts for master's IOARRIN write */
	/* note that unlike asyncs, there can be no pending sync interrupts */
	/* at this time (this is a fresh context and master has not written */
	/* IOARRIN yet), so there is nothing to clear. */

	/* set LISN#, it is always sent to the context that wrote IOARRIN */
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);

		reg = readq_be(&hwq->host_map->ctx_ctrl);
		WARN_ON((reg & SISL_CTX_CTRL_LISN_MASK) != 0);
		reg |= SISL_MSI_SYNC_ERROR;
		writeq_be(reg, &hwq->host_map->ctx_ctrl);
		writeq_be(SISL_ISTATUS_MASK, &hwq->host_map->intr_mask);
	}
}

/**
 * cxlflash_sync_err_irq() - interrupt handler for synchronous errors
 * @irq:  Interrupt number.
 * @data: Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
{
	struct hwq *hwq = (struct hwq *)data;
	struct cxlflash_cfg *cfg = hwq->afu->parent;
	struct device *dev = &cfg->dev->dev;
	u64 reg;
	u64 reg_unmasked;

	reg = readq_be(&hwq->host_map->intr_status);
	reg_unmasked = (reg & SISL_ISTATUS_UNMASK);

	if (reg_unmasked == 0UL) {
		dev_err(dev, "%s: spurious interrupt, intr_status=%016llx\n",
			__func__, reg);
		goto cxlflash_sync_err_irq_exit;
	}

	dev_err(dev, "%s: unexpected interrupt, intr_status=%016llx\n",
		__func__, reg);

	writeq_be(reg_unmasked, &hwq->host_map->intr_clear);

cxlflash_sync_err_irq_exit:
	return IRQ_HANDLED;
}

/**
 * process_hrrq() - process the read-response queue
 * @hwq:    HWQ associated with the host.
 * @doneq:  Queue of commands harvested from the RRQ.
 * @budget: Threshold of RRQ entries to process.
 *
 * This routine must be called holding the disabled RRQ spin lock.
 *
 * Return: The number of entries processed.
 */
static int process_hrrq(struct hwq *hwq, struct list_head *doneq, int budget)
{
	struct afu *afu = hwq->afu;
	struct afu_cmd *cmd;
	struct sisl_ioasa *ioasa;
	struct sisl_ioarcb *ioarcb;
	bool toggle = hwq->toggle;
	int num_hrrq = 0;
	u64 entry,
	    *hrrq_start = hwq->hrrq_start,
	    *hrrq_end = hwq->hrrq_end,
	    *hrrq_curr = hwq->hrrq_curr;

	/* Process ready RRQ entries up to the specified budget (if any) */
	while (true) {
		entry = *hrrq_curr;

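		/*
		 * The toggle bit distinguishes fresh entries from stale ones
		 * left over from the previous pass around the ring; a
		 * mismatch means the AFU has not yet posted this entry.
		 */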
		if ((entry & SISL_RESP_HANDLE_T_BIT) != toggle)
			break;

		entry &= ~SISL_RESP_HANDLE_T_BIT;

		if (afu_is_sq_cmd_mode(afu)) {
			ioasa = (struct sisl_ioasa *)entry;
			cmd = container_of(ioasa, struct afu_cmd, sa);
		} else {
			ioarcb = (struct sisl_ioarcb *)entry;
			cmd = container_of(ioarcb, struct afu_cmd, rcb);
		}

		list_add_tail(&cmd->queue, doneq);

		/* Advance to next entry or wrap and flip the toggle bit */
		if (hrrq_curr < hrrq_end)
			hrrq_curr++;
		else {
			hrrq_curr = hrrq_start;
			toggle ^= SISL_RESP_HANDLE_T_BIT;
		}

		atomic_inc(&hwq->hsq_credits);
		num_hrrq++;

		if (budget > 0 && num_hrrq >= budget)
			break;
	}

	hwq->hrrq_curr = hrrq_curr;
	hwq->toggle = toggle;

	return num_hrrq;
}

/**
 * process_cmd_doneq() - process a queue of harvested RRQ commands
 * @doneq: Queue of completed commands.
 *
 * Note that upon return the queue can no longer be trusted.
 */
static void process_cmd_doneq(struct list_head *doneq)
{
	struct afu_cmd *cmd, *tmp;

	WARN_ON(list_empty(doneq));

	list_for_each_entry_safe(cmd, tmp, doneq, queue)
		cmd_complete(cmd);
}

/**
 * cxlflash_irqpoll() - process a queue of harvested RRQ commands
 * @irqpoll: IRQ poll structure associated with queue to poll.
 * @budget:  Threshold of RRQ entries to process per poll.
 *
 * Return: The number of entries processed.
 */
static int cxlflash_irqpoll(struct irq_poll *irqpoll, int budget)
{
	struct hwq *hwq = container_of(irqpoll, struct hwq, irqpoll);
	unsigned long hrrq_flags;
	LIST_HEAD(doneq);
	int num_entries = 0;

	spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);

	num_entries = process_hrrq(hwq, &doneq, budget);
	if (num_entries < budget)
		irq_poll_complete(irqpoll);

	spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);

	process_cmd_doneq(&doneq);
	return num_entries;
}

/**
 * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
 * @irq:  Interrupt number.
 * @data: Private data provided at interrupt registration, the AFU.
 *
 * Return: IRQ_HANDLED or IRQ_NONE when no ready entries found.
 */
static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
{
	struct hwq *hwq = (struct hwq *)data;
	struct afu *afu = hwq->afu;
	unsigned long hrrq_flags;
	LIST_HEAD(doneq);
	int num_entries = 0;

	spin_lock_irqsave(&hwq->hrrq_slock, hrrq_flags);

	/* Silently drop spurious interrupts when queue is not online */
	if (!hwq->hrrq_online) {
		spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
		return IRQ_HANDLED;
	}

	if (afu_is_irqpoll_enabled(afu)) {
		irq_poll_sched(&hwq->irqpoll);
		spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);
		return IRQ_HANDLED;
	}

	num_entries = process_hrrq(hwq, &doneq, -1);
	spin_unlock_irqrestore(&hwq->hrrq_slock, hrrq_flags);

	if (num_entries == 0)
		return IRQ_NONE;

	process_cmd_doneq(&doneq);
	return IRQ_HANDLED;
}

/*
 * Asynchronous interrupt information table
 *
 * NOTE:
 *	- Order matters here as this array is indexed by bit position.
 *
 *	- The checkpatch script considers the BUILD_SISL_ASTATUS_FC_PORT macro
 *	  as complex and complains due to a lack of parentheses/braces.
 */
#define ASTATUS_FC(_a, _b, _c, _d)					 \
	{ SISL_ASTATUS_FC##_a##_##_b, _c, _a, (_d) }

#define BUILD_SISL_ASTATUS_FC_PORT(_a)					 \
	ASTATUS_FC(_a, LINK_UP, "link up", 0),				 \
	ASTATUS_FC(_a, LINK_DN, "link down", 0),			 \
	ASTATUS_FC(_a, LOGI_S, "login succeeded", SCAN_HOST),		 \
	ASTATUS_FC(_a, LOGI_F, "login failed", CLR_FC_ERROR),		 \
	ASTATUS_FC(_a, LOGI_R, "login timed out, retrying", LINK_RESET), \
	ASTATUS_FC(_a, CRC_T, "CRC threshold exceeded", LINK_RESET),	 \
	ASTATUS_FC(_a, LOGO, "target initiated LOGO", 0),		 \
	ASTATUS_FC(_a, OTHER, "other error", CLR_FC_ERROR | LINK_RESET)

static const struct asyc_intr_info ainfo[] = {
	BUILD_SISL_ASTATUS_FC_PORT(1),
	BUILD_SISL_ASTATUS_FC_PORT(0),
	BUILD_SISL_ASTATUS_FC_PORT(3),
	BUILD_SISL_ASTATUS_FC_PORT(2)
};

/**
 * cxlflash_async_err_irq() - interrupt handler for asynchronous errors
 * @irq:  Interrupt number.
 * @data: Private data provided at interrupt registration, the AFU.
 *
 * Return: Always return IRQ_HANDLED.
 */
static irqreturn_t cxlflash_async_err_irq(int irq, void *data)
{
	struct hwq *hwq = (struct hwq *)data;
	struct afu *afu = hwq->afu;
	struct cxlflash_cfg *cfg = afu->parent;
	struct device *dev = &cfg->dev->dev;
	const struct asyc_intr_info *info;
	struct sisl_global_map __iomem *global = &afu->afu_map->global;
	__be64 __iomem *fc_port_regs;
	u64 reg_unmasked;
	u64 reg;
	u64 bit;
	u8 port;

	reg = readq_be(&global->regs.aintr_status);
	reg_unmasked = (reg & SISL_ASTATUS_UNMASK);

	if (unlikely(reg_unmasked == 0)) {
		dev_err(dev, "%s: spurious interrupt, aintr_status=%016llx\n",
			__func__, reg);
		goto out;
	}

	/* FYI, it is 'okay' to clear AFU status before FC_ERROR */
	writeq_be(reg_unmasked, &global->regs.aintr_clear);

	/* Check each bit that is on */
	for_each_set_bit(bit, (ulong *)&reg_unmasked, BITS_PER_LONG) {
		if (unlikely(bit >= ARRAY_SIZE(ainfo))) {
			WARN_ON_ONCE(1);
			continue;
		}

		info = &ainfo[bit];
		if (unlikely(info->status != 1ULL << bit)) {
			WARN_ON_ONCE(1);
			continue;
		}

		port = info->port;
		fc_port_regs = get_fc_port_regs(cfg, port);

		dev_err(dev, "%s: FC Port %d -> %s, fc_status=%016llx\n",
			__func__, port, info->desc,
			readq_be(&fc_port_regs[FC_STATUS / 8]));

		/*
		 * Do link reset first, some OTHER errors will set FC_ERROR
		 * again if cleared before or w/o a reset
		 */
		if (info->action & LINK_RESET) {
			dev_err(dev, "%s: FC Port %d: resetting link\n",
				__func__, port);
			cfg->lr_state = LINK_RESET_REQUIRED;
			cfg->lr_port = port;
			schedule_work(&cfg->work_q);
		}

		if (info->action & CLR_FC_ERROR) {
			reg = readq_be(&fc_port_regs[FC_ERROR / 8]);

			/*
			 * Since all errors are unmasked, FC_ERROR and FC_ERRCAP
			 * should be the same and tracing one is sufficient.
			 */

			dev_err(dev, "%s: fc %d: clearing fc_error=%016llx\n",
				__func__, port, reg);

			writeq_be(reg, &fc_port_regs[FC_ERROR / 8]);
			writeq_be(0, &fc_port_regs[FC_ERRCAP / 8]);
		}

		if (info->action & SCAN_HOST) {
			atomic_inc(&cfg->scan_host_needed);
			schedule_work(&cfg->work_q);
		}
	}

out:
	return IRQ_HANDLED;
}
1625 | |
1626 | /** |
1627 | * read_vpd() - obtains the WWPNs from VPD |
1628 | * @cfg: Internal structure associated with the host. |
1629 | * @wwpn: Array of size MAX_FC_PORTS to pass back WWPNs |
1630 | * |
1631 | * Return: 0 on success, -errno on failure |
1632 | */ |
1633 | static int read_vpd(struct cxlflash_cfg *cfg, u64 wwpn[]) |
1634 | { |
1635 | struct device *dev = &cfg->dev->dev; |
1636 | struct pci_dev *pdev = cfg->dev; |
1637 | int i, k, rc = 0; |
1638 | unsigned int kw_size; |
1639 | ssize_t vpd_size; |
1640 | char vpd_data[CXLFLASH_VPD_LEN]; |
1641 | char tmp_buf[WWPN_BUF_LEN] = { 0 }; |
1642 | const struct dev_dependent_vals *ddv = (struct dev_dependent_vals *) |
1643 | cfg->dev_id->driver_data; |
1644 | const bool wwpn_vpd_required = ddv->flags & CXLFLASH_WWPN_VPD_REQUIRED; |
	const char *wwpn_vpd_tags[MAX_FC_PORTS] = { "V5", "V6", "V7", "V8" };
1646 | |
1647 | /* Get the VPD data from the device */ |
1648 | vpd_size = cfg->ops->read_adapter_vpd(pdev, vpd_data, sizeof(vpd_data)); |
1649 | if (unlikely(vpd_size <= 0)) { |
		dev_err(dev, "%s: Unable to read VPD (size = %ld)\n",
1651 | __func__, vpd_size); |
1652 | rc = -ENODEV; |
1653 | goto out; |
1654 | } |
1655 | |
1656 | /* |
1657 | * Find the offset of the WWPN tag within the read only |
1658 | * VPD data and validate the found field (partials are |
1659 | * no good to us). Convert the ASCII data to an integer |
1660 | * value. Note that we must copy to a temporary buffer |
1661 | * because the conversion service requires that the ASCII |
1662 | * string be terminated. |
1663 | * |
1664 | * Allow for WWPN not being found for all devices, setting |
1665 | * the returned WWPN to zero when not found. Notify with a |
1666 | * log error for cards that should have had WWPN keywords |
1667 | * in the VPD - cards requiring WWPN will not have their |
1668 | * ports programmed and operate in an undefined state. |
1669 | */ |
1670 | for (k = 0; k < cfg->num_fc_ports; k++) { |
		i = pci_vpd_find_ro_info_keyword(vpd_data, vpd_size,
						 wwpn_vpd_tags[k], &kw_size);
		if (i == -ENOENT) {
			if (wwpn_vpd_required)
				dev_err(dev, "%s: Port %d WWPN not found\n",
					__func__, k);
			wwpn[k] = 0ULL;
			continue;
		}

		if (i < 0 || kw_size != WWPN_LEN) {
			dev_err(dev, "%s: Port %d WWPN incomplete or bad VPD\n",
				__func__, k);
			rc = -ENODEV;
			goto out;
		}

		memcpy(tmp_buf, &vpd_data[i], WWPN_LEN);
		rc = kstrtoul(tmp_buf, WWPN_LEN, (ulong *)&wwpn[k]);
		if (unlikely(rc)) {
			dev_err(dev, "%s: WWPN conversion failed for port %d\n",
				__func__, k);
			rc = -ENODEV;
			goto out;
		}

		dev_dbg(dev, "%s: wwpn%d=%016llx\n", __func__, k, wwpn[k]);
	}

out:
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1702 | return rc; |
1703 | } |
1704 | |
1705 | /** |
1706 | * init_pcr() - initialize the provisioning and control registers |
1707 | * @cfg: Internal structure associated with the host. |
1708 | * |
1709 | * Also sets up fast access to the mapped registers and initializes AFU |
1710 | * command fields that never change. |
1711 | */ |
1712 | static void init_pcr(struct cxlflash_cfg *cfg) |
1713 | { |
1714 | struct afu *afu = cfg->afu; |
1715 | struct sisl_ctrl_map __iomem *ctrl_map; |
1716 | struct hwq *hwq; |
1717 | void *cookie; |
1718 | int i; |
1719 | |
1720 | for (i = 0; i < MAX_CONTEXT; i++) { |
1721 | ctrl_map = &afu->afu_map->ctrls[i].ctrl; |
		/*
		 * Disrupt any clients that could be running,
		 * e.g. clients that survived a master restart.
		 */
1724 | writeq_be(0, &ctrl_map->rht_start); |
1725 | writeq_be(0, &ctrl_map->rht_cnt_id); |
1726 | writeq_be(0, &ctrl_map->ctx_cap); |
1727 | } |
1728 | |
1729 | /* Copy frequently used fields into hwq */ |
1730 | for (i = 0; i < afu->num_hwqs; i++) { |
		hwq = get_hwq(afu, i);
1732 | cookie = hwq->ctx_cookie; |
1733 | |
1734 | hwq->ctx_hndl = (u16) cfg->ops->process_element(cookie); |
1735 | hwq->host_map = &afu->afu_map->hosts[hwq->ctx_hndl].host; |
1736 | hwq->ctrl_map = &afu->afu_map->ctrls[hwq->ctx_hndl].ctrl; |
1737 | |
1738 | /* Program the Endian Control for the master context */ |
1739 | writeq_be(SISL_ENDIAN_CTRL, &hwq->host_map->endian_ctrl); |
1740 | } |
1741 | } |
1742 | |
1743 | /** |
1744 | * init_global() - initialize AFU global registers |
 * @cfg: Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
1747 | static int init_global(struct cxlflash_cfg *cfg) |
1748 | { |
1749 | struct afu *afu = cfg->afu; |
1750 | struct device *dev = &cfg->dev->dev; |
1751 | struct hwq *hwq; |
1752 | struct sisl_host_map __iomem *hmap; |
1753 | __be64 __iomem *fc_port_regs; |
1754 | u64 wwpn[MAX_FC_PORTS]; /* wwpn of AFU ports */ |
1755 | int i = 0, num_ports = 0; |
1756 | int rc = 0; |
1757 | int j; |
1758 | void *ctx; |
1759 | u64 reg; |
1760 | |
	rc = read_vpd(cfg, &wwpn[0]);
	if (rc) {
		dev_err(dev, "%s: could not read vpd rc=%d\n", __func__, rc);
1764 | goto out; |
1765 | } |
1766 | |
1767 | /* Set up RRQ and SQ in HWQ for master issued cmds */ |
1768 | for (i = 0; i < afu->num_hwqs; i++) { |
		hwq = get_hwq(afu, i);
1770 | hmap = hwq->host_map; |
1771 | |
1772 | writeq_be((u64) hwq->hrrq_start, &hmap->rrq_start); |
1773 | writeq_be((u64) hwq->hrrq_end, &hmap->rrq_end); |
1774 | hwq->hrrq_online = true; |
1775 | |
1776 | if (afu_is_sq_cmd_mode(afu)) { |
1777 | writeq_be((u64)hwq->hsq_start, &hmap->sq_start); |
1778 | writeq_be((u64)hwq->hsq_end, &hmap->sq_end); |
1779 | } |
1780 | } |
1781 | |
1782 | /* AFU configuration */ |
1783 | reg = readq_be(&afu->afu_map->global.regs.afu_config); |
	reg |= SISL_AFUCONF_AR_ALL | SISL_AFUCONF_ENDIAN;
	/*
	 * Enable all auto retry options and control endianness. Leave
	 * others at default: CTX_CAP write protected, mbox_r does not
	 * clear on read, and checker on if dual AFU.
	 */
1789 | writeq_be(reg, &afu->afu_map->global.regs.afu_config); |
1790 | |
1791 | /* Global port select: select either port */ |
1792 | if (afu->internal_lun) { |
1793 | /* Only use port 0 */ |
1794 | writeq_be(PORT0, &afu->afu_map->global.regs.afu_port_sel); |
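		/* A zero port count skips the WWPN programming loop below */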
1795 | num_ports = 0; |
1796 | } else { |
1797 | writeq_be(PORT_MASK(cfg->num_fc_ports), |
1798 | &afu->afu_map->global.regs.afu_port_sel); |
1799 | num_ports = cfg->num_fc_ports; |
1800 | } |
1801 | |
1802 | for (i = 0; i < num_ports; i++) { |
1803 | fc_port_regs = get_fc_port_regs(cfg, i); |
1804 | |
1805 | /* Unmask all errors (but they are still masked at AFU) */ |
1806 | writeq_be(0, &fc_port_regs[FC_ERRMSK / 8]); |
1807 | /* Clear CRC error cnt & set a threshold */ |
1808 | (void)readq_be(&fc_port_regs[FC_CNT_CRCERR / 8]); |
1809 | writeq_be(MC_CRC_THRESH, &fc_port_regs[FC_CRC_THRESH / 8]); |
1810 | |
		/* Set WWPNs. If already programmed, wwpn[i] is 0 */
		if (wwpn[i] != 0)
			afu_set_wwpn(afu, i, &fc_port_regs[0], wwpn[i]);
		/*
		 * Programming WWPN back to back causes additional
		 * offline/online transitions and a PLOGI
		 */
		msleep(100);
1818 | } |
1819 | |
1820 | if (afu_is_ocxl_lisn(afu)) { |
1821 | /* Set up the LISN effective address for each master */ |
1822 | for (i = 0; i < afu->num_hwqs; i++) { |
			hwq = get_hwq(afu, i);
1824 | ctx = hwq->ctx_cookie; |
1825 | |
1826 | for (j = 0; j < hwq->num_irqs; j++) { |
1827 | reg = cfg->ops->get_irq_objhndl(ctx, j); |
1828 | writeq_be(reg, &hwq->ctrl_map->lisn_ea[j]); |
1829 | } |
1830 | |
1831 | reg = hwq->ctx_hndl; |
1832 | writeq_be(SISL_LISN_PASID(reg, reg), |
1833 | &hwq->ctrl_map->lisn_pasid[0]); |
1834 | writeq_be(SISL_LISN_PASID(0UL, reg), |
1835 | &hwq->ctrl_map->lisn_pasid[1]); |
1836 | } |
1837 | } |
1838 | |
	/*
	 * Set up master's own CTX_CAP to allow real mode, host translation
	 * tables, AFU cmds and read/write GSCSI cmds.
	 * First, unlock ctx_cap write by reading mbox.
	 */
1842 | for (i = 0; i < afu->num_hwqs; i++) { |
		hwq = get_hwq(afu, i);
1844 | |
1845 | (void)readq_be(&hwq->ctrl_map->mbox_r); /* unlock ctx_cap */ |
1846 | writeq_be((SISL_CTX_CAP_REAL_MODE | SISL_CTX_CAP_HOST_XLATE | |
1847 | SISL_CTX_CAP_READ_CMD | SISL_CTX_CAP_WRITE_CMD | |
1848 | SISL_CTX_CAP_AFU_CMD | SISL_CTX_CAP_GSCSI_CMD), |
1849 | &hwq->ctrl_map->ctx_cap); |
1850 | } |
1851 | |
1852 | /* |
1853 | * Determine write-same unmap support for host by evaluating the unmap |
1854 | * sector support bit of the context control register associated with |
1855 | * the primary hardware queue. Note that while this status is reflected |
1856 | * in a context register, the outcome can be assumed to be host-wide. |
1857 | */ |
1858 | hwq = get_hwq(afu, PRIMARY_HWQ); |
1859 | reg = readq_be(&hwq->host_map->ctx_ctrl); |
1860 | if (reg & SISL_CTX_CTRL_UNMAP_SECTOR) |
1861 | cfg->ws_unmap = true; |
1862 | |
1863 | /* Initialize heartbeat */ |
1864 | afu->hb = readq_be(&afu->afu_map->global.regs.afu_hb); |
1865 | out: |
1866 | return rc; |
1867 | } |
1868 | |
1869 | /** |
1870 | * start_afu() - initializes and starts the AFU |
 * @cfg: Internal structure associated with the host.
 *
 * Return: 0 on success, -errno on failure
 */
1873 | static int start_afu(struct cxlflash_cfg *cfg) |
1874 | { |
1875 | struct afu *afu = cfg->afu; |
1876 | struct device *dev = &cfg->dev->dev; |
1877 | struct hwq *hwq; |
1878 | int rc = 0; |
1879 | int i; |
1880 | |
1881 | init_pcr(cfg); |
1882 | |
1883 | /* Initialize each HWQ */ |
1884 | for (i = 0; i < afu->num_hwqs; i++) { |
		hwq = get_hwq(afu, i);
1886 | |
1887 | /* After an AFU reset, RRQ entries are stale, clear them */ |
1888 | memset(&hwq->rrq_entry, 0, sizeof(hwq->rrq_entry)); |
1889 | |
1890 | /* Initialize RRQ pointers */ |
1891 | hwq->hrrq_start = &hwq->rrq_entry[0]; |
1892 | hwq->hrrq_end = &hwq->rrq_entry[NUM_RRQ_ENTRY - 1]; |
1893 | hwq->hrrq_curr = hwq->hrrq_start; |
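		/*
		 * The toggle bit flips on each wrap of the RRQ so that newly
		 * posted entries can be told apart from stale ones.
		 */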
1894 | hwq->toggle = 1; |
1895 | |
1896 | /* Initialize spin locks */ |
1897 | spin_lock_init(&hwq->hrrq_slock); |
1898 | spin_lock_init(&hwq->hsq_slock); |
1899 | |
1900 | /* Initialize SQ */ |
1901 | if (afu_is_sq_cmd_mode(afu)) { |
1902 | memset(&hwq->sq, 0, sizeof(hwq->sq)); |
1903 | hwq->hsq_start = &hwq->sq[0]; |
1904 | hwq->hsq_end = &hwq->sq[NUM_SQ_ENTRY - 1]; |
1905 | hwq->hsq_curr = hwq->hsq_start; |
1906 | |
			atomic_set(&hwq->hsq_credits, NUM_SQ_ENTRY - 1);
1908 | } |
1909 | |
1910 | /* Initialize IRQ poll */ |
1911 | if (afu_is_irqpoll_enabled(afu)) |
1912 | irq_poll_init(&hwq->irqpoll, afu->irqpoll_weight, |
1913 | cxlflash_irqpoll); |
	}
1916 | |
1917 | rc = init_global(cfg); |
1918 | |
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
1920 | return rc; |
1921 | } |
1922 | |
1923 | /** |
1924 | * init_intr() - setup interrupt handlers for the master context |
1925 | * @cfg: Internal structure associated with the host. |
1926 | * @hwq: Hardware queue to initialize. |
1927 | * |
 * Return: UNDO_NOOP on success, the level of initialization to undo on failure
1929 | */ |
1930 | static enum undo_level init_intr(struct cxlflash_cfg *cfg, |
1931 | struct hwq *hwq) |
1932 | { |
1933 | struct device *dev = &cfg->dev->dev; |
1934 | void *ctx = hwq->ctx_cookie; |
1935 | int rc = 0; |
1936 | enum undo_level level = UNDO_NOOP; |
1937 | bool is_primary_hwq = (hwq->index == PRIMARY_HWQ); |
1938 | int num_irqs = hwq->num_irqs; |
1939 | |
1940 | rc = cfg->ops->allocate_afu_irqs(ctx, num_irqs); |
1941 | if (unlikely(rc)) { |
		dev_err(dev, "%s: allocate_afu_irqs failed rc=%d\n",
			__func__, rc);
		level = UNDO_NOOP;
		goto out;
	}

	rc = cfg->ops->map_afu_irq(ctx, 1, cxlflash_sync_err_irq, hwq,
				   "SISL_MSI_SYNC_ERROR");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: SISL_MSI_SYNC_ERROR map failed\n", __func__);
		level = FREE_IRQ;
		goto out;
	}

	rc = cfg->ops->map_afu_irq(ctx, 2, cxlflash_rrq_irq, hwq,
				   "SISL_MSI_RRQ_UPDATED");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: SISL_MSI_RRQ_UPDATED map failed\n", __func__);
		level = UNMAP_ONE;
		goto out;
	}

	/* SISL_MSI_ASYNC_ERROR is setup only for the primary HWQ */
	if (!is_primary_hwq)
		goto out;

	rc = cfg->ops->map_afu_irq(ctx, 3, cxlflash_async_err_irq, hwq,
				   "SISL_MSI_ASYNC_ERROR");
	if (unlikely(rc <= 0)) {
		dev_err(dev, "%s: SISL_MSI_ASYNC_ERROR map failed\n", __func__);
1972 | level = UNMAP_TWO; |
1973 | goto out; |
1974 | } |
1975 | out: |
1976 | return level; |
1977 | } |
1978 | |
1979 | /** |
1980 | * init_mc() - create and register as the master context |
1981 | * @cfg: Internal structure associated with the host. |
1982 | * @index: HWQ Index of the master context. |
1983 | * |
1984 | * Return: 0 on success, -errno on failure |
1985 | */ |
1986 | static int init_mc(struct cxlflash_cfg *cfg, u32 index) |
1987 | { |
1988 | void *ctx; |
1989 | struct device *dev = &cfg->dev->dev; |
	struct hwq *hwq = get_hwq(cfg->afu, index);
1991 | int rc = 0; |
1992 | int num_irqs; |
1993 | enum undo_level level; |
1994 | |
1995 | hwq->afu = cfg->afu; |
1996 | hwq->index = index; |
	INIT_LIST_HEAD(&hwq->pending_cmds);
1998 | |
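	/*
	 * The primary HWQ uses the AFU's default context while secondary
	 * HWQs each get a freshly initialized context. Only the primary
	 * HWQ services the asynchronous error interrupt, hence the extra
	 * IRQ (see init_intr()).
	 */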
1999 | if (index == PRIMARY_HWQ) { |
2000 | ctx = cfg->ops->get_context(cfg->dev, cfg->afu_cookie); |
2001 | num_irqs = 3; |
2002 | } else { |
2003 | ctx = cfg->ops->dev_context_init(cfg->dev, cfg->afu_cookie); |
2004 | num_irqs = 2; |
2005 | } |
	if (IS_ERR_OR_NULL(ctx)) {
2007 | rc = -ENOMEM; |
2008 | goto err1; |
2009 | } |
2010 | |
2011 | WARN_ON(hwq->ctx_cookie); |
2012 | hwq->ctx_cookie = ctx; |
2013 | hwq->num_irqs = num_irqs; |
2014 | |
2015 | /* Set it up as a master with the CXL */ |
2016 | cfg->ops->set_master(ctx); |
2017 | |
2018 | /* Reset AFU when initializing primary context */ |
2019 | if (index == PRIMARY_HWQ) { |
2020 | rc = cfg->ops->afu_reset(ctx); |
2021 | if (unlikely(rc)) { |
			dev_err(dev, "%s: AFU reset failed rc=%d\n",
2023 | __func__, rc); |
2024 | goto err1; |
2025 | } |
2026 | } |
2027 | |
2028 | level = init_intr(cfg, hwq); |
2029 | if (unlikely(level)) { |
		dev_err(dev, "%s: interrupt init failed level=%d\n",
			__func__, level);
2031 | goto err2; |
2032 | } |
2033 | |
2034 | /* Finally, activate the context by starting it */ |
2035 | rc = cfg->ops->start_context(hwq->ctx_cookie); |
2036 | if (unlikely(rc)) { |
		dev_err(dev, "%s: start context failed rc=%d\n", __func__, rc);
2038 | level = UNMAP_THREE; |
2039 | goto err2; |
2040 | } |
2041 | |
2042 | out: |
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2044 | return rc; |
2045 | err2: |
2046 | term_intr(cfg, level, index); |
2047 | if (index != PRIMARY_HWQ) |
2048 | cfg->ops->release_context(ctx); |
2049 | err1: |
2050 | hwq->ctx_cookie = NULL; |
2051 | goto out; |
2052 | } |
2053 | |
2054 | /** |
2055 | * get_num_afu_ports() - determines and configures the number of AFU ports |
2056 | * @cfg: Internal structure associated with the host. |
2057 | * |
2058 | * This routine determines the number of AFU ports by converting the global |
2059 | * port selection mask. The converted value is only valid following an AFU |
2060 | * reset (explicit or power-on). This routine must be invoked shortly after |
2061 | * mapping as other routines are dependent on the number of ports during the |
2062 | * initialization sequence. |
2063 | * |
2064 | * To support legacy AFUs that might not have reflected an initial global |
2065 | * port mask (value read is 0), default to the number of ports originally |
2066 | * supported by the cxlflash driver (2) before hardware with other port |
2067 | * offerings was introduced. |
2068 | */ |
2069 | static void get_num_afu_ports(struct cxlflash_cfg *cfg) |
2070 | { |
2071 | struct afu *afu = cfg->afu; |
2072 | struct device *dev = &cfg->dev->dev; |
2073 | u64 port_mask; |
2074 | int num_fc_ports = LEGACY_FC_PORTS; |
2075 | |
2076 | port_mask = readq_be(&afu->afu_map->global.regs.afu_port_sel); |
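	/* The highest set bit in the select mask implies the port count */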
2077 | if (port_mask != 0ULL) |
2078 | num_fc_ports = min(ilog2(port_mask) + 1, MAX_FC_PORTS); |
2079 | |
	dev_dbg(dev, "%s: port_mask=%016llx num_fc_ports=%d\n",
2081 | __func__, port_mask, num_fc_ports); |
2082 | |
2083 | cfg->num_fc_ports = num_fc_ports; |
2084 | cfg->host->max_channel = PORTNUM2CHAN(num_fc_ports); |
2085 | } |
2086 | |
2087 | /** |
2088 | * init_afu() - setup as master context and start AFU |
2089 | * @cfg: Internal structure associated with the host. |
2090 | * |
2091 | * This routine is a higher level of control for configuring the |
2092 | * AFU on probe and reset paths. |
2093 | * |
2094 | * Return: 0 on success, -errno on failure |
2095 | */ |
2096 | static int init_afu(struct cxlflash_cfg *cfg) |
2097 | { |
2098 | u64 reg; |
2099 | int rc = 0; |
2100 | struct afu *afu = cfg->afu; |
2101 | struct device *dev = &cfg->dev->dev; |
2102 | struct hwq *hwq; |
2103 | int i; |
2104 | |
2105 | cfg->ops->perst_reloads_same_image(cfg->afu_cookie, true); |
2106 | |
2107 | mutex_init(&afu->sync_active); |
2108 | afu->num_hwqs = afu->desired_hwqs; |
2109 | for (i = 0; i < afu->num_hwqs; i++) { |
		rc = init_mc(cfg, i);
		if (rc) {
			dev_err(dev, "%s: init_mc failed rc=%d index=%d\n",
2113 | __func__, rc, i); |
2114 | goto err1; |
2115 | } |
2116 | } |
2117 | |
2118 | /* Map the entire MMIO space of the AFU using the first context */ |
2119 | hwq = get_hwq(afu, PRIMARY_HWQ); |
2120 | afu->afu_map = cfg->ops->psa_map(hwq->ctx_cookie); |
2121 | if (!afu->afu_map) { |
		dev_err(dev, "%s: psa_map failed\n", __func__);
2123 | rc = -ENOMEM; |
2124 | goto err1; |
2125 | } |
2126 | |
2127 | /* No byte reverse on reading afu_version or string will be backwards */ |
	reg = readq(&afu->afu_map->global.regs.afu_version);
2129 | memcpy(afu->version, ®, sizeof(reg)); |
2130 | afu->interface_version = |
2131 | readq_be(&afu->afu_map->global.regs.interface_version); |
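	/* An interface version of all 1s (~0) indicates a back-level AFU */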
2132 | if ((afu->interface_version + 1) == 0) { |
2133 | dev_err(dev, "Back level AFU, please upgrade. AFU version %s " |
			"interface version %016llx\n", afu->version,
2135 | afu->interface_version); |
2136 | rc = -EINVAL; |
2137 | goto err1; |
2138 | } |
2139 | |
2140 | if (afu_is_sq_cmd_mode(afu)) { |
2141 | afu->send_cmd = send_cmd_sq; |
2142 | afu->context_reset = context_reset_sq; |
2143 | } else { |
2144 | afu->send_cmd = send_cmd_ioarrin; |
2145 | afu->context_reset = context_reset_ioarrin; |
2146 | } |
2147 | |
	dev_dbg(dev, "%s: afu_ver=%s interface_ver=%016llx\n", __func__,
2149 | afu->version, afu->interface_version); |
2150 | |
2151 | get_num_afu_ports(cfg); |
2152 | |
2153 | rc = start_afu(cfg); |
2154 | if (rc) { |
		dev_err(dev, "%s: start_afu failed, rc=%d\n", __func__, rc);
2156 | goto err1; |
2157 | } |
2158 | |
	afu_err_intr_init(cfg->afu);
	for (i = 0; i < afu->num_hwqs; i++) {
		hwq = get_hwq(afu, i);
2162 | |
2163 | hwq->room = readq_be(&hwq->host_map->cmd_room); |
2164 | } |
2165 | |
2166 | /* Restore the LUN mappings */ |
2167 | cxlflash_restore_luntable(cfg); |
2168 | out: |
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2170 | return rc; |
2171 | |
2172 | err1: |
2173 | for (i = afu->num_hwqs - 1; i >= 0; i--) { |
		term_intr(cfg, UNMAP_THREE, i);
		term_mc(cfg, i);
2176 | } |
2177 | goto out; |
2178 | } |
2179 | |
2180 | /** |
2181 | * afu_reset() - resets the AFU |
2182 | * @cfg: Internal structure associated with the host. |
2183 | * |
2184 | * Return: 0 on success, -errno on failure |
2185 | */ |
2186 | static int afu_reset(struct cxlflash_cfg *cfg) |
2187 | { |
2188 | struct device *dev = &cfg->dev->dev; |
2189 | int rc = 0; |
2190 | |
	/*
	 * Stop the context before the reset. Since the context is
	 * no longer available, restart it after the reset is complete.
	 */
2194 | term_afu(cfg); |
2195 | |
2196 | rc = init_afu(cfg); |
2197 | |
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2199 | return rc; |
2200 | } |
2201 | |
2202 | /** |
2203 | * drain_ioctls() - wait until all currently executing ioctls have completed |
2204 | * @cfg: Internal structure associated with the host. |
2205 | * |
2206 | * Obtain write access to read/write semaphore that wraps ioctl |
2207 | * handling to 'drain' ioctls currently executing. |
2208 | */ |
2209 | static void drain_ioctls(struct cxlflash_cfg *cfg) |
2210 | { |
	down_write(&cfg->ioctl_rwsem);
	up_write(&cfg->ioctl_rwsem);
2213 | } |
2214 | |
2215 | /** |
2216 | * cxlflash_async_reset_host() - asynchronous host reset handler |
2217 | * @data: Private data provided while scheduling reset. |
2218 | * @cookie: Cookie that can be used for checkpointing. |
2219 | */ |
2220 | static void cxlflash_async_reset_host(void *data, async_cookie_t cookie) |
2221 | { |
2222 | struct cxlflash_cfg *cfg = data; |
2223 | struct device *dev = &cfg->dev->dev; |
2224 | int rc = 0; |
2225 | |
2226 | if (cfg->state != STATE_RESET) { |
		dev_dbg(dev, "%s: Not performing a reset, state=%d\n",
2228 | __func__, cfg->state); |
2229 | goto out; |
2230 | } |
2231 | |
2232 | drain_ioctls(cfg); |
2233 | cxlflash_mark_contexts_error(cfg); |
2234 | rc = afu_reset(cfg); |
2235 | if (rc) |
2236 | cfg->state = STATE_FAILTERM; |
2237 | else |
2238 | cfg->state = STATE_NORMAL; |
2239 | wake_up_all(&cfg->reset_waitq); |
2240 | |
2241 | out: |
2242 | scsi_unblock_requests(cfg->host); |
2243 | } |
2244 | |
2245 | /** |
2246 | * cxlflash_schedule_async_reset() - schedule an asynchronous host reset |
2247 | * @cfg: Internal structure associated with the host. |
2248 | */ |
2249 | static void cxlflash_schedule_async_reset(struct cxlflash_cfg *cfg) |
2250 | { |
2251 | struct device *dev = &cfg->dev->dev; |
2252 | |
2253 | if (cfg->state != STATE_NORMAL) { |
		dev_dbg(dev, "%s: Not performing reset, state=%d\n",
2255 | __func__, cfg->state); |
2256 | return; |
2257 | } |
2258 | |
2259 | cfg->state = STATE_RESET; |
2260 | scsi_block_requests(cfg->host); |
	cfg->async_reset_cookie = async_schedule(cxlflash_async_reset_host,
						 cfg);
2263 | } |
2264 | |
2265 | /** |
2266 | * send_afu_cmd() - builds and sends an internal AFU command |
2267 | * @afu: AFU associated with the host. |
2268 | * @rcb: Pre-populated IOARCB describing command to send. |
2269 | * |
2270 | * The AFU can only take one internal AFU command at a time. This limitation is |
2271 | * enforced by using a mutex to provide exclusive access to the AFU during the |
 * operation. This design point requires that calling threads not be in
 * interrupt context, due to the possibility of sleeping during concurrent
 * AFU operations.
2274 | * |
2275 | * The command status is optionally passed back to the caller when the caller |
2276 | * populates the IOASA field of the IOARCB with a pointer to an IOASA structure. |
2277 | * |
2278 | * Return: |
2279 | * 0 on success, -errno on failure |
2280 | */ |
2281 | static int send_afu_cmd(struct afu *afu, struct sisl_ioarcb *rcb) |
2282 | { |
2283 | struct cxlflash_cfg *cfg = afu->parent; |
2284 | struct device *dev = &cfg->dev->dev; |
2285 | struct afu_cmd *cmd = NULL; |
2286 | struct hwq *hwq = get_hwq(afu, PRIMARY_HWQ); |
2287 | ulong lock_flags; |
2288 | char *buf = NULL; |
2289 | int rc = 0; |
2290 | int nretry = 0; |
2291 | |
2292 | if (cfg->state != STATE_NORMAL) { |
		dev_dbg(dev, "%s: Sync not required state=%u\n",
2294 | __func__, cfg->state); |
2295 | return 0; |
2296 | } |
2297 | |
2298 | mutex_lock(&afu->sync_active); |
	atomic_inc(&afu->cmds_active);
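	/*
	 * Over-allocate by the alignment requirement so that the command
	 * can be placed on a suitably aligned boundary within the buffer
	 * (see the PTR_ALIGN below).
	 */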
	buf = kmalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
	if (unlikely(!buf)) {
		dev_err(dev, "%s: no memory for command\n", __func__);
2303 | rc = -ENOMEM; |
2304 | goto out; |
2305 | } |
2306 | |
2307 | cmd = (struct afu_cmd *)PTR_ALIGN(buf, __alignof__(*cmd)); |
2308 | |
2309 | retry: |
2310 | memset(cmd, 0, sizeof(*cmd)); |
2311 | memcpy(&cmd->rcb, rcb, sizeof(*rcb)); |
	INIT_LIST_HEAD(&cmd->queue);
	init_completion(&cmd->cevent);
2314 | cmd->parent = afu; |
2315 | cmd->hwq_index = hwq->index; |
2316 | cmd->rcb.ctx_id = hwq->ctx_hndl; |
2317 | |
	dev_dbg(dev, "%s: afu=%p cmd=%p type=%02x nretry=%d\n",
2319 | __func__, afu, cmd, cmd->rcb.cdb[0], nretry); |
2320 | |
2321 | rc = afu->send_cmd(afu, cmd); |
2322 | if (unlikely(rc)) { |
2323 | rc = -ENOBUFS; |
2324 | goto out; |
2325 | } |
2326 | |
2327 | rc = wait_resp(afu, cmd); |
2328 | switch (rc) { |
2329 | case -ETIMEDOUT: |
2330 | rc = afu->context_reset(hwq); |
2331 | if (rc) { |
2332 | /* Delete the command from pending_cmds list */ |
2333 | spin_lock_irqsave(&hwq->hsq_slock, lock_flags); |
			list_del(&cmd->list);
			spin_unlock_irqrestore(&hwq->hsq_slock, lock_flags);
2336 | |
2337 | cxlflash_schedule_async_reset(cfg); |
2338 | break; |
2339 | } |
2340 | fallthrough; /* to retry */ |
2341 | case -EAGAIN: |
2342 | if (++nretry < 2) |
2343 | goto retry; |
2344 | fallthrough; /* to exit */ |
2345 | default: |
2346 | break; |
2347 | } |
2348 | |
2349 | if (rcb->ioasa) |
2350 | *rcb->ioasa = cmd->sa; |
2351 | out: |
	atomic_dec(&afu->cmds_active);
	mutex_unlock(&afu->sync_active);
	kfree(buf);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2356 | return rc; |
2357 | } |
2358 | |
2359 | /** |
2360 | * cxlflash_afu_sync() - builds and sends an AFU sync command |
2361 | * @afu: AFU associated with the host. |
2362 | * @ctx: Identifies context requesting sync. |
2363 | * @res: Identifies resource requesting sync. |
2364 | * @mode: Type of sync to issue (lightweight, heavyweight, global). |
2365 | * |
2366 | * AFU sync operations are only necessary and allowed when the device is |
2367 | * operating normally. When not operating normally, sync requests can occur as |
2368 | * part of cleaning up resources associated with an adapter prior to removal. |
2369 | * In this scenario, these requests are simply ignored (safe due to the AFU |
2370 | * going away). |
2371 | * |
2372 | * Return: |
2373 | * 0 on success, -errno on failure |
2374 | */ |
2375 | int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx, res_hndl_t res, u8 mode) |
2376 | { |
2377 | struct cxlflash_cfg *cfg = afu->parent; |
2378 | struct device *dev = &cfg->dev->dev; |
2379 | struct sisl_ioarcb rcb = { 0 }; |
2380 | |
	dev_dbg(dev, "%s: afu=%p ctx=%u res=%u mode=%u\n",
2382 | __func__, afu, ctx, res, mode); |
2383 | |
2384 | rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD; |
2385 | rcb.msi = SISL_MSI_RRQ_UPDATED; |
2386 | rcb.timeout = MC_AFU_SYNC_TIMEOUT; |
2387 | |
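	/*
	 * The sync CDB carries the opcode, the sync mode, the 16-bit
	 * context handle and the 32-bit resource handle, in that order.
	 */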
2388 | rcb.cdb[0] = SISL_AFU_CMD_SYNC; |
2389 | rcb.cdb[1] = mode; |
	put_unaligned_be16(ctx, &rcb.cdb[2]);
	put_unaligned_be32(res, &rcb.cdb[4]);

	return send_afu_cmd(afu, &rcb);
2394 | } |
2395 | |
2396 | /** |
2397 | * cxlflash_eh_abort_handler() - abort a SCSI command |
2398 | * @scp: SCSI command to abort. |
2399 | * |
2400 | * CXL Flash devices do not support a single command abort. Reset the context |
2401 | * as per SISLite specification. Flush any pending commands in the hardware |
2402 | * queue before the reset. |
2403 | * |
2404 | * Return: SUCCESS/FAILED as defined in scsi/scsi.h |
2405 | */ |
2406 | static int cxlflash_eh_abort_handler(struct scsi_cmnd *scp) |
2407 | { |
2408 | int rc = FAILED; |
2409 | struct Scsi_Host *host = scp->device->host; |
	struct cxlflash_cfg *cfg = shost_priv(host);
	struct afu_cmd *cmd = sc_to_afuc(scp);
	struct device *dev = &cfg->dev->dev;
	struct afu *afu = cfg->afu;
	struct hwq *hwq = get_hwq(afu, cmd->hwq_index);

	dev_dbg(dev, "%s: (scp=%p) %d/%d/%d/%llu "
		"cdb=(%08x-%08x-%08x-%08x)\n", __func__, scp, host->host_no,
2418 | scp->device->channel, scp->device->id, scp->device->lun, |
2419 | get_unaligned_be32(&((u32 *)scp->cmnd)[0]), |
2420 | get_unaligned_be32(&((u32 *)scp->cmnd)[1]), |
2421 | get_unaligned_be32(&((u32 *)scp->cmnd)[2]), |
2422 | get_unaligned_be32(&((u32 *)scp->cmnd)[3])); |
2423 | |
	/*
	 * When the state is not normal, another reset/reload is in progress.
	 * Return failed and the mid-layer will invoke host reset handler.
	 */
	if (cfg->state != STATE_NORMAL) {
		dev_dbg(dev, "%s: Invalid state for abort, state=%d\n",
2429 | __func__, cfg->state); |
2430 | goto out; |
2431 | } |
2432 | |
2433 | rc = afu->context_reset(hwq); |
2434 | if (unlikely(rc)) |
2435 | goto out; |
2436 | |
2437 | rc = SUCCESS; |
2438 | |
2439 | out: |
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2441 | return rc; |
2442 | } |
2443 | |
2444 | /** |
2445 | * cxlflash_eh_device_reset_handler() - reset a single LUN |
2446 | * @scp: SCSI command to send. |
2447 | * |
2448 | * Return: |
2449 | * SUCCESS as defined in scsi/scsi.h |
2450 | * FAILED as defined in scsi/scsi.h |
2451 | */ |
2452 | static int cxlflash_eh_device_reset_handler(struct scsi_cmnd *scp) |
2453 | { |
2454 | int rc = SUCCESS; |
2455 | struct scsi_device *sdev = scp->device; |
2456 | struct Scsi_Host *host = sdev->host; |
	struct cxlflash_cfg *cfg = shost_priv(host);
2458 | struct device *dev = &cfg->dev->dev; |
2459 | int rcr = 0; |
2460 | |
	dev_dbg(dev, "%s: %d/%d/%d/%llu\n", __func__,
2462 | host->host_no, sdev->channel, sdev->id, sdev->lun); |
2463 | retry: |
2464 | switch (cfg->state) { |
2465 | case STATE_NORMAL: |
2466 | rcr = send_tmf(cfg, sdev, TMF_LUN_RESET); |
2467 | if (unlikely(rcr)) |
2468 | rc = FAILED; |
2469 | break; |
2470 | case STATE_RESET: |
2471 | wait_event(cfg->reset_waitq, cfg->state != STATE_RESET); |
2472 | goto retry; |
2473 | default: |
2474 | rc = FAILED; |
2475 | break; |
2476 | } |
2477 | |
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2479 | return rc; |
2480 | } |
2481 | |
2482 | /** |
2483 | * cxlflash_eh_host_reset_handler() - reset the host adapter |
2484 | * @scp: SCSI command from stack identifying host. |
2485 | * |
2486 | * Following a reset, the state is evaluated again in case an EEH occurred |
2487 | * during the reset. In such a scenario, the host reset will either yield |
2488 | * until the EEH recovery is complete or return success or failure based |
2489 | * upon the current device state. |
2490 | * |
2491 | * Return: |
2492 | * SUCCESS as defined in scsi/scsi.h |
2493 | * FAILED as defined in scsi/scsi.h |
2494 | */ |
2495 | static int cxlflash_eh_host_reset_handler(struct scsi_cmnd *scp) |
2496 | { |
2497 | int rc = SUCCESS; |
2498 | int rcr = 0; |
2499 | struct Scsi_Host *host = scp->device->host; |
	struct cxlflash_cfg *cfg = shost_priv(host);
2501 | struct device *dev = &cfg->dev->dev; |
2502 | |
	dev_dbg(dev, "%s: %d\n", __func__, host->host_no);
2504 | |
2505 | switch (cfg->state) { |
2506 | case STATE_NORMAL: |
2507 | cfg->state = STATE_RESET; |
2508 | drain_ioctls(cfg); |
2509 | cxlflash_mark_contexts_error(cfg); |
2510 | rcr = afu_reset(cfg); |
2511 | if (rcr) { |
2512 | rc = FAILED; |
2513 | cfg->state = STATE_FAILTERM; |
2514 | } else |
2515 | cfg->state = STATE_NORMAL; |
2516 | wake_up_all(&cfg->reset_waitq); |
		ssleep(1);
2518 | fallthrough; |
2519 | case STATE_RESET: |
2520 | wait_event(cfg->reset_waitq, cfg->state != STATE_RESET); |
2521 | if (cfg->state == STATE_NORMAL) |
2522 | break; |
2523 | fallthrough; |
2524 | default: |
2525 | rc = FAILED; |
2526 | break; |
2527 | } |
2528 | |
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
2530 | return rc; |
2531 | } |
2532 | |
2533 | /** |
2534 | * cxlflash_change_queue_depth() - change the queue depth for the device |
2535 | * @sdev: SCSI device destined for queue depth change. |
2536 | * @qdepth: Requested queue depth value to set. |
2537 | * |
2538 | * The requested queue depth is capped to the maximum supported value. |
2539 | * |
2540 | * Return: The actual queue depth set. |
2541 | */ |
2542 | static int cxlflash_change_queue_depth(struct scsi_device *sdev, int qdepth) |
{
2545 | if (qdepth > CXLFLASH_MAX_CMDS_PER_LUN) |
2546 | qdepth = CXLFLASH_MAX_CMDS_PER_LUN; |
2547 | |
2548 | scsi_change_queue_depth(sdev, qdepth); |
2549 | return sdev->queue_depth; |
2550 | } |
2551 | |
2552 | /** |
2553 | * cxlflash_show_port_status() - queries and presents the current port status |
2554 | * @port: Desired port for status reporting. |
2555 | * @cfg: Internal structure associated with the host. |
2556 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. |
2557 | * |
2558 | * Return: The size of the ASCII string returned in @buf or -EINVAL. |
2559 | */ |
2560 | static ssize_t cxlflash_show_port_status(u32 port, |
2561 | struct cxlflash_cfg *cfg, |
2562 | char *buf) |
2563 | { |
2564 | struct device *dev = &cfg->dev->dev; |
2565 | char *disp_status; |
2566 | u64 status; |
2567 | __be64 __iomem *fc_port_regs; |
2568 | |
2569 | WARN_ON(port >= MAX_FC_PORTS); |
2570 | |
2571 | if (port >= cfg->num_fc_ports) { |
		dev_info(dev, "%s: Port %d not supported on this card.\n",
			 __func__, port);
		return -EINVAL;
	}

	fc_port_regs = get_fc_port_regs(cfg, port);
	status = readq_be(&fc_port_regs[FC_MTIP_STATUS / 8]);
	status &= FC_MTIP_STATUS_MASK;

	if (status == FC_MTIP_STATUS_ONLINE)
		disp_status = "online";
	else if (status == FC_MTIP_STATUS_OFFLINE)
		disp_status = "offline";
	else
		disp_status = "unknown";

	return scnprintf(buf, PAGE_SIZE, "%s\n", disp_status);
2589 | } |
2590 | |
2591 | /** |
2592 | * port0_show() - queries and presents the current status of port 0 |
2593 | * @dev: Generic device associated with the host owning the port. |
2594 | * @attr: Device attribute representing the port. |
2595 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. |
2596 | * |
2597 | * Return: The size of the ASCII string returned in @buf. |
2598 | */ |
2599 | static ssize_t port0_show(struct device *dev, |
2600 | struct device_attribute *attr, |
2601 | char *buf) |
2602 | { |
2603 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); |
2604 | |
	return cxlflash_show_port_status(0, cfg, buf);
2606 | } |
2607 | |
2608 | /** |
2609 | * port1_show() - queries and presents the current status of port 1 |
2610 | * @dev: Generic device associated with the host owning the port. |
2611 | * @attr: Device attribute representing the port. |
2612 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. |
2613 | * |
2614 | * Return: The size of the ASCII string returned in @buf. |
2615 | */ |
2616 | static ssize_t port1_show(struct device *dev, |
2617 | struct device_attribute *attr, |
2618 | char *buf) |
2619 | { |
2620 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); |
2621 | |
	return cxlflash_show_port_status(1, cfg, buf);
2623 | } |
2624 | |
2625 | /** |
2626 | * port2_show() - queries and presents the current status of port 2 |
2627 | * @dev: Generic device associated with the host owning the port. |
2628 | * @attr: Device attribute representing the port. |
2629 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. |
2630 | * |
2631 | * Return: The size of the ASCII string returned in @buf. |
2632 | */ |
2633 | static ssize_t port2_show(struct device *dev, |
2634 | struct device_attribute *attr, |
2635 | char *buf) |
2636 | { |
2637 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); |
2638 | |
	return cxlflash_show_port_status(2, cfg, buf);
2640 | } |
2641 | |
2642 | /** |
2643 | * port3_show() - queries and presents the current status of port 3 |
2644 | * @dev: Generic device associated with the host owning the port. |
2645 | * @attr: Device attribute representing the port. |
2646 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. |
2647 | * |
2648 | * Return: The size of the ASCII string returned in @buf. |
2649 | */ |
2650 | static ssize_t port3_show(struct device *dev, |
2651 | struct device_attribute *attr, |
2652 | char *buf) |
2653 | { |
2654 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); |
2655 | |
	return cxlflash_show_port_status(3, cfg, buf);
2657 | } |
2658 | |
2659 | /** |
2660 | * lun_mode_show() - presents the current LUN mode of the host |
2661 | * @dev: Generic device associated with the host. |
2662 | * @attr: Device attribute representing the LUN mode. |
2663 | * @buf: Buffer of length PAGE_SIZE to report back the LUN mode in ASCII. |
2664 | * |
2665 | * Return: The size of the ASCII string returned in @buf. |
2666 | */ |
2667 | static ssize_t lun_mode_show(struct device *dev, |
2668 | struct device_attribute *attr, char *buf) |
2669 | { |
2670 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); |
2671 | struct afu *afu = cfg->afu; |
2672 | |
	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->internal_lun);
2674 | } |
2675 | |
2676 | /** |
2677 | * lun_mode_store() - sets the LUN mode of the host |
2678 | * @dev: Generic device associated with the host. |
2679 | * @attr: Device attribute representing the LUN mode. |
2680 | * @buf: Buffer of length PAGE_SIZE containing the LUN mode in ASCII. |
 * @count: Length of data residing in @buf.
2682 | * |
2683 | * The CXL Flash AFU supports a dummy LUN mode where the external |
2684 | * links and storage are not required. Space on the FPGA is used |
2685 | * to create 1 or 2 small LUNs which are presented to the system |
2686 | * as if they were a normal storage device. This feature is useful |
2687 | * during development and also provides manufacturing with a way |
2688 | * to test the AFU without an actual device. |
2689 | * |
2690 | * 0 = external LUN[s] (default) |
2691 | * 1 = internal LUN (1 x 64K, 512B blocks, id 0) |
2692 | * 2 = internal LUN (1 x 64K, 4K blocks, id 0) |
2693 | * 3 = internal LUN (2 x 32K, 512B blocks, ids 0,1) |
2694 | * 4 = internal LUN (2 x 32K, 4K blocks, ids 0,1) |
2695 | * |
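 * For example, assuming the adapter registered as SCSI host3, a single
 * internal LUN with 512B blocks could be configured via sysfs:
 *
 *   echo 1 > /sys/class/scsi_host/host3/lun_mode
 *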
 * Return: The number of bytes consumed from @buf, i.e. @count.
2697 | */ |
2698 | static ssize_t lun_mode_store(struct device *dev, |
2699 | struct device_attribute *attr, |
2700 | const char *buf, size_t count) |
2701 | { |
2702 | struct Scsi_Host *shost = class_to_shost(dev); |
2703 | struct cxlflash_cfg *cfg = shost_priv(shost); |
2704 | struct afu *afu = cfg->afu; |
2705 | int rc; |
2706 | u32 lun_mode; |
2707 | |
	rc = kstrtouint(buf, 10, &lun_mode);
2709 | if (!rc && (lun_mode < 5) && (lun_mode != afu->internal_lun)) { |
2710 | afu->internal_lun = lun_mode; |
2711 | |
2712 | /* |
2713 | * When configured for internal LUN, there is only one channel, |
2714 | * channel number 0, else there will be one less than the number |
2715 | * of fc ports for this card. |
2716 | */ |
2717 | if (afu->internal_lun) |
2718 | shost->max_channel = 0; |
2719 | else |
2720 | shost->max_channel = PORTNUM2CHAN(cfg->num_fc_ports); |
2721 | |
2722 | afu_reset(cfg); |
2723 | scsi_scan_host(cfg->host); |
2724 | } |
2725 | |
2726 | return count; |
2727 | } |
2728 | |
2729 | /** |
2730 | * ioctl_version_show() - presents the current ioctl version of the host |
2731 | * @dev: Generic device associated with the host. |
2732 | * @attr: Device attribute representing the ioctl version. |
2733 | * @buf: Buffer of length PAGE_SIZE to report back the ioctl version. |
2734 | * |
2735 | * Return: The size of the ASCII string returned in @buf. |
2736 | */ |
2737 | static ssize_t ioctl_version_show(struct device *dev, |
2738 | struct device_attribute *attr, char *buf) |
2739 | { |
2740 | ssize_t bytes = 0; |
2741 | |
	bytes = scnprintf(buf, PAGE_SIZE,
			  "disk: %u\n", DK_CXLFLASH_VERSION_0);
	bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
			   "host: %u\n", HT_CXLFLASH_VERSION_0);
2746 | |
2747 | return bytes; |
2748 | } |
2749 | |
2750 | /** |
2751 | * cxlflash_show_port_lun_table() - queries and presents the port LUN table |
2752 | * @port: Desired port for status reporting. |
2753 | * @cfg: Internal structure associated with the host. |
2754 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. |
2755 | * |
2756 | * Return: The size of the ASCII string returned in @buf or -EINVAL. |
2757 | */ |
2758 | static ssize_t cxlflash_show_port_lun_table(u32 port, |
2759 | struct cxlflash_cfg *cfg, |
2760 | char *buf) |
2761 | { |
2762 | struct device *dev = &cfg->dev->dev; |
2763 | __be64 __iomem *fc_port_luns; |
2764 | int i; |
2765 | ssize_t bytes = 0; |
2766 | |
2767 | WARN_ON(port >= MAX_FC_PORTS); |
2768 | |
2769 | if (port >= cfg->num_fc_ports) { |
		dev_info(dev, "%s: Port %d not supported on this card.\n",
			 __func__, port);
		return -EINVAL;
	}

	fc_port_luns = get_fc_port_luns(cfg, port);

	for (i = 0; i < CXLFLASH_NUM_VLUNS; i++)
		bytes += scnprintf(buf + bytes, PAGE_SIZE - bytes,
				   "%03d: %016llx\n",
				   i, readq_be(&fc_port_luns[i]));
2781 | return bytes; |
2782 | } |
2783 | |
2784 | /** |
2785 | * port0_lun_table_show() - presents the current LUN table of port 0 |
2786 | * @dev: Generic device associated with the host owning the port. |
2787 | * @attr: Device attribute representing the port. |
2788 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. |
2789 | * |
2790 | * Return: The size of the ASCII string returned in @buf. |
2791 | */ |
2792 | static ssize_t port0_lun_table_show(struct device *dev, |
2793 | struct device_attribute *attr, |
2794 | char *buf) |
2795 | { |
2796 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); |
2797 | |
	return cxlflash_show_port_lun_table(0, cfg, buf);
2799 | } |
2800 | |
2801 | /** |
2802 | * port1_lun_table_show() - presents the current LUN table of port 1 |
2803 | * @dev: Generic device associated with the host owning the port. |
2804 | * @attr: Device attribute representing the port. |
2805 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. |
2806 | * |
2807 | * Return: The size of the ASCII string returned in @buf. |
2808 | */ |
2809 | static ssize_t port1_lun_table_show(struct device *dev, |
2810 | struct device_attribute *attr, |
2811 | char *buf) |
2812 | { |
2813 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); |
2814 | |
	return cxlflash_show_port_lun_table(1, cfg, buf);
2816 | } |
2817 | |
2818 | /** |
2819 | * port2_lun_table_show() - presents the current LUN table of port 2 |
2820 | * @dev: Generic device associated with the host owning the port. |
2821 | * @attr: Device attribute representing the port. |
2822 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. |
2823 | * |
2824 | * Return: The size of the ASCII string returned in @buf. |
2825 | */ |
2826 | static ssize_t port2_lun_table_show(struct device *dev, |
2827 | struct device_attribute *attr, |
2828 | char *buf) |
2829 | { |
2830 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); |
2831 | |
	return cxlflash_show_port_lun_table(2, cfg, buf);
2833 | } |
2834 | |
2835 | /** |
2836 | * port3_lun_table_show() - presents the current LUN table of port 3 |
2837 | * @dev: Generic device associated with the host owning the port. |
2838 | * @attr: Device attribute representing the port. |
2839 | * @buf: Buffer of length PAGE_SIZE to report back port status in ASCII. |
2840 | * |
2841 | * Return: The size of the ASCII string returned in @buf. |
2842 | */ |
2843 | static ssize_t port3_lun_table_show(struct device *dev, |
2844 | struct device_attribute *attr, |
2845 | char *buf) |
2846 | { |
2847 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); |
2848 | |
	return cxlflash_show_port_lun_table(3, cfg, buf);
2850 | } |
2851 | |
2852 | /** |
2853 | * irqpoll_weight_show() - presents the current IRQ poll weight for the host |
2854 | * @dev: Generic device associated with the host. |
2855 | * @attr: Device attribute representing the IRQ poll weight. |
2856 | * @buf: Buffer of length PAGE_SIZE to report back the current IRQ poll |
2857 | * weight in ASCII. |
2858 | * |
2859 | * An IRQ poll weight of 0 indicates polling is disabled. |
2860 | * |
2861 | * Return: The size of the ASCII string returned in @buf. |
2862 | */ |
2863 | static ssize_t irqpoll_weight_show(struct device *dev, |
2864 | struct device_attribute *attr, char *buf) |
2865 | { |
2866 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); |
2867 | struct afu *afu = cfg->afu; |
2868 | |
	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->irqpoll_weight);
2870 | } |
2871 | |
2872 | /** |
2873 | * irqpoll_weight_store() - sets the current IRQ poll weight for the host |
2874 | * @dev: Generic device associated with the host. |
2875 | * @attr: Device attribute representing the IRQ poll weight. |
2876 | * @buf: Buffer of length PAGE_SIZE containing the desired IRQ poll |
2877 | * weight in ASCII. |
 * @count: Length of data residing in @buf.
2879 | * |
2880 | * An IRQ poll weight of 0 indicates polling is disabled. |
2881 | * |
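 * For example, assuming the adapter registered as SCSI host3, polling
 * with a weight of 64 could be enabled via sysfs:
 *
 *   echo 64 > /sys/class/scsi_host/host3/irqpoll_weight
 *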
 * Return: The number of bytes consumed from @buf, i.e. @count, on success.
2883 | */ |
2884 | static ssize_t irqpoll_weight_store(struct device *dev, |
2885 | struct device_attribute *attr, |
2886 | const char *buf, size_t count) |
2887 | { |
2888 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); |
2889 | struct device *cfgdev = &cfg->dev->dev; |
2890 | struct afu *afu = cfg->afu; |
2891 | struct hwq *hwq; |
2892 | u32 weight; |
2893 | int rc, i; |
2894 | |
	rc = kstrtouint(buf, 10, &weight);
2896 | if (rc) |
2897 | return -EINVAL; |
2898 | |
2899 | if (weight > 256) { |
2900 | dev_info(cfgdev, |
			 "Invalid IRQ poll weight. It must be 256 or less.\n");
2902 | return -EINVAL; |
2903 | } |
2904 | |
2905 | if (weight == afu->irqpoll_weight) { |
		dev_info(cfgdev,
			 "IRQ poll weight is already set to this value.\n");
2908 | return -EINVAL; |
2909 | } |
2910 | |
2911 | if (afu_is_irqpoll_enabled(afu)) { |
2912 | for (i = 0; i < afu->num_hwqs; i++) { |
			hwq = get_hwq(afu, i);
2914 | |
2915 | irq_poll_disable(&hwq->irqpoll); |
2916 | } |
2917 | } |
2918 | |
2919 | afu->irqpoll_weight = weight; |
2920 | |
2921 | if (weight > 0) { |
2922 | for (i = 0; i < afu->num_hwqs; i++) { |
			hwq = get_hwq(afu, i);
2924 | |
2925 | irq_poll_init(&hwq->irqpoll, weight, cxlflash_irqpoll); |
2926 | } |
2927 | } |
2928 | |
2929 | return count; |
2930 | } |
2931 | |
2932 | /** |
2933 | * num_hwqs_show() - presents the number of hardware queues for the host |
2934 | * @dev: Generic device associated with the host. |
2935 | * @attr: Device attribute representing the number of hardware queues. |
2936 | * @buf: Buffer of length PAGE_SIZE to report back the number of hardware |
2937 | * queues in ASCII. |
2938 | * |
2939 | * Return: The size of the ASCII string returned in @buf. |
2940 | */ |
2941 | static ssize_t num_hwqs_show(struct device *dev, |
2942 | struct device_attribute *attr, char *buf) |
2943 | { |
2944 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); |
2945 | struct afu *afu = cfg->afu; |
2946 | |
	return scnprintf(buf, PAGE_SIZE, "%u\n", afu->num_hwqs);
2948 | } |
2949 | |
2950 | /** |
2951 | * num_hwqs_store() - sets the number of hardware queues for the host |
2952 | * @dev: Generic device associated with the host. |
2953 | * @attr: Device attribute representing the number of hardware queues. |
2954 | * @buf: Buffer of length PAGE_SIZE containing the number of hardware |
2955 | * queues in ASCII. |
 * @count: Length of data residing in @buf.
2957 | * |
2958 | * n > 0: num_hwqs = n |
2959 | * n = 0: num_hwqs = num_online_cpus() |
 * n < 0: num_hwqs = num_online_cpus() / abs(n)
2961 | * |
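 * For example, assuming the adapter registered as SCSI host3, one
 * hardware queue per two online CPUs could be requested via sysfs:
 *
 *   echo -2 > /sys/class/scsi_host/host3/num_hwqs
 *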
 * Return: The number of bytes consumed from @buf, i.e. @count.
2963 | */ |
2964 | static ssize_t num_hwqs_store(struct device *dev, |
2965 | struct device_attribute *attr, |
2966 | const char *buf, size_t count) |
2967 | { |
2968 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); |
2969 | struct afu *afu = cfg->afu; |
2970 | int rc; |
2971 | int nhwqs, num_hwqs; |
2972 | |
	rc = kstrtoint(buf, 10, &nhwqs);
2974 | if (rc) |
2975 | return -EINVAL; |
2976 | |
2977 | if (nhwqs >= 1) |
2978 | num_hwqs = nhwqs; |
2979 | else if (nhwqs == 0) |
2980 | num_hwqs = num_online_cpus(); |
2981 | else |
2982 | num_hwqs = num_online_cpus() / abs(nhwqs); |
2983 | |
2984 | afu->desired_hwqs = min(num_hwqs, CXLFLASH_MAX_HWQS); |
2985 | WARN_ON_ONCE(afu->desired_hwqs == 0); |
2986 | |
2987 | retry: |
2988 | switch (cfg->state) { |
2989 | case STATE_NORMAL: |
2990 | cfg->state = STATE_RESET; |
2991 | drain_ioctls(cfg); |
2992 | cxlflash_mark_contexts_error(cfg); |
2993 | rc = afu_reset(cfg); |
2994 | if (rc) |
2995 | cfg->state = STATE_FAILTERM; |
2996 | else |
2997 | cfg->state = STATE_NORMAL; |
2998 | wake_up_all(&cfg->reset_waitq); |
2999 | break; |
3000 | case STATE_RESET: |
3001 | wait_event(cfg->reset_waitq, cfg->state != STATE_RESET); |
3002 | if (cfg->state == STATE_NORMAL) |
3003 | goto retry; |
3004 | fallthrough; |
3005 | default: |
3006 | /* Ideally should not happen */ |
		dev_err(dev, "%s: Device is not ready, state=%d\n",
3008 | __func__, cfg->state); |
3009 | break; |
3010 | } |
3011 | |
3012 | return count; |
3013 | } |
3014 | |
static const char *hwq_mode_name[MAX_HWQ_MODE] = { "rr", "tag", "cpu" };
3016 | |
3017 | /** |
3018 | * hwq_mode_show() - presents the HWQ steering mode for the host |
3019 | * @dev: Generic device associated with the host. |
3020 | * @attr: Device attribute representing the HWQ steering mode. |
3021 | * @buf: Buffer of length PAGE_SIZE to report back the HWQ steering mode |
3022 | * as a character string. |
3023 | * |
3024 | * Return: The size of the ASCII string returned in @buf. |
3025 | */ |
3026 | static ssize_t hwq_mode_show(struct device *dev, |
3027 | struct device_attribute *attr, char *buf) |
3028 | { |
3029 | struct cxlflash_cfg *cfg = shost_priv(class_to_shost(dev)); |
3030 | struct afu *afu = cfg->afu; |
3031 | |
	return scnprintf(buf, PAGE_SIZE, "%s\n", hwq_mode_name[afu->hwq_mode]);
3033 | } |
3034 | |
3035 | /** |
3036 | * hwq_mode_store() - sets the HWQ steering mode for the host |
3037 | * @dev: Generic device associated with the host. |
3038 | * @attr: Device attribute representing the HWQ steering mode. |
3039 | * @buf: Buffer of length PAGE_SIZE containing the HWQ steering mode |
3040 | * as a character string. |
 * @count: Length of data residing in @buf.
3042 | * |
3043 | * rr = Round-Robin |
3044 | * tag = Block MQ Tagging |
3045 | * cpu = CPU Affinity |
3046 | * |
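 * For example, assuming the adapter registered as SCSI host3, CPU
 * affinity steering could be selected via sysfs:
 *
 *   echo cpu > /sys/class/scsi_host/host3/hwq_mode
 *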
 * Return: The number of bytes consumed from @buf, i.e. @count, on success.
3048 | */ |
3049 | static ssize_t hwq_mode_store(struct device *dev, |
3050 | struct device_attribute *attr, |
3051 | const char *buf, size_t count) |
3052 | { |
3053 | struct Scsi_Host *shost = class_to_shost(dev); |
3054 | struct cxlflash_cfg *cfg = shost_priv(shost); |
3055 | struct device *cfgdev = &cfg->dev->dev; |
3056 | struct afu *afu = cfg->afu; |
3057 | int i; |
3058 | u32 mode = MAX_HWQ_MODE; |
3059 | |
3060 | for (i = 0; i < MAX_HWQ_MODE; i++) { |
3061 | if (!strncmp(hwq_mode_name[i], buf, strlen(hwq_mode_name[i]))) { |
3062 | mode = i; |
3063 | break; |
3064 | } |
3065 | } |
3066 | |
3067 | if (mode >= MAX_HWQ_MODE) { |
		dev_info(cfgdev, "Invalid HWQ steering mode.\n");
3069 | return -EINVAL; |
3070 | } |
3071 | |
3072 | afu->hwq_mode = mode; |
3073 | |
3074 | return count; |
3075 | } |
3076 | |
3077 | /** |
3078 | * mode_show() - presents the current mode of the device |
3079 | * @dev: Generic device associated with the device. |
3080 | * @attr: Device attribute representing the device mode. |
3081 | * @buf: Buffer of length PAGE_SIZE to report back the dev mode in ASCII. |
3082 | * |
3083 | * Return: The size of the ASCII string returned in @buf. |
3084 | */ |
3085 | static ssize_t mode_show(struct device *dev, |
3086 | struct device_attribute *attr, char *buf) |
3087 | { |
3088 | struct scsi_device *sdev = to_scsi_device(dev); |
3089 | |
	return scnprintf(buf, PAGE_SIZE, "%s\n",
			 sdev->hostdata ? "superpipe" : "legacy");
3092 | } |
3093 | |
3094 | /* |
3095 | * Host attributes |
3096 | */ |
3097 | static DEVICE_ATTR_RO(port0); |
3098 | static DEVICE_ATTR_RO(port1); |
3099 | static DEVICE_ATTR_RO(port2); |
3100 | static DEVICE_ATTR_RO(port3); |
3101 | static DEVICE_ATTR_RW(lun_mode); |
3102 | static DEVICE_ATTR_RO(ioctl_version); |
3103 | static DEVICE_ATTR_RO(port0_lun_table); |
3104 | static DEVICE_ATTR_RO(port1_lun_table); |
3105 | static DEVICE_ATTR_RO(port2_lun_table); |
3106 | static DEVICE_ATTR_RO(port3_lun_table); |
3107 | static DEVICE_ATTR_RW(irqpoll_weight); |
3108 | static DEVICE_ATTR_RW(num_hwqs); |
3109 | static DEVICE_ATTR_RW(hwq_mode); |
3110 | |
3111 | static struct attribute *cxlflash_host_attrs[] = { |
3112 | &dev_attr_port0.attr, |
3113 | &dev_attr_port1.attr, |
3114 | &dev_attr_port2.attr, |
3115 | &dev_attr_port3.attr, |
3116 | &dev_attr_lun_mode.attr, |
3117 | &dev_attr_ioctl_version.attr, |
3118 | &dev_attr_port0_lun_table.attr, |
3119 | &dev_attr_port1_lun_table.attr, |
3120 | &dev_attr_port2_lun_table.attr, |
3121 | &dev_attr_port3_lun_table.attr, |
3122 | &dev_attr_irqpoll_weight.attr, |
3123 | &dev_attr_num_hwqs.attr, |
3124 | &dev_attr_hwq_mode.attr, |
3125 | NULL |
3126 | }; |
3127 | |
3128 | ATTRIBUTE_GROUPS(cxlflash_host); |
3129 | |
3130 | /* |
3131 | * Device attributes |
3132 | */ |
3133 | static DEVICE_ATTR_RO(mode); |
3134 | |
3135 | static struct attribute *cxlflash_dev_attrs[] = { |
3136 | &dev_attr_mode.attr, |
3137 | NULL |
3138 | }; |
3139 | |
3140 | ATTRIBUTE_GROUPS(cxlflash_dev); |
3141 | |
3142 | /* |
3143 | * Host template |
3144 | */ |
3145 | static struct scsi_host_template driver_template = { |
3146 | .module = THIS_MODULE, |
3147 | .name = CXLFLASH_ADAPTER_NAME, |
3148 | .info = cxlflash_driver_info, |
3149 | .ioctl = cxlflash_ioctl, |
3150 | .proc_name = CXLFLASH_NAME, |
3151 | .queuecommand = cxlflash_queuecommand, |
3152 | .eh_abort_handler = cxlflash_eh_abort_handler, |
3153 | .eh_device_reset_handler = cxlflash_eh_device_reset_handler, |
3154 | .eh_host_reset_handler = cxlflash_eh_host_reset_handler, |
3155 | .change_queue_depth = cxlflash_change_queue_depth, |
3156 | .cmd_per_lun = CXLFLASH_MAX_CMDS_PER_LUN, |
3157 | .can_queue = CXLFLASH_MAX_CMDS, |
3158 | .cmd_size = sizeof(struct afu_cmd) + __alignof__(struct afu_cmd) - 1, |
3159 | .this_id = -1, |
3160 | .sg_tablesize = 1, /* No scatter gather support */ |
3161 | .max_sectors = CXLFLASH_MAX_SECTORS, |
3162 | .shost_groups = cxlflash_host_groups, |
3163 | .sdev_groups = cxlflash_dev_groups, |
3164 | }; |
3165 | |
3166 | /* |
3167 | * Device dependent values |
3168 | */ |
3169 | static struct dev_dependent_vals dev_corsa_vals = { CXLFLASH_MAX_SECTORS, |
3170 | CXLFLASH_WWPN_VPD_REQUIRED }; |
3171 | static struct dev_dependent_vals dev_flash_gt_vals = { CXLFLASH_MAX_SECTORS, |
3172 | CXLFLASH_NOTIFY_SHUTDOWN }; |
3173 | static struct dev_dependent_vals dev_briard_vals = { CXLFLASH_MAX_SECTORS, |
3174 | (CXLFLASH_NOTIFY_SHUTDOWN | |
3175 | CXLFLASH_OCXL_DEV) }; |
3176 | |
3177 | /* |
3178 | * PCI device binding table |
3179 | */ |
3180 | static struct pci_device_id cxlflash_pci_table[] = { |
3181 | {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CORSA, |
3182 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_corsa_vals}, |
3183 | {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_FLASH_GT, |
3184 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_flash_gt_vals}, |
3185 | {PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_BRIARD, |
3186 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&dev_briard_vals}, |
3187 | {} |
3188 | }; |
3189 | |
3190 | MODULE_DEVICE_TABLE(pci, cxlflash_pci_table); |
3191 | |
3192 | /** |
3193 | * cxlflash_worker_thread() - work thread handler for the AFU |
3194 | * @work: Work structure contained within cxlflash associated with host. |
3195 | * |
3196 | * Handles the following events: |
3197 | * - Link reset which cannot be performed on interrupt context due to |
3198 | * blocking up to a few seconds |
3199 | * - Rescan the host |
3200 | */ |
3201 | static void cxlflash_worker_thread(struct work_struct *work) |
3202 | { |
3203 | struct cxlflash_cfg *cfg = container_of(work, struct cxlflash_cfg, |
3204 | work_q); |
3205 | struct afu *afu = cfg->afu; |
3206 | struct device *dev = &cfg->dev->dev; |
3207 | __be64 __iomem *fc_port_regs; |
3208 | int port; |
3209 | ulong lock_flags; |
3210 | |
3211 | /* Avoid MMIO if the device has failed */ |
3212 | |
3213 | if (cfg->state != STATE_NORMAL) |
3214 | return; |
3215 | |
3216 | spin_lock_irqsave(cfg->host->host_lock, lock_flags); |
3217 | |
3218 | if (cfg->lr_state == LINK_RESET_REQUIRED) { |
3219 | port = cfg->lr_port; |
3220 | if (port < 0) |
3221 | dev_err(dev, "%s: invalid port index %d\n" , |
3222 | __func__, port); |
3223 | else { |
			spin_unlock_irqrestore(cfg->host->host_lock,
					       lock_flags);

			/* The reset can block... */
			fc_port_regs = get_fc_port_regs(cfg, port);
			afu_link_reset(afu, port, fc_port_regs);
3230 | spin_lock_irqsave(cfg->host->host_lock, lock_flags); |
3231 | } |
3232 | |
3233 | cfg->lr_state = LINK_RESET_COMPLETE; |
3234 | } |
3235 | |
	spin_unlock_irqrestore(cfg->host->host_lock, lock_flags);

	if (atomic_dec_if_positive(&cfg->scan_host_needed) >= 0)
3239 | scsi_scan_host(cfg->host); |
3240 | } |
3241 | |
3242 | /** |
3243 | * cxlflash_chr_open() - character device open handler |
3244 | * @inode: Device inode associated with this character device. |
3245 | * @file: File pointer for this device. |
3246 | * |
3247 | * Only users with admin privileges are allowed to open the character device. |
3248 | * |
3249 | * Return: 0 on success, -errno on failure |
3250 | */ |
3251 | static int cxlflash_chr_open(struct inode *inode, struct file *file) |
3252 | { |
3253 | struct cxlflash_cfg *cfg; |
3254 | |
3255 | if (!capable(CAP_SYS_ADMIN)) |
3256 | return -EACCES; |
3257 | |
3258 | cfg = container_of(inode->i_cdev, struct cxlflash_cfg, cdev); |
3259 | file->private_data = cfg; |
3260 | |
3261 | return 0; |
3262 | } |
3263 | |
3264 | /** |
3265 | * decode_hioctl() - translates encoded host ioctl to easily identifiable string |
3266 | * @cmd: The host ioctl command to decode. |
3267 | * |
3268 | * Return: A string identifying the decoded host ioctl. |
3269 | */ |
3270 | static char *decode_hioctl(unsigned int cmd) |
3271 | { |
3272 | switch (cmd) { |
3273 | case HT_CXLFLASH_LUN_PROVISION: |
3274 | return __stringify_1(HT_CXLFLASH_LUN_PROVISION); |
3275 | } |
3276 | |
3277 | return "UNKNOWN" ; |
3278 | } |
3279 | |
3280 | /** |
3281 | * cxlflash_lun_provision() - host LUN provisioning handler |
3282 | * @cfg: Internal structure associated with the host. |
3283 | * @lunprov: Kernel copy of userspace ioctl data structure. |
3284 | * |
3285 | * Return: 0 on success, -errno on failure |
3286 | */ |
3287 | static int cxlflash_lun_provision(struct cxlflash_cfg *cfg, |
3288 | struct ht_cxlflash_lun_provision *lunprov) |
3289 | { |
3290 | struct afu *afu = cfg->afu; |
3291 | struct device *dev = &cfg->dev->dev; |
3292 | struct sisl_ioarcb rcb; |
3293 | struct sisl_ioasa asa; |
3294 | __be64 __iomem *fc_port_regs; |
3295 | u16 port = lunprov->port; |
3296 | u16 scmd = lunprov->hdr.subcmd; |
3297 | u16 type; |
3298 | u64 reg; |
3299 | u64 size; |
3300 | u64 lun_id; |
3301 | int rc = 0; |
3302 | |
3303 | if (!afu_is_lun_provision(afu)) { |
3304 | rc = -ENOTSUPP; |
3305 | goto out; |
3306 | } |
3307 | |
3308 | if (port >= cfg->num_fc_ports) { |
3309 | rc = -EINVAL; |
3310 | goto out; |
3311 | } |
3312 | |
3313 | switch (scmd) { |
3314 | case HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN: |
3315 | type = SISL_AFU_LUN_PROVISION_CREATE; |
3316 | size = lunprov->size; |
3317 | lun_id = 0; |
3318 | break; |
3319 | case HT_CXLFLASH_LUN_PROVISION_SUBCMD_DELETE_LUN: |
3320 | type = SISL_AFU_LUN_PROVISION_DELETE; |
3321 | size = 0; |
3322 | lun_id = lunprov->lun_id; |
3323 | break; |
3324 | case HT_CXLFLASH_LUN_PROVISION_SUBCMD_QUERY_PORT: |
		fc_port_regs = get_fc_port_regs(cfg, port);
3326 | |
3327 | reg = readq_be(&fc_port_regs[FC_MAX_NUM_LUNS / 8]); |
3328 | lunprov->max_num_luns = reg; |
3329 | reg = readq_be(&fc_port_regs[FC_CUR_NUM_LUNS / 8]); |
3330 | lunprov->cur_num_luns = reg; |
3331 | reg = readq_be(&fc_port_regs[FC_MAX_CAP_PORT / 8]); |
3332 | lunprov->max_cap_port = reg; |
3333 | reg = readq_be(&fc_port_regs[FC_CUR_CAP_PORT / 8]); |
3334 | lunprov->cur_cap_port = reg; |
3335 | |
3336 | goto out; |
3337 | default: |
3338 | rc = -EINVAL; |
3339 | goto out; |
3340 | } |
3341 | |
3342 | memset(&rcb, 0, sizeof(rcb)); |
3343 | memset(&asa, 0, sizeof(asa)); |
3344 | rcb.req_flags = SISL_REQ_FLAGS_AFU_CMD; |
3345 | rcb.lun_id = lun_id; |
3346 | rcb.msi = SISL_MSI_RRQ_UPDATED; |
3347 | rcb.timeout = MC_LUN_PROV_TIMEOUT; |
3348 | rcb.ioasa = &asa; |
3349 | |
3350 | rcb.cdb[0] = SISL_AFU_CMD_LUN_PROVISION; |
3351 | rcb.cdb[1] = type; |
3352 | rcb.cdb[2] = port; |
	put_unaligned_be64(size, &rcb.cdb[8]);
3354 | |
	rc = send_afu_cmd(afu, &rcb);
	if (rc) {
		dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
3358 | __func__, rc, asa.ioasc, asa.afu_extra); |
3359 | goto out; |
3360 | } |
3361 | |
3362 | if (scmd == HT_CXLFLASH_LUN_PROVISION_SUBCMD_CREATE_LUN) { |
3363 | lunprov->lun_id = (u64)asa.lunid_hi << 32 | asa.lunid_lo; |
3364 | memcpy(lunprov->wwid, asa.wwid, sizeof(lunprov->wwid)); |
3365 | } |
3366 | out: |
3367 | dev_dbg(dev, "%s: returning rc=%d\n" , __func__, rc); |
3368 | return rc; |
3369 | } |
3370 | |
3371 | /** |
3372 | * cxlflash_afu_debug() - host AFU debug handler |
3373 | * @cfg: Internal structure associated with the host. |
3374 | * @afu_dbg: Kernel copy of userspace ioctl data structure. |
3375 | * |
3376 | * For debug requests requiring a data buffer, always provide an aligned |
3377 | * (cache line) buffer to the AFU to appease any alignment requirements. |
3378 | * |
3379 | * Return: 0 on success, -errno on failure |
3380 | */ |
3381 | static int cxlflash_afu_debug(struct cxlflash_cfg *cfg, |
3382 | struct ht_cxlflash_afu_debug *afu_dbg) |
3383 | { |
3384 | struct afu *afu = cfg->afu; |
3385 | struct device *dev = &cfg->dev->dev; |
3386 | struct sisl_ioarcb rcb; |
3387 | struct sisl_ioasa asa; |
3388 | char *buf = NULL; |
3389 | char *kbuf = NULL; |
3390 | void __user *ubuf = (__force void __user *)afu_dbg->data_ea; |
3391 | u16 req_flags = SISL_REQ_FLAGS_AFU_CMD; |
3392 | u32 ulen = afu_dbg->data_len; |
3393 | bool is_write = afu_dbg->hdr.flags & HT_CXLFLASH_HOST_WRITE; |
3394 | int rc = 0; |
3395 | |
3396 | if (!afu_is_afu_debug(afu)) { |
3397 | rc = -ENOTSUPP; |
3398 | goto out; |
3399 | } |
3400 | |
3401 | if (ulen) { |
3402 | req_flags |= SISL_REQ_FLAGS_SUP_UNDERRUN; |
3403 | |
3404 | if (ulen > HT_CXLFLASH_AFU_DEBUG_MAX_DATA_LEN) { |
3405 | rc = -EINVAL; |
3406 | goto out; |
3407 | } |
3408 | |
		buf = kmalloc(ulen + cache_line_size() - 1, GFP_KERNEL);
3410 | if (unlikely(!buf)) { |
3411 | rc = -ENOMEM; |
3412 | goto out; |
3413 | } |
3414 | |
3415 | kbuf = PTR_ALIGN(buf, cache_line_size()); |
3416 | |
3417 | if (is_write) { |
3418 | req_flags |= SISL_REQ_FLAGS_HOST_WRITE; |
3419 | |
			if (copy_from_user(kbuf, ubuf, ulen)) {
3421 | rc = -EFAULT; |
3422 | goto out; |
3423 | } |
3424 | } |
3425 | } |
3426 | |
3427 | memset(&rcb, 0, sizeof(rcb)); |
3428 | memset(&asa, 0, sizeof(asa)); |
3429 | |
3430 | rcb.req_flags = req_flags; |
3431 | rcb.msi = SISL_MSI_RRQ_UPDATED; |
3432 | rcb.timeout = MC_AFU_DEBUG_TIMEOUT; |
3433 | rcb.ioasa = &asa; |
3434 | |
3435 | if (ulen) { |
3436 | rcb.data_len = ulen; |
3437 | rcb.data_ea = (uintptr_t)kbuf; |
3438 | } |
3439 | |
3440 | rcb.cdb[0] = SISL_AFU_CMD_DEBUG; |
3441 | memcpy(&rcb.cdb[4], afu_dbg->afu_subcmd, |
3442 | HT_CXLFLASH_AFU_DEBUG_SUBCMD_LEN); |
3443 | |
	rc = send_afu_cmd(afu, &rcb);
	if (rc) {
		dev_err(dev, "%s: send_afu_cmd failed rc=%d asc=%08x afux=%x\n",
3447 | __func__, rc, asa.ioasc, asa.afu_extra); |
3448 | goto out; |
3449 | } |
3450 | |
3451 | if (ulen && !is_write) { |
		if (copy_to_user(ubuf, kbuf, ulen))
3453 | rc = -EFAULT; |
3454 | } |
3455 | out: |
	kfree(buf);
	dev_dbg(dev, "%s: returning rc=%d\n", __func__, rc);
3458 | return rc; |
3459 | } |
3460 | |
3461 | /** |
3462 | * cxlflash_chr_ioctl() - character device IOCTL handler |
3463 | * @file: File pointer for this device. |
3464 | * @cmd: IOCTL command. |
3465 | * @arg: Userspace ioctl data structure. |
3466 | * |
3467 | * A read/write semaphore is used to implement a 'drain' of currently |
3468 | * running ioctls. The read semaphore is taken at the beginning of each |
3469 | * ioctl thread and released upon concluding execution. Additionally the |
3470 | * semaphore should be released and then reacquired in any ioctl execution |
3471 | * path which will wait for an event to occur that is outside the scope of |
 * the ioctl (e.g. an adapter reset). To drain the ioctls currently running,
3473 | * a thread simply needs to acquire the write semaphore. |
3474 | * |
3475 | * Return: 0 on success, -errno on failure |
3476 | */ |
3477 | static long cxlflash_chr_ioctl(struct file *file, unsigned int cmd, |
3478 | unsigned long arg) |
3479 | { |
3480 | typedef int (*hioctl) (struct cxlflash_cfg *, void *); |
3481 | |
3482 | struct cxlflash_cfg *cfg = file->private_data; |
3483 | struct device *dev = &cfg->dev->dev; |
3484 | char buf[sizeof(union cxlflash_ht_ioctls)]; |
3485 | void __user *uarg = (void __user *)arg; |
3486 | struct ht_cxlflash_hdr *hdr; |
3487 | size_t size = 0; |
3488 | bool known_ioctl = false; |
3489 | int idx = 0; |
3490 | int rc = 0; |
3491 | hioctl do_ioctl = NULL; |
3492 | |
3493 | static const struct { |
3494 | size_t size; |
3495 | hioctl ioctl; |
3496 | } ioctl_tbl[] = { /* NOTE: order matters here */ |
3497 | { sizeof(struct ht_cxlflash_lun_provision), |
3498 | (hioctl)cxlflash_lun_provision }, |
3499 | { sizeof(struct ht_cxlflash_afu_debug), |
3500 | (hioctl)cxlflash_afu_debug }, |
3501 | }; |
3502 | |
3503 | /* Hold read semaphore so we can drain if needed */ |
	down_read(&cfg->ioctl_rwsem);

	dev_dbg(dev, "%s: cmd=%u idx=%d tbl_size=%lu\n",
3507 | __func__, cmd, idx, sizeof(ioctl_tbl)); |
3508 | |
3509 | switch (cmd) { |
3510 | case HT_CXLFLASH_LUN_PROVISION: |
3511 | case HT_CXLFLASH_AFU_DEBUG: |
3512 | known_ioctl = true; |
		idx = _IOC_NR(cmd) - _IOC_NR(HT_CXLFLASH_LUN_PROVISION);
3514 | size = ioctl_tbl[idx].size; |
3515 | do_ioctl = ioctl_tbl[idx].ioctl; |
3516 | |
3517 | if (likely(do_ioctl)) |
3518 | break; |
3519 | |
3520 | fallthrough; |
3521 | default: |
3522 | rc = -EINVAL; |
3523 | goto out; |
3524 | } |
3525 | |
3526 | if (unlikely(copy_from_user(&buf, uarg, size))) { |
3527 | dev_err(dev, "%s: copy_from_user() fail " |
3528 | "size=%lu cmd=%d (%s) uarg=%p\n" , |
3529 | __func__, size, cmd, decode_hioctl(cmd), uarg); |
3530 | rc = -EFAULT; |
3531 | goto out; |
3532 | } |
3533 | |
3534 | hdr = (struct ht_cxlflash_hdr *)&buf; |
3535 | if (hdr->version != HT_CXLFLASH_VERSION_0) { |
3536 | dev_dbg(dev, "%s: Version %u not supported for %s\n" , |
3537 | __func__, hdr->version, decode_hioctl(cmd)); |
3538 | rc = -EINVAL; |
3539 | goto out; |
3540 | } |
3541 | |
3542 | if (hdr->rsvd[0] || hdr->rsvd[1] || hdr->return_flags) { |
3543 | dev_dbg(dev, "%s: Reserved/rflags populated\n" , __func__); |
3544 | rc = -EINVAL; |
3545 | goto out; |
3546 | } |
3547 | |
3548 | rc = do_ioctl(cfg, (void *)&buf); |
3549 | if (likely(!rc)) |
3550 | if (unlikely(copy_to_user(uarg, &buf, size))) { |
3551 | dev_err(dev, "%s: copy_to_user() fail " |
3552 | "size=%lu cmd=%d (%s) uarg=%p\n" , |
3553 | __func__, size, cmd, decode_hioctl(cmd), uarg); |
3554 | rc = -EFAULT; |
3555 | } |
3556 | |
3557 | /* fall through to exit */ |
3558 | |
3559 | out: |
	up_read(&cfg->ioctl_rwsem);
	if (unlikely(rc && known_ioctl))
		dev_err(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
			__func__, decode_hioctl(cmd), cmd, rc);
	else
		dev_dbg(dev, "%s: ioctl %s (%08X) returned rc=%d\n",
3566 | __func__, decode_hioctl(cmd), cmd, rc); |
3567 | return rc; |
3568 | } |
3569 | |
3570 | /* |
3571 | * Character device file operations |
3572 | */ |
3573 | static const struct file_operations cxlflash_chr_fops = { |
3574 | .owner = THIS_MODULE, |
3575 | .open = cxlflash_chr_open, |
3576 | .unlocked_ioctl = cxlflash_chr_ioctl, |
3577 | .compat_ioctl = compat_ptr_ioctl, |
3578 | }; |
3579 | |
3580 | /** |
3581 | * init_chrdev() - initialize the character device for the host |
3582 | * @cfg: Internal structure associated with the host. |
3583 | * |
3584 | * Return: 0 on success, -errno on failure |
3585 | */ |
3586 | static int init_chrdev(struct cxlflash_cfg *cfg) |
3587 | { |
3588 | struct device *dev = &cfg->dev->dev; |
3589 | struct device *char_dev; |
3590 | dev_t devno; |
3591 | int minor; |
3592 | int rc = 0; |
3593 | |
3594 | minor = cxlflash_get_minor(); |
3595 | if (unlikely(minor < 0)) { |
3596 | dev_err(dev, "%s: Exhausted allowed adapters\n" , __func__); |
3597 | rc = -ENOSPC; |
3598 | goto out; |
3599 | } |
3600 | |
3601 | devno = MKDEV(cxlflash_major, minor); |
3602 | cdev_init(&cfg->cdev, &cxlflash_chr_fops); |
3603 | |
3604 | rc = cdev_add(&cfg->cdev, devno, 1); |
3605 | if (rc) { |
3606 | dev_err(dev, "%s: cdev_add failed rc=%d\n" , __func__, rc); |
3607 | goto err1; |
3608 | } |
3609 | |
	char_dev = device_create(&cxlflash_class, NULL, devno,
				 NULL, "cxlflash%d", minor);
	if (IS_ERR(char_dev)) {
		rc = PTR_ERR(char_dev);
		dev_err(dev, "%s: device_create failed rc=%d\n",
3615 | __func__, rc); |
3616 | goto err2; |
3617 | } |
3618 | |
3619 | cfg->chardev = char_dev; |
3620 | out: |
3621 | dev_dbg(dev, "%s: returning rc=%d\n" , __func__, rc); |
3622 | return rc; |
3623 | err2: |
3624 | cdev_del(&cfg->cdev); |
3625 | err1: |
3626 | cxlflash_put_minor(minor); |
3627 | goto out; |
3628 | } |
3629 | |
3630 | /** |
3631 | * cxlflash_probe() - PCI entry point to add host |
3632 | * @pdev: PCI device associated with the host. |
3633 | * @dev_id: PCI device id associated with device. |
3634 | * |
3635 | * The device will initially start out in a 'probing' state and |
3636 | * transition to the 'normal' state at the end of a successful |
3637 | * probe. Should an EEH event occur during probe, the notification |
3638 | * thread (error_detected()) will wait until the probe handler |
3639 | * is nearly complete. At that time, the device will be moved to |
3640 | * a 'probed' state and the EEH thread woken up to drive the slot |
3641 | * reset and recovery (device moves to 'normal' state). Meanwhile, |
3642 | * the probe will be allowed to exit successfully. |
3643 | * |
3644 | * Return: 0 on success, -errno on failure |
3645 | */ |
3646 | static int cxlflash_probe(struct pci_dev *pdev, |
3647 | const struct pci_device_id *dev_id) |
3648 | { |
3649 | struct Scsi_Host *host; |
3650 | struct cxlflash_cfg *cfg = NULL; |
3651 | struct device *dev = &pdev->dev; |
3652 | struct dev_dependent_vals *ddv; |
3653 | int rc = 0; |
3654 | int k; |
3655 | |
3656 | dev_dbg(&pdev->dev, "%s: Found CXLFLASH with IRQ: %d\n" , |
3657 | __func__, pdev->irq); |
3658 | |
3659 | ddv = (struct dev_dependent_vals *)dev_id->driver_data; |
3660 | driver_template.max_sectors = ddv->max_sectors; |
3661 | |
3662 | host = scsi_host_alloc(&driver_template, sizeof(struct cxlflash_cfg)); |
3663 | if (!host) { |
3664 | dev_err(dev, "%s: scsi_host_alloc failed\n" , __func__); |
3665 | rc = -ENOMEM; |
3666 | goto out; |
3667 | } |
3668 | |
3669 | host->max_id = CXLFLASH_MAX_NUM_TARGETS_PER_BUS; |
3670 | host->max_lun = CXLFLASH_MAX_NUM_LUNS_PER_TARGET; |
3671 | host->unique_id = host->host_no; |
3672 | host->max_cmd_len = CXLFLASH_MAX_CDB_LEN; |
3673 | |
	cfg = shost_priv(host);
3675 | cfg->state = STATE_PROBING; |
3676 | cfg->host = host; |
3677 | rc = alloc_mem(cfg); |
3678 | if (rc) { |
3679 | dev_err(dev, "%s: alloc_mem failed\n" , __func__); |
3680 | rc = -ENOMEM; |
3681 | scsi_host_put(t: cfg->host); |
3682 | goto out; |
3683 | } |
3684 | |
3685 | cfg->init_state = INIT_STATE_NONE; |
3686 | cfg->dev = pdev; |
3687 | cfg->cxl_fops = cxlflash_cxl_fops; |
3688 | cfg->ops = cxlflash_assign_ops(ddv); |
3689 | WARN_ON_ONCE(!cfg->ops); |
3690 | |
3691 | /* |
3692 | * Promoted LUNs move to the top of the LUN table. The rest stay on |
3693 | * the bottom half. The bottom half grows from the end (index = 255), |
3694 | * whereas the top half grows from the beginning (index = 0). |
3695 | * |
3696 | * Initialize the last LUN index for all possible ports. |
3697 | */ |
3698 | cfg->promote_lun_index = 0; |
3699 | |
3700 | for (k = 0; k < MAX_FC_PORTS; k++) |
3701 | cfg->last_lun_index[k] = CXLFLASH_NUM_VLUNS/2 - 1; |
3702 | |
3703 | cfg->dev_id = (struct pci_device_id *)dev_id; |
3704 | |
3705 | init_waitqueue_head(&cfg->tmf_waitq); |
3706 | init_waitqueue_head(&cfg->reset_waitq); |
3707 | |
3708 | INIT_WORK(&cfg->work_q, cxlflash_worker_thread); |
3709 | cfg->lr_state = LINK_RESET_INVALID; |
3710 | cfg->lr_port = -1; |
3711 | spin_lock_init(&cfg->tmf_slock); |
3712 | mutex_init(&cfg->ctx_tbl_list_mutex); |
3713 | mutex_init(&cfg->ctx_recovery_mutex); |
3714 | init_rwsem(&cfg->ioctl_rwsem); |
	INIT_LIST_HEAD(&cfg->ctx_err_recovery);
	INIT_LIST_HEAD(&cfg->lluns);

	pci_set_drvdata(pdev, cfg);
3719 | |
3720 | rc = init_pci(cfg); |
3721 | if (rc) { |
3722 | dev_err(dev, "%s: init_pci failed rc=%d\n" , __func__, rc); |
3723 | goto out_remove; |
3724 | } |
3725 | cfg->init_state = INIT_STATE_PCI; |
3726 | |
3727 | cfg->afu_cookie = cfg->ops->create_afu(pdev); |
3728 | if (unlikely(!cfg->afu_cookie)) { |
3729 | dev_err(dev, "%s: create_afu failed\n" , __func__); |
3730 | rc = -ENOMEM; |
3731 | goto out_remove; |
3732 | } |
3733 | |
3734 | rc = init_afu(cfg); |
	if (rc && !wq_has_sleeper(&cfg->reset_waitq)) {
		dev_err(dev, "%s: init_afu failed rc=%d\n", __func__, rc);
3737 | goto out_remove; |
3738 | } |
3739 | cfg->init_state = INIT_STATE_AFU; |
3740 | |
3741 | rc = init_scsi(cfg); |
3742 | if (rc) { |
3743 | dev_err(dev, "%s: init_scsi failed rc=%d\n" , __func__, rc); |
3744 | goto out_remove; |
3745 | } |
3746 | cfg->init_state = INIT_STATE_SCSI; |
3747 | |
3748 | rc = init_chrdev(cfg); |
3749 | if (rc) { |
3750 | dev_err(dev, "%s: init_chrdev failed rc=%d\n" , __func__, rc); |
3751 | goto out_remove; |
3752 | } |
3753 | cfg->init_state = INIT_STATE_CDEV; |
3754 | |
	if (wq_has_sleeper(&cfg->reset_waitq)) {
3756 | cfg->state = STATE_PROBED; |
3757 | wake_up_all(&cfg->reset_waitq); |
3758 | } else |
3759 | cfg->state = STATE_NORMAL; |
3760 | out: |
3761 | dev_dbg(dev, "%s: returning rc=%d\n" , __func__, rc); |
3762 | return rc; |
3763 | |
3764 | out_remove: |
3765 | cfg->state = STATE_PROBED; |
3766 | cxlflash_remove(pdev); |
3767 | goto out; |
3768 | } |
3769 | |
3770 | /** |
3771 | * cxlflash_pci_error_detected() - called when a PCI error is detected |
3772 | * @pdev: PCI device struct. |
3773 | * @state: PCI channel state. |
3774 | * |
3775 | * When an EEH occurs during an active reset, wait until the reset is |
3776 | * complete and then take action based upon the device state. |
3777 | * |
3778 | * Return: PCI_ERS_RESULT_NEED_RESET or PCI_ERS_RESULT_DISCONNECT |
3779 | */ |
3780 | static pci_ers_result_t cxlflash_pci_error_detected(struct pci_dev *pdev, |
3781 | pci_channel_state_t state) |
3782 | { |
3783 | int rc = 0; |
3784 | struct cxlflash_cfg *cfg = pci_get_drvdata(pdev); |
3785 | struct device *dev = &cfg->dev->dev; |
3786 | |
3787 | dev_dbg(dev, "%s: pdev=%p state=%u\n" , __func__, pdev, state); |
3788 | |
3789 | switch (state) { |
3790 | case pci_channel_io_frozen: |
3791 | wait_event(cfg->reset_waitq, cfg->state != STATE_RESET && |
3792 | cfg->state != STATE_PROBING); |
3793 | if (cfg->state == STATE_FAILTERM) |
3794 | return PCI_ERS_RESULT_DISCONNECT; |
3795 | |
3796 | cfg->state = STATE_RESET; |
3797 | scsi_block_requests(cfg->host); |
3798 | drain_ioctls(cfg); |
3799 | rc = cxlflash_mark_contexts_error(cfg); |
3800 | if (unlikely(rc)) |
3801 | dev_err(dev, "%s: Failed to mark user contexts rc=%d\n" , |
3802 | __func__, rc); |
3803 | term_afu(cfg); |
3804 | return PCI_ERS_RESULT_NEED_RESET; |
3805 | case pci_channel_io_perm_failure: |
3806 | cfg->state = STATE_FAILTERM; |
3807 | wake_up_all(&cfg->reset_waitq); |
3808 | scsi_unblock_requests(cfg->host); |
3809 | return PCI_ERS_RESULT_DISCONNECT; |
3810 | default: |
3811 | break; |
3812 | } |
3813 | return PCI_ERS_RESULT_NEED_RESET; |
3814 | } |
3815 | |
3816 | /** |
3817 | * cxlflash_pci_slot_reset() - called when PCI slot has been reset |
3818 | * @pdev: PCI device struct. |
3819 | * |
3820 | * This routine is called by the pci error recovery code after the PCI |
3821 | * slot has been reset, just before we should resume normal operations. |
3822 | * |
3823 | * Return: PCI_ERS_RESULT_RECOVERED or PCI_ERS_RESULT_DISCONNECT |
3824 | */ |
3825 | static pci_ers_result_t cxlflash_pci_slot_reset(struct pci_dev *pdev) |
3826 | { |
3827 | int rc = 0; |
3828 | struct cxlflash_cfg *cfg = pci_get_drvdata(pdev); |
3829 | struct device *dev = &cfg->dev->dev; |
3830 | |
3831 | dev_dbg(dev, "%s: pdev=%p\n" , __func__, pdev); |
3832 | |
3833 | rc = init_afu(cfg); |
3834 | if (unlikely(rc)) { |
3835 | dev_err(dev, "%s: EEH recovery failed rc=%d\n" , __func__, rc); |
3836 | return PCI_ERS_RESULT_DISCONNECT; |
3837 | } |
3838 | |
3839 | return PCI_ERS_RESULT_RECOVERED; |
3840 | } |
3841 | |
3842 | /** |
3843 | * cxlflash_pci_resume() - called when normal operation can resume |
3844 | * @pdev: PCI device struct |
3845 | */ |
3846 | static void cxlflash_pci_resume(struct pci_dev *pdev) |
3847 | { |
3848 | struct cxlflash_cfg *cfg = pci_get_drvdata(pdev); |
3849 | struct device *dev = &cfg->dev->dev; |
3850 | |
3851 | dev_dbg(dev, "%s: pdev=%p\n" , __func__, pdev); |
3852 | |
3853 | cfg->state = STATE_NORMAL; |
3854 | wake_up_all(&cfg->reset_waitq); |
3855 | scsi_unblock_requests(cfg->host); |
3856 | } |
3857 | |
3858 | /** |
3859 | * cxlflash_devnode() - provides devtmpfs for devices in the cxlflash class |
3860 | * @dev: Character device. |
3861 | * @mode: Mode that can be used to verify access. |
3862 | * |
3863 | * Return: Allocated string describing the devtmpfs structure. |
3864 | */ |
3865 | static char *cxlflash_devnode(const struct device *dev, umode_t *mode) |
3866 | { |
	return kasprintf(GFP_KERNEL, "cxlflash/%s", dev_name(dev));
3868 | } |
3869 | |
3870 | /** |
3871 | * cxlflash_class_init() - create character device class |
3872 | * |
3873 | * Return: 0 on success, -errno on failure |
3874 | */ |
3875 | static int cxlflash_class_init(void) |
3876 | { |
3877 | dev_t devno; |
3878 | int rc = 0; |
3879 | |
	rc = alloc_chrdev_region(&devno, 0, CXLFLASH_MAX_ADAPTERS, "cxlflash");
3881 | if (unlikely(rc)) { |
3882 | pr_err("%s: alloc_chrdev_region failed rc=%d\n" , __func__, rc); |
3883 | goto out; |
3884 | } |
3885 | |
3886 | cxlflash_major = MAJOR(devno); |
3887 | |
	rc = class_register(&cxlflash_class);
	if (rc) {
		pr_err("%s: class_register failed rc=%d\n", __func__, rc);
3891 | goto err; |
3892 | } |
3893 | |
3894 | out: |
3895 | pr_debug("%s: returning rc=%d\n" , __func__, rc); |
3896 | return rc; |
3897 | err: |
3898 | unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS); |
3899 | goto out; |
3900 | } |
3901 | |
3902 | /** |
3903 | * cxlflash_class_exit() - destroy character device class |
3904 | */ |
3905 | static void cxlflash_class_exit(void) |
3906 | { |
3907 | dev_t devno = MKDEV(cxlflash_major, 0); |
3908 | |
	class_unregister(&cxlflash_class);
3910 | unregister_chrdev_region(devno, CXLFLASH_MAX_ADAPTERS); |
3911 | } |
3912 | |
3913 | static const struct pci_error_handlers cxlflash_err_handler = { |
3914 | .error_detected = cxlflash_pci_error_detected, |
3915 | .slot_reset = cxlflash_pci_slot_reset, |
3916 | .resume = cxlflash_pci_resume, |
3917 | }; |
3918 | |
3919 | /* |
3920 | * PCI device structure |
3921 | */ |
3922 | static struct pci_driver cxlflash_driver = { |
3923 | .name = CXLFLASH_NAME, |
3924 | .id_table = cxlflash_pci_table, |
3925 | .probe = cxlflash_probe, |
3926 | .remove = cxlflash_remove, |
3927 | .shutdown = cxlflash_remove, |
3928 | .err_handler = &cxlflash_err_handler, |
3929 | }; |
3930 | |
3931 | /** |
3932 | * init_cxlflash() - module entry point |
3933 | * |
3934 | * Return: 0 on success, -errno on failure |
3935 | */ |
3936 | static int __init init_cxlflash(void) |
3937 | { |
3938 | int rc; |
3939 | |
3940 | check_sizes(); |
3941 | cxlflash_list_init(); |
3942 | rc = cxlflash_class_init(); |
3943 | if (unlikely(rc)) |
3944 | goto out; |
3945 | |
3946 | rc = pci_register_driver(&cxlflash_driver); |
3947 | if (unlikely(rc)) |
3948 | goto err; |
3949 | out: |
3950 | pr_debug("%s: returning rc=%d\n" , __func__, rc); |
3951 | return rc; |
3952 | err: |
3953 | cxlflash_class_exit(); |
3954 | goto out; |
3955 | } |
3956 | |
3957 | /** |
3958 | * exit_cxlflash() - module exit point |
3959 | */ |
3960 | static void __exit exit_cxlflash(void) |
3961 | { |
3962 | cxlflash_term_global_luns(); |
3963 | cxlflash_free_errpage(); |
3964 | |
	pci_unregister_driver(&cxlflash_driver);
3966 | cxlflash_class_exit(); |
3967 | } |
3968 | |
3969 | module_init(init_cxlflash); |
3970 | module_exit(exit_cxlflash); |
3971 | |